diff --git a/sys/dev/axgbe/if_axgbe_pci.c b/sys/dev/axgbe/if_axgbe_pci.c index dcf769d14f75..beb4ff338dc7 100644 --- a/sys/dev/axgbe/if_axgbe_pci.c +++ b/sys/dev/axgbe/if_axgbe_pci.c @@ -1,2435 +1,2433 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2020 Advanced Micro Devices, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Contact Information : * Rajesh Kumar * Shreyank Amartya */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xgbe.h" #include "xgbe-common.h" #include "miibus_if.h" #include "ifdi_if.h" #include "opt_inet.h" #include "opt_inet6.h" MALLOC_DEFINE(M_AXGBE, "axgbe", "axgbe data"); extern struct if_txrx axgbe_txrx; static int axgbe_sph_enable; /* Function prototypes */ static void *axgbe_register(device_t); static int axgbe_if_attach_pre(if_ctx_t); static int axgbe_if_attach_post(if_ctx_t); static int axgbe_if_detach(if_ctx_t); static void axgbe_if_stop(if_ctx_t); static void axgbe_if_init(if_ctx_t); /* Queue related routines */ static int axgbe_if_tx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int); static int axgbe_if_rx_queues_alloc(if_ctx_t, caddr_t *, uint64_t *, int, int); static int axgbe_alloc_channels(if_ctx_t); static void axgbe_free_channels(struct axgbe_if_softc *); static void axgbe_if_queues_free(if_ctx_t); static int axgbe_if_tx_queue_intr_enable(if_ctx_t, uint16_t); static int axgbe_if_rx_queue_intr_enable(if_ctx_t, uint16_t); /* Interrupt related routines */ static void axgbe_if_disable_intr(if_ctx_t); static void axgbe_if_enable_intr(if_ctx_t); static int axgbe_if_msix_intr_assign(if_ctx_t, int); static void xgbe_free_intr(struct xgbe_prv_data *, struct resource *, void *, int); /* Init and Iflib routines */ static void axgbe_pci_init(struct xgbe_prv_data *); static void axgbe_pci_stop(if_ctx_t); static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *, struct xgbe_channel *); static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *); static int axgbe_if_mtu_set(if_ctx_t, uint32_t); static void axgbe_if_update_admin_status(if_ctx_t); static void axgbe_if_media_status(if_ctx_t, struct ifmediareq *); static int axgbe_if_media_change(if_ctx_t); static int axgbe_if_promisc_set(if_ctx_t, int); static uint64_t axgbe_if_get_counter(if_ctx_t, 
ift_counter); static void axgbe_if_vlan_register(if_ctx_t, uint16_t); static void axgbe_if_vlan_unregister(if_ctx_t, uint16_t); #if __FreeBSD_version >= 1300000 static bool axgbe_if_needs_restart(if_ctx_t, enum iflib_restart_event); #endif static void axgbe_set_counts(if_ctx_t); static void axgbe_init_iflib_softc_ctx(struct axgbe_if_softc *); /* MII interface registered functions */ static int axgbe_miibus_readreg(device_t, int, int); static int axgbe_miibus_writereg(device_t, int, int, int); static void axgbe_miibus_statchg(device_t); /* ISR routines */ static int axgbe_dev_isr(void *); static void axgbe_ecc_isr(void *); static void axgbe_i2c_isr(void *); static void axgbe_an_isr(void *); static int axgbe_msix_que(void *); /* Timer routines */ static void xgbe_service(void *, int); static void xgbe_service_timer(void *); static void xgbe_init_timers(struct xgbe_prv_data *); static void xgbe_stop_timers(struct xgbe_prv_data *); /* Dump routines */ static void xgbe_dump_prop_registers(struct xgbe_prv_data *); /* * Allocate only for MAC (BAR0) and PCS (BAR1) registers, and just point the * MSI-X table bar (BAR5) to iflib. iflib will do the allocation for MSI-X * table. */ static struct resource_spec axgbe_pci_mac_spec[] = { { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, /* MAC regs */ { SYS_RES_MEMORY, PCIR_BAR(1), RF_ACTIVE }, /* PCS regs */ { -1, 0 } }; static const pci_vendor_info_t axgbe_vendor_info_array[] = { PVID(0x1022, 0x1458, "AMD 10 Gigabit Ethernet Driver"), PVID(0x1022, 0x1459, "AMD 10 Gigabit Ethernet Driver"), PVID_END }; static struct xgbe_version_data xgbe_v2a = { .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2, .xpcs_access = XGBE_XPCS_ACCESS_V2, .mmc_64bit = 1, .tx_max_fifo_size = 229376, .rx_max_fifo_size = 229376, .tx_tstamp_workaround = 1, .ecc_support = 1, .i2c_support = 1, .irq_reissue_support = 1, .tx_desc_prefetch = 5, .rx_desc_prefetch = 5, .an_cdr_workaround = 1, }; static struct xgbe_version_data xgbe_v2b = { .init_function_ptrs_phy_impl = xgbe_init_function_ptrs_phy_v2, .xpcs_access = XGBE_XPCS_ACCESS_V2, .mmc_64bit = 1, .tx_max_fifo_size = 65536, .rx_max_fifo_size = 65536, .tx_tstamp_workaround = 1, .ecc_support = 1, .i2c_support = 1, .irq_reissue_support = 1, .tx_desc_prefetch = 5, .rx_desc_prefetch = 5, .an_cdr_workaround = 1, }; /* Device Interface */ static device_method_t ax_methods[] = { DEVMETHOD(device_register, axgbe_register), DEVMETHOD(device_probe, iflib_device_probe), DEVMETHOD(device_attach, iflib_device_attach), DEVMETHOD(device_detach, iflib_device_detach), /* MII interface */ DEVMETHOD(miibus_readreg, axgbe_miibus_readreg), DEVMETHOD(miibus_writereg, axgbe_miibus_writereg), DEVMETHOD(miibus_statchg, axgbe_miibus_statchg), DEVMETHOD_END }; static driver_t ax_driver = { "ax", ax_methods, sizeof(struct axgbe_if_softc), }; DRIVER_MODULE(axp, pci, ax_driver, 0, 0); DRIVER_MODULE(miibus, ax, miibus_driver, 0, 0); IFLIB_PNP_INFO(pci, ax_driver, axgbe_vendor_info_array); MODULE_DEPEND(ax, pci, 1, 1, 1); MODULE_DEPEND(ax, ether, 1, 1, 1); MODULE_DEPEND(ax, iflib, 1, 1, 1); MODULE_DEPEND(ax, miibus, 1, 1, 1); /* Iflib Interface */ static device_method_t axgbe_if_methods[] = { DEVMETHOD(ifdi_attach_pre, axgbe_if_attach_pre), DEVMETHOD(ifdi_attach_post, axgbe_if_attach_post), DEVMETHOD(ifdi_detach, axgbe_if_detach), DEVMETHOD(ifdi_init, axgbe_if_init), DEVMETHOD(ifdi_stop, axgbe_if_stop), DEVMETHOD(ifdi_msix_intr_assign, axgbe_if_msix_intr_assign), DEVMETHOD(ifdi_intr_enable, axgbe_if_enable_intr), DEVMETHOD(ifdi_intr_disable, 
axgbe_if_disable_intr), DEVMETHOD(ifdi_tx_queue_intr_enable, axgbe_if_tx_queue_intr_enable), DEVMETHOD(ifdi_rx_queue_intr_enable, axgbe_if_rx_queue_intr_enable), DEVMETHOD(ifdi_tx_queues_alloc, axgbe_if_tx_queues_alloc), DEVMETHOD(ifdi_rx_queues_alloc, axgbe_if_rx_queues_alloc), DEVMETHOD(ifdi_queues_free, axgbe_if_queues_free), DEVMETHOD(ifdi_update_admin_status, axgbe_if_update_admin_status), DEVMETHOD(ifdi_mtu_set, axgbe_if_mtu_set), DEVMETHOD(ifdi_media_status, axgbe_if_media_status), DEVMETHOD(ifdi_media_change, axgbe_if_media_change), DEVMETHOD(ifdi_promisc_set, axgbe_if_promisc_set), DEVMETHOD(ifdi_get_counter, axgbe_if_get_counter), DEVMETHOD(ifdi_vlan_register, axgbe_if_vlan_register), DEVMETHOD(ifdi_vlan_unregister, axgbe_if_vlan_unregister), #if __FreeBSD_version >= 1300000 DEVMETHOD(ifdi_needs_restart, axgbe_if_needs_restart), #endif DEVMETHOD_END }; static driver_t axgbe_if_driver = { "axgbe_if", axgbe_if_methods, sizeof(struct axgbe_if_softc) }; /* Iflib Shared Context */ static struct if_shared_ctx axgbe_sctx_init = { .isc_magic = IFLIB_MAGIC, .isc_driver = &axgbe_if_driver, .isc_q_align = PAGE_SIZE, .isc_tx_maxsize = XGBE_TSO_MAX_SIZE + sizeof(struct ether_vlan_header), .isc_tx_maxsegsize = PAGE_SIZE, .isc_tso_maxsize = XGBE_TSO_MAX_SIZE + sizeof(struct ether_vlan_header), .isc_tso_maxsegsize = PAGE_SIZE, .isc_rx_maxsize = MJUM9BYTES, .isc_rx_maxsegsize = MJUM9BYTES, .isc_rx_nsegments = 1, .isc_admin_intrcnt = 4, .isc_vendor_info = axgbe_vendor_info_array, .isc_driver_version = XGBE_DRV_VERSION, .isc_ntxd_min = {XGBE_TX_DESC_CNT_MIN}, .isc_ntxd_default = {XGBE_TX_DESC_CNT_DEFAULT}, .isc_ntxd_max = {XGBE_TX_DESC_CNT_MAX}, .isc_ntxqs = 1, .isc_flags = IFLIB_TSO_INIT_IP | IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_NEED_ETHER_PAD, }; static void * axgbe_register(device_t dev) { int axgbe_nfl; int axgbe_nrxqs; int error, i; char *value = NULL; value = kern_getenv("dev.ax.sph_enable"); if (value) { axgbe_sph_enable = strtol(value, NULL, 10); freeenv(value); } else { /* * No tunable found, generate one with default values * Note: only a reboot will reveal the new kenv */ error = kern_setenv("dev.ax.sph_enable", "1"); if (error) { printf("Error setting tunable, using default driver values\n"); } axgbe_sph_enable = 1; } if (!axgbe_sph_enable) { axgbe_nfl = 1; axgbe_nrxqs = 1; } else { axgbe_nfl = 2; axgbe_nrxqs = 2; } axgbe_sctx_init.isc_nfl = axgbe_nfl; axgbe_sctx_init.isc_nrxqs = axgbe_nrxqs; for (i = 0 ; i < axgbe_nrxqs ; i++) { axgbe_sctx_init.isc_nrxd_min[i] = XGBE_RX_DESC_CNT_MIN; axgbe_sctx_init.isc_nrxd_default[i] = XGBE_RX_DESC_CNT_DEFAULT; axgbe_sctx_init.isc_nrxd_max[i] = XGBE_RX_DESC_CNT_MAX; } return (&axgbe_sctx_init); } /* MII Interface Functions */ static int axgbe_miibus_readreg(device_t dev, int phy, int reg) { struct axgbe_if_softc *sc = iflib_get_softc(device_get_softc(dev)); struct xgbe_prv_data *pdata = &sc->pdata; int val; axgbe_printf(3, "%s: phy %d reg %d\n", __func__, phy, reg); val = xgbe_phy_mii_read(pdata, phy, reg); axgbe_printf(2, "%s: val 0x%x\n", __func__, val); return (val & 0xFFFF); } static int axgbe_miibus_writereg(device_t dev, int phy, int reg, int val) { struct axgbe_if_softc *sc = iflib_get_softc(device_get_softc(dev)); struct xgbe_prv_data *pdata = &sc->pdata; axgbe_printf(3, "%s: phy %d reg %d val 0x%x\n", __func__, phy, reg, val); xgbe_phy_mii_write(pdata, phy, reg, val); return(0); } static void axgbe_miibus_statchg(device_t dev) { struct axgbe_if_softc *sc = iflib_get_softc(device_get_softc(dev)); struct xgbe_prv_data *pdata = 
&sc->pdata; struct mii_data *mii = device_get_softc(pdata->axgbe_miibus); if_t ifp = pdata->netdev; int bmsr; axgbe_printf(2, "%s: Link %d/%d\n", __func__, pdata->phy.link, pdata->phy_link); if (mii == NULL || ifp == NULL || (if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) return; if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: pdata->phy.link = 1; break; case IFM_1000_T: case IFM_1000_SX: case IFM_2500_SX: pdata->phy.link = 1; break; default: pdata->phy.link = 0; break; } } else pdata->phy_link = 0; bmsr = axgbe_miibus_readreg(pdata->dev, pdata->mdio_addr, MII_BMSR); if (bmsr & BMSR_ANEG) { axgbe_printf(2, "%s: Autoneg Done\n", __func__); /* Raise AN Interrupt */ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, XGBE_AN_CL73_INT_MASK); } } static int axgbe_if_attach_pre(if_ctx_t ctx) { struct axgbe_if_softc *sc; struct xgbe_prv_data *pdata; struct resource *mac_res[2]; if_softc_ctx_t scctx; if_shared_ctx_t sctx; device_t dev; unsigned int ma_lo, ma_hi; unsigned int reg; int ret; sc = iflib_get_softc(ctx); sc->pdata.dev = dev = iflib_get_dev(ctx); sc->sctx = sctx = iflib_get_sctx(ctx); sc->scctx = scctx = iflib_get_softc_ctx(ctx); sc->media = iflib_get_media(ctx); sc->ctx = ctx; sc->link_status = LINK_STATE_DOWN; pdata = &sc->pdata; pdata->netdev = iflib_get_ifp(ctx); spin_lock_init(&pdata->xpcs_lock); /* Initialize locks */ mtx_init(&pdata->rss_mutex, "xgbe rss mutex lock", NULL, MTX_DEF); mtx_init(&pdata->mdio_mutex, "xgbe MDIO mutex lock", NULL, MTX_SPIN); /* Allocate VLAN bitmap */ pdata->active_vlans = bit_alloc(VLAN_NVID, M_AXGBE, M_WAITOK|M_ZERO); pdata->num_active_vlans = 0; /* Get the version data */ DBGPR("%s: Device ID: 0x%x\n", __func__, pci_get_device(dev)); if (pci_get_device(dev) == 0x1458) sc->pdata.vdata = &xgbe_v2a; else if (pci_get_device(dev) == 0x1459) sc->pdata.vdata = &xgbe_v2b; /* PCI setup */ if (bus_alloc_resources(dev, axgbe_pci_mac_spec, mac_res)) { axgbe_error("Unable to allocate bus resources\n"); ret = ENXIO; goto free_vlans; } sc->pdata.xgmac_res = mac_res[0]; sc->pdata.xpcs_res = mac_res[1]; /* Set the PCS indirect addressing definition registers*/ pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF; pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT; /* Configure the PCS indirect addressing support */ reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg); pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET); pdata->xpcs_window <<= 6; pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE); pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7); pdata->xpcs_window_mask = pdata->xpcs_window_size - 1; DBGPR("xpcs window def : %#010x\n", pdata->xpcs_window_def_reg); DBGPR("xpcs window sel : %#010x\n", pdata->xpcs_window_sel_reg); DBGPR("xpcs window : %#010x\n", pdata->xpcs_window); DBGPR("xpcs window size : %#010x\n", pdata->xpcs_window_size); DBGPR("xpcs window mask : %#010x\n", pdata->xpcs_window_mask); /* Enable all interrupts in the hardware */ XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff); /* Retrieve the MAC address */ ma_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO); ma_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI); pdata->mac_addr[0] = ma_lo & 0xff; pdata->mac_addr[1] = (ma_lo >> 8) & 0xff; pdata->mac_addr[2] = (ma_lo >>16) & 0xff; pdata->mac_addr[3] = (ma_lo >> 24) & 0xff; pdata->mac_addr[4] = ma_hi & 0xff; pdata->mac_addr[5] = (ma_hi >> 8) & 0xff; if (!XP_GET_BITS(ma_hi, XP_MAC_ADDR_HI, VALID)) { axgbe_error("Invalid mac 
address\n"); ret = EINVAL; goto release_bus_resource; } iflib_set_mac(ctx, pdata->mac_addr); /* Clock settings */ pdata->sysclk_rate = XGBE_V2_DMA_CLOCK_FREQ; pdata->ptpclk_rate = XGBE_V2_PTP_CLOCK_FREQ; /* Set the DMA coherency values */ pdata->coherent = 1; pdata->arcr = XGBE_DMA_PCI_ARCR; pdata->awcr = XGBE_DMA_PCI_AWCR; pdata->awarcr = XGBE_DMA_PCI_AWARCR; /* Read the port property registers */ pdata->pp0 = XP_IOREAD(pdata, XP_PROP_0); pdata->pp1 = XP_IOREAD(pdata, XP_PROP_1); pdata->pp2 = XP_IOREAD(pdata, XP_PROP_2); pdata->pp3 = XP_IOREAD(pdata, XP_PROP_3); pdata->pp4 = XP_IOREAD(pdata, XP_PROP_4); DBGPR("port property 0 = %#010x\n", pdata->pp0); DBGPR("port property 1 = %#010x\n", pdata->pp1); DBGPR("port property 2 = %#010x\n", pdata->pp2); DBGPR("port property 3 = %#010x\n", pdata->pp3); DBGPR("port property 4 = %#010x\n", pdata->pp4); /* Set the maximum channels and queues */ pdata->tx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1, MAX_TX_DMA); pdata->rx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1, MAX_RX_DMA); pdata->tx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1, MAX_TX_QUEUES); pdata->rx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1, MAX_RX_QUEUES); DBGPR("max tx/rx channel count = %u/%u\n", pdata->tx_max_channel_count, pdata->rx_max_channel_count); DBGPR("max tx/rx hw queue count = %u/%u\n", pdata->tx_max_q_count, pdata->rx_max_q_count); axgbe_set_counts(ctx); /* Set the maximum fifo amounts */ pdata->tx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2, TX_FIFO_SIZE); pdata->tx_max_fifo_size *= 16384; pdata->tx_max_fifo_size = min(pdata->tx_max_fifo_size, pdata->vdata->tx_max_fifo_size); pdata->rx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2, RX_FIFO_SIZE); pdata->rx_max_fifo_size *= 16384; pdata->rx_max_fifo_size = min(pdata->rx_max_fifo_size, pdata->vdata->rx_max_fifo_size); DBGPR("max tx/rx max fifo size = %u/%u\n", pdata->tx_max_fifo_size, pdata->rx_max_fifo_size); /* Initialize IFLIB if_softc_ctx_t */ axgbe_init_iflib_softc_ctx(sc); /* Alloc channels */ if (axgbe_alloc_channels(ctx)) { axgbe_error("Unable to allocate channel memory\n"); ret = ENOMEM; goto release_bus_resource; } TASK_INIT(&pdata->service_work, 0, xgbe_service, pdata); /* create the workqueue */ pdata->dev_workqueue = taskqueue_create("axgbe", M_WAITOK, taskqueue_thread_enqueue, &pdata->dev_workqueue); if (pdata->dev_workqueue == NULL) { axgbe_error("Unable to allocate workqueue\n"); ret = ENOMEM; goto free_channels; } ret = taskqueue_start_threads(&pdata->dev_workqueue, 1, PI_NET, "axgbe dev taskq"); if (ret) { axgbe_error("Unable to start taskqueue\n"); ret = ENOMEM; goto free_task_queue; } /* Init timers */ xgbe_init_timers(pdata); return (0); free_task_queue: taskqueue_free(pdata->dev_workqueue); free_channels: axgbe_free_channels(sc); release_bus_resource: bus_release_resources(dev, axgbe_pci_mac_spec, mac_res); free_vlans: free(pdata->active_vlans, M_AXGBE); return (ret); } /* axgbe_if_attach_pre */ static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata) { xgbe_init_function_ptrs_dev(&pdata->hw_if); xgbe_init_function_ptrs_phy(&pdata->phy_if); xgbe_init_function_ptrs_i2c(&pdata->i2c_if); xgbe_init_function_ptrs_desc(&pdata->desc_if); pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if); } static void axgbe_set_counts(if_ctx_t ctx) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; cpuset_t lcpus; int cpu_count, err; size_t len; /* Set all function pointers */ xgbe_init_all_fptrs(pdata); /* Populate the hardware 
features */ xgbe_get_all_hw_features(pdata); if (!pdata->tx_max_channel_count) pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt; if (!pdata->rx_max_channel_count) pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt; if (!pdata->tx_max_q_count) pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt; if (!pdata->rx_max_q_count) pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt; /* * Calculate the number of Tx and Rx rings to be created * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set * the number of Tx queues to the number of Tx channels * enabled * -Rx (DMA) Channels do not map 1-to-1 so use the actual * number of Rx queues or maximum allowed */ /* Get cpu count from sysctl */ len = sizeof(cpu_count); err = kernel_sysctlbyname(curthread, "hw.ncpu", &cpu_count, &len, NULL, 0, NULL, 0); if (err) { axgbe_error("Unable to fetch number of cpus\n"); cpu_count = 1; } if (bus_get_cpus(pdata->dev, INTR_CPUS, sizeof(lcpus), &lcpus) != 0) { axgbe_error("Unable to fetch CPU list\n"); /* TODO - handle CPU_COPY(&all_cpus, &lcpus); */ } DBGPR("ncpu %d intrcpu %d\n", cpu_count, CPU_COUNT(&lcpus)); pdata->tx_ring_count = min(CPU_COUNT(&lcpus), pdata->hw_feat.tx_ch_cnt); pdata->tx_ring_count = min(pdata->tx_ring_count, pdata->tx_max_channel_count); pdata->tx_ring_count = min(pdata->tx_ring_count, pdata->tx_max_q_count); pdata->tx_q_count = pdata->tx_ring_count; pdata->rx_ring_count = min(CPU_COUNT(&lcpus), pdata->hw_feat.rx_ch_cnt); pdata->rx_ring_count = min(pdata->rx_ring_count, pdata->rx_max_channel_count); pdata->rx_q_count = min(pdata->hw_feat.rx_q_cnt, pdata->rx_max_q_count); DBGPR("TX/RX max channel count = %u/%u\n", pdata->tx_max_channel_count, pdata->rx_max_channel_count); DBGPR("TX/RX max queue count = %u/%u\n", pdata->tx_max_q_count, pdata->rx_max_q_count); DBGPR("TX/RX DMA ring count = %u/%u\n", pdata->tx_ring_count, pdata->rx_ring_count); DBGPR("TX/RX hardware queue count = %u/%u\n", pdata->tx_q_count, pdata->rx_q_count); } /* axgbe_set_counts */ static void axgbe_init_iflib_softc_ctx(struct axgbe_if_softc *sc) { struct xgbe_prv_data *pdata = &sc->pdata; if_softc_ctx_t scctx = sc->scctx; if_shared_ctx_t sctx = sc->sctx; int i; scctx->isc_nrxqsets = pdata->rx_q_count; scctx->isc_ntxqsets = pdata->tx_q_count; scctx->isc_msix_bar = pci_msix_table_bar(pdata->dev); scctx->isc_tx_nsegments = 32; for (i = 0; i < sctx->isc_ntxqs; i++) { scctx->isc_txqsizes[i] = roundup2(scctx->isc_ntxd[i] * sizeof(struct xgbe_ring_desc), 128); scctx->isc_txd_size[i] = sizeof(struct xgbe_ring_desc); } for (i = 0; i < sctx->isc_nrxqs; i++) { scctx->isc_rxqsizes[i] = roundup2(scctx->isc_nrxd[i] * sizeof(struct xgbe_ring_desc), 128); scctx->isc_rxd_size[i] = sizeof(struct xgbe_ring_desc); } scctx->isc_tx_tso_segments_max = 32; scctx->isc_tx_tso_size_max = XGBE_TSO_MAX_SIZE; scctx->isc_tx_tso_segsize_max = PAGE_SIZE; /* * Set capabilities * 1) IFLIB automatically adds IFCAP_HWSTATS, so need to set explicitly * 2) isc_tx_csum_flags is mandatory if IFCAP_TXCSUM (included in * IFCAP_HWCSUM) is set */ scctx->isc_tx_csum_flags = (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_SCTP_IPV6 | CSUM_TSO); scctx->isc_capenable = (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_VLAN_HWTSO); scctx->isc_capabilities = scctx->isc_capenable; /* * Set rss_table_size alone when adding RSS support. 
rss_table_mask * will be set by IFLIB based on rss_table_size */ scctx->isc_rss_table_size = XGBE_RSS_MAX_TABLE_SIZE; scctx->isc_ntxqsets_max = XGBE_MAX_QUEUES; scctx->isc_nrxqsets_max = XGBE_MAX_QUEUES; scctx->isc_txrx = &axgbe_txrx; } static int axgbe_alloc_channels(if_ctx_t ctx) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; struct xgbe_channel *channel; int i, j, count; DBGPR("%s: txqs %d rxqs %d\n", __func__, pdata->tx_ring_count, pdata->rx_ring_count); /* Iflibe sets based on isc_ntxqsets/nrxqsets */ count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count); /* Allocate channel memory */ for (i = 0; i < count ; i++) { channel = (struct xgbe_channel*)malloc(sizeof(struct xgbe_channel), M_AXGBE, M_NOWAIT | M_ZERO); if (channel == NULL) { for (j = 0; j < i; j++) { free(pdata->channel[j], M_AXGBE); pdata->channel[j] = NULL; } return (ENOMEM); } pdata->channel[i] = channel; } pdata->total_channel_count = count; DBGPR("Channel count set to: %u\n", pdata->total_channel_count); for (i = 0; i < count; i++) { channel = pdata->channel[i]; snprintf(channel->name, sizeof(channel->name), "channel-%d",i); channel->pdata = pdata; channel->queue_index = i; channel->dma_tag = rman_get_bustag(pdata->xgmac_res); bus_space_subregion(channel->dma_tag, rman_get_bushandle(pdata->xgmac_res), DMA_CH_BASE + (DMA_CH_INC * i), DMA_CH_INC, &channel->dma_handle); channel->tx_ring = NULL; channel->rx_ring = NULL; } return (0); } /* axgbe_alloc_channels */ static void axgbe_free_channels(struct axgbe_if_softc *sc) { struct xgbe_prv_data *pdata = &sc->pdata; int i; for (i = 0; i < pdata->total_channel_count ; i++) { free(pdata->channel[i], M_AXGBE); pdata->channel[i] = NULL; } pdata->total_channel_count = 0; pdata->channel_count = 0; } static void xgbe_service(void *ctx, int pending) { struct xgbe_prv_data *pdata = ctx; struct axgbe_if_softc *sc = (struct axgbe_if_softc *)pdata; bool prev_state = false; /* Get previous link status */ prev_state = pdata->phy.link; pdata->phy_if.phy_status(pdata); if (prev_state != pdata->phy.link) { pdata->phy_link = pdata->phy.link; axgbe_if_update_admin_status(sc->ctx); } callout_reset(&pdata->service_timer, 1*hz, xgbe_service_timer, pdata); } static void xgbe_service_timer(void *data) { struct xgbe_prv_data *pdata = data; taskqueue_enqueue(pdata->dev_workqueue, &pdata->service_work); } static void xgbe_init_timers(struct xgbe_prv_data *pdata) { callout_init(&pdata->service_timer, 1); } static void xgbe_start_timers(struct xgbe_prv_data *pdata) { callout_reset(&pdata->service_timer, 1*hz, xgbe_service_timer, pdata); } static void xgbe_stop_timers(struct xgbe_prv_data *pdata) { callout_drain(&pdata->service_timer); callout_stop(&pdata->service_timer); } static void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata) { axgbe_printf(1, "\n************* PHY Reg dump *********************\n"); axgbe_printf(1, "PCS Control Reg (%#06x) = %#06x\n", MDIO_CTRL1, XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1)); axgbe_printf(1, "PCS Status Reg (%#06x) = %#06x\n", MDIO_STAT1, XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1)); axgbe_printf(1, "Phy Id (PHYS ID 1 %#06x)= %#06x\n", MDIO_DEVID1, XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1)); axgbe_printf(1, "Phy Id (PHYS ID 2 %#06x)= %#06x\n", MDIO_DEVID2, XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2)); axgbe_printf(1, "Devices in Package (%#06x)= %#06x\n", MDIO_DEVS1, XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1)); axgbe_printf(1, "Devices in Package (%#06x)= %#06x\n", MDIO_DEVS2, XMDIO_READ(pdata, 
MDIO_MMD_PCS, MDIO_DEVS2)); axgbe_printf(1, "Auto-Neg Control Reg (%#06x) = %#06x\n", MDIO_CTRL1, XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1)); axgbe_printf(1, "Auto-Neg Status Reg (%#06x) = %#06x\n", MDIO_STAT1, XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1)); axgbe_printf(1, "Auto-Neg Ad Reg 1 (%#06x) = %#06x\n", MDIO_AN_ADVERTISE, XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE)); axgbe_printf(1, "Auto-Neg Ad Reg 2 (%#06x) = %#06x\n", MDIO_AN_ADVERTISE + 1, XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1)); axgbe_printf(1, "Auto-Neg Ad Reg 3 (%#06x) = %#06x\n", MDIO_AN_ADVERTISE + 2, XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2)); axgbe_printf(1, "Auto-Neg Completion Reg (%#06x) = %#06x\n", MDIO_AN_COMP_STAT, XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT)); axgbe_printf(1, "\n************************************************\n"); } static void xgbe_dump_prop_registers(struct xgbe_prv_data *pdata) { int i; axgbe_printf(1, "\n************* PROP Reg dump ********************\n"); for (i = 0 ; i < 38 ; i++) { axgbe_printf(1, "PROP Offset 0x%08x = %08x\n", (XP_PROP_0 + (i * 4)), XP_IOREAD(pdata, (XP_PROP_0 + (i * 4)))); } } static void xgbe_dump_dma_registers(struct xgbe_prv_data *pdata, int ch) { struct xgbe_channel *channel; int i; axgbe_printf(1, "\n************* DMA Reg dump *********************\n"); axgbe_printf(1, "DMA MR Reg (%08x) = %08x\n", DMA_MR, XGMAC_IOREAD(pdata, DMA_MR)); axgbe_printf(1, "DMA SBMR Reg (%08x) = %08x\n", DMA_SBMR, XGMAC_IOREAD(pdata, DMA_SBMR)); axgbe_printf(1, "DMA ISR Reg (%08x) = %08x\n", DMA_ISR, XGMAC_IOREAD(pdata, DMA_ISR)); axgbe_printf(1, "DMA AXIARCR Reg (%08x) = %08x\n", DMA_AXIARCR, XGMAC_IOREAD(pdata, DMA_AXIARCR)); axgbe_printf(1, "DMA AXIAWCR Reg (%08x) = %08x\n", DMA_AXIAWCR, XGMAC_IOREAD(pdata, DMA_AXIAWCR)); axgbe_printf(1, "DMA AXIAWARCR Reg (%08x) = %08x\n", DMA_AXIAWARCR, XGMAC_IOREAD(pdata, DMA_AXIAWARCR)); axgbe_printf(1, "DMA DSR0 Reg (%08x) = %08x\n", DMA_DSR0, XGMAC_IOREAD(pdata, DMA_DSR0)); axgbe_printf(1, "DMA DSR1 Reg (%08x) = %08x\n", DMA_DSR1, XGMAC_IOREAD(pdata, DMA_DSR1)); axgbe_printf(1, "DMA DSR2 Reg (%08x) = %08x\n", DMA_DSR2, XGMAC_IOREAD(pdata, DMA_DSR2)); axgbe_printf(1, "DMA DSR3 Reg (%08x) = %08x\n", DMA_DSR3, XGMAC_IOREAD(pdata, DMA_DSR3)); axgbe_printf(1, "DMA DSR4 Reg (%08x) = %08x\n", DMA_DSR4, XGMAC_IOREAD(pdata, DMA_DSR4)); axgbe_printf(1, "DMA TXEDMACR Reg (%08x) = %08x\n", DMA_TXEDMACR, XGMAC_IOREAD(pdata, DMA_TXEDMACR)); axgbe_printf(1, "DMA RXEDMACR Reg (%08x) = %08x\n", DMA_RXEDMACR, XGMAC_IOREAD(pdata, DMA_RXEDMACR)); for (i = 0 ; i < 8 ; i++ ) { if (ch >= 0) { if (i != ch) continue; } channel = pdata->channel[i]; axgbe_printf(1, "\n************* DMA CH %d dump ****************\n", i); axgbe_printf(1, "DMA_CH_CR Reg (%08x) = %08x\n", DMA_CH_CR, XGMAC_DMA_IOREAD(channel, DMA_CH_CR)); axgbe_printf(1, "DMA_CH_TCR Reg (%08x) = %08x\n", DMA_CH_TCR, XGMAC_DMA_IOREAD(channel, DMA_CH_TCR)); axgbe_printf(1, "DMA_CH_RCR Reg (%08x) = %08x\n", DMA_CH_RCR, XGMAC_DMA_IOREAD(channel, DMA_CH_RCR)); axgbe_printf(1, "DMA_CH_TDLR_HI Reg (%08x) = %08x\n", DMA_CH_TDLR_HI, XGMAC_DMA_IOREAD(channel, DMA_CH_TDLR_HI)); axgbe_printf(1, "DMA_CH_TDLR_LO Reg (%08x) = %08x\n", DMA_CH_TDLR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_TDLR_LO)); axgbe_printf(1, "DMA_CH_RDLR_HI Reg (%08x) = %08x\n", DMA_CH_RDLR_HI, XGMAC_DMA_IOREAD(channel, DMA_CH_RDLR_HI)); axgbe_printf(1, "DMA_CH_RDLR_LO Reg (%08x) = %08x\n", DMA_CH_RDLR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_RDLR_LO)); axgbe_printf(1, "DMA_CH_TDTR_LO Reg (%08x) = %08x\n", DMA_CH_TDTR_LO, 
XGMAC_DMA_IOREAD(channel, DMA_CH_TDTR_LO)); axgbe_printf(1, "DMA_CH_RDTR_LO Reg (%08x) = %08x\n", DMA_CH_RDTR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_RDTR_LO)); axgbe_printf(1, "DMA_CH_TDRLR Reg (%08x) = %08x\n", DMA_CH_TDRLR, XGMAC_DMA_IOREAD(channel, DMA_CH_TDRLR)); axgbe_printf(1, "DMA_CH_RDRLR Reg (%08x) = %08x\n", DMA_CH_RDRLR, XGMAC_DMA_IOREAD(channel, DMA_CH_RDRLR)); axgbe_printf(1, "DMA_CH_IER Reg (%08x) = %08x\n", DMA_CH_IER, XGMAC_DMA_IOREAD(channel, DMA_CH_IER)); axgbe_printf(1, "DMA_CH_RIWT Reg (%08x) = %08x\n", DMA_CH_RIWT, XGMAC_DMA_IOREAD(channel, DMA_CH_RIWT)); axgbe_printf(1, "DMA_CH_CATDR_LO Reg (%08x) = %08x\n", DMA_CH_CATDR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_CATDR_LO)); axgbe_printf(1, "DMA_CH_CARDR_LO Reg (%08x) = %08x\n", DMA_CH_CARDR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_CARDR_LO)); axgbe_printf(1, "DMA_CH_CATBR_HI Reg (%08x) = %08x\n", DMA_CH_CATBR_HI, XGMAC_DMA_IOREAD(channel, DMA_CH_CATBR_HI)); axgbe_printf(1, "DMA_CH_CATBR_LO Reg (%08x) = %08x\n", DMA_CH_CATBR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_CATBR_LO)); axgbe_printf(1, "DMA_CH_CARBR_HI Reg (%08x) = %08x\n", DMA_CH_CARBR_HI, XGMAC_DMA_IOREAD(channel, DMA_CH_CARBR_HI)); axgbe_printf(1, "DMA_CH_CARBR_LO Reg (%08x) = %08x\n", DMA_CH_CARBR_LO, XGMAC_DMA_IOREAD(channel, DMA_CH_CARBR_LO)); axgbe_printf(1, "DMA_CH_SR Reg (%08x) = %08x\n", DMA_CH_SR, XGMAC_DMA_IOREAD(channel, DMA_CH_SR)); axgbe_printf(1, "DMA_CH_DSR Reg (%08x) = %08x\n", DMA_CH_DSR, XGMAC_DMA_IOREAD(channel, DMA_CH_DSR)); axgbe_printf(1, "DMA_CH_DCFL Reg (%08x) = %08x\n", DMA_CH_DCFL, XGMAC_DMA_IOREAD(channel, DMA_CH_DCFL)); axgbe_printf(1, "DMA_CH_MFC Reg (%08x) = %08x\n", DMA_CH_MFC, XGMAC_DMA_IOREAD(channel, DMA_CH_MFC)); axgbe_printf(1, "DMA_CH_TDTRO Reg (%08x) = %08x\n", DMA_CH_TDTRO, XGMAC_DMA_IOREAD(channel, DMA_CH_TDTRO)); axgbe_printf(1, "DMA_CH_RDTRO Reg (%08x) = %08x\n", DMA_CH_RDTRO, XGMAC_DMA_IOREAD(channel, DMA_CH_RDTRO)); axgbe_printf(1, "DMA_CH_TDWRO Reg (%08x) = %08x\n", DMA_CH_TDWRO, XGMAC_DMA_IOREAD(channel, DMA_CH_TDWRO)); axgbe_printf(1, "DMA_CH_RDWRO Reg (%08x) = %08x\n", DMA_CH_RDWRO, XGMAC_DMA_IOREAD(channel, DMA_CH_RDWRO)); } } static void xgbe_dump_mtl_registers(struct xgbe_prv_data *pdata) { int i; axgbe_printf(1, "\n************* MTL Reg dump *********************\n"); axgbe_printf(1, "MTL OMR Reg (%08x) = %08x\n", MTL_OMR, XGMAC_IOREAD(pdata, MTL_OMR)); axgbe_printf(1, "MTL FDCR Reg (%08x) = %08x\n", MTL_FDCR, XGMAC_IOREAD(pdata, MTL_FDCR)); axgbe_printf(1, "MTL FDSR Reg (%08x) = %08x\n", MTL_FDSR, XGMAC_IOREAD(pdata, MTL_FDSR)); axgbe_printf(1, "MTL FDDR Reg (%08x) = %08x\n", MTL_FDDR, XGMAC_IOREAD(pdata, MTL_FDDR)); axgbe_printf(1, "MTL ISR Reg (%08x) = %08x\n", MTL_ISR, XGMAC_IOREAD(pdata, MTL_ISR)); axgbe_printf(1, "MTL RQDCM0R Reg (%08x) = %08x\n", MTL_RQDCM0R, XGMAC_IOREAD(pdata, MTL_RQDCM0R)); axgbe_printf(1, "MTL RQDCM1R Reg (%08x) = %08x\n", MTL_RQDCM1R, XGMAC_IOREAD(pdata, MTL_RQDCM1R)); axgbe_printf(1, "MTL RQDCM2R Reg (%08x) = %08x\n", MTL_RQDCM2R, XGMAC_IOREAD(pdata, MTL_RQDCM2R)); axgbe_printf(1, "MTL TCPM0R Reg (%08x) = %08x\n", MTL_TCPM0R, XGMAC_IOREAD(pdata, MTL_TCPM0R)); axgbe_printf(1, "MTL TCPM1R Reg (%08x) = %08x\n", MTL_TCPM1R, XGMAC_IOREAD(pdata, MTL_TCPM1R)); for (i = 0 ; i < 8 ; i++ ) { axgbe_printf(1, "\n************* MTL CH %d dump ****************\n", i); axgbe_printf(1, "MTL_Q_TQOMR Reg (%08x) = %08x\n", MTL_Q_TQOMR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TQOMR)); axgbe_printf(1, "MTL_Q_TQUR Reg (%08x) = %08x\n", MTL_Q_TQUR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TQUR)); axgbe_printf(1, "MTL_Q_TQDR Reg 
(%08x) = %08x\n", MTL_Q_TQDR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TQDR)); axgbe_printf(1, "MTL_Q_TC0ETSCR Reg (%08x) = %08x\n", MTL_Q_TC0ETSCR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TC0ETSCR)); axgbe_printf(1, "MTL_Q_TC0ETSSR Reg (%08x) = %08x\n", MTL_Q_TC0ETSSR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TC0ETSSR)); axgbe_printf(1, "MTL_Q_TC0QWR Reg (%08x) = %08x\n", MTL_Q_TC0QWR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TC0QWR)); axgbe_printf(1, "MTL_Q_RQOMR Reg (%08x) = %08x\n", MTL_Q_RQOMR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQOMR)); axgbe_printf(1, "MTL_Q_RQMPOCR Reg (%08x) = %08x\n", MTL_Q_RQMPOCR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQMPOCR)); axgbe_printf(1, "MTL_Q_RQDR Reg (%08x) = %08x\n", MTL_Q_RQDR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQDR)); axgbe_printf(1, "MTL_Q_RQCR Reg (%08x) = %08x\n", MTL_Q_RQCR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQCR)); axgbe_printf(1, "MTL_Q_RQFCR Reg (%08x) = %08x\n", MTL_Q_RQFCR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQFCR)); axgbe_printf(1, "MTL_Q_IER Reg (%08x) = %08x\n", MTL_Q_IER, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_IER)); axgbe_printf(1, "MTL_Q_ISR Reg (%08x) = %08x\n", MTL_Q_ISR, XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR)); } } static void xgbe_dump_mac_registers(struct xgbe_prv_data *pdata) { axgbe_printf(1, "\n************* MAC Reg dump **********************\n"); axgbe_printf(1, "MAC TCR Reg (%08x) = %08x\n", MAC_TCR, XGMAC_IOREAD(pdata, MAC_TCR)); axgbe_printf(1, "MAC RCR Reg (%08x) = %08x\n", MAC_RCR, XGMAC_IOREAD(pdata, MAC_RCR)); axgbe_printf(1, "MAC PFR Reg (%08x) = %08x\n", MAC_PFR, XGMAC_IOREAD(pdata, MAC_PFR)); axgbe_printf(1, "MAC WTR Reg (%08x) = %08x\n", MAC_WTR, XGMAC_IOREAD(pdata, MAC_WTR)); axgbe_printf(1, "MAC HTR0 Reg (%08x) = %08x\n", MAC_HTR0, XGMAC_IOREAD(pdata, MAC_HTR0)); axgbe_printf(1, "MAC HTR1 Reg (%08x) = %08x\n", MAC_HTR1, XGMAC_IOREAD(pdata, MAC_HTR1)); axgbe_printf(1, "MAC HTR2 Reg (%08x) = %08x\n", MAC_HTR2, XGMAC_IOREAD(pdata, MAC_HTR2)); axgbe_printf(1, "MAC HTR3 Reg (%08x) = %08x\n", MAC_HTR3, XGMAC_IOREAD(pdata, MAC_HTR3)); axgbe_printf(1, "MAC HTR4 Reg (%08x) = %08x\n", MAC_HTR4, XGMAC_IOREAD(pdata, MAC_HTR4)); axgbe_printf(1, "MAC HTR5 Reg (%08x) = %08x\n", MAC_HTR5, XGMAC_IOREAD(pdata, MAC_HTR5)); axgbe_printf(1, "MAC HTR6 Reg (%08x) = %08x\n", MAC_HTR6, XGMAC_IOREAD(pdata, MAC_HTR6)); axgbe_printf(1, "MAC HTR7 Reg (%08x) = %08x\n", MAC_HTR7, XGMAC_IOREAD(pdata, MAC_HTR7)); axgbe_printf(1, "MAC VLANTR Reg (%08x) = %08x\n", MAC_VLANTR, XGMAC_IOREAD(pdata, MAC_VLANTR)); axgbe_printf(1, "MAC VLANHTR Reg (%08x) = %08x\n", MAC_VLANHTR, XGMAC_IOREAD(pdata, MAC_VLANHTR)); axgbe_printf(1, "MAC VLANIR Reg (%08x) = %08x\n", MAC_VLANIR, XGMAC_IOREAD(pdata, MAC_VLANIR)); axgbe_printf(1, "MAC IVLANIR Reg (%08x) = %08x\n", MAC_IVLANIR, XGMAC_IOREAD(pdata, MAC_IVLANIR)); axgbe_printf(1, "MAC RETMR Reg (%08x) = %08x\n", MAC_RETMR, XGMAC_IOREAD(pdata, MAC_RETMR)); axgbe_printf(1, "MAC Q0TFCR Reg (%08x) = %08x\n", MAC_Q0TFCR, XGMAC_IOREAD(pdata, MAC_Q0TFCR)); axgbe_printf(1, "MAC Q1TFCR Reg (%08x) = %08x\n", MAC_Q1TFCR, XGMAC_IOREAD(pdata, MAC_Q1TFCR)); axgbe_printf(1, "MAC Q2TFCR Reg (%08x) = %08x\n", MAC_Q2TFCR, XGMAC_IOREAD(pdata, MAC_Q2TFCR)); axgbe_printf(1, "MAC Q3TFCR Reg (%08x) = %08x\n", MAC_Q3TFCR, XGMAC_IOREAD(pdata, MAC_Q3TFCR)); axgbe_printf(1, "MAC Q4TFCR Reg (%08x) = %08x\n", MAC_Q4TFCR, XGMAC_IOREAD(pdata, MAC_Q4TFCR)); axgbe_printf(1, "MAC Q5TFCR Reg (%08x) = %08x\n", MAC_Q5TFCR, XGMAC_IOREAD(pdata, MAC_Q5TFCR)); axgbe_printf(1, "MAC Q6TFCR Reg (%08x) = %08x\n", MAC_Q6TFCR, XGMAC_IOREAD(pdata, MAC_Q6TFCR)); axgbe_printf(1, "MAC Q7TFCR Reg (%08x) = 
%08x\n", MAC_Q7TFCR, XGMAC_IOREAD(pdata, MAC_Q7TFCR)); axgbe_printf(1, "MAC RFCR Reg (%08x) = %08x\n", MAC_RFCR, XGMAC_IOREAD(pdata, MAC_RFCR)); axgbe_printf(1, "MAC RQC0R Reg (%08x) = %08x\n", MAC_RQC0R, XGMAC_IOREAD(pdata, MAC_RQC0R)); axgbe_printf(1, "MAC RQC1R Reg (%08x) = %08x\n", MAC_RQC1R, XGMAC_IOREAD(pdata, MAC_RQC1R)); axgbe_printf(1, "MAC RQC2R Reg (%08x) = %08x\n", MAC_RQC2R, XGMAC_IOREAD(pdata, MAC_RQC2R)); axgbe_printf(1, "MAC RQC3R Reg (%08x) = %08x\n", MAC_RQC3R, XGMAC_IOREAD(pdata, MAC_RQC3R)); axgbe_printf(1, "MAC ISR Reg (%08x) = %08x\n", MAC_ISR, XGMAC_IOREAD(pdata, MAC_ISR)); axgbe_printf(1, "MAC IER Reg (%08x) = %08x\n", MAC_IER, XGMAC_IOREAD(pdata, MAC_IER)); axgbe_printf(1, "MAC RTSR Reg (%08x) = %08x\n", MAC_RTSR, XGMAC_IOREAD(pdata, MAC_RTSR)); axgbe_printf(1, "MAC PMTCSR Reg (%08x) = %08x\n", MAC_PMTCSR, XGMAC_IOREAD(pdata, MAC_PMTCSR)); axgbe_printf(1, "MAC RWKPFR Reg (%08x) = %08x\n", MAC_RWKPFR, XGMAC_IOREAD(pdata, MAC_RWKPFR)); axgbe_printf(1, "MAC LPICSR Reg (%08x) = %08x\n", MAC_LPICSR, XGMAC_IOREAD(pdata, MAC_LPICSR)); axgbe_printf(1, "MAC LPITCR Reg (%08x) = %08x\n", MAC_LPITCR, XGMAC_IOREAD(pdata, MAC_LPITCR)); axgbe_printf(1, "MAC TIR Reg (%08x) = %08x\n", MAC_TIR, XGMAC_IOREAD(pdata, MAC_TIR)); axgbe_printf(1, "MAC VR Reg (%08x) = %08x\n", MAC_VR, XGMAC_IOREAD(pdata, MAC_VR)); axgbe_printf(1, "MAC DR Reg (%08x) = %08x\n", MAC_DR, XGMAC_IOREAD(pdata, MAC_DR)); axgbe_printf(1, "MAC HWF0R Reg (%08x) = %08x\n", MAC_HWF0R, XGMAC_IOREAD(pdata, MAC_HWF0R)); axgbe_printf(1, "MAC HWF1R Reg (%08x) = %08x\n", MAC_HWF1R, XGMAC_IOREAD(pdata, MAC_HWF1R)); axgbe_printf(1, "MAC HWF2R Reg (%08x) = %08x\n", MAC_HWF2R, XGMAC_IOREAD(pdata, MAC_HWF2R)); axgbe_printf(1, "MAC MDIOSCAR Reg (%08x) = %08x\n", MAC_MDIOSCAR, XGMAC_IOREAD(pdata, MAC_MDIOSCAR)); axgbe_printf(1, "MAC MDIOSCCDR Reg (%08x) = %08x\n", MAC_MDIOSCCDR, XGMAC_IOREAD(pdata, MAC_MDIOSCCDR)); axgbe_printf(1, "MAC MDIOISR Reg (%08x) = %08x\n", MAC_MDIOISR, XGMAC_IOREAD(pdata, MAC_MDIOISR)); axgbe_printf(1, "MAC MDIOIER Reg (%08x) = %08x\n", MAC_MDIOIER, XGMAC_IOREAD(pdata, MAC_MDIOIER)); axgbe_printf(1, "MAC MDIOCL22R Reg (%08x) = %08x\n", MAC_MDIOCL22R, XGMAC_IOREAD(pdata, MAC_MDIOCL22R)); axgbe_printf(1, "MAC GPIOCR Reg (%08x) = %08x\n", MAC_GPIOCR, XGMAC_IOREAD(pdata, MAC_GPIOCR)); axgbe_printf(1, "MAC GPIOSR Reg (%08x) = %08x\n", MAC_GPIOSR, XGMAC_IOREAD(pdata, MAC_GPIOSR)); axgbe_printf(1, "MAC MACA0HR Reg (%08x) = %08x\n", MAC_MACA0HR, XGMAC_IOREAD(pdata, MAC_MACA0HR)); axgbe_printf(1, "MAC MACA0LR Reg (%08x) = %08x\n", MAC_TCR, XGMAC_IOREAD(pdata, MAC_MACA0LR)); axgbe_printf(1, "MAC MACA1HR Reg (%08x) = %08x\n", MAC_MACA1HR, XGMAC_IOREAD(pdata, MAC_MACA1HR)); axgbe_printf(1, "MAC MACA1LR Reg (%08x) = %08x\n", MAC_MACA1LR, XGMAC_IOREAD(pdata, MAC_MACA1LR)); axgbe_printf(1, "MAC RSSCR Reg (%08x) = %08x\n", MAC_RSSCR, XGMAC_IOREAD(pdata, MAC_RSSCR)); axgbe_printf(1, "MAC RSSDR Reg (%08x) = %08x\n", MAC_RSSDR, XGMAC_IOREAD(pdata, MAC_RSSDR)); axgbe_printf(1, "MAC RSSAR Reg (%08x) = %08x\n", MAC_RSSAR, XGMAC_IOREAD(pdata, MAC_RSSAR)); axgbe_printf(1, "MAC TSCR Reg (%08x) = %08x\n", MAC_TSCR, XGMAC_IOREAD(pdata, MAC_TSCR)); axgbe_printf(1, "MAC SSIR Reg (%08x) = %08x\n", MAC_SSIR, XGMAC_IOREAD(pdata, MAC_SSIR)); axgbe_printf(1, "MAC STSR Reg (%08x) = %08x\n", MAC_STSR, XGMAC_IOREAD(pdata, MAC_STSR)); axgbe_printf(1, "MAC STNR Reg (%08x) = %08x\n", MAC_STNR, XGMAC_IOREAD(pdata, MAC_STNR)); axgbe_printf(1, "MAC STSUR Reg (%08x) = %08x\n", MAC_STSUR, XGMAC_IOREAD(pdata, MAC_STSUR)); axgbe_printf(1, "MAC STNUR 
Reg (%08x) = %08x\n", MAC_STNUR, XGMAC_IOREAD(pdata, MAC_STNUR)); axgbe_printf(1, "MAC TSAR Reg (%08x) = %08x\n", MAC_TSAR, XGMAC_IOREAD(pdata, MAC_TSAR)); axgbe_printf(1, "MAC TSSR Reg (%08x) = %08x\n", MAC_TSSR, XGMAC_IOREAD(pdata, MAC_TSSR)); axgbe_printf(1, "MAC TXSNR Reg (%08x) = %08x\n", MAC_TXSNR, XGMAC_IOREAD(pdata, MAC_TXSNR)); axgbe_printf(1, "MAC TXSSR Reg (%08x) = %08x\n", MAC_TXSSR, XGMAC_IOREAD(pdata, MAC_TXSSR)); } static void xgbe_dump_rmon_counters(struct xgbe_prv_data *pdata) { struct xgbe_mmc_stats *stats = &pdata->mmc_stats; axgbe_printf(1, "\n************* RMON counters dump ***************\n"); pdata->hw_if.read_mmc_stats(pdata); axgbe_printf(1, "rmon txoctetcount_gb (%08x) = %08lx\n", MMC_TXOCTETCOUNT_GB_LO, stats->txoctetcount_gb); axgbe_printf(1, "rmon txframecount_gb (%08x) = %08lx\n", MMC_TXFRAMECOUNT_GB_LO, stats->txframecount_gb); axgbe_printf(1, "rmon txbroadcastframes_g (%08x) = %08lx\n", MMC_TXBROADCASTFRAMES_G_LO, stats->txbroadcastframes_g); axgbe_printf(1, "rmon txmulticastframes_g (%08x) = %08lx\n", MMC_TXMULTICASTFRAMES_G_LO, stats->txmulticastframes_g); axgbe_printf(1, "rmon tx64octets_gb (%08x) = %08lx\n", MMC_TX64OCTETS_GB_LO, stats->tx64octets_gb); axgbe_printf(1, "rmon tx65to127octets_gb (%08x) = %08lx\n", MMC_TX65TO127OCTETS_GB_LO, stats->tx65to127octets_gb); axgbe_printf(1, "rmon tx128to255octets_gb (%08x) = %08lx\n", MMC_TX128TO255OCTETS_GB_LO, stats->tx128to255octets_gb); axgbe_printf(1, "rmon tx256to511octets_gb (%08x) = %08lx\n", MMC_TX256TO511OCTETS_GB_LO, stats->tx256to511octets_gb); axgbe_printf(1, "rmon tx512to1023octets_gb (%08x) = %08lx\n", MMC_TX512TO1023OCTETS_GB_LO, stats->tx512to1023octets_gb); axgbe_printf(1, "rmon tx1024tomaxoctets_gb (%08x) = %08lx\n", MMC_TX1024TOMAXOCTETS_GB_LO, stats->tx1024tomaxoctets_gb); axgbe_printf(1, "rmon txunicastframes_gb (%08x) = %08lx\n", MMC_TXUNICASTFRAMES_GB_LO, stats->txunicastframes_gb); axgbe_printf(1, "rmon txmulticastframes_gb (%08x) = %08lx\n", MMC_TXMULTICASTFRAMES_GB_LO, stats->txmulticastframes_gb); axgbe_printf(1, "rmon txbroadcastframes_gb (%08x) = %08lx\n", MMC_TXBROADCASTFRAMES_GB_LO, stats->txbroadcastframes_gb); axgbe_printf(1, "rmon txunderflowerror (%08x) = %08lx\n", MMC_TXUNDERFLOWERROR_LO, stats->txunderflowerror); axgbe_printf(1, "rmon txoctetcount_g (%08x) = %08lx\n", MMC_TXOCTETCOUNT_G_LO, stats->txoctetcount_g); axgbe_printf(1, "rmon txframecount_g (%08x) = %08lx\n", MMC_TXFRAMECOUNT_G_LO, stats->txframecount_g); axgbe_printf(1, "rmon txpauseframes (%08x) = %08lx\n", MMC_TXPAUSEFRAMES_LO, stats->txpauseframes); axgbe_printf(1, "rmon txvlanframes_g (%08x) = %08lx\n", MMC_TXVLANFRAMES_G_LO, stats->txvlanframes_g); axgbe_printf(1, "rmon rxframecount_gb (%08x) = %08lx\n", MMC_RXFRAMECOUNT_GB_LO, stats->rxframecount_gb); axgbe_printf(1, "rmon rxoctetcount_gb (%08x) = %08lx\n", MMC_RXOCTETCOUNT_GB_LO, stats->rxoctetcount_gb); axgbe_printf(1, "rmon rxoctetcount_g (%08x) = %08lx\n", MMC_RXOCTETCOUNT_G_LO, stats->rxoctetcount_g); axgbe_printf(1, "rmon rxbroadcastframes_g (%08x) = %08lx\n", MMC_RXBROADCASTFRAMES_G_LO, stats->rxbroadcastframes_g); axgbe_printf(1, "rmon rxmulticastframes_g (%08x) = %08lx\n", MMC_RXMULTICASTFRAMES_G_LO, stats->rxmulticastframes_g); axgbe_printf(1, "rmon rxcrcerror (%08x) = %08lx\n", MMC_RXCRCERROR_LO, stats->rxcrcerror); axgbe_printf(1, "rmon rxrunterror (%08x) = %08lx\n", MMC_RXRUNTERROR, stats->rxrunterror); axgbe_printf(1, "rmon rxjabbererror (%08x) = %08lx\n", MMC_RXJABBERERROR, stats->rxjabbererror); axgbe_printf(1, "rmon rxundersize_g (%08x) = 
%08lx\n", MMC_RXUNDERSIZE_G, stats->rxundersize_g); axgbe_printf(1, "rmon rxoversize_g (%08x) = %08lx\n", MMC_RXOVERSIZE_G, stats->rxoversize_g); axgbe_printf(1, "rmon rx64octets_gb (%08x) = %08lx\n", MMC_RX64OCTETS_GB_LO, stats->rx64octets_gb); axgbe_printf(1, "rmon rx65to127octets_gb (%08x) = %08lx\n", MMC_RX65TO127OCTETS_GB_LO, stats->rx65to127octets_gb); axgbe_printf(1, "rmon rx128to255octets_gb (%08x) = %08lx\n", MMC_RX128TO255OCTETS_GB_LO, stats->rx128to255octets_gb); axgbe_printf(1, "rmon rx256to511octets_gb (%08x) = %08lx\n", MMC_RX256TO511OCTETS_GB_LO, stats->rx256to511octets_gb); axgbe_printf(1, "rmon rx512to1023octets_gb (%08x) = %08lx\n", MMC_RX512TO1023OCTETS_GB_LO, stats->rx512to1023octets_gb); axgbe_printf(1, "rmon rx1024tomaxoctets_gb (%08x) = %08lx\n", MMC_RX1024TOMAXOCTETS_GB_LO, stats->rx1024tomaxoctets_gb); axgbe_printf(1, "rmon rxunicastframes_g (%08x) = %08lx\n", MMC_RXUNICASTFRAMES_G_LO, stats->rxunicastframes_g); axgbe_printf(1, "rmon rxlengtherror (%08x) = %08lx\n", MMC_RXLENGTHERROR_LO, stats->rxlengtherror); axgbe_printf(1, "rmon rxoutofrangetype (%08x) = %08lx\n", MMC_RXOUTOFRANGETYPE_LO, stats->rxoutofrangetype); axgbe_printf(1, "rmon rxpauseframes (%08x) = %08lx\n", MMC_RXPAUSEFRAMES_LO, stats->rxpauseframes); axgbe_printf(1, "rmon rxfifooverflow (%08x) = %08lx\n", MMC_RXFIFOOVERFLOW_LO, stats->rxfifooverflow); axgbe_printf(1, "rmon rxvlanframes_gb (%08x) = %08lx\n", MMC_RXVLANFRAMES_GB_LO, stats->rxvlanframes_gb); axgbe_printf(1, "rmon rxwatchdogerror (%08x) = %08lx\n", MMC_RXWATCHDOGERROR, stats->rxwatchdogerror); } void xgbe_dump_i2c_registers(struct xgbe_prv_data *pdata) { axgbe_printf(1, "*************** I2C Registers **************\n"); axgbe_printf(1, " IC_CON : %010x\n", XI2C_IOREAD(pdata, 0x00)); axgbe_printf(1, " IC_TAR : %010x\n", XI2C_IOREAD(pdata, 0x04)); axgbe_printf(1, " IC_HS_MADDR : %010x\n", XI2C_IOREAD(pdata, 0x0c)); axgbe_printf(1, " IC_INTR_STAT : %010x\n", XI2C_IOREAD(pdata, 0x2c)); axgbe_printf(1, " IC_INTR_MASK : %010x\n", XI2C_IOREAD(pdata, 0x30)); axgbe_printf(1, " IC_RAW_INTR_STAT : %010x\n", XI2C_IOREAD(pdata, 0x34)); axgbe_printf(1, " IC_RX_TL : %010x\n", XI2C_IOREAD(pdata, 0x38)); axgbe_printf(1, " IC_TX_TL : %010x\n", XI2C_IOREAD(pdata, 0x3c)); axgbe_printf(1, " IC_ENABLE : %010x\n", XI2C_IOREAD(pdata, 0x6c)); axgbe_printf(1, " IC_STATUS : %010x\n", XI2C_IOREAD(pdata, 0x70)); axgbe_printf(1, " IC_TXFLR : %010x\n", XI2C_IOREAD(pdata, 0x74)); axgbe_printf(1, " IC_RXFLR : %010x\n", XI2C_IOREAD(pdata, 0x78)); axgbe_printf(1, " IC_ENABLE_STATUS : %010x\n", XI2C_IOREAD(pdata, 0x9c)); axgbe_printf(1, " IC_COMP_PARAM1 : %010x\n", XI2C_IOREAD(pdata, 0xf4)); } static void xgbe_dump_active_vlans(struct xgbe_prv_data *pdata) { int i; for(i=0 ; iactive_vlans[i]); } axgbe_printf(1, "\n"); } static void xgbe_default_config(struct xgbe_prv_data *pdata) { pdata->blen = DMA_SBMR_BLEN_64; pdata->pbl = DMA_PBL_128; pdata->aal = 1; pdata->rd_osr_limit = 8; pdata->wr_osr_limit = 8; pdata->tx_sf_mode = MTL_TSF_ENABLE; pdata->tx_threshold = MTL_TX_THRESHOLD_64; pdata->tx_osp_mode = DMA_OSP_ENABLE; pdata->rx_sf_mode = MTL_RSF_DISABLE; pdata->rx_threshold = MTL_RX_THRESHOLD_64; pdata->pause_autoneg = 1; pdata->tx_pause = 1; pdata->rx_pause = 1; pdata->phy_speed = SPEED_UNKNOWN; pdata->power_down = 0; pdata->enable_rss = 1; } static int axgbe_if_attach_post(if_ctx_t ctx) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; if_t ifp = pdata->netdev; struct xgbe_phy_if *phy_if = &pdata->phy_if; struct xgbe_hw_if 
*hw_if = &pdata->hw_if; if_softc_ctx_t scctx = sc->scctx; int i, ret; /* set split header support based on tunable */ pdata->sph_enable = axgbe_sph_enable; /* Initialize ECC timestamps */ pdata->tx_sec_period = ticks; pdata->tx_ded_period = ticks; pdata->rx_sec_period = ticks; pdata->rx_ded_period = ticks; pdata->desc_sec_period = ticks; pdata->desc_ded_period = ticks; /* Reset the hardware */ ret = hw_if->exit(&sc->pdata); if (ret) axgbe_error("%s: exit error %d\n", __func__, ret); /* Configure the defaults */ xgbe_default_config(pdata); /* Set default max values if not provided */ if (!pdata->tx_max_fifo_size) pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size; if (!pdata->rx_max_fifo_size) pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size; DBGPR("%s: tx fifo 0x%x rx fifo 0x%x\n", __func__, pdata->tx_max_fifo_size, pdata->rx_max_fifo_size); /* Set and validate the number of descriptors for a ring */ MPASS(powerof2(XGBE_TX_DESC_CNT)); pdata->tx_desc_count = XGBE_TX_DESC_CNT; MPASS(powerof2(XGBE_RX_DESC_CNT)); pdata->rx_desc_count = XGBE_RX_DESC_CNT; /* Adjust the number of queues based on interrupts assigned */ if (pdata->channel_irq_count) { pdata->tx_ring_count = min_t(unsigned int, pdata->tx_ring_count, pdata->channel_irq_count); pdata->rx_ring_count = min_t(unsigned int, pdata->rx_ring_count, pdata->channel_irq_count); DBGPR("adjusted TX %u/%u RX %u/%u\n", pdata->tx_ring_count, pdata->tx_q_count, pdata->rx_ring_count, pdata->rx_q_count); } /* Set channel count based on interrupts assigned */ pdata->channel_count = max_t(unsigned int, scctx->isc_ntxqsets, scctx->isc_nrxqsets); DBGPR("Channel count set to: %u\n", pdata->channel_count); /* Get RSS key */ #ifdef RSS rss_getkey((uint8_t *)pdata->rss_key); #else arc4rand(&pdata->rss_key, ARRAY_SIZE(pdata->rss_key), 0); #endif XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1); XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1); XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1); /* Initialize the PHY device */ pdata->sysctl_an_cdr_workaround = pdata->vdata->an_cdr_workaround; phy_if->phy_init(pdata); /* Set the coalescing */ xgbe_init_rx_coalesce(&sc->pdata); xgbe_init_tx_coalesce(&sc->pdata); ifmedia_add(sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL); ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL); ifmedia_add(sc->media, IFM_ETHER | IFM_10G_SFI, 0, NULL); ifmedia_add(sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL); ifmedia_add(sc->media, IFM_ETHER | IFM_1000_CX, 0, NULL); ifmedia_add(sc->media, IFM_ETHER | IFM_1000_LX, 0, NULL); ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SX, 0, NULL); ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL); ifmedia_add(sc->media, IFM_ETHER | IFM_1000_SGMII, 0, NULL); ifmedia_add(sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); ifmedia_add(sc->media, IFM_ETHER | IFM_100_SGMII, 0, NULL); ifmedia_add(sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(sc->media, IFM_ETHER | IFM_AUTO); /* Initialize the phy */ pdata->phy_link = -1; pdata->phy_speed = SPEED_UNKNOWN; ret = phy_if->phy_reset(pdata); if (ret) return (ret); /* Calculate the Rx buffer size before allocating rings */ ret = xgbe_calc_rx_buf_size(pdata->netdev, if_getmtu(pdata->netdev)); pdata->rx_buf_size = ret; DBGPR("%s: rx_buf_size %d\n", __func__, ret); /* Setup RSS lookup table */ for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++) XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, i % pdata->rx_ring_count); /* * Mark the device down until it is initialized, which happens * when the device is accessed first (for configuring 
the iface,
         * eg: setting IP)
         */
        set_bit(XGBE_DOWN, &pdata->dev_state);

        DBGPR("mtu %d\n", if_getmtu(ifp));
        scctx->isc_max_frame_size = if_getmtu(ifp) + 18;
        scctx->isc_min_frame_size = XGMAC_MIN_PACKET;

        axgbe_sysctl_init(pdata);
        axgbe_pci_init(pdata);

        return (0);
} /* axgbe_if_attach_post */

static void
xgbe_free_intr(struct xgbe_prv_data *pdata, struct resource *res, void *tag,
    int rid)
{
        if (tag)
                bus_teardown_intr(pdata->dev, res, tag);

        if (res)
                bus_release_resource(pdata->dev, SYS_RES_IRQ, rid, res);
}

static void
axgbe_interrupts_free(if_ctx_t ctx)
{
        struct axgbe_if_softc *sc = iflib_get_softc(ctx);
        struct xgbe_prv_data *pdata = &sc->pdata;
        if_softc_ctx_t scctx = sc->scctx;
        struct xgbe_channel *channel;
        struct if_irq irq;
        int i;

        axgbe_printf(2, "%s: mode %d\n", __func__, scctx->isc_intr);

        /* Free dev_irq */
        iflib_irq_free(ctx, &pdata->dev_irq);

        /* Free ecc_irq */
        xgbe_free_intr(pdata, pdata->ecc_irq_res, pdata->ecc_irq_tag,
            pdata->ecc_rid);

        /* Free i2c_irq */
        xgbe_free_intr(pdata, pdata->i2c_irq_res, pdata->i2c_irq_tag,
            pdata->i2c_rid);

        /* Free an_irq */
        xgbe_free_intr(pdata, pdata->an_irq_res, pdata->an_irq_tag,
            pdata->an_rid);

        for (i = 0; i < scctx->isc_nrxqsets; i++) {
                channel = pdata->channel[i];
                axgbe_printf(2, "%s: rid %d\n", __func__, channel->dma_irq_rid);
                irq.ii_res = channel->dma_irq_res;
                irq.ii_tag = channel->dma_irq_tag;
                iflib_irq_free(ctx, &irq);
        }
}

static int
axgbe_if_detach(if_ctx_t ctx)
{
        struct axgbe_if_softc *sc = iflib_get_softc(ctx);
        struct xgbe_prv_data *pdata = &sc->pdata;
        struct xgbe_phy_if *phy_if = &pdata->phy_if;
        struct resource *mac_res[2];

        mac_res[0] = pdata->xgmac_res;
        mac_res[1] = pdata->xpcs_res;

+       phy_if->phy_stop(pdata);
        phy_if->phy_exit(pdata);

        /* Free Interrupts */
        axgbe_interrupts_free(ctx);

        /* Free workqueues */
        taskqueue_free(pdata->dev_workqueue);

        /* Release bus resources */
        bus_release_resources(iflib_get_dev(ctx), axgbe_pci_mac_spec, mac_res);

        /* Free VLAN bitmap */
        free(pdata->active_vlans, M_AXGBE);

        axgbe_sysctl_exit(pdata);

        return (0);
} /* axgbe_if_detach */

static void
axgbe_pci_init(struct xgbe_prv_data *pdata)
{
        struct xgbe_phy_if *phy_if = &pdata->phy_if;
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        int ret = 0;

        if (!__predict_false((test_bit(XGBE_DOWN, &pdata->dev_state)))) {
                axgbe_printf(1, "%s: Starting when XGBE_UP\n", __func__);
                return;
        }

        hw_if->init(pdata);

        ret = phy_if->phy_start(pdata);
        if (ret) {
                axgbe_error("%s: phy start %d\n", __func__, ret);
                ret = hw_if->exit(pdata);
                if (ret)
                        axgbe_error("%s: exit error %d\n", __func__, ret);
                return;
        }

        hw_if->enable_tx(pdata);
        hw_if->enable_rx(pdata);

        xgbe_start_timers(pdata);

        clear_bit(XGBE_DOWN, &pdata->dev_state);

        xgbe_dump_phy_registers(pdata);
        xgbe_dump_prop_registers(pdata);
        xgbe_dump_dma_registers(pdata, -1);
        xgbe_dump_mtl_registers(pdata);
        xgbe_dump_mac_registers(pdata);
        xgbe_dump_rmon_counters(pdata);
}

static void
axgbe_if_init(if_ctx_t ctx)
{
        struct axgbe_if_softc *sc = iflib_get_softc(ctx);
        struct xgbe_prv_data *pdata = &sc->pdata;

        axgbe_pci_init(pdata);
}

static void
axgbe_pci_stop(if_ctx_t ctx)
{
        struct axgbe_if_softc *sc = iflib_get_softc(ctx);
        struct xgbe_prv_data *pdata = &sc->pdata;
-       struct xgbe_phy_if *phy_if = &pdata->phy_if;
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        int ret;

        if (__predict_false(test_bit(XGBE_DOWN, &pdata->dev_state))) {
                axgbe_printf(1, "%s: Stopping when XGBE_DOWN\n", __func__);
                return;
        }

        xgbe_stop_timers(pdata);
        taskqueue_drain_all(pdata->dev_workqueue);

        hw_if->disable_tx(pdata);
        hw_if->disable_rx(pdata);

-       phy_if->phy_stop(pdata);
-
        ret =
hw_if->exit(pdata); if (ret) axgbe_error("%s: exit error %d\n", __func__, ret); set_bit(XGBE_DOWN, &pdata->dev_state); } static void axgbe_if_stop(if_ctx_t ctx) { axgbe_pci_stop(ctx); } static void axgbe_if_disable_intr(if_ctx_t ctx) { /* TODO - implement */ } static void axgbe_if_enable_intr(if_ctx_t ctx) { /* TODO - implement */ } static int axgbe_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *va, uint64_t *pa, int ntxqs, int ntxqsets) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; if_softc_ctx_t scctx = sc->scctx; struct xgbe_channel *channel; struct xgbe_ring *tx_ring; int i, j, k; MPASS(scctx->isc_ntxqsets > 0); MPASS(scctx->isc_ntxqsets == ntxqsets); MPASS(ntxqs == 1); axgbe_printf(1, "%s: txqsets %d/%d txqs %d\n", __func__, scctx->isc_ntxqsets, ntxqsets, ntxqs); for (i = 0 ; i < ntxqsets; i++) { channel = pdata->channel[i]; tx_ring = (struct xgbe_ring*)malloc(ntxqs * sizeof(struct xgbe_ring), M_AXGBE, M_NOWAIT | M_ZERO); if (tx_ring == NULL) { axgbe_error("Unable to allocate TX ring memory\n"); goto tx_ring_fail; } channel->tx_ring = tx_ring; for (j = 0; j < ntxqs; j++, tx_ring++) { tx_ring->rdata = (struct xgbe_ring_data*)malloc(scctx->isc_ntxd[j] * sizeof(struct xgbe_ring_data), M_AXGBE, M_NOWAIT); /* Get the virtual & physical address of hw queues */ tx_ring->rdesc = (struct xgbe_ring_desc *)va[i*ntxqs + j]; tx_ring->rdesc_paddr = pa[i*ntxqs + j]; tx_ring->rdesc_count = scctx->isc_ntxd[j]; spin_lock_init(&tx_ring->lock); } } axgbe_printf(1, "allocated for %d tx queues\n", scctx->isc_ntxqsets); return (0); tx_ring_fail: for (j = 0; j < i ; j++) { channel = pdata->channel[j]; tx_ring = channel->tx_ring; for (k = 0; k < ntxqs ; k++, tx_ring++) { if (tx_ring && tx_ring->rdata) free(tx_ring->rdata, M_AXGBE); } free(channel->tx_ring, M_AXGBE); channel->tx_ring = NULL; } return (ENOMEM); } /* axgbe_if_tx_queues_alloc */ static int axgbe_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *va, uint64_t *pa, int nrxqs, int nrxqsets) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; if_softc_ctx_t scctx = sc->scctx; struct xgbe_channel *channel; struct xgbe_ring *rx_ring; int i, j, k; MPASS(scctx->isc_nrxqsets > 0); MPASS(scctx->isc_nrxqsets == nrxqsets); if (!pdata->sph_enable) { MPASS(nrxqs == 1); } else { MPASS(nrxqs == 2); } axgbe_printf(1, "%s: rxqsets %d/%d rxqs %d\n", __func__, scctx->isc_nrxqsets, nrxqsets, nrxqs); for (i = 0 ; i < nrxqsets; i++) { channel = pdata->channel[i]; rx_ring = (struct xgbe_ring*)malloc(nrxqs * sizeof(struct xgbe_ring), M_AXGBE, M_NOWAIT | M_ZERO); if (rx_ring == NULL) { axgbe_error("Unable to allocate RX ring memory\n"); goto rx_ring_fail; } channel->rx_ring = rx_ring; for (j = 0; j < nrxqs; j++, rx_ring++) { rx_ring->rdata = (struct xgbe_ring_data*)malloc(scctx->isc_nrxd[j] * sizeof(struct xgbe_ring_data), M_AXGBE, M_NOWAIT); /* Get the virtual and physical address of the hw queues */ rx_ring->rdesc = (struct xgbe_ring_desc *)va[i*nrxqs + j]; rx_ring->rdesc_paddr = pa[i*nrxqs + j]; rx_ring->rdesc_count = scctx->isc_nrxd[j]; spin_lock_init(&rx_ring->lock); } } axgbe_printf(2, "allocated for %d rx queues\n", scctx->isc_nrxqsets); return (0); rx_ring_fail: for (j = 0 ; j < i ; j++) { channel = pdata->channel[j]; rx_ring = channel->rx_ring; for (k = 0; k < nrxqs ; k++, rx_ring++) { if (rx_ring && rx_ring->rdata) free(rx_ring->rdata, M_AXGBE); } free(channel->rx_ring, M_AXGBE); channel->rx_ring = NULL; } return (ENOMEM); } /* axgbe_if_rx_queues_alloc */ static void 
axgbe_if_queues_free(if_ctx_t ctx) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; if_softc_ctx_t scctx = sc->scctx; if_shared_ctx_t sctx = sc->sctx; struct xgbe_channel *channel; struct xgbe_ring *tx_ring; struct xgbe_ring *rx_ring; int i, j; for (i = 0 ; i < scctx->isc_ntxqsets; i++) { channel = pdata->channel[i]; tx_ring = channel->tx_ring; for (j = 0; j < sctx->isc_ntxqs ; j++, tx_ring++) { if (tx_ring && tx_ring->rdata) free(tx_ring->rdata, M_AXGBE); } free(channel->tx_ring, M_AXGBE); channel->tx_ring = NULL; } for (i = 0 ; i < scctx->isc_nrxqsets; i++) { channel = pdata->channel[i]; rx_ring = channel->rx_ring; for (j = 0; j < sctx->isc_nrxqs ; j++, rx_ring++) { if (rx_ring && rx_ring->rdata) free(rx_ring->rdata, M_AXGBE); } free(channel->rx_ring, M_AXGBE); channel->rx_ring = NULL; } axgbe_free_channels(sc); } /* axgbe_if_queues_free */ static void axgbe_if_vlan_register(if_ctx_t ctx, uint16_t vtag) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; struct xgbe_hw_if *hw_if = &pdata->hw_if; if (!bit_test(pdata->active_vlans, vtag)) { axgbe_printf(0, "Registering VLAN %d\n", vtag); bit_set(pdata->active_vlans, vtag); hw_if->update_vlan_hash_table(pdata); pdata->num_active_vlans++; axgbe_printf(1, "Total active vlans: %d\n", pdata->num_active_vlans); } else axgbe_printf(0, "VLAN %d already registered\n", vtag); xgbe_dump_active_vlans(pdata); } static void axgbe_if_vlan_unregister(if_ctx_t ctx, uint16_t vtag) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; struct xgbe_hw_if *hw_if = &pdata->hw_if; if (pdata->num_active_vlans == 0) { axgbe_printf(1, "No active VLANs to unregister\n"); return; } if (bit_test(pdata->active_vlans, vtag)){ axgbe_printf(0, "Un-Registering VLAN %d\n", vtag); bit_clear(pdata->active_vlans, vtag); hw_if->update_vlan_hash_table(pdata); pdata->num_active_vlans--; axgbe_printf(1, "Total active vlans: %d\n", pdata->num_active_vlans); } else axgbe_printf(0, "VLAN %d already unregistered\n", vtag); xgbe_dump_active_vlans(pdata); } #if __FreeBSD_version >= 1300000 static bool axgbe_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event) { switch (event) { case IFLIB_RESTART_VLAN_CONFIG: default: return (true); } } #endif static int axgbe_if_msix_intr_assign(if_ctx_t ctx, int msix) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; if_softc_ctx_t scctx = sc->scctx; struct xgbe_channel *channel; struct if_irq irq; int i, error, rid = 0, flags; char buf[16]; MPASS(scctx->isc_intr != IFLIB_INTR_LEGACY); pdata->isr_as_tasklet = 1; if (scctx->isc_intr == IFLIB_INTR_MSI) { pdata->irq_count = 1; pdata->channel_irq_count = 1; return (0); } axgbe_printf(1, "%s: msix %d txqsets %d rxqsets %d\n", __func__, msix, scctx->isc_ntxqsets, scctx->isc_nrxqsets); flags = RF_ACTIVE; /* DEV INTR SETUP */ rid++; error = iflib_irq_alloc_generic(ctx, &pdata->dev_irq, rid, IFLIB_INTR_ADMIN, axgbe_dev_isr, sc, 0, "dev_irq"); if (error) { axgbe_error("Failed to register device interrupt rid %d name %s\n", rid, "dev_irq"); return (error); } /* ECC INTR SETUP */ rid++; pdata->ecc_rid = rid; pdata->ecc_irq_res = bus_alloc_resource_any(pdata->dev, SYS_RES_IRQ, &rid, flags); if (!pdata->ecc_irq_res) { axgbe_error("failed to allocate IRQ for rid %d, name %s.\n", rid, "ecc_irq"); return (ENOMEM); } error = bus_setup_intr(pdata->dev, pdata->ecc_irq_res, INTR_MPSAFE | INTR_TYPE_NET, NULL, axgbe_ecc_isr, sc, 
&pdata->ecc_irq_tag); if (error) { axgbe_error("failed to setup interrupt for rid %d, name %s: %d\n", rid, "ecc_irq", error); return (error); } /* I2C INTR SETUP */ rid++; pdata->i2c_rid = rid; pdata->i2c_irq_res = bus_alloc_resource_any(pdata->dev, SYS_RES_IRQ, &rid, flags); if (!pdata->i2c_irq_res) { axgbe_error("failed to allocate IRQ for rid %d, name %s.\n", rid, "i2c_irq"); return (ENOMEM); } error = bus_setup_intr(pdata->dev, pdata->i2c_irq_res, INTR_MPSAFE | INTR_TYPE_NET, NULL, axgbe_i2c_isr, sc, &pdata->i2c_irq_tag); if (error) { axgbe_error("failed to setup interrupt for rid %d, name %s: %d\n", rid, "i2c_irq", error); return (error); } /* AN INTR SETUP */ rid++; pdata->an_rid = rid; pdata->an_irq_res = bus_alloc_resource_any(pdata->dev, SYS_RES_IRQ, &rid, flags); if (!pdata->an_irq_res) { axgbe_error("failed to allocate IRQ for rid %d, name %s.\n", rid, "an_irq"); return (ENOMEM); } error = bus_setup_intr(pdata->dev, pdata->an_irq_res, INTR_MPSAFE | INTR_TYPE_NET, NULL, axgbe_an_isr, sc, &pdata->an_irq_tag); if (error) { axgbe_error("failed to setup interrupt for rid %d, name %s: %d\n", rid, "an_irq", error); return (error); } pdata->per_channel_irq = 1; pdata->channel_irq_mode = XGBE_IRQ_MODE_LEVEL; rid++; for (i = 0; i < scctx->isc_nrxqsets; i++, rid++) { channel = pdata->channel[i]; snprintf(buf, sizeof(buf), "rxq%d", i); error = iflib_irq_alloc_generic(ctx, &irq, rid, IFLIB_INTR_RXTX, axgbe_msix_que, channel, channel->queue_index, buf); if (error) { axgbe_error("Failed to allocated que int %d err: %d\n", i, error); return (error); } channel->dma_irq_rid = rid; channel->dma_irq_res = irq.ii_res; channel->dma_irq_tag = irq.ii_tag; axgbe_printf(1, "%s: channel count %d idx %d irq %d\n", __func__, scctx->isc_nrxqsets, i, rid); } pdata->irq_count = msix; pdata->channel_irq_count = scctx->isc_nrxqsets; for (i = 0; i < scctx->isc_ntxqsets; i++) { channel = pdata->channel[i]; snprintf(buf, sizeof(buf), "txq%d", i); irq.ii_res = channel->dma_irq_res; iflib_softirq_alloc_generic(ctx, &irq, IFLIB_INTR_TX, channel, channel->queue_index, buf); } return (0); } /* axgbe_if_msix_intr_assign */ static int xgbe_enable_rx_tx_int(struct xgbe_prv_data *pdata, struct xgbe_channel *channel) { struct xgbe_hw_if *hw_if = &pdata->hw_if; enum xgbe_int int_id; if (channel->tx_ring && channel->rx_ring) int_id = XGMAC_INT_DMA_CH_SR_TI_RI; else if (channel->tx_ring) int_id = XGMAC_INT_DMA_CH_SR_TI; else if (channel->rx_ring) int_id = XGMAC_INT_DMA_CH_SR_RI; else return (-1); axgbe_printf(1, "%s channel: %d rx_tx interrupt enabled %d\n", __func__, channel->queue_index, int_id); return (hw_if->enable_int(channel, int_id)); } static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata, struct xgbe_channel *channel) { struct xgbe_hw_if *hw_if = &pdata->hw_if; enum xgbe_int int_id; if (channel->tx_ring && channel->rx_ring) int_id = XGMAC_INT_DMA_CH_SR_TI_RI; else if (channel->tx_ring) int_id = XGMAC_INT_DMA_CH_SR_TI; else if (channel->rx_ring) int_id = XGMAC_INT_DMA_CH_SR_RI; else return; axgbe_printf(1, "%s channel: %d rx_tx interrupt disabled %d\n", __func__, channel->queue_index, int_id); hw_if->disable_int(channel, int_id); } static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata) { unsigned int i; for (i = 0; i < pdata->channel_count; i++) xgbe_disable_rx_tx_int(pdata, pdata->channel[i]); } static int axgbe_msix_que(void *arg) { struct xgbe_channel *channel = (struct xgbe_channel *)arg; struct xgbe_prv_data *pdata = channel->pdata; unsigned int dma_status; axgbe_printf(1, "%s: Channel: %d SR 
0x%04x DSR 0x%04x IER:0x%04x D_ISR:0x%04x M_ISR:0x%04x\n", __func__, channel->queue_index, XGMAC_DMA_IOREAD(channel, DMA_CH_SR), XGMAC_DMA_IOREAD(channel, DMA_CH_DSR), XGMAC_DMA_IOREAD(channel, DMA_CH_IER), XGMAC_IOREAD(pdata, DMA_ISR), XGMAC_IOREAD(pdata, MAC_ISR)); (void)XGMAC_DMA_IOREAD(channel, DMA_CH_SR); /* Disable Tx and Rx channel interrupts */ xgbe_disable_rx_tx_int(pdata, channel); /* Clear the interrupts */ dma_status = 0; XGMAC_SET_BITS(dma_status, DMA_CH_SR, TI, 1); XGMAC_SET_BITS(dma_status, DMA_CH_SR, RI, 1); XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_status); return (FILTER_SCHEDULE_THREAD); } static int axgbe_dev_isr(void *arg) { struct axgbe_if_softc *sc = (struct axgbe_if_softc *)arg; struct xgbe_prv_data *pdata = &sc->pdata; struct xgbe_channel *channel; struct xgbe_hw_if *hw_if = &pdata->hw_if; unsigned int i, dma_isr, dma_ch_isr; unsigned int mac_isr, mac_mdioisr; int ret = FILTER_HANDLED; dma_isr = XGMAC_IOREAD(pdata, DMA_ISR); axgbe_printf(2, "%s DMA ISR: 0x%x\n", __func__, dma_isr); if (!dma_isr) return (FILTER_HANDLED); for (i = 0; i < pdata->channel_count; i++) { if (!(dma_isr & (1 << i))) continue; channel = pdata->channel[i]; dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR); axgbe_printf(2, "%s: channel %d SR 0x%x DSR 0x%x\n", __func__, channel->queue_index, dma_ch_isr, XGMAC_DMA_IOREAD(channel, DMA_CH_DSR)); /* * The TI or RI interrupt bits may still be set even if using * per channel DMA interrupts. Check to be sure those are not * enabled before using the private data napi structure. */ if (!pdata->per_channel_irq && (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) || XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) { /* Disable Tx and Rx interrupts */ xgbe_disable_rx_tx_ints(pdata); } else { /* * Don't clear Rx/Tx status if doing per channel DMA * interrupts, these will be cleared by the ISR for * per channel DMA interrupts */ XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, TI, 0); XGMAC_SET_BITS(dma_ch_isr, DMA_CH_SR, RI, 0); } if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU)) pdata->ext_stats.rx_buffer_unavailable++; /* Restart the device on a Fatal Bus Error */ if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE)) axgbe_error("%s: Fatal bus error reported 0x%x\n", __func__, dma_ch_isr); /* Clear all interrupt signals */ XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr); ret = FILTER_SCHEDULE_THREAD; } if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) { mac_isr = XGMAC_IOREAD(pdata, MAC_ISR); axgbe_printf(2, "%s MAC ISR: 0x%x\n", __func__, mac_isr); if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS)) hw_if->tx_mmc_int(pdata); if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS)) hw_if->rx_mmc_int(pdata); if (XGMAC_GET_BITS(mac_isr, MAC_ISR, SMI)) { mac_mdioisr = XGMAC_IOREAD(pdata, MAC_MDIOISR); if (XGMAC_GET_BITS(mac_mdioisr, MAC_MDIOISR, SNGLCOMPINT)) wakeup_one(pdata); } } return (ret); } /* axgbe_dev_isr */ static void axgbe_i2c_isr(void *arg) { struct axgbe_if_softc *sc = (struct axgbe_if_softc *)arg; sc->pdata.i2c_if.i2c_isr(&sc->pdata); } static void axgbe_ecc_isr(void *arg) { /* TODO - implement */ } static void axgbe_an_isr(void *arg) { struct axgbe_if_softc *sc = (struct axgbe_if_softc *)arg; sc->pdata.phy_if.an_isr(&sc->pdata); } static int axgbe_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; int ret; if (qid < pdata->tx_q_count) { ret = xgbe_enable_rx_tx_int(pdata, pdata->channel[qid]); if (ret) { axgbe_error("Enable TX INT failed\n"); return (ret); } } else axgbe_error("Queue ID exceed 
channel count\n"); return (0); } static int axgbe_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; int ret; if (qid < pdata->rx_q_count) { ret = xgbe_enable_rx_tx_int(pdata, pdata->channel[qid]); if (ret) { axgbe_error("Enable RX INT failed\n"); return (ret); } } else axgbe_error("Queue ID exceed channel count\n"); return (0); } static void axgbe_if_update_admin_status(if_ctx_t ctx) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; axgbe_printf(1, "%s: phy_link %d status %d speed %d\n", __func__, pdata->phy_link, sc->link_status, pdata->phy.speed); if (pdata->phy_link < 0) return; if (pdata->phy_link) { if (sc->link_status == LINK_STATE_DOWN) { sc->link_status = LINK_STATE_UP; if (pdata->phy.speed & SPEED_10000) iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(10)); else if (pdata->phy.speed & SPEED_2500) iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(2.5)); else if (pdata->phy.speed & SPEED_1000) iflib_link_state_change(ctx, LINK_STATE_UP, IF_Gbps(1)); else if (pdata->phy.speed & SPEED_100) iflib_link_state_change(ctx, LINK_STATE_UP, IF_Mbps(100)); else if (pdata->phy.speed & SPEED_10) iflib_link_state_change(ctx, LINK_STATE_UP, IF_Mbps(10)); } } else { if (sc->link_status == LINK_STATE_UP) { sc->link_status = LINK_STATE_DOWN; iflib_link_state_change(ctx, LINK_STATE_DOWN, 0); } } } static int axgbe_if_media_change(if_ctx_t ctx) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct ifmedia *ifm = iflib_get_media(ctx); sx_xlock(&sc->pdata.an_mutex); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_10G_KR: sc->pdata.phy.speed = SPEED_10000; sc->pdata.phy.autoneg = AUTONEG_DISABLE; break; case IFM_2500_KX: sc->pdata.phy.speed = SPEED_2500; sc->pdata.phy.autoneg = AUTONEG_DISABLE; break; case IFM_1000_KX: sc->pdata.phy.speed = SPEED_1000; sc->pdata.phy.autoneg = AUTONEG_DISABLE; break; case IFM_100_TX: sc->pdata.phy.speed = SPEED_100; sc->pdata.phy.autoneg = AUTONEG_DISABLE; break; case IFM_AUTO: sc->pdata.phy.autoneg = AUTONEG_ENABLE; break; } sx_xunlock(&sc->pdata.an_mutex); return (-sc->pdata.phy_if.phy_config_aneg(&sc->pdata)); } static int axgbe_if_promisc_set(if_ctx_t ctx, int flags) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; if_t ifp = pdata->netdev; axgbe_printf(1, "%s: MAC_PFR 0x%x drv_flags 0x%x if_flags 0x%x\n", __func__, XGMAC_IOREAD(pdata, MAC_PFR), if_getdrvflags(ifp), if_getflags(ifp)); if (if_getflags(ifp) & IFF_PPROMISC) { axgbe_printf(1, "User requested to enter promisc mode\n"); if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == 1) { axgbe_printf(1, "Already in promisc mode\n"); return (0); } axgbe_printf(1, "Entering promisc mode\n"); XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1); XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0); } else { axgbe_printf(1, "User requested to leave promisc mode\n"); if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == 0) { axgbe_printf(1, "Already not in promisc mode\n"); return (0); } axgbe_printf(1, "Leaving promisc mode\n"); XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0); XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1); } return (0); } static uint64_t axgbe_if_get_counter(if_ctx_t ctx, ift_counter cnt) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); if_t ifp = iflib_get_ifp(ctx); struct xgbe_prv_data *pdata = &sc->pdata; struct xgbe_mmc_stats *pstats = &pdata->mmc_stats; pdata->hw_if.read_mmc_stats(pdata); 
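	/*
	 * read_mmc_stats() above has just refreshed pdata->mmc_stats from the
	 * hardware MMC counter block; the switch below maps the requested
	 * ifnet counter onto those software copies.
	 */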
switch(cnt) { case IFCOUNTER_IPACKETS: return (pstats->rxframecount_gb); case IFCOUNTER_IERRORS: return (pstats->rxframecount_gb - pstats->rxbroadcastframes_g - pstats->rxmulticastframes_g - pstats->rxunicastframes_g); case IFCOUNTER_OPACKETS: return (pstats->txframecount_gb); case IFCOUNTER_OERRORS: return (pstats->txframecount_gb - pstats->txframecount_g); case IFCOUNTER_IBYTES: return (pstats->rxoctetcount_gb); case IFCOUNTER_OBYTES: return (pstats->txoctetcount_gb); default: return (if_get_counter_default(ifp, cnt)); } } static int axgbe_if_mtu_set(if_ctx_t ctx, uint32_t mtu) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; int ret; if (mtu > XGMAC_JUMBO_PACKET_MTU) return (EINVAL); ret = xgbe_calc_rx_buf_size(pdata->netdev, mtu); pdata->rx_buf_size = ret; axgbe_printf(1, "%s: rx_buf_size %d\n", __func__, ret); sc->scctx->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; return (0); } static void axgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr) { struct axgbe_if_softc *sc = iflib_get_softc(ctx); struct xgbe_prv_data *pdata = &sc->pdata; ifmr->ifm_status = IFM_AVALID; if (!sc->pdata.phy.link) return; ifmr->ifm_active = IFM_ETHER; ifmr->ifm_status |= IFM_ACTIVE; axgbe_printf(1, "Speed 0x%x Mode %d\n", sc->pdata.phy.speed, pdata->phy_if.phy_impl.cur_mode(pdata)); pdata->phy_if.phy_impl.get_type(pdata, ifmr); ifmr->ifm_active |= IFM_FDX; ifmr->ifm_active |= IFM_ETH_TXPAUSE; ifmr->ifm_active |= IFM_ETH_RXPAUSE; } diff --git a/sys/dev/axgbe/xgbe-common.h b/sys/dev/axgbe/xgbe-common.h index 0f497e53cb6f..4d504682d1af 100644 --- a/sys/dev/axgbe/xgbe-common.h +++ b/sys/dev/axgbe/xgbe-common.h @@ -1,1744 +1,1752 @@ /* * AMD 10Gb Ethernet driver * * Copyright (c) 2014-2016,2020 Advanced Micro Devices, Inc. * * This file is available to you under your choice of the following two * licenses: * * License 1: GPLv2 * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or (at * your option) any later version. * * This file is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . * * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. unless otherwise expressly agreed to in writing between Synopsys * and you. * * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. 
Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * * License 2: Modified BSD * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Advanced Micro Devices, Inc. nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. unless otherwise expressly agreed to in writing between Synopsys * and you. * * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. 
Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef __XGBE_COMMON_H__ #define __XGBE_COMMON_H__ #include #include /* DMA register offsets */ #define DMA_MR 0x3000 #define DMA_SBMR 0x3004 #define DMA_ISR 0x3008 #define DMA_AXIARCR 0x3010 #define DMA_AXIAWCR 0x3018 #define DMA_AXIAWARCR 0x301c #define DMA_DSR0 0x3020 #define DMA_DSR1 0x3024 #define DMA_DSR2 0x3028 #define DMA_DSR3 0x302C #define DMA_DSR4 0x3030 #define DMA_TXEDMACR 0x3040 #define DMA_RXEDMACR 0x3044 /* DMA register entry bit positions and sizes */ #define DMA_ISR_MACIS_INDEX 17 #define DMA_ISR_MACIS_WIDTH 1 #define DMA_ISR_MTLIS_INDEX 16 #define DMA_ISR_MTLIS_WIDTH 1 #define DMA_MR_INTM_INDEX 12 #define DMA_MR_INTM_WIDTH 2 #define DMA_MR_SWR_INDEX 0 #define DMA_MR_SWR_WIDTH 1 #define DMA_RXEDMACR_RDPS_INDEX 0 #define DMA_RXEDMACR_RDPS_WIDTH 3 #define DMA_SBMR_AAL_INDEX 12 #define DMA_SBMR_AAL_WIDTH 1 #define DMA_SBMR_EAME_INDEX 11 #define DMA_SBMR_EAME_WIDTH 1 #define DMA_SBMR_BLEN_INDEX 1 #define DMA_SBMR_BLEN_WIDTH 7 #define DMA_SBMR_RD_OSR_LMT_INDEX 16 #define DMA_SBMR_RD_OSR_LMT_WIDTH 6 #define DMA_SBMR_UNDEF_INDEX 0 #define DMA_SBMR_UNDEF_WIDTH 1 #define DMA_SBMR_WR_OSR_LMT_INDEX 24 #define DMA_SBMR_WR_OSR_LMT_WIDTH 6 #define DMA_TXEDMACR_TDPS_INDEX 0 #define DMA_TXEDMACR_TDPS_WIDTH 3 /* DMA register values */ #define DMA_SBMR_BLEN_256 256 #define DMA_SBMR_BLEN_128 128 #define DMA_SBMR_BLEN_64 64 #define DMA_SBMR_BLEN_32 32 #define DMA_SBMR_BLEN_16 16 #define DMA_SBMR_BLEN_8 8 #define DMA_SBMR_BLEN_4 4 #define DMA_DSR_RPS_WIDTH 4 #define DMA_DSR_TPS_WIDTH 4 #define DMA_DSR_Q_WIDTH (DMA_DSR_RPS_WIDTH + DMA_DSR_TPS_WIDTH) #define DMA_DSR0_RPS_START 8 #define DMA_DSR0_TPS_START 12 #define DMA_DSRX_FIRST_QUEUE 3 #define DMA_DSRX_INC 4 #define DMA_DSRX_QPR 4 #define DMA_DSRX_RPS_START 0 #define DMA_DSRX_TPS_START 4 #define DMA_TPS_STOPPED 0x00 #define DMA_TPS_SUSPENDED 0x06 /* DMA channel register offsets * Multiple channels can be active. The first channel has registers * that begin at 0x3100. Each subsequent channel has registers that * are accessed using an offset of 0x80 from the previous channel. 
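 * For example, the DMA_CH_SR register for channel 2 resolves to
 * DMA_CH_BASE + (2 * DMA_CH_INC) + DMA_CH_SR = 0x3100 + 0x100 + 0x60 = 0x3260.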
*/ #define DMA_CH_BASE 0x3100 #define DMA_CH_INC 0x80 #define DMA_CH_CR 0x00 #define DMA_CH_TCR 0x04 #define DMA_CH_RCR 0x08 #define DMA_CH_TDLR_HI 0x10 #define DMA_CH_TDLR_LO 0x14 #define DMA_CH_RDLR_HI 0x18 #define DMA_CH_RDLR_LO 0x1c #define DMA_CH_TDTR_LO 0x24 #define DMA_CH_RDTR_LO 0x2c #define DMA_CH_TDRLR 0x30 #define DMA_CH_RDRLR 0x34 #define DMA_CH_IER 0x38 #define DMA_CH_RIWT 0x3c #define DMA_CH_CATDR_LO 0x44 #define DMA_CH_CARDR_LO 0x4c #define DMA_CH_CATBR_HI 0x50 #define DMA_CH_CATBR_LO 0x54 #define DMA_CH_CARBR_HI 0x58 #define DMA_CH_CARBR_LO 0x5c #define DMA_CH_SR 0x60 #define DMA_CH_DSR 0x64 #define DMA_CH_DCFL 0x68 #define DMA_CH_MFC 0x6c #define DMA_CH_TDTRO 0x70 #define DMA_CH_RDTRO 0x74 #define DMA_CH_TDWRO 0x78 #define DMA_CH_RDWRO 0x7C /* DMA channel register entry bit positions and sizes */ #define DMA_CH_CR_PBLX8_INDEX 16 #define DMA_CH_CR_PBLX8_WIDTH 1 #define DMA_CH_CR_SPH_INDEX 24 #define DMA_CH_CR_SPH_WIDTH 1 #define DMA_CH_IER_AIE20_INDEX 15 #define DMA_CH_IER_AIE20_WIDTH 1 #define DMA_CH_IER_AIE_INDEX 14 #define DMA_CH_IER_AIE_WIDTH 1 #define DMA_CH_IER_FBEE_INDEX 12 #define DMA_CH_IER_FBEE_WIDTH 1 #define DMA_CH_IER_NIE20_INDEX 16 #define DMA_CH_IER_NIE20_WIDTH 1 #define DMA_CH_IER_NIE_INDEX 15 #define DMA_CH_IER_NIE_WIDTH 1 #define DMA_CH_IER_RBUE_INDEX 7 #define DMA_CH_IER_RBUE_WIDTH 1 #define DMA_CH_IER_RIE_INDEX 6 #define DMA_CH_IER_RIE_WIDTH 1 #define DMA_CH_IER_RSE_INDEX 8 #define DMA_CH_IER_RSE_WIDTH 1 #define DMA_CH_IER_TBUE_INDEX 2 #define DMA_CH_IER_TBUE_WIDTH 1 #define DMA_CH_IER_TIE_INDEX 0 #define DMA_CH_IER_TIE_WIDTH 1 #define DMA_CH_IER_TXSE_INDEX 1 #define DMA_CH_IER_TXSE_WIDTH 1 #define DMA_CH_RCR_PBL_INDEX 16 #define DMA_CH_RCR_PBL_WIDTH 6 #define DMA_CH_RCR_RBSZ_INDEX 1 #define DMA_CH_RCR_RBSZ_WIDTH 14 #define DMA_CH_RCR_SR_INDEX 0 #define DMA_CH_RCR_SR_WIDTH 1 #define DMA_CH_RIWT_RWT_INDEX 0 #define DMA_CH_RIWT_RWT_WIDTH 8 #define DMA_CH_SR_FBE_INDEX 12 #define DMA_CH_SR_FBE_WIDTH 1 #define DMA_CH_SR_RBU_INDEX 7 #define DMA_CH_SR_RBU_WIDTH 1 #define DMA_CH_SR_RI_INDEX 6 #define DMA_CH_SR_RI_WIDTH 1 #define DMA_CH_SR_RPS_INDEX 8 #define DMA_CH_SR_RPS_WIDTH 1 #define DMA_CH_SR_TBU_INDEX 2 #define DMA_CH_SR_TBU_WIDTH 1 #define DMA_CH_SR_TI_INDEX 0 #define DMA_CH_SR_TI_WIDTH 1 #define DMA_CH_SR_TPS_INDEX 1 #define DMA_CH_SR_TPS_WIDTH 1 #define DMA_CH_TCR_OSP_INDEX 4 #define DMA_CH_TCR_OSP_WIDTH 1 #define DMA_CH_TCR_PBL_INDEX 16 #define DMA_CH_TCR_PBL_WIDTH 6 #define DMA_CH_TCR_ST_INDEX 0 #define DMA_CH_TCR_ST_WIDTH 1 #define DMA_CH_TCR_TSE_INDEX 12 #define DMA_CH_TCR_TSE_WIDTH 1 /* DMA channel register values */ #define DMA_OSP_DISABLE 0x00 #define DMA_OSP_ENABLE 0x01 #define DMA_PBL_1 1 #define DMA_PBL_2 2 #define DMA_PBL_4 4 #define DMA_PBL_8 8 #define DMA_PBL_16 16 #define DMA_PBL_32 32 #define DMA_PBL_64 64 /* 8 x 8 */ #define DMA_PBL_128 128 /* 8 x 16 */ #define DMA_PBL_256 256 /* 8 x 32 */ #define DMA_PBL_X8_DISABLE 0x00 #define DMA_PBL_X8_ENABLE 0x01 /* MAC register offsets */ #define MAC_TCR 0x0000 #define MAC_RCR 0x0004 #define MAC_PFR 0x0008 #define MAC_WTR 0x000c #define MAC_HTR0 0x0010 #define MAC_HTR1 0x0014 #define MAC_HTR2 0x0018 #define MAC_HTR3 0x001c #define MAC_HTR4 0x0020 #define MAC_HTR5 0x0024 #define MAC_HTR6 0x0028 #define MAC_HTR7 0x002c #define MAC_VLANTR 0x0050 #define MAC_VLANHTR 0x0058 #define MAC_VLANIR 0x0060 #define MAC_IVLANIR 0x0064 #define MAC_RETMR 0x006c #define MAC_Q0TFCR 0x0070 #define MAC_Q1TFCR 0x0074 #define MAC_Q2TFCR 0x0078 #define MAC_Q3TFCR 0x007c #define MAC_Q4TFCR 0x0080 #define MAC_Q5TFCR 0x0084 
#define MAC_Q6TFCR 0x0088 #define MAC_Q7TFCR 0x008c #define MAC_RFCR 0x0090 #define MAC_RQC0R 0x00a0 #define MAC_RQC1R 0x00a4 #define MAC_RQC2R 0x00a8 #define MAC_RQC3R 0x00ac #define MAC_ISR 0x00b0 #define MAC_IER 0x00b4 #define MAC_RTSR 0x00b8 #define MAC_PMTCSR 0x00c0 #define MAC_RWKPFR 0x00c4 #define MAC_LPICSR 0x00d0 #define MAC_LPITCR 0x00d4 #define MAC_TIR 0x00e0 #define MAC_VR 0x0110 #define MAC_DR 0x0114 #define MAC_HWF0R 0x011c #define MAC_HWF1R 0x0120 #define MAC_HWF2R 0x0124 #define MAC_MDIOSCAR 0x0200 #define MAC_MDIOSCCDR 0x0204 #define MAC_MDIOISR 0x0214 #define MAC_MDIOIER 0x0218 #define MAC_MDIOCL22R 0x0220 #define MAC_GPIOCR 0x0278 #define MAC_GPIOSR 0x027c #define MAC_MACA0HR 0x0300 #define MAC_MACA0LR 0x0304 #define MAC_MACA1HR 0x0308 #define MAC_MACA1LR 0x030c #define MAC_RSSCR 0x0c80 #define MAC_RSSAR 0x0c88 #define MAC_RSSDR 0x0c8c #define MAC_TSCR 0x0d00 #define MAC_SSIR 0x0d04 #define MAC_STSR 0x0d08 #define MAC_STNR 0x0d0c #define MAC_STSUR 0x0d10 #define MAC_STNUR 0x0d14 #define MAC_TSAR 0x0d18 #define MAC_TSSR 0x0d20 #define MAC_TXSNR 0x0d30 #define MAC_TXSSR 0x0d34 #define MAC_QTFCR_INC 4 #define MAC_MACA_INC 4 #define MAC_HTR_INC 4 #define MAC_RQC2_INC 4 #define MAC_RQC2_Q_PER_REG 4 /* MAC register entry bit positions and sizes */ #define MAC_HWF0R_ADDMACADRSEL_INDEX 18 #define MAC_HWF0R_ADDMACADRSEL_WIDTH 5 #define MAC_HWF0R_ARPOFFSEL_INDEX 9 #define MAC_HWF0R_ARPOFFSEL_WIDTH 1 #define MAC_HWF0R_EEESEL_INDEX 13 #define MAC_HWF0R_EEESEL_WIDTH 1 #define MAC_HWF0R_GMIISEL_INDEX 1 #define MAC_HWF0R_GMIISEL_WIDTH 1 #define MAC_HWF0R_MGKSEL_INDEX 7 #define MAC_HWF0R_MGKSEL_WIDTH 1 #define MAC_HWF0R_MMCSEL_INDEX 8 #define MAC_HWF0R_MMCSEL_WIDTH 1 #define MAC_HWF0R_RWKSEL_INDEX 6 #define MAC_HWF0R_RWKSEL_WIDTH 1 #define MAC_HWF0R_RXCOESEL_INDEX 16 #define MAC_HWF0R_RXCOESEL_WIDTH 1 #define MAC_HWF0R_SAVLANINS_INDEX 27 #define MAC_HWF0R_SAVLANINS_WIDTH 1 #define MAC_HWF0R_SMASEL_INDEX 5 #define MAC_HWF0R_SMASEL_WIDTH 1 #define MAC_HWF0R_TSSEL_INDEX 12 #define MAC_HWF0R_TSSEL_WIDTH 1 #define MAC_HWF0R_TSSTSSEL_INDEX 25 #define MAC_HWF0R_TSSTSSEL_WIDTH 2 #define MAC_HWF0R_TXCOESEL_INDEX 14 #define MAC_HWF0R_TXCOESEL_WIDTH 1 #define MAC_HWF0R_VLHASH_INDEX 4 #define MAC_HWF0R_VLHASH_WIDTH 1 #define MAC_HWF0R_VXN_INDEX 29 #define MAC_HWF0R_VXN_WIDTH 1 #define MAC_HWF1R_ADDR64_INDEX 14 #define MAC_HWF1R_ADDR64_WIDTH 2 #define MAC_HWF1R_ADVTHWORD_INDEX 13 #define MAC_HWF1R_ADVTHWORD_WIDTH 1 #define MAC_HWF1R_DBGMEMA_INDEX 19 #define MAC_HWF1R_DBGMEMA_WIDTH 1 #define MAC_HWF1R_DCBEN_INDEX 16 #define MAC_HWF1R_DCBEN_WIDTH 1 #define MAC_HWF1R_HASHTBLSZ_INDEX 24 #define MAC_HWF1R_HASHTBLSZ_WIDTH 3 #define MAC_HWF1R_L3L4FNUM_INDEX 27 #define MAC_HWF1R_L3L4FNUM_WIDTH 4 #define MAC_HWF1R_NUMTC_INDEX 21 #define MAC_HWF1R_NUMTC_WIDTH 3 #define MAC_HWF1R_RSSEN_INDEX 20 #define MAC_HWF1R_RSSEN_WIDTH 1 #define MAC_HWF1R_RXFIFOSIZE_INDEX 0 #define MAC_HWF1R_RXFIFOSIZE_WIDTH 5 #define MAC_HWF1R_SPHEN_INDEX 17 #define MAC_HWF1R_SPHEN_WIDTH 1 #define MAC_HWF1R_TSOEN_INDEX 18 #define MAC_HWF1R_TSOEN_WIDTH 1 #define MAC_HWF1R_TXFIFOSIZE_INDEX 6 #define MAC_HWF1R_TXFIFOSIZE_WIDTH 5 #define MAC_HWF2R_AUXSNAPNUM_INDEX 28 #define MAC_HWF2R_AUXSNAPNUM_WIDTH 3 #define MAC_HWF2R_PPSOUTNUM_INDEX 24 #define MAC_HWF2R_PPSOUTNUM_WIDTH 3 #define MAC_HWF2R_RXCHCNT_INDEX 12 #define MAC_HWF2R_RXCHCNT_WIDTH 4 #define MAC_HWF2R_RXQCNT_INDEX 0 #define MAC_HWF2R_RXQCNT_WIDTH 4 #define MAC_HWF2R_TXCHCNT_INDEX 18 #define MAC_HWF2R_TXCHCNT_WIDTH 4 #define MAC_HWF2R_TXQCNT_INDEX 6 #define MAC_HWF2R_TXQCNT_WIDTH 4 
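/*
 * Illustrative sketch (not part of the original header): the MAC_HWF0R/1R/2R
 * hardware feature words above are typically decoded with the XGMAC_IOREAD()
 * and XGMAC_GET_BITS() helpers defined further below, using these
 * INDEX/WIDTH pairs, e.g.
 *
 *	unsigned int hw_feat1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
 *	unsigned int tso_cap  = XGMAC_GET_BITS(hw_feat1, MAC_HWF1R, TSOEN);
 *
 * which extracts the single bit at MAC_HWF1R_TSOEN_INDEX (bit 18). The
 * variable names are hypothetical; pdata is the driver's xgbe_prv_data.
 */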
#define MAC_IER_TSIE_INDEX 12 #define MAC_IER_TSIE_WIDTH 1 #define MAC_ISR_MMCRXIS_INDEX 9 #define MAC_ISR_MMCRXIS_WIDTH 1 #define MAC_ISR_MMCTXIS_INDEX 10 #define MAC_ISR_MMCTXIS_WIDTH 1 #define MAC_ISR_PMTIS_INDEX 4 #define MAC_ISR_PMTIS_WIDTH 1 #define MAC_ISR_SMI_INDEX 1 #define MAC_ISR_SMI_WIDTH 1 #define MAC_ISR_TSIS_INDEX 12 #define MAC_ISR_TSIS_WIDTH 1 #define MAC_MACA1HR_AE_INDEX 31 #define MAC_MACA1HR_AE_WIDTH 1 #define MAC_MDIOIER_SNGLCOMPIE_INDEX 12 #define MAC_MDIOIER_SNGLCOMPIE_WIDTH 1 #define MAC_MDIOISR_SNGLCOMPINT_INDEX 12 #define MAC_MDIOISR_SNGLCOMPINT_WIDTH 1 #define MAC_MDIOSCAR_DA_INDEX 21 #define MAC_MDIOSCAR_DA_WIDTH 5 #define MAC_MDIOSCAR_PA_INDEX 16 #define MAC_MDIOSCAR_PA_WIDTH 5 #define MAC_MDIOSCAR_RA_INDEX 0 #define MAC_MDIOSCAR_RA_WIDTH 16 #define MAC_MDIOSCCDR_BUSY_INDEX 22 #define MAC_MDIOSCCDR_BUSY_WIDTH 1 #define MAC_MDIOSCCDR_CMD_INDEX 16 #define MAC_MDIOSCCDR_CMD_WIDTH 2 #define MAC_MDIOSCCDR_CR_INDEX 19 #define MAC_MDIOSCCDR_CR_WIDTH 3 #define MAC_MDIOSCCDR_DATA_INDEX 0 #define MAC_MDIOSCCDR_DATA_WIDTH 16 #define MAC_MDIOSCCDR_SADDR_INDEX 18 #define MAC_MDIOSCCDR_SADDR_WIDTH 1 #define MAC_PFR_HMC_INDEX 2 #define MAC_PFR_HMC_WIDTH 1 #define MAC_PFR_HPF_INDEX 10 #define MAC_PFR_HPF_WIDTH 1 #define MAC_PFR_HUC_INDEX 1 #define MAC_PFR_HUC_WIDTH 1 #define MAC_PFR_PM_INDEX 4 #define MAC_PFR_PM_WIDTH 1 #define MAC_PFR_PR_INDEX 0 #define MAC_PFR_PR_WIDTH 1 #define MAC_PFR_VTFE_INDEX 16 #define MAC_PFR_VTFE_WIDTH 1 #define MAC_PFR_VUCC_INDEX 22 #define MAC_PFR_VUCC_WIDTH 1 #define MAC_PMTCSR_MGKPKTEN_INDEX 1 #define MAC_PMTCSR_MGKPKTEN_WIDTH 1 #define MAC_PMTCSR_PWRDWN_INDEX 0 #define MAC_PMTCSR_PWRDWN_WIDTH 1 #define MAC_PMTCSR_RWKFILTRST_INDEX 31 #define MAC_PMTCSR_RWKFILTRST_WIDTH 1 #define MAC_PMTCSR_RWKPKTEN_INDEX 2 #define MAC_PMTCSR_RWKPKTEN_WIDTH 1 #define MAC_Q0TFCR_PT_INDEX 16 #define MAC_Q0TFCR_PT_WIDTH 16 #define MAC_Q0TFCR_TFE_INDEX 1 #define MAC_Q0TFCR_TFE_WIDTH 1 #define MAC_RCR_ACS_INDEX 1 #define MAC_RCR_ACS_WIDTH 1 #define MAC_RCR_CST_INDEX 2 #define MAC_RCR_CST_WIDTH 1 #define MAC_RCR_DCRCC_INDEX 3 #define MAC_RCR_DCRCC_WIDTH 1 #define MAC_RCR_HDSMS_INDEX 12 #define MAC_RCR_HDSMS_WIDTH 3 #define MAC_RCR_IPC_INDEX 9 #define MAC_RCR_IPC_WIDTH 1 #define MAC_RCR_JE_INDEX 8 #define MAC_RCR_JE_WIDTH 1 #define MAC_RCR_LM_INDEX 10 #define MAC_RCR_LM_WIDTH 1 #define MAC_RCR_RE_INDEX 0 #define MAC_RCR_RE_WIDTH 1 #define MAC_RCR_ARPEN_INDEX 31 #define MAC_RCR_ARPEN_WIDTH 1 #define MAC_RFCR_PFCE_INDEX 8 #define MAC_RFCR_PFCE_WIDTH 1 #define MAC_RFCR_RFE_INDEX 0 #define MAC_RFCR_RFE_WIDTH 1 #define MAC_RFCR_UP_INDEX 1 #define MAC_RFCR_UP_WIDTH 1 #define MAC_RQC0R_RXQ0EN_INDEX 0 #define MAC_RQC0R_RXQ0EN_WIDTH 2 #define MAC_RSSAR_ADDRT_INDEX 2 #define MAC_RSSAR_ADDRT_WIDTH 1 #define MAC_RSSAR_CT_INDEX 1 #define MAC_RSSAR_CT_WIDTH 1 #define MAC_RSSAR_OB_INDEX 0 #define MAC_RSSAR_OB_WIDTH 1 #define MAC_RSSAR_RSSIA_INDEX 8 #define MAC_RSSAR_RSSIA_WIDTH 8 #define MAC_RSSCR_IP2TE_INDEX 1 #define MAC_RSSCR_IP2TE_WIDTH 1 #define MAC_RSSCR_RSSE_INDEX 0 #define MAC_RSSCR_RSSE_WIDTH 1 #define MAC_RSSCR_TCP4TE_INDEX 2 #define MAC_RSSCR_TCP4TE_WIDTH 1 #define MAC_RSSCR_UDP4TE_INDEX 3 #define MAC_RSSCR_UDP4TE_WIDTH 1 #define MAC_RSSDR_DMCH_INDEX 0 #define MAC_RSSDR_DMCH_WIDTH 4 #define MAC_SSIR_SNSINC_INDEX 8 #define MAC_SSIR_SNSINC_WIDTH 8 #define MAC_SSIR_SSINC_INDEX 16 #define MAC_SSIR_SSINC_WIDTH 8 #define MAC_TCR_SS_INDEX 29 #define MAC_TCR_SS_WIDTH 2 #define MAC_TCR_TE_INDEX 0 #define MAC_TCR_TE_WIDTH 1 #define MAC_TCR_VNE_INDEX 24 #define MAC_TCR_VNE_WIDTH 1 
#define MAC_TCR_VNM_INDEX 25 #define MAC_TCR_VNM_WIDTH 1 #define MAC_TIR_TNID_INDEX 0 #define MAC_TIR_TNID_WIDTH 16 #define MAC_TSCR_AV8021ASMEN_INDEX 28 #define MAC_TSCR_AV8021ASMEN_WIDTH 1 #define MAC_TSCR_SNAPTYPSEL_INDEX 16 #define MAC_TSCR_SNAPTYPSEL_WIDTH 2 #define MAC_TSCR_TSADDREG_INDEX 5 #define MAC_TSCR_TSADDREG_WIDTH 1 #define MAC_TSCR_TSCFUPDT_INDEX 1 #define MAC_TSCR_TSCFUPDT_WIDTH 1 #define MAC_TSCR_TSCTRLSSR_INDEX 9 #define MAC_TSCR_TSCTRLSSR_WIDTH 1 #define MAC_TSCR_TSENA_INDEX 0 #define MAC_TSCR_TSENA_WIDTH 1 #define MAC_TSCR_TSENALL_INDEX 8 #define MAC_TSCR_TSENALL_WIDTH 1 #define MAC_TSCR_TSEVNTENA_INDEX 14 #define MAC_TSCR_TSEVNTENA_WIDTH 1 #define MAC_TSCR_TSINIT_INDEX 2 #define MAC_TSCR_TSINIT_WIDTH 1 #define MAC_TSCR_TSIPENA_INDEX 11 #define MAC_TSCR_TSIPENA_WIDTH 1 #define MAC_TSCR_TSIPV4ENA_INDEX 13 #define MAC_TSCR_TSIPV4ENA_WIDTH 1 #define MAC_TSCR_TSIPV6ENA_INDEX 12 #define MAC_TSCR_TSIPV6ENA_WIDTH 1 #define MAC_TSCR_TSMSTRENA_INDEX 15 #define MAC_TSCR_TSMSTRENA_WIDTH 1 #define MAC_TSCR_TSVER2ENA_INDEX 10 #define MAC_TSCR_TSVER2ENA_WIDTH 1 #define MAC_TSCR_TXTSSTSM_INDEX 24 #define MAC_TSCR_TXTSSTSM_WIDTH 1 #define MAC_TSSR_TXTSC_INDEX 15 #define MAC_TSSR_TXTSC_WIDTH 1 #define MAC_TXSNR_TXTSSTSMIS_INDEX 31 #define MAC_TXSNR_TXTSSTSMIS_WIDTH 1 #define MAC_VLANHTR_VLHT_INDEX 0 #define MAC_VLANHTR_VLHT_WIDTH 16 #define MAC_VLANIR_VLTI_INDEX 20 #define MAC_VLANIR_VLTI_WIDTH 1 #define MAC_VLANIR_CSVL_INDEX 19 #define MAC_VLANIR_CSVL_WIDTH 1 #define MAC_VLANTR_DOVLTC_INDEX 20 #define MAC_VLANTR_DOVLTC_WIDTH 1 #define MAC_VLANTR_ERSVLM_INDEX 19 #define MAC_VLANTR_ERSVLM_WIDTH 1 #define MAC_VLANTR_ESVL_INDEX 18 #define MAC_VLANTR_ESVL_WIDTH 1 #define MAC_VLANTR_ETV_INDEX 16 #define MAC_VLANTR_ETV_WIDTH 1 #define MAC_VLANTR_EVLS_INDEX 21 #define MAC_VLANTR_EVLS_WIDTH 2 #define MAC_VLANTR_EVLRXS_INDEX 24 #define MAC_VLANTR_EVLRXS_WIDTH 1 #define MAC_VLANTR_VL_INDEX 0 #define MAC_VLANTR_VL_WIDTH 16 #define MAC_VLANTR_VTHM_INDEX 25 #define MAC_VLANTR_VTHM_WIDTH 1 #define MAC_VLANTR_VTIM_INDEX 17 #define MAC_VLANTR_VTIM_WIDTH 1 #define MAC_VR_DEVID_INDEX 8 #define MAC_VR_DEVID_WIDTH 8 #define MAC_VR_SNPSVER_INDEX 0 #define MAC_VR_SNPSVER_WIDTH 8 #define MAC_VR_USERVER_INDEX 16 #define MAC_VR_USERVER_WIDTH 8 /* MMC register offsets */ #define MMC_CR 0x0800 #define MMC_RISR 0x0804 #define MMC_TISR 0x0808 #define MMC_RIER 0x080c #define MMC_TIER 0x0810 #define MMC_TXOCTETCOUNT_GB_LO 0x0814 #define MMC_TXOCTETCOUNT_GB_HI 0x0818 #define MMC_TXFRAMECOUNT_GB_LO 0x081c #define MMC_TXFRAMECOUNT_GB_HI 0x0820 #define MMC_TXBROADCASTFRAMES_G_LO 0x0824 #define MMC_TXBROADCASTFRAMES_G_HI 0x0828 #define MMC_TXMULTICASTFRAMES_G_LO 0x082c #define MMC_TXMULTICASTFRAMES_G_HI 0x0830 #define MMC_TX64OCTETS_GB_LO 0x0834 #define MMC_TX64OCTETS_GB_HI 0x0838 #define MMC_TX65TO127OCTETS_GB_LO 0x083c #define MMC_TX65TO127OCTETS_GB_HI 0x0840 #define MMC_TX128TO255OCTETS_GB_LO 0x0844 #define MMC_TX128TO255OCTETS_GB_HI 0x0848 #define MMC_TX256TO511OCTETS_GB_LO 0x084c #define MMC_TX256TO511OCTETS_GB_HI 0x0850 #define MMC_TX512TO1023OCTETS_GB_LO 0x0854 #define MMC_TX512TO1023OCTETS_GB_HI 0x0858 #define MMC_TX1024TOMAXOCTETS_GB_LO 0x085c #define MMC_TX1024TOMAXOCTETS_GB_HI 0x0860 #define MMC_TXUNICASTFRAMES_GB_LO 0x0864 #define MMC_TXUNICASTFRAMES_GB_HI 0x0868 #define MMC_TXMULTICASTFRAMES_GB_LO 0x086c #define MMC_TXMULTICASTFRAMES_GB_HI 0x0870 #define MMC_TXBROADCASTFRAMES_GB_LO 0x0874 #define MMC_TXBROADCASTFRAMES_GB_HI 0x0878 #define MMC_TXUNDERFLOWERROR_LO 0x087c #define MMC_TXUNDERFLOWERROR_HI 0x0880 
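/*
 * Illustrative sketch (not part of the original header): most MMC counters
 * are exposed as a _LO/_HI register pair 4 bytes apart, so a 64-bit value
 * can be assembled as, e.g. (ignoring rollover between the two reads):
 *
 *	uint64_t txoctets =
 *	    ((uint64_t)XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_HI) << 32) |
 *	    XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
 *
 * The driver's read_mmc_stats() hook presumably performs equivalent reads
 * when axgbe_if_get_counter() refreshes pdata->mmc_stats.
 */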
#define MMC_TXOCTETCOUNT_G_LO 0x0884 #define MMC_TXOCTETCOUNT_G_HI 0x0888 #define MMC_TXFRAMECOUNT_G_LO 0x088c #define MMC_TXFRAMECOUNT_G_HI 0x0890 #define MMC_TXPAUSEFRAMES_LO 0x0894 #define MMC_TXPAUSEFRAMES_HI 0x0898 #define MMC_TXVLANFRAMES_G_LO 0x089c #define MMC_TXVLANFRAMES_G_HI 0x08a0 #define MMC_RXFRAMECOUNT_GB_LO 0x0900 #define MMC_RXFRAMECOUNT_GB_HI 0x0904 #define MMC_RXOCTETCOUNT_GB_LO 0x0908 #define MMC_RXOCTETCOUNT_GB_HI 0x090c #define MMC_RXOCTETCOUNT_G_LO 0x0910 #define MMC_RXOCTETCOUNT_G_HI 0x0914 #define MMC_RXBROADCASTFRAMES_G_LO 0x0918 #define MMC_RXBROADCASTFRAMES_G_HI 0x091c #define MMC_RXMULTICASTFRAMES_G_LO 0x0920 #define MMC_RXMULTICASTFRAMES_G_HI 0x0924 #define MMC_RXCRCERROR_LO 0x0928 #define MMC_RXCRCERROR_HI 0x092c #define MMC_RXRUNTERROR 0x0930 #define MMC_RXJABBERERROR 0x0934 #define MMC_RXUNDERSIZE_G 0x0938 #define MMC_RXOVERSIZE_G 0x093c #define MMC_RX64OCTETS_GB_LO 0x0940 #define MMC_RX64OCTETS_GB_HI 0x0944 #define MMC_RX65TO127OCTETS_GB_LO 0x0948 #define MMC_RX65TO127OCTETS_GB_HI 0x094c #define MMC_RX128TO255OCTETS_GB_LO 0x0950 #define MMC_RX128TO255OCTETS_GB_HI 0x0954 #define MMC_RX256TO511OCTETS_GB_LO 0x0958 #define MMC_RX256TO511OCTETS_GB_HI 0x095c #define MMC_RX512TO1023OCTETS_GB_LO 0x0960 #define MMC_RX512TO1023OCTETS_GB_HI 0x0964 #define MMC_RX1024TOMAXOCTETS_GB_LO 0x0968 #define MMC_RX1024TOMAXOCTETS_GB_HI 0x096c #define MMC_RXUNICASTFRAMES_G_LO 0x0970 #define MMC_RXUNICASTFRAMES_G_HI 0x0974 #define MMC_RXLENGTHERROR_LO 0x0978 #define MMC_RXLENGTHERROR_HI 0x097c #define MMC_RXOUTOFRANGETYPE_LO 0x0980 #define MMC_RXOUTOFRANGETYPE_HI 0x0984 #define MMC_RXPAUSEFRAMES_LO 0x0988 #define MMC_RXPAUSEFRAMES_HI 0x098c #define MMC_RXFIFOOVERFLOW_LO 0x0990 #define MMC_RXFIFOOVERFLOW_HI 0x0994 #define MMC_RXVLANFRAMES_GB_LO 0x0998 #define MMC_RXVLANFRAMES_GB_HI 0x099c #define MMC_RXWATCHDOGERROR 0x09a0 /* MMC register entry bit positions and sizes */ #define MMC_CR_CR_INDEX 0 #define MMC_CR_CR_WIDTH 1 #define MMC_CR_CSR_INDEX 1 #define MMC_CR_CSR_WIDTH 1 #define MMC_CR_ROR_INDEX 2 #define MMC_CR_ROR_WIDTH 1 #define MMC_CR_MCF_INDEX 3 #define MMC_CR_MCF_WIDTH 1 #define MMC_CR_MCT_INDEX 4 #define MMC_CR_MCT_WIDTH 2 #define MMC_RIER_ALL_INTERRUPTS_INDEX 0 #define MMC_RIER_ALL_INTERRUPTS_WIDTH 23 #define MMC_RISR_RXFRAMECOUNT_GB_INDEX 0 #define MMC_RISR_RXFRAMECOUNT_GB_WIDTH 1 #define MMC_RISR_RXOCTETCOUNT_GB_INDEX 1 #define MMC_RISR_RXOCTETCOUNT_GB_WIDTH 1 #define MMC_RISR_RXOCTETCOUNT_G_INDEX 2 #define MMC_RISR_RXOCTETCOUNT_G_WIDTH 1 #define MMC_RISR_RXBROADCASTFRAMES_G_INDEX 3 #define MMC_RISR_RXBROADCASTFRAMES_G_WIDTH 1 #define MMC_RISR_RXMULTICASTFRAMES_G_INDEX 4 #define MMC_RISR_RXMULTICASTFRAMES_G_WIDTH 1 #define MMC_RISR_RXCRCERROR_INDEX 5 #define MMC_RISR_RXCRCERROR_WIDTH 1 #define MMC_RISR_RXRUNTERROR_INDEX 6 #define MMC_RISR_RXRUNTERROR_WIDTH 1 #define MMC_RISR_RXJABBERERROR_INDEX 7 #define MMC_RISR_RXJABBERERROR_WIDTH 1 #define MMC_RISR_RXUNDERSIZE_G_INDEX 8 #define MMC_RISR_RXUNDERSIZE_G_WIDTH 1 #define MMC_RISR_RXOVERSIZE_G_INDEX 9 #define MMC_RISR_RXOVERSIZE_G_WIDTH 1 #define MMC_RISR_RX64OCTETS_GB_INDEX 10 #define MMC_RISR_RX64OCTETS_GB_WIDTH 1 #define MMC_RISR_RX65TO127OCTETS_GB_INDEX 11 #define MMC_RISR_RX65TO127OCTETS_GB_WIDTH 1 #define MMC_RISR_RX128TO255OCTETS_GB_INDEX 12 #define MMC_RISR_RX128TO255OCTETS_GB_WIDTH 1 #define MMC_RISR_RX256TO511OCTETS_GB_INDEX 13 #define MMC_RISR_RX256TO511OCTETS_GB_WIDTH 1 #define MMC_RISR_RX512TO1023OCTETS_GB_INDEX 14 #define MMC_RISR_RX512TO1023OCTETS_GB_WIDTH 1 #define MMC_RISR_RX1024TOMAXOCTETS_GB_INDEX 15 
#define MMC_RISR_RX1024TOMAXOCTETS_GB_WIDTH 1 #define MMC_RISR_RXUNICASTFRAMES_G_INDEX 16 #define MMC_RISR_RXUNICASTFRAMES_G_WIDTH 1 #define MMC_RISR_RXLENGTHERROR_INDEX 17 #define MMC_RISR_RXLENGTHERROR_WIDTH 1 #define MMC_RISR_RXOUTOFRANGETYPE_INDEX 18 #define MMC_RISR_RXOUTOFRANGETYPE_WIDTH 1 #define MMC_RISR_RXPAUSEFRAMES_INDEX 19 #define MMC_RISR_RXPAUSEFRAMES_WIDTH 1 #define MMC_RISR_RXFIFOOVERFLOW_INDEX 20 #define MMC_RISR_RXFIFOOVERFLOW_WIDTH 1 #define MMC_RISR_RXVLANFRAMES_GB_INDEX 21 #define MMC_RISR_RXVLANFRAMES_GB_WIDTH 1 #define MMC_RISR_RXWATCHDOGERROR_INDEX 22 #define MMC_RISR_RXWATCHDOGERROR_WIDTH 1 #define MMC_TIER_ALL_INTERRUPTS_INDEX 0 #define MMC_TIER_ALL_INTERRUPTS_WIDTH 18 #define MMC_TISR_TXOCTETCOUNT_GB_INDEX 0 #define MMC_TISR_TXOCTETCOUNT_GB_WIDTH 1 #define MMC_TISR_TXFRAMECOUNT_GB_INDEX 1 #define MMC_TISR_TXFRAMECOUNT_GB_WIDTH 1 #define MMC_TISR_TXBROADCASTFRAMES_G_INDEX 2 #define MMC_TISR_TXBROADCASTFRAMES_G_WIDTH 1 #define MMC_TISR_TXMULTICASTFRAMES_G_INDEX 3 #define MMC_TISR_TXMULTICASTFRAMES_G_WIDTH 1 #define MMC_TISR_TX64OCTETS_GB_INDEX 4 #define MMC_TISR_TX64OCTETS_GB_WIDTH 1 #define MMC_TISR_TX65TO127OCTETS_GB_INDEX 5 #define MMC_TISR_TX65TO127OCTETS_GB_WIDTH 1 #define MMC_TISR_TX128TO255OCTETS_GB_INDEX 6 #define MMC_TISR_TX128TO255OCTETS_GB_WIDTH 1 #define MMC_TISR_TX256TO511OCTETS_GB_INDEX 7 #define MMC_TISR_TX256TO511OCTETS_GB_WIDTH 1 #define MMC_TISR_TX512TO1023OCTETS_GB_INDEX 8 #define MMC_TISR_TX512TO1023OCTETS_GB_WIDTH 1 #define MMC_TISR_TX1024TOMAXOCTETS_GB_INDEX 9 #define MMC_TISR_TX1024TOMAXOCTETS_GB_WIDTH 1 #define MMC_TISR_TXUNICASTFRAMES_GB_INDEX 10 #define MMC_TISR_TXUNICASTFRAMES_GB_WIDTH 1 #define MMC_TISR_TXMULTICASTFRAMES_GB_INDEX 11 #define MMC_TISR_TXMULTICASTFRAMES_GB_WIDTH 1 #define MMC_TISR_TXBROADCASTFRAMES_GB_INDEX 12 #define MMC_TISR_TXBROADCASTFRAMES_GB_WIDTH 1 #define MMC_TISR_TXUNDERFLOWERROR_INDEX 13 #define MMC_TISR_TXUNDERFLOWERROR_WIDTH 1 #define MMC_TISR_TXOCTETCOUNT_G_INDEX 14 #define MMC_TISR_TXOCTETCOUNT_G_WIDTH 1 #define MMC_TISR_TXFRAMECOUNT_G_INDEX 15 #define MMC_TISR_TXFRAMECOUNT_G_WIDTH 1 #define MMC_TISR_TXPAUSEFRAMES_INDEX 16 #define MMC_TISR_TXPAUSEFRAMES_WIDTH 1 #define MMC_TISR_TXVLANFRAMES_G_INDEX 17 #define MMC_TISR_TXVLANFRAMES_G_WIDTH 1 /* MTL register offsets */ #define MTL_OMR 0x1000 #define MTL_FDCR 0x1008 #define MTL_FDSR 0x100c #define MTL_FDDR 0x1010 #define MTL_ISR 0x1020 #define MTL_RQDCM0R 0x1030 #define MTL_RQDCM1R 0x1034 #define MTL_RQDCM2R 0x1038 #define MTL_TCPM0R 0x1040 #define MTL_TCPM1R 0x1044 #define MTL_RQDCM_INC 4 #define MTL_RQDCM_Q_PER_REG 4 #define MTL_TCPM_INC 4 #define MTL_TCPM_TC_PER_REG 4 /* MTL register entry bit positions and sizes */ #define MTL_OMR_ETSALG_INDEX 5 #define MTL_OMR_ETSALG_WIDTH 2 #define MTL_OMR_RAA_INDEX 2 #define MTL_OMR_RAA_WIDTH 1 /* MTL queue register offsets * Multiple queues can be active. The first queue has registers * that begin at 0x1100. Each subsequent queue has registers that * are accessed using an offset of 0x80 from the previous queue. 
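 * For example, the MTL_Q_TQOMR register for queue 3 resolves to
 * MTL_Q_BASE + (3 * MTL_Q_INC) + MTL_Q_TQOMR = 0x1100 + 0x180 + 0x00 = 0x1280.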
*/ #define MTL_Q_BASE 0x1100 #define MTL_Q_INC 0x80 #define MTL_Q_TQOMR 0x00 #define MTL_Q_TQUR 0x04 #define MTL_Q_TQDR 0x08 #define MTL_Q_TC0ETSCR 0x10 #define MTL_Q_TC0ETSSR 0x14 #define MTL_Q_TC0QWR 0x18 #define MTL_Q_RQOMR 0x40 #define MTL_Q_RQMPOCR 0x44 #define MTL_Q_RQDR 0x48 #define MTL_Q_RQCR 0x4c #define MTL_Q_RQFCR 0x50 #define MTL_Q_IER 0x70 #define MTL_Q_ISR 0x74 /* MTL queue register entry bit positions and sizes */ #define MTL_Q_RQDR_PRXQ_INDEX 16 #define MTL_Q_RQDR_PRXQ_WIDTH 14 #define MTL_Q_RQDR_RXQSTS_INDEX 4 #define MTL_Q_RQDR_RXQSTS_WIDTH 2 #define MTL_Q_RQFCR_RFA_INDEX 1 #define MTL_Q_RQFCR_RFA_WIDTH 6 #define MTL_Q_RQFCR_RFD_INDEX 17 #define MTL_Q_RQFCR_RFD_WIDTH 6 #define MTL_Q_RQOMR_EHFC_INDEX 7 #define MTL_Q_RQOMR_EHFC_WIDTH 1 #define MTL_Q_RQOMR_RQS_INDEX 16 #define MTL_Q_RQOMR_RQS_WIDTH 9 #define MTL_Q_RQOMR_RSF_INDEX 5 #define MTL_Q_RQOMR_RSF_WIDTH 1 #define MTL_Q_RQOMR_RTC_INDEX 0 #define MTL_Q_RQOMR_RTC_WIDTH 2 #define MTL_Q_TQDR_TRCSTS_INDEX 1 #define MTL_Q_TQDR_TRCSTS_WIDTH 2 #define MTL_Q_TQDR_TXQSTS_INDEX 4 #define MTL_Q_TQDR_TXQSTS_WIDTH 1 #define MTL_Q_TQOMR_FTQ_INDEX 0 #define MTL_Q_TQOMR_FTQ_WIDTH 1 #define MTL_Q_TQOMR_Q2TCMAP_INDEX 8 #define MTL_Q_TQOMR_Q2TCMAP_WIDTH 3 #define MTL_Q_TQOMR_TQS_INDEX 16 #define MTL_Q_TQOMR_TQS_WIDTH 10 #define MTL_Q_TQOMR_TSF_INDEX 1 #define MTL_Q_TQOMR_TSF_WIDTH 1 #define MTL_Q_TQOMR_TTC_INDEX 4 #define MTL_Q_TQOMR_TTC_WIDTH 3 #define MTL_Q_TQOMR_TXQEN_INDEX 2 #define MTL_Q_TQOMR_TXQEN_WIDTH 2 /* MTL queue register value */ #define MTL_RSF_DISABLE 0x00 #define MTL_RSF_ENABLE 0x01 #define MTL_TSF_DISABLE 0x00 #define MTL_TSF_ENABLE 0x01 #define MTL_RX_THRESHOLD_64 0x00 #define MTL_RX_THRESHOLD_96 0x02 #define MTL_RX_THRESHOLD_128 0x03 #define MTL_TX_THRESHOLD_32 0x01 #define MTL_TX_THRESHOLD_64 0x00 #define MTL_TX_THRESHOLD_96 0x02 #define MTL_TX_THRESHOLD_128 0x03 #define MTL_TX_THRESHOLD_192 0x04 #define MTL_TX_THRESHOLD_256 0x05 #define MTL_TX_THRESHOLD_384 0x06 #define MTL_TX_THRESHOLD_512 0x07 #define MTL_ETSALG_WRR 0x00 #define MTL_ETSALG_WFQ 0x01 #define MTL_ETSALG_DWRR 0x02 #define MTL_RAA_SP 0x00 #define MTL_RAA_WSP 0x01 #define MTL_Q_DISABLED 0x00 #define MTL_Q_ENABLED 0x02 /* MTL traffic class register offsets * Multiple traffic classes can be active. The first class has registers * that begin at 0x1100. Each subsequent queue has registers that * are accessed using an offset of 0x80 from the previous queue. */ #define MTL_TC_BASE MTL_Q_BASE #define MTL_TC_INC MTL_Q_INC #define MTL_TC_ETSCR 0x10 #define MTL_TC_ETSSR 0x14 #define MTL_TC_QWR 0x18 /* MTL traffic class register entry bit positions and sizes */ #define MTL_TC_ETSCR_TSA_INDEX 0 #define MTL_TC_ETSCR_TSA_WIDTH 2 #define MTL_TC_QWR_QW_INDEX 0 #define MTL_TC_QWR_QW_WIDTH 21 /* MTL traffic class register value */ #define MTL_TSA_SP 0x00 #define MTL_TSA_ETS 0x02 /* PCS MMD select register offset * The MMD select register is used for accessing PCS registers * when the underlying APB3 interface is using indirect addressing. * Indirect addressing requires accessing registers in two phases, * an address phase and a data phase. The address phases requires * writing an address selection value to the MMD select regiesters. 
*/ #define PCS_V1_WINDOW_SELECT 0x03fc #define PCS_V2_WINDOW_DEF 0x9060 #define PCS_V2_WINDOW_SELECT 0x9064 #define PCS_V2_RV_WINDOW_DEF 0x1060 #define PCS_V2_RV_WINDOW_SELECT 0x1064 /* PCS register entry bit positions and sizes */ #define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6 #define PCS_V2_WINDOW_DEF_OFFSET_WIDTH 14 #define PCS_V2_WINDOW_DEF_SIZE_INDEX 2 #define PCS_V2_WINDOW_DEF_SIZE_WIDTH 4 /* SerDes integration register offsets */ #define SIR0_KR_RT_1 0x002c #define SIR0_STATUS 0x0040 #define SIR1_SPEED 0x0000 /* SerDes integration register entry bit positions and sizes */ #define SIR0_KR_RT_1_RESET_INDEX 11 #define SIR0_KR_RT_1_RESET_WIDTH 1 #define SIR0_STATUS_RX_READY_INDEX 0 #define SIR0_STATUS_RX_READY_WIDTH 1 #define SIR0_STATUS_TX_READY_INDEX 8 #define SIR0_STATUS_TX_READY_WIDTH 1 #define SIR1_SPEED_CDR_RATE_INDEX 12 #define SIR1_SPEED_CDR_RATE_WIDTH 4 #define SIR1_SPEED_DATARATE_INDEX 4 #define SIR1_SPEED_DATARATE_WIDTH 2 #define SIR1_SPEED_PLLSEL_INDEX 3 #define SIR1_SPEED_PLLSEL_WIDTH 1 #define SIR1_SPEED_RATECHANGE_INDEX 6 #define SIR1_SPEED_RATECHANGE_WIDTH 1 #define SIR1_SPEED_TXAMP_INDEX 8 #define SIR1_SPEED_TXAMP_WIDTH 4 #define SIR1_SPEED_WORDMODE_INDEX 0 #define SIR1_SPEED_WORDMODE_WIDTH 3 /* SerDes RxTx register offsets */ #define RXTX_REG6 0x0018 #define RXTX_REG20 0x0050 #define RXTX_REG22 0x0058 #define RXTX_REG114 0x01c8 #define RXTX_REG129 0x0204 /* SerDes RxTx register entry bit positions and sizes */ #define RXTX_REG6_RESETB_RXD_INDEX 8 #define RXTX_REG6_RESETB_RXD_WIDTH 1 #define RXTX_REG20_BLWC_ENA_INDEX 2 #define RXTX_REG20_BLWC_ENA_WIDTH 1 #define RXTX_REG114_PQ_REG_INDEX 9 #define RXTX_REG114_PQ_REG_WIDTH 7 #define RXTX_REG129_RXDFE_CONFIG_INDEX 14 #define RXTX_REG129_RXDFE_CONFIG_WIDTH 2 /* MAC Control register offsets */ #define XP_PROP_0 0x0000 #define XP_PROP_1 0x0004 #define XP_PROP_2 0x0008 #define XP_PROP_3 0x000c #define XP_PROP_4 0x0010 #define XP_PROP_5 0x0014 #define XP_MAC_ADDR_LO 0x0020 #define XP_MAC_ADDR_HI 0x0024 #define XP_ECC_ISR 0x0030 #define XP_ECC_IER 0x0034 #define XP_ECC_CNT0 0x003c #define XP_ECC_CNT1 0x0040 #define XP_DRIVER_INT_REQ 0x0060 #define XP_DRIVER_INT_RO 0x0064 #define XP_DRIVER_SCRATCH_0 0x0068 #define XP_DRIVER_SCRATCH_1 0x006c #define XP_INT_REISSUE_EN 0x0074 #define XP_INT_EN 0x0078 #define XP_I2C_MUTEX 0x0080 #define XP_MDIO_MUTEX 0x0084 /* MAC Control register entry bit positions and sizes */ #define XP_DRIVER_INT_REQ_REQUEST_INDEX 0 #define XP_DRIVER_INT_REQ_REQUEST_WIDTH 1 #define XP_DRIVER_INT_RO_STATUS_INDEX 0 #define XP_DRIVER_INT_RO_STATUS_WIDTH 1 #define XP_DRIVER_SCRATCH_0_COMMAND_INDEX 0 #define XP_DRIVER_SCRATCH_0_COMMAND_WIDTH 8 #define XP_DRIVER_SCRATCH_0_SUB_COMMAND_INDEX 8 #define XP_DRIVER_SCRATCH_0_SUB_COMMAND_WIDTH 8 #define XP_ECC_CNT0_RX_DED_INDEX 24 #define XP_ECC_CNT0_RX_DED_WIDTH 8 #define XP_ECC_CNT0_RX_SEC_INDEX 16 #define XP_ECC_CNT0_RX_SEC_WIDTH 8 #define XP_ECC_CNT0_TX_DED_INDEX 8 #define XP_ECC_CNT0_TX_DED_WIDTH 8 #define XP_ECC_CNT0_TX_SEC_INDEX 0 #define XP_ECC_CNT0_TX_SEC_WIDTH 8 #define XP_ECC_CNT1_DESC_DED_INDEX 8 #define XP_ECC_CNT1_DESC_DED_WIDTH 8 #define XP_ECC_CNT1_DESC_SEC_INDEX 0 #define XP_ECC_CNT1_DESC_SEC_WIDTH 8 #define XP_ECC_IER_DESC_DED_INDEX 5 #define XP_ECC_IER_DESC_DED_WIDTH 1 #define XP_ECC_IER_DESC_SEC_INDEX 4 #define XP_ECC_IER_DESC_SEC_WIDTH 1 #define XP_ECC_IER_RX_DED_INDEX 3 #define XP_ECC_IER_RX_DED_WIDTH 1 #define XP_ECC_IER_RX_SEC_INDEX 2 #define XP_ECC_IER_RX_SEC_WIDTH 1 #define XP_ECC_IER_TX_DED_INDEX 1 #define XP_ECC_IER_TX_DED_WIDTH 1 #define 
XP_ECC_IER_TX_SEC_INDEX 0 #define XP_ECC_IER_TX_SEC_WIDTH 1 #define XP_ECC_ISR_DESC_DED_INDEX 5 #define XP_ECC_ISR_DESC_DED_WIDTH 1 #define XP_ECC_ISR_DESC_SEC_INDEX 4 #define XP_ECC_ISR_DESC_SEC_WIDTH 1 #define XP_ECC_ISR_RX_DED_INDEX 3 #define XP_ECC_ISR_RX_DED_WIDTH 1 #define XP_ECC_ISR_RX_SEC_INDEX 2 #define XP_ECC_ISR_RX_SEC_WIDTH 1 #define XP_ECC_ISR_TX_DED_INDEX 1 #define XP_ECC_ISR_TX_DED_WIDTH 1 #define XP_ECC_ISR_TX_SEC_INDEX 0 #define XP_ECC_ISR_TX_SEC_WIDTH 1 #define XP_I2C_MUTEX_BUSY_INDEX 31 #define XP_I2C_MUTEX_BUSY_WIDTH 1 #define XP_I2C_MUTEX_ID_INDEX 29 #define XP_I2C_MUTEX_ID_WIDTH 2 #define XP_I2C_MUTEX_ACTIVE_INDEX 0 #define XP_I2C_MUTEX_ACTIVE_WIDTH 1 #define XP_MAC_ADDR_HI_VALID_INDEX 31 #define XP_MAC_ADDR_HI_VALID_WIDTH 1 #define XP_PROP_0_CONN_TYPE_INDEX 28 #define XP_PROP_0_CONN_TYPE_WIDTH 3 #define XP_PROP_0_MDIO_ADDR_INDEX 16 #define XP_PROP_0_MDIO_ADDR_WIDTH 5 #define XP_PROP_0_PORT_ID_INDEX 0 #define XP_PROP_0_PORT_ID_WIDTH 8 #define XP_PROP_0_PORT_MODE_INDEX 8 #define XP_PROP_0_PORT_MODE_WIDTH 4 #define XP_PROP_0_PORT_SPEEDS_INDEX 23 #define XP_PROP_0_PORT_SPEEDS_WIDTH 4 #define XP_PROP_1_MAX_RX_DMA_INDEX 24 #define XP_PROP_1_MAX_RX_DMA_WIDTH 5 #define XP_PROP_1_MAX_RX_QUEUES_INDEX 8 #define XP_PROP_1_MAX_RX_QUEUES_WIDTH 5 #define XP_PROP_1_MAX_TX_DMA_INDEX 16 #define XP_PROP_1_MAX_TX_DMA_WIDTH 5 #define XP_PROP_1_MAX_TX_QUEUES_INDEX 0 #define XP_PROP_1_MAX_TX_QUEUES_WIDTH 5 #define XP_PROP_2_RX_FIFO_SIZE_INDEX 16 #define XP_PROP_2_RX_FIFO_SIZE_WIDTH 16 #define XP_PROP_2_TX_FIFO_SIZE_INDEX 0 #define XP_PROP_2_TX_FIFO_SIZE_WIDTH 16 #define XP_PROP_3_GPIO_MASK_INDEX 28 #define XP_PROP_3_GPIO_MASK_WIDTH 4 #define XP_PROP_3_GPIO_MOD_ABS_INDEX 20 #define XP_PROP_3_GPIO_MOD_ABS_WIDTH 4 #define XP_PROP_3_GPIO_RATE_SELECT_INDEX 16 #define XP_PROP_3_GPIO_RATE_SELECT_WIDTH 4 #define XP_PROP_3_GPIO_RX_LOS_INDEX 24 #define XP_PROP_3_GPIO_RX_LOS_WIDTH 4 #define XP_PROP_3_GPIO_TX_FAULT_INDEX 12 #define XP_PROP_3_GPIO_TX_FAULT_WIDTH 4 #define XP_PROP_3_GPIO_ADDR_INDEX 8 #define XP_PROP_3_GPIO_ADDR_WIDTH 3 #define XP_PROP_3_MDIO_RESET_INDEX 0 #define XP_PROP_3_MDIO_RESET_WIDTH 2 #define XP_PROP_3_MDIO_RESET_I2C_ADDR_INDEX 8 #define XP_PROP_3_MDIO_RESET_I2C_ADDR_WIDTH 3 #define XP_PROP_3_MDIO_RESET_I2C_GPIO_INDEX 12 #define XP_PROP_3_MDIO_RESET_I2C_GPIO_WIDTH 4 #define XP_PROP_3_MDIO_RESET_INT_GPIO_INDEX 4 #define XP_PROP_3_MDIO_RESET_INT_GPIO_WIDTH 2 #define XP_PROP_4_MUX_ADDR_HI_INDEX 8 #define XP_PROP_4_MUX_ADDR_HI_WIDTH 5 #define XP_PROP_4_MUX_ADDR_LO_INDEX 0 #define XP_PROP_4_MUX_ADDR_LO_WIDTH 3 #define XP_PROP_4_MUX_CHAN_INDEX 4 #define XP_PROP_4_MUX_CHAN_WIDTH 3 #define XP_PROP_4_REDRV_ADDR_INDEX 16 #define XP_PROP_4_REDRV_ADDR_WIDTH 7 #define XP_PROP_4_REDRV_IF_INDEX 23 #define XP_PROP_4_REDRV_IF_WIDTH 1 #define XP_PROP_4_REDRV_LANE_INDEX 24 #define XP_PROP_4_REDRV_LANE_WIDTH 3 #define XP_PROP_4_REDRV_MODEL_INDEX 28 #define XP_PROP_4_REDRV_MODEL_WIDTH 3 #define XP_PROP_4_REDRV_PRESENT_INDEX 31 #define XP_PROP_4_REDRV_PRESENT_WIDTH 1 /* I2C Control register offsets */ #define IC_CON 0x0000 #define IC_TAR 0x0004 #define IC_DATA_CMD 0x0010 #define IC_INTR_STAT 0x002c #define IC_INTR_MASK 0x0030 #define IC_RAW_INTR_STAT 0x0034 #define IC_CLR_INTR 0x0040 #define IC_CLR_TX_ABRT 0x0054 #define IC_CLR_STOP_DET 0x0060 #define IC_ENABLE 0x006c #define IC_TXFLR 0x0074 #define IC_RXFLR 0x0078 #define IC_TX_ABRT_SOURCE 0x0080 #define IC_ENABLE_STATUS 0x009c #define IC_COMP_PARAM_1 0x00f4 /* I2C Control register entry bit positions and sizes */ #define 
IC_COMP_PARAM_1_MAX_SPEED_MODE_INDEX 2 #define IC_COMP_PARAM_1_MAX_SPEED_MODE_WIDTH 2 #define IC_COMP_PARAM_1_RX_BUFFER_DEPTH_INDEX 8 #define IC_COMP_PARAM_1_RX_BUFFER_DEPTH_WIDTH 8 #define IC_COMP_PARAM_1_TX_BUFFER_DEPTH_INDEX 16 #define IC_COMP_PARAM_1_TX_BUFFER_DEPTH_WIDTH 8 #define IC_CON_MASTER_MODE_INDEX 0 #define IC_CON_MASTER_MODE_WIDTH 1 #define IC_CON_RESTART_EN_INDEX 5 #define IC_CON_RESTART_EN_WIDTH 1 #define IC_CON_RX_FIFO_FULL_HOLD_INDEX 9 #define IC_CON_RX_FIFO_FULL_HOLD_WIDTH 1 #define IC_CON_SLAVE_DISABLE_INDEX 6 #define IC_CON_SLAVE_DISABLE_WIDTH 1 #define IC_CON_SPEED_INDEX 1 #define IC_CON_SPEED_WIDTH 2 #define IC_DATA_CMD_CMD_INDEX 8 #define IC_DATA_CMD_CMD_WIDTH 1 #define IC_DATA_CMD_STOP_INDEX 9 #define IC_DATA_CMD_STOP_WIDTH 1 #define IC_ENABLE_ABORT_INDEX 1 #define IC_ENABLE_ABORT_WIDTH 1 #define IC_ENABLE_EN_INDEX 0 #define IC_ENABLE_EN_WIDTH 1 #define IC_ENABLE_STATUS_EN_INDEX 0 #define IC_ENABLE_STATUS_EN_WIDTH 1 #define IC_INTR_MASK_TX_EMPTY_INDEX 4 #define IC_INTR_MASK_TX_EMPTY_WIDTH 1 #define IC_RAW_INTR_STAT_RX_FULL_INDEX 2 #define IC_RAW_INTR_STAT_RX_FULL_WIDTH 1 #define IC_RAW_INTR_STAT_STOP_DET_INDEX 9 #define IC_RAW_INTR_STAT_STOP_DET_WIDTH 1 #define IC_RAW_INTR_STAT_TX_ABRT_INDEX 6 #define IC_RAW_INTR_STAT_TX_ABRT_WIDTH 1 #define IC_RAW_INTR_STAT_TX_EMPTY_INDEX 4 #define IC_RAW_INTR_STAT_TX_EMPTY_WIDTH 1 /* I2C Control register value */ #define IC_TX_ABRT_7B_ADDR_NOACK 0x0001 #define IC_TX_ABRT_ARB_LOST 0x1000 /* Descriptor/Packet entry bit positions and sizes */ #define RX_PACKET_ERRORS_CRC_INDEX 2 #define RX_PACKET_ERRORS_CRC_WIDTH 1 #define RX_PACKET_ERRORS_FRAME_INDEX 3 #define RX_PACKET_ERRORS_FRAME_WIDTH 1 #define RX_PACKET_ERRORS_LENGTH_INDEX 0 #define RX_PACKET_ERRORS_LENGTH_WIDTH 1 #define RX_PACKET_ERRORS_OVERRUN_INDEX 1 #define RX_PACKET_ERRORS_OVERRUN_WIDTH 1 #define RX_PACKET_ATTRIBUTES_CSUM_DONE_INDEX 0 #define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1 #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1 #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1 #define RX_PACKET_ATTRIBUTES_LAST_INDEX 2 #define RX_PACKET_ATTRIBUTES_LAST_WIDTH 1 #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3 #define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1 #define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4 #define RX_PACKET_ATTRIBUTES_CONTEXT_WIDTH 1 #define RX_PACKET_ATTRIBUTES_RX_TSTAMP_INDEX 5 #define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1 #define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6 #define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1 #define RX_PACKET_ATTRIBUTES_FIRST_INDEX 7 #define RX_PACKET_ATTRIBUTES_FIRST_WIDTH 1 #define RX_PACKET_ATTRIBUTES_TNP_INDEX 8 #define RX_PACKET_ATTRIBUTES_TNP_WIDTH 1 #define RX_PACKET_ATTRIBUTES_TNPCSUM_DONE_INDEX 9 #define RX_PACKET_ATTRIBUTES_TNPCSUM_DONE_WIDTH 1 #define RX_NORMAL_DESC0_OVT_INDEX 0 #define RX_NORMAL_DESC0_OVT_WIDTH 16 #define RX_NORMAL_DESC2_HL_INDEX 0 #define RX_NORMAL_DESC2_HL_WIDTH 10 #define RX_NORMAL_DESC2_TNP_INDEX 11 #define RX_NORMAL_DESC2_TNP_WIDTH 1 #define RX_NORMAL_DESC2_RPNG_INDEX 14 #define RX_NORMAL_DESC2_RPNG_WIDTH 1 #define RX_NORMAL_DESC3_CDA_INDEX 27 #define RX_NORMAL_DESC3_CDA_WIDTH 1 #define RX_NORMAL_DESC3_CTXT_INDEX 30 #define RX_NORMAL_DESC3_CTXT_WIDTH 1 #define RX_NORMAL_DESC3_ES_INDEX 15 #define RX_NORMAL_DESC3_ES_WIDTH 1 #define RX_NORMAL_DESC3_ETLT_INDEX 16 #define RX_NORMAL_DESC3_ETLT_WIDTH 4 #define RX_NORMAL_DESC3_FD_INDEX 29 #define RX_NORMAL_DESC3_FD_WIDTH 1 #define RX_NORMAL_DESC3_INTE_INDEX 30 #define RX_NORMAL_DESC3_INTE_WIDTH 1 #define RX_NORMAL_DESC3_L34T_INDEX 20 #define 
RX_NORMAL_DESC3_L34T_WIDTH 4 #define RX_NORMAL_DESC3_LD_INDEX 28 #define RX_NORMAL_DESC3_LD_WIDTH 1 #define RX_NORMAL_DESC3_OWN_INDEX 31 #define RX_NORMAL_DESC3_OWN_WIDTH 1 #define RX_NORMAL_DESC3_PL_INDEX 0 #define RX_NORMAL_DESC3_PL_WIDTH 14 #define RX_NORMAL_DESC3_RSV_INDEX 26 #define RX_NORMAL_DESC3_RSV_WIDTH 1 #define RX_DESC3_L34T_IPV4_TCP 1 #define RX_DESC3_L34T_IPV4_UDP 2 #define RX_DESC3_L34T_IPV4_ICMP 3 #define RX_DESC3_L34T_IPV4_UNKNOWN 7 #define RX_DESC3_L34T_IPV6_TCP 9 #define RX_DESC3_L34T_IPV6_UDP 10 #define RX_DESC3_L34T_IPV6_ICMP 11 #define RX_DESC3_L34T_IPV6_UNKNOWN 15 #define RX_CONTEXT_DESC3_TSA_INDEX 4 #define RX_CONTEXT_DESC3_TSA_WIDTH 1 #define RX_CONTEXT_DESC3_TSD_INDEX 6 #define RX_CONTEXT_DESC3_TSD_WIDTH 1 #define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_INDEX 0 #define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_WIDTH 1 #define TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX 1 #define TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH 1 #define TX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 2 #define TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1 #define TX_PACKET_ATTRIBUTES_PTP_INDEX 3 #define TX_PACKET_ATTRIBUTES_PTP_WIDTH 1 #define TX_PACKET_ATTRIBUTES_VXLAN_INDEX 4 #define TX_PACKET_ATTRIBUTES_VXLAN_WIDTH 1 #define TX_CONTEXT_DESC2_MSS_INDEX 0 #define TX_CONTEXT_DESC2_MSS_WIDTH 15 #define TX_CONTEXT_DESC3_CTXT_INDEX 30 #define TX_CONTEXT_DESC3_CTXT_WIDTH 1 #define TX_CONTEXT_DESC3_TCMSSV_INDEX 26 #define TX_CONTEXT_DESC3_TCMSSV_WIDTH 1 #define TX_CONTEXT_DESC3_VLTV_INDEX 16 #define TX_CONTEXT_DESC3_VLTV_WIDTH 1 #define TX_CONTEXT_DESC3_VT_INDEX 0 #define TX_CONTEXT_DESC3_VT_WIDTH 16 #define TX_NORMAL_DESC2_HL_B1L_INDEX 0 #define TX_NORMAL_DESC2_HL_B1L_WIDTH 14 #define TX_NORMAL_DESC2_IC_INDEX 31 #define TX_NORMAL_DESC2_IC_WIDTH 1 #define TX_NORMAL_DESC2_TTSE_INDEX 30 #define TX_NORMAL_DESC2_TTSE_WIDTH 1 #define TX_NORMAL_DESC2_VTIR_INDEX 14 #define TX_NORMAL_DESC2_VTIR_WIDTH 2 #define TX_NORMAL_DESC3_CIC_INDEX 16 #define TX_NORMAL_DESC3_CIC_WIDTH 2 #define TX_NORMAL_DESC3_CPC_INDEX 26 #define TX_NORMAL_DESC3_CPC_WIDTH 2 #define TX_NORMAL_DESC3_CTXT_INDEX 30 #define TX_NORMAL_DESC3_CTXT_WIDTH 1 #define TX_NORMAL_DESC3_FD_INDEX 29 #define TX_NORMAL_DESC3_FD_WIDTH 1 #define TX_NORMAL_DESC3_FL_INDEX 0 #define TX_NORMAL_DESC3_FL_WIDTH 15 #define TX_NORMAL_DESC3_LD_INDEX 28 #define TX_NORMAL_DESC3_LD_WIDTH 1 #define TX_NORMAL_DESC3_OWN_INDEX 31 #define TX_NORMAL_DESC3_OWN_WIDTH 1 #define TX_NORMAL_DESC3_TCPHDRLEN_INDEX 19 #define TX_NORMAL_DESC3_TCPHDRLEN_WIDTH 4 #define TX_NORMAL_DESC3_TCPPL_INDEX 0 #define TX_NORMAL_DESC3_TCPPL_WIDTH 18 #define TX_NORMAL_DESC3_TSE_INDEX 18 #define TX_NORMAL_DESC3_TSE_WIDTH 1 #define TX_NORMAL_DESC3_VNP_INDEX 23 #define TX_NORMAL_DESC3_VNP_WIDTH 3 #define TX_NORMAL_DESC2_VLAN_INSERT 0x2 #define TX_NORMAL_DESC3_VXLAN_PACKET 0x3 /* MDIO undefined or vendor specific registers */ #ifndef MDIO_PMA_10GBR_PMD_CTRL #define MDIO_PMA_10GBR_PMD_CTRL 0x0096 #endif #ifndef MDIO_PMA_10GBR_FECCTRL #define MDIO_PMA_10GBR_FECCTRL 0x00ab #endif #ifndef MDIO_PCS_DIG_CTRL #define MDIO_PCS_DIG_CTRL 0x8000 #endif #ifndef MDIO_AN_XNP #define MDIO_AN_XNP 0x0016 #endif #ifndef MDIO_AN_LPX #define MDIO_AN_LPX 0x0019 #endif #ifndef MDIO_AN_COMP_STAT #define MDIO_AN_COMP_STAT 0x0030 #endif #ifndef MDIO_AN_INTMASK #define MDIO_AN_INTMASK 0x8001 #endif #ifndef MDIO_AN_INT #define MDIO_AN_INT 0x8002 #endif #ifndef MDIO_VEND2_AN_ADVERTISE #define MDIO_VEND2_AN_ADVERTISE 0x0004 #endif #ifndef MDIO_VEND2_AN_LP_ABILITY #define MDIO_VEND2_AN_LP_ABILITY 0x0005 #endif #ifndef MDIO_VEND2_AN_CTRL #define MDIO_VEND2_AN_CTRL 
0x8001 #endif #ifndef MDIO_VEND2_AN_STAT #define MDIO_VEND2_AN_STAT 0x8002 #endif #ifndef MDIO_VEND2_PMA_CDR_CONTROL #define MDIO_VEND2_PMA_CDR_CONTROL 0x8056 #endif +#ifndef MDIO_VEND2_PMA_MISC_CTRL0 +#define MDIO_VEND2_PMA_MISC_CTRL0 0x8090 +#endif + #ifndef MDIO_CTRL1_SPEED1G #define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100) #endif #ifndef MDIO_VEND2_CTRL1_AN_ENABLE #define MDIO_VEND2_CTRL1_AN_ENABLE BIT(12) #endif #ifndef MDIO_VEND2_CTRL1_AN_RESTART #define MDIO_VEND2_CTRL1_AN_RESTART BIT(9) #endif #ifndef MDIO_VEND2_CTRL1_SS6 #define MDIO_VEND2_CTRL1_SS6 BIT(6) #endif #ifndef MDIO_VEND2_CTRL1_SS13 #define MDIO_VEND2_CTRL1_SS13 BIT(13) #endif /* MDIO mask values */ #define XGBE_AN_CL73_INT_CMPLT BIT(0) #define XGBE_AN_CL73_INC_LINK BIT(1) #define XGBE_AN_CL73_PG_RCV BIT(2) #define XGBE_AN_CL73_INT_MASK 0x07 #define XGBE_XNP_MCF_NULL_MESSAGE 0x001 #define XGBE_XNP_ACK_PROCESSED BIT(12) #define XGBE_XNP_MP_FORMATTED BIT(13) #define XGBE_XNP_NP_EXCHANGE BIT(15) #define XGBE_KR_TRAINING_START BIT(0) #define XGBE_KR_TRAINING_ENABLE BIT(1) #define XGBE_PCS_CL37_BP BIT(12) #define XGBE_AN_CL37_INT_CMPLT BIT(0) #define XGBE_AN_CL37_INT_MASK 0x01 #define XGBE_AN_CL37_HD_MASK 0x40 #define XGBE_AN_CL37_FD_MASK 0x20 #define XGBE_AN_CL37_PCS_MODE_MASK 0x06 #define XGBE_AN_CL37_PCS_MODE_BASEX 0x00 #define XGBE_AN_CL37_PCS_MODE_SGMII 0x04 #define XGBE_AN_CL37_TX_CONFIG_MASK 0x08 #define XGBE_AN_CL37_MII_CTRL_8BIT 0x0100 #define XGBE_PMA_CDR_TRACK_EN_MASK 0x01 #define XGBE_PMA_CDR_TRACK_EN_OFF 0x00 #define XGBE_PMA_CDR_TRACK_EN_ON 0x01 +#define XGBE_PMA_PLL_CTRL_MASK BIT(15) +#define XGBE_PMA_PLL_CTRL_ENABLE BIT(15) +#define XGBE_PMA_PLL_CTRL_DISABLE 0x0000 + /* Bit setting and getting macros * The get macro will extract the current bit field value from within * the variable * * The set macro will clear the current bit field value within the * variable and then set the bit field of the variable to the * specified value */ #define GET_BITS(_var, _index, _width) \ (((_var) >> (_index)) & ((0x1 << (_width)) - 1)) #define SET_BITS(_var, _index, _width, _val) \ do { \ (_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \ (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \ } while (0) #define GET_BITS_LE(_var, _index, _width) \ ((le32_to_cpu((_var)) >> (_index)) & ((0x1 << (_width)) - 1)) #define SET_BITS_LE(_var, _index, _width, _val) \ do { \ (_var) &= cpu_to_le32(~(((0x1 << (_width)) - 1) << (_index))); \ (_var) |= cpu_to_le32((((_val) & \ ((0x1 << (_width)) - 1)) << (_index))); \ } while (0) /* Bit setting and getting macros based on register fields * The get macro uses the bit field definitions formed using the input * names to extract the current bit field value from within the * variable * * The set macro uses the bit field definitions formed using the input * names to set the bit field of the variable to the specified value */ #define XGMAC_GET_BITS(_var, _prefix, _field) \ GET_BITS((_var), \ _prefix##_##_field##_INDEX, \ _prefix##_##_field##_WIDTH) #define XGMAC_SET_BITS(_var, _prefix, _field, _val) \ SET_BITS((_var), \ _prefix##_##_field##_INDEX, \ _prefix##_##_field##_WIDTH, (_val)) #define XGMAC_GET_BITS_LE(_var, _prefix, _field) \ GET_BITS_LE((_var), \ _prefix##_##_field##_INDEX, \ _prefix##_##_field##_WIDTH) #define XGMAC_SET_BITS_LE(_var, _prefix, _field, _val) \ SET_BITS_LE((_var), \ _prefix##_##_field##_INDEX, \ _prefix##_##_field##_WIDTH, (_val)) /* Macros for reading or writing registers * The ioread macros will get bit fields or full values using the * register 
definitions formed using the input names * * The iowrite macros will set bit fields or full values using the * register definitions formed using the input names */ #define XGMAC_IOREAD(_pdata, _reg) \ bus_read_4((_pdata)->xgmac_res, _reg) #define XGMAC_IOREAD_BITS(_pdata, _reg, _field) \ GET_BITS(XGMAC_IOREAD((_pdata), _reg), \ _reg##_##_field##_INDEX, \ _reg##_##_field##_WIDTH) #define XGMAC_IOWRITE(_pdata, _reg, _val) \ bus_write_4((_pdata)->xgmac_res, _reg, (_val)) #define XGMAC_IOWRITE_BITS(_pdata, _reg, _field, _val) \ do { \ uint32_t reg_val = XGMAC_IOREAD((_pdata), _reg); \ SET_BITS(reg_val, \ _reg##_##_field##_INDEX, \ _reg##_##_field##_WIDTH, (_val)); \ XGMAC_IOWRITE((_pdata), _reg, reg_val); \ } while (0) /* Macros for reading or writing MTL queue or traffic class registers * Similar to the standard read and write macros except that the * base register value is calculated by the queue or traffic class number */ #define XGMAC_MTL_IOREAD(_pdata, _n, _reg) \ bus_read_4((_pdata)->xgmac_res, \ MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg) #define XGMAC_MTL_IOREAD_BITS(_pdata, _n, _reg, _field) \ GET_BITS(XGMAC_MTL_IOREAD((_pdata), (_n), _reg), \ _reg##_##_field##_INDEX, \ _reg##_##_field##_WIDTH) #define XGMAC_MTL_IOWRITE(_pdata, _n, _reg, _val) \ bus_write_4((_pdata)->xgmac_res, \ MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg, (_val)) #define XGMAC_MTL_IOWRITE_BITS(_pdata, _n, _reg, _field, _val) \ do { \ uint32_t reg_val = XGMAC_MTL_IOREAD((_pdata), (_n), _reg); \ SET_BITS(reg_val, \ _reg##_##_field##_INDEX, \ _reg##_##_field##_WIDTH, (_val)); \ XGMAC_MTL_IOWRITE((_pdata), (_n), _reg, reg_val); \ } while (0) /* Macros for reading or writing DMA channel registers * Similar to the standard read and write macros except that the * base register value is obtained from the ring */ #define XGMAC_DMA_IOREAD(_channel, _reg) \ bus_space_read_4((_channel)->dma_tag, (_channel)->dma_handle, _reg) #define XGMAC_DMA_IOREAD_BITS(_channel, _reg, _field) \ GET_BITS(XGMAC_DMA_IOREAD((_channel), _reg), \ _reg##_##_field##_INDEX, \ _reg##_##_field##_WIDTH) #define XGMAC_DMA_IOWRITE(_channel, _reg, _val) \ bus_space_write_4((_channel)->dma_tag, (_channel)->dma_handle, \ _reg, (_val)) #define XGMAC_DMA_IOWRITE_BITS(_channel, _reg, _field, _val) \ do { \ uint32_t reg_val = XGMAC_DMA_IOREAD((_channel), _reg); \ SET_BITS(reg_val, \ _reg##_##_field##_INDEX, \ _reg##_##_field##_WIDTH, (_val)); \ XGMAC_DMA_IOWRITE((_channel), _reg, reg_val); \ } while (0) /* Macros for building, reading or writing register values or bits * within the register values of XPCS registers. */ #define XPCS_GET_BITS(_var, _prefix, _field) \ GET_BITS((_var), \ _prefix##_##_field##_INDEX, \ _prefix##_##_field##_WIDTH) #define XPCS_SET_BITS(_var, _prefix, _field, _val) \ SET_BITS((_var), \ _prefix##_##_field##_INDEX, \ _prefix##_##_field##_WIDTH, (_val)) #define XPCS32_IOWRITE(_pdata, _off, _val) \ bus_write_4((_pdata)->xpcs_res, (_off), _val) #define XPCS32_IOREAD(_pdata, _off) \ bus_read_4((_pdata)->xpcs_res, (_off)) #define XPCS16_IOWRITE(_pdata, _off, _val) \ bus_write_2((_pdata)->xpcs_res, (_off), _val) #define XPCS16_IOREAD(_pdata, _off) \ bus_read_2((_pdata)->xpcs_res, (_off)) /* Macros for building, reading or writing register values or bits * within the register values of SerDes integration registers. 
*/ #define XSIR_GET_BITS(_var, _prefix, _field) \ GET_BITS((_var), \ _prefix##_##_field##_INDEX, \ _prefix##_##_field##_WIDTH) #define XSIR_SET_BITS(_var, _prefix, _field, _val) \ SET_BITS((_var), \ _prefix##_##_field##_INDEX, \ _prefix##_##_field##_WIDTH, (_val)) #define XSIR0_IOREAD(_pdata, _reg) \ bus_read_2((_pdata)->sir0_res, _reg) #define XSIR0_IOREAD_BITS(_pdata, _reg, _field) \ GET_BITS(XSIR0_IOREAD((_pdata), _reg), \ _reg##_##_field##_INDEX, \ _reg##_##_field##_WIDTH) #define XSIR0_IOWRITE(_pdata, _reg, _val) \ bus_write_2((_pdata)->sir0_res, _reg, (_val)) #define XSIR0_IOWRITE_BITS(_pdata, _reg, _field, _val) \ do { \ uint16_t reg_val = XSIR0_IOREAD((_pdata), _reg); \ SET_BITS(reg_val, \ _reg##_##_field##_INDEX, \ _reg##_##_field##_WIDTH, (_val)); \ XSIR0_IOWRITE((_pdata), _reg, reg_val); \ } while (0) #define XSIR1_IOREAD(_pdata, _reg) \ bus_read_2((_pdata)->sir1_res, _reg) #define XSIR1_IOREAD_BITS(_pdata, _reg, _field) \ GET_BITS(XSIR1_IOREAD((_pdata), _reg), \ _reg##_##_field##_INDEX, \ _reg##_##_field##_WIDTH) #define XSIR1_IOWRITE(_pdata, _reg, _val) \ bus_write_2((_pdata)->sir1_res, _reg, (_val)) #define XSIR1_IOWRITE_BITS(_pdata, _reg, _field, _val) \ do { \ uint16_t reg_val = XSIR1_IOREAD((_pdata), _reg); \ SET_BITS(reg_val, \ _reg##_##_field##_INDEX, \ _reg##_##_field##_WIDTH, (_val)); \ XSIR1_IOWRITE((_pdata), _reg, reg_val); \ } while (0) /* Macros for building, reading or writing register values or bits * within the register values of SerDes RxTx registers. */ #define XRXTX_IOREAD(_pdata, _reg) \ bus_read_2((_pdata)->rxtx_res, _reg) #define XRXTX_IOREAD_BITS(_pdata, _reg, _field) \ GET_BITS(XRXTX_IOREAD((_pdata), _reg), \ _reg##_##_field##_INDEX, \ _reg##_##_field##_WIDTH) #define XRXTX_IOWRITE(_pdata, _reg, _val) \ bus_write_2((_pdata)->rxtx_res, _reg, (_val)) #define XRXTX_IOWRITE_BITS(_pdata, _reg, _field, _val) \ do { \ uint16_t reg_val = XRXTX_IOREAD((_pdata), _reg); \ SET_BITS(reg_val, \ _reg##_##_field##_INDEX, \ _reg##_##_field##_WIDTH, (_val)); \ XRXTX_IOWRITE((_pdata), _reg, reg_val); \ } while (0) /* Macros for building, reading or writing register values or bits * within the register values of MAC Control registers. */ #define XP_GET_BITS(_var, _prefix, _field) \ GET_BITS((_var), \ _prefix##_##_field##_INDEX, \ _prefix##_##_field##_WIDTH) #define XP_SET_BITS(_var, _prefix, _field, _val) \ SET_BITS((_var), \ _prefix##_##_field##_INDEX, \ _prefix##_##_field##_WIDTH, (_val)) #define XP_IOREAD(_pdata, _reg) \ bus_read_4((_pdata)->xgmac_res, _reg + XGBE_MAC_PROP_OFFSET) #define XP_IOREAD_BITS(_pdata, _reg, _field) \ GET_BITS(XP_IOREAD((_pdata), (_reg)), \ _reg##_##_field##_INDEX, \ _reg##_##_field##_WIDTH) #define XP_IOWRITE(_pdata, _reg, _val) \ bus_write_4((_pdata)->xgmac_res, _reg + XGBE_MAC_PROP_OFFSET, \ (_val)) #define XP_IOWRITE_BITS(_pdata, _reg, _field, _val) \ do { \ uint32_t reg_val = XP_IOREAD((_pdata), (_reg)); \ SET_BITS(reg_val, \ _reg##_##_field##_INDEX, \ _reg##_##_field##_WIDTH, (_val)); \ XP_IOWRITE((_pdata), (_reg), reg_val); \ } while (0) /* Macros for building, reading or writing register values or bits * within the register values of I2C Control registers. 
*/ #define XI2C_GET_BITS(_var, _prefix, _field) \ GET_BITS((_var), \ _prefix##_##_field##_INDEX, \ _prefix##_##_field##_WIDTH) #define XI2C_SET_BITS(_var, _prefix, _field, _val) \ SET_BITS((_var), \ _prefix##_##_field##_INDEX, \ _prefix##_##_field##_WIDTH, (_val)) #define XI2C_IOREAD(_pdata, _reg) \ bus_read_4((_pdata)->xgmac_res, _reg + XGBE_I2C_CTRL_OFFSET) #define XI2C_IOREAD_BITS(_pdata, _reg, _field) \ GET_BITS(XI2C_IOREAD((_pdata), (_reg)), \ _reg##_##_field##_INDEX, \ _reg##_##_field##_WIDTH) #define XI2C_IOWRITE(_pdata, _reg, _val) \ bus_write_4((_pdata)->xgmac_res, _reg + XGBE_I2C_CTRL_OFFSET, \ (_val)) #define XI2C_IOWRITE_BITS(_pdata, _reg, _field, _val) \ do { \ uint32_t reg_val = XI2C_IOREAD((_pdata), (_reg)); \ SET_BITS(reg_val, \ _reg##_##_field##_INDEX, \ _reg##_##_field##_WIDTH, (_val)); \ XI2C_IOWRITE((_pdata), (_reg), reg_val); \ } while (0) /* Macros for building, reading or writing register values or bits * using MDIO. Different from above because of the use of standardized * Linux include values. No shifting is performed with the bit * operations, everything works on mask values. */ #define XMDIO_READ(_pdata, _mmd, _reg) \ ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \ MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff))) #define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \ (XMDIO_READ((_pdata), _mmd, _reg) & _mask) #define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \ ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \ MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val))) #define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \ do { \ uint32_t mmd_val = XMDIO_READ((_pdata), _mmd, _reg); \ mmd_val &= ~_mask; \ mmd_val |= (_val); \ XMDIO_WRITE((_pdata), _mmd, _reg, mmd_val); \ } while (0) #endif diff --git a/sys/dev/axgbe/xgbe-i2c.c b/sys/dev/axgbe/xgbe-i2c.c index 59c767d0efc7..5883e96ed37e 100644 --- a/sys/dev/axgbe/xgbe-i2c.c +++ b/sys/dev/axgbe/xgbe-i2c.c @@ -1,530 +1,529 @@ /* * AMD 10Gb Ethernet driver * * Copyright (c) 2020 Advanced Micro Devices, Inc. * * This file is available to you under your choice of the following two * licenses: * * License 1: GPLv2 * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or (at * your option) any later version. * * This file is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . * * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. unless otherwise expressly agreed to in writing between Synopsys * and you. * * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. 
Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * * License 2: Modified BSD * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Advanced Micro Devices, Inc. nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. unless otherwise expressly agreed to in writing between Synopsys * and you. * * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. 
Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include #include "xgbe.h" #include "xgbe-common.h" #define XGBE_ABORT_COUNT 500 #define XGBE_DISABLE_COUNT 1000 #define XGBE_STD_SPEED 1 #define XGBE_INTR_RX_FULL BIT(IC_RAW_INTR_STAT_RX_FULL_INDEX) #define XGBE_INTR_TX_EMPTY BIT(IC_RAW_INTR_STAT_TX_EMPTY_INDEX) #define XGBE_INTR_TX_ABRT BIT(IC_RAW_INTR_STAT_TX_ABRT_INDEX) #define XGBE_INTR_STOP_DET BIT(IC_RAW_INTR_STAT_STOP_DET_INDEX) #define XGBE_DEFAULT_INT_MASK (XGBE_INTR_RX_FULL | \ XGBE_INTR_TX_EMPTY | \ XGBE_INTR_TX_ABRT | \ XGBE_INTR_STOP_DET) #define XGBE_I2C_READ BIT(8) #define XGBE_I2C_STOP BIT(9) static int xgbe_i2c_abort(struct xgbe_prv_data *pdata) { unsigned int wait = XGBE_ABORT_COUNT; /* Must be enabled to recognize the abort request */ XI2C_IOWRITE_BITS(pdata, IC_ENABLE, EN, 1); /* Issue the abort */ XI2C_IOWRITE_BITS(pdata, IC_ENABLE, ABORT, 1); while (wait--) { if (!XI2C_IOREAD_BITS(pdata, IC_ENABLE, ABORT)) return (0); DELAY(500); } return (-EBUSY); } static int xgbe_i2c_set_enable(struct xgbe_prv_data *pdata, bool enable) { unsigned int wait = XGBE_DISABLE_COUNT; unsigned int mode = enable ? 
1 : 0; while (wait--) { XI2C_IOWRITE_BITS(pdata, IC_ENABLE, EN, mode); if (XI2C_IOREAD_BITS(pdata, IC_ENABLE_STATUS, EN) == mode) return (0); DELAY(100); } return (-EBUSY); } static int xgbe_i2c_disable(struct xgbe_prv_data *pdata) { unsigned int ret; ret = xgbe_i2c_set_enable(pdata, false); if (ret) { /* Disable failed, try an abort */ ret = xgbe_i2c_abort(pdata); if (ret) { axgbe_error("%s: i2c_abort %d\n", __func__, ret); return (ret); } /* Abort succeeded, try to disable again */ ret = xgbe_i2c_set_enable(pdata, false); } axgbe_printf(3, "%s: final i2c_disable %d\n", __func__, ret); return (ret); } static int xgbe_i2c_enable(struct xgbe_prv_data *pdata) { return (xgbe_i2c_set_enable(pdata, true)); } static void xgbe_i2c_clear_all_interrupts(struct xgbe_prv_data *pdata) { XI2C_IOREAD(pdata, IC_CLR_INTR); } static void xgbe_i2c_disable_interrupts(struct xgbe_prv_data *pdata) { XI2C_IOWRITE(pdata, IC_INTR_MASK, 0); } static void xgbe_i2c_enable_interrupts(struct xgbe_prv_data *pdata) { XI2C_IOWRITE(pdata, IC_INTR_MASK, XGBE_DEFAULT_INT_MASK); } static void xgbe_i2c_write(struct xgbe_prv_data *pdata) { struct xgbe_i2c_op_state *state = &pdata->i2c.op_state; unsigned int tx_slots, cmd; /* Configured to never receive Rx overflows, so fill up Tx fifo */ tx_slots = pdata->i2c.tx_fifo_size - XI2C_IOREAD(pdata, IC_TXFLR); axgbe_printf(3, "%s: tx_slots %d tx_len %d\n", __func__, tx_slots, state->tx_len); while (tx_slots && state->tx_len) { if (state->op->cmd == XGBE_I2C_CMD_READ) cmd = XGBE_I2C_READ; else cmd = *state->tx_buf++; axgbe_printf(3, "%s: cmd %d tx_len %d\n", __func__, cmd, state->tx_len); if (state->tx_len == 1) XI2C_SET_BITS(cmd, IC_DATA_CMD, STOP, 1); XI2C_IOWRITE(pdata, IC_DATA_CMD, cmd); tx_slots--; state->tx_len--; } /* No more Tx operations, so ignore TX_EMPTY and return */ if (!state->tx_len) XI2C_IOWRITE_BITS(pdata, IC_INTR_MASK, TX_EMPTY, 0); } static void xgbe_i2c_read(struct xgbe_prv_data *pdata) { struct xgbe_i2c_op_state *state = &pdata->i2c.op_state; unsigned int rx_slots; /* Anything to be read? 
*/ axgbe_printf(3, "%s: op cmd %d\n", __func__, state->op->cmd); if (state->op->cmd != XGBE_I2C_CMD_READ) return; rx_slots = XI2C_IOREAD(pdata, IC_RXFLR); axgbe_printf(3, "%s: rx_slots %d rx_len %d\n", __func__, rx_slots, state->rx_len); while (rx_slots && state->rx_len) { *state->rx_buf++ = XI2C_IOREAD(pdata, IC_DATA_CMD); state->rx_len--; rx_slots--; } } static void xgbe_i2c_clear_isr_interrupts(struct xgbe_prv_data *pdata, unsigned int isr) { struct xgbe_i2c_op_state *state = &pdata->i2c.op_state; if (isr & XGBE_INTR_TX_ABRT) { state->tx_abort_source = XI2C_IOREAD(pdata, IC_TX_ABRT_SOURCE); XI2C_IOREAD(pdata, IC_CLR_TX_ABRT); } if (isr & XGBE_INTR_STOP_DET) XI2C_IOREAD(pdata, IC_CLR_STOP_DET); } static void xgbe_i2c_isr(void *data) { struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data; struct xgbe_i2c_op_state *state = &pdata->i2c.op_state; unsigned int isr; isr = XI2C_IOREAD(pdata, IC_RAW_INTR_STAT); axgbe_printf(3, "%s: isr 0x%x\n", __func__, isr); if (!isr) goto reissue_check; axgbe_printf(3, "%s: I2C interrupt status=%#010x\n", __func__, isr); xgbe_i2c_clear_isr_interrupts(pdata, isr); if (isr & XGBE_INTR_TX_ABRT) { axgbe_printf(1, "%s: I2C TX_ABRT received (%#010x) for target " "%#04x\n", __func__, state->tx_abort_source, state->op->target); xgbe_i2c_disable_interrupts(pdata); state->ret = -EIO; goto out; } /* Check for data in the Rx fifo */ xgbe_i2c_read(pdata); /* Fill up the Tx fifo next */ xgbe_i2c_write(pdata); out: /* Complete on an error or STOP condition */ axgbe_printf(3, "%s: ret %d stop %d\n", __func__, state->ret, XI2C_GET_BITS(isr, IC_RAW_INTR_STAT, STOP_DET)); if (state->ret || XI2C_GET_BITS(isr, IC_RAW_INTR_STAT, STOP_DET)) pdata->i2c_complete = true; return; reissue_check: /* Reissue interrupt if status is not clear */ if (pdata->vdata->irq_reissue_support) XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 2); } static void xgbe_i2c_set_mode(struct xgbe_prv_data *pdata) { unsigned int reg; reg = XI2C_IOREAD(pdata, IC_CON); XI2C_SET_BITS(reg, IC_CON, MASTER_MODE, 1); XI2C_SET_BITS(reg, IC_CON, SLAVE_DISABLE, 1); XI2C_SET_BITS(reg, IC_CON, RESTART_EN, 1); XI2C_SET_BITS(reg, IC_CON, SPEED, XGBE_STD_SPEED); XI2C_SET_BITS(reg, IC_CON, RX_FIFO_FULL_HOLD, 1); XI2C_IOWRITE(pdata, IC_CON, reg); } static void xgbe_i2c_get_features(struct xgbe_prv_data *pdata) { struct xgbe_i2c *i2c = &pdata->i2c; unsigned int reg; reg = XI2C_IOREAD(pdata, IC_COMP_PARAM_1); i2c->max_speed_mode = XI2C_GET_BITS(reg, IC_COMP_PARAM_1, MAX_SPEED_MODE); i2c->rx_fifo_size = XI2C_GET_BITS(reg, IC_COMP_PARAM_1, RX_BUFFER_DEPTH); i2c->tx_fifo_size = XI2C_GET_BITS(reg, IC_COMP_PARAM_1, TX_BUFFER_DEPTH); axgbe_printf(3, "%s: I2C features: %s=%u, %s=%u, %s=%u\n", __func__, "MAX_SPEED_MODE", i2c->max_speed_mode, "RX_BUFFER_DEPTH", i2c->rx_fifo_size, "TX_BUFFER_DEPTH", i2c->tx_fifo_size); } static void xgbe_i2c_set_target(struct xgbe_prv_data *pdata, unsigned int addr) { XI2C_IOWRITE(pdata, IC_TAR, addr); } static void xgbe_i2c_combined_isr(struct xgbe_prv_data *pdata) { xgbe_i2c_isr(pdata); } static int xgbe_i2c_xfer(struct xgbe_prv_data *pdata, struct xgbe_i2c_op *op) { struct xgbe_i2c_op_state *state = &pdata->i2c.op_state; unsigned long timeout; int ret; mtx_lock(&pdata->i2c_mutex); axgbe_printf(3, "i2c xfer started ---->>>\n"); ret = xgbe_i2c_disable(pdata); if (ret) { axgbe_error("failed to disable i2c master\n"); goto out; } xgbe_i2c_set_target(pdata, op->target); memset(state, 0, sizeof(*state)); state->op = op; state->tx_len = op->len; state->tx_buf = op->buf; state->rx_len = op->len; state->rx_buf 
= op->buf; xgbe_i2c_clear_all_interrupts(pdata); ret = xgbe_i2c_enable(pdata); if (ret) { axgbe_error("failed to enable i2c master\n"); goto out; } /* Enabling the interrupts will cause the TX FIFO empty interrupt to * fire and begin to process the command via the ISR. */ xgbe_i2c_enable_interrupts(pdata); timeout = ticks + (20 * hz); while (ticks < timeout) { if (!pdata->i2c_complete) { DELAY(200); continue; } axgbe_printf(1, "%s: I2C OP complete\n", __func__); break; } if ((ticks >= timeout) && !pdata->i2c_complete) { axgbe_error("%s: operation timed out\n", __func__); ret = -ETIMEDOUT; goto disable; } ret = state->ret; - axgbe_printf(3, "%s: i2c xfer ret %d abrt_source 0x%x \n", __func__, + axgbe_printf(3, "%s: i2c xfer ret %d abrt_source 0x%x\n", __func__, ret, state->tx_abort_source); if (ret) { - - axgbe_error("%s: i2c xfer ret %d abrt_source 0x%x \n", __func__, + axgbe_printf(1, "%s: i2c xfer ret %d abrt_source 0x%x\n", __func__, ret, state->tx_abort_source); if (state->tx_abort_source & IC_TX_ABRT_7B_ADDR_NOACK) ret = -ENOTCONN; else if (state->tx_abort_source & IC_TX_ABRT_ARB_LOST) ret = -EAGAIN; } axgbe_printf(3, "i2c xfer finished ---->>>\n"); disable: pdata->i2c_complete = false; xgbe_i2c_disable_interrupts(pdata); xgbe_i2c_disable(pdata); out: mtx_unlock(&pdata->i2c_mutex); return (ret); } static void xgbe_i2c_stop(struct xgbe_prv_data *pdata) { if (!pdata->i2c.started) return; axgbe_printf(3, "stopping I2C\n"); pdata->i2c.started = 0; xgbe_i2c_disable_interrupts(pdata); xgbe_i2c_disable(pdata); xgbe_i2c_clear_all_interrupts(pdata); } static int xgbe_i2c_start(struct xgbe_prv_data *pdata) { if (pdata->i2c.started) return (0); pdata->i2c.started = 1; return (0); } static int xgbe_i2c_init(struct xgbe_prv_data *pdata) { int ret; /* initialize lock for i2c */ mtx_init(&pdata->i2c_mutex, "xgbe i2c mutex lock", NULL, MTX_DEF); pdata->i2c_complete = false; xgbe_i2c_disable_interrupts(pdata); ret = xgbe_i2c_disable(pdata); if (ret) { axgbe_error("failed to disable i2c master\n"); return (ret); } xgbe_i2c_get_features(pdata); xgbe_i2c_set_mode(pdata); xgbe_i2c_clear_all_interrupts(pdata); xgbe_dump_i2c_registers(pdata); return (0); } void xgbe_init_function_ptrs_i2c(struct xgbe_i2c_if *i2c_if) { i2c_if->i2c_init = xgbe_i2c_init; i2c_if->i2c_start = xgbe_i2c_start; i2c_if->i2c_stop = xgbe_i2c_stop; i2c_if->i2c_xfer = xgbe_i2c_xfer; i2c_if->i2c_isr = xgbe_i2c_combined_isr; } diff --git a/sys/dev/axgbe/xgbe-mdio.c b/sys/dev/axgbe/xgbe-mdio.c index 16488055e2c6..a5a9fdd016bc 100644 --- a/sys/dev/axgbe/xgbe-mdio.c +++ b/sys/dev/axgbe/xgbe-mdio.c @@ -1,1632 +1,1632 @@ /* * AMD 10Gb Ethernet driver * * Copyright (c) 2014-2016,2020 Advanced Micro Devices, Inc. * * This file is available to you under your choice of the following two * licenses: * * License 1: GPLv2 * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or (at * your option) any later version. * * This file is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . 
* * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. unless otherwise expressly agreed to in writing between Synopsys * and you. * * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * * License 2: Modified BSD * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Advanced Micro Devices, Inc. nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. 
unless otherwise expressly agreed to in writing between Synopsys * and you. * * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include #include "xgbe.h" #include "xgbe-common.h" static void xgbe_an_state_machine(struct xgbe_prv_data *pdata); static void xgbe_an37_clear_interrupts(struct xgbe_prv_data *pdata) { int reg; reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT); reg &= ~XGBE_AN_CL37_INT_MASK; XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT, reg); } static void xgbe_an37_disable_interrupts(struct xgbe_prv_data *pdata) { int reg; reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL); reg &= ~XGBE_AN_CL37_INT_MASK; XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg); reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL); reg &= ~XGBE_PCS_CL37_BP; XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL, reg); } static void xgbe_an37_enable_interrupts(struct xgbe_prv_data *pdata) { int reg; reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL); reg |= XGBE_PCS_CL37_BP; XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL, reg); reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL); reg |= XGBE_AN_CL37_INT_MASK; XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg); } static void xgbe_an73_clear_interrupts(struct xgbe_prv_data *pdata) { XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0); } static void xgbe_an73_disable_interrupts(struct xgbe_prv_data *pdata) { XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0); } static void xgbe_an73_enable_interrupts(struct xgbe_prv_data *pdata) { XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, XGBE_AN_CL73_INT_MASK); } static void xgbe_an_enable_interrupts(struct xgbe_prv_data *pdata) { switch (pdata->an_mode) { case XGBE_AN_MODE_CL73: case XGBE_AN_MODE_CL73_REDRV: xgbe_an73_enable_interrupts(pdata); break; case XGBE_AN_MODE_CL37: case XGBE_AN_MODE_CL37_SGMII: xgbe_an37_enable_interrupts(pdata); break; default: break; } } static void xgbe_an_clear_interrupts_all(struct xgbe_prv_data *pdata) { xgbe_an73_clear_interrupts(pdata); xgbe_an37_clear_interrupts(pdata); } 
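/*
 * Illustrative sketch, not part of this patch: the CL37/CL73 interrupt
 * helpers above follow the same mask-based read-modify-write pattern
 * that the XMDIO_WRITE_BITS() macro from xgbe-common.h wraps.  The
 * function name below is hypothetical and exists only for illustration.
 */
static void
example_an37_unmask_interrupts(struct xgbe_prv_data *pdata)
{
	int reg;

	/* Read the vendor AN control register, set the CL37 interrupt
	 * mask bits and write the value back.  This is equivalent to
	 * XMDIO_WRITE_BITS(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL,
	 *     XGBE_AN_CL37_INT_MASK, XGBE_AN_CL37_INT_MASK);
	 * since no shifting is performed and the macro works directly
	 * on mask values.
	 */
	reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL);
	reg |= XGBE_AN_CL37_INT_MASK;
	XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg);
}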
static void xgbe_kr_mode(struct xgbe_prv_data *pdata) { /* Set MAC to 10G speed */ pdata->hw_if.set_speed(pdata, SPEED_10000); /* Call PHY implementation support to complete rate change */ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_KR); } static void xgbe_kx_2500_mode(struct xgbe_prv_data *pdata) { /* Set MAC to 2.5G speed */ pdata->hw_if.set_speed(pdata, SPEED_2500); /* Call PHY implementation support to complete rate change */ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_KX_2500); } static void xgbe_kx_1000_mode(struct xgbe_prv_data *pdata) { /* Set MAC to 1G speed */ pdata->hw_if.set_speed(pdata, SPEED_1000); /* Call PHY implementation support to complete rate change */ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_KX_1000); } static void xgbe_sfi_mode(struct xgbe_prv_data *pdata) { /* If a KR re-driver is present, change to KR mode instead */ if (pdata->kr_redrv) return (xgbe_kr_mode(pdata)); /* Set MAC to 10G speed */ pdata->hw_if.set_speed(pdata, SPEED_10000); /* Call PHY implementation support to complete rate change */ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SFI); } static void xgbe_x_mode(struct xgbe_prv_data *pdata) { /* Set MAC to 1G speed */ pdata->hw_if.set_speed(pdata, SPEED_1000); /* Call PHY implementation support to complete rate change */ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_X); } static void xgbe_sgmii_1000_mode(struct xgbe_prv_data *pdata) { /* Set MAC to 1G speed */ pdata->hw_if.set_speed(pdata, SPEED_1000); /* Call PHY implementation support to complete rate change */ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SGMII_1000); } static void xgbe_sgmii_100_mode(struct xgbe_prv_data *pdata) { /* Set MAC to 1G speed */ pdata->hw_if.set_speed(pdata, SPEED_1000); /* Call PHY implementation support to complete rate change */ pdata->phy_if.phy_impl.set_mode(pdata, XGBE_MODE_SGMII_100); } static enum xgbe_mode xgbe_cur_mode(struct xgbe_prv_data *pdata) { return (pdata->phy_if.phy_impl.cur_mode(pdata)); } static bool xgbe_in_kr_mode(struct xgbe_prv_data *pdata) { return (xgbe_cur_mode(pdata) == XGBE_MODE_KR); } static void xgbe_change_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { switch (mode) { case XGBE_MODE_KX_1000: xgbe_kx_1000_mode(pdata); break; case XGBE_MODE_KX_2500: xgbe_kx_2500_mode(pdata); break; case XGBE_MODE_KR: xgbe_kr_mode(pdata); break; case XGBE_MODE_SGMII_100: xgbe_sgmii_100_mode(pdata); break; case XGBE_MODE_SGMII_1000: xgbe_sgmii_1000_mode(pdata); break; case XGBE_MODE_X: xgbe_x_mode(pdata); break; case XGBE_MODE_SFI: xgbe_sfi_mode(pdata); break; case XGBE_MODE_UNKNOWN: break; default: axgbe_error("invalid operation mode requested (%u)\n", mode); } } static void xgbe_switch_mode(struct xgbe_prv_data *pdata) { xgbe_change_mode(pdata, pdata->phy_if.phy_impl.switch_mode(pdata)); } static bool xgbe_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { if (mode == xgbe_cur_mode(pdata)) return (false); xgbe_change_mode(pdata, mode); return (true); } static bool xgbe_use_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { return (pdata->phy_if.phy_impl.use_mode(pdata, mode)); } static void xgbe_an37_set(struct xgbe_prv_data *pdata, bool enable, bool restart) { unsigned int reg; reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_CTRL1); reg &= ~MDIO_VEND2_CTRL1_AN_ENABLE; if (enable) reg |= MDIO_VEND2_CTRL1_AN_ENABLE; if (restart) reg |= MDIO_VEND2_CTRL1_AN_RESTART; XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_CTRL1, reg); } static void xgbe_an37_restart(struct xgbe_prv_data *pdata) { 
xgbe_an37_enable_interrupts(pdata); xgbe_an37_set(pdata, true, true); } static void xgbe_an37_disable(struct xgbe_prv_data *pdata) { xgbe_an37_set(pdata, false, false); xgbe_an37_disable_interrupts(pdata); } static void xgbe_an73_set(struct xgbe_prv_data *pdata, bool enable, bool restart) { unsigned int reg; /* Disable KR training for now */ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); reg &= ~XGBE_KR_TRAINING_ENABLE; XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); /* Update AN settings */ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1); reg &= ~MDIO_AN_CTRL1_ENABLE; if (enable) reg |= MDIO_AN_CTRL1_ENABLE; if (restart) reg |= MDIO_AN_CTRL1_RESTART; XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg); } static void xgbe_an73_restart(struct xgbe_prv_data *pdata) { xgbe_an73_enable_interrupts(pdata); xgbe_an73_set(pdata, true, true); } static void xgbe_an73_disable(struct xgbe_prv_data *pdata) { xgbe_an73_set(pdata, false, false); xgbe_an73_disable_interrupts(pdata); pdata->an_start = 0; } static void xgbe_an_restart(struct xgbe_prv_data *pdata) { if (pdata->phy_if.phy_impl.an_pre) pdata->phy_if.phy_impl.an_pre(pdata); switch (pdata->an_mode) { case XGBE_AN_MODE_CL73: case XGBE_AN_MODE_CL73_REDRV: xgbe_an73_restart(pdata); break; case XGBE_AN_MODE_CL37: case XGBE_AN_MODE_CL37_SGMII: xgbe_an37_restart(pdata); break; default: break; } } static void xgbe_an_disable(struct xgbe_prv_data *pdata) { if (pdata->phy_if.phy_impl.an_post) pdata->phy_if.phy_impl.an_post(pdata); switch (pdata->an_mode) { case XGBE_AN_MODE_CL73: case XGBE_AN_MODE_CL73_REDRV: xgbe_an73_disable(pdata); break; case XGBE_AN_MODE_CL37: case XGBE_AN_MODE_CL37_SGMII: xgbe_an37_disable(pdata); break; default: break; } } static void xgbe_an_disable_all(struct xgbe_prv_data *pdata) { xgbe_an73_disable(pdata); xgbe_an37_disable(pdata); } static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata, enum xgbe_rx *state) { unsigned int ad_reg, lp_reg, reg; *state = XGBE_RX_COMPLETE; /* If we're not in KR mode then we're done */ if (!xgbe_in_kr_mode(pdata)) return (XGBE_AN_PAGE_RECEIVED); /* Enable/Disable FEC */ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2); reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL); reg &= ~(MDIO_PMA_10GBR_FECABLE_ABLE | MDIO_PMA_10GBR_FECABLE_ERRABLE); if ((ad_reg & 0xc000) && (lp_reg & 0xc000)) reg |= pdata->fec_ability; XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL, reg); /* Start KR training */ if (pdata->phy_if.phy_impl.kr_training_pre) pdata->phy_if.phy_impl.kr_training_pre(pdata); /* Start KR training */ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); reg |= XGBE_KR_TRAINING_ENABLE; reg |= XGBE_KR_TRAINING_START; XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); if (pdata->phy_if.phy_impl.kr_training_post) pdata->phy_if.phy_impl.kr_training_post(pdata); return (XGBE_AN_PAGE_RECEIVED); } static enum xgbe_an xgbe_an73_tx_xnp(struct xgbe_prv_data *pdata, enum xgbe_rx *state) { uint16_t msg; *state = XGBE_RX_XNP; msg = XGBE_XNP_MCF_NULL_MESSAGE; msg |= XGBE_XNP_MP_FORMATTED; XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0); XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0); XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP, msg); return (XGBE_AN_PAGE_RECEIVED); } static enum xgbe_an xgbe_an73_rx_bpa(struct xgbe_prv_data *pdata, enum xgbe_rx *state) { unsigned int link_support; unsigned int reg, ad_reg, lp_reg; /* Read Base 
Ability register 2 first */ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1); /* Check for a supported mode, otherwise restart in a different one */ link_support = xgbe_in_kr_mode(pdata) ? 0x80 : 0x20; if (!(reg & link_support)) return (XGBE_AN_INCOMPAT_LINK); /* Check Extended Next Page support */ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA); return (((ad_reg & XGBE_XNP_NP_EXCHANGE) || (lp_reg & XGBE_XNP_NP_EXCHANGE)) ? xgbe_an73_tx_xnp(pdata, state) : xgbe_an73_tx_training(pdata, state)); } static enum xgbe_an xgbe_an73_rx_xnp(struct xgbe_prv_data *pdata, enum xgbe_rx *state) { unsigned int ad_reg, lp_reg; /* Check Extended Next Page support */ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_XNP); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPX); return (((ad_reg & XGBE_XNP_NP_EXCHANGE) || (lp_reg & XGBE_XNP_NP_EXCHANGE)) ? xgbe_an73_tx_xnp(pdata, state) : xgbe_an73_tx_training(pdata, state)); } static enum xgbe_an xgbe_an73_page_received(struct xgbe_prv_data *pdata) { enum xgbe_rx *state; unsigned long an_timeout; enum xgbe_an ret; if (!pdata->an_start) { pdata->an_start = ticks; } else { an_timeout = pdata->an_start + ((uint64_t)XGBE_AN_MS_TIMEOUT * (uint64_t)hz) / 1000ull; if ((int)(ticks - an_timeout) > 0) { /* Auto-negotiation timed out, reset state */ pdata->kr_state = XGBE_RX_BPA; pdata->kx_state = XGBE_RX_BPA; pdata->an_start = ticks; axgbe_printf(2, "CL73 AN timed out, resetting state\n"); } } state = xgbe_in_kr_mode(pdata) ? &pdata->kr_state : &pdata->kx_state; switch (*state) { case XGBE_RX_BPA: ret = xgbe_an73_rx_bpa(pdata, state); break; case XGBE_RX_XNP: ret = xgbe_an73_rx_xnp(pdata, state); break; default: ret = XGBE_AN_ERROR; } return (ret); } static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata) { /* Be sure we aren't looping trying to negotiate */ if (xgbe_in_kr_mode(pdata)) { pdata->kr_state = XGBE_RX_ERROR; if (!(XGBE_ADV(&pdata->phy, 1000baseKX_Full)) && !(XGBE_ADV(&pdata->phy, 2500baseX_Full))) return (XGBE_AN_NO_LINK); if (pdata->kx_state != XGBE_RX_BPA) return (XGBE_AN_NO_LINK); } else { pdata->kx_state = XGBE_RX_ERROR; if (!(XGBE_ADV(&pdata->phy, 10000baseKR_Full))) return (XGBE_AN_NO_LINK); if (pdata->kr_state != XGBE_RX_BPA) return (XGBE_AN_NO_LINK); } xgbe_an_disable(pdata); xgbe_switch_mode(pdata); xgbe_an_restart(pdata); return (XGBE_AN_INCOMPAT_LINK); } static void xgbe_an37_isr(struct xgbe_prv_data *pdata) { unsigned int reg; /* Disable AN interrupts */ xgbe_an37_disable_interrupts(pdata); /* Save the interrupt(s) that fired */ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT); pdata->an_int = reg & XGBE_AN_CL37_INT_MASK; pdata->an_status = reg & ~XGBE_AN_CL37_INT_MASK; if (pdata->an_int) { /* Clear the interrupt(s) that fired and process them */ reg &= ~XGBE_AN_CL37_INT_MASK; XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT, reg); xgbe_an_state_machine(pdata); } else { /* Enable AN interrupts */ xgbe_an37_enable_interrupts(pdata); /* Reissue interrupt if status is not clear */ if (pdata->vdata->irq_reissue_support) XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 3); } } static void xgbe_an73_isr(struct xgbe_prv_data *pdata) { /* Disable AN interrupts */ xgbe_an73_disable_interrupts(pdata); /* Save the interrupt(s) that fired */ pdata->an_int = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT); if (pdata->an_int) { /* Clear the interrupt(s) that fired and process them */ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, ~pdata->an_int); 
xgbe_an_state_machine(pdata); } else { /* Enable AN interrupts */ xgbe_an73_enable_interrupts(pdata); /* Reissue interrupt if status is not clear */ if (pdata->vdata->irq_reissue_support) XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 3); } } static void xgbe_an_isr_task(unsigned long data) { struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data; axgbe_printf(2, "AN interrupt received\n"); switch (pdata->an_mode) { case XGBE_AN_MODE_CL73: case XGBE_AN_MODE_CL73_REDRV: xgbe_an73_isr(pdata); break; case XGBE_AN_MODE_CL37: case XGBE_AN_MODE_CL37_SGMII: xgbe_an37_isr(pdata); break; default: break; } } static void xgbe_an_combined_isr(struct xgbe_prv_data *pdata) { xgbe_an_isr_task((unsigned long)pdata); } static const char * xgbe_state_as_string(enum xgbe_an state) { switch (state) { case XGBE_AN_READY: return ("Ready"); case XGBE_AN_PAGE_RECEIVED: return ("Page-Received"); case XGBE_AN_INCOMPAT_LINK: return ("Incompatible-Link"); case XGBE_AN_COMPLETE: return ("Complete"); case XGBE_AN_NO_LINK: return ("No-Link"); case XGBE_AN_ERROR: return ("Error"); default: return ("Undefined"); } } static void xgbe_an37_state_machine(struct xgbe_prv_data *pdata) { enum xgbe_an cur_state = pdata->an_state; if (!pdata->an_int) return; if (pdata->an_int & XGBE_AN_CL37_INT_CMPLT) { pdata->an_state = XGBE_AN_COMPLETE; pdata->an_int &= ~XGBE_AN_CL37_INT_CMPLT; - - /* If SGMII is enabled, check the link status */ - if ((pdata->an_mode == XGBE_AN_MODE_CL37_SGMII) && - !(pdata->an_status & XGBE_SGMII_AN_LINK_STATUS)) - pdata->an_state = XGBE_AN_NO_LINK; } axgbe_printf(2, "%s: CL37 AN %s\n", __func__, xgbe_state_as_string(pdata->an_state)); cur_state = pdata->an_state; switch (pdata->an_state) { case XGBE_AN_READY: break; case XGBE_AN_COMPLETE: axgbe_printf(2, "Auto negotiation successful\n"); break; case XGBE_AN_NO_LINK: break; default: pdata->an_state = XGBE_AN_ERROR; } if (pdata->an_state == XGBE_AN_ERROR) { axgbe_printf(2, "error during auto-negotiation, state=%u\n", cur_state); pdata->an_int = 0; xgbe_an37_clear_interrupts(pdata); } if (pdata->an_state >= XGBE_AN_COMPLETE) { pdata->an_result = pdata->an_state; pdata->an_state = XGBE_AN_READY; if (pdata->phy_if.phy_impl.an_post) pdata->phy_if.phy_impl.an_post(pdata); axgbe_printf(2, "CL37 AN result: %s\n", xgbe_state_as_string(pdata->an_result)); } axgbe_printf(2, "%s: an_state %d an_int %d an_mode %d an_status %d\n", __func__, pdata->an_state, pdata->an_int, pdata->an_mode, pdata->an_status); xgbe_an37_enable_interrupts(pdata); } static void xgbe_an73_state_machine(struct xgbe_prv_data *pdata) { enum xgbe_an cur_state = pdata->an_state; if (!pdata->an_int) goto out; next_int: if (pdata->an_int & XGBE_AN_CL73_PG_RCV) { pdata->an_state = XGBE_AN_PAGE_RECEIVED; pdata->an_int &= ~XGBE_AN_CL73_PG_RCV; } else if (pdata->an_int & XGBE_AN_CL73_INC_LINK) { pdata->an_state = XGBE_AN_INCOMPAT_LINK; pdata->an_int &= ~XGBE_AN_CL73_INC_LINK; } else if (pdata->an_int & XGBE_AN_CL73_INT_CMPLT) { pdata->an_state = XGBE_AN_COMPLETE; pdata->an_int &= ~XGBE_AN_CL73_INT_CMPLT; } else { pdata->an_state = XGBE_AN_ERROR; } again: axgbe_printf(2, "CL73 AN %s\n", xgbe_state_as_string(pdata->an_state)); cur_state = pdata->an_state; switch (pdata->an_state) { case XGBE_AN_READY: pdata->an_supported = 0; break; case XGBE_AN_PAGE_RECEIVED: pdata->an_state = xgbe_an73_page_received(pdata); pdata->an_supported++; break; case XGBE_AN_INCOMPAT_LINK: pdata->an_supported = 0; pdata->parallel_detect = 0; pdata->an_state = xgbe_an73_incompat_link(pdata); break; case XGBE_AN_COMPLETE: 
pdata->parallel_detect = pdata->an_supported ? 0 : 1; axgbe_printf(2, "%s successful\n", pdata->an_supported ? "Auto negotiation" : "Parallel detection"); break; case XGBE_AN_NO_LINK: break; default: pdata->an_state = XGBE_AN_ERROR; } if (pdata->an_state == XGBE_AN_NO_LINK) { pdata->an_int = 0; xgbe_an73_clear_interrupts(pdata); } else if (pdata->an_state == XGBE_AN_ERROR) { axgbe_printf(2, "error during auto-negotiation, state=%u\n", cur_state); pdata->an_int = 0; xgbe_an73_clear_interrupts(pdata); } if (pdata->an_state >= XGBE_AN_COMPLETE) { pdata->an_result = pdata->an_state; pdata->an_state = XGBE_AN_READY; pdata->kr_state = XGBE_RX_BPA; pdata->kx_state = XGBE_RX_BPA; pdata->an_start = 0; if (pdata->phy_if.phy_impl.an_post) pdata->phy_if.phy_impl.an_post(pdata); axgbe_printf(2, "CL73 AN result: %s\n", xgbe_state_as_string(pdata->an_result)); } if (cur_state != pdata->an_state) goto again; if (pdata->an_int) goto next_int; out: /* Enable AN interrupts on the way out */ xgbe_an73_enable_interrupts(pdata); } static void xgbe_an_state_machine(struct xgbe_prv_data *pdata) { sx_xlock(&pdata->an_mutex); switch (pdata->an_mode) { case XGBE_AN_MODE_CL73: case XGBE_AN_MODE_CL73_REDRV: xgbe_an73_state_machine(pdata); break; case XGBE_AN_MODE_CL37: case XGBE_AN_MODE_CL37_SGMII: xgbe_an37_state_machine(pdata); break; default: break; } /* Reissue interrupt if status is not clear */ if (pdata->vdata->irq_reissue_support) XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 3); sx_xunlock(&pdata->an_mutex); } static void xgbe_an37_init(struct xgbe_prv_data *pdata) { struct xgbe_phy local_phy; unsigned int reg; pdata->phy_if.phy_impl.an_advertising(pdata, &local_phy); axgbe_printf(2, "%s: advertising 0x%x\n", __func__, local_phy.advertising); /* Set up Advertisement register */ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE); if (XGBE_ADV(&local_phy, Pause)) reg |= 0x100; else reg &= ~0x100; if (XGBE_ADV(&local_phy, Asym_Pause)) reg |= 0x80; else reg &= ~0x80; /* Full duplex, but not half */ reg |= XGBE_AN_CL37_FD_MASK; reg &= ~XGBE_AN_CL37_HD_MASK; axgbe_printf(2, "%s: Writing reg: 0x%x\n", __func__, reg); XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE, reg); /* Set up the Control register */ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL); axgbe_printf(2, "%s: AN_ADVERTISE reg 0x%x an_mode %d\n", __func__, reg, pdata->an_mode); reg &= ~XGBE_AN_CL37_TX_CONFIG_MASK; reg &= ~XGBE_AN_CL37_PCS_MODE_MASK; switch (pdata->an_mode) { case XGBE_AN_MODE_CL37: reg |= XGBE_AN_CL37_PCS_MODE_BASEX; break; case XGBE_AN_MODE_CL37_SGMII: reg |= XGBE_AN_CL37_PCS_MODE_SGMII; break; default: break; } reg |= XGBE_AN_CL37_MII_CTRL_8BIT; axgbe_printf(2, "%s: Writing reg: 0x%x\n", __func__, reg); XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg); axgbe_printf(2, "CL37 AN (%s) initialized\n", (pdata->an_mode == XGBE_AN_MODE_CL37) ? "BaseX" : "SGMII"); } static void xgbe_an73_init(struct xgbe_prv_data *pdata) { /* * This local_phy is needed because phy-v2 alters the * advertising flag variable. 
so phy-v1 an_advertising is just copying */ struct xgbe_phy local_phy; unsigned int reg; pdata->phy_if.phy_impl.an_advertising(pdata, &local_phy); /* Set up Advertisement register 3 first */ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); if (XGBE_ADV(&local_phy, 10000baseR_FEC)) reg |= 0xc000; else reg &= ~0xc000; XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, reg); /* Set up Advertisement register 2 next */ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); if (XGBE_ADV(&local_phy, 10000baseKR_Full)) reg |= 0x80; else reg &= ~0x80; if (XGBE_ADV(&local_phy, 1000baseKX_Full) || XGBE_ADV(&local_phy, 2500baseX_Full)) reg |= 0x20; else reg &= ~0x20; XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, reg); /* Set up Advertisement register 1 last */ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE); if (XGBE_ADV(&local_phy, Pause)) reg |= 0x400; else reg &= ~0x400; if (XGBE_ADV(&local_phy, Asym_Pause)) reg |= 0x800; else reg &= ~0x800; /* We don't intend to perform XNP */ reg &= ~XGBE_XNP_NP_EXCHANGE; XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg); axgbe_printf(2, "CL73 AN initialized\n"); } static void xgbe_an_init(struct xgbe_prv_data *pdata) { /* Set up advertisement registers based on current settings */ pdata->an_mode = pdata->phy_if.phy_impl.an_mode(pdata); axgbe_printf(2, "%s: setting up an_mode %d\n", __func__, pdata->an_mode); switch (pdata->an_mode) { case XGBE_AN_MODE_CL73: case XGBE_AN_MODE_CL73_REDRV: xgbe_an73_init(pdata); break; case XGBE_AN_MODE_CL37: case XGBE_AN_MODE_CL37_SGMII: xgbe_an37_init(pdata); break; default: break; } } static const char * xgbe_phy_fc_string(struct xgbe_prv_data *pdata) { if (pdata->tx_pause && pdata->rx_pause) return ("rx/tx"); else if (pdata->rx_pause) return ("rx"); else if (pdata->tx_pause) return ("tx"); else return ("off"); } static const char * xgbe_phy_speed_string(int speed) { switch (speed) { case SPEED_100: return ("100Mbps"); case SPEED_1000: return ("1Gbps"); case SPEED_2500: return ("2.5Gbps"); case SPEED_10000: return ("10Gbps"); case SPEED_UNKNOWN: return ("Unknown"); default: return ("Unsupported"); } } static void xgbe_phy_print_status(struct xgbe_prv_data *pdata) { if (pdata->phy.link) axgbe_printf(0, "Link is UP - %s/%s - flow control %s\n", xgbe_phy_speed_string(pdata->phy.speed), pdata->phy.duplex == DUPLEX_FULL ? 
"Full" : "Half", xgbe_phy_fc_string(pdata)); else axgbe_printf(0, "Link is DOWN\n"); } static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata) { int new_state = 0; axgbe_printf(1, "link %d/%d tx %d/%d rx %d/%d speed %d/%d autoneg %d/%d\n", pdata->phy_link, pdata->phy.link, pdata->tx_pause, pdata->phy.tx_pause, pdata->rx_pause, pdata->phy.rx_pause, pdata->phy_speed, pdata->phy.speed, pdata->pause_autoneg, pdata->phy.pause_autoneg); if (pdata->phy.link) { /* Flow control support */ pdata->pause_autoneg = pdata->phy.pause_autoneg; if (pdata->tx_pause != pdata->phy.tx_pause) { new_state = 1; axgbe_printf(2, "tx pause %d/%d\n", pdata->tx_pause, pdata->phy.tx_pause); pdata->tx_pause = pdata->phy.tx_pause; pdata->hw_if.config_tx_flow_control(pdata); } if (pdata->rx_pause != pdata->phy.rx_pause) { new_state = 1; axgbe_printf(2, "rx pause %d/%d\n", pdata->rx_pause, pdata->phy.rx_pause); pdata->rx_pause = pdata->phy.rx_pause; pdata->hw_if.config_rx_flow_control(pdata); } /* Speed support */ if (pdata->phy_speed != pdata->phy.speed) { new_state = 1; pdata->phy_speed = pdata->phy.speed; } if (pdata->phy_link != pdata->phy.link) { new_state = 1; pdata->phy_link = pdata->phy.link; } } else if (pdata->phy_link) { new_state = 1; pdata->phy_link = 0; pdata->phy_speed = SPEED_UNKNOWN; } axgbe_printf(2, "phy_link %d Link %d new_state %d\n", pdata->phy_link, pdata->phy.link, new_state); if (new_state) xgbe_phy_print_status(pdata); } static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed) { return (pdata->phy_if.phy_impl.valid_speed(pdata, speed)); } static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata) { enum xgbe_mode mode; axgbe_printf(2, "fixed PHY configuration\n"); /* Disable auto-negotiation */ xgbe_an_disable(pdata); /* Set specified mode for specified speed */ mode = pdata->phy_if.phy_impl.get_mode(pdata, pdata->phy.speed); switch (mode) { case XGBE_MODE_KX_1000: case XGBE_MODE_KX_2500: case XGBE_MODE_KR: case XGBE_MODE_SGMII_100: case XGBE_MODE_SGMII_1000: case XGBE_MODE_X: case XGBE_MODE_SFI: break; case XGBE_MODE_UNKNOWN: default: return (-EINVAL); } /* Validate duplex mode */ if (pdata->phy.duplex != DUPLEX_FULL) return (-EINVAL); xgbe_set_mode(pdata, mode); return (0); } static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata, bool set_mode) { int ret; unsigned int reg = 0; sx_xlock(&pdata->an_mutex); set_bit(XGBE_LINK_INIT, &pdata->dev_state); pdata->link_check = ticks; ret = pdata->phy_if.phy_impl.an_config(pdata); if (ret) { axgbe_error("%s: an_config fail %d\n", __func__, ret); goto out; } if (pdata->phy.autoneg != AUTONEG_ENABLE) { ret = xgbe_phy_config_fixed(pdata); if (ret || !pdata->kr_redrv) { if (ret) axgbe_error("%s: fix conf fail %d\n", __func__, ret); goto out; } axgbe_printf(2, "AN redriver support\n"); } else axgbe_printf(2, "AN PHY configuration\n"); /* Disable auto-negotiation interrupt */ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0); reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK); axgbe_printf(2, "%s: set_mode %d AN int reg value 0x%x\n", __func__, set_mode, reg); /* Clear any auto-negotitation interrupts */ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0); /* Start auto-negotiation in a supported mode */ if (set_mode) { /* Start auto-negotiation in a supported mode */ if (xgbe_use_mode(pdata, XGBE_MODE_KR)) { xgbe_set_mode(pdata, XGBE_MODE_KR); } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_2500)) { xgbe_set_mode(pdata, XGBE_MODE_KX_2500); } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_1000)) { xgbe_set_mode(pdata, 
XGBE_MODE_KX_1000); } else if (xgbe_use_mode(pdata, XGBE_MODE_SFI)) { xgbe_set_mode(pdata, XGBE_MODE_SFI); } else if (xgbe_use_mode(pdata, XGBE_MODE_X)) { xgbe_set_mode(pdata, XGBE_MODE_X); } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_1000)) { xgbe_set_mode(pdata, XGBE_MODE_SGMII_1000); } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) { xgbe_set_mode(pdata, XGBE_MODE_SGMII_100); } else { XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07); ret = -EINVAL; goto out; } } /* Disable and stop any in progress auto-negotiation */ xgbe_an_disable_all(pdata); /* Clear any auto-negotiation interrupts */ xgbe_an_clear_interrupts_all(pdata); pdata->an_result = XGBE_AN_READY; pdata->an_state = XGBE_AN_READY; pdata->kr_state = XGBE_RX_BPA; pdata->kx_state = XGBE_RX_BPA; /* Re-enable auto-negotiation interrupt */ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07); reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK); /* Set up advertisement registers based on current settings */ xgbe_an_init(pdata); /* Enable and start auto-negotiation */ xgbe_an_restart(pdata); out: if (ret) { axgbe_printf(0, "%s: set_mode %d AN int reg value 0x%x ret value %d\n", __func__, set_mode, reg, ret); set_bit(XGBE_LINK_ERR, &pdata->dev_state); } else clear_bit(XGBE_LINK_ERR, &pdata->dev_state); sx_unlock(&pdata->an_mutex); return (ret); } static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata) { return (__xgbe_phy_config_aneg(pdata, true)); } static int xgbe_phy_reconfig_aneg(struct xgbe_prv_data *pdata) { return (__xgbe_phy_config_aneg(pdata, false)); } static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata) { return (pdata->an_result == XGBE_AN_COMPLETE); } static void xgbe_check_link_timeout(struct xgbe_prv_data *pdata) { unsigned long link_timeout; link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * hz); if ((int)(ticks - link_timeout) > 0) { axgbe_printf(2, "AN link timeout\n"); xgbe_phy_config_aneg(pdata); } } static enum xgbe_mode xgbe_phy_status_aneg(struct xgbe_prv_data *pdata) { return (pdata->phy_if.phy_impl.an_outcome(pdata)); } static void xgbe_phy_status_result(struct xgbe_prv_data *pdata) { enum xgbe_mode mode; XGBE_ZERO_LP_ADV(&pdata->phy); if ((pdata->phy.autoneg != AUTONEG_ENABLE) || pdata->parallel_detect) mode = xgbe_cur_mode(pdata); else mode = xgbe_phy_status_aneg(pdata); axgbe_printf(3, "%s: xgbe mode %d\n", __func__, mode); switch (mode) { case XGBE_MODE_SGMII_100: pdata->phy.speed = SPEED_100; break; case XGBE_MODE_X: case XGBE_MODE_KX_1000: case XGBE_MODE_SGMII_1000: pdata->phy.speed = SPEED_1000; break; case XGBE_MODE_KX_2500: pdata->phy.speed = SPEED_2500; break; case XGBE_MODE_KR: case XGBE_MODE_SFI: pdata->phy.speed = SPEED_10000; break; case XGBE_MODE_UNKNOWN: default: axgbe_printf(1, "%s: unknown mode\n", __func__); pdata->phy.speed = SPEED_UNKNOWN; } pdata->phy.duplex = DUPLEX_FULL; axgbe_printf(2, "%s: speed %d duplex %d\n", __func__, pdata->phy.speed, pdata->phy.duplex); if (xgbe_set_mode(pdata, mode) && pdata->an_again) xgbe_phy_reconfig_aneg(pdata); } static void xgbe_phy_status(struct xgbe_prv_data *pdata) { bool link_aneg; int an_restart; if (test_bit(XGBE_LINK_ERR, &pdata->dev_state)) { axgbe_error("%s: LINK_ERR\n", __func__); pdata->phy.link = 0; + clear_bit(XGBE_LINK_ERR, &pdata->dev_state); goto adjust_link; } link_aneg = (pdata->phy.autoneg == AUTONEG_ENABLE); axgbe_printf(3, "link_aneg - %d\n", link_aneg); /* Get the link status.
Link status is latched low, so read * once to clear and then read again to get current state */ pdata->phy.link = pdata->phy_if.phy_impl.link_status(pdata, &an_restart); axgbe_printf(1, "link_status returned Link:%d an_restart:%d aneg:%d\n", pdata->phy.link, an_restart, link_aneg); if (an_restart) { xgbe_phy_config_aneg(pdata); return; } if (pdata->phy.link) { axgbe_printf(2, "Link Active\n"); if (link_aneg && !xgbe_phy_aneg_done(pdata)) { axgbe_printf(1, "phy_link set check timeout\n"); xgbe_check_link_timeout(pdata); return; } axgbe_printf(2, "%s: Link write phy_status result\n", __func__); xgbe_phy_status_result(pdata); if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) clear_bit(XGBE_LINK_INIT, &pdata->dev_state); } else { axgbe_printf(2, "Link Inactive\n"); if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) { axgbe_printf(1, "phy_link not set check timeout\n"); xgbe_check_link_timeout(pdata); if (link_aneg) { axgbe_printf(2, "link_aneg case\n"); return; } } xgbe_phy_status_result(pdata); } adjust_link: axgbe_printf(2, "%s: Link %d\n", __func__, pdata->phy.link); xgbe_phy_adjust_link(pdata); } static void xgbe_phy_stop(struct xgbe_prv_data *pdata) { axgbe_printf(2, "stopping PHY\n"); if (!pdata->phy_started) return; /* Indicate the PHY is down */ pdata->phy_started = 0; /* Disable auto-negotiation */ xgbe_an_disable_all(pdata); pdata->phy_if.phy_impl.stop(pdata); pdata->phy.link = 0; xgbe_phy_adjust_link(pdata); } static int xgbe_phy_start(struct xgbe_prv_data *pdata) { - int ret; + int ret = 0; + + if (pdata->phy_started) + return (ret); DBGPR("-->xgbe_phy_start\n"); ret = pdata->phy_if.phy_impl.start(pdata); if (ret) { axgbe_error("%s: impl start ret %d\n", __func__, ret); return (ret); } /* Set initial mode - call the mode setting routines * directly to ensure we are properly configured */ if (xgbe_use_mode(pdata, XGBE_MODE_KR)) { axgbe_printf(2, "%s: KR\n", __func__); xgbe_kr_mode(pdata); } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_2500)) { axgbe_printf(2, "%s: KX 2500\n", __func__); xgbe_kx_2500_mode(pdata); } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_1000)) { axgbe_printf(2, "%s: KX 1000\n", __func__); xgbe_kx_1000_mode(pdata); } else if (xgbe_use_mode(pdata, XGBE_MODE_SFI)) { axgbe_printf(2, "%s: SFI\n", __func__); xgbe_sfi_mode(pdata); } else if (xgbe_use_mode(pdata, XGBE_MODE_X)) { axgbe_printf(2, "%s: X\n", __func__); xgbe_x_mode(pdata); } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_1000)) { axgbe_printf(2, "%s: SGMII 1000\n", __func__); xgbe_sgmii_1000_mode(pdata); } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) { axgbe_printf(2, "%s: SGMII 100\n", __func__); xgbe_sgmii_100_mode(pdata); } else { axgbe_error("%s: invalid mode\n", __func__); ret = -EINVAL; goto err_stop; } /* Indicate the PHY is up and running */ pdata->phy_started = 1; /* Set up advertisement registers based on current settings */ xgbe_an_init(pdata); /* Enable auto-negotiation interrupts */ xgbe_an_enable_interrupts(pdata); ret = xgbe_phy_config_aneg(pdata); if (ret) axgbe_error("%s: phy_config_aneg %d\n", __func__, ret); return (ret); err_stop: pdata->phy_if.phy_impl.stop(pdata); return (ret); } static int xgbe_phy_reset(struct xgbe_prv_data *pdata) { int ret; ret = pdata->phy_if.phy_impl.reset(pdata); if (ret) { axgbe_error("%s: impl phy reset %d\n", __func__, ret); return (ret); } /* Disable auto-negotiation for now */ xgbe_an_disable_all(pdata); /* Clear auto-negotiation interrupts */ xgbe_an_clear_interrupts_all(pdata); return (0); } static int xgbe_phy_best_advertised_speed(struct xgbe_prv_data
*pdata) { if (XGBE_ADV(&pdata->phy, 10000baseKR_Full)) return (SPEED_10000); else if (XGBE_ADV(&pdata->phy, 10000baseT_Full)) return (SPEED_10000); else if (XGBE_ADV(&pdata->phy, 2500baseX_Full)) return (SPEED_2500); else if (XGBE_ADV(&pdata->phy, 2500baseT_Full)) return (SPEED_2500); else if (XGBE_ADV(&pdata->phy, 1000baseKX_Full)) return (SPEED_1000); else if (XGBE_ADV(&pdata->phy, 1000baseT_Full)) return (SPEED_1000); else if (XGBE_ADV(&pdata->phy, 100baseT_Full)) return (SPEED_100); return (SPEED_UNKNOWN); } static void xgbe_phy_exit(struct xgbe_prv_data *pdata) { pdata->phy_if.phy_impl.exit(pdata); } static int xgbe_phy_init(struct xgbe_prv_data *pdata) { int ret = 0; DBGPR("-->xgbe_phy_init\n"); sx_init(&pdata->an_mutex, "axgbe AN lock"); pdata->mdio_mmd = MDIO_MMD_PCS; /* Initialize supported features */ pdata->fec_ability = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECABLE); pdata->fec_ability &= (MDIO_PMA_10GBR_FECABLE_ABLE | MDIO_PMA_10GBR_FECABLE_ERRABLE); /* Setup the phy (including supported features) */ ret = pdata->phy_if.phy_impl.init(pdata); if (ret) return (ret); /* Copy supported link modes to advertising link modes */ XGBE_LM_COPY(&pdata->phy, advertising, &pdata->phy, supported); pdata->phy.address = 0; if (XGBE_ADV(&pdata->phy, Autoneg)) { pdata->phy.autoneg = AUTONEG_ENABLE; pdata->phy.speed = SPEED_UNKNOWN; pdata->phy.duplex = DUPLEX_UNKNOWN; } else { pdata->phy.autoneg = AUTONEG_DISABLE; pdata->phy.speed = xgbe_phy_best_advertised_speed(pdata); pdata->phy.duplex = DUPLEX_FULL; } + pdata->phy_started = 0; pdata->phy.link = 0; pdata->phy.pause_autoneg = pdata->pause_autoneg; pdata->phy.tx_pause = pdata->tx_pause; pdata->phy.rx_pause = pdata->rx_pause; /* Fix up Flow Control advertising */ XGBE_CLR_ADV(&pdata->phy, Pause); XGBE_CLR_ADV(&pdata->phy, Asym_Pause); if (pdata->rx_pause) { XGBE_SET_ADV(&pdata->phy, Pause); XGBE_SET_ADV(&pdata->phy, Asym_Pause); } if (pdata->tx_pause) { if (XGBE_ADV(&pdata->phy, Asym_Pause)) XGBE_CLR_ADV(&pdata->phy, Asym_Pause); else XGBE_SET_ADV(&pdata->phy, Asym_Pause); } return (0); } void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if) { phy_if->phy_init = xgbe_phy_init; phy_if->phy_exit = xgbe_phy_exit; phy_if->phy_reset = xgbe_phy_reset; phy_if->phy_start = xgbe_phy_start; phy_if->phy_stop = xgbe_phy_stop; phy_if->phy_status = xgbe_phy_status; phy_if->phy_config_aneg = xgbe_phy_config_aneg; phy_if->phy_valid_speed = xgbe_phy_valid_speed; phy_if->an_isr = xgbe_an_combined_isr; } diff --git a/sys/dev/axgbe/xgbe-phy-v2.c b/sys/dev/axgbe/xgbe-phy-v2.c index 9a057c34c6c9..d8c372cac642 100644 --- a/sys/dev/axgbe/xgbe-phy-v2.c +++ b/sys/dev/axgbe/xgbe-phy-v2.c @@ -1,3783 +1,3896 @@ /* * AMD 10Gb Ethernet driver * * Copyright (c) 2020 Advanced Micro Devices, Inc. * * This file is available to you under your choice of the following two * licenses: * * License 1: GPLv2 * * This file is free software; you may copy, redistribute and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or (at * your option) any later version. * * This file is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see . 
* * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. unless otherwise expressly agreed to in writing between Synopsys * and you. * * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * * License 2: Modified BSD * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Advanced Micro Devices, Inc. nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * This file incorporates work covered by the following copyright and * permission notice: * The Synopsys DWC ETHER XGMAC Software Driver and documentation * (hereinafter "Software") is an unsupported proprietary work of Synopsys, * Inc. 
unless otherwise expressly agreed to in writing between Synopsys * and you. * * The Software IS NOT an item of Licensed Software or Licensed Product * under any End User Software License Agreement or Agreement for Licensed * Product with Synopsys or any supplement thereto. Permission is hereby * granted, free of charge, to any person obtaining a copy of this software * annotated with this license and the Software, to deal in the Software * without restriction, including without limitation the rights to use, * copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished * to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include #include "xgbe.h" #include "xgbe-common.h" struct mtx xgbe_phy_comm_lock; #define XGBE_PHY_PORT_SPEED_100 BIT(0) #define XGBE_PHY_PORT_SPEED_1000 BIT(1) #define XGBE_PHY_PORT_SPEED_2500 BIT(2) #define XGBE_PHY_PORT_SPEED_10000 BIT(3) #define XGBE_MUTEX_RELEASE 0x80000000 #define XGBE_SFP_DIRECT 7 #define GPIO_MASK_WIDTH 4 /* I2C target addresses */ #define XGBE_SFP_SERIAL_ID_ADDRESS 0x50 #define XGBE_SFP_DIAG_INFO_ADDRESS 0x51 #define XGBE_SFP_PHY_ADDRESS 0x56 #define XGBE_GPIO_ADDRESS_PCA9555 0x20 /* SFP sideband signal indicators */ #define XGBE_GPIO_NO_TX_FAULT BIT(0) #define XGBE_GPIO_NO_RATE_SELECT BIT(1) #define XGBE_GPIO_NO_MOD_ABSENT BIT(2) #define XGBE_GPIO_NO_RX_LOS BIT(3) /* Rate-change complete wait/retry count */ #define XGBE_RATECHANGE_COUNT 500 /* CDR delay values for KR support (in usec) */ #define XGBE_CDR_DELAY_INIT 10000 #define XGBE_CDR_DELAY_INC 10000 #define XGBE_CDR_DELAY_MAX 100000 /* RRC frequency during link status check */ #define XGBE_RRC_FREQUENCY 10 +/* SFP port max PHY probe retries */ +#define XGBE_SFP_PHY_RETRY_MAX 5 + enum xgbe_port_mode { XGBE_PORT_MODE_RSVD = 0, XGBE_PORT_MODE_BACKPLANE, XGBE_PORT_MODE_BACKPLANE_2500, XGBE_PORT_MODE_1000BASE_T, XGBE_PORT_MODE_1000BASE_X, XGBE_PORT_MODE_NBASE_T, XGBE_PORT_MODE_10GBASE_T, XGBE_PORT_MODE_10GBASE_R, XGBE_PORT_MODE_SFP, XGBE_PORT_MODE_MAX, }; enum xgbe_conn_type { XGBE_CONN_TYPE_NONE = 0, XGBE_CONN_TYPE_SFP, XGBE_CONN_TYPE_MDIO, XGBE_CONN_TYPE_RSVD1, XGBE_CONN_TYPE_BACKPLANE, XGBE_CONN_TYPE_MAX, }; /* SFP/SFP+ related definitions */ enum xgbe_sfp_comm { XGBE_SFP_COMM_DIRECT = 0, XGBE_SFP_COMM_PCA9545, }; enum xgbe_sfp_cable { XGBE_SFP_CABLE_UNKNOWN = 0, XGBE_SFP_CABLE_ACTIVE, XGBE_SFP_CABLE_PASSIVE, }; enum xgbe_sfp_base { XGBE_SFP_BASE_UNKNOWN = 0, + XGBE_SFP_BASE_PX, + XGBE_SFP_BASE_BX10, + XGBE_SFP_BASE_100_FX, + XGBE_SFP_BASE_100_LX10, + XGBE_SFP_BASE_100_BX, XGBE_SFP_BASE_1000_T, XGBE_SFP_BASE_1000_SX, 
XGBE_SFP_BASE_1000_LX, XGBE_SFP_BASE_1000_CX, + XGBE_SFP_BASE_1000_BX, XGBE_SFP_BASE_10000_SR, XGBE_SFP_BASE_10000_LR, XGBE_SFP_BASE_10000_LRM, XGBE_SFP_BASE_10000_ER, XGBE_SFP_BASE_10000_CR, }; enum xgbe_sfp_speed { XGBE_SFP_SPEED_UNKNOWN = 0, + XGBE_SFP_SPEED_100, XGBE_SFP_SPEED_100_1000, XGBE_SFP_SPEED_1000, XGBE_SFP_SPEED_10000, + XGBE_SFP_SPEED_25000, }; /* SFP Serial ID Base ID values relative to an offset of 0 */ #define XGBE_SFP_BASE_ID 0 #define XGBE_SFP_ID_SFP 0x03 #define XGBE_SFP_BASE_EXT_ID 1 #define XGBE_SFP_EXT_ID_SFP 0x04 #define XGBE_SFP_BASE_CV 2 #define XGBE_SFP_BASE_CV_CP 0x21 #define XGBE_SFP_BASE_10GBE_CC 3 #define XGBE_SFP_BASE_10GBE_CC_SR BIT(4) #define XGBE_SFP_BASE_10GBE_CC_LR BIT(5) #define XGBE_SFP_BASE_10GBE_CC_LRM BIT(6) #define XGBE_SFP_BASE_10GBE_CC_ER BIT(7) #define XGBE_SFP_BASE_1GBE_CC 6 #define XGBE_SFP_BASE_1GBE_CC_SX BIT(0) #define XGBE_SFP_BASE_1GBE_CC_LX BIT(1) #define XGBE_SFP_BASE_1GBE_CC_CX BIT(2) #define XGBE_SFP_BASE_1GBE_CC_T BIT(3) +#define XGBE_SFP_BASE_100M_CC_LX10 BIT(4) +#define XGBE_SFP_BASE_100M_CC_FX BIT(5) +#define XGBE_SFP_BASE_CC_BX10 BIT(6) +#define XGBE_SFP_BASE_CC_PX BIT(7) #define XGBE_SFP_BASE_CABLE 8 #define XGBE_SFP_BASE_CABLE_PASSIVE BIT(2) #define XGBE_SFP_BASE_CABLE_ACTIVE BIT(3) #define XGBE_SFP_BASE_BR 12 +#define XGBE_SFP_BASE_BR_100M_MIN 0x1 +#define XGBE_SFP_BASE_BR_100M_MAX 0x2 #define XGBE_SFP_BASE_BR_1GBE_MIN 0x0a #define XGBE_SFP_BASE_BR_1GBE_MAX 0x0d #define XGBE_SFP_BASE_BR_10GBE_MIN 0x64 #define XGBE_SFP_BASE_BR_10GBE_MAX 0x68 +#define XGBE_SFP_BASE_BR_25GBE 0xFF + +/* Single mode, length of fiber in units of km */ +#define XGBE_SFP_BASE_SM_LEN_KM 14 +#define XGBE_SFP_BASE_SM_LEN_KM_MIN 0x0A + +/* Single mode, length of fiber in units of 100m */ +#define XGBE_SFP_BASE_SM_LEN_100M 15 +#define XGBE_SFP_BASE_SM_LEN_100M_MIN 0x64 #define XGBE_SFP_BASE_CU_CABLE_LEN 18 #define XGBE_SFP_BASE_VENDOR_NAME 20 #define XGBE_SFP_BASE_VENDOR_NAME_LEN 16 #define XGBE_SFP_BASE_VENDOR_PN 40 #define XGBE_SFP_BASE_VENDOR_PN_LEN 16 #define XGBE_SFP_BASE_VENDOR_REV 56 #define XGBE_SFP_BASE_VENDOR_REV_LEN 4 +/* + * Optical specification compliance - denotes wavelength + * for optical transceivers + */ +#define XGBE_SFP_BASE_OSC 60 +#define XGBE_SFP_BASE_OSC_LEN 2 +#define XGBE_SFP_BASE_OSC_1310 0x051E + #define XGBE_SFP_BASE_CC 63 /* SFP Serial ID Extended ID values relative to an offset of 64 */ #define XGBE_SFP_BASE_VENDOR_SN 4 #define XGBE_SFP_BASE_VENDOR_SN_LEN 16 #define XGBE_SFP_EXTD_OPT1 1 #define XGBE_SFP_EXTD_OPT1_RX_LOS BIT(1) #define XGBE_SFP_EXTD_OPT1_TX_FAULT BIT(3) #define XGBE_SFP_EXTD_DIAG 28 #define XGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2) #define XGBE_SFP_EXTD_SFF_8472 30 #define XGBE_SFP_EXTD_CC 31 struct xgbe_sfp_eeprom { uint8_t base[64]; uint8_t extd[32]; uint8_t vendor[32]; }; #define XGBE_SFP_DIAGS_SUPPORTED(_x) \ ((_x)->extd[XGBE_SFP_EXTD_SFF_8472] && \ !((_x)->extd[XGBE_SFP_EXTD_DIAG] & XGBE_SFP_EXTD_DIAG_ADDR_CHANGE)) #define XGBE_SFP_EEPROM_BASE_LEN 256 #define XGBE_SFP_EEPROM_DIAG_LEN 256 #define XGBE_SFP_EEPROM_MAX (XGBE_SFP_EEPROM_BASE_LEN + \ XGBE_SFP_EEPROM_DIAG_LEN) #define XGBE_BEL_FUSE_VENDOR "BEL-FUSE " #define XGBE_BEL_FUSE_PARTNO "1GBT-SFP06 " struct xgbe_sfp_ascii { union { char vendor[XGBE_SFP_BASE_VENDOR_NAME_LEN + 1]; char partno[XGBE_SFP_BASE_VENDOR_PN_LEN + 1]; char rev[XGBE_SFP_BASE_VENDOR_REV_LEN + 1]; char serno[XGBE_SFP_BASE_VENDOR_SN_LEN + 1]; } u; }; /* MDIO PHY reset types */ enum xgbe_mdio_reset { XGBE_MDIO_RESET_NONE = 0, XGBE_MDIO_RESET_I2C_GPIO, XGBE_MDIO_RESET_INT_GPIO,
XGBE_MDIO_RESET_MAX, }; /* Re-driver related definitions */ enum xgbe_phy_redrv_if { XGBE_PHY_REDRV_IF_MDIO = 0, XGBE_PHY_REDRV_IF_I2C, XGBE_PHY_REDRV_IF_MAX, }; enum xgbe_phy_redrv_model { XGBE_PHY_REDRV_MODEL_4223 = 0, XGBE_PHY_REDRV_MODEL_4227, XGBE_PHY_REDRV_MODEL_MAX, }; enum xgbe_phy_redrv_mode { XGBE_PHY_REDRV_MODE_CX = 5, XGBE_PHY_REDRV_MODE_SR = 9, }; #define XGBE_PHY_REDRV_MODE_REG 0x12b0 /* PHY related configuration information */ struct xgbe_phy_data { enum xgbe_port_mode port_mode; unsigned int port_id; unsigned int port_speeds; enum xgbe_conn_type conn_type; enum xgbe_mode cur_mode; enum xgbe_mode start_mode; unsigned int rrc_count; unsigned int mdio_addr; /* SFP Support */ enum xgbe_sfp_comm sfp_comm; unsigned int sfp_mux_address; unsigned int sfp_mux_channel; unsigned int sfp_gpio_address; unsigned int sfp_gpio_mask; unsigned int sfp_gpio_inputs; unsigned int sfp_gpio_rx_los; unsigned int sfp_gpio_tx_fault; unsigned int sfp_gpio_mod_absent; unsigned int sfp_gpio_rate_select; unsigned int sfp_rx_los; unsigned int sfp_tx_fault; unsigned int sfp_mod_absent; unsigned int sfp_changed; unsigned int sfp_phy_avail; unsigned int sfp_cable_len; enum xgbe_sfp_base sfp_base; enum xgbe_sfp_cable sfp_cable; enum xgbe_sfp_speed sfp_speed; struct xgbe_sfp_eeprom sfp_eeprom; /* External PHY support */ enum xgbe_mdio_mode phydev_mode; uint32_t phy_id; int phydev; enum xgbe_mdio_reset mdio_reset; unsigned int mdio_reset_addr; unsigned int mdio_reset_gpio; + int sfp_phy_retries; /* Re-driver support */ unsigned int redrv; unsigned int redrv_if; unsigned int redrv_addr; unsigned int redrv_lane; unsigned int redrv_model; /* KR AN support */ unsigned int phy_cdr_notrack; unsigned int phy_cdr_delay; uint8_t port_sfp_inputs; }; static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata); static int xgbe_phy_reset(struct xgbe_prv_data *pdata); +static int axgbe_ifmedia_upd(struct ifnet *ifp); +static void axgbe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr); static int xgbe_phy_i2c_xfer(struct xgbe_prv_data *pdata, struct xgbe_i2c_op *i2c_op) { return (pdata->i2c_if.i2c_xfer(pdata, i2c_op)); } static int xgbe_phy_redrv_write(struct xgbe_prv_data *pdata, unsigned int reg, unsigned int val) { struct xgbe_phy_data *phy_data = pdata->phy_data; struct xgbe_i2c_op i2c_op; __be16 *redrv_val; uint8_t redrv_data[5], csum; unsigned int i, retry; int ret; /* High byte of register contains read/write indicator */ redrv_data[0] = ((reg >> 8) & 0xff) << 1; redrv_data[1] = reg & 0xff; redrv_val = (__be16 *)&redrv_data[2]; *redrv_val = cpu_to_be16(val); /* Calculate 1 byte checksum */ csum = 0; for (i = 0; i < 4; i++) { csum += redrv_data[i]; if (redrv_data[i] > csum) csum++; } redrv_data[4] = ~csum; retry = 1; again1: i2c_op.cmd = XGBE_I2C_CMD_WRITE; i2c_op.target = phy_data->redrv_addr; i2c_op.len = sizeof(redrv_data); i2c_op.buf = redrv_data; ret = xgbe_phy_i2c_xfer(pdata, &i2c_op); if (ret) { if ((ret == -EAGAIN) && retry--) goto again1; return (ret); } retry = 1; again2: i2c_op.cmd = XGBE_I2C_CMD_READ; i2c_op.target = phy_data->redrv_addr; i2c_op.len = 1; i2c_op.buf = redrv_data; ret = xgbe_phy_i2c_xfer(pdata, &i2c_op); if (ret) { if ((ret == -EAGAIN) && retry--) goto again2; return (ret); } if (redrv_data[0] != 0xff) { axgbe_error("Redriver write checksum error\n"); ret = -EIO; } return (ret); } static int xgbe_phy_i2c_write(struct xgbe_prv_data *pdata, unsigned int target, void *val, unsigned int val_len) { struct xgbe_i2c_op i2c_op; int retry, ret; retry = 1; again: /* Write the specified
register */ i2c_op.cmd = XGBE_I2C_CMD_WRITE; i2c_op.target = target; i2c_op.len = val_len; i2c_op.buf = val; ret = xgbe_phy_i2c_xfer(pdata, &i2c_op); if ((ret == -EAGAIN) && retry--) goto again; return (ret); } static int xgbe_phy_i2c_read(struct xgbe_prv_data *pdata, unsigned int target, void *reg, unsigned int reg_len, void *val, unsigned int val_len) { struct xgbe_i2c_op i2c_op; int retry, ret; axgbe_printf(3, "%s: target 0x%x reg_len %d val_len %d\n", __func__, target, reg_len, val_len); retry = 1; again1: /* Set the specified register to read */ i2c_op.cmd = XGBE_I2C_CMD_WRITE; i2c_op.target = target; i2c_op.len = reg_len; i2c_op.buf = reg; ret = xgbe_phy_i2c_xfer(pdata, &i2c_op); axgbe_printf(3, "%s: ret1 %d retry %d\n", __func__, ret, retry); if (ret) { if ((ret == -EAGAIN) && retry--) goto again1; return (ret); } retry = 1; again2: /* Read the specified register */ i2c_op.cmd = XGBE_I2C_CMD_READ; i2c_op.target = target; i2c_op.len = val_len; i2c_op.buf = val; ret = xgbe_phy_i2c_xfer(pdata, &i2c_op); axgbe_printf(3, "%s: ret2 %d retry %d\n", __func__, ret, retry); if ((ret == -EAGAIN) && retry--) goto again2; return (ret); } static int xgbe_phy_sfp_put_mux(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; struct xgbe_i2c_op i2c_op; uint8_t mux_channel; if (phy_data->sfp_comm == XGBE_SFP_COMM_DIRECT) return (0); /* Select no mux channels */ mux_channel = 0; i2c_op.cmd = XGBE_I2C_CMD_WRITE; i2c_op.target = phy_data->sfp_mux_address; i2c_op.len = sizeof(mux_channel); i2c_op.buf = &mux_channel; return (xgbe_phy_i2c_xfer(pdata, &i2c_op)); } static int xgbe_phy_sfp_get_mux(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; struct xgbe_i2c_op i2c_op; uint8_t mux_channel; if (phy_data->sfp_comm == XGBE_SFP_COMM_DIRECT) return (0); /* Select desired mux channel */ mux_channel = 1 << phy_data->sfp_mux_channel; i2c_op.cmd = XGBE_I2C_CMD_WRITE; i2c_op.target = phy_data->sfp_mux_address; i2c_op.len = sizeof(mux_channel); i2c_op.buf = &mux_channel; return (xgbe_phy_i2c_xfer(pdata, &i2c_op)); } static void xgbe_phy_put_comm_ownership(struct xgbe_prv_data *pdata) { mtx_unlock(&xgbe_phy_comm_lock); } static int xgbe_phy_get_comm_ownership(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; unsigned long timeout; unsigned int mutex_id; /* The I2C and MDIO/GPIO bus is multiplexed between multiple devices, so * the driver needs to take the software mutex and then the hardware * mutexes before being able to use the busses.
*/ mtx_lock(&xgbe_phy_comm_lock); /* Clear the mutexes */ XP_IOWRITE(pdata, XP_I2C_MUTEX, XGBE_MUTEX_RELEASE); XP_IOWRITE(pdata, XP_MDIO_MUTEX, XGBE_MUTEX_RELEASE); /* Mutex formats are the same for I2C and MDIO/GPIO */ mutex_id = 0; XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ID, phy_data->port_id); XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ACTIVE, 1); timeout = ticks + (5 * hz); while (ticks < timeout) { /* Must be all zeroes in order to obtain the mutex */ if (XP_IOREAD(pdata, XP_I2C_MUTEX) || XP_IOREAD(pdata, XP_MDIO_MUTEX)) { DELAY(200); continue; } /* Obtain the mutex */ XP_IOWRITE(pdata, XP_I2C_MUTEX, mutex_id); XP_IOWRITE(pdata, XP_MDIO_MUTEX, mutex_id); return (0); } mtx_unlock(&xgbe_phy_comm_lock); axgbe_error("unable to obtain hardware mutexes\n"); return (-ETIMEDOUT); } static int xgbe_phy_mdio_mii_write(struct xgbe_prv_data *pdata, int addr, int reg, uint16_t val) { struct xgbe_phy_data *phy_data = pdata->phy_data; if (reg & MII_ADDR_C45) { if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45) return (-ENOTSUP); } else { if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22) return (-ENOTSUP); } return (pdata->hw_if.write_ext_mii_regs(pdata, addr, reg, val)); } static int xgbe_phy_i2c_mii_write(struct xgbe_prv_data *pdata, int reg, uint16_t val) { __be16 *mii_val; uint8_t mii_data[3]; int ret; ret = xgbe_phy_sfp_get_mux(pdata); if (ret) return (ret); mii_data[0] = reg & 0xff; mii_val = (__be16 *)&mii_data[1]; *mii_val = cpu_to_be16(val); ret = xgbe_phy_i2c_write(pdata, XGBE_SFP_PHY_ADDRESS, mii_data, sizeof(mii_data)); xgbe_phy_sfp_put_mux(pdata); return (ret); } int xgbe_phy_mii_write(struct xgbe_prv_data *pdata, int addr, int reg, uint16_t val) { struct xgbe_phy_data *phy_data = pdata->phy_data; int ret; axgbe_printf(3, "%s: addr %d reg %d val %#x\n", __func__, addr, reg, val); ret = xgbe_phy_get_comm_ownership(pdata); if (ret) return (ret); if (phy_data->conn_type == XGBE_CONN_TYPE_SFP) ret = xgbe_phy_i2c_mii_write(pdata, reg, val); else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO) ret = xgbe_phy_mdio_mii_write(pdata, addr, reg, val); else ret = -ENOTSUP; xgbe_phy_put_comm_ownership(pdata); return (ret); } static int xgbe_phy_mdio_mii_read(struct xgbe_prv_data *pdata, int addr, int reg) { struct xgbe_phy_data *phy_data = pdata->phy_data; if (reg & MII_ADDR_C45) { if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL45) return (-ENOTSUP); } else { if (phy_data->phydev_mode != XGBE_MDIO_MODE_CL22) return (-ENOTSUP); } return (pdata->hw_if.read_ext_mii_regs(pdata, addr, reg)); } static int xgbe_phy_i2c_mii_read(struct xgbe_prv_data *pdata, int reg) { __be16 mii_val; uint8_t mii_reg; int ret; ret = xgbe_phy_sfp_get_mux(pdata); if (ret) return (ret); mii_reg = reg; ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_PHY_ADDRESS, &mii_reg, sizeof(mii_reg), &mii_val, sizeof(mii_val)); if (!ret) ret = be16_to_cpu(mii_val); xgbe_phy_sfp_put_mux(pdata); return (ret); } int xgbe_phy_mii_read(struct xgbe_prv_data *pdata, int addr, int reg) { struct xgbe_phy_data *phy_data = pdata->phy_data; int ret; axgbe_printf(3, "%s: addr %d reg %d\n", __func__, addr, reg); ret = xgbe_phy_get_comm_ownership(pdata); if (ret) return (ret); if (phy_data->conn_type == XGBE_CONN_TYPE_SFP) ret = xgbe_phy_i2c_mii_read(pdata, reg); else if (phy_data->conn_type & XGBE_CONN_TYPE_MDIO) ret = xgbe_phy_mdio_mii_read(pdata, addr, reg); else ret = -ENOTSUP; xgbe_phy_put_comm_ownership(pdata); return (ret); } static void xgbe_phy_sfp_phy_settings(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; if 
(!phy_data->sfp_mod_absent && !phy_data->sfp_changed) return; XGBE_ZERO_SUP(&pdata->phy); if (phy_data->sfp_mod_absent) { pdata->phy.speed = SPEED_UNKNOWN; pdata->phy.duplex = DUPLEX_UNKNOWN; pdata->phy.autoneg = AUTONEG_ENABLE; pdata->phy.pause_autoneg = AUTONEG_ENABLE; XGBE_SET_SUP(&pdata->phy, Autoneg); XGBE_SET_SUP(&pdata->phy, Pause); XGBE_SET_SUP(&pdata->phy, Asym_Pause); XGBE_SET_SUP(&pdata->phy, TP); XGBE_SET_SUP(&pdata->phy, FIBRE); XGBE_LM_COPY(&pdata->phy, advertising, &pdata->phy, supported); return; } switch (phy_data->sfp_base) { + case XGBE_SFP_BASE_100_FX: + case XGBE_SFP_BASE_100_LX10: + case XGBE_SFP_BASE_100_BX: + pdata->phy.speed = SPEED_100; + pdata->phy.duplex = DUPLEX_FULL; + pdata->phy.autoneg = AUTONEG_DISABLE; + pdata->phy.pause_autoneg = AUTONEG_DISABLE; + break; case XGBE_SFP_BASE_1000_T: case XGBE_SFP_BASE_1000_SX: case XGBE_SFP_BASE_1000_LX: case XGBE_SFP_BASE_1000_CX: pdata->phy.speed = SPEED_UNKNOWN; pdata->phy.duplex = DUPLEX_UNKNOWN; pdata->phy.autoneg = AUTONEG_ENABLE; pdata->phy.pause_autoneg = AUTONEG_ENABLE; XGBE_SET_SUP(&pdata->phy, Autoneg); XGBE_SET_SUP(&pdata->phy, Pause); XGBE_SET_SUP(&pdata->phy, Asym_Pause); if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T) { if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) XGBE_SET_SUP(&pdata->phy, 100baseT_Full); if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) XGBE_SET_SUP(&pdata->phy, 1000baseT_Full); } else { if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) XGBE_SET_SUP(&pdata->phy, 1000baseX_Full); } break; + case XGBE_SFP_BASE_1000_BX: + case XGBE_SFP_BASE_PX: + pdata->phy.speed = SPEED_1000; + pdata->phy.duplex = DUPLEX_FULL; + pdata->phy.autoneg = AUTONEG_DISABLE; + pdata->phy.pause_autoneg = AUTONEG_DISABLE; + break; case XGBE_SFP_BASE_10000_SR: case XGBE_SFP_BASE_10000_LR: case XGBE_SFP_BASE_10000_LRM: case XGBE_SFP_BASE_10000_ER: case XGBE_SFP_BASE_10000_CR: pdata->phy.speed = SPEED_10000; pdata->phy.duplex = DUPLEX_FULL; pdata->phy.autoneg = AUTONEG_DISABLE; pdata->phy.pause_autoneg = AUTONEG_DISABLE; if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) { switch (phy_data->sfp_base) { case XGBE_SFP_BASE_10000_SR: XGBE_SET_SUP(&pdata->phy, 10000baseSR_Full); break; case XGBE_SFP_BASE_10000_LR: XGBE_SET_SUP(&pdata->phy, 10000baseLR_Full); break; case XGBE_SFP_BASE_10000_LRM: XGBE_SET_SUP(&pdata->phy, 10000baseLRM_Full); break; case XGBE_SFP_BASE_10000_ER: XGBE_SET_SUP(&pdata->phy, 10000baseER_Full); break; case XGBE_SFP_BASE_10000_CR: XGBE_SET_SUP(&pdata->phy, 10000baseCR_Full); break; default: break; } } break; default: pdata->phy.speed = SPEED_UNKNOWN; pdata->phy.duplex = DUPLEX_UNKNOWN; pdata->phy.autoneg = AUTONEG_DISABLE; pdata->phy.pause_autoneg = AUTONEG_DISABLE; break; } switch (phy_data->sfp_base) { case XGBE_SFP_BASE_1000_T: case XGBE_SFP_BASE_1000_CX: case XGBE_SFP_BASE_10000_CR: XGBE_SET_SUP(&pdata->phy, TP); break; default: XGBE_SET_SUP(&pdata->phy, FIBRE); break; } XGBE_LM_COPY(&pdata->phy, advertising, &pdata->phy, supported); axgbe_printf(1, "%s: link speed %d sfp_base 0x%x pause_autoneg %d " "advert 0x%x support 0x%x\n", __func__, pdata->phy.speed, phy_data->sfp_base, pdata->phy.pause_autoneg, pdata->phy.advertising, pdata->phy.supported); } static bool xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom, enum xgbe_sfp_speed sfp_speed) { uint8_t *sfp_base, min, max; sfp_base = sfp_eeprom->base; switch (sfp_speed) { + case XGBE_SFP_SPEED_100: + min = XGBE_SFP_BASE_BR_100M_MIN; + max = XGBE_SFP_BASE_BR_100M_MAX; + break; case XGBE_SFP_SPEED_1000: min =
XGBE_SFP_BASE_BR_1GBE_MIN; max = XGBE_SFP_BASE_BR_1GBE_MAX; break; case XGBE_SFP_SPEED_10000: min = XGBE_SFP_BASE_BR_10GBE_MIN; max = XGBE_SFP_BASE_BR_10GBE_MAX; break; + case XGBE_SFP_SPEED_25000: + min = XGBE_SFP_BASE_BR_25GBE; + max = XGBE_SFP_BASE_BR_25GBE; + break; default: return (false); } return ((sfp_base[XGBE_SFP_BASE_BR] >= min) && (sfp_base[XGBE_SFP_BASE_BR] <= max)); } static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; if (phy_data->phydev) phy_data->phydev = 0; + + if (pdata->axgbe_miibus != NULL) { + device_delete_child(pdata->dev, pdata->axgbe_miibus); + pdata->axgbe_miibus = NULL; + } } static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; unsigned int phy_id = phy_data->phy_id; if (phy_data->port_mode != XGBE_PORT_MODE_SFP) return (false); if ((phy_id & 0xfffffff0) != 0x01ff0cc0) return (false); /* Enable Base-T AN */ xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x16, 0x0001); xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x00, 0x9140); xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x16, 0x0000); /* Enable SGMII at 100Base-T/1000Base-T Full Duplex */ xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x1b, 0x9084); xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x09, 0x0e00); xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x00, 0x8140); xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x04, 0x0d01); xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x00, 0x9140); axgbe_printf(3, "Finisar PHY quirk in place\n"); return (true); } static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom; unsigned int phy_id = phy_data->phy_id; int reg; if (phy_data->port_mode != XGBE_PORT_MODE_SFP) return (false); if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME], XGBE_BEL_FUSE_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN)) return (false); /* For Bel-Fuse, use the extra AN flag */ pdata->an_again = 1; if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_PN], XGBE_BEL_FUSE_PARTNO, XGBE_SFP_BASE_VENDOR_PN_LEN)) return (false); if ((phy_id & 0xfffffff0) != 0x03625d10) return (false); /* Disable RGMII mode */ xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x18, 0x7007); reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x18); xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x18, reg & ~0x0080); /* Enable fiber register bank */ xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x1c, 0x7c00); reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x1c); reg &= 0x03ff; reg &= ~0x0001; xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x1c, 0x8000 | 0x7c00 | reg | 0x0001); /* Power down SerDes */ reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x00); xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x00, reg | 0x00800); /* Configure SGMII-to-Copper mode */ xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x1c, 0x7c00); reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x1c); reg &= 0x03ff; reg &= ~0x0006; xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x1c, 0x8000 | 0x7c00 | reg | 0x0004); /* Power up SerDes */ reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x00); xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x00, reg & ~0x00800); /* Enable copper register bank */ xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x1c, 0x7c00); reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x1c); reg &= 0x03ff; reg &= ~0x0001; xgbe_phy_mii_write(pdata, 
phy_data->mdio_addr, 0x1c, 0x8000 | 0x7c00 | reg); /* Power up SerDes */ reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x00); xgbe_phy_mii_write(pdata, phy_data->mdio_addr, 0x00, reg & ~0x00800); axgbe_printf(3, "BelFuse PHY quirk in place\n"); return (true); } static void xgbe_phy_external_phy_quirks(struct xgbe_prv_data *pdata) { if (xgbe_phy_belfuse_phy_quirks(pdata)) return; if (xgbe_phy_finisar_phy_quirks(pdata)) return; } static int xgbe_get_phy_id(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; uint32_t oui, model, phy_id1, phy_id2; int phy_reg; phy_reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x02); if (phy_reg < 0) return (-EIO); phy_id1 = (phy_reg & 0xffff); phy_data->phy_id = (phy_reg & 0xffff) << 16; phy_reg = xgbe_phy_mii_read(pdata, phy_data->mdio_addr, 0x03); if (phy_reg < 0) return (-EIO); phy_id2 = (phy_reg & 0xffff); phy_data->phy_id |= (phy_reg & 0xffff); oui = MII_OUI(phy_id1, phy_id2); model = MII_MODEL(phy_id2); axgbe_printf(2, "%s: phy_id1: 0x%x phy_id2: 0x%x oui: %#x model %#x\n", __func__, phy_id1, phy_id2, oui, model); return (0); } -static int -xgbe_phy_start_aneg(struct xgbe_prv_data *pdata) -{ - uint16_t ctl = 0; - int changed = 0; - int ret; - - if (AUTONEG_ENABLE != pdata->phy.autoneg) { - if (SPEED_1000 == pdata->phy.speed) - ctl |= BMCR_SPEED1; - else if (SPEED_100 == pdata->phy.speed) - ctl |= BMCR_SPEED100; - - if (DUPLEX_FULL == pdata->phy.duplex) - ctl |= BMCR_FDX; - - ret = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_BMCR); - if (ret) - return (ret); - - ret = xgbe_phy_mii_write(pdata, pdata->mdio_addr, MII_BMCR, - (ret & ~(~(BMCR_LOOP | BMCR_ISO | BMCR_PDOWN))) | ctl); - } - - ctl = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_BMCR); - if (ctl < 0) - return (ctl); - - if (!(ctl & BMCR_AUTOEN) || (ctl & BMCR_ISO)) - changed = 1; - - if (changed > 0) { - ret = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_BMCR); - if (ret) - return (ret); - - ret = xgbe_phy_mii_write(pdata, pdata->mdio_addr, MII_BMCR, - (ret & ~(BMCR_ISO)) | (BMCR_AUTOEN | BMCR_STARTNEG)); - } - - return (0); -} - static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; int ret; axgbe_printf(2, "%s: phydev %d phydev_mode %d sfp_phy_avail %d phy_id " "0x%08x\n", __func__, phy_data->phydev, phy_data->phydev_mode, phy_data->sfp_phy_avail, phy_data->phy_id); /* If we already have a PHY, just return */ if (phy_data->phydev) { axgbe_printf(3, "%s: phy present already\n", __func__); return (0); } /* Clear the extra AN flag */ pdata->an_again = 0; /* Check for the use of an external PHY */ if (phy_data->phydev_mode == XGBE_MDIO_MODE_NONE) { axgbe_printf(3, "%s: phydev_mode %d\n", __func__, phy_data->phydev_mode); return (0); } /* For SFP, only use an external PHY if available */ if ((phy_data->port_mode == XGBE_PORT_MODE_SFP) && !phy_data->sfp_phy_avail) { axgbe_printf(3, "%s: port_mode %d avail %d\n", __func__, phy_data->port_mode, phy_data->sfp_phy_avail); return (0); } /* Set the proper MDIO mode for the PHY */ ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->mdio_addr, phy_data->phydev_mode); if (ret) { axgbe_error("mdio port/clause not compatible (%u/%u) ret %d\n", phy_data->mdio_addr, phy_data->phydev_mode, ret); return (ret); } ret = xgbe_get_phy_id(pdata); if (ret) return (ret); axgbe_printf(2, "Get phy_id 0x%08x\n", phy_data->phy_id); phy_data->phydev = 1; xgbe_phy_external_phy_quirks(pdata); - xgbe_phy_start_aneg(pdata); return (0); } static void 
xgbe_phy_sfp_external_phy(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; int ret; axgbe_printf(3, "%s: sfp_changed: 0x%x\n", __func__, phy_data->sfp_changed); - if (!phy_data->sfp_changed) + if (!phy_data->sfp_phy_retries && !phy_data->sfp_changed) return; phy_data->sfp_phy_avail = 0; if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T) return; /* Check access to the PHY by reading CTRL1 */ ret = xgbe_phy_i2c_mii_read(pdata, MII_BMCR); if (ret < 0) { - axgbe_error("%s: ext phy fail %d\n", __func__, ret); + phy_data->sfp_phy_retries++; + if (phy_data->sfp_phy_retries >= XGBE_SFP_PHY_RETRY_MAX) + phy_data->sfp_phy_retries = 0; + axgbe_printf(1, "%s: ext phy fail %d. retrying.\n", __func__, ret); return; } /* Successfully accessed the PHY */ phy_data->sfp_phy_avail = 1; axgbe_printf(3, "Successfully accessed External PHY\n"); + + /* Attach external PHY to the miibus */ + ret = mii_attach(pdata->dev, &pdata->axgbe_miibus, pdata->netdev, + (ifm_change_cb_t)axgbe_ifmedia_upd, + (ifm_stat_cb_t)axgbe_ifmedia_sts, BMSR_DEFCAPMASK, + pdata->mdio_addr, MII_OFFSET_ANY, MIIF_FORCEANEG); + + if (ret) { + axgbe_error("mii attach failed with err=(%d)\n", ret); + } } static bool xgbe_phy_check_sfp_rx_los(struct xgbe_phy_data *phy_data) { uint8_t *sfp_extd = phy_data->sfp_eeprom.extd; if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_RX_LOS)) return (false); if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS) return (false); if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_rx_los)) return (true); return (false); } static bool xgbe_phy_check_sfp_tx_fault(struct xgbe_phy_data *phy_data) { uint8_t *sfp_extd = phy_data->sfp_eeprom.extd; if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_TX_FAULT)) return (false); if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT) return (false); if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_tx_fault)) return (true); return (false); } static bool xgbe_phy_check_sfp_mod_absent(struct xgbe_phy_data *phy_data) { if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT) return (false); if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_mod_absent)) return (true); return (false); } static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom; uint8_t *sfp_base; + uint16_t wavelen = 0; sfp_base = sfp_eeprom->base; if (sfp_base[XGBE_SFP_BASE_ID] != XGBE_SFP_ID_SFP) { axgbe_error("base id %d\n", sfp_base[XGBE_SFP_BASE_ID]); return; } if (sfp_base[XGBE_SFP_BASE_EXT_ID] != XGBE_SFP_EXT_ID_SFP) { axgbe_error("base id %d\n", sfp_base[XGBE_SFP_BASE_EXT_ID]); return; } /* Update transceiver signals (eeprom extd/options) */ phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data); phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data); /* Assume ACTIVE cable unless told it is PASSIVE */ if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_PASSIVE) { phy_data->sfp_cable = XGBE_SFP_CABLE_PASSIVE; phy_data->sfp_cable_len = sfp_base[XGBE_SFP_BASE_CU_CABLE_LEN]; } else phy_data->sfp_cable = XGBE_SFP_CABLE_ACTIVE; + wavelen = (sfp_base[XGBE_SFP_BASE_OSC] << 8) | sfp_base[XGBE_SFP_BASE_OSC + 1]; + /* * Determine the type of SFP. Certain 10G SFP+ modules read as * 1000BASE-CX. To prevent 10G DAC cables from being recognized as * 1G, we first check if it is a DAC and the bitrate is 10G. + * If it's greater than 10G, we assume the DAC is capable + * of multiple bitrates, set the MAC to 10G and hope for the best.
*/ if (((sfp_base[XGBE_SFP_BASE_CV] & XGBE_SFP_BASE_CV_CP) || - (phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE)) && - xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000) + (phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE)) && + (xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000) || + xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_25000))) phy_data->sfp_base = XGBE_SFP_BASE_10000_CR; else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR) phy_data->sfp_base = XGBE_SFP_BASE_10000_SR; else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_LR) phy_data->sfp_base = XGBE_SFP_BASE_10000_LR; else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_LRM) phy_data->sfp_base = XGBE_SFP_BASE_10000_LRM; else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_ER) phy_data->sfp_base = XGBE_SFP_BASE_10000_ER; else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_SX) phy_data->sfp_base = XGBE_SFP_BASE_1000_SX; else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_LX) phy_data->sfp_base = XGBE_SFP_BASE_1000_LX; else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_CX) phy_data->sfp_base = XGBE_SFP_BASE_1000_CX; else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_T) phy_data->sfp_base = XGBE_SFP_BASE_1000_T; + else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_100M_CC_LX10) + phy_data->sfp_base = XGBE_SFP_BASE_100_LX10; + else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_100M_CC_FX) + phy_data->sfp_base = XGBE_SFP_BASE_100_FX; + else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_CC_BX10) { + /* BX10 can be either 100 or 1000 */ + if (xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_100)) { + phy_data->sfp_base = XGBE_SFP_BASE_100_BX; + } else { + /* default to 1000 */ + phy_data->sfp_base = XGBE_SFP_BASE_1000_BX; + } + } else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_CC_PX) + phy_data->sfp_base = XGBE_SFP_BASE_PX; + else if (xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_1000) + && (sfp_base[XGBE_SFP_BASE_SM_LEN_KM] >= XGBE_SFP_BASE_SM_LEN_KM_MIN + || sfp_base[XGBE_SFP_BASE_SM_LEN_100M] >= XGBE_SFP_BASE_SM_LEN_100M_MIN) + && wavelen >= XGBE_SFP_BASE_OSC_1310) + phy_data->sfp_base = XGBE_SFP_BASE_1000_BX; + else if (xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_100) + && (sfp_base[XGBE_SFP_BASE_SM_LEN_KM] >= XGBE_SFP_BASE_SM_LEN_KM_MIN + || sfp_base[XGBE_SFP_BASE_SM_LEN_100M] >= XGBE_SFP_BASE_SM_LEN_100M_MIN) + && wavelen >= XGBE_SFP_BASE_OSC_1310) + phy_data->sfp_base = XGBE_SFP_BASE_100_BX; switch (phy_data->sfp_base) { + case XGBE_SFP_BASE_100_FX: + case XGBE_SFP_BASE_100_LX10: + case XGBE_SFP_BASE_100_BX: + phy_data->sfp_speed = XGBE_SFP_SPEED_100; + break; case XGBE_SFP_BASE_1000_T: phy_data->sfp_speed = XGBE_SFP_SPEED_100_1000; break; + case XGBE_SFP_BASE_PX: case XGBE_SFP_BASE_1000_SX: case XGBE_SFP_BASE_1000_LX: case XGBE_SFP_BASE_1000_CX: + case XGBE_SFP_BASE_1000_BX: phy_data->sfp_speed = XGBE_SFP_SPEED_1000; break; case XGBE_SFP_BASE_10000_SR: case XGBE_SFP_BASE_10000_LR: case XGBE_SFP_BASE_10000_LRM: case XGBE_SFP_BASE_10000_ER: case XGBE_SFP_BASE_10000_CR: phy_data->sfp_speed = XGBE_SFP_SPEED_10000; break; default: break; } axgbe_printf(3, "%s: sfp_base: 0x%x sfp_speed: 0x%x sfp_cable: 0x%x " "rx_los 0x%x tx_fault 0x%x\n", __func__, phy_data->sfp_base, phy_data->sfp_speed, phy_data->sfp_cable, phy_data->sfp_rx_los, phy_data->sfp_tx_fault); } static void xgbe_phy_sfp_eeprom_info(struct xgbe_prv_data *pdata, struct xgbe_sfp_eeprom *sfp_eeprom) { struct xgbe_sfp_ascii
sfp_ascii; char *sfp_data = (char *)&sfp_ascii; - axgbe_printf(3, "SFP detected:\n"); + axgbe_printf(0, "SFP detected:\n"); memcpy(sfp_data, &sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME], XGBE_SFP_BASE_VENDOR_NAME_LEN); sfp_data[XGBE_SFP_BASE_VENDOR_NAME_LEN] = '\0'; - axgbe_printf(3, " vendor: %s\n", + axgbe_printf(0, " vendor: %s\n", sfp_data); memcpy(sfp_data, &sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_PN], XGBE_SFP_BASE_VENDOR_PN_LEN); sfp_data[XGBE_SFP_BASE_VENDOR_PN_LEN] = '\0'; - axgbe_printf(3, " part number: %s\n", + axgbe_printf(0, " part number: %s\n", sfp_data); memcpy(sfp_data, &sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_REV], XGBE_SFP_BASE_VENDOR_REV_LEN); sfp_data[XGBE_SFP_BASE_VENDOR_REV_LEN] = '\0'; - axgbe_printf(3, " revision level: %s\n", + axgbe_printf(0, " revision level: %s\n", sfp_data); memcpy(sfp_data, &sfp_eeprom->extd[XGBE_SFP_BASE_VENDOR_SN], XGBE_SFP_BASE_VENDOR_SN_LEN); sfp_data[XGBE_SFP_BASE_VENDOR_SN_LEN] = '\0'; - axgbe_printf(3, " serial number: %s\n", + axgbe_printf(0, " serial number: %s\n", sfp_data); } static bool xgbe_phy_sfp_verify_eeprom(uint8_t cc_in, uint8_t *buf, unsigned int len) { uint8_t cc; for (cc = 0; len; buf++, len--) cc += *buf; return ((cc == cc_in) ? true : false); } static void dump_sfp_eeprom(struct xgbe_prv_data *pdata, uint8_t *sfp_base) { axgbe_printf(3, "sfp_base[XGBE_SFP_BASE_ID] : 0x%04x\n", sfp_base[XGBE_SFP_BASE_ID]); axgbe_printf(3, "sfp_base[XGBE_SFP_BASE_EXT_ID] : 0x%04x\n", sfp_base[XGBE_SFP_BASE_EXT_ID]); axgbe_printf(3, "sfp_base[XGBE_SFP_BASE_CABLE] : 0x%04x\n", sfp_base[XGBE_SFP_BASE_CABLE]); } static int xgbe_phy_sfp_read_eeprom(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; struct xgbe_sfp_eeprom sfp_eeprom, *eeprom; uint8_t eeprom_addr, *base; int ret; ret = xgbe_phy_sfp_get_mux(pdata); if (ret) { axgbe_error("I2C error setting SFP MUX\n"); return (ret); } /* Read the SFP serial ID eeprom */ eeprom_addr = 0; ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_SERIAL_ID_ADDRESS, &eeprom_addr, sizeof(eeprom_addr), &sfp_eeprom, sizeof(sfp_eeprom)); - eeprom = &sfp_eeprom; - base = eeprom->base; - dump_sfp_eeprom(pdata, base); if (ret) { axgbe_error("I2C error reading SFP EEPROM\n"); goto put; } + eeprom = &sfp_eeprom; + base = eeprom->base; + dump_sfp_eeprom(pdata, base); + /* Validate the contents read */ if (!xgbe_phy_sfp_verify_eeprom(sfp_eeprom.base[XGBE_SFP_BASE_CC], sfp_eeprom.base, sizeof(sfp_eeprom.base) - 1)) { axgbe_error("verify eeprom base failed\n"); ret = -EINVAL; goto put; } if (!xgbe_phy_sfp_verify_eeprom(sfp_eeprom.extd[XGBE_SFP_EXTD_CC], sfp_eeprom.extd, sizeof(sfp_eeprom.extd) - 1)) { axgbe_error("verify eeprom extd failed\n"); ret = -EINVAL; goto put; } /* Check for an added or changed SFP */ if (memcmp(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom))) { phy_data->sfp_changed = 1; xgbe_phy_sfp_eeprom_info(pdata, &sfp_eeprom); memcpy(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom)); xgbe_phy_free_phy_device(pdata); } else phy_data->sfp_changed = 0; put: xgbe_phy_sfp_put_mux(pdata); return (ret); } static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; uint8_t gpio_reg, gpio_ports[2]; int ret, prev_sfp_inputs = phy_data->port_sfp_inputs; int shift = GPIO_MASK_WIDTH * (3 - phy_data->port_id); /* Read the input port registers */ axgbe_printf(3, "%s: before sfp_mod:%d sfp_gpio_address:0x%x\n", __func__, phy_data->sfp_mod_absent, phy_data->sfp_gpio_address); gpio_reg = 0; ret = xgbe_phy_i2c_read(pdata,
phy_data->sfp_gpio_address, &gpio_reg, sizeof(gpio_reg), gpio_ports, sizeof(gpio_ports)); if (ret) { axgbe_error("%s: I2C error reading SFP GPIO addr:0x%x\n", __func__, phy_data->sfp_gpio_address); return; } phy_data->sfp_gpio_inputs = (gpio_ports[1] << 8) | gpio_ports[0]; phy_data->port_sfp_inputs = (phy_data->sfp_gpio_inputs >> shift) & 0x0F; if (prev_sfp_inputs != phy_data->port_sfp_inputs) axgbe_printf(0, "%s: port_sfp_inputs: 0x%0x\n", __func__, phy_data->port_sfp_inputs); phy_data->sfp_mod_absent = xgbe_phy_check_sfp_mod_absent(phy_data); axgbe_printf(3, "%s: after sfp_mod:%d sfp_gpio_inputs:0x%x\n", __func__, phy_data->sfp_mod_absent, phy_data->sfp_gpio_inputs); } static void xgbe_phy_sfp_mod_absent(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; xgbe_phy_free_phy_device(pdata); phy_data->sfp_mod_absent = 1; phy_data->sfp_phy_avail = 0; memset(&phy_data->sfp_eeprom, 0, sizeof(phy_data->sfp_eeprom)); } static void xgbe_phy_sfp_reset(struct xgbe_phy_data *phy_data) { phy_data->sfp_rx_los = 0; phy_data->sfp_tx_fault = 0; phy_data->sfp_mod_absent = 1; phy_data->sfp_base = XGBE_SFP_BASE_UNKNOWN; phy_data->sfp_cable = XGBE_SFP_CABLE_UNKNOWN; phy_data->sfp_speed = XGBE_SFP_SPEED_UNKNOWN; } static void xgbe_phy_sfp_detect(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; int ret, prev_sfp_state = phy_data->sfp_mod_absent; /* Reset the SFP signals and info */ xgbe_phy_sfp_reset(phy_data); ret = xgbe_phy_get_comm_ownership(pdata); if (ret) return; /* Read the SFP signals and check for module presence */ xgbe_phy_sfp_signals(pdata); if (phy_data->sfp_mod_absent) { if (prev_sfp_state != phy_data->sfp_mod_absent) axgbe_error("%s: mod absent\n", __func__); xgbe_phy_sfp_mod_absent(pdata); goto put; } ret = xgbe_phy_sfp_read_eeprom(pdata); if (ret) { /* Treat any error as if there isn't an SFP plugged in */ axgbe_error("%s: eeprom read failed\n", __func__); xgbe_phy_sfp_reset(phy_data); xgbe_phy_sfp_mod_absent(pdata); goto put; } xgbe_phy_sfp_parse_eeprom(pdata); xgbe_phy_sfp_external_phy(pdata); put: xgbe_phy_sfp_phy_settings(pdata); axgbe_printf(3, "%s: phy speed: 0x%x duplex: 0x%x autoneg: 0x%x " "pause_autoneg: 0x%x\n", __func__, pdata->phy.speed, pdata->phy.duplex, pdata->phy.autoneg, pdata->phy.pause_autoneg); xgbe_phy_put_comm_ownership(pdata); } static int xgbe_phy_module_eeprom(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; uint8_t eeprom_addr, eeprom_data[XGBE_SFP_EEPROM_MAX]; struct xgbe_sfp_eeprom *sfp_eeprom; int ret; if (phy_data->port_mode != XGBE_PORT_MODE_SFP) { ret = -ENXIO; goto done; } if (phy_data->sfp_mod_absent) { ret = -EIO; goto done; } ret = xgbe_phy_get_comm_ownership(pdata); if (ret) { ret = -EIO; goto done; } ret = xgbe_phy_sfp_get_mux(pdata); if (ret) { axgbe_error("I2C error setting SFP MUX\n"); ret = -EIO; goto put_own; } /* Read the SFP serial ID eeprom */ eeprom_addr = 0; ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_SERIAL_ID_ADDRESS, &eeprom_addr, sizeof(eeprom_addr), eeprom_data, XGBE_SFP_EEPROM_BASE_LEN); if (ret) { axgbe_error("I2C error reading SFP EEPROM\n"); ret = -EIO; goto put_mux; } sfp_eeprom = (struct xgbe_sfp_eeprom *)eeprom_data; if (XGBE_SFP_DIAGS_SUPPORTED(sfp_eeprom)) { /* Read the SFP diagnostic eeprom */ eeprom_addr = 0; ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_DIAG_INFO_ADDRESS, &eeprom_addr, sizeof(eeprom_addr), eeprom_data + XGBE_SFP_EEPROM_BASE_LEN, XGBE_SFP_EEPROM_DIAG_LEN); if (ret) { axgbe_error("I2C error reading SFP DIAGS\n"); ret = -EIO; goto 
put_mux; } } put_mux: xgbe_phy_sfp_put_mux(pdata); put_own: xgbe_phy_put_comm_ownership(pdata); done: return (ret); } static int xgbe_phy_module_info(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; if (phy_data->port_mode != XGBE_PORT_MODE_SFP) return (-ENXIO); if (phy_data->sfp_mod_absent) return (-EIO); return (0); } static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; pdata->phy.tx_pause = 0; pdata->phy.rx_pause = 0; if (!phy_data->phydev) return; if (pdata->phy.pause) XGBE_SET_LP_ADV(&pdata->phy, Pause); if (pdata->phy.asym_pause) XGBE_SET_LP_ADV(&pdata->phy, Asym_Pause); axgbe_printf(1, "%s: pause tx/rx %d/%d\n", __func__, pdata->phy.tx_pause, pdata->phy.rx_pause); } static enum xgbe_mode xgbe_phy_an37_sgmii_outcome(struct xgbe_prv_data *pdata) { enum xgbe_mode mode; XGBE_SET_LP_ADV(&pdata->phy, Autoneg); XGBE_SET_LP_ADV(&pdata->phy, TP); axgbe_printf(1, "%s: pause_autoneg %d\n", __func__, pdata->phy.pause_autoneg); /* Use external PHY to determine flow control */ if (pdata->phy.pause_autoneg) xgbe_phy_phydev_flowctrl(pdata); switch (pdata->an_status & XGBE_SGMII_AN_LINK_SPEED) { case XGBE_SGMII_AN_LINK_SPEED_100: if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) { XGBE_SET_LP_ADV(&pdata->phy, 100baseT_Full); mode = XGBE_MODE_SGMII_100; } else { /* Half-duplex not supported */ XGBE_SET_LP_ADV(&pdata->phy, 100baseT_Half); mode = XGBE_MODE_UNKNOWN; } break; case XGBE_SGMII_AN_LINK_SPEED_1000: + default: + /* Default to 1000 */ if (pdata->an_status & XGBE_SGMII_AN_LINK_DUPLEX) { XGBE_SET_LP_ADV(&pdata->phy, 1000baseT_Full); mode = XGBE_MODE_SGMII_1000; } else { /* Half-duplex not supported */ XGBE_SET_LP_ADV(&pdata->phy, 1000baseT_Half); - mode = XGBE_MODE_UNKNOWN; + mode = XGBE_MODE_SGMII_1000; } break; - default: - mode = XGBE_MODE_UNKNOWN; } return (mode); } static enum xgbe_mode xgbe_phy_an37_outcome(struct xgbe_prv_data *pdata) { enum xgbe_mode mode; unsigned int ad_reg, lp_reg; XGBE_SET_LP_ADV(&pdata->phy, Autoneg); XGBE_SET_LP_ADV(&pdata->phy, FIBRE); /* Compare Advertisement and Link Partner register */ ad_reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_ADVERTISE); lp_reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_LP_ABILITY); if (lp_reg & 0x100) XGBE_SET_LP_ADV(&pdata->phy, Pause); if (lp_reg & 0x80) XGBE_SET_LP_ADV(&pdata->phy, Asym_Pause); axgbe_printf(1, "%s: pause_autoneg %d ad_reg 0x%x lp_reg 0x%x\n", __func__, pdata->phy.pause_autoneg, ad_reg, lp_reg); if (pdata->phy.pause_autoneg) { /* Set flow control based on auto-negotiation result */ pdata->phy.tx_pause = 0; pdata->phy.rx_pause = 0; if (ad_reg & lp_reg & 0x100) { pdata->phy.tx_pause = 1; pdata->phy.rx_pause = 1; } else if (ad_reg & lp_reg & 0x80) { if (ad_reg & 0x100) pdata->phy.rx_pause = 1; else if (lp_reg & 0x100) pdata->phy.tx_pause = 1; } } axgbe_printf(1, "%s: pause tx/rx %d/%d\n", __func__, pdata->phy.tx_pause, pdata->phy.rx_pause); if (lp_reg & 0x20) XGBE_SET_LP_ADV(&pdata->phy, 1000baseX_Full); /* Half duplex is not supported */ ad_reg &= lp_reg; mode = (ad_reg & 0x20) ? 
XGBE_MODE_X : XGBE_MODE_UNKNOWN; return (mode); } static enum xgbe_mode xgbe_phy_an73_redrv_outcome(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; enum xgbe_mode mode; unsigned int ad_reg, lp_reg; XGBE_SET_LP_ADV(&pdata->phy, Autoneg); XGBE_SET_LP_ADV(&pdata->phy, Backplane); axgbe_printf(1, "%s: pause_autoneg %d\n", __func__, pdata->phy.pause_autoneg); /* Use external PHY to determine flow control */ if (pdata->phy.pause_autoneg) xgbe_phy_phydev_flowctrl(pdata); /* Compare Advertisement and Link Partner register 2 */ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1); if (lp_reg & 0x80) XGBE_SET_LP_ADV(&pdata->phy, 10000baseKR_Full); if (lp_reg & 0x20) XGBE_SET_LP_ADV(&pdata->phy, 1000baseKX_Full); ad_reg &= lp_reg; if (ad_reg & 0x80) { switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: mode = XGBE_MODE_KR; break; default: mode = XGBE_MODE_SFI; break; } } else if (ad_reg & 0x20) { switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: mode = XGBE_MODE_KX_1000; break; case XGBE_PORT_MODE_1000BASE_X: mode = XGBE_MODE_X; break; case XGBE_PORT_MODE_SFP: switch (phy_data->sfp_base) { case XGBE_SFP_BASE_1000_T: if ((phy_data->phydev) && (pdata->phy.speed == SPEED_100)) mode = XGBE_MODE_SGMII_100; else mode = XGBE_MODE_SGMII_1000; break; case XGBE_SFP_BASE_1000_SX: case XGBE_SFP_BASE_1000_LX: case XGBE_SFP_BASE_1000_CX: default: mode = XGBE_MODE_X; break; } break; default: if ((phy_data->phydev) && (pdata->phy.speed == SPEED_100)) mode = XGBE_MODE_SGMII_100; else mode = XGBE_MODE_SGMII_1000; break; } } else { mode = XGBE_MODE_UNKNOWN; } /* Compare Advertisement and Link Partner register 3 */ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2); if (lp_reg & 0xc000) XGBE_SET_LP_ADV(&pdata->phy, 10000baseR_FEC); return (mode); } static enum xgbe_mode xgbe_phy_an73_outcome(struct xgbe_prv_data *pdata) { enum xgbe_mode mode; unsigned int ad_reg, lp_reg; XGBE_SET_LP_ADV(&pdata->phy, Autoneg); XGBE_SET_LP_ADV(&pdata->phy, Backplane); /* Compare Advertisement and Link Partner register 1 */ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA); if (lp_reg & 0x400) XGBE_SET_LP_ADV(&pdata->phy, Pause); if (lp_reg & 0x800) XGBE_SET_LP_ADV(&pdata->phy, Asym_Pause); axgbe_printf(1, "%s: pause_autoneg %d ad_reg 0x%x lp_reg 0x%x\n", __func__, pdata->phy.pause_autoneg, ad_reg, lp_reg); if (pdata->phy.pause_autoneg) { /* Set flow control based on auto-negotiation result */ pdata->phy.tx_pause = 0; pdata->phy.rx_pause = 0; if (ad_reg & lp_reg & 0x400) { pdata->phy.tx_pause = 1; pdata->phy.rx_pause = 1; } else if (ad_reg & lp_reg & 0x800) { if (ad_reg & 0x400) pdata->phy.rx_pause = 1; else if (lp_reg & 0x400) pdata->phy.tx_pause = 1; } } axgbe_printf(1, "%s: pause tx/rx %d/%d\n", __func__, pdata->phy.tx_pause, pdata->phy.rx_pause); /* Compare Advertisement and Link Partner register 2 */ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1); if (lp_reg & 0x80) XGBE_SET_LP_ADV(&pdata->phy, 10000baseKR_Full); if (lp_reg & 0x20) XGBE_SET_LP_ADV(&pdata->phy, 1000baseKX_Full); ad_reg &= lp_reg; if (ad_reg & 0x80) mode = XGBE_MODE_KR; else if (ad_reg & 0x20) mode = XGBE_MODE_KX_1000; else mode = XGBE_MODE_UNKNOWN; /* Compare Advertisement and Link Partner register 3 */ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, 
MDIO_AN_ADVERTISE + 2); lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2); if (lp_reg & 0xc000) XGBE_SET_LP_ADV(&pdata->phy, 10000baseR_FEC); return (mode); } static enum xgbe_mode xgbe_phy_an_outcome(struct xgbe_prv_data *pdata) { switch (pdata->an_mode) { case XGBE_AN_MODE_CL73: return (xgbe_phy_an73_outcome(pdata)); case XGBE_AN_MODE_CL73_REDRV: return (xgbe_phy_an73_redrv_outcome(pdata)); case XGBE_AN_MODE_CL37: return (xgbe_phy_an37_outcome(pdata)); case XGBE_AN_MODE_CL37_SGMII: return (xgbe_phy_an37_sgmii_outcome(pdata)); default: return (XGBE_MODE_UNKNOWN); } } static void xgbe_phy_an_advertising(struct xgbe_prv_data *pdata, struct xgbe_phy *dphy) { struct xgbe_phy_data *phy_data = pdata->phy_data; XGBE_LM_COPY(dphy, advertising, &pdata->phy, advertising); /* Without a re-driver, just return current advertising */ if (!phy_data->redrv) return; /* With the KR re-driver we need to advertise a single speed */ XGBE_CLR_ADV(dphy, 1000baseKX_Full); XGBE_CLR_ADV(dphy, 10000baseKR_Full); /* Advertise FEC support is present */ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE) XGBE_SET_ADV(dphy, 10000baseR_FEC); switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: XGBE_SET_ADV(dphy, 10000baseKR_Full); break; case XGBE_PORT_MODE_BACKPLANE_2500: XGBE_SET_ADV(dphy, 1000baseKX_Full); break; case XGBE_PORT_MODE_1000BASE_T: case XGBE_PORT_MODE_1000BASE_X: case XGBE_PORT_MODE_NBASE_T: XGBE_SET_ADV(dphy, 1000baseKX_Full); break; case XGBE_PORT_MODE_10GBASE_T: if ((phy_data->phydev) && (pdata->phy.speed == SPEED_10000)) XGBE_SET_ADV(dphy, 10000baseKR_Full); else XGBE_SET_ADV(dphy, 1000baseKX_Full); break; case XGBE_PORT_MODE_10GBASE_R: XGBE_SET_ADV(dphy, 10000baseKR_Full); break; case XGBE_PORT_MODE_SFP: switch (phy_data->sfp_base) { case XGBE_SFP_BASE_1000_T: case XGBE_SFP_BASE_1000_SX: case XGBE_SFP_BASE_1000_LX: case XGBE_SFP_BASE_1000_CX: XGBE_SET_ADV(dphy, 1000baseKX_Full); break; default: XGBE_SET_ADV(dphy, 10000baseKR_Full); break; } break; default: XGBE_SET_ADV(dphy, 10000baseKR_Full); break; } } static int xgbe_phy_an_config(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; int ret; ret = xgbe_phy_find_phy_device(pdata); if (ret) return (ret); axgbe_printf(2, "%s: find_phy_device return %s.\n", __func__, ret ? 
"Failure" : "Success"); if (!phy_data->phydev) return (0); - ret = xgbe_phy_start_aneg(pdata); return (ret); } static enum xgbe_an_mode xgbe_phy_an_sfp_mode(struct xgbe_phy_data *phy_data) { switch (phy_data->sfp_base) { case XGBE_SFP_BASE_1000_T: return (XGBE_AN_MODE_CL37_SGMII); case XGBE_SFP_BASE_1000_SX: case XGBE_SFP_BASE_1000_LX: case XGBE_SFP_BASE_1000_CX: return (XGBE_AN_MODE_CL37); default: return (XGBE_AN_MODE_NONE); } } static enum xgbe_an_mode xgbe_phy_an_mode(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; /* A KR re-driver will always require CL73 AN */ if (phy_data->redrv) return (XGBE_AN_MODE_CL73_REDRV); switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: return (XGBE_AN_MODE_CL73); case XGBE_PORT_MODE_BACKPLANE_2500: return (XGBE_AN_MODE_NONE); case XGBE_PORT_MODE_1000BASE_T: return (XGBE_AN_MODE_CL37_SGMII); case XGBE_PORT_MODE_1000BASE_X: return (XGBE_AN_MODE_CL37); case XGBE_PORT_MODE_NBASE_T: return (XGBE_AN_MODE_CL37_SGMII); case XGBE_PORT_MODE_10GBASE_T: return (XGBE_AN_MODE_CL73); case XGBE_PORT_MODE_10GBASE_R: return (XGBE_AN_MODE_NONE); case XGBE_PORT_MODE_SFP: return (xgbe_phy_an_sfp_mode(phy_data)); default: return (XGBE_AN_MODE_NONE); } } static int xgbe_phy_set_redrv_mode_mdio(struct xgbe_prv_data *pdata, enum xgbe_phy_redrv_mode mode) { struct xgbe_phy_data *phy_data = pdata->phy_data; uint16_t redrv_reg, redrv_val; redrv_reg = XGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000); redrv_val = (uint16_t)mode; return (pdata->hw_if.write_ext_mii_regs(pdata, phy_data->redrv_addr, redrv_reg, redrv_val)); } static int xgbe_phy_set_redrv_mode_i2c(struct xgbe_prv_data *pdata, enum xgbe_phy_redrv_mode mode) { struct xgbe_phy_data *phy_data = pdata->phy_data; unsigned int redrv_reg; int ret; /* Calculate the register to write */ redrv_reg = XGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000); ret = xgbe_phy_redrv_write(pdata, redrv_reg, mode); return (ret); } static void xgbe_phy_set_redrv_mode(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; enum xgbe_phy_redrv_mode mode; int ret; if (!phy_data->redrv) return; mode = XGBE_PHY_REDRV_MODE_CX; if ((phy_data->port_mode == XGBE_PORT_MODE_SFP) && (phy_data->sfp_base != XGBE_SFP_BASE_1000_CX) && (phy_data->sfp_base != XGBE_SFP_BASE_10000_CR)) mode = XGBE_PHY_REDRV_MODE_SR; ret = xgbe_phy_get_comm_ownership(pdata); if (ret) return; axgbe_printf(2, "%s: redrv_if set: %d\n", __func__, phy_data->redrv_if); if (phy_data->redrv_if) xgbe_phy_set_redrv_mode_i2c(pdata, mode); else xgbe_phy_set_redrv_mode_mdio(pdata, mode); xgbe_phy_put_comm_ownership(pdata); } +static void +xgbe_phy_pll_ctrl(struct xgbe_prv_data *pdata, bool enable) +{ + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_MISC_CTRL0, + XGBE_PMA_PLL_CTRL_MASK, + enable ? 
XGBE_PMA_PLL_CTRL_ENABLE + : XGBE_PMA_PLL_CTRL_DISABLE); + DELAY(200); +} + static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata, unsigned int cmd, unsigned int sub_cmd) { unsigned int s0 = 0; unsigned int wait; + xgbe_phy_pll_ctrl(pdata, false); + /* Log if a previous command did not complete */ if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) axgbe_error("firmware mailbox not ready for command\n"); /* Construct the command */ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, cmd); XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, sub_cmd); /* Issue the command */ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0); XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0); XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1); /* Wait for command to complete */ wait = XGBE_RATECHANGE_COUNT; while (wait--) { if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) { axgbe_printf(3, "%s: Rate change done\n", __func__); - return; + goto reenable_pll; } DELAY(2000); } axgbe_printf(3, "firmware mailbox command did not complete\n"); + +reenable_pll: + xgbe_phy_pll_ctrl(pdata, true); } static void xgbe_phy_rrc(struct xgbe_prv_data *pdata) { /* Receiver Reset Cycle */ xgbe_phy_perform_ratechange(pdata, 5, 0); axgbe_printf(3, "receiver reset complete\n"); } static void xgbe_phy_power_off(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; /* Power off */ xgbe_phy_perform_ratechange(pdata, 0, 0); phy_data->cur_mode = XGBE_MODE_UNKNOWN; axgbe_printf(3, "phy powered off\n"); } static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; xgbe_phy_set_redrv_mode(pdata); /* 10G/SFI */ axgbe_printf(3, "%s: cable %d len %d\n", __func__, phy_data->sfp_cable, phy_data->sfp_cable_len); if (phy_data->sfp_cable != XGBE_SFP_CABLE_PASSIVE) xgbe_phy_perform_ratechange(pdata, 3, 0); else { if (phy_data->sfp_cable_len <= 1) xgbe_phy_perform_ratechange(pdata, 3, 1); else if (phy_data->sfp_cable_len <= 3) xgbe_phy_perform_ratechange(pdata, 3, 2); else xgbe_phy_perform_ratechange(pdata, 3, 3); } phy_data->cur_mode = XGBE_MODE_SFI; axgbe_printf(3, "10GbE SFI mode set\n"); } static void xgbe_phy_x_mode(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; xgbe_phy_set_redrv_mode(pdata); /* 1G/X */ xgbe_phy_perform_ratechange(pdata, 1, 3); phy_data->cur_mode = XGBE_MODE_X; axgbe_printf(3, "1GbE X mode set\n"); } static void xgbe_phy_sgmii_1000_mode(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; xgbe_phy_set_redrv_mode(pdata); /* 1G/SGMII */ xgbe_phy_perform_ratechange(pdata, 1, 2); phy_data->cur_mode = XGBE_MODE_SGMII_1000; axgbe_printf(2, "1GbE SGMII mode set\n"); } static void xgbe_phy_sgmii_100_mode(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; xgbe_phy_set_redrv_mode(pdata); /* 100M/SGMII */ xgbe_phy_perform_ratechange(pdata, 1, 1); phy_data->cur_mode = XGBE_MODE_SGMII_100; axgbe_printf(3, "100MbE SGMII mode set\n"); } static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; xgbe_phy_set_redrv_mode(pdata); /* 10G/KR */ xgbe_phy_perform_ratechange(pdata, 4, 0); phy_data->cur_mode = XGBE_MODE_KR; axgbe_printf(3, "10GbE KR mode set\n"); } static void xgbe_phy_kx_2500_mode(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; xgbe_phy_set_redrv_mode(pdata); /* 2.5G/KX */ xgbe_phy_perform_ratechange(pdata, 2, 0); phy_data->cur_mode = XGBE_MODE_KX_2500; axgbe_printf(3, "2.5GbE KX mode 
set\n"); } static void xgbe_phy_kx_1000_mode(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; xgbe_phy_set_redrv_mode(pdata); /* 1G/KX */ xgbe_phy_perform_ratechange(pdata, 1, 3); phy_data->cur_mode = XGBE_MODE_KX_1000; axgbe_printf(3, "1GbE KX mode set\n"); } static enum xgbe_mode xgbe_phy_cur_mode(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; return (phy_data->cur_mode); } static enum xgbe_mode xgbe_phy_switch_baset_mode(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; /* No switching if not 10GBase-T */ if (phy_data->port_mode != XGBE_PORT_MODE_10GBASE_T) return (xgbe_phy_cur_mode(pdata)); switch (xgbe_phy_cur_mode(pdata)) { case XGBE_MODE_SGMII_100: case XGBE_MODE_SGMII_1000: return (XGBE_MODE_KR); case XGBE_MODE_KR: default: return (XGBE_MODE_SGMII_1000); } } static enum xgbe_mode xgbe_phy_switch_bp_2500_mode(struct xgbe_prv_data *pdata) { return (XGBE_MODE_KX_2500); } static enum xgbe_mode xgbe_phy_switch_bp_mode(struct xgbe_prv_data *pdata) { /* If we are in KR switch to KX, and vice-versa */ switch (xgbe_phy_cur_mode(pdata)) { case XGBE_MODE_KX_1000: return (XGBE_MODE_KR); case XGBE_MODE_KR: default: return (XGBE_MODE_KX_1000); } } static enum xgbe_mode xgbe_phy_switch_mode(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: return (xgbe_phy_switch_bp_mode(pdata)); case XGBE_PORT_MODE_BACKPLANE_2500: return (xgbe_phy_switch_bp_2500_mode(pdata)); case XGBE_PORT_MODE_1000BASE_T: case XGBE_PORT_MODE_NBASE_T: case XGBE_PORT_MODE_10GBASE_T: return (xgbe_phy_switch_baset_mode(pdata)); case XGBE_PORT_MODE_1000BASE_X: case XGBE_PORT_MODE_10GBASE_R: case XGBE_PORT_MODE_SFP: /* No switching, so just return current mode */ return (xgbe_phy_cur_mode(pdata)); default: return (XGBE_MODE_UNKNOWN); } } static enum xgbe_mode xgbe_phy_get_basex_mode(struct xgbe_phy_data *phy_data, int speed) { switch (speed) { case SPEED_1000: return (XGBE_MODE_X); case SPEED_10000: return (XGBE_MODE_KR); default: return (XGBE_MODE_UNKNOWN); } } static enum xgbe_mode xgbe_phy_get_baset_mode(struct xgbe_phy_data *phy_data, int speed) { switch (speed) { case SPEED_100: return (XGBE_MODE_SGMII_100); case SPEED_1000: return (XGBE_MODE_SGMII_1000); case SPEED_2500: return (XGBE_MODE_KX_2500); case SPEED_10000: return (XGBE_MODE_KR); default: return (XGBE_MODE_UNKNOWN); } } static enum xgbe_mode xgbe_phy_get_sfp_mode(struct xgbe_phy_data *phy_data, int speed) { switch (speed) { case SPEED_100: return (XGBE_MODE_SGMII_100); case SPEED_1000: if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T) return (XGBE_MODE_SGMII_1000); else return (XGBE_MODE_X); case SPEED_10000: case SPEED_UNKNOWN: return (XGBE_MODE_SFI); default: return (XGBE_MODE_UNKNOWN); } } static enum xgbe_mode xgbe_phy_get_bp_2500_mode(int speed) { switch (speed) { case SPEED_2500: return (XGBE_MODE_KX_2500); default: return (XGBE_MODE_UNKNOWN); } } static enum xgbe_mode xgbe_phy_get_bp_mode(int speed) { switch (speed) { case SPEED_1000: return (XGBE_MODE_KX_1000); case SPEED_10000: return (XGBE_MODE_KR); default: return (XGBE_MODE_UNKNOWN); } } static enum xgbe_mode xgbe_phy_get_mode(struct xgbe_prv_data *pdata, int speed) { struct xgbe_phy_data *phy_data = pdata->phy_data; switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: return (xgbe_phy_get_bp_mode(speed)); case XGBE_PORT_MODE_BACKPLANE_2500: return (xgbe_phy_get_bp_2500_mode(speed)); case 
XGBE_PORT_MODE_1000BASE_T: case XGBE_PORT_MODE_NBASE_T: case XGBE_PORT_MODE_10GBASE_T: return (xgbe_phy_get_baset_mode(phy_data, speed)); case XGBE_PORT_MODE_1000BASE_X: case XGBE_PORT_MODE_10GBASE_R: return (xgbe_phy_get_basex_mode(phy_data, speed)); case XGBE_PORT_MODE_SFP: return (xgbe_phy_get_sfp_mode(phy_data, speed)); default: return (XGBE_MODE_UNKNOWN); } } static void xgbe_phy_set_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { switch (mode) { case XGBE_MODE_KX_1000: xgbe_phy_kx_1000_mode(pdata); break; case XGBE_MODE_KX_2500: xgbe_phy_kx_2500_mode(pdata); break; case XGBE_MODE_KR: xgbe_phy_kr_mode(pdata); break; case XGBE_MODE_SGMII_100: xgbe_phy_sgmii_100_mode(pdata); break; case XGBE_MODE_SGMII_1000: xgbe_phy_sgmii_1000_mode(pdata); break; case XGBE_MODE_X: xgbe_phy_x_mode(pdata); break; case XGBE_MODE_SFI: xgbe_phy_sfi_mode(pdata); break; default: break; } } static void xgbe_phy_get_type(struct xgbe_prv_data *pdata, struct ifmediareq * ifmr) { struct xgbe_phy_data *phy_data = pdata->phy_data; switch (pdata->phy.speed) { case SPEED_10000: if (phy_data->port_mode == XGBE_PORT_MODE_BACKPLANE) ifmr->ifm_active |= IFM_10G_KR; else if(phy_data->port_mode == XGBE_PORT_MODE_10GBASE_T) ifmr->ifm_active |= IFM_10G_T; else if(phy_data->port_mode == XGBE_PORT_MODE_10GBASE_R) ifmr->ifm_active |= IFM_10G_KR; else if(phy_data->port_mode == XGBE_PORT_MODE_SFP) ifmr->ifm_active |= IFM_10G_SFI; else ifmr->ifm_active |= IFM_OTHER; break; case SPEED_2500: if (phy_data->port_mode == XGBE_PORT_MODE_BACKPLANE_2500) ifmr->ifm_active |= IFM_2500_KX; else ifmr->ifm_active |= IFM_OTHER; break; case SPEED_1000: if (phy_data->port_mode == XGBE_PORT_MODE_BACKPLANE) ifmr->ifm_active |= IFM_1000_KX; else if(phy_data->port_mode == XGBE_PORT_MODE_1000BASE_T) ifmr->ifm_active |= IFM_1000_T; #if 0 else if(phy_data->port_mode == XGBE_PORT_MODE_1000BASE_X) ifmr->ifm_active |= IFM_1000_SX; ifmr->ifm_active |= IFM_1000_LX; ifmr->ifm_active |= IFM_1000_CX; #endif else if(phy_data->port_mode == XGBE_PORT_MODE_SFP) ifmr->ifm_active |= IFM_1000_SGMII; else ifmr->ifm_active |= IFM_OTHER; break; case SPEED_100: if(phy_data->port_mode == XGBE_PORT_MODE_NBASE_T) ifmr->ifm_active |= IFM_100_T; else if(phy_data->port_mode == XGBE_PORT_MODE_SFP) - ifmr->ifm_active |= IFM_1000_SGMII; + ifmr->ifm_active |= IFM_100_SGMII; else ifmr->ifm_active |= IFM_OTHER; break; default: ifmr->ifm_active |= IFM_OTHER; axgbe_printf(1, "Unknown mode detected\n"); break; } } static bool xgbe_phy_check_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode, bool advert) { if (pdata->phy.autoneg == AUTONEG_ENABLE) return (advert); else { enum xgbe_mode cur_mode; cur_mode = xgbe_phy_get_mode(pdata, pdata->phy.speed); if (cur_mode == mode) return (true); } return (false); } static bool xgbe_phy_use_basex_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { switch (mode) { case XGBE_MODE_X: return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, 1000baseX_Full))); case XGBE_MODE_KR: return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, 10000baseKR_Full))); default: return (false); } } static bool xgbe_phy_use_baset_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { axgbe_printf(3, "%s: check mode %d\n", __func__, mode); switch (mode) { case XGBE_MODE_SGMII_100: return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, 100baseT_Full))); case XGBE_MODE_SGMII_1000: return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, 1000baseT_Full))); case XGBE_MODE_KX_2500: return (xgbe_phy_check_mode(pdata, mode, 
XGBE_ADV(&pdata->phy, 2500baseT_Full))); case XGBE_MODE_KR: return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, 10000baseT_Full))); default: return (false); } } static bool xgbe_phy_use_sfp_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { struct xgbe_phy_data *phy_data = pdata->phy_data; switch (mode) { case XGBE_MODE_X: if (phy_data->sfp_base == XGBE_SFP_BASE_1000_T) return (false); return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, 1000baseX_Full))); case XGBE_MODE_SGMII_100: if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T) return (false); return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, 100baseT_Full))); case XGBE_MODE_SGMII_1000: if (phy_data->sfp_base != XGBE_SFP_BASE_1000_T) return (false); return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, 1000baseT_Full))); case XGBE_MODE_SFI: if (phy_data->sfp_mod_absent) return (true); return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, 10000baseSR_Full) || XGBE_ADV(&pdata->phy, 10000baseLR_Full) || XGBE_ADV(&pdata->phy, 10000baseLRM_Full) || XGBE_ADV(&pdata->phy, 10000baseER_Full) || XGBE_ADV(&pdata->phy, 10000baseCR_Full))); default: return (false); } } static bool xgbe_phy_use_bp_2500_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { switch (mode) { case XGBE_MODE_KX_2500: return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, 2500baseX_Full))); default: return (false); } } static bool xgbe_phy_use_bp_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { switch (mode) { case XGBE_MODE_KX_1000: return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, 1000baseKX_Full))); case XGBE_MODE_KR: return (xgbe_phy_check_mode(pdata, mode, XGBE_ADV(&pdata->phy, 10000baseKR_Full))); default: return (false); } } static bool xgbe_phy_use_mode(struct xgbe_prv_data *pdata, enum xgbe_mode mode) { struct xgbe_phy_data *phy_data = pdata->phy_data; switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: return (xgbe_phy_use_bp_mode(pdata, mode)); case XGBE_PORT_MODE_BACKPLANE_2500: return (xgbe_phy_use_bp_2500_mode(pdata, mode)); case XGBE_PORT_MODE_1000BASE_T: axgbe_printf(3, "use_mode %s\n", xgbe_phy_use_baset_mode(pdata, mode) ? 
"found" : "Not found"); case XGBE_PORT_MODE_NBASE_T: case XGBE_PORT_MODE_10GBASE_T: return (xgbe_phy_use_baset_mode(pdata, mode)); case XGBE_PORT_MODE_1000BASE_X: case XGBE_PORT_MODE_10GBASE_R: return (xgbe_phy_use_basex_mode(pdata, mode)); case XGBE_PORT_MODE_SFP: return (xgbe_phy_use_sfp_mode(pdata, mode)); default: return (false); } } static bool xgbe_phy_valid_speed_basex_mode(struct xgbe_phy_data *phy_data, int speed) { switch (speed) { case SPEED_1000: return (phy_data->port_mode == XGBE_PORT_MODE_1000BASE_X); case SPEED_10000: return (phy_data->port_mode == XGBE_PORT_MODE_10GBASE_R); default: return (false); } } static bool xgbe_phy_valid_speed_baset_mode(struct xgbe_phy_data *phy_data, int speed) { switch (speed) { case SPEED_100: case SPEED_1000: return (true); case SPEED_2500: return (phy_data->port_mode == XGBE_PORT_MODE_NBASE_T); case SPEED_10000: return (phy_data->port_mode == XGBE_PORT_MODE_10GBASE_T); default: return (false); } } static bool xgbe_phy_valid_speed_sfp_mode(struct xgbe_phy_data *phy_data, int speed) { switch (speed) { case SPEED_100: - return (phy_data->sfp_speed == XGBE_SFP_SPEED_100_1000); + return ((phy_data->sfp_speed == XGBE_SFP_SPEED_100) || + (phy_data->sfp_speed == XGBE_SFP_SPEED_100_1000)); case SPEED_1000: return ((phy_data->sfp_speed == XGBE_SFP_SPEED_100_1000) || (phy_data->sfp_speed == XGBE_SFP_SPEED_1000)); case SPEED_10000: return (phy_data->sfp_speed == XGBE_SFP_SPEED_10000); default: return (false); } } static bool xgbe_phy_valid_speed_bp_2500_mode(int speed) { switch (speed) { case SPEED_2500: return (true); default: return (false); } } static bool xgbe_phy_valid_speed_bp_mode(int speed) { switch (speed) { case SPEED_1000: case SPEED_10000: return (true); default: return (false); } } static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed) { struct xgbe_phy_data *phy_data = pdata->phy_data; switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: return (xgbe_phy_valid_speed_bp_mode(speed)); case XGBE_PORT_MODE_BACKPLANE_2500: return (xgbe_phy_valid_speed_bp_2500_mode(speed)); case XGBE_PORT_MODE_1000BASE_T: case XGBE_PORT_MODE_NBASE_T: case XGBE_PORT_MODE_10GBASE_T: return (xgbe_phy_valid_speed_baset_mode(phy_data, speed)); case XGBE_PORT_MODE_1000BASE_X: case XGBE_PORT_MODE_10GBASE_R: return (xgbe_phy_valid_speed_basex_mode(phy_data, speed)); case XGBE_PORT_MODE_SFP: return (xgbe_phy_valid_speed_sfp_mode(phy_data, speed)); default: return (false); } } static int xgbe_upd_link(struct xgbe_prv_data *pdata) { int reg; axgbe_printf(2, "%s: Link %d\n", __func__, pdata->phy.link); reg = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_BMSR); + reg = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_BMSR); if (reg < 0) return (reg); if ((reg & BMSR_LINK) == 0) pdata->phy.link = 0; else pdata->phy.link = 1; axgbe_printf(2, "Link: %d updated reg %#x\n", pdata->phy.link, reg); return (0); } static int xgbe_phy_read_status(struct xgbe_prv_data *pdata) { int common_adv_gb = 0; int common_adv; int lpagb = 0; int adv, lpa; int ret; ret = xgbe_upd_link(pdata); if (ret) { axgbe_printf(2, "Link Update return %d\n", ret); return (ret); } if (AUTONEG_ENABLE == pdata->phy.autoneg) { if (pdata->phy.supported == SUPPORTED_1000baseT_Half || pdata->phy.supported == SUPPORTED_1000baseT_Full) { lpagb = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_100T2SR); if (lpagb < 0) return (lpagb); adv = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_100T2CR); if (adv < 0) return (adv); if (lpagb & GTSR_MAN_MS_FLT) { if (adv & GTCR_MAN_MS) axgbe_printf(2, 
"Master/Slave Resolution " "failed, maybe conflicting manual settings\n"); else axgbe_printf(2, "Master/Slave Resolution failed\n"); return (-ENOLINK); } if (pdata->phy.supported == SUPPORTED_1000baseT_Half) XGBE_SET_ADV(&pdata->phy, 1000baseT_Half); else if (pdata->phy.supported == SUPPORTED_1000baseT_Full) XGBE_SET_ADV(&pdata->phy, 1000baseT_Full); common_adv_gb = lpagb & adv << 2; } lpa = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_ANLPAR); if (lpa < 0) return (lpa); if (pdata->phy.supported == SUPPORTED_Autoneg) XGBE_SET_ADV(&pdata->phy, Autoneg); adv = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_ANAR); if (adv < 0) return (adv); common_adv = lpa & adv; pdata->phy.speed = SPEED_10; pdata->phy.duplex = DUPLEX_HALF; pdata->phy.pause = 0; pdata->phy.asym_pause = 0; axgbe_printf(2, "%s: lpa %#x adv %#x common_adv_gb %#x " "common_adv %#x\n", __func__, lpa, adv, common_adv_gb, common_adv); if (common_adv_gb & (GTSR_LP_1000TFDX | GTSR_LP_1000THDX)) { axgbe_printf(2, "%s: SPEED 1000\n", __func__); pdata->phy.speed = SPEED_1000; if (common_adv_gb & GTSR_LP_1000TFDX) pdata->phy.duplex = DUPLEX_FULL; } else if (common_adv & (ANLPAR_TX_FD | ANLPAR_TX)) { axgbe_printf(2, "%s: SPEED 100\n", __func__); pdata->phy.speed = SPEED_100; if (common_adv & ANLPAR_TX_FD) pdata->phy.duplex = DUPLEX_FULL; } else if (common_adv & ANLPAR_10_FD) pdata->phy.duplex = DUPLEX_FULL; if (pdata->phy.duplex == DUPLEX_FULL) { pdata->phy.pause = lpa & ANLPAR_FC ? 1 : 0; pdata->phy.asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0; } } else { int bmcr = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_BMCR); if (bmcr < 0) return (bmcr); if (bmcr & BMCR_FDX) pdata->phy.duplex = DUPLEX_FULL; else pdata->phy.duplex = DUPLEX_HALF; if (bmcr & BMCR_SPEED1) pdata->phy.speed = SPEED_1000; else if (bmcr & BMCR_SPEED100) pdata->phy.speed = SPEED_100; else pdata->phy.speed = SPEED_10; pdata->phy.pause = 0; pdata->phy.asym_pause = 0; axgbe_printf(2, "%s: link speed %#x duplex %#x media %#x " "autoneg %#x\n", __func__, pdata->phy.speed, pdata->phy.duplex, pdata->phy.link, pdata->phy.autoneg); } return (0); } +static void +xgbe_rrc(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + int ret; + + if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) { + axgbe_printf(1, "ENTERED RRC: rrc_count: %d\n", + phy_data->rrc_count); + phy_data->rrc_count = 0; + if (pdata->link_workaround) { + ret = xgbe_phy_reset(pdata); + if (ret) + axgbe_error("Error resetting phy\n"); + } else + xgbe_phy_rrc(pdata); + } +} + static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart) { struct xgbe_phy_data *phy_data = pdata->phy_data; struct mii_data *mii = NULL; unsigned int reg; int ret; *an_restart = 0; if (phy_data->port_mode == XGBE_PORT_MODE_SFP) { /* Check SFP signals */ axgbe_printf(3, "%s: calling phy detect\n", __func__); xgbe_phy_sfp_detect(pdata); if (phy_data->sfp_changed) { axgbe_printf(1, "%s: SFP changed observed\n", __func__); *an_restart = 1; return (0); } if (phy_data->sfp_mod_absent || phy_data->sfp_rx_los) { axgbe_printf(1, "%s: SFP absent 0x%x & sfp_rx_los 0x%x\n", __func__, phy_data->sfp_mod_absent, phy_data->sfp_rx_los); + + if (!phy_data->sfp_mod_absent) { + xgbe_rrc(pdata); + } + return (0); } - } else { + } + + if (phy_data->phydev || phy_data->port_mode != XGBE_PORT_MODE_SFP) { + if (pdata->axgbe_miibus == NULL) { + axgbe_printf(1, "%s: miibus not initialized", __func__); + goto mdio_read; + } + mii = device_get_softc(pdata->axgbe_miibus); mii_tick(mii); - + ret = xgbe_phy_read_status(pdata); 
if (ret) { - axgbe_printf(2, "Link: Read status returned %d\n", ret); - return (ret); + axgbe_error("Link: Read status returned %d\n", ret); + return (0); } axgbe_printf(2, "%s: link speed %#x duplex %#x media %#x " "autoneg %#x\n", __func__, pdata->phy.speed, pdata->phy.duplex, pdata->phy.link, pdata->phy.autoneg); ret = xgbe_phy_mii_read(pdata, pdata->mdio_addr, MII_BMSR); ret = (ret < 0) ? ret : (ret & BMSR_ACOMP); axgbe_printf(2, "Link: BMCR returned %d\n", ret); if ((pdata->phy.autoneg == AUTONEG_ENABLE) && !ret) return (0); - return (pdata->phy.link); + if (pdata->phy.link) + return (1); + + xgbe_rrc(pdata); } +mdio_read: + /* Link status is latched low, so read once to clear * and then read again to get current state */ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); axgbe_printf(1, "%s: link_status reg: 0x%x\n", __func__, reg); if (reg & MDIO_STAT1_LSTATUS) return (1); /* No link, attempt a receiver reset cycle */ - if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) { - axgbe_printf(1, "ENTERED RRC: rrc_count: %d\n", - phy_data->rrc_count); - phy_data->rrc_count = 0; - if (pdata->link_workaround) { - ret = xgbe_phy_reset(pdata); - if (ret) - axgbe_error("Error resetting phy\n"); - } else - xgbe_phy_rrc(pdata); - } + xgbe_rrc(pdata); return (0); } static void xgbe_phy_sfp_gpio_setup(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; phy_data->sfp_gpio_address = XGBE_GPIO_ADDRESS_PCA9555 + XP_GET_BITS(pdata->pp3, XP_PROP_3, GPIO_ADDR); phy_data->sfp_gpio_mask = XP_GET_BITS(pdata->pp3, XP_PROP_3, GPIO_MASK); phy_data->sfp_gpio_rx_los = XP_GET_BITS(pdata->pp3, XP_PROP_3, GPIO_RX_LOS); phy_data->sfp_gpio_tx_fault = XP_GET_BITS(pdata->pp3, XP_PROP_3, GPIO_TX_FAULT); phy_data->sfp_gpio_mod_absent = XP_GET_BITS(pdata->pp3, XP_PROP_3, GPIO_MOD_ABS); phy_data->sfp_gpio_rate_select = XP_GET_BITS(pdata->pp3, XP_PROP_3, GPIO_RATE_SELECT); DBGPR("SFP: gpio_address=%#x\n", phy_data->sfp_gpio_address); DBGPR("SFP: gpio_mask=%#x\n", phy_data->sfp_gpio_mask); DBGPR("SFP: gpio_rx_los=%u\n", phy_data->sfp_gpio_rx_los); DBGPR("SFP: gpio_tx_fault=%u\n", phy_data->sfp_gpio_tx_fault); DBGPR("SFP: gpio_mod_absent=%u\n", phy_data->sfp_gpio_mod_absent); DBGPR("SFP: gpio_rate_select=%u\n", phy_data->sfp_gpio_rate_select); } static void xgbe_phy_sfp_comm_setup(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; unsigned int mux_addr_hi, mux_addr_lo; mux_addr_hi = XP_GET_BITS(pdata->pp4, XP_PROP_4, MUX_ADDR_HI); mux_addr_lo = XP_GET_BITS(pdata->pp4, XP_PROP_4, MUX_ADDR_LO); if (mux_addr_lo == XGBE_SFP_DIRECT) return; phy_data->sfp_comm = XGBE_SFP_COMM_PCA9545; phy_data->sfp_mux_address = (mux_addr_hi << 2) + mux_addr_lo; phy_data->sfp_mux_channel = XP_GET_BITS(pdata->pp4, XP_PROP_4, MUX_CHAN); DBGPR("SFP: mux_address=%#x\n", phy_data->sfp_mux_address); DBGPR("SFP: mux_channel=%u\n", phy_data->sfp_mux_channel); } static void xgbe_phy_sfp_setup(struct xgbe_prv_data *pdata) { xgbe_phy_sfp_comm_setup(pdata); xgbe_phy_sfp_gpio_setup(pdata); } static int xgbe_phy_int_mdio_reset(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; unsigned int ret; ret = pdata->hw_if.set_gpio(pdata, phy_data->mdio_reset_gpio); if (ret) return (ret); ret = pdata->hw_if.clr_gpio(pdata, phy_data->mdio_reset_gpio); return (ret); } static int xgbe_phy_i2c_mdio_reset(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; uint8_t gpio_reg, gpio_ports[2], gpio_data[3]; int ret; /* 
Read the output port registers */ gpio_reg = 2; ret = xgbe_phy_i2c_read(pdata, phy_data->mdio_reset_addr, &gpio_reg, sizeof(gpio_reg), gpio_ports, sizeof(gpio_ports)); if (ret) return (ret); /* Prepare to write the GPIO data */ gpio_data[0] = 2; gpio_data[1] = gpio_ports[0]; gpio_data[2] = gpio_ports[1]; /* Set the GPIO pin */ if (phy_data->mdio_reset_gpio < 8) gpio_data[1] |= (1 << (phy_data->mdio_reset_gpio % 8)); else gpio_data[2] |= (1 << (phy_data->mdio_reset_gpio % 8)); /* Write the output port registers */ ret = xgbe_phy_i2c_write(pdata, phy_data->mdio_reset_addr, gpio_data, sizeof(gpio_data)); if (ret) return (ret); /* Clear the GPIO pin */ if (phy_data->mdio_reset_gpio < 8) gpio_data[1] &= ~(1 << (phy_data->mdio_reset_gpio % 8)); else gpio_data[2] &= ~(1 << (phy_data->mdio_reset_gpio % 8)); /* Write the output port registers */ ret = xgbe_phy_i2c_write(pdata, phy_data->mdio_reset_addr, gpio_data, sizeof(gpio_data)); return (ret); } static int xgbe_phy_mdio_reset(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; int ret; if (phy_data->conn_type != XGBE_CONN_TYPE_MDIO) return (0); ret = xgbe_phy_get_comm_ownership(pdata); if (ret) return (ret); if (phy_data->mdio_reset == XGBE_MDIO_RESET_I2C_GPIO) ret = xgbe_phy_i2c_mdio_reset(pdata); else if (phy_data->mdio_reset == XGBE_MDIO_RESET_INT_GPIO) ret = xgbe_phy_int_mdio_reset(pdata); xgbe_phy_put_comm_ownership(pdata); return (ret); } static bool xgbe_phy_redrv_error(struct xgbe_phy_data *phy_data) { if (!phy_data->redrv) return (false); if (phy_data->redrv_if >= XGBE_PHY_REDRV_IF_MAX) return (true); switch (phy_data->redrv_model) { case XGBE_PHY_REDRV_MODEL_4223: if (phy_data->redrv_lane > 3) return (true); break; case XGBE_PHY_REDRV_MODEL_4227: if (phy_data->redrv_lane > 1) return (true); break; default: return (true); } return (false); } static int xgbe_phy_mdio_reset_setup(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; if (phy_data->conn_type != XGBE_CONN_TYPE_MDIO) return (0); phy_data->mdio_reset = XP_GET_BITS(pdata->pp3, XP_PROP_3, MDIO_RESET); switch (phy_data->mdio_reset) { case XGBE_MDIO_RESET_NONE: case XGBE_MDIO_RESET_I2C_GPIO: case XGBE_MDIO_RESET_INT_GPIO: break; default: axgbe_error("unsupported MDIO reset (%#x)\n", phy_data->mdio_reset); return (-EINVAL); } if (phy_data->mdio_reset == XGBE_MDIO_RESET_I2C_GPIO) { phy_data->mdio_reset_addr = XGBE_GPIO_ADDRESS_PCA9555 + XP_GET_BITS(pdata->pp3, XP_PROP_3, MDIO_RESET_I2C_ADDR); phy_data->mdio_reset_gpio = XP_GET_BITS(pdata->pp3, XP_PROP_3, MDIO_RESET_I2C_GPIO); } else if (phy_data->mdio_reset == XGBE_MDIO_RESET_INT_GPIO) phy_data->mdio_reset_gpio = XP_GET_BITS(pdata->pp3, XP_PROP_3, MDIO_RESET_INT_GPIO); return (0); } static bool xgbe_phy_port_mode_mismatch(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) || (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)) return (false); break; case XGBE_PORT_MODE_BACKPLANE_2500: if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500) return (false); break; case XGBE_PORT_MODE_1000BASE_T: if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) || (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000)) return (false); break; case XGBE_PORT_MODE_1000BASE_X: if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) return (false); break; case XGBE_PORT_MODE_NBASE_T: if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) || 
(phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) || (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500)) return (false); break; case XGBE_PORT_MODE_10GBASE_T: if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) || (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) || (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)) return (false); break; case XGBE_PORT_MODE_10GBASE_R: if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) return (false); break; case XGBE_PORT_MODE_SFP: if ((phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) || (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) || (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000)) return (false); break; default: break; } return (true); } static bool xgbe_phy_conn_type_mismatch(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; switch (phy_data->port_mode) { case XGBE_PORT_MODE_BACKPLANE: case XGBE_PORT_MODE_BACKPLANE_2500: if (phy_data->conn_type == XGBE_CONN_TYPE_BACKPLANE) return (false); break; case XGBE_PORT_MODE_1000BASE_T: case XGBE_PORT_MODE_1000BASE_X: case XGBE_PORT_MODE_NBASE_T: case XGBE_PORT_MODE_10GBASE_T: case XGBE_PORT_MODE_10GBASE_R: if (phy_data->conn_type == XGBE_CONN_TYPE_MDIO) return (false); break; case XGBE_PORT_MODE_SFP: if (phy_data->conn_type == XGBE_CONN_TYPE_SFP) return (false); break; default: break; } return (true); } static bool xgbe_phy_port_enabled(struct xgbe_prv_data *pdata) { if (!XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_SPEEDS)) return (false); if (!XP_GET_BITS(pdata->pp0, XP_PROP_0, CONN_TYPE)) return (false); return (true); } static void xgbe_phy_cdr_track(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; axgbe_printf(2, "%s: an_cdr_workaround %d phy_cdr_notrack %d\n", __func__, pdata->sysctl_an_cdr_workaround, phy_data->phy_cdr_notrack); if (!pdata->sysctl_an_cdr_workaround) return; if (!phy_data->phy_cdr_notrack) return; DELAY(phy_data->phy_cdr_delay + 500); XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL, XGBE_PMA_CDR_TRACK_EN_MASK, XGBE_PMA_CDR_TRACK_EN_ON); phy_data->phy_cdr_notrack = 0; axgbe_printf(2, "CDR TRACK DONE\n"); } static void xgbe_phy_cdr_notrack(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; axgbe_printf(2, "%s: an_cdr_workaround %d phy_cdr_notrack %d\n", __func__, pdata->sysctl_an_cdr_workaround, phy_data->phy_cdr_notrack); if (!pdata->sysctl_an_cdr_workaround) return; if (phy_data->phy_cdr_notrack) return; XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL, XGBE_PMA_CDR_TRACK_EN_MASK, XGBE_PMA_CDR_TRACK_EN_OFF); xgbe_phy_rrc(pdata); phy_data->phy_cdr_notrack = 1; } static void xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata) { if (!pdata->sysctl_an_cdr_track_early) xgbe_phy_cdr_track(pdata); } static void xgbe_phy_kr_training_pre(struct xgbe_prv_data *pdata) { if (pdata->sysctl_an_cdr_track_early) xgbe_phy_cdr_track(pdata); } static void xgbe_phy_an_post(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; switch (pdata->an_mode) { case XGBE_AN_MODE_CL73: case XGBE_AN_MODE_CL73_REDRV: if (phy_data->cur_mode != XGBE_MODE_KR) break; xgbe_phy_cdr_track(pdata); switch (pdata->an_result) { case XGBE_AN_READY: case XGBE_AN_COMPLETE: break; default: if (phy_data->phy_cdr_delay < XGBE_CDR_DELAY_MAX) phy_data->phy_cdr_delay += XGBE_CDR_DELAY_INC; else phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT; break; } break; default: break; } } static void xgbe_phy_an_pre(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = 
pdata->phy_data; switch (pdata->an_mode) { case XGBE_AN_MODE_CL73: case XGBE_AN_MODE_CL73_REDRV: if (phy_data->cur_mode != XGBE_MODE_KR) break; xgbe_phy_cdr_notrack(pdata); break; default: break; } } static void xgbe_phy_stop(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; /* If we have an external PHY, free it */ xgbe_phy_free_phy_device(pdata); /* Reset SFP data */ xgbe_phy_sfp_reset(phy_data); xgbe_phy_sfp_mod_absent(pdata); /* Reset CDR support */ xgbe_phy_cdr_track(pdata); /* Power off the PHY */ xgbe_phy_power_off(pdata); /* Stop the I2C controller */ pdata->i2c_if.i2c_stop(pdata); } static int xgbe_phy_start(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; int ret; axgbe_printf(2, "%s: redrv %d redrv_if %d start_mode %d\n", __func__, phy_data->redrv, phy_data->redrv_if, phy_data->start_mode); /* Start the I2C controller */ ret = pdata->i2c_if.i2c_start(pdata); if (ret) { axgbe_error("%s: impl i2c start ret %d\n", __func__, ret); return (ret); } /* Set the proper MDIO mode for the re-driver */ if (phy_data->redrv && !phy_data->redrv_if) { ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->redrv_addr, XGBE_MDIO_MODE_CL22); if (ret) { axgbe_error("redriver mdio port not compatible (%u)\n", phy_data->redrv_addr); return (ret); } } /* Start in highest supported mode */ xgbe_phy_set_mode(pdata, phy_data->start_mode); /* Reset CDR support */ xgbe_phy_cdr_track(pdata); /* After starting the I2C controller, we can check for an SFP */ switch (phy_data->port_mode) { case XGBE_PORT_MODE_SFP: axgbe_printf(3, "%s: calling phy detect\n", __func__); xgbe_phy_sfp_detect(pdata); break; default: break; } /* If we have an external PHY, start it */ ret = xgbe_phy_find_phy_device(pdata); if (ret) { axgbe_error("%s: impl find phy dev ret %d\n", __func__, ret); goto err_i2c; } axgbe_printf(3, "%s: impl return success\n", __func__); return (0); err_i2c: pdata->i2c_if.i2c_stop(pdata); return (ret); } static int xgbe_phy_reset(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; enum xgbe_mode cur_mode; int ret; /* Reset by power cycling the PHY */ cur_mode = phy_data->cur_mode; xgbe_phy_power_off(pdata); xgbe_phy_set_mode(pdata, cur_mode); axgbe_printf(3, "%s: mode %d\n", __func__, cur_mode); if (!phy_data->phydev) { axgbe_printf(1, "%s: no phydev\n", __func__); return (0); } /* Reset the external PHY */ ret = xgbe_phy_mdio_reset(pdata); if (ret) { axgbe_error("%s: mdio reset %d\n", __func__, ret); return (ret); } axgbe_printf(3, "%s: return success\n", __func__); return (0); } static void axgbe_ifmedia_sts(if_t ifp, struct ifmediareq *ifmr) { struct axgbe_if_softc *sc; struct xgbe_prv_data *pdata; struct mii_data *mii; sc = if_getsoftc(ifp); pdata = &sc->pdata; axgbe_printf(2, "%s: Invoked\n", __func__); mtx_lock_spin(&pdata->mdio_mutex); mii = device_get_softc(pdata->axgbe_miibus); axgbe_printf(2, "%s: media_active %#x media_status %#x\n", __func__, mii->mii_media_active, mii->mii_media_status); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; mtx_unlock_spin(&pdata->mdio_mutex); } static int axgbe_ifmedia_upd(if_t ifp) { struct xgbe_prv_data *pdata; struct axgbe_if_softc *sc; struct mii_data *mii; struct mii_softc *miisc; int ret; sc = if_getsoftc(ifp); pdata = &sc->pdata; axgbe_printf(2, "%s: Invoked\n", __func__); mtx_lock_spin(&pdata->mdio_mutex); mii = device_get_softc(pdata->axgbe_miibus); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); 
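/* The LIST_FOREACH above has reset every PHY instance on this MII bus; mii_mediachg() below then programs the media currently selected through ifmedia. */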
ret = mii_mediachg(mii); mtx_unlock_spin(&pdata->mdio_mutex); return (ret); } static void xgbe_phy_exit(struct xgbe_prv_data *pdata) { if (pdata->axgbe_miibus != NULL) device_delete_child(pdata->dev, pdata->axgbe_miibus); /* free phy_data structure */ free(pdata->phy_data, M_AXGBE); } static int xgbe_phy_init(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data; int ret; /* Initialize the global lock */ if (!mtx_initialized(&xgbe_phy_comm_lock)) mtx_init(&xgbe_phy_comm_lock, "xgbe phy common lock", NULL, MTX_DEF); /* Check if enabled */ if (!xgbe_phy_port_enabled(pdata)) { axgbe_error("device is not enabled\n"); return (-ENODEV); } /* Initialize the I2C controller */ ret = pdata->i2c_if.i2c_init(pdata); if (ret) return (ret); phy_data = malloc(sizeof(*phy_data), M_AXGBE, M_WAITOK | M_ZERO); if (!phy_data) return (-ENOMEM); pdata->phy_data = phy_data; phy_data->port_mode = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_MODE); phy_data->port_id = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_ID); phy_data->port_speeds = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_SPEEDS); phy_data->conn_type = XP_GET_BITS(pdata->pp0, XP_PROP_0, CONN_TYPE); phy_data->mdio_addr = XP_GET_BITS(pdata->pp0, XP_PROP_0, MDIO_ADDR); pdata->mdio_addr = phy_data->mdio_addr; DBGPR("port mode=%u\n", phy_data->port_mode); DBGPR("port id=%u\n", phy_data->port_id); DBGPR("port speeds=%#x\n", phy_data->port_speeds); DBGPR("conn type=%u\n", phy_data->conn_type); DBGPR("mdio addr=%u\n", phy_data->mdio_addr); phy_data->redrv = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_PRESENT); phy_data->redrv_if = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_IF); phy_data->redrv_addr = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_ADDR); phy_data->redrv_lane = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_LANE); phy_data->redrv_model = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_MODEL); if (phy_data->redrv) { DBGPR("redrv present\n"); DBGPR("redrv i/f=%u\n", phy_data->redrv_if); DBGPR("redrv addr=%#x\n", phy_data->redrv_addr); DBGPR("redrv lane=%u\n", phy_data->redrv_lane); DBGPR("redrv model=%u\n", phy_data->redrv_model); } DBGPR("%s: redrv addr=%#x redrv i/f=%u\n", __func__, phy_data->redrv_addr, phy_data->redrv_if); /* Validate the connection requested */ if (xgbe_phy_conn_type_mismatch(pdata)) { axgbe_error("phy mode/connection mismatch " "(%#x/%#x)\n", phy_data->port_mode, phy_data->conn_type); return (-EINVAL); } /* Validate the mode requested */ if (xgbe_phy_port_mode_mismatch(pdata)) { axgbe_error("phy mode/speed mismatch " "(%#x/%#x)\n", phy_data->port_mode, phy_data->port_speeds); return (-EINVAL); } /* Check for and validate MDIO reset support */ ret = xgbe_phy_mdio_reset_setup(pdata); if (ret) { axgbe_error("%s, mdio_reset_setup ret %d\n", __func__, ret); return (ret); } /* Validate the re-driver information */ if (xgbe_phy_redrv_error(phy_data)) { axgbe_error("phy re-driver settings error\n"); return (-EINVAL); } pdata->kr_redrv = phy_data->redrv; /* Indicate current mode is unknown */ phy_data->cur_mode = XGBE_MODE_UNKNOWN; /* Initialize supported features. 
Current code does not support ethtool */ XGBE_ZERO_SUP(&pdata->phy); DBGPR("%s: port mode %d\n", __func__, phy_data->port_mode); switch (phy_data->port_mode) { /* Backplane support */ case XGBE_PORT_MODE_BACKPLANE: XGBE_SET_SUP(&pdata->phy, Autoneg); XGBE_SET_SUP(&pdata->phy, Pause); XGBE_SET_SUP(&pdata->phy, Asym_Pause); XGBE_SET_SUP(&pdata->phy, Backplane); if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) { XGBE_SET_SUP(&pdata->phy, 1000baseKX_Full); phy_data->start_mode = XGBE_MODE_KX_1000; } if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) { XGBE_SET_SUP(&pdata->phy, 10000baseKR_Full); if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE) XGBE_SET_SUP(&pdata->phy, 10000baseR_FEC); phy_data->start_mode = XGBE_MODE_KR; } phy_data->phydev_mode = XGBE_MDIO_MODE_NONE; break; case XGBE_PORT_MODE_BACKPLANE_2500: XGBE_SET_SUP(&pdata->phy, Pause); XGBE_SET_SUP(&pdata->phy, Asym_Pause); XGBE_SET_SUP(&pdata->phy, Backplane); XGBE_SET_SUP(&pdata->phy, 2500baseX_Full); phy_data->start_mode = XGBE_MODE_KX_2500; phy_data->phydev_mode = XGBE_MDIO_MODE_NONE; break; /* MDIO 1GBase-T support */ case XGBE_PORT_MODE_1000BASE_T: XGBE_SET_SUP(&pdata->phy, Autoneg); XGBE_SET_SUP(&pdata->phy, Pause); XGBE_SET_SUP(&pdata->phy, Asym_Pause); XGBE_SET_SUP(&pdata->phy, TP); if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) { XGBE_SET_SUP(&pdata->phy, 100baseT_Full); phy_data->start_mode = XGBE_MODE_SGMII_100; } if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) { XGBE_SET_SUP(&pdata->phy, 1000baseT_Full); phy_data->start_mode = XGBE_MODE_SGMII_1000; } phy_data->phydev_mode = XGBE_MDIO_MODE_CL22; break; /* MDIO Base-X support */ case XGBE_PORT_MODE_1000BASE_X: XGBE_SET_SUP(&pdata->phy, Autoneg); XGBE_SET_SUP(&pdata->phy, Pause); XGBE_SET_SUP(&pdata->phy, Asym_Pause); XGBE_SET_SUP(&pdata->phy, FIBRE); XGBE_SET_SUP(&pdata->phy, 1000baseX_Full); phy_data->start_mode = XGBE_MODE_X; phy_data->phydev_mode = XGBE_MDIO_MODE_CL22; break; /* MDIO NBase-T support */ case XGBE_PORT_MODE_NBASE_T: XGBE_SET_SUP(&pdata->phy, Autoneg); XGBE_SET_SUP(&pdata->phy, Pause); XGBE_SET_SUP(&pdata->phy, Asym_Pause); XGBE_SET_SUP(&pdata->phy, TP); if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) { XGBE_SET_SUP(&pdata->phy, 100baseT_Full); phy_data->start_mode = XGBE_MODE_SGMII_100; } if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) { XGBE_SET_SUP(&pdata->phy, 1000baseT_Full); phy_data->start_mode = XGBE_MODE_SGMII_1000; } if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_2500) { XGBE_SET_SUP(&pdata->phy, 2500baseT_Full); phy_data->start_mode = XGBE_MODE_KX_2500; } phy_data->phydev_mode = XGBE_MDIO_MODE_CL45; break; /* 10GBase-T support */ case XGBE_PORT_MODE_10GBASE_T: XGBE_SET_SUP(&pdata->phy, Autoneg); XGBE_SET_SUP(&pdata->phy, Pause); XGBE_SET_SUP(&pdata->phy, Asym_Pause); XGBE_SET_SUP(&pdata->phy, TP); if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) { XGBE_SET_SUP(&pdata->phy, 100baseT_Full); phy_data->start_mode = XGBE_MODE_SGMII_100; } if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) { XGBE_SET_SUP(&pdata->phy, 1000baseT_Full); phy_data->start_mode = XGBE_MODE_SGMII_1000; } if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) { XGBE_SET_SUP(&pdata->phy, 10000baseT_Full); phy_data->start_mode = XGBE_MODE_KR; } phy_data->phydev_mode = XGBE_MDIO_MODE_CL45; break; /* 10GBase-R support */ case XGBE_PORT_MODE_10GBASE_R: XGBE_SET_SUP(&pdata->phy, Autoneg); XGBE_SET_SUP(&pdata->phy, Pause); XGBE_SET_SUP(&pdata->phy, Asym_Pause); XGBE_SET_SUP(&pdata->phy, FIBRE); XGBE_SET_SUP(&pdata->phy, 10000baseSR_Full); 
XGBE_SET_SUP(&pdata->phy, 10000baseLR_Full); XGBE_SET_SUP(&pdata->phy, 10000baseLRM_Full); XGBE_SET_SUP(&pdata->phy, 10000baseER_Full); if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE) XGBE_SET_SUP(&pdata->phy, 10000baseR_FEC); phy_data->start_mode = XGBE_MODE_SFI; phy_data->phydev_mode = XGBE_MDIO_MODE_NONE; break; /* SFP support */ case XGBE_PORT_MODE_SFP: XGBE_SET_SUP(&pdata->phy, Autoneg); XGBE_SET_SUP(&pdata->phy, Pause); XGBE_SET_SUP(&pdata->phy, Asym_Pause); XGBE_SET_SUP(&pdata->phy, TP); XGBE_SET_SUP(&pdata->phy, FIBRE); if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_100) phy_data->start_mode = XGBE_MODE_SGMII_100; if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_1000) phy_data->start_mode = XGBE_MODE_SGMII_1000; if (phy_data->port_speeds & XGBE_PHY_PORT_SPEED_10000) phy_data->start_mode = XGBE_MODE_SFI; phy_data->phydev_mode = XGBE_MDIO_MODE_CL22; xgbe_phy_sfp_setup(pdata); DBGPR("%s: start %d mode %d adv 0x%x\n", __func__, phy_data->start_mode, phy_data->phydev_mode, pdata->phy.advertising); break; default: return (-EINVAL); } axgbe_printf(2, "%s: start %d mode %d adv 0x%x\n", __func__, phy_data->start_mode, phy_data->phydev_mode, pdata->phy.advertising); DBGPR("%s: conn type %d mode %d\n", __func__, phy_data->conn_type, phy_data->phydev_mode); if ((phy_data->conn_type & XGBE_CONN_TYPE_MDIO) && (phy_data->phydev_mode != XGBE_MDIO_MODE_NONE)) { ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->mdio_addr, phy_data->phydev_mode); if (ret) { axgbe_error("mdio port/clause not compatible (%d/%u)\n", phy_data->mdio_addr, phy_data->phydev_mode); return (-EINVAL); } } if (phy_data->redrv && !phy_data->redrv_if) { ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->redrv_addr, XGBE_MDIO_MODE_CL22); if (ret) { axgbe_error("redriver mdio port not compatible (%u)\n", phy_data->redrv_addr); return (-EINVAL); } } phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT; if (phy_data->port_mode != XGBE_PORT_MODE_SFP) { ret = mii_attach(pdata->dev, &pdata->axgbe_miibus, pdata->netdev, (ifm_change_cb_t)axgbe_ifmedia_upd, (ifm_stat_cb_t)axgbe_ifmedia_sts, BMSR_DEFCAPMASK, pdata->mdio_addr, MII_OFFSET_ANY, MIIF_FORCEANEG); if (ret){ axgbe_printf(2, "mii attach failed with err=(%d)\n", ret); return (-EINVAL); } } DBGPR("%s: return success\n", __func__); return (0); } void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *phy_if) { struct xgbe_phy_impl_if *phy_impl = &phy_if->phy_impl; phy_impl->init = xgbe_phy_init; phy_impl->exit = xgbe_phy_exit; phy_impl->reset = xgbe_phy_reset; phy_impl->start = xgbe_phy_start; phy_impl->stop = xgbe_phy_stop; phy_impl->link_status = xgbe_phy_link_status; phy_impl->valid_speed = xgbe_phy_valid_speed; phy_impl->use_mode = xgbe_phy_use_mode; phy_impl->set_mode = xgbe_phy_set_mode; phy_impl->get_mode = xgbe_phy_get_mode; phy_impl->switch_mode = xgbe_phy_switch_mode; phy_impl->cur_mode = xgbe_phy_cur_mode; phy_impl->get_type = xgbe_phy_get_type; phy_impl->an_mode = xgbe_phy_an_mode; phy_impl->an_config = xgbe_phy_an_config; phy_impl->an_advertising = xgbe_phy_an_advertising; phy_impl->an_outcome = xgbe_phy_an_outcome; phy_impl->an_pre = xgbe_phy_an_pre; phy_impl->an_post = xgbe_phy_an_post; phy_impl->kr_training_pre = xgbe_phy_kr_training_pre; phy_impl->kr_training_post = xgbe_phy_kr_training_post; phy_impl->module_info = xgbe_phy_module_info; phy_impl->module_eeprom = xgbe_phy_module_eeprom; }