Page MenuHomeFreeBSD

D8549.id25145.diff
No OneTemporary

D8549.id25145.diff

Index: sys/arm64/conf/GENERIC
===================================================================
--- sys/arm64/conf/GENERIC
+++ sys/arm64/conf/GENERIC
@@ -119,6 +119,7 @@
device mii
device miibus # MII bus support
device awg # Allwinner EMAC Gigabit Ethernet
+device axgbe # AMD Opteron A1100 integrated NIC
device em # Intel PRO/1000 Gigabit Ethernet Family
device ix # Intel 10Gb Ethernet Family
device msk # Marvell/SysKonnect Yukon II Gigabit Ethernet
Index: sys/conf/files.arm64
===================================================================
--- sys/conf/files.arm64
+++ sys/conf/files.arm64
@@ -146,6 +146,11 @@
crypto/des/des_enc.c optional crypto | ipsec | ipsec_support | netsmb
dev/acpica/acpi_if.m optional acpi
dev/ahci/ahci_generic.c optional ahci
+dev/axgbe/if_axgbe.c optional axgbe
+dev/axgbe/xgbe-desc.c optional axgbe
+dev/axgbe/xgbe-dev.c optional axgbe
+dev/axgbe/xgbe-drv.c optional axgbe
+dev/axgbe/xgbe-mdio.c optional axgbe
dev/cpufreq/cpufreq_dt.c optional cpufreq fdt
dev/hwpmc/hwpmc_arm64.c optional hwpmc
dev/hwpmc/hwpmc_arm64_md.c optional hwpmc
Index: sys/dev/axgbe/if_axgbe.c
===================================================================
--- /dev/null
+++ sys/dev/axgbe/if_axgbe.c
@@ -0,0 +1,619 @@
+/*-
+ * Copyright (c) 2016,2017 SoftIron Inc.
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * the sponsorship of SoftIron Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/queue.h>
+#include <sys/rman.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/sx.h>
+#include <sys/taskqueue.h>
+
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_var.h>
+#include <net/if_media.h>
+#include <net/if_types.h>
+
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+#include <machine/bus.h>
+
+#include "miibus_if.h"
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static device_probe_t axgbe_probe;
+static device_attach_t axgbe_attach;
+
+/*
+ * Per-device software state.  'prv' MUST remain the first member:
+ * axgbe_get_counter() casts the ifnet's if_softc pointer directly to
+ * struct xgbe_prv_data *, which is only valid at offset zero.
+ */
+struct axgbe_softc {
+	/* Must be first */
+	struct xgbe_prv_data prv;
+
+	uint8_t mac_addr[ETHER_ADDR_LEN];	/* from FDT "mac-address" */
+	struct ifmedia media;			/* ifmedia state for KX/KR links */
+};
+
+/* FDT compatible strings matched by the MAC driver. */
+static struct ofw_compat_data compat_data[] = {
+	{ "amd,xgbe-seattle-v1a",	true },
+	{ NULL,				false }
+};
+
+/*
+ * Resource layout for the old (split) FDT binding: the serdes/phy
+ * registers live on a separate phy node referenced by "phy-handle".
+ */
+static struct resource_spec old_phy_spec[] = {
+	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },	/* Rx/Tx regs */
+	{ SYS_RES_MEMORY,	1,	RF_ACTIVE },	/* Integration regs */
+	{ SYS_RES_MEMORY,	2,	RF_ACTIVE },	/* Integration regs */
+	{ SYS_RES_IRQ,		0,	RF_ACTIVE },	/* Interrupt */
+	{ -1, 0 }
+};
+
+static struct resource_spec old_mac_spec[] = {
+	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },	/* MAC regs */
+	{ SYS_RES_MEMORY,	1,	RF_ACTIVE },	/* PCS regs */
+	{ SYS_RES_IRQ,		0,	RF_ACTIVE },	/* Device interrupt */
+	/* Per-channel interrupts */
+	{ SYS_RES_IRQ,		1,	RF_ACTIVE | RF_OPTIONAL },
+	{ SYS_RES_IRQ,		2,	RF_ACTIVE | RF_OPTIONAL },
+	{ SYS_RES_IRQ,		3,	RF_ACTIVE | RF_OPTIONAL },
+	{ SYS_RES_IRQ,		4,	RF_ACTIVE | RF_OPTIONAL },
+	{ -1, 0 }
+};
+
+/*
+ * Resource layout for the new (single-node) FDT binding: all register
+ * windows and interrupts hang off the MAC node itself.
+ */
+static struct resource_spec mac_spec[] = {
+	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },	/* MAC regs */
+	{ SYS_RES_MEMORY,	1,	RF_ACTIVE },	/* PCS regs */
+	{ SYS_RES_MEMORY,	2,	RF_ACTIVE },	/* Rx/Tx regs */
+	{ SYS_RES_MEMORY,	3,	RF_ACTIVE },	/* Integration regs */
+	{ SYS_RES_MEMORY,	4,	RF_ACTIVE },	/* Integration regs */
+	{ SYS_RES_IRQ,		0,	RF_ACTIVE },	/* Device interrupt */
+	/* Per-channel and auto-negotiation interrupts */
+	{ SYS_RES_IRQ,		1,	RF_ACTIVE },
+	{ SYS_RES_IRQ,		2,	RF_ACTIVE | RF_OPTIONAL },
+	{ SYS_RES_IRQ,		3,	RF_ACTIVE | RF_OPTIONAL },
+	{ SYS_RES_IRQ,		4,	RF_ACTIVE | RF_OPTIONAL },
+	{ SYS_RES_IRQ,		5,	RF_ACTIVE | RF_OPTIONAL },
+	{ -1, 0 }
+};
+
+MALLOC_DEFINE(M_AXGBE, "axgbe", "axgbe data");
+
+/*
+ * ifnet if_init callback.  Only marks the interface as running; the
+ * hardware itself is brought up once during attach via xgbe_open(), so
+ * there is nothing further to program here.
+ */
+static void
+axgbe_init(void *p)
+{
+	struct axgbe_softc *sc;
+	struct ifnet *ifp;
+
+	sc = p;
+	ifp = sc->prv.netdev;
+	/* Idempotent: nothing to do if we are already up. */
+	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
+		return;
+
+	ifp->if_drv_flags |= IFF_DRV_RUNNING;
+}
+
+/*
+ * ifnet ioctl handler.  Handles MTU changes (bounded to
+ * [ETHERMIN, ETHERMTU_JUMBO]) and media requests locally; everything
+ * else is delegated to ether_ioctl().  SIOCSIFFLAGS is accepted as a
+ * no-op (up/down transitions are not acted upon here).
+ */
+static int
+axgbe_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data)
+{
+	struct axgbe_softc *sc = ifp->if_softc;
+	struct ifreq *ifr = (struct ifreq *)data;
+	int error;
+
+	switch(command) {
+	case SIOCSIFMTU:
+		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > ETHERMTU_JUMBO)
+			error = EINVAL;
+		else
+			error = xgbe_change_mtu(ifp, ifr->ifr_mtu);
+		break;
+	case SIOCSIFFLAGS:
+		error = 0;
+		break;
+	case SIOCSIFMEDIA:
+	case SIOCGIFMEDIA:
+		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
+		break;
+	default:
+		error = ether_ioctl(ifp, command, data);
+		break;
+	}
+
+	return (error);
+}
+
+/*
+ * ifnet if_qflush callback: no driver-private queues to drain, so just
+ * flush the generic ifnet send queue.
+ */
+static void
+axgbe_qflush(struct ifnet *ifp)
+{
+
+	if_qflush(ifp);
+}
+
+/*
+ * ifmedia change callback.  Translates the selected media word into
+ * the PHY speed/autoneg settings (under the auto-negotiation lock) and
+ * kicks off (re)negotiation.  phy_config_aneg() returns a Linux-style
+ * negative errno, hence the negation on return.
+ */
+static int
+axgbe_media_change(struct ifnet *ifp)
+{
+	struct axgbe_softc *sc;
+	int cur_media;
+
+	sc = ifp->if_softc;
+
+	/* an_mutex protects the phy speed/autoneg fields. */
+	sx_xlock(&sc->prv.an_mutex);
+	cur_media = sc->media.ifm_cur->ifm_media;
+
+	switch (IFM_SUBTYPE(cur_media)) {
+	case IFM_10G_KR:
+		sc->prv.phy.speed = SPEED_10000;
+		sc->prv.phy.autoneg = AUTONEG_DISABLE;
+		break;
+	case IFM_2500_KX:
+		sc->prv.phy.speed = SPEED_2500;
+		sc->prv.phy.autoneg = AUTONEG_DISABLE;
+		break;
+	case IFM_1000_KX:
+		sc->prv.phy.speed = SPEED_1000;
+		sc->prv.phy.autoneg = AUTONEG_DISABLE;
+		break;
+	case IFM_AUTO:
+		sc->prv.phy.autoneg = AUTONEG_ENABLE;
+		break;
+	}
+	sx_xunlock(&sc->prv.an_mutex);
+
+	return (-sc->prv.phy_if.phy_config_aneg(&sc->prv));
+}
+
+/*
+ * ifmedia status callback.  Reports link validity always; the active
+ * media word (speed/duplex) is filled in only when the PHY reports
+ * link up.
+ */
+static void
+axgbe_media_status(struct ifnet *ifp, struct ifmediareq *ifmr)
+{
+	struct axgbe_softc *sc;
+
+	sc = ifp->if_softc;
+
+	ifmr->ifm_status = IFM_AVALID;
+	if (!sc->prv.phy.link)
+		return;
+
+	ifmr->ifm_status |= IFM_ACTIVE;
+	ifmr->ifm_active = IFM_ETHER;
+
+	if (sc->prv.phy.duplex == DUPLEX_FULL)
+		ifmr->ifm_active |= IFM_FDX;
+	else
+		ifmr->ifm_active |= IFM_HDX;
+
+	switch (sc->prv.phy.speed) {
+	case SPEED_10000:
+		ifmr->ifm_active |= IFM_10G_KR;
+		break;
+	case SPEED_2500:
+		ifmr->ifm_active |= IFM_2500_KX;
+		break;
+	case SPEED_1000:
+		ifmr->ifm_active |= IFM_1000_KX;
+		break;
+	}
+}
+
+/*
+ * ifnet if_get_counter callback.  Refreshes the MMC hardware counters
+ * and maps them onto the generic ifnet counters.
+ *
+ * Note: if_softc is cast straight to struct xgbe_prv_data *; this
+ * relies on 'prv' being the first member of struct axgbe_softc.
+ */
+static uint64_t
+axgbe_get_counter(struct ifnet *ifp, ift_counter c)
+{
+	struct xgbe_prv_data *pdata = ifp->if_softc;
+	struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;
+
+	DBGPR("-->%s\n", __func__);
+
+	pdata->hw_if.read_mmc_stats(pdata);
+
+	switch(c) {
+	case IFCOUNTER_IPACKETS:
+		return (pstats->rxframecount_gb);
+	case IFCOUNTER_IERRORS:
+		/* Errors = all frames minus the good uni/multi/broadcast. */
+		return (pstats->rxframecount_gb -
+		    pstats->rxbroadcastframes_g -
+		    pstats->rxmulticastframes_g -
+		    pstats->rxunicastframes_g);
+	case IFCOUNTER_OPACKETS:
+		return (pstats->txframecount_gb);
+	case IFCOUNTER_OERRORS:
+		return (pstats->txframecount_gb - pstats->txframecount_g);
+	case IFCOUNTER_IBYTES:
+		return (pstats->rxoctetcount_gb);
+	case IFCOUNTER_OBYTES:
+		return (pstats->txoctetcount_gb);
+	default:
+		return (if_get_counter_default(ifp, c));
+	}
+}
+
+/*
+ * Probe: match an enabled FDT node against the MAC compatible table.
+ */
+static int
+axgbe_probe(device_t dev)
+{
+
+	if (!ofw_bus_status_okay(dev))
+		return (ENXIO);
+
+	if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data)
+		return (ENXIO);
+
+	device_set_desc(dev, "AMD 10 Gigabit Ethernet");
+	return (BUS_PROBE_DEFAULT);
+}
+
+/*
+ * Read an optional FDT property into 'data' (len bytes).
+ *
+ * Returns a tri-state value the callers switch on:
+ *   -1    property absent (caller falls back to defaults),
+ *   ENXIO property present but unreadable/invalid (caller fails attach),
+ *   0     property read successfully.
+ */
+static int
+axgbe_get_optional_prop(device_t dev, phandle_t node, const char *name,
+    int *data, size_t len)
+{
+
+	if (!OF_hasprop(node, name))
+		return (-1);
+
+	if (OF_getencprop(node, name, data, len) <= 0) {
+		device_printf(dev,"%s property is invalid\n", name);
+		return (ENXIO);
+	}
+
+	return (0);
+}
+
+/*
+ * Attach: map register windows and interrupts (supporting both the new
+ * single-node binding and the old split mac/phy binding selected by the
+ * presence of "phy-handle"), read the MAC address and serdes tuning
+ * properties from FDT, initialise the hardware layers and attach the
+ * ifnet.  The link is opened once here via xgbe_open().
+ *
+ * NOTE(review): the error paths below leak resources -- after a
+ * successful bus_alloc_resources() or if_alloc(), later failures return
+ * without bus_release_resources()/if_free().  TODO: add unwind labels.
+ */
+static int
+axgbe_attach(device_t dev)
+{
+	struct axgbe_softc *sc;
+	struct ifnet *ifp;
+	pcell_t phy_handle;
+	device_t phydev;
+	phandle_t node, phy_node;
+	struct resource *mac_res[11];
+	struct resource *phy_res[4];
+	ssize_t len;
+	int error, i, j;
+
+	sc = device_get_softc(dev);
+
+	node = ofw_bus_get_node(dev);
+	if (OF_getencprop(node, "phy-handle", &phy_handle,
+	    sizeof(phy_handle)) <= 0) {
+		/* New binding: everything lives on the MAC node. */
+		phy_node = node;
+
+		if (bus_alloc_resources(dev, mac_spec, mac_res)) {
+			device_printf(dev,
+			    "could not allocate mac resources\n");
+			return (ENXIO);
+		}
+
+		sc->prv.xgmac_res = mac_res[0];
+		sc->prv.xpcs_res = mac_res[1];
+		sc->prv.rxtx_res = mac_res[2];
+		sc->prv.sir0_res = mac_res[3];
+		sc->prv.sir1_res = mac_res[4];
+
+		sc->prv.dev_irq_res = mac_res[5];
+		sc->prv.per_channel_irq = OF_hasprop(node,
+		    XGBE_DMA_IRQS_PROPERTY);
+		for (i = 0, j = 6; j < nitems(mac_res) - 1 &&
+		    mac_res[j + 1] != NULL; i++, j++) {
+			if (sc->prv.per_channel_irq) {
+				sc->prv.chan_irq_res[i] = mac_res[j];
+			}
+		}
+
+		/* The last entry is the auto-negotiation interrupt */
+		sc->prv.an_irq_res = mac_res[j];
+	} else {
+		/* Old binding: serdes registers live on a separate node. */
+		phydev = OF_device_from_xref(phy_handle);
+		phy_node = ofw_bus_get_node(phydev);
+
+		if (bus_alloc_resources(phydev, old_phy_spec, phy_res)) {
+			device_printf(dev,
+			    "could not allocate phy resources\n");
+			return (ENXIO);
+		}
+
+		if (bus_alloc_resources(dev, old_mac_spec, mac_res)) {
+			device_printf(dev,
+			    "could not allocate mac resources\n");
+			return (ENXIO);
+		}
+
+		sc->prv.rxtx_res = phy_res[0];
+		sc->prv.sir0_res = phy_res[1];
+		sc->prv.sir1_res = phy_res[2];
+		sc->prv.an_irq_res = phy_res[3];
+
+		sc->prv.xgmac_res = mac_res[0];
+		sc->prv.xpcs_res = mac_res[1];
+		sc->prv.dev_irq_res = mac_res[2];
+		sc->prv.per_channel_irq = OF_hasprop(node,
+		    XGBE_DMA_IRQS_PROPERTY);
+		if (sc->prv.per_channel_irq) {
+			for (i = 0, j = 3; i < nitems(sc->prv.chan_irq_res) &&
+			    mac_res[j] != NULL; i++, j++) {
+				sc->prv.chan_irq_res[i] = mac_res[j];
+			}
+		}
+	}
+
+	if ((len = OF_getproplen(node, "mac-address")) < 0) {
+		device_printf(dev, "No mac-address property\n");
+		return (EINVAL);
+	}
+
+	if (len != ETHER_ADDR_LEN)
+		return (EINVAL);
+
+	OF_getprop(node, "mac-address", sc->mac_addr, ETHER_ADDR_LEN);
+
+	sc->prv.netdev = ifp = if_alloc(IFT_ETHER);
+	if (ifp == NULL) {
+		device_printf(dev, "Cannot alloc ifnet\n");
+		return (ENXIO);
+	}
+
+	sc->prv.dev = dev;
+	sc->prv.dmat = bus_get_dma_tag(dev);
+	sc->prv.phy.advertising = ADVERTISED_10000baseKR_Full |
+	    ADVERTISED_1000baseKX_Full;
+
+	/*
+	 * Read the needed properties from the phy node.
+	 */
+
+	/* This is documented as optional, but Linux requires it */
+	if (OF_getencprop(phy_node, XGBE_SPEEDSET_PROPERTY, &sc->prv.speed_set,
+	    sizeof(sc->prv.speed_set)) <= 0) {
+		device_printf(dev, "%s property is missing\n",
+		    XGBE_SPEEDSET_PROPERTY);
+		return (EINVAL);
+	}
+
+	/* Optional serdes tuning tables; fall back to defaults if absent. */
+	error = axgbe_get_optional_prop(dev, phy_node, XGBE_BLWC_PROPERTY,
+	    sc->prv.serdes_blwc, sizeof(sc->prv.serdes_blwc));
+	if (error > 0) {
+		return (error);
+	} else if (error < 0) {
+		sc->prv.serdes_blwc[0] = XGBE_SPEED_1000_BLWC;
+		sc->prv.serdes_blwc[1] = XGBE_SPEED_2500_BLWC;
+		sc->prv.serdes_blwc[2] = XGBE_SPEED_10000_BLWC;
+	}
+
+	error = axgbe_get_optional_prop(dev, phy_node, XGBE_CDR_RATE_PROPERTY,
+	    sc->prv.serdes_cdr_rate, sizeof(sc->prv.serdes_cdr_rate));
+	if (error > 0) {
+		return (error);
+	} else if (error < 0) {
+		sc->prv.serdes_cdr_rate[0] = XGBE_SPEED_1000_CDR;
+		sc->prv.serdes_cdr_rate[1] = XGBE_SPEED_2500_CDR;
+		sc->prv.serdes_cdr_rate[2] = XGBE_SPEED_10000_CDR;
+	}
+
+	error = axgbe_get_optional_prop(dev, phy_node, XGBE_PQ_SKEW_PROPERTY,
+	    sc->prv.serdes_pq_skew, sizeof(sc->prv.serdes_pq_skew));
+	if (error > 0) {
+		return (error);
+	} else if (error < 0) {
+		sc->prv.serdes_pq_skew[0] = XGBE_SPEED_1000_PQ;
+		sc->prv.serdes_pq_skew[1] = XGBE_SPEED_2500_PQ;
+		sc->prv.serdes_pq_skew[2] = XGBE_SPEED_10000_PQ;
+	}
+
+	error = axgbe_get_optional_prop(dev, phy_node, XGBE_TX_AMP_PROPERTY,
+	    sc->prv.serdes_tx_amp, sizeof(sc->prv.serdes_tx_amp));
+	if (error > 0) {
+		return (error);
+	} else if (error < 0) {
+		sc->prv.serdes_tx_amp[0] = XGBE_SPEED_1000_TXAMP;
+		sc->prv.serdes_tx_amp[1] = XGBE_SPEED_2500_TXAMP;
+		sc->prv.serdes_tx_amp[2] = XGBE_SPEED_10000_TXAMP;
+	}
+
+	error = axgbe_get_optional_prop(dev, phy_node, XGBE_DFE_CFG_PROPERTY,
+	    sc->prv.serdes_dfe_tap_cfg, sizeof(sc->prv.serdes_dfe_tap_cfg));
+	if (error > 0) {
+		return (error);
+	} else if (error < 0) {
+		sc->prv.serdes_dfe_tap_cfg[0] = XGBE_SPEED_1000_DFE_TAP_CONFIG;
+		sc->prv.serdes_dfe_tap_cfg[1] = XGBE_SPEED_2500_DFE_TAP_CONFIG;
+		sc->prv.serdes_dfe_tap_cfg[2] = XGBE_SPEED_10000_DFE_TAP_CONFIG;
+	}
+
+	error = axgbe_get_optional_prop(dev, phy_node, XGBE_DFE_ENA_PROPERTY,
+	    sc->prv.serdes_dfe_tap_ena, sizeof(sc->prv.serdes_dfe_tap_ena));
+	if (error > 0) {
+		return (error);
+	} else if (error < 0) {
+		sc->prv.serdes_dfe_tap_ena[0] = XGBE_SPEED_1000_DFE_TAP_ENABLE;
+		sc->prv.serdes_dfe_tap_ena[1] = XGBE_SPEED_2500_DFE_TAP_ENABLE;
+		sc->prv.serdes_dfe_tap_ena[2] = XGBE_SPEED_10000_DFE_TAP_ENABLE;
+	}
+
+	/* Check if the NIC is DMA coherent */
+	sc->prv.coherent = OF_hasprop(node, "dma-coherent");
+	if (sc->prv.coherent) {
+		sc->prv.axdomain = XGBE_DMA_OS_AXDOMAIN;
+		sc->prv.arcache = XGBE_DMA_OS_ARCACHE;
+		sc->prv.awcache = XGBE_DMA_OS_AWCACHE;
+	} else {
+		sc->prv.axdomain = XGBE_DMA_SYS_AXDOMAIN;
+		sc->prv.arcache = XGBE_DMA_SYS_ARCACHE;
+		sc->prv.awcache = XGBE_DMA_SYS_AWCACHE;
+	}
+
+	/* Create the lock & workqueues */
+	spin_lock_init(&sc->prv.xpcs_lock);
+	sc->prv.dev_workqueue = taskqueue_create("axgbe", M_WAITOK,
+	    taskqueue_thread_enqueue, &sc->prv.dev_workqueue);
+	taskqueue_start_threads(&sc->prv.dev_workqueue, 1, PI_NET,
+	    "axgbe taskq");
+
+	/* Set the needed pointers */
+	xgbe_init_function_ptrs_phy(&sc->prv.phy_if);
+	xgbe_init_function_ptrs_dev(&sc->prv.hw_if);
+	xgbe_init_function_ptrs_desc(&sc->prv.desc_if);
+
+	/* Reset the hardware */
+	sc->prv.hw_if.exit(&sc->prv);
+
+	/* Read the hardware features */
+	xgbe_get_all_hw_features(&sc->prv);
+
+	/* Set default values */
+	sc->prv.pblx8 = DMA_PBL_X8_ENABLE;
+	sc->prv.tx_desc_count = XGBE_TX_DESC_CNT;
+	sc->prv.tx_sf_mode = MTL_TSF_ENABLE;
+	sc->prv.tx_threshold = MTL_TX_THRESHOLD_64;
+	sc->prv.tx_pbl = DMA_PBL_16;
+	sc->prv.tx_osp_mode = DMA_OSP_ENABLE;
+	sc->prv.rx_desc_count = XGBE_RX_DESC_CNT;
+	sc->prv.rx_sf_mode = MTL_RSF_DISABLE;
+	sc->prv.rx_threshold = MTL_RX_THRESHOLD_64;
+	sc->prv.rx_pbl = DMA_PBL_16;
+	sc->prv.pause_autoneg = 1;
+	sc->prv.tx_pause = 1;
+	sc->prv.rx_pause = 1;
+	sc->prv.phy_speed = SPEED_UNKNOWN;
+	sc->prv.power_down = 0;
+
+	/* TODO: Limit to min(ncpus, hw rings) */
+	sc->prv.tx_ring_count = 1;
+	sc->prv.tx_q_count = 1;
+	sc->prv.rx_ring_count = 1;
+	sc->prv.rx_q_count = sc->prv.hw_feat.rx_q_cnt;
+
+	/* Init the PHY */
+	sc->prv.phy_if.phy_init(&sc->prv);
+
+	/* Set the coalescing */
+	xgbe_init_rx_coalesce(&sc->prv);
+	xgbe_init_tx_coalesce(&sc->prv);
+
+	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+	ifp->if_init = axgbe_init;
+	ifp->if_softc = sc;
+	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+	ifp->if_ioctl = axgbe_ioctl;
+	ifp->if_transmit = xgbe_xmit;
+	ifp->if_qflush = axgbe_qflush;
+	ifp->if_get_counter = axgbe_get_counter;
+
+	/* TODO: Support HW offload */
+	ifp->if_capabilities = 0;
+	ifp->if_capenable = 0;
+	ifp->if_hwassist = 0;
+
+	ether_ifattach(ifp, sc->mac_addr);
+
+	ifmedia_init(&sc->media, IFM_IMASK, axgbe_media_change,
+	    axgbe_media_status);
+#ifdef notyet
+	ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
+#endif
+	ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
+	ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+	ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
+
+	set_bit(XGBE_DOWN, &sc->prv.dev_state);
+
+	if (xgbe_open(ifp) < 0) {
+		device_printf(dev, "ndo_open failed\n");
+		return (ENXIO);
+	}
+
+	return (0);
+}
+
+/* newbus glue for the MAC driver, attached under simplebus (FDT). */
+static device_method_t axgbe_methods[] = {
+	/* Device interface */
+	DEVMETHOD(device_probe,		axgbe_probe),
+	DEVMETHOD(device_attach,	axgbe_attach),
+
+	{ 0, 0 }
+};
+
+static devclass_t axgbe_devclass;
+
+DEFINE_CLASS_0(axgbe, axgbe_driver, axgbe_methods,
+    sizeof(struct axgbe_softc));
+DRIVER_MODULE(axgbe, simplebus, axgbe_driver, axgbe_devclass, 0, 0);
+
+
+static struct ofw_compat_data phy_compat_data[] = {
+ { "amd,xgbe-phy-seattle-v1a", true },
+ { NULL, false }
+};
+
+/*
+ * Probe for the standalone phy node used by the old split binding.
+ * The description includes "phy" so the device is distinguishable from
+ * the MAC device (axgbe_probe uses the same base string) in dmesg.
+ */
+static int
+axgbephy_probe(device_t dev)
+{
+
+	if (!ofw_bus_status_okay(dev))
+		return (ENXIO);
+
+	if (!ofw_bus_search_compatible(dev, phy_compat_data)->ocd_data)
+		return (ENXIO);
+
+	device_set_desc(dev, "AMD 10 Gigabit Ethernet phy");
+	return (BUS_PROBE_DEFAULT);
+}
+
+/*
+ * Attach for the phy node: publish this device under its FDT xref so
+ * the MAC driver can find it via the "phy-handle" property
+ * (OF_device_from_xref() in axgbe_attach).  No resources are mapped
+ * here; the MAC driver allocates them from this device.
+ */
+static int
+axgbephy_attach(device_t dev)
+{
+	phandle_t node;
+
+	node = ofw_bus_get_node(dev);
+	OF_device_register_xref(OF_xref_from_node(node), dev);
+
+	return (0);
+}
+
+/*
+ * newbus glue for the phy driver.  Registered in an early bus pass so
+ * the xref is available before the MAC driver attaches.
+ */
+static device_method_t axgbephy_methods[] = {
+	/* Device interface */
+	DEVMETHOD(device_probe,		axgbephy_probe),
+	DEVMETHOD(device_attach,	axgbephy_attach),
+
+	{ 0, 0 }
+};
+
+static devclass_t axgbephy_devclass;
+
+DEFINE_CLASS_0(axgbephy, axgbephy_driver, axgbephy_methods, 0);
+EARLY_DRIVER_MODULE(axgbephy, simplebus, axgbephy_driver, axgbephy_devclass,
+    0, 0, BUS_PASS_RESOURCE + BUS_PASS_ORDER_MIDDLE);
Index: sys/dev/axgbe/xgbe-common.h
===================================================================
--- sys/dev/axgbe/xgbe-common.h
+++ sys/dev/axgbe/xgbe-common.h
@@ -117,6 +117,9 @@
#ifndef __XGBE_COMMON_H__
#define __XGBE_COMMON_H__
+#include <sys/bus.h>
+#include <sys/rman.h>
+
/* DMA register offsets */
#define DMA_MR 0x3000
#define DMA_SBMR 0x3004
@@ -1123,7 +1126,7 @@
* register definitions formed using the input names
*/
#define XGMAC_IOREAD(_pdata, _reg) \
- ioread32((_pdata)->xgmac_regs + _reg)
+ bus_read_4((_pdata)->xgmac_res, _reg)
#define XGMAC_IOREAD_BITS(_pdata, _reg, _field) \
GET_BITS(XGMAC_IOREAD((_pdata), _reg), \
@@ -1131,7 +1134,7 @@
_reg##_##_field##_WIDTH)
#define XGMAC_IOWRITE(_pdata, _reg, _val) \
- iowrite32((_val), (_pdata)->xgmac_regs + _reg)
+ bus_write_4((_pdata)->xgmac_res, _reg, (_val))
#define XGMAC_IOWRITE_BITS(_pdata, _reg, _field, _val) \
do { \
@@ -1147,7 +1150,7 @@
* base register value is calculated by the queue or traffic class number
*/
#define XGMAC_MTL_IOREAD(_pdata, _n, _reg) \
- ioread32((_pdata)->xgmac_regs + \
+ bus_read_4((_pdata)->xgmac_res, \
MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg)
#define XGMAC_MTL_IOREAD_BITS(_pdata, _n, _reg, _field) \
@@ -1156,8 +1159,8 @@
_reg##_##_field##_WIDTH)
#define XGMAC_MTL_IOWRITE(_pdata, _n, _reg, _val) \
- iowrite32((_val), (_pdata)->xgmac_regs + \
- MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg)
+ bus_write_4((_pdata)->xgmac_res, \
+ MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg, (_val))
#define XGMAC_MTL_IOWRITE_BITS(_pdata, _n, _reg, _field, _val) \
do { \
@@ -1173,7 +1176,7 @@
* base register value is obtained from the ring
*/
#define XGMAC_DMA_IOREAD(_channel, _reg) \
- ioread32((_channel)->dma_regs + _reg)
+ bus_space_read_4((_channel)->dma_tag, (_channel)->dma_handle, _reg)
#define XGMAC_DMA_IOREAD_BITS(_channel, _reg, _field) \
GET_BITS(XGMAC_DMA_IOREAD((_channel), _reg), \
@@ -1181,7 +1184,8 @@
_reg##_##_field##_WIDTH)
#define XGMAC_DMA_IOWRITE(_channel, _reg, _val) \
- iowrite32((_val), (_channel)->dma_regs + _reg)
+ bus_space_write_4((_channel)->dma_tag, (_channel)->dma_handle, \
+ _reg, (_val))
#define XGMAC_DMA_IOWRITE_BITS(_channel, _reg, _field, _val) \
do { \
@@ -1196,10 +1200,10 @@
* within the register values of XPCS registers.
*/
#define XPCS_IOWRITE(_pdata, _off, _val) \
- iowrite32(_val, (_pdata)->xpcs_regs + (_off))
+ bus_write_4((_pdata)->xpcs_res, (_off), _val)
#define XPCS_IOREAD(_pdata, _off) \
- ioread32((_pdata)->xpcs_regs + (_off))
+ bus_read_4((_pdata)->xpcs_res, (_off))
/* Macros for building, reading or writing register values or bits
* within the register values of SerDes integration registers.
@@ -1215,7 +1219,7 @@
_prefix##_##_field##_WIDTH, (_val))
#define XSIR0_IOREAD(_pdata, _reg) \
- ioread16((_pdata)->sir0_regs + _reg)
+ bus_read_2((_pdata)->sir0_res, _reg)
#define XSIR0_IOREAD_BITS(_pdata, _reg, _field) \
GET_BITS(XSIR0_IOREAD((_pdata), _reg), \
@@ -1223,7 +1227,7 @@
_reg##_##_field##_WIDTH)
#define XSIR0_IOWRITE(_pdata, _reg, _val) \
- iowrite16((_val), (_pdata)->sir0_regs + _reg)
+ bus_write_2((_pdata)->sir0_res, _reg, (_val))
#define XSIR0_IOWRITE_BITS(_pdata, _reg, _field, _val) \
do { \
@@ -1235,7 +1239,7 @@
} while (0)
#define XSIR1_IOREAD(_pdata, _reg) \
- ioread16((_pdata)->sir1_regs + _reg)
+ bus_read_2((_pdata)->sir1_res, _reg)
#define XSIR1_IOREAD_BITS(_pdata, _reg, _field) \
GET_BITS(XSIR1_IOREAD((_pdata), _reg), \
@@ -1243,7 +1247,7 @@
_reg##_##_field##_WIDTH)
#define XSIR1_IOWRITE(_pdata, _reg, _val) \
- iowrite16((_val), (_pdata)->sir1_regs + _reg)
+ bus_write_2((_pdata)->sir1_res, _reg, (_val))
#define XSIR1_IOWRITE_BITS(_pdata, _reg, _field, _val) \
do { \
@@ -1258,7 +1262,7 @@
* within the register values of SerDes RxTx registers.
*/
#define XRXTX_IOREAD(_pdata, _reg) \
- ioread16((_pdata)->rxtx_regs + _reg)
+ bus_read_2((_pdata)->rxtx_res, _reg)
#define XRXTX_IOREAD_BITS(_pdata, _reg, _field) \
GET_BITS(XRXTX_IOREAD((_pdata), _reg), \
@@ -1266,7 +1270,7 @@
_reg##_##_field##_WIDTH)
#define XRXTX_IOWRITE(_pdata, _reg, _val) \
- iowrite16((_val), (_pdata)->rxtx_regs + _reg)
+ bus_write_2((_pdata)->rxtx_res, _reg, (_val))
#define XRXTX_IOWRITE_BITS(_pdata, _reg, _field, _val) \
do { \
Index: sys/dev/axgbe/xgbe-dcb.c
===================================================================
--- sys/dev/axgbe/xgbe-dcb.c
+++ /dev/null
@@ -1,293 +0,0 @@
-/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/netdevice.h>
-#include <net/dcbnl.h>
-
-#include "xgbe.h"
-#include "xgbe-common.h"
-
-static int xgbe_dcb_ieee_getets(struct net_device *netdev,
- struct ieee_ets *ets)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
-
- /* Set number of supported traffic classes */
- ets->ets_cap = pdata->hw_feat.tc_cnt;
-
- if (pdata->ets) {
- ets->cbs = pdata->ets->cbs;
- memcpy(ets->tc_tx_bw, pdata->ets->tc_tx_bw,
- sizeof(ets->tc_tx_bw));
- memcpy(ets->tc_tsa, pdata->ets->tc_tsa,
- sizeof(ets->tc_tsa));
- memcpy(ets->prio_tc, pdata->ets->prio_tc,
- sizeof(ets->prio_tc));
- }
-
- return 0;
-}
-
-static int xgbe_dcb_ieee_setets(struct net_device *netdev,
- struct ieee_ets *ets)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- unsigned int i, tc_ets, tc_ets_weight;
- u8 max_tc = 0;
-
- tc_ets = 0;
- tc_ets_weight = 0;
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- netif_dbg(pdata, drv, netdev,
- "TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i,
- ets->tc_tx_bw[i], ets->tc_rx_bw[i],
- ets->tc_tsa[i]);
- netif_dbg(pdata, drv, netdev, "PRIO%u: TC=%hhu\n", i,
- ets->prio_tc[i]);
-
- max_tc = max_t(u8, max_tc, ets->prio_tc[i]);
- if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]))
- max_tc = max_t(u8, max_tc, i);
-
- switch (ets->tc_tsa[i]) {
- case IEEE_8021QAZ_TSA_STRICT:
- break;
- case IEEE_8021QAZ_TSA_ETS:
- tc_ets = 1;
- tc_ets_weight += ets->tc_tx_bw[i];
- break;
- default:
- netif_err(pdata, drv, netdev,
- "unsupported TSA algorithm (%hhu)\n",
- ets->tc_tsa[i]);
- return -EINVAL;
- }
- }
-
- /* Check maximum traffic class requested */
- if (max_tc >= pdata->hw_feat.tc_cnt) {
- netif_err(pdata, drv, netdev,
- "exceeded number of supported traffic classes\n");
- return -EINVAL;
- }
-
- /* Weights must add up to 100% */
- if (tc_ets && (tc_ets_weight != 100)) {
- netif_err(pdata, drv, netdev,
- "sum of ETS algorithm weights is not 100 (%u)\n",
- tc_ets_weight);
- return -EINVAL;
- }
-
- if (!pdata->ets) {
- pdata->ets = devm_kzalloc(pdata->dev, sizeof(*pdata->ets),
- GFP_KERNEL);
- if (!pdata->ets)
- return -ENOMEM;
- }
-
- pdata->num_tcs = max_tc + 1;
- memcpy(pdata->ets, ets, sizeof(*pdata->ets));
-
- pdata->hw_if.config_dcb_tc(pdata);
-
- return 0;
-}
-
-static int xgbe_dcb_ieee_getpfc(struct net_device *netdev,
- struct ieee_pfc *pfc)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
-
- /* Set number of supported PFC traffic classes */
- pfc->pfc_cap = pdata->hw_feat.tc_cnt;
-
- if (pdata->pfc) {
- pfc->pfc_en = pdata->pfc->pfc_en;
- pfc->mbc = pdata->pfc->mbc;
- pfc->delay = pdata->pfc->delay;
- }
-
- return 0;
-}
-
-static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
- struct ieee_pfc *pfc)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
-
- netif_dbg(pdata, drv, netdev,
- "cap=%hhu, en=%#hhx, mbc=%hhu, delay=%hhu\n",
- pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
-
- /* Check PFC for supported number of traffic classes */
- if (pfc->pfc_en & ~((1 << pdata->hw_feat.tc_cnt) - 1)) {
- netif_err(pdata, drv, netdev,
- "PFC requested for unsupported traffic class\n");
- return -EINVAL;
- }
-
- if (!pdata->pfc) {
- pdata->pfc = devm_kzalloc(pdata->dev, sizeof(*pdata->pfc),
- GFP_KERNEL);
- if (!pdata->pfc)
- return -ENOMEM;
- }
-
- memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));
-
- pdata->hw_if.config_dcb_pfc(pdata);
-
- return 0;
-}
-
-static u8 xgbe_dcb_getdcbx(struct net_device *netdev)
-{
- return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
-}
-
-static u8 xgbe_dcb_setdcbx(struct net_device *netdev, u8 dcbx)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- u8 support = xgbe_dcb_getdcbx(netdev);
-
- netif_dbg(pdata, drv, netdev, "DCBX=%#hhx\n", dcbx);
-
- if (dcbx & ~support)
- return 1;
-
- if ((dcbx & support) != support)
- return 1;
-
- return 0;
-}
-
-static const struct dcbnl_rtnl_ops xgbe_dcbnl_ops = {
- /* IEEE 802.1Qaz std */
- .ieee_getets = xgbe_dcb_ieee_getets,
- .ieee_setets = xgbe_dcb_ieee_setets,
- .ieee_getpfc = xgbe_dcb_ieee_getpfc,
- .ieee_setpfc = xgbe_dcb_ieee_setpfc,
-
- /* DCBX configuration */
- .getdcbx = xgbe_dcb_getdcbx,
- .setdcbx = xgbe_dcb_setdcbx,
-};
-
-const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void)
-{
- return &xgbe_dcbnl_ops;
-}
Index: sys/dev/axgbe/xgbe-debugfs.c
===================================================================
--- sys/dev/axgbe/xgbe-debugfs.c
+++ /dev/null
@@ -1,377 +0,0 @@
-/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/debugfs.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#include "xgbe.h"
-#include "xgbe-common.h"
-
-static ssize_t xgbe_common_read(char __user *buffer, size_t count,
- loff_t *ppos, unsigned int value)
-{
- char *buf;
- ssize_t len;
-
- if (*ppos != 0)
- return 0;
-
- buf = kasprintf(GFP_KERNEL, "0x%08x\n", value);
- if (!buf)
- return -ENOMEM;
-
- if (count < strlen(buf)) {
- kfree(buf);
- return -ENOSPC;
- }
-
- len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
- kfree(buf);
-
- return len;
-}
-
-static ssize_t xgbe_common_write(const char __user *buffer, size_t count,
- loff_t *ppos, unsigned int *value)
-{
- char workarea[32];
- ssize_t len;
- int ret;
-
- if (*ppos != 0)
- return 0;
-
- if (count >= sizeof(workarea))
- return -ENOSPC;
-
- len = simple_write_to_buffer(workarea, sizeof(workarea) - 1, ppos,
- buffer, count);
- if (len < 0)
- return len;
-
- workarea[len] = '\0';
- ret = kstrtouint(workarea, 16, value);
- if (ret)
- return -EIO;
-
- return len;
-}
-
-static ssize_t xgmac_reg_addr_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct xgbe_prv_data *pdata = filp->private_data;
-
- return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xgmac_reg);
-}
-
-static ssize_t xgmac_reg_addr_write(struct file *filp,
- const char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct xgbe_prv_data *pdata = filp->private_data;
-
- return xgbe_common_write(buffer, count, ppos,
- &pdata->debugfs_xgmac_reg);
-}
-
-static ssize_t xgmac_reg_value_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct xgbe_prv_data *pdata = filp->private_data;
- unsigned int value;
-
- value = XGMAC_IOREAD(pdata, pdata->debugfs_xgmac_reg);
-
- return xgbe_common_read(buffer, count, ppos, value);
-}
-
-static ssize_t xgmac_reg_value_write(struct file *filp,
- const char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct xgbe_prv_data *pdata = filp->private_data;
- unsigned int value;
- ssize_t len;
-
- len = xgbe_common_write(buffer, count, ppos, &value);
- if (len < 0)
- return len;
-
- XGMAC_IOWRITE(pdata, pdata->debugfs_xgmac_reg, value);
-
- return len;
-}
-
-static const struct file_operations xgmac_reg_addr_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = xgmac_reg_addr_read,
- .write = xgmac_reg_addr_write,
-};
-
-static const struct file_operations xgmac_reg_value_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = xgmac_reg_value_read,
- .write = xgmac_reg_value_write,
-};
-
-static ssize_t xpcs_mmd_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct xgbe_prv_data *pdata = filp->private_data;
-
- return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xpcs_mmd);
-}
-
-static ssize_t xpcs_mmd_write(struct file *filp, const char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct xgbe_prv_data *pdata = filp->private_data;
-
- return xgbe_common_write(buffer, count, ppos,
- &pdata->debugfs_xpcs_mmd);
-}
-
-static ssize_t xpcs_reg_addr_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct xgbe_prv_data *pdata = filp->private_data;
-
- return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xpcs_reg);
-}
-
-static ssize_t xpcs_reg_addr_write(struct file *filp, const char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct xgbe_prv_data *pdata = filp->private_data;
-
- return xgbe_common_write(buffer, count, ppos,
- &pdata->debugfs_xpcs_reg);
-}
-
-static ssize_t xpcs_reg_value_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct xgbe_prv_data *pdata = filp->private_data;
- unsigned int value;
-
- value = XMDIO_READ(pdata, pdata->debugfs_xpcs_mmd,
- pdata->debugfs_xpcs_reg);
-
- return xgbe_common_read(buffer, count, ppos, value);
-}
-
-static ssize_t xpcs_reg_value_write(struct file *filp,
- const char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct xgbe_prv_data *pdata = filp->private_data;
- unsigned int value;
- ssize_t len;
-
- len = xgbe_common_write(buffer, count, ppos, &value);
- if (len < 0)
- return len;
-
- XMDIO_WRITE(pdata, pdata->debugfs_xpcs_mmd, pdata->debugfs_xpcs_reg,
- value);
-
- return len;
-}
-
-static const struct file_operations xpcs_mmd_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = xpcs_mmd_read,
- .write = xpcs_mmd_write,
-};
-
-static const struct file_operations xpcs_reg_addr_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = xpcs_reg_addr_read,
- .write = xpcs_reg_addr_write,
-};
-
-static const struct file_operations xpcs_reg_value_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = xpcs_reg_value_read,
- .write = xpcs_reg_value_write,
-};
-
-void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
-{
- struct dentry *pfile;
- char *buf;
-
- /* Set defaults */
- pdata->debugfs_xgmac_reg = 0;
- pdata->debugfs_xpcs_mmd = 1;
- pdata->debugfs_xpcs_reg = 0;
-
- buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
- if (!buf)
- return;
-
- pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL);
- if (!pdata->xgbe_debugfs) {
- netdev_err(pdata->netdev, "debugfs_create_dir failed\n");
- kfree(buf);
- return;
- }
-
- pfile = debugfs_create_file("xgmac_register", 0600,
- pdata->xgbe_debugfs, pdata,
- &xgmac_reg_addr_fops);
- if (!pfile)
- netdev_err(pdata->netdev, "debugfs_create_file failed\n");
-
- pfile = debugfs_create_file("xgmac_register_value", 0600,
- pdata->xgbe_debugfs, pdata,
- &xgmac_reg_value_fops);
- if (!pfile)
- netdev_err(pdata->netdev, "debugfs_create_file failed\n");
-
- pfile = debugfs_create_file("xpcs_mmd", 0600,
- pdata->xgbe_debugfs, pdata,
- &xpcs_mmd_fops);
- if (!pfile)
- netdev_err(pdata->netdev, "debugfs_create_file failed\n");
-
- pfile = debugfs_create_file("xpcs_register", 0600,
- pdata->xgbe_debugfs, pdata,
- &xpcs_reg_addr_fops);
- if (!pfile)
- netdev_err(pdata->netdev, "debugfs_create_file failed\n");
-
- pfile = debugfs_create_file("xpcs_register_value", 0600,
- pdata->xgbe_debugfs, pdata,
- &xpcs_reg_value_fops);
- if (!pfile)
- netdev_err(pdata->netdev, "debugfs_create_file failed\n");
-
- kfree(buf);
-}
-
-void xgbe_debugfs_exit(struct xgbe_prv_data *pdata)
-{
- debugfs_remove_recursive(pdata->xgbe_debugfs);
- pdata->xgbe_debugfs = NULL;
-}
Index: sys/dev/axgbe/xgbe-desc.c
===================================================================
--- sys/dev/axgbe/xgbe-desc.c
+++ sys/dev/axgbe/xgbe-desc.c
@@ -128,45 +128,29 @@
if (!ring)
return;
+ bus_dmamap_destroy(ring->mbuf_dmat, ring->mbuf_map);
+ bus_dma_tag_destroy(ring->mbuf_dmat);
+
+ ring->mbuf_map = NULL;
+ ring->mbuf_dmat = NULL;
+
if (ring->rdata) {
for (i = 0; i < ring->rdesc_count; i++) {
rdata = XGBE_GET_DESC_DATA(ring, i);
xgbe_unmap_rdata(pdata, rdata);
}
- kfree(ring->rdata);
+ free(ring->rdata, M_AXGBE);
ring->rdata = NULL;
}
- if (ring->rx_hdr_pa.pages) {
- dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
- ring->rx_hdr_pa.pages_len, DMA_FROM_DEVICE);
- put_page(ring->rx_hdr_pa.pages);
-
- ring->rx_hdr_pa.pages = NULL;
- ring->rx_hdr_pa.pages_len = 0;
- ring->rx_hdr_pa.pages_offset = 0;
- ring->rx_hdr_pa.pages_dma = 0;
- }
-
- if (ring->rx_buf_pa.pages) {
- dma_unmap_page(pdata->dev, ring->rx_buf_pa.pages_dma,
- ring->rx_buf_pa.pages_len, DMA_FROM_DEVICE);
- put_page(ring->rx_buf_pa.pages);
+ bus_dmamap_unload(ring->rdesc_dmat, ring->rdesc_map);
+ bus_dmamem_free(ring->rdesc_dmat, ring->rdesc, ring->rdesc_map);
+ bus_dma_tag_destroy(ring->rdesc_dmat);
- ring->rx_buf_pa.pages = NULL;
- ring->rx_buf_pa.pages_len = 0;
- ring->rx_buf_pa.pages_offset = 0;
- ring->rx_buf_pa.pages_dma = 0;
- }
-
- if (ring->rdesc) {
- dma_free_coherent(pdata->dev,
- (sizeof(struct xgbe_ring_desc) *
- ring->rdesc_count),
- ring->rdesc, ring->rdesc_dma);
- ring->rdesc = NULL;
- }
+ ring->rdesc_map = NULL;
+ ring->rdesc_dmat = NULL;
+ ring->rdesc = NULL;
}
static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
@@ -185,32 +169,71 @@
DBGPR("<--xgbe_free_ring_resources\n");
}
+static void xgbe_ring_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg,
+ int error)
+{
+ if (error)
+ return;
+ *(bus_addr_t *) arg = segs->ds_addr;
+}
+
static int xgbe_init_ring(struct xgbe_prv_data *pdata,
struct xgbe_ring *ring, unsigned int rdesc_count)
{
+ bus_size_t len;
+ int err, flags;
+
DBGPR("-->xgbe_init_ring\n");
if (!ring)
return 0;
+ flags = 0;
+ if (pdata->coherent)
+ flags = BUS_DMA_COHERENT;
+
/* Descriptors */
ring->rdesc_count = rdesc_count;
- ring->rdesc = dma_alloc_coherent(pdata->dev,
- (sizeof(struct xgbe_ring_desc) *
- rdesc_count), &ring->rdesc_dma,
- GFP_KERNEL);
- if (!ring->rdesc)
- return -ENOMEM;
+ len = sizeof(struct xgbe_ring_desc) * rdesc_count;
+ err = bus_dma_tag_create(pdata->dmat, 512, 0, BUS_SPACE_MAXADDR,
+ BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, flags, NULL, NULL,
+ &ring->rdesc_dmat);
+ if (err != 0) {
+ printf("Unable to create the DMA tag: %d\n", err);
+ return -err;
+ }
+
+ err = bus_dmamem_alloc(ring->rdesc_dmat, (void **)&ring->rdesc,
+ BUS_DMA_WAITOK | BUS_DMA_COHERENT, &ring->rdesc_map);
+ if (err != 0) {
+ bus_dma_tag_destroy(ring->rdesc_dmat);
+ printf("Unable to allocate DMA memory: %d\n", err);
+ return -err;
+ }
+ err = bus_dmamap_load(ring->rdesc_dmat, ring->rdesc_map, ring->rdesc,
+ len, xgbe_ring_dmamap_cb, &ring->rdesc_paddr, 0);
+ if (err != 0) {
+ bus_dmamem_free(ring->rdesc_dmat, ring->rdesc, ring->rdesc_map);
+ bus_dma_tag_destroy(ring->rdesc_dmat);
+ printf("Unable to load DMA memory\n");
+ return -err;
+ }
/* Descriptor information */
- ring->rdata = kcalloc(rdesc_count, sizeof(struct xgbe_ring_data),
- GFP_KERNEL);
- if (!ring->rdata)
- return -ENOMEM;
+ ring->rdata = malloc(rdesc_count * sizeof(struct xgbe_ring_data),
+ M_AXGBE, M_WAITOK | M_ZERO);
+
+ /* Create the space DMA tag for mbufs */
+ err = bus_dma_tag_create(pdata->dmat, 1, 0, BUS_SPACE_MAXADDR,
+ BUS_SPACE_MAXADDR, NULL, NULL, XGBE_TX_MAX_BUF_SIZE * rdesc_count,
+ rdesc_count, XGBE_TX_MAX_BUF_SIZE, flags, NULL, NULL,
+ &ring->mbuf_dmat);
+ if (err != 0)
+ return -err;
- netif_dbg(pdata, drv, pdata->netdev,
- "rdesc=%p, rdesc_dma=%pad, rdata=%p\n",
- ring->rdesc, &ring->rdesc_dma, ring->rdata);
+ err = bus_dmamap_create(ring->mbuf_dmat, 0, &ring->mbuf_map);
+ if (err != 0)
+ return -err;
DBGPR("<--xgbe_init_ring\n");
@@ -227,25 +250,17 @@
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
- netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
- channel->name);
-
ret = xgbe_init_ring(pdata, channel->tx_ring,
pdata->tx_desc_count);
if (ret) {
- netdev_alert(pdata->netdev,
- "error initializing Tx ring\n");
+ printf("error initializing Tx ring\n");
goto err_ring;
}
- netif_dbg(pdata, drv, pdata->netdev, "%s - Rx ring:\n",
- channel->name);
-
ret = xgbe_init_ring(pdata, channel->rx_ring,
pdata->rx_desc_count);
if (ret) {
- netdev_alert(pdata->netdev,
- "error initializing Rx ring\n");
+ printf("error initializing Rx ring\n");
goto err_ring;
}
}
@@ -260,93 +275,58 @@
return ret;
}
-static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
- struct xgbe_page_alloc *pa, gfp_t gfp, int order)
-{
- struct page *pages = NULL;
- dma_addr_t pages_dma;
- int ret;
-
- /* Try to obtain pages, decreasing order if necessary */
- gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
- while (order >= 0) {
- pages = alloc_pages(gfp, order);
- if (pages)
- break;
-
- order--;
- }
- if (!pages)
- return -ENOMEM;
-
- /* Map the pages */
- pages_dma = dma_map_page(pdata->dev, pages, 0,
- PAGE_SIZE << order, DMA_FROM_DEVICE);
- ret = dma_mapping_error(pdata->dev, pages_dma);
- if (ret) {
- put_page(pages);
- return ret;
- }
-
- pa->pages = pages;
- pa->pages_len = PAGE_SIZE << order;
- pa->pages_offset = 0;
- pa->pages_dma = pages_dma;
-
- return 0;
-}
-
-static void xgbe_set_buffer_data(struct xgbe_buffer_data *bd,
- struct xgbe_page_alloc *pa,
- unsigned int len)
-{
- get_page(pa->pages);
- bd->pa = *pa;
-
- bd->dma_base = pa->pages_dma;
- bd->dma_off = pa->pages_offset;
- bd->dma_len = len;
-
- pa->pages_offset += len;
- if ((pa->pages_offset + len) > pa->pages_len) {
- /* This data descriptor is responsible for unmapping page(s) */
- bd->pa_unmap = *pa;
-
- /* Get a new allocation next time */
- pa->pages = NULL;
- pa->pages_len = 0;
- pa->pages_offset = 0;
- pa->pages_dma = 0;
- }
-}
-
static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
struct xgbe_ring *ring,
struct xgbe_ring_data *rdata)
{
- int order, ret;
+ bus_dmamap_t mbuf_map;
+ bus_dma_segment_t segs[2];
+ struct mbuf *m0, *m1;
+ int err, nsegs;
+
+ m0 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MCLBYTES);
+ if (m0 == NULL)
+ return (-ENOBUFS);
+
+ m1 = m_getjcl(M_NOWAIT, MT_DATA, 0, MCLBYTES);
+ if (m1 == NULL) {
+ m_freem(m0);
+ return (-ENOBUFS);
+ }
+
+ m0->m_next = m1;
+ m0->m_flags |= M_PKTHDR;
+ m0->m_len = MHLEN;
+ m0->m_pkthdr.len = MHLEN + MCLBYTES;
+
+ m1->m_len = MCLBYTES;
+ m1->m_next = NULL;
+ m1->m_pkthdr.len = MCLBYTES;
- if (!ring->rx_hdr_pa.pages) {
- ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0);
- if (ret)
- return ret;
+ err = bus_dmamap_create(ring->mbuf_dmat, 0, &mbuf_map);
+ if (err != 0) {
+ m_freem(m0);
+ return (-err);
}
- if (!ring->rx_buf_pa.pages) {
- order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0);
- ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC,
- order);
- if (ret)
- return ret;
+ err = bus_dmamap_load_mbuf_sg(ring->mbuf_dmat, mbuf_map, m0, segs,
+ &nsegs, BUS_DMA_NOWAIT);
+ if (err != 0) {
+ m_freem(m0);
+ bus_dmamap_destroy(ring->mbuf_dmat, mbuf_map);
+ return (-err);
}
- /* Set up the header page info */
- xgbe_set_buffer_data(&rdata->rx.hdr, &ring->rx_hdr_pa,
- XGBE_SKB_ALLOC_SIZE);
+ KASSERT(nsegs == 2,
+ ("xgbe_map_rx_buffer: Unable to handle multiple segments %d",
+ nsegs));
- /* Set up the buffer page info */
- xgbe_set_buffer_data(&rdata->rx.buf, &ring->rx_buf_pa,
- pdata->rx_buf_size);
+ rdata->mb = m0;
+ rdata->mbuf_free = 0;
+ rdata->mbuf_dmat = ring->mbuf_dmat;
+ rdata->mbuf_map = mbuf_map;
+ rdata->mbuf_hdr_paddr = segs[0].ds_addr;
+ rdata->mbuf_data_paddr = segs[1].ds_addr;
return 0;
}
@@ -358,7 +338,7 @@
struct xgbe_ring *ring;
struct xgbe_ring_data *rdata;
struct xgbe_ring_desc *rdesc;
- dma_addr_t rdesc_dma;
+ bus_addr_t rdesc_paddr;
unsigned int i, j;
DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");
@@ -370,16 +350,16 @@
break;
rdesc = ring->rdesc;
- rdesc_dma = ring->rdesc_dma;
+ rdesc_paddr = ring->rdesc_paddr;
for (j = 0; j < ring->rdesc_count; j++) {
rdata = XGBE_GET_DESC_DATA(ring, j);
rdata->rdesc = rdesc;
- rdata->rdesc_dma = rdesc_dma;
+ rdata->rdata_paddr = rdesc_paddr;
rdesc++;
- rdesc_dma += sizeof(struct xgbe_ring_desc);
+ rdesc_paddr += sizeof(struct xgbe_ring_desc);
}
ring->cur = 0;
@@ -399,7 +379,7 @@
struct xgbe_ring *ring;
struct xgbe_ring_desc *rdesc;
struct xgbe_ring_data *rdata;
- dma_addr_t rdesc_dma;
+ bus_addr_t rdesc_paddr;
unsigned int i, j;
DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");
@@ -411,19 +391,19 @@
break;
rdesc = ring->rdesc;
- rdesc_dma = ring->rdesc_dma;
+ rdesc_paddr = ring->rdesc_paddr;
for (j = 0; j < ring->rdesc_count; j++) {
rdata = XGBE_GET_DESC_DATA(ring, j);
rdata->rdesc = rdesc;
- rdata->rdesc_dma = rdesc_dma;
+ rdata->rdata_paddr = rdesc_paddr;
if (xgbe_map_rx_buffer(pdata, ring, rdata))
break;
rdesc++;
- rdesc_dma += sizeof(struct xgbe_ring_desc);
+ rdesc_paddr += sizeof(struct xgbe_ring_desc);
}
ring->cur = 0;
@@ -431,78 +411,81 @@
hw_if->rx_desc_init(channel);
}
-
- DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
}
static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
struct xgbe_ring_data *rdata)
{
- if (rdata->skb_dma) {
- if (rdata->mapped_as_page) {
- dma_unmap_page(pdata->dev, rdata->skb_dma,
- rdata->skb_dma_len, DMA_TO_DEVICE);
- } else {
- dma_unmap_single(pdata->dev, rdata->skb_dma,
- rdata->skb_dma_len, DMA_TO_DEVICE);
- }
- rdata->skb_dma = 0;
- rdata->skb_dma_len = 0;
- }
-
- if (rdata->skb) {
- dev_kfree_skb_any(rdata->skb);
- rdata->skb = NULL;
- }
- if (rdata->rx.hdr.pa.pages)
- put_page(rdata->rx.hdr.pa.pages);
+ if (rdata->mbuf_map != NULL)
+ bus_dmamap_destroy(rdata->mbuf_dmat, rdata->mbuf_map);
- if (rdata->rx.hdr.pa_unmap.pages) {
- dma_unmap_page(pdata->dev, rdata->rx.hdr.pa_unmap.pages_dma,
- rdata->rx.hdr.pa_unmap.pages_len,
- DMA_FROM_DEVICE);
- put_page(rdata->rx.hdr.pa_unmap.pages);
- }
-
- if (rdata->rx.buf.pa.pages)
- put_page(rdata->rx.buf.pa.pages);
+ if (rdata->mbuf_free)
+ m_freem(rdata->mb);
- if (rdata->rx.buf.pa_unmap.pages) {
- dma_unmap_page(pdata->dev, rdata->rx.buf.pa_unmap.pages_dma,
- rdata->rx.buf.pa_unmap.pages_len,
- DMA_FROM_DEVICE);
- put_page(rdata->rx.buf.pa_unmap.pages);
- }
+ rdata->mb = NULL;
+ rdata->mbuf_free = 0;
+ rdata->mbuf_hdr_paddr = 0;
+ rdata->mbuf_data_paddr = 0;
+ rdata->mbuf_len = 0;
memset(&rdata->tx, 0, sizeof(rdata->tx));
memset(&rdata->rx, 0, sizeof(rdata->rx));
+}
+
+struct xgbe_map_tx_skb_data {
+ struct xgbe_ring *ring;
+ struct xgbe_packet_data *packet;
+ unsigned int cur_index;
+};
+
+static void xgbe_map_tx_skb_cb(void *callback_arg, bus_dma_segment_t *segs,
+ int nseg, bus_size_t mapsize, int error)
+{
+ struct xgbe_map_tx_skb_data *data;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring *ring;
+ int i;
+
+ if (error != 0)
+ return;
+
+ data = callback_arg;
+ ring = data->ring;
+
+ for (i = 0; i < nseg; i++) {
+ rdata = XGBE_GET_DESC_DATA(ring, data->cur_index);
+
+ KASSERT(segs[i].ds_len <= XGBE_TX_MAX_BUF_SIZE,
+ ("%s: Segment size is too large %ld > %d", __func__,
+ segs[i].ds_len, XGBE_TX_MAX_BUF_SIZE));
+
+ if (i == 0) {
+ rdata->mbuf_dmat = ring->mbuf_dmat;
+ bus_dmamap_create(ring->mbuf_dmat, 0, &ring->mbuf_map);
+ }
- rdata->mapped_as_page = 0;
+ rdata->mbuf_hdr_paddr = 0;
+ rdata->mbuf_data_paddr = segs[i].ds_addr;
+ rdata->mbuf_len = segs[i].ds_len;
- if (rdata->state_saved) {
- rdata->state_saved = 0;
- rdata->state.skb = NULL;
- rdata->state.len = 0;
- rdata->state.error = 0;
+ data->packet->length += rdata->mbuf_len;
+
+ data->cur_index++;
}
}
-static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
+static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct mbuf *m)
{
- struct xgbe_prv_data *pdata = channel->pdata;
struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_map_tx_skb_data cbdata;
struct xgbe_ring_data *rdata;
struct xgbe_packet_data *packet;
- struct skb_frag_struct *frag;
- dma_addr_t skb_dma;
unsigned int start_index, cur_index;
- unsigned int offset, tso, vlan, datalen, len;
- unsigned int i;
+ int err;
DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);
- offset = 0;
start_index = ring->cur;
cur_index = ring->cur;
@@ -510,105 +493,24 @@
packet->rdesc_count = 0;
packet->length = 0;
- tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
- TSO_ENABLE);
- vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
- VLAN_CTAG);
-
- /* Save space for a context descriptor if needed */
- if ((tso && (packet->mss != ring->tx.cur_mss)) ||
- (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
- cur_index++;
- rdata = XGBE_GET_DESC_DATA(ring, cur_index);
-
- if (tso) {
- /* Map the TSO header */
- skb_dma = dma_map_single(pdata->dev, skb->data,
- packet->header_len, DMA_TO_DEVICE);
- if (dma_mapping_error(pdata->dev, skb_dma)) {
- netdev_alert(pdata->netdev, "dma_map_single failed\n");
- goto err_out;
- }
- rdata->skb_dma = skb_dma;
- rdata->skb_dma_len = packet->header_len;
- netif_dbg(pdata, tx_queued, pdata->netdev,
- "skb header: index=%u, dma=%pad, len=%u\n",
- cur_index, &skb_dma, packet->header_len);
-
- offset = packet->header_len;
-
- packet->length += packet->header_len;
-
- cur_index++;
- rdata = XGBE_GET_DESC_DATA(ring, cur_index);
- }
-
- /* Map the (remainder of the) packet */
- for (datalen = skb_headlen(skb) - offset; datalen; ) {
- len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);
-
- skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(pdata->dev, skb_dma)) {
- netdev_alert(pdata->netdev, "dma_map_single failed\n");
- goto err_out;
- }
- rdata->skb_dma = skb_dma;
- rdata->skb_dma_len = len;
- netif_dbg(pdata, tx_queued, pdata->netdev,
- "skb data: index=%u, dma=%pad, len=%u\n",
- cur_index, &skb_dma, len);
-
- datalen -= len;
- offset += len;
-
- packet->length += len;
+ cbdata.ring = ring;
+ cbdata.packet = packet;
+ cbdata.cur_index = cur_index;
- cur_index++;
- rdata = XGBE_GET_DESC_DATA(ring, cur_index);
- }
+ err = bus_dmamap_load_mbuf(ring->mbuf_dmat, ring->mbuf_map, m,
+ xgbe_map_tx_skb_cb, &cbdata, BUS_DMA_NOWAIT);
+ if (err != 0) /* TODO: Undo the mapping */
+ return (-err);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- netif_dbg(pdata, tx_queued, pdata->netdev,
- "mapping frag %u\n", i);
-
- frag = &skb_shinfo(skb)->frags[i];
- offset = 0;
-
- for (datalen = skb_frag_size(frag); datalen; ) {
- len = min_t(unsigned int, datalen,
- XGBE_TX_MAX_BUF_SIZE);
-
- skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
- len, DMA_TO_DEVICE);
- if (dma_mapping_error(pdata->dev, skb_dma)) {
- netdev_alert(pdata->netdev,
- "skb_frag_dma_map failed\n");
- goto err_out;
- }
- rdata->skb_dma = skb_dma;
- rdata->skb_dma_len = len;
- rdata->mapped_as_page = 1;
- netif_dbg(pdata, tx_queued, pdata->netdev,
- "skb frag: index=%u, dma=%pad, len=%u\n",
- cur_index, &skb_dma, len);
-
- datalen -= len;
- offset += len;
-
- packet->length += len;
-
- cur_index++;
- rdata = XGBE_GET_DESC_DATA(ring, cur_index);
- }
- }
+ cur_index = cbdata.cur_index;
- /* Save the skb address in the last entry. We always have some data
+ /* Save the mbuf address in the last entry. We always have some data
* that has been mapped so rdata is always advanced past the last
* piece of mapped data - use the entry pointed to by cur_index - 1.
*/
rdata = XGBE_GET_DESC_DATA(ring, cur_index - 1);
- rdata->skb = skb;
+ rdata->mb = m;
+ rdata->mbuf_free = 1;
/* Save the number of descriptor entries used */
packet->rdesc_count = cur_index - start_index;
@@ -616,16 +518,6 @@
DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);
return packet->rdesc_count;
-
-err_out:
- while (start_index < cur_index) {
- rdata = XGBE_GET_DESC_DATA(ring, start_index++);
- xgbe_unmap_rdata(pdata, rdata);
- }
-
- DBGPR("<--xgbe_map_tx_skb: count=0\n");
-
- return 0;
}
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
Index: sys/dev/axgbe/xgbe-dev.c
===================================================================
--- sys/dev/axgbe/xgbe-dev.c
+++ sys/dev/axgbe/xgbe-dev.c
@@ -114,15 +114,17 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <linux/phy.h>
-#include <linux/mdio.h>
-#include <linux/clk.h>
-#include <linux/bitrev.h>
-#include <linux/crc32.h>
+#include <sys/cdefs.h>
+
+#include <sys/param.h>
+#include <sys/kernel.h>
#include "xgbe.h"
#include "xgbe-common.h"
+#include <net/if_dl.h>
+#include <net/if_var.h>
+
static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
unsigned int usec)
{
@@ -352,118 +354,6 @@
XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
}
-static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
- unsigned int index, unsigned int val)
-{
- unsigned int wait;
- int ret = 0;
-
- mutex_lock(&pdata->rss_mutex);
-
- if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
- ret = -EBUSY;
- goto unlock;
- }
-
- XGMAC_IOWRITE(pdata, MAC_RSSDR, val);
-
- XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
- XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
- XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
- XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);
-
- wait = 1000;
- while (wait--) {
- if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
- goto unlock;
-
- usleep_range(1000, 1500);
- }
-
- ret = -EBUSY;
-
-unlock:
- mutex_unlock(&pdata->rss_mutex);
-
- return ret;
-}
-
-static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
-{
- unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
- unsigned int *key = (unsigned int *)&pdata->rss_key;
- int ret;
-
- while (key_regs--) {
- ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
- key_regs, *key++);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
-{
- unsigned int i;
- int ret;
-
- for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
- ret = xgbe_write_rss_reg(pdata,
- XGBE_RSS_LOOKUP_TABLE_TYPE, i,
- pdata->rss_table[i]);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
-{
- memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));
-
- return xgbe_write_rss_hash_key(pdata);
-}
-
-static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
- const u32 *table)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
- XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);
-
- return xgbe_write_rss_lookup_table(pdata);
-}
-
-static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
-{
- int ret;
-
- if (!pdata->hw_feat.rss)
- return -EOPNOTSUPP;
-
- /* Program the hash key */
- ret = xgbe_write_rss_hash_key(pdata);
- if (ret)
- return ret;
-
- /* Program the lookup table */
- ret = xgbe_write_rss_lookup_table(pdata);
- if (ret)
- return ret;
-
- /* Set the RSS options */
- XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
-
- /* Enable RSS */
- XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);
-
- return 0;
-}
-
static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
{
if (!pdata->hw_feat.rss)
@@ -476,19 +366,11 @@
static void xgbe_config_rss(struct xgbe_prv_data *pdata)
{
- int ret;
if (!pdata->hw_feat.rss)
return;
- if (pdata->netdev->features & NETIF_F_RXHASH)
- ret = xgbe_enable_rss(pdata);
- else
- ret = xgbe_disable_rss(pdata);
-
- if (ret)
- netdev_err(pdata->netdev,
- "error configuring RSS, RSS disabled\n");
+ xgbe_disable_rss(pdata);
}
static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
@@ -518,44 +400,13 @@
static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
{
- struct ieee_pfc *pfc = pdata->pfc;
- struct ieee_ets *ets = pdata->ets;
unsigned int max_q_count, q_count;
unsigned int reg, reg_val;
unsigned int i;
/* Set MTL flow control */
for (i = 0; i < pdata->rx_q_count; i++) {
- unsigned int ehfc = 0;
-
- if (pfc && ets) {
- unsigned int prio;
-
- for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
- unsigned int tc;
-
- /* Does this queue handle the priority? */
- if (pdata->prio2q_map[prio] != i)
- continue;
-
- /* Get the Traffic Class for this priority */
- tc = ets->prio_tc[prio];
-
- /* Check if flow control should be enabled */
- if (pfc->pfc_en & (1 << tc)) {
- ehfc = 1;
- break;
- }
- }
- } else {
- ehfc = 1;
- }
-
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);
-
- netif_dbg(pdata, drv, pdata->netdev,
- "flow control %s for RXq%u\n",
- ehfc ? "enabled" : "disabled", i);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1);
}
/* Set MAC flow control */
@@ -594,9 +445,8 @@
static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
{
- struct ieee_pfc *pfc = pdata->pfc;
- if (pdata->tx_pause || (pfc && pfc->pfc_en))
+ if (pdata->tx_pause)
xgbe_enable_tx_flow_control(pdata);
else
xgbe_disable_tx_flow_control(pdata);
@@ -606,9 +456,8 @@
static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
{
- struct ieee_pfc *pfc = pdata->pfc;
- if (pdata->rx_pause || (pfc && pfc->pfc_en))
+ if (pdata->rx_pause)
xgbe_enable_rx_flow_control(pdata);
else
xgbe_disable_rx_flow_control(pdata);
@@ -618,13 +467,11 @@
static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
{
- struct ieee_pfc *pfc = pdata->pfc;
xgbe_config_tx_flow_control(pdata);
xgbe_config_rx_flow_control(pdata);
- XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE,
- (pfc && pfc->pfc_en) ? 1 : 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
}
static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
@@ -794,47 +641,10 @@
return 0;
}
-static u32 xgbe_vid_crc32_le(__le16 vid_le)
-{
- u32 poly = 0xedb88320; /* CRCPOLY_LE */
- u32 crc = ~0;
- u32 temp = 0;
- unsigned char *data = (unsigned char *)&vid_le;
- unsigned char data_byte = 0;
- int i, bits;
-
- bits = get_bitmask_order(VLAN_VID_MASK);
- for (i = 0; i < bits; i++) {
- if ((i % 8) == 0)
- data_byte = data[i / 8];
-
- temp = ((crc & 1) ^ data_byte) & 1;
- crc >>= 1;
- data_byte >>= 1;
-
- if (temp)
- crc ^= poly;
- }
-
- return crc;
-}
-
static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
{
- u32 crc;
- u16 vid;
- __le16 vid_le;
u16 vlan_hash_table = 0;
- /* Generate the VLAN Hash Table value */
- for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
- /* Get the CRC32 value of the VLAN ID */
- vid_le = cpu_to_le16(vid);
- crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;
-
- vlan_hash_table |= (1 << crc);
- }
-
/* Set the VLAN Hash Table filtering register */
XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);
@@ -849,17 +659,10 @@
if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
return 0;
- netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
- enable ? "entering" : "leaving");
XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
/* Hardware will still perform VLAN filtering in promiscuous mode */
- if (enable) {
- xgbe_disable_rx_vlan_filtering(pdata);
- } else {
- if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
- xgbe_enable_rx_vlan_filtering(pdata);
- }
+ xgbe_disable_rx_vlan_filtering(pdata);
return 0;
}
@@ -872,15 +675,13 @@
if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
return 0;
- netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
- enable ? "entering" : "leaving");
XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);
return 0;
}
static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
- struct netdev_hw_addr *ha, unsigned int *mac_reg)
+ char *addr, unsigned int *mac_reg)
{
unsigned int mac_addr_hi, mac_addr_lo;
u8 *mac_addr;
@@ -888,19 +689,15 @@
mac_addr_lo = 0;
mac_addr_hi = 0;
- if (ha) {
+ if (addr) {
mac_addr = (u8 *)&mac_addr_lo;
- mac_addr[0] = ha->addr[0];
- mac_addr[1] = ha->addr[1];
- mac_addr[2] = ha->addr[2];
- mac_addr[3] = ha->addr[3];
+ mac_addr[0] = addr[0];
+ mac_addr[1] = addr[1];
+ mac_addr[2] = addr[2];
+ mac_addr[3] = addr[3];
mac_addr = (u8 *)&mac_addr_hi;
- mac_addr[0] = ha->addr[4];
- mac_addr[1] = ha->addr[5];
-
- netif_dbg(pdata, drv, pdata->netdev,
- "adding mac address %pM at %#x\n",
- ha->addr, *mac_reg);
+ mac_addr[0] = addr[4];
+ mac_addr[1] = addr[5];
XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
}
@@ -913,78 +710,23 @@
static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
{
- struct net_device *netdev = pdata->netdev;
- struct netdev_hw_addr *ha;
unsigned int mac_reg;
unsigned int addn_macs;
mac_reg = MAC_MACA1HR;
addn_macs = pdata->hw_feat.addn_mac;
- if (netdev_uc_count(netdev) > addn_macs) {
- xgbe_set_promiscuous_mode(pdata, 1);
- } else {
- netdev_for_each_uc_addr(ha, netdev) {
- xgbe_set_mac_reg(pdata, ha, &mac_reg);
- addn_macs--;
- }
-
- if (netdev_mc_count(netdev) > addn_macs) {
- xgbe_set_all_multicast_mode(pdata, 1);
- } else {
- netdev_for_each_mc_addr(ha, netdev) {
- xgbe_set_mac_reg(pdata, ha, &mac_reg);
- addn_macs--;
- }
- }
- }
+ xgbe_set_mac_reg(pdata, pdata->mac_addr, &mac_reg);
+ addn_macs--;
/* Clear remaining additional MAC address entries */
while (addn_macs--)
xgbe_set_mac_reg(pdata, NULL, &mac_reg);
}
-static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata)
-{
- struct net_device *netdev = pdata->netdev;
- struct netdev_hw_addr *ha;
- unsigned int hash_reg;
- unsigned int hash_table_shift, hash_table_count;
- u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE];
- u32 crc;
- unsigned int i;
-
- hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
- hash_table_count = pdata->hw_feat.hash_table_size / 32;
- memset(hash_table, 0, sizeof(hash_table));
-
- /* Build the MAC Hash Table register values */
- netdev_for_each_uc_addr(ha, netdev) {
- crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
- crc >>= hash_table_shift;
- hash_table[crc >> 5] |= (1 << (crc & 0x1f));
- }
-
- netdev_for_each_mc_addr(ha, netdev) {
- crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
- crc >>= hash_table_shift;
- hash_table[crc >> 5] |= (1 << (crc & 0x1f));
- }
-
- /* Set the MAC Hash Table registers */
- hash_reg = MAC_HTR0;
- for (i = 0; i < hash_table_count; i++) {
- XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]);
- hash_reg += MAC_HTR_INC;
- }
-}
-
static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
{
- if (pdata->hw_feat.hash_table_size)
- xgbe_set_mac_hash_table(pdata);
- else
- xgbe_set_mac_addn_addrs(pdata);
+ xgbe_set_mac_addn_addrs(pdata);
return 0;
}
@@ -1005,11 +747,11 @@
static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
{
- struct net_device *netdev = pdata->netdev;
unsigned int pr_mode, am_mode;
- pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
- am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
+ /* XXX */
+ pr_mode = 0;
+ am_mode = 0;
xgbe_set_promiscuous_mode(pdata, pr_mode);
xgbe_set_all_multicast_mode(pdata, am_mode);
@@ -1108,8 +850,7 @@
rdesc->desc2 = 0;
rdesc->desc3 = 0;
- /* Make sure ownership is written to the descriptor */
- dma_wmb();
+ dsb(sy);
}
static void xgbe_tx_desc_init(struct xgbe_channel *channel)
@@ -1135,9 +876,9 @@
/* Update the starting address of descriptor ring */
rdata = XGBE_GET_DESC_DATA(ring, start_index);
XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
- upper_32_bits(rdata->rdesc_dma));
+ upper_32_bits(rdata->rdata_paddr));
XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
- lower_32_bits(rdata->rdesc_dma));
+ lower_32_bits(rdata->rdata_paddr));
DBGPR("<--tx_desc_init\n");
}
@@ -1146,21 +887,9 @@
struct xgbe_ring_data *rdata, unsigned int index)
{
struct xgbe_ring_desc *rdesc = rdata->rdesc;
- unsigned int rx_usecs = pdata->rx_usecs;
- unsigned int rx_frames = pdata->rx_frames;
unsigned int inte;
- dma_addr_t hdr_dma, buf_dma;
- if (!rx_usecs && !rx_frames) {
- /* No coalescing, interrupt for every descriptor */
- inte = 1;
- } else {
- /* Set interrupt based on Rx frame coalescing setting */
- if (rx_frames && !((index + 1) % rx_frames))
- inte = 1;
- else
- inte = 0;
- }
+ inte = 1;
/* Reset the Rx descriptor
* Set buffer 1 (lo) address to header dma address (lo)
@@ -1169,25 +898,18 @@
* Set buffer 2 (hi) address to buffer dma address (hi) and
* set control bits OWN and INTE
*/
- hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
- buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
- rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
- rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
- rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
- rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->mbuf_hdr_paddr));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->mbuf_hdr_paddr));
+ rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->mbuf_data_paddr));
+ rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->mbuf_data_paddr));
XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
- /* Since the Rx DMA engine is likely running, make sure everything
- * is written to the descriptor(s) before setting the OWN bit
- * for the descriptor
- */
- dma_wmb();
+ dsb(sy);
XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
- /* Make sure ownership is written to the descriptor */
- dma_wmb();
+ dsb(sy);
}
static void xgbe_rx_desc_init(struct xgbe_channel *channel)
@@ -1208,246 +930,37 @@
xgbe_rx_desc_reset(pdata, rdata, i);
}
+ bus_dmamap_sync(ring->rdesc_dmat, ring->rdesc_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+
/* Update the total number of Rx descriptors */
XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
/* Update the starting address of descriptor ring */
rdata = XGBE_GET_DESC_DATA(ring, start_index);
XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
- upper_32_bits(rdata->rdesc_dma));
+ upper_32_bits(rdata->rdata_paddr));
XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
- lower_32_bits(rdata->rdesc_dma));
+ lower_32_bits(rdata->rdata_paddr));
/* Update the Rx Descriptor Tail Pointer */
rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
- lower_32_bits(rdata->rdesc_dma));
+ lower_32_bits(rdata->rdata_paddr));
DBGPR("<--rx_desc_init\n");
}
-static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
- unsigned int addend)
-{
- /* Set the addend register value and tell the device */
- XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
- XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
-
- /* Wait for addend update to complete */
- while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
- udelay(5);
-}
-
-static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
- unsigned int nsec)
-{
- /* Set the time values and tell the device */
- XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
- XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
- XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
-
- /* Wait for time update to complete */
- while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
- udelay(5);
-}
-
-static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
-{
- u64 nsec;
-
- nsec = XGMAC_IOREAD(pdata, MAC_STSR);
- nsec *= NSEC_PER_SEC;
- nsec += XGMAC_IOREAD(pdata, MAC_STNR);
-
- return nsec;
-}
-
-static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
-{
- unsigned int tx_snr;
- u64 nsec;
-
- tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
- if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
- return 0;
-
- nsec = XGMAC_IOREAD(pdata, MAC_TXSSR);
- nsec *= NSEC_PER_SEC;
- nsec += tx_snr;
-
- return nsec;
-}
-
-static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
- struct xgbe_ring_desc *rdesc)
-{
- u64 nsec;
-
- if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
- !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
- nsec = le32_to_cpu(rdesc->desc1);
- nsec <<= 32;
- nsec |= le32_to_cpu(rdesc->desc0);
- if (nsec != 0xffffffffffffffffULL) {
- packet->rx_tstamp = nsec;
- XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
- RX_TSTAMP, 1);
- }
- }
-}
-
-static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
- unsigned int mac_tscr)
-{
- /* Set one nano-second accuracy */
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);
-
- /* Set fine timestamp update */
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);
-
- /* Overwrite earlier timestamps */
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);
-
- XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);
-
- /* Exit if timestamping is not enabled */
- if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
- return 0;
-
- /* Initialize time registers */
- XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
- XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
- xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
- xgbe_set_tstamp_time(pdata, 0, 0);
-
- /* Initialize the timecounter */
- timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
- ktime_to_ns(ktime_get_real()));
-
- return 0;
-}
-
-static void xgbe_config_tc(struct xgbe_prv_data *pdata)
-{
- unsigned int offset, queue, prio;
- u8 i;
-
- netdev_reset_tc(pdata->netdev);
- if (!pdata->num_tcs)
- return;
-
- netdev_set_num_tc(pdata->netdev, pdata->num_tcs);
-
- for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) {
- while ((queue < pdata->tx_q_count) &&
- (pdata->q2tc_map[queue] == i))
- queue++;
-
- netif_dbg(pdata, drv, pdata->netdev, "TC%u using TXq%u-%u\n",
- i, offset, queue - 1);
- netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset);
- offset = queue;
- }
-
- if (!pdata->ets)
- return;
-
- for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
- netdev_set_prio_tc_map(pdata->netdev, prio,
- pdata->ets->prio_tc[prio]);
-}
-
-static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
-{
- struct ieee_ets *ets = pdata->ets;
- unsigned int total_weight, min_weight, weight;
- unsigned int mask, reg, reg_val;
- unsigned int i, prio;
-
- if (!ets)
- return;
-
- /* Set Tx to deficit weighted round robin scheduling algorithm (when
- * traffic class is using ETS algorithm)
- */
- XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR);
-
- /* Set Traffic Class algorithms */
- total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt;
- min_weight = total_weight / 100;
- if (!min_weight)
- min_weight = 1;
-
- for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
- /* Map the priorities to the traffic class */
- mask = 0;
- for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
- if (ets->prio_tc[prio] == i)
- mask |= (1 << prio);
- }
- mask &= 0xff;
-
- netif_dbg(pdata, drv, pdata->netdev, "TC%u PRIO mask=%#x\n",
- i, mask);
- reg = MTL_TCPM0R + (MTL_TCPM_INC * (i / MTL_TCPM_TC_PER_REG));
- reg_val = XGMAC_IOREAD(pdata, reg);
-
- reg_val &= ~(0xff << ((i % MTL_TCPM_TC_PER_REG) << 3));
- reg_val |= (mask << ((i % MTL_TCPM_TC_PER_REG) << 3));
-
- XGMAC_IOWRITE(pdata, reg, reg_val);
-
- /* Set the traffic class algorithm */
- switch (ets->tc_tsa[i]) {
- case IEEE_8021QAZ_TSA_STRICT:
- netif_dbg(pdata, drv, pdata->netdev,
- "TC%u using SP\n", i);
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
- MTL_TSA_SP);
- break;
- case IEEE_8021QAZ_TSA_ETS:
- weight = total_weight * ets->tc_tx_bw[i] / 100;
- weight = clamp(weight, min_weight, total_weight);
-
- netif_dbg(pdata, drv, pdata->netdev,
- "TC%u using DWRR (weight %u)\n", i, weight);
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
- MTL_TSA_ETS);
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
- weight);
- break;
- }
- }
-
- xgbe_config_tc(pdata);
-}
-
-static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
-{
- xgbe_config_flow_control(pdata);
-}
-
static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
struct xgbe_ring *ring)
{
- struct xgbe_prv_data *pdata = channel->pdata;
struct xgbe_ring_data *rdata;
- /* Make sure everything is written before the register write */
- wmb();
-
/* Issue a poll command to Tx DMA by writing address
* of next immediate free descriptor */
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
- lower_32_bits(rdata->rdesc_dma));
-
- /* Start the Tx timer */
- if (pdata->tx_usecs && !channel->tx_timer_active) {
- channel->tx_timer_active = 1;
- mod_timer(&channel->tx_timer,
- jiffies + usecs_to_jiffies(pdata->tx_usecs));
- }
+ lower_32_bits(rdata->rdata_paddr));
ring->tx.xmit_more = 0;
}
@@ -1459,8 +972,6 @@
struct xgbe_ring_data *rdata;
struct xgbe_ring_desc *rdesc;
struct xgbe_packet_data *packet = &ring->packet_data;
- unsigned int csum, tso, vlan;
- unsigned int tso_context, vlan_context;
unsigned int tx_set_ic;
int start_index = ring->cur;
int cur_index = ring->cur;
@@ -1468,23 +979,6 @@
DBGPR("-->xgbe_dev_xmit\n");
- csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
- CSUM_ENABLE);
- tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
- TSO_ENABLE);
- vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
- VLAN_CTAG);
-
- if (tso && (packet->mss != ring->tx.cur_mss))
- tso_context = 1;
- else
- tso_context = 0;
-
- if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
- vlan_context = 1;
- else
- vlan_context = 0;
-
/* Determine if an interrupt should be generated for this Tx:
* Interrupt:
* - Tx frame count exceeds the frame count setting
@@ -1505,69 +999,18 @@
tx_set_ic = 1;
else
tx_set_ic = 0;
+ tx_set_ic = 1;
rdata = XGBE_GET_DESC_DATA(ring, cur_index);
rdesc = rdata->rdesc;
- /* Create a context descriptor if this is a TSO packet */
- if (tso_context || vlan_context) {
- if (tso_context) {
- netif_dbg(pdata, tx_queued, pdata->netdev,
- "TSO context descriptor, mss=%u\n",
- packet->mss);
-
- /* Set the MSS size */
- XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
- MSS, packet->mss);
-
- /* Mark it as a CONTEXT descriptor */
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
- CTXT, 1);
-
- /* Indicate this descriptor contains the MSS */
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
- TCMSSV, 1);
-
- ring->tx.cur_mss = packet->mss;
- }
-
- if (vlan_context) {
- netif_dbg(pdata, tx_queued, pdata->netdev,
- "VLAN context descriptor, ctag=%u\n",
- packet->vlan_ctag);
-
- /* Mark it as a CONTEXT descriptor */
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
- CTXT, 1);
-
- /* Set the VLAN tag */
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
- VT, packet->vlan_ctag);
-
- /* Indicate this descriptor contains the VLAN tag */
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
- VLTV, 1);
-
- ring->tx.cur_vlan_ctag = packet->vlan_ctag;
- }
-
- cur_index++;
- rdata = XGBE_GET_DESC_DATA(ring, cur_index);
- rdesc = rdata->rdesc;
- }
-
/* Update buffer address (for TSO this is the header) */
- rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
- rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->mbuf_data_paddr));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->mbuf_data_paddr));
/* Update the buffer length */
XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
- rdata->skb_dma_len);
-
- /* VLAN tag insertion check */
- if (vlan)
- XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
- TX_NORMAL_DESC2_VLAN_INSERT);
+ rdata->mbuf_len);
/* Timestamp enablement check */
if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
@@ -1583,28 +1026,12 @@
if (cur_index != start_index)
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
- if (tso) {
- /* Enable TSO */
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
- packet->tcp_payload_len);
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
- packet->tcp_header_len / 4);
-
- pdata->ext_stats.tx_tso_packets++;
- } else {
- /* Enable CRC and Pad Insertion */
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
-
- /* Enable HW CSUM */
- if (csum)
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
- CIC, 0x3);
+ /* Enable CRC and Pad Insertion */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
- /* Set the total length to be transmitted */
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
- packet->length);
- }
+ /* Set the total length to be transmitted */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
+ packet->length);
for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) {
cur_index++;
@@ -1612,23 +1039,18 @@
rdesc = rdata->rdesc;
/* Update buffer address */
- rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
- rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->mbuf_data_paddr));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->mbuf_data_paddr));
/* Update the buffer length */
XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
- rdata->skb_dma_len);
+ rdata->mbuf_len);
/* Set OWN bit */
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
/* Mark it as NORMAL descriptor */
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
-
- /* Enable HW CSUM */
- if (csum)
- XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
- CIC, 0x3);
}
/* Set LAST bit for the last descriptor */
@@ -1642,31 +1064,28 @@
rdata->tx.packets = packet->tx_packets;
rdata->tx.bytes = packet->tx_bytes;
+ /* Sync the DMA buffers */
+ bus_dmamap_sync(ring->rdesc_dmat, ring->rdesc_map,
+ BUS_DMASYNC_PREWRITE);
+ bus_dmamap_sync(ring->mbuf_dmat, ring->mbuf_map,
+ BUS_DMASYNC_PREWRITE);
+
/* In case the Tx DMA engine is running, make sure everything
* is written to the descriptor(s) before setting the OWN bit
* for the first descriptor
*/
- dma_wmb();
/* Set OWN bit for the first descriptor */
rdata = XGBE_GET_DESC_DATA(ring, start_index);
rdesc = rdata->rdesc;
XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
- if (netif_msg_tx_queued(pdata))
- xgbe_dump_tx_desc(pdata, ring, start_index,
- packet->rdesc_count, 1);
-
- /* Make sure ownership is written to the descriptor */
- smp_wmb();
+ /* Sync to ensure the OWN bit was seen */
+ bus_dmamap_sync(ring->rdesc_dmat, ring->rdesc_map,
+ BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
ring->cur = cur_index + 1;
- if (!packet->skb->xmit_more ||
- netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
- channel->queue_index)))
- xgbe_tx_start_xmit(channel, ring);
- else
- ring->tx.xmit_more = 1;
+ xgbe_tx_start_xmit(channel, ring);
DBGPR(" %s: descriptors %u to %u written\n",
channel->name, start_index & (ring->rdesc_count - 1),
@@ -1677,39 +1096,27 @@
static int xgbe_dev_read(struct xgbe_channel *channel)
{
- struct xgbe_prv_data *pdata = channel->pdata;
struct xgbe_ring *ring = channel->rx_ring;
struct xgbe_ring_data *rdata;
struct xgbe_ring_desc *rdesc;
struct xgbe_packet_data *packet = &ring->packet_data;
- struct net_device *netdev = pdata->netdev;
- unsigned int err, etlt, l34t;
+ unsigned int err, etlt;
DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
rdesc = rdata->rdesc;
+ bus_dmamap_sync(ring->rdesc_dmat, ring->rdesc_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
+ dsb(sy);
+
/* Check for data availability */
if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
return 1;
- /* Make sure descriptor fields are read after reading the OWN bit */
- dma_rmb();
-
- if (netif_msg_rx_status(pdata))
- xgbe_dump_rx_desc(pdata, ring, ring->cur);
-
- if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
- /* Timestamp Context Descriptor */
- xgbe_get_rx_tstamp(packet, rdesc);
-
- XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
- CONTEXT, 1);
- XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
- CONTEXT_NEXT, 0);
- return 0;
- }
+ dsb(sy);
/* Normal Descriptor, be sure Context Descriptor bit is off */
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0);
@@ -1723,28 +1130,6 @@
if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
RX_NORMAL_DESC2, HL);
- if (rdata->rx.hdr_len)
- pdata->ext_stats.rx_split_header_packets++;
- }
-
- /* Get the RSS hash */
- if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
- XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
- RSS_HASH, 1);
-
- packet->rss_hash = le32_to_cpu(rdesc->desc1);
-
- l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
- switch (l34t) {
- case RX_DESC3_L34T_IPV4_TCP:
- case RX_DESC3_L34T_IPV4_UDP:
- case RX_DESC3_L34T_IPV6_TCP:
- case RX_DESC3_L34T_IPV6_UDP:
- packet->rss_hash_type = PKT_HASH_TYPE_L4;
- break;
- default:
- packet->rss_hash_type = PKT_HASH_TYPE_L3;
- }
}
/* Get the packet length */
@@ -1761,29 +1146,11 @@
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
INCOMPLETE, 0);
- /* Set checksum done indicator as appropriate */
- if (netdev->features & NETIF_F_RXCSUM)
- XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
- CSUM_DONE, 1);
-
/* Check for errors (only valid in last descriptor) */
err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
- netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);
- if (!err || !etlt) {
- /* No error if err is 0 or etlt is 0 */
- if ((etlt == 0x09) &&
- (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
- XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
- VLAN_CTAG, 1);
- packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
- RX_NORMAL_DESC0,
- OVT);
- netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
- packet->vlan_ctag);
- }
- } else {
+ if (err && etlt) {
if ((etlt == 0x05) || (etlt == 0x06))
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
CSUM_DONE, 0);
@@ -1792,6 +1159,9 @@
FRAME, 1);
}
+ bus_dmamap_sync(ring->mbuf_dmat, rdata->mbuf_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+
DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
ring->cur & (ring->rdesc_count - 1), ring->cur);
@@ -1909,11 +1279,11 @@
/* Issue a software reset */
XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
- usleep_range(10, 15);
+ DELAY(10);
/* Poll Until Poll Condition */
while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
- usleep_range(500, 600);
+ DELAY(500);
if (!count)
return -EBUSY;
@@ -1938,7 +1308,7 @@
count = 2000;
while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i,
MTL_Q_TQOMR, FTQ))
- usleep_range(500, 600);
+ DELAY(500);
if (!count)
return -EBUSY;
@@ -2035,10 +1405,6 @@
for (i = 0; i < pdata->tx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
-
- netif_info(pdata, drv, pdata->netdev,
- "%d Tx hardware queues, %d byte fifo per queue\n",
- pdata->tx_q_count, ((fifo_size + 1) * 256));
}
static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
@@ -2051,10 +1417,6 @@
for (i = 0; i < pdata->rx_q_count; i++)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
-
- netif_info(pdata, drv, pdata->netdev,
- "%d Rx hardware queues, %d byte fifo per queue\n",
- pdata->rx_q_count, ((fifo_size + 1) * 256));
}
static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
@@ -2073,16 +1435,12 @@
for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
for (j = 0; j < qptc; j++) {
- netif_dbg(pdata, drv, pdata->netdev,
- "TXq%u mapped to TC%u\n", queue, i);
XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
Q2TCMAP, i);
pdata->q2tc_map[queue++] = i;
}
if (i < qptc_extra) {
- netif_dbg(pdata, drv, pdata->netdev,
- "TXq%u mapped to TC%u\n", queue, i);
XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
Q2TCMAP, i);
pdata->q2tc_map[queue++] = i;
@@ -2100,15 +1458,11 @@
for (i = 0, prio = 0; i < prio_queues;) {
mask = 0;
for (j = 0; j < ppq; j++) {
- netif_dbg(pdata, drv, pdata->netdev,
- "PRIO%u mapped to RXq%u\n", prio, i);
mask |= (1 << prio);
pdata->prio2q_map[prio++] = i;
}
if (i < ppq_extra) {
- netif_dbg(pdata, drv, pdata->netdev,
- "PRIO%u mapped to RXq%u\n", prio, i);
mask |= (1 << prio);
pdata->prio2q_map[prio++] = i;
}
@@ -2154,21 +1508,15 @@
static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
{
- xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);
- /* Filtering is done using perfect filtering and hash filtering */
- if (pdata->hw_feat.hash_table_size) {
- XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
- XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
- XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
- }
+ xgbe_set_mac_address(pdata, IF_LLADDR(pdata->netdev));
}
static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
{
unsigned int val;
- val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;
+ val = (if_getmtu(pdata->netdev) > XGMAC_STD_PACKET_MTU) ? 1 : 0;
XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
}
@@ -2187,12 +1535,16 @@
case SPEED_1000:
xgbe_set_gmii_speed(pdata);
break;
+ case SPEED_UNKNOWN:
+ break;
+ default:
+ panic("TODO %s:%d\n", __FILE__, __LINE__);
}
}
static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
{
- if (pdata->netdev->features & NETIF_F_RXCSUM)
+ if ((if_getcapenable(pdata->netdev) & IFCAP_RXCSUM) != 0)
xgbe_enable_rx_csum(pdata);
else
xgbe_disable_rx_csum(pdata);
@@ -2207,15 +1559,8 @@
/* Set the current VLAN Hash Table register value */
xgbe_update_vlan_hash_table(pdata);
- if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
- xgbe_enable_rx_vlan_filtering(pdata);
- else
- xgbe_disable_rx_vlan_filtering(pdata);
-
- if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
- xgbe_enable_rx_vlan_stripping(pdata);
- else
- xgbe_disable_rx_vlan_stripping(pdata);
+ xgbe_disable_rx_vlan_filtering(pdata);
+ xgbe_disable_rx_vlan_stripping(pdata);
}
static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
@@ -2587,21 +1932,16 @@
* descriptors. Wait for the Tx engine to enter the stopped or
* suspended state. Don't wait forever though...
*/
- tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
- while (time_before(jiffies, tx_timeout)) {
+ tx_timeout = ticks + (XGBE_DMA_STOP_TIMEOUT * hz);
+ while (ticks < tx_timeout) {
tx_status = XGMAC_IOREAD(pdata, tx_dsr);
tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
if ((tx_status == DMA_TPS_STOPPED) ||
(tx_status == DMA_TPS_SUSPENDED))
break;
- usleep_range(500, 1000);
+ DELAY(500);
}
-
- if (!time_before(jiffies, tx_timeout))
- netdev_info(pdata->netdev,
- "timed out waiting for Tx DMA channel %u to stop\n",
- channel->queue_index);
}
static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
@@ -2668,20 +2008,15 @@
* packets. Wait for the Rx queue to empty the Rx fifo. Don't
* wait forever though...
*/
- rx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
- while (time_before(jiffies, rx_timeout)) {
+ rx_timeout = ticks + (XGBE_DMA_STOP_TIMEOUT * hz);
+ while (ticks < rx_timeout) {
rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
if ((XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
(XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
break;
- usleep_range(500, 1000);
+ DELAY(500);
}
-
- if (!time_before(jiffies, rx_timeout))
- netdev_info(pdata->netdev,
- "timed out waiting for Rx queue %u to empty\n",
- queue);
}
static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
@@ -2860,8 +2195,6 @@
/*TODO: Error Packet and undersized good Packet forwarding enable
(FEP and FUP)
*/
- xgbe_config_dcb_tc(pdata);
- xgbe_config_dcb_pfc(pdata);
xgbe_enable_mtl_interrupts(pdata);
/*
@@ -2966,23 +2299,8 @@
hw_if->rx_mmc_int = xgbe_rx_mmc_int;
hw_if->read_mmc_stats = xgbe_read_mmc_stats;
- /* For PTP config */
- hw_if->config_tstamp = xgbe_config_tstamp;
- hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
- hw_if->set_tstamp_time = xgbe_set_tstamp_time;
- hw_if->get_tstamp_time = xgbe_get_tstamp_time;
- hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
-
- /* For Data Center Bridging config */
- hw_if->config_tc = xgbe_config_tc;
- hw_if->config_dcb_tc = xgbe_config_dcb_tc;
- hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
-
/* For Receive Side Scaling */
- hw_if->enable_rss = xgbe_enable_rss;
hw_if->disable_rss = xgbe_disable_rss;
- hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
- hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
DBGPR("<--xgbe_init_function_ptrs\n");
}
Index: sys/dev/axgbe/xgbe-drv.c
===================================================================
--- sys/dev/axgbe/xgbe-drv.c
+++ sys/dev/axgbe/xgbe-drv.c
@@ -114,21 +114,16 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/tcp.h>
-#include <linux/if_vlan.h>
-#include <net/busy_poll.h>
-#include <linux/clk.h>
-#include <linux/if_ether.h>
-#include <linux/net_tstamp.h>
-#include <linux/phy.h>
+#include <sys/cdefs.h>
+
+#include <sys/param.h>
+#include <sys/kernel.h>
#include "xgbe.h"
#include "xgbe-common.h"
-static int xgbe_one_poll(struct napi_struct *, int);
-static int xgbe_all_poll(struct napi_struct *, int);
+static int xgbe_one_poll(struct xgbe_channel *channel, int budget);
+static int xgbe_all_poll(struct xgbe_prv_data *pdata, int budget);
static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
{
@@ -139,38 +134,28 @@
count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
- channel_mem = kcalloc(count, sizeof(struct xgbe_channel), GFP_KERNEL);
- if (!channel_mem)
- goto err_channel;
-
- tx_ring = kcalloc(pdata->tx_ring_count, sizeof(struct xgbe_ring),
- GFP_KERNEL);
- if (!tx_ring)
- goto err_tx_ring;
-
- rx_ring = kcalloc(pdata->rx_ring_count, sizeof(struct xgbe_ring),
- GFP_KERNEL);
- if (!rx_ring)
- goto err_rx_ring;
+ channel_mem = malloc(count * sizeof(struct xgbe_channel), M_AXGBE,
+ M_WAITOK | M_ZERO);
+ tx_ring = malloc(pdata->tx_ring_count * sizeof(struct xgbe_ring),
+ M_AXGBE, M_WAITOK | M_ZERO);
+ rx_ring = malloc(pdata->rx_ring_count * sizeof(struct xgbe_ring),
+ M_AXGBE, M_WAITOK | M_ZERO);
for (i = 0, channel = channel_mem; i < count; i++, channel++) {
snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
channel->pdata = pdata;
channel->queue_index = i;
- channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
- (DMA_CH_INC * i);
+ channel->dma_tag = rman_get_bustag(pdata->xgmac_res);
+ bus_space_subregion(channel->dma_tag,
+ rman_get_bushandle(pdata->xgmac_res),
+ DMA_CH_BASE + (DMA_CH_INC * i), DMA_CH_INC,
+ &channel->dma_handle);
if (pdata->per_channel_irq) {
- /* Get the DMA interrupt (offset 1) */
- ret = platform_get_irq(pdata->pdev, i + 1);
- if (ret < 0) {
- netdev_err(pdata->netdev,
- "platform_get_irq %u failed\n",
- i + 1);
+ if (pdata->chan_irq_res[i] == NULL)
goto err_irq;
- }
- channel->dma_irq = ret;
+ channel->dma_irq_res = pdata->chan_irq_res[i];
}
if (i < pdata->tx_ring_count) {
@@ -182,11 +167,6 @@
spin_lock_init(&rx_ring->lock);
channel->rx_ring = rx_ring++;
}
-
- netif_dbg(pdata, drv, pdata->netdev,
- "%s: dma_regs=%p, dma_irq=%d, tx=%p, rx=%p\n",
- channel->name, channel->dma_regs, channel->dma_irq,
- channel->tx_ring, channel->rx_ring);
}
pdata->channel = channel_mem;
@@ -195,15 +175,10 @@
return 0;
err_irq:
- kfree(rx_ring);
+ free(rx_ring, M_AXGBE);
+ free(tx_ring, M_AXGBE);
+ free(channel_mem, M_AXGBE);
-err_rx_ring:
- kfree(tx_ring);
-
-err_tx_ring:
- kfree(channel_mem);
-
-err_channel:
return ret;
}
@@ -212,9 +187,9 @@
if (!pdata->channel)
return;
- kfree(pdata->channel->rx_ring);
- kfree(pdata->channel->tx_ring);
- kfree(pdata->channel);
+ free(pdata->channel->rx_ring, M_AXGBE);
+ free(pdata->channel->tx_ring, M_AXGBE);
+ free(pdata->channel, M_AXGBE);
pdata->channel = NULL;
pdata->channel_count = 0;
@@ -236,34 +211,28 @@
struct xgbe_prv_data *pdata = channel->pdata;
if (count > xgbe_tx_avail_desc(ring)) {
- netif_info(pdata, drv, pdata->netdev,
- "Tx queue stopped, not enough descriptors available\n");
- netif_stop_subqueue(pdata->netdev, channel->queue_index);
- ring->tx.queue_stopped = 1;
-
/* If we haven't notified the hardware because of xmit_more
* support, tell it now
*/
if (ring->tx.xmit_more)
pdata->hw_if.tx_start_xmit(channel, ring);
- return NETDEV_TX_BUSY;
+ return EFBIG;
}
return 0;
}
-static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
+static int xgbe_calc_rx_buf_size(struct ifnet *netdev, unsigned int mtu)
{
unsigned int rx_buf_size;
if (mtu > XGMAC_JUMBO_PACKET_MTU) {
- netdev_alert(netdev, "MTU exceeds maximum supported value\n");
return -EINVAL;
}
rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
- rx_buf_size = clamp_val(rx_buf_size, XGBE_RX_MIN_BUF_SIZE, PAGE_SIZE);
+	rx_buf_size = MIN(MAX(rx_buf_size, XGBE_RX_MIN_BUF_SIZE), PAGE_SIZE);
rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
~(XGBE_RX_BUF_ALIGN - 1);
@@ -293,35 +262,13 @@
}
}
-static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
-{
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- struct xgbe_channel *channel;
- enum xgbe_int int_id;
- unsigned int i;
-
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (channel->tx_ring && channel->rx_ring)
- int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
- else if (channel->tx_ring)
- int_id = XGMAC_INT_DMA_CH_SR_TI;
- else if (channel->rx_ring)
- int_id = XGMAC_INT_DMA_CH_SR_RI;
- else
- continue;
-
- hw_if->disable_int(channel, int_id);
- }
-}
-
-static irqreturn_t xgbe_isr(int irq, void *data)
+static void xgbe_isr(void *data)
{
struct xgbe_prv_data *pdata = data;
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_channel *channel;
unsigned int dma_isr, dma_ch_isr;
- unsigned int mac_isr, mac_tssr;
+ unsigned int mac_isr;
unsigned int i;
/* The DMA interrupt status register also reports MAC and MTL
@@ -330,9 +277,7 @@
*/
dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
if (!dma_isr)
- goto isr_done;
-
- netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);
+ return;
for (i = 0; i < pdata->channel_count; i++) {
if (!(dma_isr & (1 << i)))
@@ -341,8 +286,6 @@
channel = pdata->channel + i;
dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
- netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
- i, dma_ch_isr);
/* The TI or RI interrupt bits may still be set even if using
* per channel DMA interrupts. Check to be sure those are not
@@ -351,13 +294,7 @@
if (!pdata->per_channel_irq &&
(XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI))) {
- if (napi_schedule_prep(&pdata->napi)) {
- /* Disable Tx and Rx interrupts */
- xgbe_disable_rx_tx_ints(pdata);
-
- /* Turn on polling */
- __napi_schedule_irqoff(&pdata->napi);
- }
+ xgbe_all_poll(pdata, 16);
}
if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RBU))
@@ -365,7 +302,8 @@
/* Restart the device on a Fatal Bus Error */
if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
- schedule_work(&pdata->restart_work);
+ taskqueue_enqueue(taskqueue_thread,
+ &pdata->restart_work);
/* Clear all interrupt signals */
XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
@@ -379,123 +317,49 @@
if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
hw_if->rx_mmc_int(pdata);
-
- if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
- mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);
-
- if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
- /* Read Tx Timestamp to clear interrupt */
- pdata->tx_tstamp =
- hw_if->get_tx_tstamp(pdata);
- queue_work(pdata->dev_workqueue,
- &pdata->tx_tstamp_work);
- }
- }
}
-
-isr_done:
- return IRQ_HANDLED;
}
-static irqreturn_t xgbe_dma_isr(int irq, void *data)
+static void xgbe_dma_isr(void *data)
{
struct xgbe_channel *channel = data;
- /* Per channel DMA interrupts are enabled, so we use the per
- * channel napi structure and not the private data napi structure
- */
- if (napi_schedule_prep(&channel->napi)) {
- /* Disable Tx and Rx interrupts */
- disable_irq_nosync(channel->dma_irq);
-
- /* Turn on polling */
- __napi_schedule_irqoff(&channel->napi);
- }
-
- return IRQ_HANDLED;
-}
-
-static void xgbe_tx_timer(unsigned long data)
-{
- struct xgbe_channel *channel = (struct xgbe_channel *)data;
- struct xgbe_prv_data *pdata = channel->pdata;
- struct napi_struct *napi;
-
- DBGPR("-->xgbe_tx_timer\n");
-
- napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
-
- if (napi_schedule_prep(napi)) {
- /* Disable Tx and Rx interrupts */
- if (pdata->per_channel_irq)
- disable_irq_nosync(channel->dma_irq);
- else
- xgbe_disable_rx_tx_ints(pdata);
-
- /* Turn on polling */
- __napi_schedule(napi);
- }
-
- channel->tx_timer_active = 0;
-
- DBGPR("<--xgbe_tx_timer\n");
+ xgbe_one_poll(channel, 16);
}
-static void xgbe_service(struct work_struct *work)
+static void xgbe_service(void *ctx, int pending)
{
- struct xgbe_prv_data *pdata = container_of(work,
- struct xgbe_prv_data,
- service_work);
+ struct xgbe_prv_data *pdata = ctx;
pdata->phy_if.phy_status(pdata);
}
-static void xgbe_service_timer(unsigned long data)
+static void xgbe_service_timer(void *data)
{
- struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
+ struct xgbe_prv_data *pdata = data;
- queue_work(pdata->dev_workqueue, &pdata->service_work);
+ DBGPR("--> xgbe_service_timer\n");
+ taskqueue_enqueue(pdata->dev_workqueue, &pdata->service_work);
- mod_timer(&pdata->service_timer, jiffies + HZ);
+ callout_reset(&pdata->service_timer, hz, xgbe_service_timer, pdata);
+ DBGPR("<-- xgbe_service_timer\n");
}
static void xgbe_init_timers(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
- unsigned int i;
- setup_timer(&pdata->service_timer, xgbe_service_timer,
- (unsigned long)pdata);
-
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
- break;
-
- setup_timer(&channel->tx_timer, xgbe_tx_timer,
- (unsigned long)channel);
- }
+ callout_init(&pdata->service_timer, 1);
}
static void xgbe_start_timers(struct xgbe_prv_data *pdata)
{
- mod_timer(&pdata->service_timer, jiffies + HZ);
+ callout_reset(&pdata->service_timer, hz, xgbe_service_timer, pdata);
}
static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
{
- struct xgbe_channel *channel;
- unsigned int i;
-
- del_timer_sync(&pdata->service_timer);
-
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
- break;
- del_timer_sync(&channel->tx_timer);
- }
+ callout_drain(&pdata->service_timer);
}
void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
@@ -598,62 +462,16 @@
DBGPR("<--xgbe_get_all_hw_features\n");
}
-static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
-{
- struct xgbe_channel *channel;
- unsigned int i;
-
- if (pdata->per_channel_irq) {
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (add)
- netif_napi_add(pdata->netdev, &channel->napi,
- xgbe_one_poll, NAPI_POLL_WEIGHT);
-
- napi_enable(&channel->napi);
- }
- } else {
- if (add)
- netif_napi_add(pdata->netdev, &pdata->napi,
- xgbe_all_poll, NAPI_POLL_WEIGHT);
-
- napi_enable(&pdata->napi);
- }
-}
-
-static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
-{
- struct xgbe_channel *channel;
- unsigned int i;
-
- if (pdata->per_channel_irq) {
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- napi_disable(&channel->napi);
-
- if (del)
- netif_napi_del(&channel->napi);
- }
- } else {
- napi_disable(&pdata->napi);
-
- if (del)
- netif_napi_del(&pdata->napi);
- }
-}
-
static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
{
struct xgbe_channel *channel;
- struct net_device *netdev = pdata->netdev;
unsigned int i;
int ret;
- ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
- netdev->name, pdata);
+ ret = bus_setup_intr(pdata->dev, pdata->dev_irq_res,
+ INTR_MPSAFE | INTR_TYPE_NET, NULL, xgbe_isr, pdata,
+ &pdata->dev_irq_tag);
if (ret) {
- netdev_alert(netdev, "error requesting irq %d\n",
- pdata->dev_irq);
return ret;
}
@@ -662,17 +480,10 @@
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
- snprintf(channel->dma_irq_name,
- sizeof(channel->dma_irq_name) - 1,
- "%s-TxRx-%u", netdev_name(netdev),
- channel->queue_index);
-
- ret = devm_request_irq(pdata->dev, channel->dma_irq,
- xgbe_dma_isr, 0,
- channel->dma_irq_name, channel);
- if (ret) {
- netdev_alert(netdev, "error requesting irq %d\n",
- channel->dma_irq);
+ ret = bus_setup_intr(pdata->dev, channel->dma_irq_res,
+ INTR_MPSAFE | INTR_TYPE_NET, NULL, xgbe_dma_isr, channel,
+ &channel->dma_irq_tag);
+ if (ret != 0) {
goto err_irq;
}
}
@@ -682,11 +493,12 @@
err_irq:
/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
for (i--, channel--; i < pdata->channel_count; i--, channel--)
- devm_free_irq(pdata->dev, channel->dma_irq, channel);
+ bus_teardown_intr(pdata->dev, channel->dma_irq_res,
+ channel->dma_irq_tag);
- devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+ bus_teardown_intr(pdata->dev, pdata->dev_irq_res, pdata->dev_irq_tag);
- return ret;
+ return -ret;
}
static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
@@ -694,14 +506,15 @@
struct xgbe_channel *channel;
unsigned int i;
- devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
+ bus_teardown_intr(pdata->dev, pdata->dev_irq_res, pdata->dev_irq_tag);
if (!pdata->per_channel_irq)
return;
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++)
- devm_free_irq(pdata->dev, channel->dma_irq, channel);
+ bus_teardown_intr(pdata->dev, channel->dma_irq_res,
+ channel->dma_irq_tag);
}
void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
@@ -791,88 +604,10 @@
return pdata->phy_if.phy_reset(pdata);
}
-int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- unsigned long flags;
-
- DBGPR("-->xgbe_powerdown\n");
-
- if (!netif_running(netdev) ||
- (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
- netdev_alert(netdev, "Device is already powered down\n");
- DBGPR("<--xgbe_powerdown\n");
- return -EINVAL;
- }
-
- spin_lock_irqsave(&pdata->lock, flags);
-
- if (caller == XGMAC_DRIVER_CONTEXT)
- netif_device_detach(netdev);
-
- netif_tx_stop_all_queues(netdev);
-
- xgbe_stop_timers(pdata);
- flush_workqueue(pdata->dev_workqueue);
-
- hw_if->powerdown_tx(pdata);
- hw_if->powerdown_rx(pdata);
-
- xgbe_napi_disable(pdata, 0);
-
- pdata->power_down = 1;
-
- spin_unlock_irqrestore(&pdata->lock, flags);
-
- DBGPR("<--xgbe_powerdown\n");
-
- return 0;
-}
-
-int xgbe_powerup(struct net_device *netdev, unsigned int caller)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- unsigned long flags;
-
- DBGPR("-->xgbe_powerup\n");
-
- if (!netif_running(netdev) ||
- (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
- netdev_alert(netdev, "Device is already powered up\n");
- DBGPR("<--xgbe_powerup\n");
- return -EINVAL;
- }
-
- spin_lock_irqsave(&pdata->lock, flags);
-
- pdata->power_down = 0;
-
- xgbe_napi_enable(pdata, 0);
-
- hw_if->powerup_tx(pdata);
- hw_if->powerup_rx(pdata);
-
- if (caller == XGMAC_DRIVER_CONTEXT)
- netif_device_attach(netdev);
-
- netif_tx_start_all_queues(netdev);
-
- xgbe_start_timers(pdata);
-
- spin_unlock_irqrestore(&pdata->lock, flags);
-
- DBGPR("<--xgbe_powerup\n");
-
- return 0;
-}
-
static int xgbe_start(struct xgbe_prv_data *pdata)
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_phy_if *phy_if = &pdata->phy_if;
- struct net_device *netdev = pdata->netdev;
int ret;
DBGPR("-->xgbe_start\n");
@@ -883,8 +618,6 @@
if (ret)
goto err_phy;
- xgbe_napi_enable(pdata, 1);
-
ret = xgbe_request_irqs(pdata);
if (ret)
goto err_napi;
@@ -892,18 +625,16 @@
hw_if->enable_tx(pdata);
hw_if->enable_rx(pdata);
- netif_tx_start_all_queues(netdev);
+ xgbe_enable_rx_tx_ints(pdata);
xgbe_start_timers(pdata);
- queue_work(pdata->dev_workqueue, &pdata->service_work);
+ taskqueue_enqueue(pdata->dev_workqueue, &pdata->service_work);
DBGPR("<--xgbe_start\n");
return 0;
err_napi:
- xgbe_napi_disable(pdata, 1);
-
phy_if->phy_stop(pdata);
err_phy:
@@ -916,38 +647,21 @@
{
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_phy_if *phy_if = &pdata->phy_if;
- struct xgbe_channel *channel;
- struct net_device *netdev = pdata->netdev;
- struct netdev_queue *txq;
- unsigned int i;
DBGPR("-->xgbe_stop\n");
- netif_tx_stop_all_queues(netdev);
-
xgbe_stop_timers(pdata);
- flush_workqueue(pdata->dev_workqueue);
+ taskqueue_drain_all(pdata->dev_workqueue);
hw_if->disable_tx(pdata);
hw_if->disable_rx(pdata);
xgbe_free_irqs(pdata);
- xgbe_napi_disable(pdata, 1);
-
phy_if->phy_stop(pdata);
hw_if->exit(pdata);
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++) {
- if (!channel->tx_ring)
- continue;
-
- txq = netdev_get_tx_queue(netdev, channel->queue_index);
- netdev_tx_reset_queue(txq);
- }
-
DBGPR("<--xgbe_stop\n");
}
@@ -956,7 +670,7 @@
DBGPR("-->xgbe_restart_dev\n");
/* If not running, "restart" will happen on open */
- if (!netif_running(pdata->netdev))
+ if ((pdata->netdev->if_drv_flags & IFF_DRV_RUNNING) == 0)
return;
xgbe_stop(pdata);
@@ -969,329 +683,38 @@
DBGPR("<--xgbe_restart_dev\n");
}
-static void xgbe_restart(struct work_struct *work)
+static void xgbe_restart(void *ctx, int pending)
{
- struct xgbe_prv_data *pdata = container_of(work,
- struct xgbe_prv_data,
- restart_work);
-
- rtnl_lock();
+ struct xgbe_prv_data *pdata = ctx;
xgbe_restart_dev(pdata);
-
- rtnl_unlock();
-}
-
-static void xgbe_tx_tstamp(struct work_struct *work)
-{
- struct xgbe_prv_data *pdata = container_of(work,
- struct xgbe_prv_data,
- tx_tstamp_work);
- struct skb_shared_hwtstamps hwtstamps;
- u64 nsec;
- unsigned long flags;
-
- if (pdata->tx_tstamp) {
- nsec = timecounter_cyc2time(&pdata->tstamp_tc,
- pdata->tx_tstamp);
-
- memset(&hwtstamps, 0, sizeof(hwtstamps));
- hwtstamps.hwtstamp = ns_to_ktime(nsec);
- skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
- }
-
- dev_kfree_skb_any(pdata->tx_tstamp_skb);
-
- spin_lock_irqsave(&pdata->tstamp_lock, flags);
- pdata->tx_tstamp_skb = NULL;
- spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
-}
-
-static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
- struct ifreq *ifreq)
-{
- if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
- sizeof(pdata->tstamp_config)))
- return -EFAULT;
-
- return 0;
-}
-
-static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
- struct ifreq *ifreq)
-{
- struct hwtstamp_config config;
- unsigned int mac_tscr;
-
- if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
- return -EFAULT;
-
- if (config.flags)
- return -EINVAL;
-
- mac_tscr = 0;
-
- switch (config.tx_type) {
- case HWTSTAMP_TX_OFF:
- break;
-
- case HWTSTAMP_TX_ON:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- default:
- return -ERANGE;
- }
-
- switch (config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- break;
-
- case HWTSTAMP_FILTER_ALL:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2, UDP, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- /* PTP v1, UDP, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2, UDP, Sync packet */
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- /* PTP v1, UDP, Sync packet */
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2, UDP, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- /* PTP v1, UDP, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* 802.AS1, Ethernet, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* 802.AS1, Ethernet, Sync packet */
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* 802.AS1, Ethernet, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2/802.AS1, any layer, any kind of event packet */
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2/802.AS1, any layer, Sync packet */
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- /* PTP v2/802.AS1, any layer, Delay_req packet */
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
- XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
- break;
-
- default:
- return -ERANGE;
- }
-
- pdata->hw_if.config_tstamp(pdata, mac_tscr);
-
- memcpy(&pdata->tstamp_config, &config, sizeof(config));
-
- return 0;
-}
-
-static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
- struct sk_buff *skb,
- struct xgbe_packet_data *packet)
-{
- unsigned long flags;
-
- if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
- spin_lock_irqsave(&pdata->tstamp_lock, flags);
- if (pdata->tx_tstamp_skb) {
- /* Another timestamp in progress, ignore this one */
- XGMAC_SET_BITS(packet->attributes,
- TX_PACKET_ATTRIBUTES, PTP, 0);
- } else {
- pdata->tx_tstamp_skb = skb_get(skb);
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- }
- spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
- }
-
- if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
- skb_tx_timestamp(skb);
-}
-
-static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
-{
- if (skb_vlan_tag_present(skb))
- packet->vlan_ctag = skb_vlan_tag_get(skb);
-}
-
-static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
-{
- int ret;
-
- if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
- TSO_ENABLE))
- return 0;
-
- ret = skb_cow_head(skb, 0);
- if (ret)
- return ret;
-
- packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
- packet->tcp_header_len = tcp_hdrlen(skb);
- packet->tcp_payload_len = skb->len - packet->header_len;
- packet->mss = skb_shinfo(skb)->gso_size;
- DBGPR(" packet->header_len=%u\n", packet->header_len);
- DBGPR(" packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
- packet->tcp_header_len, packet->tcp_payload_len);
- DBGPR(" packet->mss=%u\n", packet->mss);
-
- /* Update the number of packets that will ultimately be transmitted
- * along with the extra bytes for each extra packet
- */
- packet->tx_packets = skb_shinfo(skb)->gso_segs;
- packet->tx_bytes += (packet->tx_packets - 1) * packet->header_len;
-
- return 0;
-}
-
-static int xgbe_is_tso(struct sk_buff *skb)
-{
- if (skb->ip_summed != CHECKSUM_PARTIAL)
- return 0;
-
- if (!skb_is_gso(skb))
- return 0;
-
- DBGPR(" TSO packet to be processed\n");
-
- return 1;
}
static void xgbe_packet_info(struct xgbe_prv_data *pdata,
- struct xgbe_ring *ring, struct sk_buff *skb,
+ struct xgbe_ring *ring, struct mbuf *m0,
struct xgbe_packet_data *packet)
{
- struct skb_frag_struct *frag;
- unsigned int context_desc;
+ struct mbuf *m;
unsigned int len;
- unsigned int i;
- packet->skb = skb;
+ packet->m = m0;
- context_desc = 0;
packet->rdesc_count = 0;
packet->tx_packets = 1;
- packet->tx_bytes = skb->len;
-
- if (xgbe_is_tso(skb)) {
- /* TSO requires an extra descriptor if mss is different */
- if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
- context_desc = 1;
- packet->rdesc_count++;
- }
-
- /* TSO requires an extra descriptor for TSO header */
- packet->rdesc_count++;
-
- XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
- TSO_ENABLE, 1);
- XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
- CSUM_ENABLE, 1);
- } else if (skb->ip_summed == CHECKSUM_PARTIAL)
- XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
- CSUM_ENABLE, 1);
-
- if (skb_vlan_tag_present(skb)) {
- /* VLAN requires an extra descriptor if tag is different */
- if (skb_vlan_tag_get(skb) != ring->tx.cur_vlan_ctag)
- /* We can share with the TSO context descriptor */
- if (!context_desc) {
- context_desc = 1;
- packet->rdesc_count++;
- }
-
- XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
- VLAN_CTAG, 1);
- }
-
- if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
- XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
- PTP, 1);
-
- for (len = skb_headlen(skb); len;) {
- packet->rdesc_count++;
- len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
- }
+ packet->tx_bytes = m_length(m0, NULL);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- frag = &skb_shinfo(skb)->frags[i];
- for (len = skb_frag_size(frag); len; ) {
+ for (m = m0; m != NULL; m = m->m_next) {
+ for (len = m->m_len; len != 0;) {
packet->rdesc_count++;
- len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
+ len -= MIN(len, XGBE_TX_MAX_BUF_SIZE);
}
}
}
-static int xgbe_open(struct net_device *netdev)
+int xgbe_open(struct ifnet *netdev)
{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_prv_data *pdata = netdev->if_softc;
struct xgbe_desc_if *desc_if = &pdata->desc_if;
int ret;
@@ -1302,38 +725,29 @@
if (ret)
return ret;
- /* Enable the clocks */
- ret = clk_prepare_enable(pdata->sysclk);
- if (ret) {
- netdev_alert(netdev, "dma clk_prepare_enable failed\n");
- return ret;
- }
-
- ret = clk_prepare_enable(pdata->ptpclk);
- if (ret) {
- netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
- goto err_sysclk;
- }
-
/* Calculate the Rx buffer size before allocating rings */
- ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
- if (ret < 0)
+ ret = xgbe_calc_rx_buf_size(netdev, if_getmtu(netdev));
+ if (ret < 0) {
goto err_ptpclk;
+ }
pdata->rx_buf_size = ret;
/* Allocate the channel and ring structures */
ret = xgbe_alloc_channels(pdata);
- if (ret)
+ if (ret) {
+ printf("xgbe_alloc_channels failed\n");
goto err_ptpclk;
+ }
/* Allocate the ring descriptors and buffers */
ret = desc_if->alloc_ring_resources(pdata);
- if (ret)
+ if (ret) {
+ printf("desc_if->alloc_ring_resources failed\n");
goto err_channels;
+ }
- INIT_WORK(&pdata->service_work, xgbe_service);
- INIT_WORK(&pdata->restart_work, xgbe_restart);
- INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
+ TASK_INIT(&pdata->service_work, 0, xgbe_service, pdata);
+ TASK_INIT(&pdata->restart_work, 0, xgbe_restart, pdata);
xgbe_init_timers(pdata);
ret = xgbe_start(pdata);
@@ -1353,17 +767,13 @@
xgbe_free_channels(pdata);
err_ptpclk:
- clk_disable_unprepare(pdata->ptpclk);
-
-err_sysclk:
- clk_disable_unprepare(pdata->sysclk);
return ret;
}
-static int xgbe_close(struct net_device *netdev)
+int xgbe_close(struct ifnet *netdev)
{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_prv_data *pdata = netdev->if_softc;
struct xgbe_desc_if *desc_if = &pdata->desc_if;
DBGPR("-->xgbe_close\n");
@@ -1377,10 +787,6 @@
/* Free the channel and ring structures */
xgbe_free_channels(pdata);
- /* Disable the clocks */
- clk_disable_unprepare(pdata->ptpclk);
- clk_disable_unprepare(pdata->sysclk);
-
set_bit(XGBE_DOWN, &pdata->dev_state);
DBGPR("<--xgbe_close\n");
@@ -1388,142 +794,66 @@
return 0;
}
-static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
+int xgbe_xmit(struct ifnet *ifp, struct mbuf *m)
{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_prv_data *pdata = ifp->if_softc;
struct xgbe_hw_if *hw_if = &pdata->hw_if;
struct xgbe_desc_if *desc_if = &pdata->desc_if;
struct xgbe_channel *channel;
struct xgbe_ring *ring;
struct xgbe_packet_data *packet;
- struct netdev_queue *txq;
int ret;
- DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
+ M_ASSERTPKTHDR(m);
+ MPASS(m->m_nextpkt == NULL);
- channel = pdata->channel + skb->queue_mapping;
- txq = netdev_get_tx_queue(netdev, channel->queue_index);
+ if (__predict_false(test_bit(XGBE_DOWN, &pdata->dev_state) ||
+ !pdata->phy.link)) {
+ m_freem(m);
+ return (ENETDOWN);
+ }
+
+ channel = pdata->channel;
ring = channel->tx_ring;
packet = &ring->packet_data;
- ret = NETDEV_TX_OK;
-
- if (skb->len == 0) {
- netif_err(pdata, tx_err, netdev,
- "empty skb received from stack\n");
- dev_kfree_skb_any(skb);
- goto tx_netdev_return;
- }
-
/* Calculate preliminary packet info */
memset(packet, 0, sizeof(*packet));
- xgbe_packet_info(pdata, ring, skb, packet);
+ xgbe_packet_info(pdata, ring, m, packet);
/* Check that there are enough descriptors available */
ret = xgbe_maybe_stop_tx_queue(channel, ring, packet->rdesc_count);
if (ret)
goto tx_netdev_return;
- ret = xgbe_prep_tso(skb, packet);
- if (ret) {
- netif_err(pdata, tx_err, netdev,
- "error processing TSO packet\n");
- dev_kfree_skb_any(skb);
- goto tx_netdev_return;
- }
- xgbe_prep_vlan(skb, packet);
-
- if (!desc_if->map_tx_skb(channel, skb)) {
- dev_kfree_skb_any(skb);
+ if (!desc_if->map_tx_skb(channel, m)) {
goto tx_netdev_return;
}
- xgbe_prep_tx_tstamp(pdata, skb, packet);
-
- /* Report on the actual number of bytes (to be) sent */
- netdev_tx_sent_queue(txq, packet->tx_bytes);
-
/* Configure required descriptor fields for transmission */
hw_if->dev_xmit(channel);
- if (netif_msg_pktdata(pdata))
- xgbe_print_pkt(netdev, skb, true);
-
- /* Stop the queue in advance if there may not be enough descriptors */
- xgbe_maybe_stop_tx_queue(channel, ring, XGBE_TX_MAX_DESCS);
-
- ret = NETDEV_TX_OK;
+ return 0;
tx_netdev_return:
- return ret;
-}
-
-static void xgbe_set_rx_mode(struct net_device *netdev)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
-
- DBGPR("-->xgbe_set_rx_mode\n");
-
- hw_if->config_rx_mode(pdata);
-
- DBGPR("<--xgbe_set_rx_mode\n");
-}
-
-static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- struct sockaddr *saddr = addr;
-
- DBGPR("-->xgbe_set_mac_address\n");
-
- if (!is_valid_ether_addr(saddr->sa_data))
- return -EADDRNOTAVAIL;
-
- memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
-
- hw_if->set_mac_address(pdata, netdev->dev_addr);
-
- DBGPR("<--xgbe_set_mac_address\n");
+	m_freem(m);
return 0;
}
-static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- int ret;
-
- switch (cmd) {
- case SIOCGHWTSTAMP:
- ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
- break;
-
- case SIOCSHWTSTAMP:
- ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
- break;
-
- default:
- ret = -EOPNOTSUPP;
- }
-
- return ret;
-}
-
-static int xgbe_change_mtu(struct net_device *netdev, int mtu)
+int xgbe_change_mtu(struct ifnet *netdev, int mtu)
{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_prv_data *pdata = netdev->if_softc;
int ret;
DBGPR("-->xgbe_change_mtu\n");
ret = xgbe_calc_rx_buf_size(netdev, mtu);
if (ret < 0)
- return ret;
+ return -ret;
pdata->rx_buf_size = ret;
- netdev->mtu = mtu;
+ netdev->if_mtu = mtu;
xgbe_restart_dev(pdata);
@@ -1532,187 +862,6 @@
return 0;
}
-static void xgbe_tx_timeout(struct net_device *netdev)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
-
- netdev_warn(netdev, "tx timeout, device restarting\n");
- schedule_work(&pdata->restart_work);
-}
-
-static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *s)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;
-
- DBGPR("-->%s\n", __func__);
-
- pdata->hw_if.read_mmc_stats(pdata);
-
- s->rx_packets = pstats->rxframecount_gb;
- s->rx_bytes = pstats->rxoctetcount_gb;
- s->rx_errors = pstats->rxframecount_gb -
- pstats->rxbroadcastframes_g -
- pstats->rxmulticastframes_g -
- pstats->rxunicastframes_g;
- s->multicast = pstats->rxmulticastframes_g;
- s->rx_length_errors = pstats->rxlengtherror;
- s->rx_crc_errors = pstats->rxcrcerror;
- s->rx_fifo_errors = pstats->rxfifooverflow;
-
- s->tx_packets = pstats->txframecount_gb;
- s->tx_bytes = pstats->txoctetcount_gb;
- s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
- s->tx_dropped = netdev->stats.tx_dropped;
-
- DBGPR("<--%s\n", __func__);
-
- return s;
-}
-
-static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
- u16 vid)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
-
- DBGPR("-->%s\n", __func__);
-
- set_bit(vid, pdata->active_vlans);
- hw_if->update_vlan_hash_table(pdata);
-
- DBGPR("<--%s\n", __func__);
-
- return 0;
-}
-
-static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
- u16 vid)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
-
- DBGPR("-->%s\n", __func__);
-
- clear_bit(vid, pdata->active_vlans);
- hw_if->update_vlan_hash_table(pdata);
-
- DBGPR("<--%s\n", __func__);
-
- return 0;
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void xgbe_poll_controller(struct net_device *netdev)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_channel *channel;
- unsigned int i;
-
- DBGPR("-->xgbe_poll_controller\n");
-
- if (pdata->per_channel_irq) {
- channel = pdata->channel;
- for (i = 0; i < pdata->channel_count; i++, channel++)
- xgbe_dma_isr(channel->dma_irq, channel);
- } else {
- disable_irq(pdata->dev_irq);
- xgbe_isr(pdata->dev_irq, pdata);
- enable_irq(pdata->dev_irq);
- }
-
- DBGPR("<--xgbe_poll_controller\n");
-}
-#endif /* End CONFIG_NET_POLL_CONTROLLER */
-
-static int xgbe_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
- struct tc_to_netdev *tc_to_netdev)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- u8 tc;
-
- if (tc_to_netdev->type != TC_SETUP_MQPRIO)
- return -EINVAL;
-
- tc = tc_to_netdev->tc;
-
- if (tc > pdata->hw_feat.tc_cnt)
- return -EINVAL;
-
- pdata->num_tcs = tc;
- pdata->hw_if.config_tc(pdata);
-
- return 0;
-}
-
-static int xgbe_set_features(struct net_device *netdev,
- netdev_features_t features)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- netdev_features_t rxhash, rxcsum, rxvlan, rxvlan_filter;
- int ret = 0;
-
- rxhash = pdata->netdev_features & NETIF_F_RXHASH;
- rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
- rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
- rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
-
- if ((features & NETIF_F_RXHASH) && !rxhash)
- ret = hw_if->enable_rss(pdata);
- else if (!(features & NETIF_F_RXHASH) && rxhash)
- ret = hw_if->disable_rss(pdata);
- if (ret)
- return ret;
-
- if ((features & NETIF_F_RXCSUM) && !rxcsum)
- hw_if->enable_rx_csum(pdata);
- else if (!(features & NETIF_F_RXCSUM) && rxcsum)
- hw_if->disable_rx_csum(pdata);
-
- if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
- hw_if->enable_rx_vlan_stripping(pdata);
- else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
- hw_if->disable_rx_vlan_stripping(pdata);
-
- if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
- hw_if->enable_rx_vlan_filtering(pdata);
- else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
- hw_if->disable_rx_vlan_filtering(pdata);
-
- pdata->netdev_features = features;
-
- DBGPR("<--xgbe_set_features\n");
-
- return 0;
-}
-
-static const struct net_device_ops xgbe_netdev_ops = {
- .ndo_open = xgbe_open,
- .ndo_stop = xgbe_close,
- .ndo_start_xmit = xgbe_xmit,
- .ndo_set_rx_mode = xgbe_set_rx_mode,
- .ndo_set_mac_address = xgbe_set_mac_address,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = xgbe_ioctl,
- .ndo_change_mtu = xgbe_change_mtu,
- .ndo_tx_timeout = xgbe_tx_timeout,
- .ndo_get_stats64 = xgbe_get_stats64,
- .ndo_vlan_rx_add_vid = xgbe_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = xgbe_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = xgbe_poll_controller,
-#endif
- .ndo_setup_tc = xgbe_setup_tc,
- .ndo_set_features = xgbe_set_features,
-};
-
-struct net_device_ops *xgbe_get_netdev_ops(void)
-{
- return (struct net_device_ops *)&xgbe_netdev_ops;
-}
-
static void xgbe_rx_refresh(struct xgbe_channel *channel)
{
struct xgbe_prv_data *pdata = channel->pdata;
@@ -1736,59 +885,13 @@
}
/* Make sure everything is written before the register write */
- wmb();
+ dsb(sy);
/* Update the Rx Tail Pointer Register with address of
* the last cleaned entry */
rdata = XGBE_GET_DESC_DATA(ring, ring->dirty - 1);
XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
- lower_32_bits(rdata->rdesc_dma));
-}
-
-static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
- struct napi_struct *napi,
- struct xgbe_ring_data *rdata,
- unsigned int len)
-{
- struct sk_buff *skb;
- u8 *packet;
- unsigned int copy_len;
-
- skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
- if (!skb)
- return NULL;
-
- /* Start with the header buffer which may contain just the header
- * or the header plus data
- */
- dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
- rdata->rx.hdr.dma_off,
- rdata->rx.hdr.dma_len, DMA_FROM_DEVICE);
-
- packet = page_address(rdata->rx.hdr.pa.pages) +
- rdata->rx.hdr.pa.pages_offset;
- copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len;
- copy_len = min(rdata->rx.hdr.dma_len, copy_len);
- skb_copy_to_linear_data(skb, packet, copy_len);
- skb_put(skb, copy_len);
-
- len -= copy_len;
- if (len) {
- /* Add the remaining data as a frag */
- dma_sync_single_range_for_cpu(pdata->dev,
- rdata->rx.buf.dma_base,
- rdata->rx.buf.dma_off,
- rdata->rx.buf.dma_len,
- DMA_FROM_DEVICE);
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- rdata->rx.buf.pa.pages,
- rdata->rx.buf.pa.pages_offset,
- len, rdata->rx.buf.dma_len);
- rdata->rx.buf.pa.pages = NULL;
- }
-
- return skb;
+ lower_32_bits(rdata->rdata_paddr));
}
static int xgbe_tx_poll(struct xgbe_channel *channel)
@@ -1799,10 +902,7 @@
struct xgbe_ring *ring = channel->tx_ring;
struct xgbe_ring_data *rdata;
struct xgbe_ring_desc *rdesc;
- struct net_device *netdev = pdata->netdev;
- struct netdev_queue *txq;
int processed = 0;
- unsigned int tx_packets = 0, tx_bytes = 0;
unsigned int cur;
DBGPR("-->xgbe_tx_poll\n");
@@ -1814,9 +914,7 @@
cur = ring->cur;
/* Be sure we get ring->cur before accessing descriptor data */
- smp_rmb();
-
- txq = netdev_get_tx_queue(netdev, channel->queue_index);
+ dsb(sy);
while ((processed < XGBE_TX_DESC_MAX_PROC) &&
(ring->dirty != cur)) {
@@ -1828,15 +926,7 @@
/* Make sure descriptor fields are read after reading the OWN
* bit */
- dma_rmb();
-
- if (netif_msg_tx_done(pdata))
- xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);
-
- if (hw_if->is_last_desc(rdesc)) {
- tx_packets += rdata->tx.packets;
- tx_bytes += rdata->tx.bytes;
- }
+ dsb(sy);
/* Free the SKB and reset the descriptor for re-use */
desc_if->unmap_rdata(pdata, rdata);
@@ -1849,14 +939,6 @@
if (!processed)
return 0;
- netdev_tx_completed_queue(txq, tx_packets, tx_bytes);
-
- if ((ring->tx.queue_stopped == 1) &&
- (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
- ring->tx.queue_stopped = 0;
- netif_tx_wake_queue(txq);
- }
-
DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
return processed;
@@ -1869,12 +951,9 @@
struct xgbe_ring *ring = channel->rx_ring;
struct xgbe_ring_data *rdata;
struct xgbe_packet_data *packet;
- struct net_device *netdev = pdata->netdev;
- struct napi_struct *napi;
- struct sk_buff *skb;
- struct skb_shared_hwtstamps *hwtstamps;
- unsigned int incomplete, error, context_next, context;
- unsigned int len, rdesc_len, max_len;
+ struct ifnet *ifp = pdata->netdev;
+ struct mbuf *m;
+ unsigned int incomplete, context_next, context;
unsigned int received = 0;
int packet_count = 0;
@@ -1887,25 +966,11 @@
incomplete = 0;
context_next = 0;
- napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
-
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
packet = &ring->packet_data;
while (packet_count < budget) {
DBGPR(" cur = %d\n", ring->cur);
- /* First time in loop see if we need to restore state */
- if (!received && rdata->state_saved) {
- skb = rdata->state.skb;
- error = rdata->state.error;
- len = rdata->state.len;
- } else {
- memset(packet, 0, sizeof(*packet));
- skb = NULL;
- error = 0;
- len = 0;
- }
-
read_again:
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
@@ -1915,6 +980,8 @@
if (hw_if->dev_read(channel))
break;
+ m = rdata->mb;
+
received++;
ring->cur++;
@@ -1929,118 +996,41 @@
CONTEXT);
/* Earlier error, just drain the remaining data */
- if ((incomplete || context_next) && error)
+ if (incomplete || context_next) {
goto read_again;
-
- if (error || packet->errors) {
- if (packet->errors)
- netif_err(pdata, rx_err, netdev,
- "error in received packet\n");
- dev_kfree_skb(skb);
- goto next_packet;
- }
-
- if (!context) {
- /* Length is cumulative, get this descriptor's length */
- rdesc_len = rdata->rx.len - len;
- len += rdesc_len;
-
- if (rdesc_len && !skb) {
- skb = xgbe_create_skb(pdata, napi, rdata,
- rdesc_len);
- if (!skb)
- error = 1;
- } else if (rdesc_len) {
- dma_sync_single_range_for_cpu(pdata->dev,
- rdata->rx.buf.dma_base,
- rdata->rx.buf.dma_off,
- rdata->rx.buf.dma_len,
- DMA_FROM_DEVICE);
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- rdata->rx.buf.pa.pages,
- rdata->rx.buf.pa.pages_offset,
- rdesc_len,
- rdata->rx.buf.dma_len);
- rdata->rx.buf.pa.pages = NULL;
- }
}
- if (incomplete || context_next)
- goto read_again;
-
- if (!skb)
- goto next_packet;
-
- /* Be sure we don't exceed the configured MTU */
- max_len = netdev->mtu + ETH_HLEN;
- if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
- (skb->protocol == htons(ETH_P_8021Q)))
- max_len += VLAN_HLEN;
-
- if (skb->len > max_len) {
- netif_err(pdata, rx_err, netdev,
- "packet length exceeds configured MTU\n");
- dev_kfree_skb(skb);
+ if (packet->errors) {
+ rdata->mbuf_free = 1;
goto next_packet;
}
+ rdata->mb = NULL;
- if (netif_msg_pktdata(pdata))
- xgbe_print_pkt(netdev, skb, false);
-
- skb_checksum_none_assert(skb);
- if (XGMAC_GET_BITS(packet->attributes,
- RX_PACKET_ATTRIBUTES, CSUM_DONE))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- if (XGMAC_GET_BITS(packet->attributes,
- RX_PACKET_ATTRIBUTES, VLAN_CTAG))
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
- packet->vlan_ctag);
-
- if (XGMAC_GET_BITS(packet->attributes,
- RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
- u64 nsec;
-
- nsec = timecounter_cyc2time(&pdata->tstamp_tc,
- packet->rx_tstamp);
- hwtstamps = skb_hwtstamps(skb);
- hwtstamps->hwtstamp = ns_to_ktime(nsec);
+ m->m_pkthdr.len = rdata->rx.hdr_len + rdata->rx.len;
+ if (rdata->rx.hdr_len != 0) {
+ m->m_len = rdata->rx.hdr_len;
+ m->m_next->m_len = rdata->rx.len;
+ } else {
+ m->m_len = rdata->rx.len;
+ m_freem(m->m_next);
+ m->m_next = NULL;
}
+ if_setrcvif(m, ifp);
+ if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
- if (XGMAC_GET_BITS(packet->attributes,
- RX_PACKET_ATTRIBUTES, RSS_HASH))
- skb_set_hash(skb, packet->rss_hash,
- packet->rss_hash_type);
-
- skb->dev = netdev;
- skb->protocol = eth_type_trans(skb, netdev);
- skb_record_rx_queue(skb, channel->queue_index);
-
- napi_gro_receive(napi, skb);
+ ifp->if_input(ifp, m);
next_packet:
packet_count++;
}
- /* Check if we need to save state before leaving */
- if (received && (incomplete || context_next)) {
- rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
- rdata->state_saved = 1;
- rdata->state.skb = skb;
- rdata->state.len = len;
- rdata->state.error = error;
- }
-
DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);
return packet_count;
}
-static int xgbe_one_poll(struct napi_struct *napi, int budget)
+static int xgbe_one_poll(struct xgbe_channel *channel, int budget)
{
- struct xgbe_channel *channel = container_of(napi, struct xgbe_channel,
- napi);
int processed = 0;
DBGPR("-->xgbe_one_poll: budget=%d\n", budget);
@@ -2051,24 +1041,13 @@
/* Process Rx ring next */
processed = xgbe_rx_poll(channel, budget);
- /* If we processed everything, we are done */
- if (processed < budget) {
- /* Turn off polling */
- napi_complete_done(napi, processed);
-
- /* Enable Tx and Rx interrupts */
- enable_irq(channel->dma_irq);
- }
-
DBGPR("<--xgbe_one_poll: received = %d\n", processed);
return processed;
}
-static int xgbe_all_poll(struct napi_struct *napi, int budget)
+static int xgbe_all_poll(struct xgbe_prv_data *pdata, int budget)
{
- struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
- napi);
struct xgbe_channel *channel;
int ring_budget;
int processed, last_processed;
@@ -2093,86 +1072,7 @@
}
} while ((processed < budget) && (processed != last_processed));
- /* If we processed everything, we are done */
- if (processed < budget) {
- /* Turn off polling */
- napi_complete_done(napi, processed);
-
- /* Enable Tx and Rx interrupts */
- xgbe_enable_rx_tx_ints(pdata);
- }
-
DBGPR("<--xgbe_all_poll: received = %d\n", processed);
return processed;
}
-
-void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
- unsigned int idx, unsigned int count, unsigned int flag)
-{
- struct xgbe_ring_data *rdata;
- struct xgbe_ring_desc *rdesc;
-
- while (count--) {
- rdata = XGBE_GET_DESC_DATA(ring, idx);
- rdesc = rdata->rdesc;
- netdev_dbg(pdata->netdev,
- "TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
- (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
- le32_to_cpu(rdesc->desc0),
- le32_to_cpu(rdesc->desc1),
- le32_to_cpu(rdesc->desc2),
- le32_to_cpu(rdesc->desc3));
- idx++;
- }
-}
-
-void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
- unsigned int idx)
-{
- struct xgbe_ring_data *rdata;
- struct xgbe_ring_desc *rdesc;
-
- rdata = XGBE_GET_DESC_DATA(ring, idx);
- rdesc = rdata->rdesc;
- netdev_dbg(pdata->netdev,
- "RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n",
- idx, le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
- le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
-}
-
-void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
-{
- struct ethhdr *eth = (struct ethhdr *)skb->data;
- unsigned char *buf = skb->data;
- unsigned char buffer[128];
- unsigned int i, j;
-
- netdev_dbg(netdev, "\n************** SKB dump ****************\n");
-
- netdev_dbg(netdev, "%s packet of %d bytes\n",
- (tx_rx ? "TX" : "RX"), skb->len);
-
- netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
- netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source);
- netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto));
-
- for (i = 0, j = 0; i < skb->len;) {
- j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
- buf[i++]);
-
- if ((i % 32) == 0) {
- netdev_dbg(netdev, " %#06x: %s\n", i - 32, buffer);
- j = 0;
- } else if ((i % 16) == 0) {
- buffer[j++] = ' ';
- buffer[j++] = ' ';
- } else if ((i % 4) == 0) {
- buffer[j++] = ' ';
- }
- }
- if (i % 32)
- netdev_dbg(netdev, " %#06x: %s\n", i - (i % 32), buffer);
-
- netdev_dbg(netdev, "\n************** SKB dump ****************\n");
-}
Index: sys/dev/axgbe/xgbe-ethtool.c
===================================================================
--- sys/dev/axgbe/xgbe-ethtool.c
+++ /dev/null
@@ -1,629 +0,0 @@
-/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/spinlock.h>
-#include <linux/phy.h>
-#include <linux/net_tstamp.h>
-
-#include "xgbe.h"
-#include "xgbe-common.h"
-
-struct xgbe_stats {
- char stat_string[ETH_GSTRING_LEN];
- int stat_size;
- int stat_offset;
-};
-
-#define XGMAC_MMC_STAT(_string, _var) \
- { _string, \
- FIELD_SIZEOF(struct xgbe_mmc_stats, _var), \
- offsetof(struct xgbe_prv_data, mmc_stats._var), \
- }
-
-#define XGMAC_EXT_STAT(_string, _var) \
- { _string, \
- FIELD_SIZEOF(struct xgbe_ext_stats, _var), \
- offsetof(struct xgbe_prv_data, ext_stats._var), \
- }
-
-static const struct xgbe_stats xgbe_gstring_stats[] = {
- XGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
- XGMAC_MMC_STAT("tx_packets", txframecount_gb),
- XGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
- XGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
- XGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
- XGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
- XGMAC_EXT_STAT("tx_tso_packets", tx_tso_packets),
- XGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
- XGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
- XGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
- XGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
- XGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
- XGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
- XGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
- XGMAC_MMC_STAT("tx_pause_frames", txpauseframes),
-
- XGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
- XGMAC_MMC_STAT("rx_packets", rxframecount_gb),
- XGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
- XGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
- XGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
- XGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
- XGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
- XGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
- XGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
- XGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
- XGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
- XGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
- XGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
- XGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
- XGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
- XGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
- XGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
- XGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
- XGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
- XGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
- XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
- XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
- XGMAC_EXT_STAT("rx_split_header_packets", rx_split_header_packets),
- XGMAC_EXT_STAT("rx_buffer_unavailable", rx_buffer_unavailable),
-};
-
-#define XGBE_STATS_COUNT ARRAY_SIZE(xgbe_gstring_stats)
-
-static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
-{
- int i;
-
- switch (stringset) {
- case ETH_SS_STATS:
- for (i = 0; i < XGBE_STATS_COUNT; i++) {
- memcpy(data, xgbe_gstring_stats[i].stat_string,
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
- break;
- }
-}
-
-static void xgbe_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- u8 *stat;
- int i;
-
- pdata->hw_if.read_mmc_stats(pdata);
- for (i = 0; i < XGBE_STATS_COUNT; i++) {
- stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
- *data++ = *(u64 *)stat;
- }
-}
-
-static int xgbe_get_sset_count(struct net_device *netdev, int stringset)
-{
- int ret;
-
- switch (stringset) {
- case ETH_SS_STATS:
- ret = XGBE_STATS_COUNT;
- break;
-
- default:
- ret = -EOPNOTSUPP;
- }
-
- return ret;
-}
-
-static void xgbe_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
-
- pause->autoneg = pdata->phy.pause_autoneg;
- pause->tx_pause = pdata->phy.tx_pause;
- pause->rx_pause = pdata->phy.rx_pause;
-}
-
-static int xgbe_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- int ret = 0;
-
- if (pause->autoneg && (pdata->phy.autoneg != AUTONEG_ENABLE)) {
- netdev_err(netdev,
- "autoneg disabled, pause autoneg not avialable\n");
- return -EINVAL;
- }
-
- pdata->phy.pause_autoneg = pause->autoneg;
- pdata->phy.tx_pause = pause->tx_pause;
- pdata->phy.rx_pause = pause->rx_pause;
-
- pdata->phy.advertising &= ~ADVERTISED_Pause;
- pdata->phy.advertising &= ~ADVERTISED_Asym_Pause;
-
- if (pause->rx_pause) {
- pdata->phy.advertising |= ADVERTISED_Pause;
- pdata->phy.advertising |= ADVERTISED_Asym_Pause;
- }
-
- if (pause->tx_pause)
- pdata->phy.advertising ^= ADVERTISED_Asym_Pause;
-
- if (netif_running(netdev))
- ret = pdata->phy_if.phy_config_aneg(pdata);
-
- return ret;
-}
-
-static int xgbe_get_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
-
- cmd->phy_address = pdata->phy.address;
-
- cmd->supported = pdata->phy.supported;
- cmd->advertising = pdata->phy.advertising;
- cmd->lp_advertising = pdata->phy.lp_advertising;
-
- cmd->autoneg = pdata->phy.autoneg;
- ethtool_cmd_speed_set(cmd, pdata->phy.speed);
- cmd->duplex = pdata->phy.duplex;
-
- cmd->port = PORT_NONE;
- cmd->transceiver = XCVR_INTERNAL;
-
- return 0;
-}
-
-static int xgbe_set_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- u32 speed;
- int ret;
-
- speed = ethtool_cmd_speed(cmd);
-
- if (cmd->phy_address != pdata->phy.address) {
- netdev_err(netdev, "invalid phy address %hhu\n",
- cmd->phy_address);
- return -EINVAL;
- }
-
- if ((cmd->autoneg != AUTONEG_ENABLE) &&
- (cmd->autoneg != AUTONEG_DISABLE)) {
- netdev_err(netdev, "unsupported autoneg %hhu\n",
- cmd->autoneg);
- return -EINVAL;
- }
-
- if (cmd->autoneg == AUTONEG_DISABLE) {
- switch (speed) {
- case SPEED_10000:
- break;
- case SPEED_2500:
- if (pdata->speed_set != XGBE_SPEEDSET_2500_10000) {
- netdev_err(netdev, "unsupported speed %u\n",
- speed);
- return -EINVAL;
- }
- break;
- case SPEED_1000:
- if (pdata->speed_set != XGBE_SPEEDSET_1000_10000) {
- netdev_err(netdev, "unsupported speed %u\n",
- speed);
- return -EINVAL;
- }
- break;
- default:
- netdev_err(netdev, "unsupported speed %u\n", speed);
- return -EINVAL;
- }
-
- if (cmd->duplex != DUPLEX_FULL) {
- netdev_err(netdev, "unsupported duplex %hhu\n",
- cmd->duplex);
- return -EINVAL;
- }
- }
-
- netif_dbg(pdata, link, netdev,
- "requested advertisement %#x, phy supported %#x\n",
- cmd->advertising, pdata->phy.supported);
-
- cmd->advertising &= pdata->phy.supported;
- if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising) {
- netdev_err(netdev,
- "unsupported requested advertisement\n");
- return -EINVAL;
- }
-
- ret = 0;
- pdata->phy.autoneg = cmd->autoneg;
- pdata->phy.speed = speed;
- pdata->phy.duplex = cmd->duplex;
- pdata->phy.advertising = cmd->advertising;
-
- if (cmd->autoneg == AUTONEG_ENABLE)
- pdata->phy.advertising |= ADVERTISED_Autoneg;
- else
- pdata->phy.advertising &= ~ADVERTISED_Autoneg;
-
- if (netif_running(netdev))
- ret = pdata->phy_if.phy_config_aneg(pdata);
-
- return ret;
-}
-
-static void xgbe_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
-
- strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, XGBE_DRV_VERSION, sizeof(drvinfo->version));
- strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
- sizeof(drvinfo->bus_info));
- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
- XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
- XGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
- XGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
-}
-
-static u32 xgbe_get_msglevel(struct net_device *netdev)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
-
- return pdata->msg_enable;
-}
-
-static void xgbe_set_msglevel(struct net_device *netdev, u32 msglevel)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
-
- pdata->msg_enable = msglevel;
-}
-
-static int xgbe_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
-
- memset(ec, 0, sizeof(struct ethtool_coalesce));
-
- ec->rx_coalesce_usecs = pdata->rx_usecs;
- ec->rx_max_coalesced_frames = pdata->rx_frames;
-
- ec->tx_max_coalesced_frames = pdata->tx_frames;
-
- return 0;
-}
-
-static int xgbe_set_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- unsigned int rx_frames, rx_riwt, rx_usecs;
- unsigned int tx_frames;
-
- /* Check for not supported parameters */
- if ((ec->rx_coalesce_usecs_irq) ||
- (ec->rx_max_coalesced_frames_irq) ||
- (ec->tx_coalesce_usecs) ||
- (ec->tx_coalesce_usecs_irq) ||
- (ec->tx_max_coalesced_frames_irq) ||
- (ec->stats_block_coalesce_usecs) ||
- (ec->use_adaptive_rx_coalesce) ||
- (ec->use_adaptive_tx_coalesce) ||
- (ec->pkt_rate_low) ||
- (ec->rx_coalesce_usecs_low) ||
- (ec->rx_max_coalesced_frames_low) ||
- (ec->tx_coalesce_usecs_low) ||
- (ec->tx_max_coalesced_frames_low) ||
- (ec->pkt_rate_high) ||
- (ec->rx_coalesce_usecs_high) ||
- (ec->rx_max_coalesced_frames_high) ||
- (ec->tx_coalesce_usecs_high) ||
- (ec->tx_max_coalesced_frames_high) ||
- (ec->rate_sample_interval)) {
- netdev_err(netdev, "unsupported coalescing parameter\n");
- return -EOPNOTSUPP;
- }
-
- rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
- rx_usecs = ec->rx_coalesce_usecs;
- rx_frames = ec->rx_max_coalesced_frames;
-
- /* Use smallest possible value if conversion resulted in zero */
- if (rx_usecs && !rx_riwt)
- rx_riwt = 1;
-
- /* Check the bounds of values for Rx */
- if (rx_riwt > XGMAC_MAX_DMA_RIWT) {
- netdev_err(netdev, "rx-usec is limited to %d usecs\n",
- hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT));
- return -EINVAL;
- }
- if (rx_frames > pdata->rx_desc_count) {
- netdev_err(netdev, "rx-frames is limited to %d frames\n",
- pdata->rx_desc_count);
- return -EINVAL;
- }
-
- tx_frames = ec->tx_max_coalesced_frames;
-
- /* Check the bounds of values for Tx */
- if (tx_frames > pdata->tx_desc_count) {
- netdev_err(netdev, "tx-frames is limited to %d frames\n",
- pdata->tx_desc_count);
- return -EINVAL;
- }
-
- pdata->rx_riwt = rx_riwt;
- pdata->rx_usecs = rx_usecs;
- pdata->rx_frames = rx_frames;
- hw_if->config_rx_coalesce(pdata);
-
- pdata->tx_frames = tx_frames;
- hw_if->config_tx_coalesce(pdata);
-
- return 0;
-}
-
-static int xgbe_get_rxnfc(struct net_device *netdev,
- struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
-
- switch (rxnfc->cmd) {
- case ETHTOOL_GRXRINGS:
- rxnfc->data = pdata->rx_ring_count;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static u32 xgbe_get_rxfh_key_size(struct net_device *netdev)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
-
- return sizeof(pdata->rss_key);
-}
-
-static u32 xgbe_get_rxfh_indir_size(struct net_device *netdev)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
-
- return ARRAY_SIZE(pdata->rss_table);
-}
-
-static int xgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
- u8 *hfunc)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- unsigned int i;
-
- if (indir) {
- for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
- indir[i] = XGMAC_GET_BITS(pdata->rss_table[i],
- MAC_RSSDR, DMCH);
- }
-
- if (key)
- memcpy(key, pdata->rss_key, sizeof(pdata->rss_key));
-
- if (hfunc)
- *hfunc = ETH_RSS_HASH_TOP;
-
- return 0;
-}
-
-static int xgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
- const u8 *key, const u8 hfunc)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- struct xgbe_hw_if *hw_if = &pdata->hw_if;
- unsigned int ret;
-
- if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
- netdev_err(netdev, "unsupported hash function\n");
- return -EOPNOTSUPP;
- }
-
- if (indir) {
- ret = hw_if->set_rss_lookup_table(pdata, indir);
- if (ret)
- return ret;
- }
-
- if (key) {
- ret = hw_if->set_rss_hash_key(pdata, key);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int xgbe_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *ts_info)
-{
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
-
- ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
-
- if (pdata->ptp_clock)
- ts_info->phc_index = ptp_clock_index(pdata->ptp_clock);
- else
- ts_info->phc_index = -1;
-
- ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
- ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_ALL);
-
- return 0;
-}
-
-static const struct ethtool_ops xgbe_ethtool_ops = {
- .get_settings = xgbe_get_settings,
- .set_settings = xgbe_set_settings,
- .get_drvinfo = xgbe_get_drvinfo,
- .get_msglevel = xgbe_get_msglevel,
- .set_msglevel = xgbe_set_msglevel,
- .get_link = ethtool_op_get_link,
- .get_coalesce = xgbe_get_coalesce,
- .set_coalesce = xgbe_set_coalesce,
- .get_pauseparam = xgbe_get_pauseparam,
- .set_pauseparam = xgbe_set_pauseparam,
- .get_strings = xgbe_get_strings,
- .get_ethtool_stats = xgbe_get_ethtool_stats,
- .get_sset_count = xgbe_get_sset_count,
- .get_rxnfc = xgbe_get_rxnfc,
- .get_rxfh_key_size = xgbe_get_rxfh_key_size,
- .get_rxfh_indir_size = xgbe_get_rxfh_indir_size,
- .get_rxfh = xgbe_get_rxfh,
- .set_rxfh = xgbe_set_rxfh,
- .get_ts_info = xgbe_get_ts_info,
-};
-
-struct ethtool_ops *xgbe_get_ethtool_ops(void)
-{
- return (struct ethtool_ops *)&xgbe_ethtool_ops;
-}
Index: sys/dev/axgbe/xgbe-main.c
===================================================================
--- sys/dev/axgbe/xgbe-main.c
+++ /dev/null
@@ -1,908 +0,0 @@
-/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014-2016 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/spinlock.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_net.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <linux/clk.h>
-#include <linux/property.h>
-#include <linux/acpi.h>
-#include <linux/mdio.h>
-
-#include "xgbe.h"
-#include "xgbe-common.h"
-
-MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
-MODULE_LICENSE("Dual BSD/GPL");
-MODULE_VERSION(XGBE_DRV_VERSION);
-MODULE_DESCRIPTION(XGBE_DRV_DESC);
-
-static int debug = -1;
-module_param(debug, int, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(debug, " Network interface message level setting");
-
-static const u32 default_msg_level = (NETIF_MSG_LINK | NETIF_MSG_IFDOWN |
- NETIF_MSG_IFUP);
-
-static const u32 xgbe_serdes_blwc[] = {
- XGBE_SPEED_1000_BLWC,
- XGBE_SPEED_2500_BLWC,
- XGBE_SPEED_10000_BLWC,
-};
-
-static const u32 xgbe_serdes_cdr_rate[] = {
- XGBE_SPEED_1000_CDR,
- XGBE_SPEED_2500_CDR,
- XGBE_SPEED_10000_CDR,
-};
-
-static const u32 xgbe_serdes_pq_skew[] = {
- XGBE_SPEED_1000_PQ,
- XGBE_SPEED_2500_PQ,
- XGBE_SPEED_10000_PQ,
-};
-
-static const u32 xgbe_serdes_tx_amp[] = {
- XGBE_SPEED_1000_TXAMP,
- XGBE_SPEED_2500_TXAMP,
- XGBE_SPEED_10000_TXAMP,
-};
-
-static const u32 xgbe_serdes_dfe_tap_cfg[] = {
- XGBE_SPEED_1000_DFE_TAP_CONFIG,
- XGBE_SPEED_2500_DFE_TAP_CONFIG,
- XGBE_SPEED_10000_DFE_TAP_CONFIG,
-};
-
-static const u32 xgbe_serdes_dfe_tap_ena[] = {
- XGBE_SPEED_1000_DFE_TAP_ENABLE,
- XGBE_SPEED_2500_DFE_TAP_ENABLE,
- XGBE_SPEED_10000_DFE_TAP_ENABLE,
-};
-
-static void xgbe_default_config(struct xgbe_prv_data *pdata)
-{
- DBGPR("-->xgbe_default_config\n");
-
- pdata->pblx8 = DMA_PBL_X8_ENABLE;
- pdata->tx_sf_mode = MTL_TSF_ENABLE;
- pdata->tx_threshold = MTL_TX_THRESHOLD_64;
- pdata->tx_pbl = DMA_PBL_16;
- pdata->tx_osp_mode = DMA_OSP_ENABLE;
- pdata->rx_sf_mode = MTL_RSF_DISABLE;
- pdata->rx_threshold = MTL_RX_THRESHOLD_64;
- pdata->rx_pbl = DMA_PBL_16;
- pdata->pause_autoneg = 1;
- pdata->tx_pause = 1;
- pdata->rx_pause = 1;
- pdata->phy_speed = SPEED_UNKNOWN;
- pdata->power_down = 0;
-
- DBGPR("<--xgbe_default_config\n");
-}
-
-static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
-{
- xgbe_init_function_ptrs_dev(&pdata->hw_if);
- xgbe_init_function_ptrs_phy(&pdata->phy_if);
- xgbe_init_function_ptrs_desc(&pdata->desc_if);
-}
-
-#ifdef CONFIG_ACPI
-static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
-{
- struct device *dev = pdata->dev;
- u32 property;
- int ret;
-
- /* Obtain the system clock setting */
- ret = device_property_read_u32(dev, XGBE_ACPI_DMA_FREQ, &property);
- if (ret) {
- dev_err(dev, "unable to obtain %s property\n",
- XGBE_ACPI_DMA_FREQ);
- return ret;
- }
- pdata->sysclk_rate = property;
-
- /* Obtain the PTP clock setting */
- ret = device_property_read_u32(dev, XGBE_ACPI_PTP_FREQ, &property);
- if (ret) {
- dev_err(dev, "unable to obtain %s property\n",
- XGBE_ACPI_PTP_FREQ);
- return ret;
- }
- pdata->ptpclk_rate = property;
-
- return 0;
-}
-#else /* CONFIG_ACPI */
-static int xgbe_acpi_support(struct xgbe_prv_data *pdata)
-{
- return -EINVAL;
-}
-#endif /* CONFIG_ACPI */
-
-#ifdef CONFIG_OF
-static int xgbe_of_support(struct xgbe_prv_data *pdata)
-{
- struct device *dev = pdata->dev;
-
- /* Obtain the system clock setting */
- pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
- if (IS_ERR(pdata->sysclk)) {
- dev_err(dev, "dma devm_clk_get failed\n");
- return PTR_ERR(pdata->sysclk);
- }
- pdata->sysclk_rate = clk_get_rate(pdata->sysclk);
-
- /* Obtain the PTP clock setting */
- pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
- if (IS_ERR(pdata->ptpclk)) {
- dev_err(dev, "ptp devm_clk_get failed\n");
- return PTR_ERR(pdata->ptpclk);
- }
- pdata->ptpclk_rate = clk_get_rate(pdata->ptpclk);
-
- return 0;
-}
-
-static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
-{
- struct device *dev = pdata->dev;
- struct device_node *phy_node;
- struct platform_device *phy_pdev;
-
- phy_node = of_parse_phandle(dev->of_node, "phy-handle", 0);
- if (phy_node) {
- /* Old style device tree:
- * The XGBE and PHY resources are separate
- */
- phy_pdev = of_find_device_by_node(phy_node);
- of_node_put(phy_node);
- } else {
- /* New style device tree:
- * The XGBE and PHY resources are grouped together with
- * the PHY resources listed last
- */
- get_device(dev);
- phy_pdev = pdata->pdev;
- }
-
- return phy_pdev;
-}
-#else /* CONFIG_OF */
-static int xgbe_of_support(struct xgbe_prv_data *pdata)
-{
- return -EINVAL;
-}
-
-static struct platform_device *xgbe_of_get_phy_pdev(struct xgbe_prv_data *pdata)
-{
- return NULL;
-}
-#endif /* CONFIG_OF */
-
-static unsigned int xgbe_resource_count(struct platform_device *pdev,
- unsigned int type)
-{
- unsigned int count;
- int i;
-
- for (i = 0, count = 0; i < pdev->num_resources; i++) {
- struct resource *res = &pdev->resource[i];
-
- if (type == resource_type(res))
- count++;
- }
-
- return count;
-}
-
-static struct platform_device *xgbe_get_phy_pdev(struct xgbe_prv_data *pdata)
-{
- struct platform_device *phy_pdev;
-
- if (pdata->use_acpi) {
- get_device(pdata->dev);
- phy_pdev = pdata->pdev;
- } else {
- phy_pdev = xgbe_of_get_phy_pdev(pdata);
- }
-
- return phy_pdev;
-}
-
-static int xgbe_probe(struct platform_device *pdev)
-{
- struct xgbe_prv_data *pdata;
- struct net_device *netdev;
- struct device *dev = &pdev->dev, *phy_dev;
- struct platform_device *phy_pdev;
- struct resource *res;
- const char *phy_mode;
- unsigned int i, phy_memnum, phy_irqnum;
- enum dev_dma_attr attr;
- int ret;
-
- DBGPR("--> xgbe_probe\n");
-
- netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
- XGBE_MAX_DMA_CHANNELS);
- if (!netdev) {
- dev_err(dev, "alloc_etherdev failed\n");
- ret = -ENOMEM;
- goto err_alloc;
- }
- SET_NETDEV_DEV(netdev, dev);
- pdata = netdev_priv(netdev);
- pdata->netdev = netdev;
- pdata->pdev = pdev;
- pdata->adev = ACPI_COMPANION(dev);
- pdata->dev = dev;
- platform_set_drvdata(pdev, netdev);
-
- spin_lock_init(&pdata->lock);
- spin_lock_init(&pdata->xpcs_lock);
- mutex_init(&pdata->rss_mutex);
- spin_lock_init(&pdata->tstamp_lock);
-
- pdata->msg_enable = netif_msg_init(debug, default_msg_level);
-
- set_bit(XGBE_DOWN, &pdata->dev_state);
-
- /* Check if we should use ACPI or DT */
- pdata->use_acpi = dev->of_node ? 0 : 1;
-
- phy_pdev = xgbe_get_phy_pdev(pdata);
- if (!phy_pdev) {
- dev_err(dev, "unable to obtain phy device\n");
- ret = -EINVAL;
- goto err_phydev;
- }
- phy_dev = &phy_pdev->dev;
-
- if (pdev == phy_pdev) {
- /* New style device tree or ACPI:
- * The XGBE and PHY resources are grouped together with
- * the PHY resources listed last
- */
- phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
- phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
- } else {
- /* Old style device tree:
- * The XGBE and PHY resources are separate
- */
- phy_memnum = 0;
- phy_irqnum = 0;
- }
-
- /* Set and validate the number of descriptors for a ring */
- BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
- pdata->tx_desc_count = XGBE_TX_DESC_CNT;
- if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
- dev_err(dev, "tx descriptor count (%d) is not valid\n",
- pdata->tx_desc_count);
- ret = -EINVAL;
- goto err_io;
- }
- BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
- pdata->rx_desc_count = XGBE_RX_DESC_CNT;
- if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
- dev_err(dev, "rx descriptor count (%d) is not valid\n",
- pdata->rx_desc_count);
- ret = -EINVAL;
- goto err_io;
- }
-
- /* Obtain the mmio areas for the device */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pdata->xgmac_regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(pdata->xgmac_regs)) {
- dev_err(dev, "xgmac ioremap failed\n");
- ret = PTR_ERR(pdata->xgmac_regs);
- goto err_io;
- }
- if (netif_msg_probe(pdata))
- dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- pdata->xpcs_regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(pdata->xpcs_regs)) {
- dev_err(dev, "xpcs ioremap failed\n");
- ret = PTR_ERR(pdata->xpcs_regs);
- goto err_io;
- }
- if (netif_msg_probe(pdata))
- dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
-
- res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
- pdata->rxtx_regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(pdata->rxtx_regs)) {
- dev_err(dev, "rxtx ioremap failed\n");
- ret = PTR_ERR(pdata->rxtx_regs);
- goto err_io;
- }
- if (netif_msg_probe(pdata))
- dev_dbg(dev, "rxtx_regs = %p\n", pdata->rxtx_regs);
-
- res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
- pdata->sir0_regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(pdata->sir0_regs)) {
- dev_err(dev, "sir0 ioremap failed\n");
- ret = PTR_ERR(pdata->sir0_regs);
- goto err_io;
- }
- if (netif_msg_probe(pdata))
- dev_dbg(dev, "sir0_regs = %p\n", pdata->sir0_regs);
-
- res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
- pdata->sir1_regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(pdata->sir1_regs)) {
- dev_err(dev, "sir1 ioremap failed\n");
- ret = PTR_ERR(pdata->sir1_regs);
- goto err_io;
- }
- if (netif_msg_probe(pdata))
- dev_dbg(dev, "sir1_regs = %p\n", pdata->sir1_regs);
-
- /* Retrieve the MAC address */
- ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
- pdata->mac_addr,
- sizeof(pdata->mac_addr));
- if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
- dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
- if (!ret)
- ret = -EINVAL;
- goto err_io;
- }
-
- /* Retrieve the PHY mode - it must be "xgmii" */
- ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
- &phy_mode);
- if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
- dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
- if (!ret)
- ret = -EINVAL;
- goto err_io;
- }
- pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;
-
- /* Check for per channel interrupt support */
- if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
- pdata->per_channel_irq = 1;
-
- /* Retrieve the PHY speedset */
- ret = device_property_read_u32(phy_dev, XGBE_SPEEDSET_PROPERTY,
- &pdata->speed_set);
- if (ret) {
- dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
- goto err_io;
- }
-
- switch (pdata->speed_set) {
- case XGBE_SPEEDSET_1000_10000:
- case XGBE_SPEEDSET_2500_10000:
- break;
- default:
- dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
- ret = -EINVAL;
- goto err_io;
- }
-
- /* Retrieve the PHY configuration properties */
- if (device_property_present(phy_dev, XGBE_BLWC_PROPERTY)) {
- ret = device_property_read_u32_array(phy_dev,
- XGBE_BLWC_PROPERTY,
- pdata->serdes_blwc,
- XGBE_SPEEDS);
- if (ret) {
- dev_err(dev, "invalid %s property\n",
- XGBE_BLWC_PROPERTY);
- goto err_io;
- }
- } else {
- memcpy(pdata->serdes_blwc, xgbe_serdes_blwc,
- sizeof(pdata->serdes_blwc));
- }
-
- if (device_property_present(phy_dev, XGBE_CDR_RATE_PROPERTY)) {
- ret = device_property_read_u32_array(phy_dev,
- XGBE_CDR_RATE_PROPERTY,
- pdata->serdes_cdr_rate,
- XGBE_SPEEDS);
- if (ret) {
- dev_err(dev, "invalid %s property\n",
- XGBE_CDR_RATE_PROPERTY);
- goto err_io;
- }
- } else {
- memcpy(pdata->serdes_cdr_rate, xgbe_serdes_cdr_rate,
- sizeof(pdata->serdes_cdr_rate));
- }
-
- if (device_property_present(phy_dev, XGBE_PQ_SKEW_PROPERTY)) {
- ret = device_property_read_u32_array(phy_dev,
- XGBE_PQ_SKEW_PROPERTY,
- pdata->serdes_pq_skew,
- XGBE_SPEEDS);
- if (ret) {
- dev_err(dev, "invalid %s property\n",
- XGBE_PQ_SKEW_PROPERTY);
- goto err_io;
- }
- } else {
- memcpy(pdata->serdes_pq_skew, xgbe_serdes_pq_skew,
- sizeof(pdata->serdes_pq_skew));
- }
-
- if (device_property_present(phy_dev, XGBE_TX_AMP_PROPERTY)) {
- ret = device_property_read_u32_array(phy_dev,
- XGBE_TX_AMP_PROPERTY,
- pdata->serdes_tx_amp,
- XGBE_SPEEDS);
- if (ret) {
- dev_err(dev, "invalid %s property\n",
- XGBE_TX_AMP_PROPERTY);
- goto err_io;
- }
- } else {
- memcpy(pdata->serdes_tx_amp, xgbe_serdes_tx_amp,
- sizeof(pdata->serdes_tx_amp));
- }
-
- if (device_property_present(phy_dev, XGBE_DFE_CFG_PROPERTY)) {
- ret = device_property_read_u32_array(phy_dev,
- XGBE_DFE_CFG_PROPERTY,
- pdata->serdes_dfe_tap_cfg,
- XGBE_SPEEDS);
- if (ret) {
- dev_err(dev, "invalid %s property\n",
- XGBE_DFE_CFG_PROPERTY);
- goto err_io;
- }
- } else {
- memcpy(pdata->serdes_dfe_tap_cfg, xgbe_serdes_dfe_tap_cfg,
- sizeof(pdata->serdes_dfe_tap_cfg));
- }
-
- if (device_property_present(phy_dev, XGBE_DFE_ENA_PROPERTY)) {
- ret = device_property_read_u32_array(phy_dev,
- XGBE_DFE_ENA_PROPERTY,
- pdata->serdes_dfe_tap_ena,
- XGBE_SPEEDS);
- if (ret) {
- dev_err(dev, "invalid %s property\n",
- XGBE_DFE_ENA_PROPERTY);
- goto err_io;
- }
- } else {
- memcpy(pdata->serdes_dfe_tap_ena, xgbe_serdes_dfe_tap_ena,
- sizeof(pdata->serdes_dfe_tap_ena));
- }
-
- /* Obtain device settings unique to ACPI/OF */
- if (pdata->use_acpi)
- ret = xgbe_acpi_support(pdata);
- else
- ret = xgbe_of_support(pdata);
- if (ret)
- goto err_io;
-
- /* Set the DMA coherency values */
- attr = device_get_dma_attr(dev);
- if (attr == DEV_DMA_NOT_SUPPORTED) {
- dev_err(dev, "DMA is not supported");
- goto err_io;
- }
- pdata->coherent = (attr == DEV_DMA_COHERENT);
- if (pdata->coherent) {
- pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
- pdata->arcache = XGBE_DMA_OS_ARCACHE;
- pdata->awcache = XGBE_DMA_OS_AWCACHE;
- } else {
- pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
- pdata->arcache = XGBE_DMA_SYS_ARCACHE;
- pdata->awcache = XGBE_DMA_SYS_AWCACHE;
- }
-
- /* Get the device interrupt */
- ret = platform_get_irq(pdev, 0);
- if (ret < 0) {
- dev_err(dev, "platform_get_irq 0 failed\n");
- goto err_io;
- }
- pdata->dev_irq = ret;
-
- /* Get the auto-negotiation interrupt */
- ret = platform_get_irq(phy_pdev, phy_irqnum++);
- if (ret < 0) {
- dev_err(dev, "platform_get_irq phy 0 failed\n");
- goto err_io;
- }
- pdata->an_irq = ret;
-
- netdev->irq = pdata->dev_irq;
- netdev->base_addr = (unsigned long)pdata->xgmac_regs;
- memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
-
- /* Set all the function pointers */
- xgbe_init_all_fptrs(pdata);
-
- /* Issue software reset to device */
- pdata->hw_if.exit(pdata);
-
- /* Populate the hardware features */
- xgbe_get_all_hw_features(pdata);
-
- /* Set default configuration data */
- xgbe_default_config(pdata);
-
- /* Set the DMA mask */
- ret = dma_set_mask_and_coherent(dev,
- DMA_BIT_MASK(pdata->hw_feat.dma_width));
- if (ret) {
- dev_err(dev, "dma_set_mask_and_coherent failed\n");
- goto err_io;
- }
-
- /* Calculate the number of Tx and Rx rings to be created
- * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
- * the number of Tx queues to the number of Tx channels
- * enabled
- * -Rx (DMA) Channels do not map 1-to-1 so use the actual
- * number of Rx queues
- */
- pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
- pdata->hw_feat.tx_ch_cnt);
- pdata->tx_q_count = pdata->tx_ring_count;
- ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
- if (ret) {
- dev_err(dev, "error setting real tx queue count\n");
- goto err_io;
- }
-
- pdata->rx_ring_count = min_t(unsigned int,
- netif_get_num_default_rss_queues(),
- pdata->hw_feat.rx_ch_cnt);
- pdata->rx_q_count = pdata->hw_feat.rx_q_cnt;
- ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
- if (ret) {
- dev_err(dev, "error setting real rx queue count\n");
- goto err_io;
- }
-
- /* Initialize RSS hash key and lookup table */
- netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));
-
- for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
- XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
- i % pdata->rx_ring_count);
-
- XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
- XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
- XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
-
- /* Call MDIO/PHY initialization routine */
- pdata->phy_if.phy_init(pdata);
-
- /* Set device operations */
- netdev->netdev_ops = xgbe_get_netdev_ops();
- netdev->ethtool_ops = xgbe_get_ethtool_ops();
-#ifdef CONFIG_AMD_XGBE_DCB
- netdev->dcbnl_ops = xgbe_get_dcbnl_ops();
-#endif
-
- /* Set device features */
- netdev->hw_features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_RXCSUM |
- NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_GRO |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_FILTER;
-
- if (pdata->hw_feat.rss)
- netdev->hw_features |= NETIF_F_RXHASH;
-
- netdev->vlan_features |= NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO6;
-
- netdev->features |= netdev->hw_features;
- pdata->netdev_features = netdev->features;
-
- netdev->priv_flags |= IFF_UNICAST_FLT;
-
- /* Use default watchdog timeout */
- netdev->watchdog_timeo = 0;
-
- xgbe_init_rx_coalesce(pdata);
- xgbe_init_tx_coalesce(pdata);
-
- netif_carrier_off(netdev);
- ret = register_netdev(netdev);
- if (ret) {
- dev_err(dev, "net device registration failed\n");
- goto err_io;
- }
-
- /* Create the PHY/ANEG name based on netdev name */
- snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
- netdev_name(netdev));
-
- /* Create workqueues */
- pdata->dev_workqueue =
- create_singlethread_workqueue(netdev_name(netdev));
- if (!pdata->dev_workqueue) {
- netdev_err(netdev, "device workqueue creation failed\n");
- ret = -ENOMEM;
- goto err_netdev;
- }
-
- pdata->an_workqueue =
- create_singlethread_workqueue(pdata->an_name);
- if (!pdata->an_workqueue) {
- netdev_err(netdev, "phy workqueue creation failed\n");
- ret = -ENOMEM;
- goto err_wq;
- }
-
- xgbe_ptp_register(pdata);
-
- xgbe_debugfs_init(pdata);
-
- platform_device_put(phy_pdev);
-
- netdev_notice(netdev, "net device enabled\n");
-
- DBGPR("<-- xgbe_probe\n");
-
- return 0;
-
-err_wq:
- destroy_workqueue(pdata->dev_workqueue);
-
-err_netdev:
- unregister_netdev(netdev);
-
-err_io:
- platform_device_put(phy_pdev);
-
-err_phydev:
- free_netdev(netdev);
-
-err_alloc:
- dev_notice(dev, "net device not enabled\n");
-
- return ret;
-}
-
-static int xgbe_remove(struct platform_device *pdev)
-{
- struct net_device *netdev = platform_get_drvdata(pdev);
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
-
- DBGPR("-->xgbe_remove\n");
-
- xgbe_debugfs_exit(pdata);
-
- xgbe_ptp_unregister(pdata);
-
- flush_workqueue(pdata->an_workqueue);
- destroy_workqueue(pdata->an_workqueue);
-
- flush_workqueue(pdata->dev_workqueue);
- destroy_workqueue(pdata->dev_workqueue);
-
- unregister_netdev(netdev);
-
- free_netdev(netdev);
-
- DBGPR("<--xgbe_remove\n");
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int xgbe_suspend(struct device *dev)
-{
- struct net_device *netdev = dev_get_drvdata(dev);
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- int ret = 0;
-
- DBGPR("-->xgbe_suspend\n");
-
- if (netif_running(netdev))
- ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
-
- pdata->lpm_ctrl = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
- pdata->lpm_ctrl |= MDIO_CTRL1_LPOWER;
- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
-
- DBGPR("<--xgbe_suspend\n");
-
- return ret;
-}
-
-static int xgbe_resume(struct device *dev)
-{
- struct net_device *netdev = dev_get_drvdata(dev);
- struct xgbe_prv_data *pdata = netdev_priv(netdev);
- int ret = 0;
-
- DBGPR("-->xgbe_resume\n");
-
- pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
- XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
-
- if (netif_running(netdev))
- ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
-
- DBGPR("<--xgbe_resume\n");
-
- return ret;
-}
-#endif /* CONFIG_PM */
-
-#ifdef CONFIG_ACPI
-static const struct acpi_device_id xgbe_acpi_match[] = {
- { "AMDI8001", 0 },
- {},
-};
-
-MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match);
-#endif
-
-#ifdef CONFIG_OF
-static const struct of_device_id xgbe_of_match[] = {
- { .compatible = "amd,xgbe-seattle-v1a", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, xgbe_of_match);
-#endif
-
-static SIMPLE_DEV_PM_OPS(xgbe_pm_ops, xgbe_suspend, xgbe_resume);
-
-static struct platform_driver xgbe_driver = {
- .driver = {
- .name = "amd-xgbe",
-#ifdef CONFIG_ACPI
- .acpi_match_table = xgbe_acpi_match,
-#endif
-#ifdef CONFIG_OF
- .of_match_table = xgbe_of_match,
-#endif
- .pm = &xgbe_pm_ops,
- },
- .probe = xgbe_probe,
- .remove = xgbe_remove,
-};
-
-module_platform_driver(xgbe_driver);
Index: sys/dev/axgbe/xgbe-mdio.c
===================================================================
--- sys/dev/axgbe/xgbe-mdio.c
+++ sys/dev/axgbe/xgbe-mdio.c
@@ -114,17 +114,16 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <linux/module.h>
-#include <linux/kmod.h>
-#include <linux/mdio.h>
-#include <linux/phy.h>
-#include <linux/of.h>
-#include <linux/bitops.h>
-#include <linux/jiffies.h>
+#include <sys/cdefs.h>
+
+#include <sys/param.h>
+#include <sys/kernel.h>
#include "xgbe.h"
#include "xgbe-common.h"
+static void xgbe_an_state_machine(struct xgbe_prv_data *pdata);
+
static void xgbe_an_enable_kr_training(struct xgbe_prv_data *pdata)
{
unsigned int reg;
@@ -154,7 +153,7 @@
reg |= MDIO_CTRL1_LPOWER;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
- usleep_range(75, 100);
+ DELAY(75);
reg &= ~MDIO_CTRL1_LPOWER;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, reg);
@@ -177,7 +176,7 @@
/* Wait for Rx and Tx ready */
wait = XGBE_RATECHANGE_COUNT;
while (wait--) {
- usleep_range(50, 75);
+ DELAY(50);
status = XSIR0_IOREAD(pdata, SIR0_STATUS);
if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
@@ -185,9 +184,6 @@
goto rx_reset;
}
- netif_dbg(pdata, link, pdata->netdev, "SerDes rx/tx not ready (%#hx)\n",
- status);
-
rx_reset:
/* Perform Rx reset for the DFE changes */
XRXTX_IOWRITE_BITS(pdata, RXTX_REG6, RESETB_RXD, 0);
@@ -238,8 +234,6 @@
pdata->serdes_dfe_tap_ena[XGBE_SPEED_10000]);
xgbe_serdes_complete_ratechange(pdata);
-
- netif_dbg(pdata, link, pdata->netdev, "10GbE KR mode set\n");
}
static void xgbe_gmii_2500_mode(struct xgbe_prv_data *pdata)
@@ -286,8 +280,6 @@
pdata->serdes_dfe_tap_ena[XGBE_SPEED_2500]);
xgbe_serdes_complete_ratechange(pdata);
-
- netif_dbg(pdata, link, pdata->netdev, "2.5GbE KX mode set\n");
}
static void xgbe_gmii_mode(struct xgbe_prv_data *pdata)
@@ -334,8 +326,6 @@
pdata->serdes_dfe_tap_ena[XGBE_SPEED_1000]);
xgbe_serdes_complete_ratechange(pdata);
-
- netif_dbg(pdata, link, pdata->netdev, "1GbE KX mode set\n");
}
static void xgbe_cur_mode(struct xgbe_prv_data *pdata,
@@ -440,15 +430,11 @@
static void xgbe_restart_an(struct xgbe_prv_data *pdata)
{
xgbe_set_an(pdata, true, true);
-
- netif_dbg(pdata, link, pdata->netdev, "AN enabled/restarted\n");
}
static void xgbe_disable_an(struct xgbe_prv_data *pdata)
{
xgbe_set_an(pdata, false, false);
-
- netif_dbg(pdata, link, pdata->netdev, "AN disabled\n");
}
static enum xgbe_an xgbe_an_tx_training(struct xgbe_prv_data *pdata,
@@ -483,9 +469,6 @@
reg);
XSIR0_IOWRITE_BITS(pdata, SIR0_KR_RT_1, RESET, 0);
-
- netif_dbg(pdata, link, pdata->netdev,
- "KR training initiated\n");
}
return XGBE_AN_PAGE_RECEIVED;
@@ -554,19 +537,16 @@
enum xgbe_an ret;
if (!pdata->an_start) {
- pdata->an_start = jiffies;
+ pdata->an_start = ticks;
} else {
an_timeout = pdata->an_start +
- msecs_to_jiffies(XGBE_AN_MS_TIMEOUT);
- if (time_after(jiffies, an_timeout)) {
+ ((uint64_t)XGBE_AN_MS_TIMEOUT * (uint64_t)hz) / 1000ull;
+ if ((int)(ticks - an_timeout) > 0) {
/* Auto-negotiation timed out, reset state */
pdata->kr_state = XGBE_RX_BPA;
pdata->kx_state = XGBE_RX_BPA;
- pdata->an_start = jiffies;
-
- netif_dbg(pdata, link, pdata->netdev,
- "AN timed out, resetting state\n");
+ pdata->an_start = ticks;
}
}
@@ -620,12 +600,10 @@
return XGBE_AN_INCOMPAT_LINK;
}
-static irqreturn_t xgbe_an_isr(int irq, void *data)
+static void xgbe_an_isr(void *data)
{
struct xgbe_prv_data *pdata = (struct xgbe_prv_data *)data;
- netif_dbg(pdata, intr, pdata->netdev, "AN interrupt received\n");
-
/* Disable AN interrupts */
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
@@ -636,57 +614,19 @@
/* Clear the interrupt(s) that fired and process them */
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, ~pdata->an_int);
- queue_work(pdata->an_workqueue, &pdata->an_irq_work);
+ xgbe_an_state_machine(pdata);
} else {
/* Enable AN interrupts */
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK,
XGBE_AN_INT_MASK);
}
-
- return IRQ_HANDLED;
-}
-
-static void xgbe_an_irq_work(struct work_struct *work)
-{
- struct xgbe_prv_data *pdata = container_of(work,
- struct xgbe_prv_data,
- an_irq_work);
-
- /* Avoid a race between enabling the IRQ and exiting the work by
- * waiting for the work to finish and then queueing it
- */
- flush_work(&pdata->an_work);
- queue_work(pdata->an_workqueue, &pdata->an_work);
}
-static const char *xgbe_state_as_string(enum xgbe_an state)
+static void xgbe_an_state_machine(struct xgbe_prv_data *pdata)
{
- switch (state) {
- case XGBE_AN_READY:
- return "Ready";
- case XGBE_AN_PAGE_RECEIVED:
- return "Page-Received";
- case XGBE_AN_INCOMPAT_LINK:
- return "Incompatible-Link";
- case XGBE_AN_COMPLETE:
- return "Complete";
- case XGBE_AN_NO_LINK:
- return "No-Link";
- case XGBE_AN_ERROR:
- return "Error";
- default:
- return "Undefined";
- }
-}
-
-static void xgbe_an_state_machine(struct work_struct *work)
-{
- struct xgbe_prv_data *pdata = container_of(work,
- struct xgbe_prv_data,
- an_work);
enum xgbe_an cur_state = pdata->an_state;
- mutex_lock(&pdata->an_mutex);
+ sx_xlock(&pdata->an_mutex);
if (!pdata->an_int)
goto out;
@@ -708,9 +648,6 @@
pdata->an_result = pdata->an_state;
again:
- netif_dbg(pdata, link, pdata->netdev, "AN %s\n",
- xgbe_state_as_string(pdata->an_state));
-
cur_state = pdata->an_state;
switch (pdata->an_state) {
@@ -731,9 +668,6 @@
case XGBE_AN_COMPLETE:
pdata->parallel_detect = pdata->an_supported ? 0 : 1;
- netif_dbg(pdata, link, pdata->netdev, "%s successful\n",
- pdata->an_supported ? "Auto negotiation"
- : "Parallel detection");
break;
case XGBE_AN_NO_LINK:
@@ -747,10 +681,6 @@
pdata->an_int = 0;
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
} else if (pdata->an_state == XGBE_AN_ERROR) {
- netdev_err(pdata->netdev,
- "error during auto-negotiation, state=%u\n",
- cur_state);
-
pdata->an_int = 0;
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
}
@@ -761,9 +691,6 @@
pdata->kr_state = XGBE_RX_BPA;
pdata->kx_state = XGBE_RX_BPA;
pdata->an_start = 0;
-
- netif_dbg(pdata, link, pdata->netdev, "AN result: %s\n",
- xgbe_state_as_string(pdata->an_result));
}
if (cur_state != pdata->an_state)
@@ -776,7 +703,7 @@
/* Enable AN interrupts on the way out */
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, XGBE_AN_INT_MASK);
- mutex_unlock(&pdata->an_mutex);
+ sx_xunlock(&pdata->an_mutex);
}
static void xgbe_an_init(struct xgbe_prv_data *pdata)
@@ -785,10 +712,7 @@
/* Set up Advertisement register 3 first */
reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
- if (pdata->phy.advertising & ADVERTISED_10000baseR_FEC)
- reg |= 0xc000;
- else
- reg &= ~0xc000;
+ reg &= ~0xc000;
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, reg);
@@ -823,48 +747,6 @@
reg &= ~XGBE_XNP_NP_EXCHANGE;
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
-
- netif_dbg(pdata, link, pdata->netdev, "AN initialized\n");
-}
-
-static const char *xgbe_phy_fc_string(struct xgbe_prv_data *pdata)
-{
- if (pdata->tx_pause && pdata->rx_pause)
- return "rx/tx";
- else if (pdata->rx_pause)
- return "rx";
- else if (pdata->tx_pause)
- return "tx";
- else
- return "off";
-}
-
-static const char *xgbe_phy_speed_string(int speed)
-{
- switch (speed) {
- case SPEED_1000:
- return "1Gbps";
- case SPEED_2500:
- return "2.5Gbps";
- case SPEED_10000:
- return "10Gbps";
- case SPEED_UNKNOWN:
- return "Unknown";
- default:
- return "Unsupported";
- }
-}
-
-static void xgbe_phy_print_status(struct xgbe_prv_data *pdata)
-{
- if (pdata->phy.link)
- netdev_info(pdata->netdev,
- "Link is Up - %s/%s - flow control %s\n",
- xgbe_phy_speed_string(pdata->phy.speed),
- pdata->phy.duplex == DUPLEX_FULL ? "Full" : "Half",
- xgbe_phy_fc_string(pdata));
- else
- netdev_info(pdata->netdev, "Link is Down\n");
}
static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata)
@@ -902,14 +784,10 @@
pdata->phy_link = 0;
pdata->phy_speed = SPEED_UNKNOWN;
}
-
- if (new_state && netif_msg_link(pdata))
- xgbe_phy_print_status(pdata);
}
static int xgbe_phy_config_fixed(struct xgbe_prv_data *pdata)
{
- netif_dbg(pdata, link, pdata->netdev, "fixed PHY configuration\n");
/* Disable auto-negotiation */
xgbe_disable_an(pdata);
@@ -939,15 +817,16 @@
static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata)
{
set_bit(XGBE_LINK_INIT, &pdata->dev_state);
- pdata->link_check = jiffies;
+ pdata->link_check = ticks;
if (pdata->phy.autoneg != AUTONEG_ENABLE)
return xgbe_phy_config_fixed(pdata);
- netif_dbg(pdata, link, pdata->netdev, "AN PHY configuration\n");
-
/* Disable auto-negotiation interrupt */
- disable_irq(pdata->an_irq);
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
+
+ /* Clear any auto-negotitation interrupts */
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
/* Start auto-negotiation in a supported mode */
if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full) {
@@ -956,7 +835,7 @@
(pdata->phy.advertising & ADVERTISED_2500baseX_Full)) {
xgbe_set_mode(pdata, XGBE_MODE_KX);
} else {
- enable_irq(pdata->an_irq);
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);
return -EINVAL;
}
@@ -972,7 +851,7 @@
pdata->kx_state = XGBE_RX_BPA;
/* Re-enable auto-negotiation interrupt */
- enable_irq(pdata->an_irq);
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);
/* Set up advertisement registers based on current settings */
xgbe_an_init(pdata);
@@ -987,7 +866,7 @@
{
int ret;
- mutex_lock(&pdata->an_mutex);
+ sx_xlock(&pdata->an_mutex);
ret = __xgbe_phy_config_aneg(pdata);
if (ret)
@@ -995,7 +874,7 @@
else
clear_bit(XGBE_LINK_ERR, &pdata->dev_state);
- mutex_unlock(&pdata->an_mutex);
+ sx_unlock(&pdata->an_mutex);
return ret;
}
@@ -1009,9 +888,8 @@
{
unsigned long link_timeout;
- link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * HZ);
- if (time_after(jiffies, link_timeout)) {
- netif_dbg(pdata, link, pdata->netdev, "AN link timeout\n");
+ link_timeout = pdata->link_check + (XGBE_LINK_TIMEOUT * hz);
+ if ((int)(ticks - link_timeout) >= 0) {
xgbe_phy_config_aneg(pdata);
}
}
@@ -1109,10 +987,6 @@
/* Compare Advertisement and Link Partner register 3 */
ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
- if (lp_reg & 0xc000)
- pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
-
- pdata->phy.duplex = DUPLEX_FULL;
}
static void xgbe_phy_status(struct xgbe_prv_data *pdata)
@@ -1120,8 +994,6 @@
unsigned int reg, link_aneg;
if (test_bit(XGBE_LINK_ERR, &pdata->dev_state)) {
- netif_carrier_off(pdata->netdev);
-
pdata->phy.link = 0;
goto adjust_link;
}
@@ -1145,8 +1017,6 @@
if (test_bit(XGBE_LINK_INIT, &pdata->dev_state))
clear_bit(XGBE_LINK_INIT, &pdata->dev_state);
-
- netif_carrier_on(pdata->netdev);
} else {
if (test_bit(XGBE_LINK_INIT, &pdata->dev_state)) {
xgbe_check_link_timeout(pdata);
@@ -1156,8 +1026,6 @@
}
xgbe_phy_status_aneg(pdata);
-
- netif_carrier_off(pdata->netdev);
}
adjust_link:
@@ -1166,7 +1034,6 @@
static void xgbe_phy_stop(struct xgbe_prv_data *pdata)
{
- netif_dbg(pdata, link, pdata->netdev, "stopping PHY\n");
/* Disable auto-negotiation */
xgbe_disable_an(pdata);
@@ -1174,27 +1041,22 @@
/* Disable auto-negotiation interrupts */
XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
- devm_free_irq(pdata->dev, pdata->an_irq, pdata);
+ bus_teardown_intr(pdata->dev, pdata->an_irq_res, pdata->an_irq_tag);
pdata->phy.link = 0;
- netif_carrier_off(pdata->netdev);
xgbe_phy_adjust_link(pdata);
}
static int xgbe_phy_start(struct xgbe_prv_data *pdata)
{
- struct net_device *netdev = pdata->netdev;
int ret;
- netif_dbg(pdata, link, pdata->netdev, "starting PHY\n");
-
- ret = devm_request_irq(pdata->dev, pdata->an_irq,
- xgbe_an_isr, 0, pdata->an_name,
- pdata);
+ ret = bus_setup_intr(pdata->dev, pdata->an_irq_res,
+ INTR_MPSAFE | INTR_TYPE_NET, NULL, xgbe_an_isr, pdata,
+ &pdata->an_irq_tag);
if (ret) {
- netdev_err(netdev, "phy irq request failed\n");
- return ret;
+ return -ret;
}
/* Set initial mode - call the mode setting routines
@@ -1220,7 +1082,7 @@
return xgbe_phy_config_aneg(pdata);
err_irq:
- devm_free_irq(pdata->dev, pdata->an_irq, pdata);
+ bus_teardown_intr(pdata->dev, pdata->an_irq_res, pdata->an_irq_tag);
return ret;
}
@@ -1235,7 +1097,7 @@
count = 50;
do {
- msleep(20);
+ DELAY(20);
reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1);
} while ((reg & MDIO_CTRL1_RESET) && --count);
@@ -1251,50 +1113,9 @@
return 0;
}
-static void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
-{
- struct device *dev = pdata->dev;
-
- dev_dbg(dev, "\n************* PHY Reg dump **********************\n");
-
- dev_dbg(dev, "PCS Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1));
- dev_dbg(dev, "PCS Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1));
- dev_dbg(dev, "Phy Id (PHYS ID 1 %#04x)= %#04x\n", MDIO_DEVID1,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1));
- dev_dbg(dev, "Phy Id (PHYS ID 2 %#04x)= %#04x\n", MDIO_DEVID2,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2));
- dev_dbg(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS1,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1));
- dev_dbg(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS2,
- XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2));
-
- dev_dbg(dev, "Auto-Neg Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1));
- dev_dbg(dev, "Auto-Neg Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1));
- dev_dbg(dev, "Auto-Neg Ad Reg 1 (%#04x) = %#04x\n",
- MDIO_AN_ADVERTISE,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE));
- dev_dbg(dev, "Auto-Neg Ad Reg 2 (%#04x) = %#04x\n",
- MDIO_AN_ADVERTISE + 1,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1));
- dev_dbg(dev, "Auto-Neg Ad Reg 3 (%#04x) = %#04x\n",
- MDIO_AN_ADVERTISE + 2,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2));
- dev_dbg(dev, "Auto-Neg Completion Reg (%#04x) = %#04x\n",
- MDIO_AN_COMP_STAT,
- XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT));
-
- dev_dbg(dev, "\n*************************************************\n");
-}
-
static void xgbe_phy_init(struct xgbe_prv_data *pdata)
{
- mutex_init(&pdata->an_mutex);
- INIT_WORK(&pdata->an_irq_work, xgbe_an_irq_work);
- INIT_WORK(&pdata->an_work, xgbe_an_state_machine);
+ sx_init(&pdata->an_mutex, "axgbe AN lock");
pdata->mdio_mmd = MDIO_MMD_PCS;
/* Initialize supported features */
@@ -1343,9 +1164,6 @@
if (pdata->tx_pause)
pdata->phy.advertising ^= ADVERTISED_Asym_Pause;
-
- if (netif_msg_drv(pdata))
- xgbe_dump_phy_registers(pdata);
}
void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *phy_if)
Index: sys/dev/axgbe/xgbe-ptp.c
===================================================================
--- sys/dev/axgbe/xgbe-ptp.c
+++ /dev/null
@@ -1,279 +0,0 @@
-/*
- * AMD 10Gb Ethernet driver
- *
- * This file is available to you under your choice of the following two
- * licenses:
- *
- * License 1: GPLv2
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- *
- * This file is free software; you may copy, redistribute and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 2 of the License, or (at
- * your option) any later version.
- *
- * This file is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * License 2: Modified BSD
- *
- * Copyright (c) 2014 Advanced Micro Devices, Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Advanced Micro Devices, Inc. nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file incorporates work covered by the following copyright and
- * permission notice:
- * The Synopsys DWC ETHER XGMAC Software Driver and documentation
- * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
- * Inc. unless otherwise expressly agreed to in writing between Synopsys
- * and you.
- *
- * The Software IS NOT an item of Licensed Software or Licensed Product
- * under any End User Software License Agreement or Agreement for Licensed
- * Product with Synopsys or any supplement thereto. Permission is hereby
- * granted, free of charge, to any person obtaining a copy of this software
- * annotated with this license and the Software, to deal in the Software
- * without restriction, including without limitation the rights to use,
- * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
- * of the Software, and to permit persons to whom the Software is furnished
- * to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
- * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
- * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <linux/clk.h>
-#include <linux/clocksource.h>
-#include <linux/ptp_clock_kernel.h>
-#include <linux/net_tstamp.h>
-
-#include "xgbe.h"
-#include "xgbe-common.h"
-
-static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
-{
- struct xgbe_prv_data *pdata = container_of(cc,
- struct xgbe_prv_data,
- tstamp_cc);
- u64 nsec;
-
- nsec = pdata->hw_if.get_tstamp_time(pdata);
-
- return nsec;
-}
-
-static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
-{
- struct xgbe_prv_data *pdata = container_of(info,
- struct xgbe_prv_data,
- ptp_clock_info);
- unsigned long flags;
- u64 adjust;
- u32 addend, diff;
- unsigned int neg_adjust = 0;
-
- if (delta < 0) {
- neg_adjust = 1;
- delta = -delta;
- }
-
- adjust = pdata->tstamp_addend;
- adjust *= delta;
- diff = div_u64(adjust, 1000000000UL);
-
- addend = (neg_adjust) ? pdata->tstamp_addend - diff :
- pdata->tstamp_addend + diff;
-
- spin_lock_irqsave(&pdata->tstamp_lock, flags);
-
- pdata->hw_if.update_tstamp_addend(pdata, addend);
-
- spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
-
- return 0;
-}
-
-static int xgbe_adjtime(struct ptp_clock_info *info, s64 delta)
-{
- struct xgbe_prv_data *pdata = container_of(info,
- struct xgbe_prv_data,
- ptp_clock_info);
- unsigned long flags;
-
- spin_lock_irqsave(&pdata->tstamp_lock, flags);
- timecounter_adjtime(&pdata->tstamp_tc, delta);
- spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
-
- return 0;
-}
-
-static int xgbe_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
-{
- struct xgbe_prv_data *pdata = container_of(info,
- struct xgbe_prv_data,
- ptp_clock_info);
- unsigned long flags;
- u64 nsec;
-
- spin_lock_irqsave(&pdata->tstamp_lock, flags);
-
- nsec = timecounter_read(&pdata->tstamp_tc);
-
- spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
-
- *ts = ns_to_timespec64(nsec);
-
- return 0;
-}
-
-static int xgbe_settime(struct ptp_clock_info *info,
- const struct timespec64 *ts)
-{
- struct xgbe_prv_data *pdata = container_of(info,
- struct xgbe_prv_data,
- ptp_clock_info);
- unsigned long flags;
- u64 nsec;
-
- nsec = timespec64_to_ns(ts);
-
- spin_lock_irqsave(&pdata->tstamp_lock, flags);
-
- timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec);
-
- spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
-
- return 0;
-}
-
-static int xgbe_enable(struct ptp_clock_info *info,
- struct ptp_clock_request *request, int on)
-{
- return -EOPNOTSUPP;
-}
-
-void xgbe_ptp_register(struct xgbe_prv_data *pdata)
-{
- struct ptp_clock_info *info = &pdata->ptp_clock_info;
- struct ptp_clock *clock;
- struct cyclecounter *cc = &pdata->tstamp_cc;
- u64 dividend;
-
- snprintf(info->name, sizeof(info->name), "%s",
- netdev_name(pdata->netdev));
- info->owner = THIS_MODULE;
- info->max_adj = pdata->ptpclk_rate;
- info->adjfreq = xgbe_adjfreq;
- info->adjtime = xgbe_adjtime;
- info->gettime64 = xgbe_gettime;
- info->settime64 = xgbe_settime;
- info->enable = xgbe_enable;
-
- clock = ptp_clock_register(info, pdata->dev);
- if (IS_ERR(clock)) {
- dev_err(pdata->dev, "ptp_clock_register failed\n");
- return;
- }
-
- pdata->ptp_clock = clock;
-
- /* Calculate the addend:
- * addend = 2^32 / (PTP ref clock / 50Mhz)
- * = (2^32 * 50Mhz) / PTP ref clock
- */
- dividend = 50000000;
- dividend <<= 32;
- pdata->tstamp_addend = div_u64(dividend, pdata->ptpclk_rate);
-
- /* Setup the timecounter */
- cc->read = xgbe_cc_read;
- cc->mask = CLOCKSOURCE_MASK(64);
- cc->mult = 1;
- cc->shift = 0;
-
- timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
- ktime_to_ns(ktime_get_real()));
-
- /* Disable all timestamping to start */
- XGMAC_IOWRITE(pdata, MAC_TCR, 0);
- pdata->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
- pdata->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
-}
-
-void xgbe_ptp_unregister(struct xgbe_prv_data *pdata)
-{
- if (pdata->ptp_clock)
- ptp_clock_unregister(pdata->ptp_clock);
-}
Index: sys/dev/axgbe/xgbe.h
===================================================================
--- sys/dev/axgbe/xgbe.h
+++ sys/dev/axgbe/xgbe.h
@@ -117,16 +117,10 @@
#ifndef __XGBE_H__
#define __XGBE_H__
-#include <linux/dma-mapping.h>
-#include <linux/netdevice.h>
-#include <linux/workqueue.h>
-#include <linux/phy.h>
-#include <linux/if_vlan.h>
-#include <linux/bitops.h>
-#include <linux/ptp_clock_kernel.h>
-#include <linux/timecounter.h>
-#include <linux/net_tstamp.h>
-#include <net/dcbnl.h>
+#include "xgbe_osdep.h"
+
+/* From linux/dcbnl.h */
+#define IEEE_8021QAZ_MAX_TCS 8
#define XGBE_DRV_NAME "amd-xgbe"
#define XGBE_DRV_VERSION "1.0.2"
@@ -151,7 +145,7 @@
*/
#define XGBE_TX_MAX_DESCS (MAX_SKB_FRAGS + XGBE_TX_MAX_SPLIT + 2)
-#define XGBE_RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
+#define XGBE_RX_MIN_BUF_SIZE 1522
#define XGBE_RX_BUF_ALIGN 64
#define XGBE_SKB_ALLOC_SIZE 256
#define XGBE_SPH_HDSMS_SIZE 2 /* Keep in sync with SKB_ALLOC_SIZE */
@@ -288,7 +282,7 @@
struct xgbe_prv_data;
struct xgbe_packet_data {
- struct sk_buff *skb;
+ struct mbuf *m;
unsigned int attributes;
@@ -297,18 +291,8 @@
unsigned int rdesc_count;
unsigned int length;
- unsigned int header_len;
- unsigned int tcp_header_len;
- unsigned int tcp_payload_len;
- unsigned short mss;
-
- unsigned short vlan_ctag;
-
u64 rx_tstamp;
- u32 rss_hash;
- enum pkt_hash_types rss_hash_type;
-
unsigned int tx_packets;
unsigned int tx_bytes;
};
@@ -321,25 +305,6 @@
__le32 desc3;
};
-/* Page allocation related values */
-struct xgbe_page_alloc {
- struct page *pages;
- unsigned int pages_len;
- unsigned int pages_offset;
-
- dma_addr_t pages_dma;
-};
-
-/* Ring entry buffer data */
-struct xgbe_buffer_data {
- struct xgbe_page_alloc pa;
- struct xgbe_page_alloc pa_unmap;
-
- dma_addr_t dma_base;
- unsigned long dma_off;
- unsigned int dma_len;
-};
-
/* Tx-related ring data */
struct xgbe_tx_ring_data {
unsigned int packets; /* BQL packet count */
@@ -348,9 +313,6 @@
/* Rx-related ring data */
struct xgbe_rx_ring_data {
- struct xgbe_buffer_data hdr; /* Header locations */
- struct xgbe_buffer_data buf; /* Payload locations */
-
unsigned short hdr_len; /* Length of received header */
unsigned short len; /* Length of received packet */
};
@@ -361,28 +323,19 @@
*/
struct xgbe_ring_data {
struct xgbe_ring_desc *rdesc; /* Virtual address of descriptor */
- dma_addr_t rdesc_dma; /* DMA address of descriptor */
+ bus_addr_t rdata_paddr;
- struct sk_buff *skb; /* Virtual address of SKB */
- dma_addr_t skb_dma; /* DMA address of SKB data */
- unsigned int skb_dma_len; /* Length of SKB DMA area */
+ bus_dma_tag_t mbuf_dmat;
+ bus_dmamap_t mbuf_map;
+ bus_addr_t mbuf_hdr_paddr;
+ bus_addr_t mbuf_data_paddr;
+ bus_size_t mbuf_len;
+
+ int mbuf_free;
+ struct mbuf *mb;
struct xgbe_tx_ring_data tx; /* Tx-related data */
struct xgbe_rx_ring_data rx; /* Rx-related data */
-
- unsigned int mapped_as_page;
-
- /* Incomplete receive save location. If the budget is exhausted
- * or the last descriptor (last normal descriptor or a following
- * context descriptor) has not been DMA'd yet the current state
- * of the receive processing needs to be saved.
- */
- unsigned int state_saved;
- struct {
- struct sk_buff *skb;
- unsigned int len;
- unsigned int error;
- } state;
};
struct xgbe_ring {
@@ -394,18 +347,19 @@
/* Virtual/DMA addresses and count of allocated descriptor memory */
struct xgbe_ring_desc *rdesc;
- dma_addr_t rdesc_dma;
+ bus_dmamap_t rdesc_map;
+ bus_dma_tag_t rdesc_dmat;
+ bus_addr_t rdesc_paddr;
unsigned int rdesc_count;
+ bus_dma_tag_t mbuf_dmat;
+ bus_dmamap_t mbuf_map;
+
/* Array of descriptor data corresponding the descriptor memory
* (always use the XGBE_GET_DESC_DATA macro to access this data)
*/
struct xgbe_ring_data *rdata;
- /* Page allocation for RX buffers */
- struct xgbe_page_alloc rx_hdr_pa;
- struct xgbe_page_alloc rx_buf_pa;
-
/* Ring index values
* cur - Tx: index of descriptor to be used for current transfer
* Rx: index of descriptor to check for packet availability
@@ -426,7 +380,7 @@
unsigned short cur_vlan_ctag;
} tx;
};
-} ____cacheline_aligned;
+} __aligned(CACHE_LINE_SIZE);
/* Structure used to describe the descriptor rings associated with
* a DMA channel.
@@ -439,23 +393,18 @@
/* Queue index and base address of queue's DMA registers */
unsigned int queue_index;
- void __iomem *dma_regs;
+ bus_space_tag_t dma_tag;
+ bus_space_handle_t dma_handle;
/* Per channel interrupt irq number */
- int dma_irq;
- char dma_irq_name[IFNAMSIZ + 32];
-
- /* Netdev related settings */
- struct napi_struct napi;
+ struct resource *dma_irq_res;
+ void *dma_irq_tag;
unsigned int saved_ier;
- unsigned int tx_timer_active;
- struct timer_list tx_timer;
-
struct xgbe_ring *tx_ring;
struct xgbe_ring *rx_ring;
-} ____cacheline_aligned;
+} __aligned(CACHE_LINE_SIZE);
enum xgbe_state {
XGBE_DOWN,
@@ -664,24 +613,8 @@
void (*tx_mmc_int)(struct xgbe_prv_data *);
void (*read_mmc_stats)(struct xgbe_prv_data *);
- /* For Timestamp config */
- int (*config_tstamp)(struct xgbe_prv_data *, unsigned int);
- void (*update_tstamp_addend)(struct xgbe_prv_data *, unsigned int);
- void (*set_tstamp_time)(struct xgbe_prv_data *, unsigned int sec,
- unsigned int nsec);
- u64 (*get_tstamp_time)(struct xgbe_prv_data *);
- u64 (*get_tx_tstamp)(struct xgbe_prv_data *);
-
- /* For Data Center Bridging config */
- void (*config_tc)(struct xgbe_prv_data *);
- void (*config_dcb_tc)(struct xgbe_prv_data *);
- void (*config_dcb_pfc)(struct xgbe_prv_data *);
-
/* For Receive Side Scaling */
- int (*enable_rss)(struct xgbe_prv_data *);
int (*disable_rss)(struct xgbe_prv_data *);
- int (*set_rss_hash_key)(struct xgbe_prv_data *, const u8 *);
- int (*set_rss_lookup_table)(struct xgbe_prv_data *, const u32 *);
};
struct xgbe_phy_if {
@@ -701,7 +634,7 @@
struct xgbe_desc_if {
int (*alloc_ring_resources)(struct xgbe_prv_data *);
void (*free_ring_resources)(struct xgbe_prv_data *);
- int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *);
+ int (*map_tx_skb)(struct xgbe_channel *, struct mbuf *);
int (*map_rx_buffer)(struct xgbe_prv_data *, struct xgbe_ring *,
struct xgbe_ring_data *);
void (*unmap_rdata)(struct xgbe_prv_data *, struct xgbe_ring_data *);
@@ -756,34 +689,33 @@
};
struct xgbe_prv_data {
- struct net_device *netdev;
+ struct ifnet *netdev;
struct platform_device *pdev;
struct acpi_device *adev;
- struct device *dev;
+ device_t dev;
/* ACPI or DT flag */
unsigned int use_acpi;
/* XGMAC/XPCS related mmio registers */
- void __iomem *xgmac_regs; /* XGMAC CSRs */
- void __iomem *xpcs_regs; /* XPCS MMD registers */
- void __iomem *rxtx_regs; /* SerDes Rx/Tx CSRs */
- void __iomem *sir0_regs; /* SerDes integration registers (1/2) */
- void __iomem *sir1_regs; /* SerDes integration registers (2/2) */
+ struct resource *xgmac_res; /* XGMAC CSRs */
+ struct resource *xpcs_res; /* XPCS MMD registers */
+ struct resource *rxtx_res; /* SerDes Rx/Tx CSRs */
+ struct resource *sir0_res; /* SerDes integration registers (1/2) */
+ struct resource *sir1_res; /* SerDes integration registers (2/2) */
- /* Overall device lock */
- spinlock_t lock;
+ /* DMA tag */
+ bus_dma_tag_t dmat;
/* XPCS indirect addressing lock */
spinlock_t xpcs_lock;
- /* RSS addressing mutex */
- struct mutex rss_mutex;
-
/* Flags representing xgbe_state */
unsigned long dev_state;
- int dev_irq;
+ struct resource *dev_irq_res;
+ struct resource *chan_irq_res[4];
+ void *dev_irq_tag;
unsigned int per_channel_irq;
struct xgbe_hw_if hw_if;
@@ -797,9 +729,9 @@
unsigned int awcache;
/* Service routine support */
- struct workqueue_struct *dev_workqueue;
- struct work_struct service_work;
- struct timer_list service_timer;
+ struct taskqueue *dev_workqueue;
+ struct task service_work;
+ struct callout service_timer;
/* Rings for Tx/Rx on a DMA channel */
struct xgbe_channel *channel;
@@ -850,35 +782,16 @@
/* Netdev related settings */
unsigned char mac_addr[ETH_ALEN];
- netdev_features_t netdev_features;
- struct napi_struct napi;
struct xgbe_mmc_stats mmc_stats;
struct xgbe_ext_stats ext_stats;
- /* Filtering support */
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-
/* Device clocks */
struct clk *sysclk;
unsigned long sysclk_rate;
struct clk *ptpclk;
unsigned long ptpclk_rate;
- /* Timestamp support */
- spinlock_t tstamp_lock;
- struct ptp_clock_info ptp_clock_info;
- struct ptp_clock *ptp_clock;
- struct hwtstamp_config tstamp_config;
- struct cyclecounter tstamp_cc;
- struct timecounter tstamp_tc;
- unsigned int tstamp_addend;
- struct work_struct tx_tstamp_work;
- struct sk_buff *tx_tstamp_skb;
- u64 tx_tstamp;
-
/* DCB support */
- struct ieee_ets *ets;
- struct ieee_pfc *pfc;
unsigned int q2tc_map[XGBE_MAX_QUEUES];
unsigned int prio2q_map[IEEE_8021QAZ_MAX_TCS];
u8 num_tcs;
@@ -887,7 +800,7 @@
struct xgbe_hw_features hw_feat;
/* Device restart work structure */
- struct work_struct restart_work;
+ struct task restart_work;
/* Keeps track of power mode */
unsigned int power_down;
@@ -896,7 +809,6 @@
u32 msg_enable;
/* Current PHY settings */
- phy_interface_t phy_mode;
int phy_link;
int phy_speed;
@@ -906,10 +818,9 @@
unsigned long link_check;
char an_name[IFNAMSIZ + 32];
- struct workqueue_struct *an_workqueue;
- int an_irq;
- struct work_struct an_irq_work;
+ struct resource *an_irq_res;
+ void *an_irq_tag;
unsigned int speed_set;
@@ -928,61 +839,32 @@
/* Auto-negotiation state machine support */
unsigned int an_int;
- struct mutex an_mutex;
+ struct sx an_mutex;
enum xgbe_an an_result;
enum xgbe_an an_state;
enum xgbe_rx kr_state;
enum xgbe_rx kx_state;
- struct work_struct an_work;
unsigned int an_supported;
unsigned int parallel_detect;
unsigned int fec_ability;
unsigned long an_start;
unsigned int lpm_ctrl; /* CTRL1 for resume */
-
-#ifdef CONFIG_DEBUG_FS
- struct dentry *xgbe_debugfs;
-
- unsigned int debugfs_xgmac_reg;
-
- unsigned int debugfs_xpcs_mmd;
- unsigned int debugfs_xpcs_reg;
-#endif
};
/* Function prototypes*/
+int xgbe_open(struct ifnet *);
+int xgbe_close(struct ifnet *);
+int xgbe_xmit(struct ifnet *, struct mbuf *);
+int xgbe_change_mtu(struct ifnet *, int);
void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
void xgbe_init_function_ptrs_phy(struct xgbe_phy_if *);
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *);
-struct net_device_ops *xgbe_get_netdev_ops(void);
-struct ethtool_ops *xgbe_get_ethtool_ops(void);
-#ifdef CONFIG_AMD_XGBE_DCB
-const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void);
-#endif
-
-void xgbe_ptp_register(struct xgbe_prv_data *);
-void xgbe_ptp_unregister(struct xgbe_prv_data *);
-void xgbe_dump_tx_desc(struct xgbe_prv_data *, struct xgbe_ring *,
- unsigned int, unsigned int, unsigned int);
-void xgbe_dump_rx_desc(struct xgbe_prv_data *, struct xgbe_ring *,
- unsigned int);
-void xgbe_print_pkt(struct net_device *, struct sk_buff *, bool);
void xgbe_get_all_hw_features(struct xgbe_prv_data *);
-int xgbe_powerup(struct net_device *, unsigned int);
-int xgbe_powerdown(struct net_device *, unsigned int);
void xgbe_init_rx_coalesce(struct xgbe_prv_data *);
void xgbe_init_tx_coalesce(struct xgbe_prv_data *);
-#ifdef CONFIG_DEBUG_FS
-void xgbe_debugfs_init(struct xgbe_prv_data *);
-void xgbe_debugfs_exit(struct xgbe_prv_data *);
-#else
-static inline void xgbe_debugfs_init(struct xgbe_prv_data *pdata) {}
-static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {}
-#endif /* CONFIG_DEBUG_FS */
-
/* NOTE: Uncomment for function trace log messages in KERNEL LOG */
#if 0
#define YDEBUG
@@ -991,13 +873,13 @@
/* For debug prints */
#ifdef YDEBUG
-#define DBGPR(x...) pr_alert(x)
+#define DBGPR(x...) printf(x)
#else
#define DBGPR(x...) do { } while (0)
#endif
#ifdef YDEBUG_MDIO
-#define DBGPR_MDIO(x...) pr_alert(x)
+#define DBGPR_MDIO(x...) printf(x)
#else
#define DBGPR_MDIO(x...) do { } while (0)
#endif
Index: sys/dev/axgbe/xgbe_osdep.h
===================================================================
--- /dev/null
+++ sys/dev/axgbe/xgbe_osdep.h
@@ -0,0 +1,186 @@
+/*-
+ * Copyright (c) 2016,2017 SoftIron Inc.
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * the sponsorship of SoftIron Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _XGBE_OSDEP_H_
+#define _XGBE_OSDEP_H_
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/endian.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mbuf.h>
+#include <sys/mutex.h>
+#include <sys/socket.h>
+#include <sys/sx.h>
+#include <sys/taskqueue.h>
+
+#include <machine/bus.h>
+
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <net/if_var.h>
+
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t __le32;
+typedef uint32_t u32;
+typedef uint64_t u64;
+
+typedef struct {
+ struct mtx lock;
+} spinlock_t;
+
+static inline void
+spin_lock_init(spinlock_t *spinlock)
+{
+
+ mtx_init(&spinlock->lock, "axgbe_spin", NULL, MTX_DEF);
+}
+
+#define spin_lock_irqsave(spinlock, flags) \
+do { \
+ (flags) = intr_disable(); \
+ mtx_lock(&(spinlock)->lock); \
+} while (0)
+
+#define spin_unlock_irqrestore(spinlock, flags) \
+do { \
+ mtx_unlock(&(spinlock)->lock); \
+ intr_restore(flags); \
+} while (0)
+
+#define BIT(pos) (1ul << (pos))
+
+static inline void
+clear_bit(int pos, unsigned long *p)
+{
+
+ atomic_clear_long(p, 1ul << pos);
+}
+
+static inline int
+test_bit(int pos, unsigned long *p)
+{
+ unsigned long val;
+
+ val = *p;
+ return ((val & (1ul << (pos))) != 0);
+}
+
+static inline void
+set_bit(int pos, unsigned long *p)
+{
+
+ atomic_set_long(p, 1ul << pos);
+}
+
+#define lower_32_bits(x) ((x) & 0xffffffffu)
+#define upper_32_bits(x) (((x) >> 32) & 0xffffffffu)
+#define cpu_to_le32(x) htole32(x)
+#define le32_to_cpu(x) le32toh(x)
+
+MALLOC_DECLARE(M_AXGBE);
+
+#define ADVERTISED_Pause 0x01
+#define ADVERTISED_Asym_Pause 0x02
+#define ADVERTISED_Autoneg 0x04
+#define ADVERTISED_Backplane 0x08
+#define ADVERTISED_10000baseKR_Full 0x10
+#define ADVERTISED_2500baseX_Full 0x20
+#define ADVERTISED_1000baseKX_Full 0x40
+
+#define AUTONEG_DISABLE 0
+#define AUTONEG_ENABLE 1
+
+#define DUPLEX_UNKNOWN 1
+#define DUPLEX_FULL 2
+
+#define SPEED_UNKNOWN 1
+#define SPEED_10000 2
+#define SPEED_2500 3
+#define SPEED_1000 4
+
+#define SUPPORTED_Autoneg 0x01
+#define SUPPORTED_Pause 0x02
+#define SUPPORTED_Asym_Pause 0x04
+#define SUPPORTED_Backplane 0x08
+#define SUPPORTED_10000baseKR_Full 0x10
+#define SUPPORTED_1000baseKX_Full 0x20
+#define SUPPORTED_2500baseX_Full 0x40
+#define SUPPORTED_10000baseR_FEC 0x80
+
+#define BMCR_SPEED100 0x2000
+
+#define MDIO_MMD_PMAPMD 1
+#define MDIO_MMD_PCS 3
+#define MDIO_MMD_AN 7
+#define MDIO_PMA_10GBR_FECABLE 170
+#define MDIO_PMA_10GBR_FECABLE_ABLE 0x0001
+#define MDIO_PMA_10GBR_FECABLE_ERRABLE 0x0002
+#define MII_ADDR_C45 (1<<30)
+
+#define MDIO_CTRL1 0x00 /* MII_BMCR */
+#define MDIO_CTRL1_RESET 0x8000 /* BMCR_RESET */
+#define MDIO_CTRL1_SPEEDSELEXT 0x2040 /* BMCR_SPEED1000|BMCR_SPEED100*/
+#define MDIO_CTRL1_SPEEDSEL (MDIO_CTRL1_SPEEDSELEXT | 0x3c)
+#define MDIO_AN_CTRL1_ENABLE 0x1000 /* BMCR_AUTOEN */
+#define MDIO_CTRL1_LPOWER 0x0800 /* BMCR_PDOWN */
+#define MDIO_AN_CTRL1_RESTART 0x0200 /* BMCR_STARTNEG */
+
+#define MDIO_CTRL1_SPEED10G (MDIO_CTRL1_SPEEDSELEXT | 0x00)
+
+#define MDIO_STAT1 1 /* MII_BMSR */
+#define MDIO_STAT1_LSTATUS 0x0004 /* BMSR_LINK */
+
+#define MDIO_CTRL2 0x07
+#define MDIO_PCS_CTRL2_10GBR 0x0000
+#define MDIO_PCS_CTRL2_10GBX 0x0001
+#define MDIO_PCS_CTRL2_TYPE 0x0003
+
+#define MDIO_AN_ADVERTISE 16
+
+#define MDIO_AN_LPA 19
+
+#define ETH_ALEN ETHER_ADDR_LEN
+#define ETH_HLEN ETHER_HDR_LEN
+#define ETH_FCS_LEN 4
+#define VLAN_HLEN ETHER_VLAN_ENCAP_LEN
+
+#define ARRAY_SIZE(x) nitems(x)
+
+#define BITS_PER_LONG (sizeof(long) * CHAR_BIT)
+#define BITS_TO_LONGS(n) howmany((n), BITS_PER_LONG)
+
+#define NSEC_PER_SEC 1000000000ul
+
+#define min_t(t, a, b) MIN((t)(a), (t)(b))
+#define max_t(t, a, b) MAX((t)(a), (t)(b))
+
+#endif /* _XGBE_OSDEP_H_ */

File Metadata

Mime Type
text/plain
Expires
Sat, May 16, 10:20 AM (5 h, 33 m)
Storage Engine
blob
Storage Format
Raw Data
Storage Handle
33126452
Default Alt Text
D8549.id25145.diff (239 KB)

Event Timeline