Index: head/sys/arm/xscale/ixp425/if_npe.c =================================================================== --- head/sys/arm/xscale/ixp425/if_npe.c (revision 193095) +++ head/sys/arm/xscale/ixp425/if_npe.c (revision 193096) @@ -1,1753 +1,1758 @@ /*- * Copyright (c) 2006-2008 Sam Leffler. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Intel XScale NPE Ethernet driver. * * This driver handles the two ports present on the IXP425. * Packet processing is done by the Network Processing Engines * (NPE's) that work together with a MAC and PHY. The MAC * is also mapped to the XScale cpu; the PHY is accessed via * the MAC. NPE-XScale communication happens through h/w * queues managed by the Q Manager block. * * The code here replaces the ethAcc, ethMii, and ethDB classes * in the Intel Access Library (IAL) and the OS-specific driver. * * XXX add vlan support */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #endif #include #include #include #include #include #include #include #include #include #include #include "miibus_if.h" /* * XXX: For the main bus dma tag. Can go away if the new method to get the * dma tag from the parent got MFC'd into RELENG_6. */ extern struct ixp425_softc *ixp425_softc; struct npebuf { struct npebuf *ix_next; /* chain to next buffer */ void *ix_m; /* backpointer to mbuf */ bus_dmamap_t ix_map; /* bus dma map for associated data */ struct npehwbuf *ix_hw; /* associated h/w block */ uint32_t ix_neaddr; /* phys address of ix_hw */ }; struct npedma { const char* name; int nbuf; /* # npebuf's allocated */ bus_dma_tag_t mtag; /* bus dma tag for mbuf data */ struct npehwbuf *hwbuf; /* NPE h/w buffers */ bus_dma_tag_t buf_tag; /* tag+map for NPE buffers */ bus_dmamap_t buf_map; bus_addr_t buf_phys; /* phys addr of buffers */ struct npebuf *buf; /* s/w buffers (1-1 w/ h/w) */ }; struct npe_softc { /* XXX mii requires this be first; do not move! 
*/ struct ifnet *sc_ifp; /* ifnet pointer */ struct mtx sc_mtx; /* basically a perimeter lock */ device_t sc_dev; bus_space_tag_t sc_iot; bus_space_handle_t sc_ioh; /* MAC register window */ device_t sc_mii; /* child miibus */ bus_space_handle_t sc_miih; /* MII register window */ int sc_npeid; struct ixpnpe_softc *sc_npe; /* NPE support */ int sc_debug; /* DPRINTF* control */ int sc_tickinterval; struct callout tick_ch; /* Tick callout */ int npe_watchdog_timer; struct npedma txdma; struct npebuf *tx_free; /* list of free tx buffers */ struct npedma rxdma; bus_addr_t buf_phys; /* XXX for returning a value */ int rx_qid; /* rx qid */ int rx_freeqid; /* rx free buffers qid */ int tx_qid; /* tx qid */ int tx_doneqid; /* tx completed qid */ int sc_phy; /* PHY id */ struct ifmib_iso_8802_3 mibdata; bus_dma_tag_t sc_stats_tag; /* bus dma tag for stats block */ struct npestats *sc_stats; bus_dmamap_t sc_stats_map; bus_addr_t sc_stats_phys; /* phys addr of sc_stats */ struct npestats sc_totals; /* accumulated sc_stats */ }; /* * Static configuration for IXP425. The tx and * rx free Q id's are fixed by the NPE microcode. The * rx Q id's are programmed to be separate to simplify * multi-port processing. It may be better to handle * all traffic through one Q (as done by the Intel drivers). * * Note that the PHY's are accessible only from MAC A * on the IXP425. This and other platform-specific * assumptions probably need to be handled through hints. */ static const struct { uint32_t macbase; uint32_t miibase; int phy; /* phy id */ uint8_t rx_qid; uint8_t rx_freeqid; uint8_t tx_qid; uint8_t tx_doneqid; } npeconfig[NPE_MAX] = { [NPE_A] = { .macbase = IXP435_MAC_A_HWBASE, .miibase = IXP425_MAC_C_HWBASE, .phy = 2, .rx_qid = 4, .rx_freeqid = 26, .tx_qid = 23, .tx_doneqid = 31 }, [NPE_B] = { .macbase = IXP425_MAC_B_HWBASE, .miibase = IXP425_MAC_C_HWBASE, .phy = 0, .rx_qid = 4, .rx_freeqid = 27, .tx_qid = 24, .tx_doneqid = 31 }, [NPE_C] = { .macbase = IXP425_MAC_C_HWBASE, .miibase = IXP425_MAC_C_HWBASE, .phy = 1, .rx_qid = 12, .rx_freeqid = 28, .tx_qid = 25, .tx_doneqid = 31 }, }; static struct npe_softc *npes[NPE_MAX]; /* NB: indexed by npeid */ static __inline uint32_t RD4(struct npe_softc *sc, bus_size_t off) { return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off); } static __inline void WR4(struct npe_softc *sc, bus_size_t off, uint32_t val) { bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val); } #define NPE_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) #define NPE_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) #define NPE_LOCK_INIT(_sc) \ mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->sc_dev), \ MTX_NETWORK_LOCK, MTX_DEF) #define NPE_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx); #define NPE_ASSERT_LOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED); #define NPE_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED); static devclass_t npe_devclass; static int override_npeid(device_t, const char *resname, int *val); static int npe_activate(device_t dev); static void npe_deactivate(device_t dev); static int npe_ifmedia_update(struct ifnet *ifp); static void npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr); static void npe_setmac(struct npe_softc *sc, u_char *eaddr); static void npe_getmac(struct npe_softc *sc, u_char *eaddr); static void npe_txdone(int qid, void *arg); static int npe_rxbuf_init(struct npe_softc *, struct npebuf *, struct mbuf *); -static void npe_rxdone(int qid, void *arg); +static int npe_rxdone(int qid, void *arg); static void npeinit(void *); static void npestart_locked(struct ifnet *); 
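/*
 * NB: this change makes npe_rxdone() return the number of rx'd
 * packets so the DEVICE_POLLING handler can report progress.  When
 * npe_rxdone is registered as a queue callback its return value is
 * simply ignored, hence the (qconfig_hand_t *) cast at the
 * ixpqmgr_qconfig() call below.  A sketch of the callback type this
 * revision adds to ixp425_qmgr.h (the header is not shown in this
 * diff), assuming it follows the old void (*cb)(int, void *)
 * convention:
 *
 *	typedef void qconfig_hand_t(int qid, void *cbarg);
 */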
static void npestart(struct ifnet *); static void npestop(struct npe_softc *); static void npewatchdog(struct npe_softc *); static int npeioctl(struct ifnet * ifp, u_long, caddr_t); static int npe_setrxqosentry(struct npe_softc *, int classix, int trafclass, int qid); static int npe_setfirewallmode(struct npe_softc *, int onoff); static int npe_updatestats(struct npe_softc *); #if 0 static int npe_getstats(struct npe_softc *); static uint32_t npe_getimageid(struct npe_softc *); static int npe_setloopback(struct npe_softc *, int ena); #endif /* NB: all tx done processing goes through one queue */ static int tx_doneqid = -1; SYSCTL_NODE(_hw, OID_AUTO, npe, CTLFLAG_RD, 0, "IXP4XX NPE driver parameters"); static int npe_debug = 0; SYSCTL_INT(_hw_npe, OID_AUTO, debug, CTLFLAG_RW, &npe_debug, 0, "IXP4XX NPE network interface debug msgs"); TUNABLE_INT("hw.npe.debug", &npe_debug); #define DPRINTF(sc, fmt, ...) do { \ if (sc->sc_debug) device_printf(sc->sc_dev, fmt, __VA_ARGS__); \ } while (0) #define DPRINTFn(n, sc, fmt, ...) do { \ if (sc->sc_debug >= n) device_printf(sc->sc_dev, fmt, __VA_ARGS__);\ } while (0) static int npe_tickinterval = 3; /* npe_tick frequency (secs) */ SYSCTL_INT(_hw_npe, OID_AUTO, tickinterval, CTLFLAG_RD, &npe_tickinterval, 0, "periodic work interval (secs)"); TUNABLE_INT("hw.npe.tickinterval", &npe_tickinterval); static int npe_rxbuf = 64; /* # rx buffers to allocate */ SYSCTL_INT(_hw_npe, OID_AUTO, rxbuf, CTLFLAG_RD, &npe_rxbuf, 0, "rx buffers allocated"); TUNABLE_INT("hw.npe.rxbuf", &npe_rxbuf); static int npe_txbuf = 128; /* # tx buffers to allocate */ SYSCTL_INT(_hw_npe, OID_AUTO, txbuf, CTLFLAG_RD, &npe_txbuf, 0, "tx buffers allocated"); TUNABLE_INT("hw.npe.txbuf", &npe_txbuf); static int unit2npeid(int unit) { static const int npeidmap[2][3] = { /* on 425 A is for HSS, B & C are for Ethernet */ { NPE_B, NPE_C, -1 }, /* IXP425 */ /* 435 only has A & C, order C then A */ { NPE_C, NPE_A, -1 }, /* IXP435 */ }; /* XXX check feature register instead */ return (unit < 3 ? npeidmap[ (cpu_id() & CPU_ID_CPU_MASK) == CPU_ID_IXP435][unit] : -1); } static int npe_probe(device_t dev) { static const char *desc[NPE_MAX] = { [NPE_A] = "IXP NPE-A", [NPE_B] = "IXP NPE-B", [NPE_C] = "IXP NPE-C" }; int unit = device_get_unit(dev); int npeid; if (unit > 2 || (ixp4xx_read_feature_bits() & (unit == 0 ? 
EXP_FCTRL_ETH0 : EXP_FCTRL_ETH1)) == 0) return EINVAL; npeid = -1; if (!override_npeid(dev, "npeid", &npeid)) npeid = unit2npeid(unit); if (npeid == -1) { device_printf(dev, "unit %d not supported\n", unit); return EINVAL; } device_set_desc(dev, desc[npeid]); return 0; } static int npe_attach(device_t dev) { struct npe_softc *sc = device_get_softc(dev); struct ixp425_softc *sa = device_get_softc(device_get_parent(dev)); struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid *tree = device_get_sysctl_tree(dev); struct ifnet *ifp; int error; u_char eaddr[6]; sc->sc_dev = dev; sc->sc_iot = sa->sc_iot; NPE_LOCK_INIT(sc); callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0); sc->sc_debug = npe_debug; sc->sc_tickinterval = npe_tickinterval; ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "cannot allocate ifnet\n"); error = EIO; /* XXX */ goto out; } /* NB: must be set up prior to invoking mii code */ sc->sc_ifp = ifp; error = npe_activate(dev); if (error) { device_printf(dev, "cannot activate npe\n"); goto out; } npe_getmac(sc, eaddr); ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_start = npestart; ifp->if_ioctl = npeioctl; ifp->if_init = npeinit; IFQ_SET_MAXLEN(&ifp->if_snd, sc->txdma.nbuf - 1); ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN; IFQ_SET_READY(&ifp->if_snd); ifp->if_linkmib = &sc->mibdata; ifp->if_linkmiblen = sizeof(sc->mibdata); sc->mibdata.dot3Compliance = DOT3COMPLIANCE_STATS; /* device supports oversized vlan frames */ ifp->if_capabilities |= IFCAP_VLAN_MTU; ifp->if_capenable = ifp->if_capabilities; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tickinterval", CTLFLAG_RW, &sc->sc_tickinterval, 0, "periodic work frequency"); SYSCTL_ADD_STRUCT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "stats", CTLFLAG_RD, &sc->sc_totals, npestats, "onboard stats"); ether_ifattach(ifp, eaddr); return 0; out: if (ifp != NULL) if_free(ifp); NPE_LOCK_DESTROY(sc); npe_deactivate(dev); return error; } static int npe_detach(device_t dev) { struct npe_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->sc_ifp; #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif npestop(sc); if (ifp != NULL) { ether_ifdetach(ifp); if_free(ifp); } NPE_LOCK_DESTROY(sc); npe_deactivate(dev); return 0; } /* * Compute and install the multicast filter.
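 */

#if 0
/*
 * Not compiled in: a standalone sketch of the clr/set computation
 * npe_setmcast performs below, using two hypothetical multicast
 * addresses.  The result accepts any address that agrees with addr in
 * every bit position the whole accepted set agrees on: here
 * mask = ff:ff:ff:ff:ff:fb and addr = 01:00:5e:00:00:01 (the samples
 * differ only in bit 2 of the last octet, which the mask clears).
 */
static void
npe_mcast_filter_sketch(void)
{
	static const uint8_t macs[2][ETHER_ADDR_LEN] = {
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x05 },
	};
	uint8_t clr[ETHER_ADDR_LEN], set[ETHER_ADDR_LEN];
	uint8_t mask[ETHER_ADDR_LEN], addr[ETHER_ADDR_LEN];
	int i, j;

	memset(clr, 0, ETHER_ADDR_LEN);		/* OR of all addresses */
	memset(set, 0xff, ETHER_ADDR_LEN);	/* AND of all addresses */
	for (j = 0; j < 2; j++) {
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			clr[i] |= macs[j][i];
			set[i] &= macs[j][i];
		}
	}
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		mask[i] = set[i] | ~clr[i];	/* don't-care bits cleared */
		addr[i] = set[i];
	}
}
#endif

/*
 * npe_setmcast below computes exactly this over if_multiaddrs and
 * writes the result to the h/w address/mask registers.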
*/ static void npe_setmcast(struct npe_softc *sc) { struct ifnet *ifp = sc->sc_ifp; uint8_t mask[ETHER_ADDR_LEN], addr[ETHER_ADDR_LEN]; int i; if (ifp->if_flags & IFF_PROMISC) { memset(mask, 0, ETHER_ADDR_LEN); memset(addr, 0, ETHER_ADDR_LEN); } else if (ifp->if_flags & IFF_ALLMULTI) { static const uint8_t allmulti[ETHER_ADDR_LEN] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; memcpy(mask, allmulti, ETHER_ADDR_LEN); memcpy(addr, allmulti, ETHER_ADDR_LEN); } else { uint8_t clr[ETHER_ADDR_LEN], set[ETHER_ADDR_LEN]; struct ifmultiaddr *ifma; const uint8_t *mac; memset(clr, 0, ETHER_ADDR_LEN); memset(set, 0xff, ETHER_ADDR_LEN); IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; mac = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); for (i = 0; i < ETHER_ADDR_LEN; i++) { clr[i] |= mac[i]; set[i] &= mac[i]; } } IF_ADDR_UNLOCK(ifp); for (i = 0; i < ETHER_ADDR_LEN; i++) { mask[i] = set[i] | ~clr[i]; addr[i] = set[i]; } } /* * Write the mask and address registers. */ for (i = 0; i < ETHER_ADDR_LEN; i++) { WR4(sc, NPE_MAC_ADDR_MASK(i), mask[i]); WR4(sc, NPE_MAC_ADDR(i), addr[i]); } } static void npe_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct npe_softc *sc; if (error != 0) return; sc = (struct npe_softc *)arg; sc->buf_phys = segs[0].ds_addr; } static int npe_dma_setup(struct npe_softc *sc, struct npedma *dma, const char *name, int nbuf, int maxseg) { int error, i; memset(dma, 0, sizeof(*dma)); dma->name = name; dma->nbuf = nbuf; /* DMA tag for mapped mbufs */ error = bus_dma_tag_create(ixp425_softc->sc_dmat, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, maxseg, MCLBYTES, 0, busdma_lock_mutex, &sc->sc_mtx, &dma->mtag); if (error != 0) { device_printf(sc->sc_dev, "unable to create %s mbuf dma tag, " "error %u\n", dma->name, error); return error; } /* DMA tag and map for the NPE buffers */ error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, nbuf * sizeof(struct npehwbuf), 1, nbuf * sizeof(struct npehwbuf), 0, busdma_lock_mutex, &sc->sc_mtx, &dma->buf_tag); if (error != 0) { device_printf(sc->sc_dev, "unable to create %s npebuf dma tag, error %u\n", dma->name, error); return error; } /* XXX COHERENT for now */ if (bus_dmamem_alloc(dma->buf_tag, (void **)&dma->hwbuf, BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->buf_map) != 0) { device_printf(sc->sc_dev, "unable to allocate memory for %s h/w buffers, error %u\n", dma->name, error); return error; } /* XXX M_TEMP */ dma->buf = malloc(nbuf * sizeof(struct npebuf), M_TEMP, M_NOWAIT | M_ZERO); if (dma->buf == NULL) { device_printf(sc->sc_dev, "unable to allocate memory for %s s/w buffers\n", dma->name); return error; } if (bus_dmamap_load(dma->buf_tag, dma->buf_map, dma->hwbuf, nbuf*sizeof(struct npehwbuf), npe_getaddr, sc, 0) != 0) { device_printf(sc->sc_dev, "unable to map memory for %s h/w buffers, error %u\n", dma->name, error); return error; } dma->buf_phys = sc->buf_phys; for (i = 0; i < dma->nbuf; i++) { struct npebuf *npe = &dma->buf[i]; struct npehwbuf *hw = &dma->hwbuf[i]; /* calculate offset to shared area */ npe->ix_neaddr = dma->buf_phys + ((uintptr_t)hw - (uintptr_t)dma->hwbuf); KASSERT((npe->ix_neaddr & 0x1f) == 0, ("ixpbuf misaligned, PA 0x%x", npe->ix_neaddr)); error = bus_dmamap_create(dma->mtag, BUS_DMA_NOWAIT, &npe->ix_map); if (error != 0) { device_printf(sc->sc_dev, "unable to create dmamap for %s buffer %u, " "error %u\n", 
dma->name, i, error); return error; } npe->ix_hw = hw; } bus_dmamap_sync(dma->buf_tag, dma->buf_map, BUS_DMASYNC_PREWRITE); return 0; } static void npe_dma_destroy(struct npe_softc *sc, struct npedma *dma) { int i; if (dma->hwbuf != NULL) { for (i = 0; i < dma->nbuf; i++) { struct npebuf *npe = &dma->buf[i]; bus_dmamap_destroy(dma->mtag, npe->ix_map); } bus_dmamap_unload(dma->buf_tag, dma->buf_map); bus_dmamem_free(dma->buf_tag, dma->hwbuf, dma->buf_map); } if (dma->buf != NULL) free(dma->buf, M_TEMP); if (dma->buf_tag) bus_dma_tag_destroy(dma->buf_tag); if (dma->mtag) bus_dma_tag_destroy(dma->mtag); memset(dma, 0, sizeof(*dma)); } static int override_addr(device_t dev, const char *resname, int *base) { int unit = device_get_unit(dev); const char *resval; /* XXX warn for wrong hint type */ if (resource_string_value("npe", unit, resname, &resval) != 0) return 0; switch (resval[0]) { case 'A': *base = IXP435_MAC_A_HWBASE; break; case 'B': *base = IXP425_MAC_B_HWBASE; break; case 'C': *base = IXP425_MAC_C_HWBASE; break; default: device_printf(dev, "Warning, bad value %s for " "npe.%d.%s ignored\n", resval, unit, resname); return 0; } if (bootverbose) device_printf(dev, "using npe.%d.%s=%s override\n", unit, resname, resval); return 1; } static int override_npeid(device_t dev, const char *resname, int *npeid) { int unit = device_get_unit(dev); const char *resval; /* XXX warn for wrong hint type */ if (resource_string_value("npe", unit, resname, &resval) != 0) return 0; switch (resval[0]) { case 'A': *npeid = NPE_A; break; case 'B': *npeid = NPE_B; break; case 'C': *npeid = NPE_C; break; default: device_printf(dev, "Warning, bad value %s for " "npe.%d.%s ignored\n", resval, unit, resname); return 0; } if (bootverbose) device_printf(dev, "using npe.%d.%s=%s override\n", unit, resname, resval); return 1; } static int override_unit(device_t dev, const char *resname, int *val, int min, int max) { int unit = device_get_unit(dev); int resval; if (resource_int_value("npe", unit, resname, &resval) != 0) return 0; if (!(min <= resval && resval <= max)) { device_printf(dev, "Warning, bad value %d for npe.%d.%s " "ignored (value must be [%d-%d])\n", resval, unit, resname, min, max); return 0; } if (bootverbose) device_printf(dev, "using npe.%d.%s=%d override\n", unit, resname, resval); *val = resval; return 1; } static void npe_mac_reset(struct npe_softc *sc) { /* * Reset MAC core. */ WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET); DELAY(NPE_MAC_RESET_DELAY); /* configure MAC to generate MDC clock */ WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN); } static int npe_activate(device_t dev) { struct npe_softc * sc = device_get_softc(dev); int error, i, macbase, miibase; /* * Setup NPE ID, MAC, and MII bindings. We allow override * via hints to handle unexpected board configs.
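 * The overrides come in as standard device hints; e.g. (hypothetical
 * values, matching the resnames used by override_npeid, override_addr
 * and override_unit above):
 *
 *	hint.npe.0.npeid="A"	# bind unit 0 to NPE-A
 *	hint.npe.0.mac="A"	# use MAC A's register window
 *	hint.npe.0.mii="C"	# reach the PHY through MAC C
 *	hint.npe.0.phy="2"	# PHY address on the MII bus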
*/ if (!override_npeid(dev, "npeid", &sc->sc_npeid)) sc->sc_npeid = unit2npeid(device_get_unit(dev)); sc->sc_npe = ixpnpe_attach(dev, sc->sc_npeid); if (sc->sc_npe == NULL) { device_printf(dev, "cannot attach ixpnpe\n"); return EIO; /* XXX */ } /* MAC */ if (!override_addr(dev, "mac", &macbase)) macbase = npeconfig[sc->sc_npeid].macbase; device_printf(sc->sc_dev, "MAC at 0x%x\n", macbase); if (bus_space_map(sc->sc_iot, macbase, IXP425_REG_SIZE, 0, &sc->sc_ioh)) { device_printf(dev, "cannot map mac registers 0x%x:0x%x\n", macbase, IXP425_REG_SIZE); return ENOMEM; } /* PHY */ if (!override_unit(dev, "phy", &sc->sc_phy, 0, MII_NPHY-1)) sc->sc_phy = npeconfig[sc->sc_npeid].phy; if (!override_addr(dev, "mii", &miibase)) miibase = npeconfig[sc->sc_npeid].miibase; device_printf(sc->sc_dev, "MII at 0x%x\n", miibase); if (miibase != macbase) { /* * PHY is mapped through a different MAC, setup an * additional mapping for frobbing the PHY registers. */ if (bus_space_map(sc->sc_iot, miibase, IXP425_REG_SIZE, 0, &sc->sc_miih)) { device_printf(dev, "cannot map MII registers 0x%x:0x%x\n", miibase, IXP425_REG_SIZE); return ENOMEM; } } else sc->sc_miih = sc->sc_ioh; /* * Load NPE firmware and start it running. */ error = ixpnpe_init(sc->sc_npe); if (error != 0) { device_printf(dev, "cannot init NPE (error %d)\n", error); return error; } /* probe for PHY */ if (mii_phy_probe(dev, &sc->sc_mii, npe_ifmedia_update, npe_ifmedia_status)) { device_printf(dev, "cannot find PHY %d.\n", sc->sc_phy); return ENXIO; } error = npe_dma_setup(sc, &sc->txdma, "tx", npe_txbuf, NPE_MAXSEG); if (error != 0) return error; error = npe_dma_setup(sc, &sc->rxdma, "rx", npe_rxbuf, 1); if (error != 0) return error; /* setup statistics block */ error = bus_dma_tag_create(ixp425_softc->sc_dmat, sizeof(uint32_t), 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct npestats), 1, sizeof(struct npestats), 0, busdma_lock_mutex, &sc->sc_mtx, &sc->sc_stats_tag); if (error != 0) { device_printf(sc->sc_dev, "unable to create stats tag, " "error %u\n", error); return error; } if (bus_dmamem_alloc(sc->sc_stats_tag, (void **)&sc->sc_stats, BUS_DMA_NOWAIT, &sc->sc_stats_map) != 0) { device_printf(sc->sc_dev, "unable to allocate memory for stats block, error %u\n", error); return error; } if (bus_dmamap_load(sc->sc_stats_tag, sc->sc_stats_map, sc->sc_stats, sizeof(struct npestats), npe_getaddr, sc, 0) != 0) { device_printf(sc->sc_dev, "unable to load memory for stats block, error %u\n", error); return error; } sc->sc_stats_phys = sc->buf_phys; /* * Setup h/w rx/tx queues. There are four q's: * rx inbound q of rx'd frames * rx_free pool of ixpbuf's for receiving frames * tx outbound q of frames to send * tx_done q of tx frames that have been processed * * The NPE handles the actual tx/rx process and the q manager * handles the queues. The driver just writes entries to the * q manager mailboxes and gets callbacks when there are rx'd * frames to process or tx'd frames to reap. These callbacks * are controlled by the q configurations; e.g. we get a * callback when tx_done has 2 or more frames to process and * when the rx q has at least one frame. These settings can be * changed at the time the q is configured.
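 * In terms of the ixpqmgr_qconfig(qid, qEntries, ne, nf, srcSel, cb,
 * arg) calls below: the rx q is configured with nf=1 and the "not
 * empty" interrupt source (IX_QMGR_Q_SOURCE_ID_NOT_E), so npe_rxdone
 * runs as soon as a single frame lands, while tx_done uses nf=2 to
 * batch completion processing as described above.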
*/ sc->rx_qid = npeconfig[sc->sc_npeid].rx_qid; ixpqmgr_qconfig(sc->rx_qid, npe_rxbuf, 0, 1, - IX_QMGR_Q_SOURCE_ID_NOT_E, npe_rxdone, sc); + IX_QMGR_Q_SOURCE_ID_NOT_E, (qconfig_hand_t *)npe_rxdone, sc); sc->rx_freeqid = npeconfig[sc->sc_npeid].rx_freeqid; ixpqmgr_qconfig(sc->rx_freeqid, npe_rxbuf, 0, npe_rxbuf/2, 0, NULL, sc); /* * Setup the NPE to direct all traffic to rx_qid. * When QoS is enabled in the firmware there are * 8 traffic classes; otherwise just 4. */ for (i = 0; i < 8; i++) npe_setrxqosentry(sc, i, 0, sc->rx_qid); /* disable firewall mode just in case (should be off) */ npe_setfirewallmode(sc, 0); sc->tx_qid = npeconfig[sc->sc_npeid].tx_qid; sc->tx_doneqid = npeconfig[sc->sc_npeid].tx_doneqid; ixpqmgr_qconfig(sc->tx_qid, npe_txbuf, 0, npe_txbuf, 0, NULL, sc); if (tx_doneqid == -1) { ixpqmgr_qconfig(sc->tx_doneqid, npe_txbuf, 0, 2, IX_QMGR_Q_SOURCE_ID_NOT_E, npe_txdone, sc); tx_doneqid = sc->tx_doneqid; } KASSERT(npes[sc->sc_npeid] == NULL, ("npe %u already setup", sc->sc_npeid)); npes[sc->sc_npeid] = sc; return 0; } static void npe_deactivate(device_t dev) { struct npe_softc *sc = device_get_softc(dev); npes[sc->sc_npeid] = NULL; /* XXX disable q's */ if (sc->sc_npe != NULL) { ixpnpe_stop(sc->sc_npe); ixpnpe_detach(sc->sc_npe); } if (sc->sc_stats != NULL) { bus_dmamap_unload(sc->sc_stats_tag, sc->sc_stats_map); bus_dmamem_free(sc->sc_stats_tag, sc->sc_stats, sc->sc_stats_map); } if (sc->sc_stats_tag != NULL) bus_dma_tag_destroy(sc->sc_stats_tag); npe_dma_destroy(sc, &sc->txdma); npe_dma_destroy(sc, &sc->rxdma); bus_generic_detach(sc->sc_dev); if (sc->sc_mii != NULL) device_delete_child(sc->sc_dev, sc->sc_mii); } /* * Change media according to request. */ static int npe_ifmedia_update(struct ifnet *ifp) { struct npe_softc *sc = ifp->if_softc; struct mii_data *mii; mii = device_get_softc(sc->sc_mii); NPE_LOCK(sc); mii_mediachg(mii); /* XXX push state ourself? */ NPE_UNLOCK(sc); return (0); } /* * Notify the world which media we're using. 
*/ static void npe_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr) { struct npe_softc *sc = ifp->if_softc; struct mii_data *mii; mii = device_get_softc(sc->sc_mii); NPE_LOCK(sc); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; NPE_UNLOCK(sc); } static void npe_addstats(struct npe_softc *sc) { #define NPEADD(x) sc->sc_totals.x += be32toh(ns->x) #define MIBADD(x) do { sc->mibdata.x += be32toh(ns->x); NPEADD(x); } while (0) struct ifnet *ifp = sc->sc_ifp; struct npestats *ns = sc->sc_stats; MIBADD(dot3StatsAlignmentErrors); MIBADD(dot3StatsFCSErrors); MIBADD(dot3StatsInternalMacReceiveErrors); NPEADD(RxOverrunDiscards); NPEADD(RxLearnedEntryDiscards); NPEADD(RxLargeFramesDiscards); NPEADD(RxSTPBlockedDiscards); NPEADD(RxVLANTypeFilterDiscards); NPEADD(RxVLANIdFilterDiscards); NPEADD(RxInvalidSourceDiscards); NPEADD(RxBlackListDiscards); NPEADD(RxWhiteListDiscards); NPEADD(RxUnderflowEntryDiscards); MIBADD(dot3StatsSingleCollisionFrames); MIBADD(dot3StatsMultipleCollisionFrames); MIBADD(dot3StatsDeferredTransmissions); MIBADD(dot3StatsLateCollisions); MIBADD(dot3StatsExcessiveCollisions); MIBADD(dot3StatsInternalMacTransmitErrors); MIBADD(dot3StatsCarrierSenseErrors); NPEADD(TxLargeFrameDiscards); NPEADD(TxVLANIdFilterDiscards); sc->mibdata.dot3StatsFrameTooLongs += be32toh(ns->RxLargeFramesDiscards) + be32toh(ns->TxLargeFrameDiscards); sc->mibdata.dot3StatsMissedFrames += be32toh(ns->RxOverrunDiscards) + be32toh(ns->RxUnderflowEntryDiscards); ifp->if_oerrors += be32toh(ns->dot3StatsInternalMacTransmitErrors) + be32toh(ns->dot3StatsCarrierSenseErrors) + be32toh(ns->TxVLANIdFilterDiscards) ; ifp->if_ierrors += be32toh(ns->dot3StatsFCSErrors) + be32toh(ns->dot3StatsInternalMacReceiveErrors) + be32toh(ns->RxOverrunDiscards) + be32toh(ns->RxUnderflowEntryDiscards) ; ifp->if_collisions += be32toh(ns->dot3StatsSingleCollisionFrames) + be32toh(ns->dot3StatsMultipleCollisionFrames) ; #undef NPEADD #undef MIBADD } static void npe_tick(void *xsc) { #define ACK (NPE_RESETSTATS << NPE_MAC_MSGID_SHL) struct npe_softc *sc = xsc; struct mii_data *mii = device_get_softc(sc->sc_mii); uint32_t msg[2]; NPE_ASSERT_LOCKED(sc); /* * NB: to avoid sleeping with the softc lock held we * split the NPE msg processing into two parts. The * request for statistics is sent w/o waiting for a * reply and then on the next tick we retrieve the * results. This works because npe_tick is the only * code that talks via the mailbox's (except at setup). * This likely can be handled better. 
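 * Concretely, each pass through npe_tick looks like:
 *
 *	tick N:   npe_updatestats() posts a RESETSTATS request carrying
 *	          the physical address of the stats block and returns
 *	          without waiting;
 *	tick N+1: ixpnpe_recvmsg_async() finds the ACK, the stats block
 *	          is synced (POSTREAD) and folded into the running
 *	          totals by npe_addstats().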
*/ if (ixpnpe_recvmsg_async(sc->sc_npe, msg) == 0 && msg[0] == ACK) { bus_dmamap_sync(sc->sc_stats_tag, sc->sc_stats_map, BUS_DMASYNC_POSTREAD); npe_addstats(sc); } npe_updatestats(sc); mii_tick(mii); npewatchdog(sc); /* schedule next poll */ callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc); #undef ACK } static void npe_setmac(struct npe_softc *sc, u_char *eaddr) { WR4(sc, NPE_MAC_UNI_ADDR_1, eaddr[0]); WR4(sc, NPE_MAC_UNI_ADDR_2, eaddr[1]); WR4(sc, NPE_MAC_UNI_ADDR_3, eaddr[2]); WR4(sc, NPE_MAC_UNI_ADDR_4, eaddr[3]); WR4(sc, NPE_MAC_UNI_ADDR_5, eaddr[4]); WR4(sc, NPE_MAC_UNI_ADDR_6, eaddr[5]); } static void npe_getmac(struct npe_softc *sc, u_char *eaddr) { /* NB: the unicast address appears to be loaded from EEPROM on reset */ eaddr[0] = RD4(sc, NPE_MAC_UNI_ADDR_1) & 0xff; eaddr[1] = RD4(sc, NPE_MAC_UNI_ADDR_2) & 0xff; eaddr[2] = RD4(sc, NPE_MAC_UNI_ADDR_3) & 0xff; eaddr[3] = RD4(sc, NPE_MAC_UNI_ADDR_4) & 0xff; eaddr[4] = RD4(sc, NPE_MAC_UNI_ADDR_5) & 0xff; eaddr[5] = RD4(sc, NPE_MAC_UNI_ADDR_6) & 0xff; } struct txdone { struct npebuf *head; struct npebuf **tail; int count; }; static __inline void npe_txdone_finish(struct npe_softc *sc, const struct txdone *td) { struct ifnet *ifp = sc->sc_ifp; NPE_LOCK(sc); *td->tail = sc->tx_free; sc->tx_free = td->head; /* * We're no longer busy, so clear the busy flag and call the * start routine to xmit more packets. */ ifp->if_opackets += td->count; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->npe_watchdog_timer = 0; npestart_locked(ifp); NPE_UNLOCK(sc); } /* * Q manager callback on tx done queue. Reap mbufs * and return tx buffers to the free list. Finally * restart output. Note the microcode has only one * txdone q wired into it so we must use the NPE ID * returned with each npehwbuf to decide where to * send buffers. */ static void npe_txdone(int qid, void *arg) { #define P2V(a, dma) \ &(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)] struct npe_softc *sc0 = arg; struct npe_softc *sc; struct npebuf *npe; struct txdone *td, q[NPE_MAX]; uint32_t entry; /* XXX no NPE-A support */ q[NPE_B].tail = &q[NPE_B].head; q[NPE_B].count = 0; q[NPE_C].tail = &q[NPE_C].head; q[NPE_C].count = 0; /* XXX max # at a time? 
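 * NB: the P2V() macro above recovers the s/w npebuf from the physical
 * address the NPE hands back; the h/w buffers are laid out
 * contiguously starting at buf_phys, so with (illustrative numbers)
 * buf_phys = 0x1000 and sizeof(struct npehwbuf) = 32, an entry
 * address of 0x1040 maps to &dma->buf[2].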
*/ while (ixpqmgr_qread(qid, &entry) == 0) { DPRINTF(sc0, "%s: entry 0x%x NPE %u port %u\n", __func__, entry, NPE_QM_Q_NPE(entry), NPE_QM_Q_PORT(entry)); sc = npes[NPE_QM_Q_NPE(entry)]; npe = P2V(NPE_QM_Q_ADDR(entry), &sc->txdma); m_freem(npe->ix_m); npe->ix_m = NULL; td = &q[NPE_QM_Q_NPE(entry)]; *td->tail = npe; td->tail = &npe->ix_next; td->count++; } if (q[NPE_B].count) npe_txdone_finish(npes[NPE_B], &q[NPE_B]); if (q[NPE_C].count) npe_txdone_finish(npes[NPE_C], &q[NPE_C]); #undef P2V } static int npe_rxbuf_init(struct npe_softc *sc, struct npebuf *npe, struct mbuf *m) { bus_dma_segment_t segs[1]; struct npedma *dma = &sc->rxdma; struct npehwbuf *hw; int error, nseg; if (m == NULL) { m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return ENOBUFS; } KASSERT(m->m_ext.ext_size >= 1536 + ETHER_ALIGN, ("ext_size %d", m->m_ext.ext_size)); m->m_pkthdr.len = m->m_len = 1536; /* backload payload and align ip hdr */ m->m_data = m->m_ext.ext_buf + (m->m_ext.ext_size - (1536+ETHER_ALIGN)); error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map, m, segs, &nseg, 0); if (error != 0) { m_freem(m); return error; } hw = npe->ix_hw; hw->ix_ne[0].data = htobe32(segs[0].ds_addr); /* NB: NPE requires length be a multiple of 64 */ /* NB: buffer length is shifted in word */ hw->ix_ne[0].len = htobe32(segs[0].ds_len << 16); hw->ix_ne[0].next = 0; npe->ix_m = m; /* Flush the memory in the mbuf */ bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREREAD); return 0; } /* * RX q processing for a specific NPE. Claim entries * from the hardware queue and pass the frames up the * stack. Pass the rx buffers to the free list. */ -static void +static int npe_rxdone(int qid, void *arg) { #define P2V(a, dma) \ &(dma)->buf[((a) - (dma)->buf_phys) / sizeof(struct npehwbuf)] struct npe_softc *sc = arg; struct npedma *dma = &sc->rxdma; uint32_t entry; + int rx_npkts = 0; while (ixpqmgr_qread(qid, &entry) == 0) { struct npebuf *npe = P2V(NPE_QM_Q_ADDR(entry), dma); struct mbuf *m; DPRINTF(sc, "%s: entry 0x%x neaddr 0x%x ne_len 0x%x\n", __func__, entry, npe->ix_neaddr, npe->ix_hw->ix_ne[0].len); /* * Allocate a new mbuf to replenish the rx buffer. * If doing so fails we drop the rx'd frame so we * can reuse the previous mbuf. When we're able to * allocate a new mbuf dispatch the mbuf w/ rx'd * data up the stack and replace it with the newly * allocated one. */ m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (m != NULL) { struct mbuf *mrx = npe->ix_m; struct npehwbuf *hw = npe->ix_hw; struct ifnet *ifp = sc->sc_ifp; /* Flush mbuf memory for rx'd data */ bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_POSTREAD); /* XXX flush hw buffer; works now 'cuz coherent */ /* set m_len etc. 
per rx frame size */ mrx->m_len = be32toh(hw->ix_ne[0].len) & 0xffff; mrx->m_pkthdr.len = mrx->m_len; mrx->m_pkthdr.rcvif = ifp; ifp->if_ipackets++; ifp->if_input(ifp, mrx); + rx_npkts++; } else { /* discard frame and re-use mbuf */ m = npe->ix_m; } if (npe_rxbuf_init(sc, npe, m) == 0) { /* return npe buf to rx free list */ ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr); } else { /* XXX should not happen */ } } + return (rx_npkts); #undef P2V } #ifdef DEVICE_POLLING -static void +static int npe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct npe_softc *sc = ifp->if_softc; + int rx_npkts = 0; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { - npe_rxdone(sc->rx_qid, sc); + rx_npkts = npe_rxdone(sc->rx_qid, sc); npe_txdone(sc->tx_doneqid, sc); /* XXX polls both NPE's */ } + return (rx_npkts); } #endif /* DEVICE_POLLING */ static void npe_startxmit(struct npe_softc *sc) { struct npedma *dma = &sc->txdma; int i; NPE_ASSERT_LOCKED(sc); sc->tx_free = NULL; for (i = 0; i < dma->nbuf; i++) { struct npebuf *npe = &dma->buf[i]; if (npe->ix_m != NULL) { /* NB: should not happen */ device_printf(sc->sc_dev, "%s: free mbuf at entry %u\n", __func__, i); m_freem(npe->ix_m); } npe->ix_m = NULL; npe->ix_next = sc->tx_free; sc->tx_free = npe; } } static void npe_startrecv(struct npe_softc *sc) { struct npedma *dma = &sc->rxdma; struct npebuf *npe; int i; NPE_ASSERT_LOCKED(sc); for (i = 0; i < dma->nbuf; i++) { npe = &dma->buf[i]; npe_rxbuf_init(sc, npe, npe->ix_m); /* set npe buf on rx free list */ ixpqmgr_qwrite(sc->rx_freeqid, npe->ix_neaddr); } } /* * Reset and initialize the chip */ static void npeinit_locked(void *xsc) { struct npe_softc *sc = xsc; struct ifnet *ifp = sc->sc_ifp; NPE_ASSERT_LOCKED(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) return;/*XXX*/ /* * Reset MAC core. */ npe_mac_reset(sc); /* disable transmitter and receiver in the MAC */ WR4(sc, NPE_MAC_RX_CNTRL1, RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN); WR4(sc, NPE_MAC_TX_CNTRL1, RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN); /* * Set the MAC core registers. */ WR4(sc, NPE_MAC_INT_CLK_THRESH, 0x1); /* clock ratio: for ixp4xx */ WR4(sc, NPE_MAC_TX_CNTRL2, 0xf); /* max retries */ WR4(sc, NPE_MAC_RANDOM_SEED, 0x8); /* LFSR back-off seed */ /* thresholds determined by NPE firmware FS */ WR4(sc, NPE_MAC_THRESH_P_EMPTY, 0x12); WR4(sc, NPE_MAC_THRESH_P_FULL, 0x30); WR4(sc, NPE_MAC_BUF_SIZE_TX, 0x8); /* tx fifo threshold (bytes) */ WR4(sc, NPE_MAC_TX_DEFER, 0x15); /* for single deferral */ WR4(sc, NPE_MAC_RX_DEFER, 0x16); /* deferral on inter-frame gap*/ WR4(sc, NPE_MAC_TX_TWO_DEFER_1, 0x8); /* for 2-part deferral */ WR4(sc, NPE_MAC_TX_TWO_DEFER_2, 0x7); /* for 2-part deferral */ WR4(sc, NPE_MAC_SLOT_TIME, 0x80); /* assumes MII mode */ WR4(sc, NPE_MAC_TX_CNTRL1, NPE_TX_CNTRL1_RETRY /* retry failed xmits */ | NPE_TX_CNTRL1_FCS_EN /* append FCS */ | NPE_TX_CNTRL1_2DEFER /* 2-part deferral */ | NPE_TX_CNTRL1_PAD_EN); /* pad runt frames */ /* XXX pad strip?
*/ /* ena pause frame handling */ WR4(sc, NPE_MAC_RX_CNTRL1, NPE_RX_CNTRL1_PAUSE_EN); WR4(sc, NPE_MAC_RX_CNTRL2, 0); npe_setmac(sc, IF_LLADDR(ifp)); npe_setmcast(sc); npe_startxmit(sc); npe_startrecv(sc); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->npe_watchdog_timer = 0; /* just in case */ /* enable transmitter and reciver in the MAC */ WR4(sc, NPE_MAC_RX_CNTRL1, RD4(sc, NPE_MAC_RX_CNTRL1) | NPE_RX_CNTRL1_RX_EN); WR4(sc, NPE_MAC_TX_CNTRL1, RD4(sc, NPE_MAC_TX_CNTRL1) | NPE_TX_CNTRL1_TX_EN); callout_reset(&sc->tick_ch, sc->sc_tickinterval * hz, npe_tick, sc); } static void npeinit(void *xsc) { struct npe_softc *sc = xsc; NPE_LOCK(sc); npeinit_locked(sc); NPE_UNLOCK(sc); } /* * Dequeue packets and place on the h/w transmit queue. */ static void npestart_locked(struct ifnet *ifp) { struct npe_softc *sc = ifp->if_softc; struct npebuf *npe; struct npehwbuf *hw; struct mbuf *m, *n; struct npedma *dma = &sc->txdma; bus_dma_segment_t segs[NPE_MAXSEG]; int nseg, len, error, i; uint32_t next; NPE_ASSERT_LOCKED(sc); /* XXX can this happen? */ if (ifp->if_drv_flags & IFF_DRV_OACTIVE) return; while (sc->tx_free != NULL) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) { /* XXX? */ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; return; } npe = sc->tx_free; error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map, m, segs, &nseg, 0); if (error == EFBIG) { n = m_collapse(m, M_DONTWAIT, NPE_MAXSEG); if (n == NULL) { if_printf(ifp, "%s: too many fragments %u\n", __func__, nseg); m_freem(m); return; /* XXX? */ } m = n; error = bus_dmamap_load_mbuf_sg(dma->mtag, npe->ix_map, m, segs, &nseg, 0); } if (error != 0 || nseg == 0) { if_printf(ifp, "%s: error %u nseg %u\n", __func__, error, nseg); m_freem(m); return; /* XXX? */ } sc->tx_free = npe->ix_next; bus_dmamap_sync(dma->mtag, npe->ix_map, BUS_DMASYNC_PREWRITE); /* * Tap off here if there is a bpf listener. */ BPF_MTAP(ifp, m); npe->ix_m = m; hw = npe->ix_hw; len = m->m_pkthdr.len; next = npe->ix_neaddr + sizeof(hw->ix_ne[0]); for (i = 0; i < nseg; i++) { hw->ix_ne[i].data = htobe32(segs[i].ds_addr); hw->ix_ne[i].len = htobe32((segs[i].ds_len<<16) | len); hw->ix_ne[i].next = htobe32(next); len = 0; /* zero for segments > 1 */ next += sizeof(hw->ix_ne[0]); } hw->ix_ne[i-1].next = 0; /* zero last in chain */ /* XXX flush descriptor instead of using uncached memory */ DPRINTF(sc, "%s: qwrite(%u, 0x%x) ne_data %x ne_len 0x%x\n", __func__, sc->tx_qid, npe->ix_neaddr, hw->ix_ne[0].data, hw->ix_ne[0].len); /* stick it on the tx q */ /* XXX add vlan priority */ ixpqmgr_qwrite(sc->tx_qid, npe->ix_neaddr); sc->npe_watchdog_timer = 5; } if (sc->tx_free == NULL) ifp->if_drv_flags |= IFF_DRV_OACTIVE; } void npestart(struct ifnet *ifp) { struct npe_softc *sc = ifp->if_softc; NPE_LOCK(sc); npestart_locked(ifp); NPE_UNLOCK(sc); } static void npe_stopxmit(struct npe_softc *sc) { struct npedma *dma = &sc->txdma; int i; NPE_ASSERT_LOCKED(sc); /* XXX qmgr */ for (i = 0; i < dma->nbuf; i++) { struct npebuf *npe = &dma->buf[i]; if (npe->ix_m != NULL) { bus_dmamap_unload(dma->mtag, npe->ix_map); m_freem(npe->ix_m); npe->ix_m = NULL; } } } static void npe_stoprecv(struct npe_softc *sc) { struct npedma *dma = &sc->rxdma; int i; NPE_ASSERT_LOCKED(sc); /* XXX qmgr */ for (i = 0; i < dma->nbuf; i++) { struct npebuf *npe = &dma->buf[i]; if (npe->ix_m != NULL) { bus_dmamap_unload(dma->mtag, npe->ix_map); m_freem(npe->ix_m); npe->ix_m = NULL; } } } /* * Turn off interrupts, and stop the nic. 
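 */

/*
 * For reference, npestart_locked() above encodes a 2-segment frame of
 * total length L in the ix_ne[] chain as (illustrative):
 *
 *	ix_ne[0] = { data=PA(seg0), len=(len0<<16)|L, next=PA(ix_ne[1]) }
 *	ix_ne[1] = { data=PA(seg1), len=(len1<<16)|0, next=0 }
 *
 * i.e. each entry carries its segment length in the upper half-word
 * and only the first entry carries the total frame length in the
 * lower half-word.
 */

/*
 * npestop() implements the shutdown just described.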
*/ void npestop(struct npe_softc *sc) { struct ifnet *ifp = sc->sc_ifp; /* disable transmitter and reciver in the MAC */ WR4(sc, NPE_MAC_RX_CNTRL1, RD4(sc, NPE_MAC_RX_CNTRL1) &~ NPE_RX_CNTRL1_RX_EN); WR4(sc, NPE_MAC_TX_CNTRL1, RD4(sc, NPE_MAC_TX_CNTRL1) &~ NPE_TX_CNTRL1_TX_EN); sc->npe_watchdog_timer = 0; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); callout_stop(&sc->tick_ch); npe_stopxmit(sc); npe_stoprecv(sc); /* XXX go into loopback & drain q's? */ /* XXX but beware of disabling tx above */ /* * The MAC core rx/tx disable may leave the MAC hardware in an * unpredictable state. A hw reset is executed before resetting * all the MAC parameters to a known value. */ WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_RESET); DELAY(NPE_MAC_RESET_DELAY); WR4(sc, NPE_MAC_INT_CLK_THRESH, NPE_MAC_INT_CLK_THRESH_DEFAULT); WR4(sc, NPE_MAC_CORE_CNTRL, NPE_CORE_MDC_EN); } void npewatchdog(struct npe_softc *sc) { NPE_ASSERT_LOCKED(sc); if (sc->npe_watchdog_timer == 0 || --sc->npe_watchdog_timer != 0) return; device_printf(sc->sc_dev, "watchdog timeout\n"); sc->sc_ifp->if_oerrors++; npeinit_locked(sc); } static int npeioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct npe_softc *sc = ifp->if_softc; struct mii_data *mii; struct ifreq *ifr = (struct ifreq *)data; int error = 0; #ifdef DEVICE_POLLING int mask; #endif switch (cmd) { case SIOCSIFFLAGS: NPE_LOCK(sc); if ((ifp->if_flags & IFF_UP) == 0 && ifp->if_drv_flags & IFF_DRV_RUNNING) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; npestop(sc); } else { /* reinitialize card on any parameter change */ npeinit_locked(sc); } NPE_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: /* update multicast filter list. */ NPE_LOCK(sc); npe_setmcast(sc); NPE_UNLOCK(sc); error = 0; break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: mii = device_get_softc(sc->sc_mii); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); break; #ifdef DEVICE_POLLING case SIOCSIFCAP: mask = ifp->if_capenable ^ ifr->ifr_reqcap; if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(npe_poll, ifp); if (error) return error; NPE_LOCK(sc); /* disable callbacks XXX txdone is shared */ ixpqmgr_notify_disable(sc->rx_qid); ixpqmgr_notify_disable(sc->tx_doneqid); ifp->if_capenable |= IFCAP_POLLING; NPE_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); /* NB: always enable qmgr callbacks */ NPE_LOCK(sc); /* enable qmgr callbacks */ ixpqmgr_notify_enable(sc->rx_qid, IX_QMGR_Q_SOURCE_ID_NOT_E); ixpqmgr_notify_enable(sc->tx_doneqid, IX_QMGR_Q_SOURCE_ID_NOT_E); ifp->if_capenable &= ~IFCAP_POLLING; NPE_UNLOCK(sc); } } break; #endif default: error = ether_ioctl(ifp, cmd, data); break; } return error; } /* * Setup a traffic class -> rx queue mapping. */ static int npe_setrxqosentry(struct npe_softc *sc, int classix, int trafclass, int qid) { uint32_t msg[2]; msg[0] = (NPE_SETRXQOSENTRY << 24) | (sc->sc_npeid << 20) | classix; msg[1] = (trafclass << 24) | (1 << 23) | (qid << 16) | (qid << 4); return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg); } static int npe_setfirewallmode(struct npe_softc *sc, int onoff) { uint32_t msg[2]; /* XXX honor onoff */ msg[0] = (NPE_SETFIREWALLMODE << 24) | (sc->sc_npeid << 20); msg[1] = 0; return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg); } /* * Update and reset the statistics in the NPE. 
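 * All NPE mailbox requests in this driver share the same two-word
 * shape: msg[0] carries the message id in its high bits (shifted by
 * NPE_MAC_MSGID_SHL) plus any small arguments, and msg[1] carries a
 * payload such as, here, the physical address of the stats block.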
*/ static int npe_updatestats(struct npe_softc *sc) { uint32_t msg[2]; msg[0] = NPE_RESETSTATS << NPE_MAC_MSGID_SHL; msg[1] = sc->sc_stats_phys; /* physical address of stat block */ return ixpnpe_sendmsg_async(sc->sc_npe, msg); } #if 0 /* * Get the current statistics block. */ static int npe_getstats(struct npe_softc *sc) { uint32_t msg[2]; msg[0] = NPE_GETSTATS << NPE_MAC_MSGID_SHL; msg[1] = sc->sc_stats_phys; /* physical address of stat block */ return ixpnpe_sendandrecvmsg(sc->sc_npe, msg, msg); } /* * Query the image id of the loaded firmware. */ static uint32_t npe_getimageid(struct npe_softc *sc) { uint32_t msg[2]; msg[0] = NPE_GETSTATUS << NPE_MAC_MSGID_SHL; msg[1] = 0; return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg) == 0 ? msg[1] : 0; } /* * Enable/disable loopback. */ static int npe_setloopback(struct npe_softc *sc, int ena) { uint32_t msg[2]; msg[0] = (NPE_SETLOOPBACK << NPE_MAC_MSGID_SHL) | (ena != 0); msg[1] = 0; return ixpnpe_sendandrecvmsg_sync(sc->sc_npe, msg, msg); } #endif static void npe_child_detached(device_t dev, device_t child) { struct npe_softc *sc; sc = device_get_softc(dev); if (child == sc->sc_mii) sc->sc_mii = NULL; } /* * MII bus support routines. */ #define MII_RD4(sc, reg) bus_space_read_4(sc->sc_iot, sc->sc_miih, reg) #define MII_WR4(sc, reg, v) \ bus_space_write_4(sc->sc_iot, sc->sc_miih, reg, v) static uint32_t npe_mii_mdio_read(struct npe_softc *sc, int reg) { uint32_t v; /* NB: registers are known to be sequential */ v = (MII_RD4(sc, reg+0) & 0xff) << 0; v |= (MII_RD4(sc, reg+4) & 0xff) << 8; v |= (MII_RD4(sc, reg+8) & 0xff) << 16; v |= (MII_RD4(sc, reg+12) & 0xff) << 24; return v; } static void npe_mii_mdio_write(struct npe_softc *sc, int reg, uint32_t cmd) { /* NB: registers are known to be sequential */ MII_WR4(sc, reg+0, cmd & 0xff); MII_WR4(sc, reg+4, (cmd >> 8) & 0xff); MII_WR4(sc, reg+8, (cmd >> 16) & 0xff); MII_WR4(sc, reg+12, (cmd >> 24) & 0xff); } static int npe_mii_mdio_wait(struct npe_softc *sc) { uint32_t v; int i; /* NB: typically this takes 25-30 trips */ for (i = 0; i < 1000; i++) { v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_CMD); if ((v & NPE_MII_GO) == 0) return 1; DELAY(1); } device_printf(sc->sc_dev, "%s: timeout after ~1ms, cmd 0x%x\n", __func__, v); return 0; /* NB: timeout */ } static int npe_miibus_readreg(device_t dev, int phy, int reg) { struct npe_softc *sc = device_get_softc(dev); uint32_t v; if (phy != sc->sc_phy) /* XXX no auto-detect */ return 0xffff; v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL) | NPE_MII_GO; npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v); if (npe_mii_mdio_wait(sc)) v = npe_mii_mdio_read(sc, NPE_MAC_MDIO_STS); else v = 0xffff | NPE_MII_READ_FAIL; return (v & NPE_MII_READ_FAIL) ? 
0xffff : (v & 0xffff); } static void npe_miibus_writereg(device_t dev, int phy, int reg, int data) { struct npe_softc *sc = device_get_softc(dev); uint32_t v; if (phy != sc->sc_phy) /* XXX */ return; v = (phy << NPE_MII_ADDR_SHL) | (reg << NPE_MII_REG_SHL) | data | NPE_MII_WRITE | NPE_MII_GO; npe_mii_mdio_write(sc, NPE_MAC_MDIO_CMD, v); /* XXX complain about timeout */ (void) npe_mii_mdio_wait(sc); } static void npe_miibus_statchg(device_t dev) { struct npe_softc *sc = device_get_softc(dev); struct mii_data *mii = device_get_softc(sc->sc_mii); uint32_t tx1, rx1; /* sync MAC duplex state */ tx1 = RD4(sc, NPE_MAC_TX_CNTRL1); rx1 = RD4(sc, NPE_MAC_RX_CNTRL1); if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { tx1 &= ~NPE_TX_CNTRL1_DUPLEX; rx1 |= NPE_RX_CNTRL1_PAUSE_EN; } else { tx1 |= NPE_TX_CNTRL1_DUPLEX; rx1 &= ~NPE_RX_CNTRL1_PAUSE_EN; } WR4(sc, NPE_MAC_RX_CNTRL1, rx1); WR4(sc, NPE_MAC_TX_CNTRL1, tx1); } static device_method_t npe_methods[] = { /* Device interface */ DEVMETHOD(device_probe, npe_probe), DEVMETHOD(device_attach, npe_attach), DEVMETHOD(device_detach, npe_detach), /* Bus interface */ DEVMETHOD(bus_child_detached, npe_child_detached), /* MII interface */ DEVMETHOD(miibus_readreg, npe_miibus_readreg), DEVMETHOD(miibus_writereg, npe_miibus_writereg), DEVMETHOD(miibus_statchg, npe_miibus_statchg), { 0, 0 } }; static driver_t npe_driver = { "npe", npe_methods, sizeof(struct npe_softc), }; DRIVER_MODULE(npe, ixp, npe_driver, npe_devclass, 0, 0); DRIVER_MODULE(miibus, npe, miibus_driver, miibus_devclass, 0, 0); MODULE_DEPEND(npe, ixpqmgr, 1, 1, 1); MODULE_DEPEND(npe, miibus, 1, 1, 1); MODULE_DEPEND(npe, ether, 1, 1, 1); Index: head/sys/arm/xscale/ixp425/ixp425_qmgr.c =================================================================== --- head/sys/arm/xscale/ixp425/ixp425_qmgr.c (revision 193095) +++ head/sys/arm/xscale/ixp425/ixp425_qmgr.c (revision 193096) @@ -1,1105 +1,1105 @@ /*- * Copyright (c) 2006 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ /*- * Copyright (c) 2001-2005, Intel Corporation. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the Intel Corporation nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Intel XScale Queue Manager support. * * Each IXP4XXX device has a hardware block that implements a priority * queue manager that is shared between the XScale cpu and the backend * devices (such as the NPE). Queues are accessed by reading/writing * special memory locations. The queue contents are mapped into a shared * SRAM region with entries managed in a circular buffer. The XScale * processor can receive interrupts based on queue contents (a condition * code determines when interrupts should be delivered). * * The code here basically replaces the qmgr class in the Intel Access * Library (IAL). */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * State per AQM hw queue. * This structure holds q configuration and dispatch state. 
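 * Note that a queue is popped simply by reading its access register
 * (qAccRegAddr below) and pushed by writing it; the hardware advances
 * the ring pointers itself, so ixpqmgr_qread() detects underflow by
 * seeing a zero entry and then confirming against the underflow
 * status bit.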
*/ struct qmgrInfo { int qSizeInWords; /* queue size in words */ uint32_t qOflowStatBitMask; /* overflow status mask */ int qWriteCount; /* queue write count */ bus_size_t qAccRegAddr; /* access register */ bus_size_t qUOStatRegAddr; /* status register */ bus_size_t qConfigRegAddr; /* config register */ int qSizeInEntries; /* queue size in entries */ uint32_t qUflowStatBitMask; /* underflow status mask */ int qReadCount; /* queue read count */ /* XXX union */ uint32_t qStatRegAddr; uint32_t qStatBitsOffset; uint32_t qStat0BitMask; uint32_t qStat1BitMask; uint32_t intRegCheckMask; /* interrupt reg check mask */ void (*cb)(int, void *); /* callback function */ void *cbarg; /* callback argument */ int priority; /* dispatch priority */ #if 0 /* NB: needed only for A0 parts */ u_int statusWordOffset; /* status word offset */ uint32_t statusMask; /* status mask */ uint32_t statusCheckValue; /* status check value */ #endif }; struct ixpqmgr_softc { device_t sc_dev; bus_space_tag_t sc_iot; bus_space_handle_t sc_ioh; struct resource *sc_irq1; /* IRQ resource */ void *sc_ih1; /* interrupt handler */ int sc_rid1; /* resource id for irq */ struct resource *sc_irq2; void *sc_ih2; int sc_rid2; struct qmgrInfo qinfo[IX_QMGR_MAX_NUM_QUEUES]; /* * This array contains a list of queue identifiers ordered by * priority. The table is split logically between queue * identifiers 0-31 and 32-63. To optimize lookups bit masks * are kept for the first-32 and last-32 q's. When the * table needs to be rebuilt mark rebuildTable and it'll * happen after the next interrupt. */ int priorityTable[IX_QMGR_MAX_NUM_QUEUES]; uint32_t lowPriorityTableFirstHalfMask; uint32_t uppPriorityTableFirstHalfMask; int rebuildTable; /* rebuild priorityTable */ uint32_t aqmFreeSramAddress; /* SRAM free space */ }; static int qmgr_debug = 0; SYSCTL_INT(_debug, OID_AUTO, qmgr, CTLFLAG_RW, &qmgr_debug, 0, "IXP4XX Q-Manager debug msgs"); TUNABLE_INT("debug.qmgr", &qmgr_debug); #define DPRINTF(dev, fmt, ...) do { \ if (qmgr_debug) printf(fmt, __VA_ARGS__); \ } while (0) #define DPRINTFn(n, dev, fmt, ...) 
do { \ if (qmgr_debug >= n) printf(fmt, __VA_ARGS__); \ } while (0) static struct ixpqmgr_softc *ixpqmgr_sc = NULL; static void ixpqmgr_rebuild(struct ixpqmgr_softc *); static void ixpqmgr_intr(void *); static void aqm_int_enable(struct ixpqmgr_softc *sc, int qId); static void aqm_int_disable(struct ixpqmgr_softc *sc, int qId); static void aqm_qcfg(struct ixpqmgr_softc *sc, int qId, u_int ne, u_int nf); static void aqm_srcsel_write(struct ixpqmgr_softc *sc, int qId, int sourceId); static void aqm_reset(struct ixpqmgr_softc *sc); static void dummyCallback(int qId, void *arg) { /* XXX complain */ } static uint32_t aqm_reg_read(struct ixpqmgr_softc *sc, bus_size_t off) { DPRINTFn(9, sc->sc_dev, "%s(0x%x)\n", __func__, (int)off); return bus_space_read_4(sc->sc_iot, sc->sc_ioh, off); } static void aqm_reg_write(struct ixpqmgr_softc *sc, bus_size_t off, uint32_t val) { DPRINTFn(9, sc->sc_dev, "%s(0x%x, 0x%x)\n", __func__, (int)off, val); bus_space_write_4(sc->sc_iot, sc->sc_ioh, off, val); } static int ixpqmgr_probe(device_t dev) { device_set_desc(dev, "IXP4XX Q-Manager"); return 0; } static int ixpqmgr_attach(device_t dev) { struct ixpqmgr_softc *sc = device_get_softc(dev); struct ixp425_softc *sa = device_get_softc(device_get_parent(dev)); int i, err; ixpqmgr_sc = sc; sc->sc_dev = dev; sc->sc_iot = sa->sc_iot; if (bus_space_map(sc->sc_iot, IXP425_QMGR_HWBASE, IXP425_QMGR_SIZE, 0, &sc->sc_ioh)) panic("%s: Cannot map registers", device_get_name(dev)); /* NB: we only use the lower 32 q's */ /* Set up QMGR interrupts */ sc->sc_rid1 = 0; sc->sc_irq1 = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->sc_rid1, IXP425_INT_QUE1_32, IXP425_INT_QUE1_32, 1, RF_ACTIVE); sc->sc_rid2 = 1; sc->sc_irq2 = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->sc_rid2, IXP425_INT_QUE33_64, IXP425_INT_QUE33_64, 1, RF_ACTIVE); if (sc->sc_irq1 == NULL || sc->sc_irq2 == NULL) panic("Unable to allocate the qmgr irqs.\n"); err = bus_setup_intr(dev, sc->sc_irq1, INTR_TYPE_NET | INTR_MPSAFE, NULL, ixpqmgr_intr, NULL, &sc->sc_ih1); if (err) { device_printf(dev, "failed to set up qmgr irq=%d\n", IXP425_INT_QUE1_32); return (ENXIO); } err = bus_setup_intr(dev, sc->sc_irq2, INTR_TYPE_NET | INTR_MPSAFE, NULL, ixpqmgr_intr, NULL, &sc->sc_ih2); if (err) { device_printf(dev, "failed to set up qmgr irq=%d\n", IXP425_INT_QUE33_64); return (ENXIO); } /* NB: softc is pre-zero'd */ for (i = 0; i < IX_QMGR_MAX_NUM_QUEUES; i++) { struct qmgrInfo *qi = &sc->qinfo[i]; qi->cb = dummyCallback; qi->priority = IX_QMGR_Q_PRIORITY_0; /* default priority */ /* * There are two interrupt registers, 32 bits each. One * for the lower queues(0-31) and one for the upper * queues(32-63). Therefore need to mod by 32 i.e the * min upper queue identifier. */ qi->intRegCheckMask = (1<<(i%(IX_QMGR_MIN_QUEUPP_QID))); /* * Register addresses and bit masks are calculated and * stored here to optimize QRead, QWrite and QStatusGet * functions. 
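 * For example (assuming IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD == 8,
 * i.e. four status bits per lower-group queue): queue 10 gets
 * qStatRegAddr = IX_QMGR_QUELOWSTAT0_OFFSET + sizeof(uint32_t) (the
 * second status word) and qStatBitsOffset = 8, which is exactly the
 * shift/mask pair ixpqmgr_getqstatus() uses to extract that queue's
 * status nibble.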
*/ /* AQM Queue access reg addresses, per queue */ qi->qAccRegAddr = IX_QMGR_Q_ACCESS_ADDR_GET(i); qi->qAccRegAddr = IX_QMGR_Q_ACCESS_ADDR_GET(i); qi->qConfigRegAddr = IX_QMGR_Q_CONFIG_ADDR_GET(i); /* AQM Queue lower-group (0-31), only */ if (i < IX_QMGR_MIN_QUEUPP_QID) { /* AQM Q underflow/overflow status reg address, per queue */ qi->qUOStatRegAddr = IX_QMGR_QUEUOSTAT0_OFFSET + ((i / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD) * sizeof(uint32_t)); /* AQM Q underflow status bit masks for status reg per queue */ qi->qUflowStatBitMask = (IX_QMGR_UNDERFLOW_BIT_OFFSET + 1) << ((i & (IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD - 1)) * (32 / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD)); /* AQM Q overflow status bit masks for status reg, per queue */ qi->qOflowStatBitMask = (IX_QMGR_OVERFLOW_BIT_OFFSET + 1) << ((i & (IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD - 1)) * (32 / IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD)); /* AQM Q lower-group (0-31) status reg addresses, per queue */ qi->qStatRegAddr = IX_QMGR_QUELOWSTAT0_OFFSET + ((i / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) * sizeof(uint32_t)); /* AQM Q lower-group (0-31) status register bit offset */ qi->qStatBitsOffset = (i & (IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD - 1)) * (32 / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD); } else { /* AQM Q upper-group (32-63), only */ qi->qUOStatRegAddr = 0; /* XXX */ /* AQM Q upper-group (32-63) Nearly Empty status reg bitmasks */ qi->qStat0BitMask = (1 << (i - IX_QMGR_MIN_QUEUPP_QID)); /* AQM Q upper-group (32-63) Full status register bitmasks */ qi->qStat1BitMask = (1 << (i - IX_QMGR_MIN_QUEUPP_QID)); } } sc->aqmFreeSramAddress = 0x100; /* Q buffer space starts at 0x2100 */ ixpqmgr_rebuild(sc); /* build inital priority table */ aqm_reset(sc); /* reset h/w */ return (0); } static int ixpqmgr_detach(device_t dev) { struct ixpqmgr_softc *sc = device_get_softc(dev); aqm_reset(sc); /* disable interrupts */ bus_teardown_intr(dev, sc->sc_irq1, sc->sc_ih1); bus_teardown_intr(dev, sc->sc_irq2, sc->sc_ih2); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_rid1, sc->sc_irq1); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_rid2, sc->sc_irq2); bus_space_unmap(sc->sc_iot, sc->sc_ioh, IXP425_QMGR_SIZE); return (0); } int ixpqmgr_qconfig(int qId, int qEntries, int ne, int nf, int srcSel, - void (*cb)(int, void *), void *cbarg) + qconfig_hand_t *cb, void *cbarg) { struct ixpqmgr_softc *sc = ixpqmgr_sc; struct qmgrInfo *qi = &sc->qinfo[qId]; DPRINTF(sc->sc_dev, "%s(%u, %u, %u, %u, %u, %p, %p)\n", __func__, qId, qEntries, ne, nf, srcSel, cb, cbarg); /* NB: entry size is always 1 */ qi->qSizeInWords = qEntries; qi->qReadCount = 0; qi->qWriteCount = 0; qi->qSizeInEntries = qEntries; /* XXX kept for code clarity */ if (cb == NULL) { /* Reset to dummy callback */ qi->cb = dummyCallback; qi->cbarg = 0; } else { qi->cb = cb; qi->cbarg = cbarg; } /* Write the config register; NB must be AFTER qinfo setup */ aqm_qcfg(sc, qId, ne, nf); /* * Account for space just allocated to queue. 
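 * e.g. (illustrative) configuring two 64-entry queues advances
 * aqmFreeSramAddress from its initial 0x100 to 0x300, i.e. the next
 * queue's buffer would start at SRAM offset 0x2300 in the window
 * dumped by ixpqmgr_dump().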
	 */
	sc->aqmFreeSramAddress += (qi->qSizeInWords * sizeof(uint32_t));

	/* Set the interrupt source if this queue is in the range 0-31 */
	if (qId < IX_QMGR_MIN_QUEUPP_QID)
		aqm_srcsel_write(sc, qId, srcSel);

	if (cb != NULL)				/* Enable the interrupt */
		aqm_int_enable(sc, qId);

	sc->rebuildTable = TRUE;

	return 0;		/* XXX */
}

int
ixpqmgr_qwrite(int qId, uint32_t entry)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];

	DPRINTFn(3, sc->sc_dev, "%s(%u, 0x%x) writeCount %u size %u\n",
	    __func__, qId, entry, qi->qWriteCount, qi->qSizeInEntries);

	/* write the entry */
	aqm_reg_write(sc, qi->qAccRegAddr, entry);

	/* NB: overflow is available for lower queues only */
	if (qId < IX_QMGR_MIN_QUEUPP_QID) {
		int qSize = qi->qSizeInEntries;
		/*
		 * Increment the current number of entries in the queue
		 * and check for overflow.
		 */
		if (qi->qWriteCount++ == qSize) {	/* check for overflow */
			uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);
			int qPtrs;

			/*
			 * Read the status twice because the status may
			 * not be immediately ready after the write operation.
			 */
			if ((status & qi->qOflowStatBitMask) ||
			    ((status = aqm_reg_read(sc, qi->qUOStatRegAddr))
			      & qi->qOflowStatBitMask)) {
				/*
				 * The queue is full; clear the overflow
				 * status bit if set.
				 */
				aqm_reg_write(sc, qi->qUOStatRegAddr,
				    status & ~qi->qOflowStatBitMask);
				qi->qWriteCount = qSize;
				DPRINTFn(5, sc->sc_dev,
				    "%s(%u, 0x%x) Q full, overflow status cleared\n",
				    __func__, qId, entry);
				return ENOSPC;
			}
			/*
			 * No overflow occurred: someone is draining the
			 * queue, so resynchronize the write counter from
			 * the current number of entries in the queue.
			 */

			/* calculate number of words in q */
			qPtrs = aqm_reg_read(sc, qi->qConfigRegAddr);
			DPRINTFn(2, sc->sc_dev,
			    "%s(%u, 0x%x) Q full, no overflow status, qConfig 0x%x\n",
			    __func__, qId, entry, qPtrs);
			qPtrs = (qPtrs - (qPtrs >> 7)) & 0x7f;

			if (qPtrs == 0) {
				/*
				 * The queue may be full at the time of the
				 * snapshot. Next access will check
				 * the overflow status again.
				 */
				qi->qWriteCount = qSize;
			} else {
				/* convert the number of words to a number of entries */
				qi->qWriteCount = qPtrs & (qSize - 1);
			}
		}
	}
	return 0;
}

int
ixpqmgr_qread(int qId, uint32_t *entry)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];
	bus_size_t off = qi->qAccRegAddr;

	*entry = aqm_reg_read(sc, off);

	/*
	 * Reset the current read count: next access to the read function
	 * will force an underflow status check.
	 */
	qi->qReadCount = 0;

	/* Check if underflow occurred on the read */
	if (*entry == 0 && qId < IX_QMGR_MIN_QUEUPP_QID) {
		/* get the queue status */
		uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr);

		if (status & qi->qUflowStatBitMask) { /* clear underflow status */
			aqm_reg_write(sc, qi->qUOStatRegAddr,
			    status &~ qi->qUflowStatBitMask);
			return ENOSPC;
		}
	}
	return 0;
}

int
ixpqmgr_qreadm(int qId, uint32_t n, uint32_t *p)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	struct qmgrInfo *qi = &sc->qinfo[qId];
	uint32_t entry;
	bus_size_t off = qi->qAccRegAddr;

	entry = aqm_reg_read(sc, off);
	while (--n) {
		if (entry == 0) {
			/* if we read a NULL entry, stop. We have underflowed */
			break;
		}
		*p++ = entry;	/* store */
		entry = aqm_reg_read(sc, off);
	}
	*p = entry;

	/*
	 * Reset the current read count: next access to the read function
	 * will force an underflow status check.
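	 *
	 * (Editor's illustration: a zero entry is ambiguous -- it may be a
	 * legitimate value or the result of reading an empty queue. For
	 * lower-group queue 7 the check reads QUEUOSTAT0 (0x410) and tests
	 * the underflow bit 1 << (7*2) = bit 14; if it is set, the bit is
	 * cleared and ENOSPC returned.)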
*/ qi->qReadCount = 0; /* Check if underflow occurred on the read */ if (entry == 0 && qId < IX_QMGR_MIN_QUEUPP_QID) { /* get the queue status */ uint32_t status = aqm_reg_read(sc, qi->qUOStatRegAddr); if (status & qi->qUflowStatBitMask) { /* clear underflow status */ aqm_reg_write(sc, qi->qUOStatRegAddr, status &~ qi->qUflowStatBitMask); return ENOSPC; } } return 0; } uint32_t ixpqmgr_getqstatus(int qId) { #define QLOWSTATMASK \ ((1 << (32 / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD)) - 1) struct ixpqmgr_softc *sc = ixpqmgr_sc; const struct qmgrInfo *qi = &sc->qinfo[qId]; uint32_t status; if (qId < IX_QMGR_MIN_QUEUPP_QID) { /* read the status of a queue in the range 0-31 */ status = aqm_reg_read(sc, qi->qStatRegAddr); /* mask out the status bits relevant only to this queue */ status = (status >> qi->qStatBitsOffset) & QLOWSTATMASK; } else { /* read status of a queue in the range 32-63 */ status = 0; if (aqm_reg_read(sc, IX_QMGR_QUEUPPSTAT0_OFFSET)&qi->qStat0BitMask) status |= IX_QMGR_Q_STATUS_NE_BIT_MASK; /* nearly empty */ if (aqm_reg_read(sc, IX_QMGR_QUEUPPSTAT1_OFFSET)&qi->qStat1BitMask) status |= IX_QMGR_Q_STATUS_F_BIT_MASK; /* full */ } return status; #undef QLOWSTATMASK } uint32_t ixpqmgr_getqconfig(int qId) { struct ixpqmgr_softc *sc = ixpqmgr_sc; return aqm_reg_read(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId)); } void ixpqmgr_dump(void) { struct ixpqmgr_softc *sc = ixpqmgr_sc; int i, a; /* status registers */ printf("0x%04x: %08x %08x %08x %08x\n" , 0x400 , aqm_reg_read(sc, 0x400) , aqm_reg_read(sc, 0x400+4) , aqm_reg_read(sc, 0x400+8) , aqm_reg_read(sc, 0x400+12) ); printf("0x%04x: %08x %08x %08x %08x\n" , 0x410 , aqm_reg_read(sc, 0x410) , aqm_reg_read(sc, 0x410+4) , aqm_reg_read(sc, 0x410+8) , aqm_reg_read(sc, 0x410+12) ); printf("0x%04x: %08x %08x %08x %08x\n" , 0x420 , aqm_reg_read(sc, 0x420) , aqm_reg_read(sc, 0x420+4) , aqm_reg_read(sc, 0x420+8) , aqm_reg_read(sc, 0x420+12) ); printf("0x%04x: %08x %08x %08x %08x\n" , 0x430 , aqm_reg_read(sc, 0x430) , aqm_reg_read(sc, 0x430+4) , aqm_reg_read(sc, 0x430+8) , aqm_reg_read(sc, 0x430+12) ); /* q configuration registers */ for (a = 0x2000; a < 0x20ff; a += 32) printf("0x%04x: %08x %08x %08x %08x %08x %08x %08x %08x\n" , a , aqm_reg_read(sc, a) , aqm_reg_read(sc, a+4) , aqm_reg_read(sc, a+8) , aqm_reg_read(sc, a+12) , aqm_reg_read(sc, a+16) , aqm_reg_read(sc, a+20) , aqm_reg_read(sc, a+24) , aqm_reg_read(sc, a+28) ); /* allocated SRAM */ for (i = 0x100; i < sc->aqmFreeSramAddress; i += 32) { a = 0x2000 + i; printf("0x%04x: %08x %08x %08x %08x %08x %08x %08x %08x\n" , a , aqm_reg_read(sc, a) , aqm_reg_read(sc, a+4) , aqm_reg_read(sc, a+8) , aqm_reg_read(sc, a+12) , aqm_reg_read(sc, a+16) , aqm_reg_read(sc, a+20) , aqm_reg_read(sc, a+24) , aqm_reg_read(sc, a+28) ); } for (i = 0; i < 16; i++) { printf("Q[%2d] config 0x%08x status 0x%02x " "Q[%2d] config 0x%08x status 0x%02x\n" , i, ixpqmgr_getqconfig(i), ixpqmgr_getqstatus(i) , i+16, ixpqmgr_getqconfig(i+16), ixpqmgr_getqstatus(i+16) ); } } void ixpqmgr_notify_enable(int qId, int srcSel) { struct ixpqmgr_softc *sc = ixpqmgr_sc; #if 0 /* Calculate the checkMask and checkValue for this q */ aqm_calc_statuscheck(sc, qId, srcSel); #endif /* Set the interrupt source if this queue is in the range 0-31 */ if (qId < IX_QMGR_MIN_QUEUPP_QID) aqm_srcsel_write(sc, qId, srcSel); /* Enable the interrupt */ aqm_int_enable(sc, qId); } void ixpqmgr_notify_disable(int qId) { struct ixpqmgr_softc *sc = ixpqmgr_sc; aqm_int_disable(sc, qId); } /* * Rebuild the priority table used by the dispatcher. 
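 *
 * (Editor's note: the table has 64 slots; indices 0-31 hold the
 * lower-group queue ids and indices 32-63 the upper-group ids, each
 * half sorted from priority 0 (served first) to priority 2, so with
 * every queue at the default priority the table is simply the
 * identity mapping.)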
 */
static void
ixpqmgr_rebuild(struct ixpqmgr_softc *sc)
{
	int q, pri;
	int lowQuePriorityTableIndex, uppQuePriorityTableIndex;
	struct qmgrInfo *qi;

	sc->lowPriorityTableFirstHalfMask = 0;
	sc->uppPriorityTableFirstHalfMask = 0;

	lowQuePriorityTableIndex = 0;
	uppQuePriorityTableIndex = 32;
	for (pri = 0; pri < IX_QMGR_NUM_PRIORITY_LEVELS; pri++) {
		/* lower-group q's (0-31) */
		for (q = 0; q < IX_QMGR_MIN_QUEUPP_QID; q++) {
			qi = &sc->qinfo[q];
			if (qi->priority == pri) {
				/*
				 * Build the priority table bitmask that
				 * matches the queues of the first half
				 * of the priority table.
				 */
				if (lowQuePriorityTableIndex < 16) {
					sc->lowPriorityTableFirstHalfMask |=
					    qi->intRegCheckMask;
				}
				sc->priorityTable[lowQuePriorityTableIndex++] = q;
			}
		}
		/* upper-group q's (32-63) */
		for (; q < IX_QMGR_MAX_NUM_QUEUES; q++) {
			qi = &sc->qinfo[q];
			if (qi->priority == pri) {
				/*
				 * Build the priority table bitmask that
				 * matches the queues of the first half
				 * of the priority table.
				 */
				if (uppQuePriorityTableIndex < 48) {
					sc->uppPriorityTableFirstHalfMask |=
					    qi->intRegCheckMask;
				}
				sc->priorityTable[uppQuePriorityTableIndex++] = q;
			}
		}
	}
	sc->rebuildTable = FALSE;
}

/*
 * Count the number of leading zero bits in a word,
 * and return the same value as the CLZ instruction.
 * Note this is similar to the standard ffs function but
 * it counts zeroes from the MSB instead of the LSB.
 *
 * word (in)	return value (out)
 * 0x80000000	0
 * 0x40000000	1
 * ...		...
 * 0x00000002	30
 * 0x00000001	31
 * 0x00000000	32
 *
 * The C version of this function is used as a replacement
 * for systems not providing the equivalent of the CLZ
 * assembly language instruction.
 *
 * Note that this version is big-endian
 */
static unsigned int
_lzcount(uint32_t word)
{
	unsigned int lzcount = 0;

	if (word == 0)
		return 32;
	while ((word & 0x80000000) == 0) {
		word <<= 1;
		lzcount++;
	}
	return lzcount;
}

static void
ixpqmgr_intr(void *arg)
{
	struct ixpqmgr_softc *sc = ixpqmgr_sc;
	uint32_t intRegVal;		/* Interrupt reg val */
	struct qmgrInfo *qi;
	int priorityTableIndex;		/* Priority table index */
	int qIndex;			/* Current queue being processed */

	/* Read the interrupt register */
	intRegVal = aqm_reg_read(sc, IX_QMGR_QINTREG0_OFFSET);
	/* Write back to clear interrupt */
	aqm_reg_write(sc, IX_QMGR_QINTREG0_OFFSET, intRegVal);

	DPRINTFn(5, sc->sc_dev, "%s: ISR0 0x%x ISR1 0x%x\n",
	    __func__, intRegVal, aqm_reg_read(sc, IX_QMGR_QINTREG1_OFFSET));

	/* Process only if at least one queue has its interrupt bit set */
	if (intRegVal != 0) {
		/* get the first queue Id from the interrupt register value */
		qIndex = (32 - 1) - _lzcount(intRegVal);

		DPRINTFn(2, sc->sc_dev, "%s: ISR0 0x%x qIndex %u\n",
		    __func__, intRegVal, qIndex);

		/*
		 * Optimize for single callback case.
		 */
		qi = &sc->qinfo[qIndex];
		if (intRegVal == qi->intRegCheckMask) {
			/*
			 * Only 1 queue event triggered a notification.
			 * Call the callback function for this queue
			 */
			qi->cb(qIndex, qi->cbarg);
		} else {
			/*
			 * The event is triggered by more than 1 queue;
			 * the queue search will start from the beginning
			 * or the middle of the priority table.
			 *
			 * The search will end when all the bits of the
			 * interrupt register are cleared. There is no
			 * need to maintain a separate value and test it
			 * at each iteration.
			 */
			if (intRegVal & sc->lowPriorityTableFirstHalfMask) {
				priorityTableIndex = 0;
			} else {
				priorityTableIndex = 16;
			}
			/*
			 * Iterate over the priority table until all the
			 * bits of the interrupt register are cleared.
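			 *
			 * (Editor's illustration: with intRegVal = 0x22,
			 * queues 1 and 5 are pending; the loop below walks
			 * the priority table, fires each matching callback
			 * and clears its bit, stopping as soon as intRegVal
			 * reaches zero rather than scanning every slot.)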
*/ do { qIndex = sc->priorityTable[priorityTableIndex++]; qi = &sc->qinfo[qIndex]; /* If this queue caused this interrupt to be raised */ if (intRegVal & qi->intRegCheckMask) { /* Call the callback function for this queue */ qi->cb(qIndex, qi->cbarg); /* Clear the interrupt register bit */ intRegVal &= ~qi->intRegCheckMask; } } while (intRegVal); } } /* Rebuild the priority table if needed */ if (sc->rebuildTable) ixpqmgr_rebuild(sc); } #if 0 /* * Generate the parameters used to check if a Q's status matches * the specified source select. We calculate which status word * to check (statusWordOffset), the value to check the status * against (statusCheckValue) and the mask (statusMask) to mask * out all but the bits to check in the status word. */ static void aqm_calc_statuscheck(int qId, IxQMgrSourceId srcSel) { struct qmgrInfo *qi = &qinfo[qId]; uint32_t shiftVal; if (qId < IX_QMGR_MIN_QUEUPP_QID) { switch (srcSel) { case IX_QMGR_Q_SOURCE_ID_E: qi->statusCheckValue = IX_QMGR_Q_STATUS_E_BIT_MASK; qi->statusMask = IX_QMGR_Q_STATUS_E_BIT_MASK; break; case IX_QMGR_Q_SOURCE_ID_NE: qi->statusCheckValue = IX_QMGR_Q_STATUS_NE_BIT_MASK; qi->statusMask = IX_QMGR_Q_STATUS_NE_BIT_MASK; break; case IX_QMGR_Q_SOURCE_ID_NF: qi->statusCheckValue = IX_QMGR_Q_STATUS_NF_BIT_MASK; qi->statusMask = IX_QMGR_Q_STATUS_NF_BIT_MASK; break; case IX_QMGR_Q_SOURCE_ID_F: qi->statusCheckValue = IX_QMGR_Q_STATUS_F_BIT_MASK; qi->statusMask = IX_QMGR_Q_STATUS_F_BIT_MASK; break; case IX_QMGR_Q_SOURCE_ID_NOT_E: qi->statusCheckValue = 0; qi->statusMask = IX_QMGR_Q_STATUS_E_BIT_MASK; break; case IX_QMGR_Q_SOURCE_ID_NOT_NE: qi->statusCheckValue = 0; qi->statusMask = IX_QMGR_Q_STATUS_NE_BIT_MASK; break; case IX_QMGR_Q_SOURCE_ID_NOT_NF: qi->statusCheckValue = 0; qi->statusMask = IX_QMGR_Q_STATUS_NF_BIT_MASK; break; case IX_QMGR_Q_SOURCE_ID_NOT_F: qi->statusCheckValue = 0; qi->statusMask = IX_QMGR_Q_STATUS_F_BIT_MASK; break; default: /* Should never hit */ IX_OSAL_ASSERT(0); break; } /* One nibble of status per queue so need to shift the * check value and mask out to the correct position. */ shiftVal = (qId % IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) * IX_QMGR_QUELOWSTAT_BITS_PER_Q; /* Calculate the which status word to check from the qId, * 8 Qs status per word */ qi->statusWordOffset = qId / IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD; qi->statusCheckValue <<= shiftVal; qi->statusMask <<= shiftVal; } else { /* One status word */ qi->statusWordOffset = 0; /* Single bits per queue and int source bit hardwired NE, * Qs start at 32. */ qi->statusMask = 1 << (qId - IX_QMGR_MIN_QUEUPP_QID); qi->statusCheckValue = qi->statusMask; } } #endif static void aqm_int_enable(struct ixpqmgr_softc *sc, int qId) { bus_size_t reg; uint32_t v; if (qId < IX_QMGR_MIN_QUEUPP_QID) reg = IX_QMGR_QUEIEREG0_OFFSET; else reg = IX_QMGR_QUEIEREG1_OFFSET; v = aqm_reg_read(sc, reg); aqm_reg_write(sc, reg, v | (1 << (qId % IX_QMGR_MIN_QUEUPP_QID))); DPRINTF(sc->sc_dev, "%s(%u) 0x%lx: 0x%x => 0x%x\n", __func__, qId, reg, v, aqm_reg_read(sc, reg)); } static void aqm_int_disable(struct ixpqmgr_softc *sc, int qId) { bus_size_t reg; uint32_t v; if (qId < IX_QMGR_MIN_QUEUPP_QID) reg = IX_QMGR_QUEIEREG0_OFFSET; else reg = IX_QMGR_QUEIEREG1_OFFSET; v = aqm_reg_read(sc, reg); aqm_reg_write(sc, reg, v &~ (1 << (qId % IX_QMGR_MIN_QUEUPP_QID))); DPRINTF(sc->sc_dev, "%s(%u) 0x%lx: 0x%x => 0x%x\n", __func__, qId, reg, v, aqm_reg_read(sc, reg)); } static unsigned log2(unsigned n) { unsigned count; /* * N.B. this function will return 0 if supplied 0. 
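	 *
	 * (Editor's worked example: log2(16) = 4, log2(1) = 0 and
	 * log2(0) = 0; the encoding helpers below rely on this, e.g.
	 * toAqmWatermark(8) = log2(2 * 8) = 4, the "100" watermark code.)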
	 */
	for (count = 0; n/2; count++)
		n /= 2;
	return count;
}

static __inline unsigned
toAqmEntrySize(int entrySize)
{
	/* entrySize 1("00"), 2("01"), 4("10") */
	return log2(entrySize);
}

static __inline unsigned
toAqmBufferSize(unsigned bufferSizeInWords)
{
	/* bufferSize 16("00"), 32("01"), 64("10"), 128("11") */
	return log2(bufferSizeInWords / IX_QMGR_MIN_BUFFER_SIZE);
}

static __inline unsigned
toAqmWatermark(int watermark)
{
	/*
	 * Watermarks 0("000"), 1("001"), 2("010"), 4("011"),
	 * 8("100"), 16("101"), 32("110"), 64("111")
	 */
	return log2(2 * watermark);
}

static void
aqm_qcfg(struct ixpqmgr_softc *sc, int qId, u_int ne, u_int nf)
{
	const struct qmgrInfo *qi = &sc->qinfo[qId];
	uint32_t qCfg;
	uint32_t baseAddress;

	/* Build config register */
	qCfg = ((toAqmEntrySize(1) & IX_QMGR_ENTRY_SIZE_MASK) <<
		    IX_QMGR_Q_CONFIG_ESIZE_OFFSET)
	     | ((toAqmBufferSize(qi->qSizeInWords) & IX_QMGR_SIZE_MASK) <<
		    IX_QMGR_Q_CONFIG_BSIZE_OFFSET);

	/* baseAddress, calculated relative to start address */
	baseAddress = sc->aqmFreeSramAddress;

	/* base address must be word-aligned */
	KASSERT((baseAddress % IX_QMGR_BASE_ADDR_16_WORD_ALIGN) == 0,
	    ("address not word-aligned"));

	/* Now convert to a 16 word pointer as required by QUECONFIG register */
	baseAddress >>= IX_QMGR_BASE_ADDR_16_WORD_SHIFT;
	qCfg |= baseAddress << IX_QMGR_Q_CONFIG_BADDR_OFFSET;

	/* set watermarks */
	qCfg |= (toAqmWatermark(ne) << IX_QMGR_Q_CONFIG_NE_OFFSET)
	     |  (toAqmWatermark(nf) << IX_QMGR_Q_CONFIG_NF_OFFSET);

	DPRINTF(sc->sc_dev, "%s(%u, %u, %u) 0x%x => 0x%x @ 0x%x\n",
	    __func__, qId, ne, nf,
	    aqm_reg_read(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId)),
	    qCfg, IX_QMGR_Q_CONFIG_ADDR_GET(qId));

	aqm_reg_write(sc, IX_QMGR_Q_CONFIG_ADDR_GET(qId), qCfg);
}

static void
aqm_srcsel_write(struct ixpqmgr_softc *sc, int qId, int sourceId)
{
	bus_size_t off;
	uint32_t v;

	/*
	 * Calculate the register offset; multiple queues split across registers
	 */
	off = IX_QMGR_INT0SRCSELREG0_OFFSET +
	    ((qId / IX_QMGR_INTSRC_NUM_QUE_PER_WORD) * sizeof(uint32_t));

	v = aqm_reg_read(sc, off);
	if (off == IX_QMGR_INT0SRCSELREG0_OFFSET && qId == 0) {
		/*
		 * Queue 0's select field shares INT0SRCSELREG0 with bit 3
		 * (the sticky interrupt enable), which must not be corrupted.
		 */
		v |= 0x7;
	} else {
		const uint32_t bpq = 32 / IX_QMGR_INTSRC_NUM_QUE_PER_WORD;
		uint32_t mask;
		int qshift;

		qshift = (qId & (IX_QMGR_INTSRC_NUM_QUE_PER_WORD-1)) * bpq;
		mask = ((1 << bpq) - 1) << qshift;	/* q's status mask */

		/* merge sourceId */
		v = (v &~ mask) | ((sourceId << qshift) & mask);
	}

	DPRINTF(sc->sc_dev, "%s(%u, %u) 0x%x => 0x%x @ 0x%lx\n",
	    __func__, qId, sourceId, aqm_reg_read(sc, off), v, off);
	aqm_reg_write(sc, off, v);
}

/*
 * Reset AQM registers to default values.
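 *
 * (Editor's note: per the reset values in ixp425_qmgr.h, this writes
 * 0xffffffff to QINTREG0/1 -- the interrupt registers are
 * write-one-to-clear, so any latched per-queue events are discarded --
 * and zeroes every QUECONFIG word plus the queue buffer SRAM.)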
*/ static void aqm_reset(struct ixpqmgr_softc *sc) { int i; /* Reset queues 0..31 status registers 0..3 */ aqm_reg_write(sc, IX_QMGR_QUELOWSTAT0_OFFSET, IX_QMGR_QUELOWSTAT_RESET_VALUE); aqm_reg_write(sc, IX_QMGR_QUELOWSTAT1_OFFSET, IX_QMGR_QUELOWSTAT_RESET_VALUE); aqm_reg_write(sc, IX_QMGR_QUELOWSTAT2_OFFSET, IX_QMGR_QUELOWSTAT_RESET_VALUE); aqm_reg_write(sc, IX_QMGR_QUELOWSTAT3_OFFSET, IX_QMGR_QUELOWSTAT_RESET_VALUE); /* Reset underflow/overflow status registers 0..1 */ aqm_reg_write(sc, IX_QMGR_QUEUOSTAT0_OFFSET, IX_QMGR_QUEUOSTAT_RESET_VALUE); aqm_reg_write(sc, IX_QMGR_QUEUOSTAT1_OFFSET, IX_QMGR_QUEUOSTAT_RESET_VALUE); /* Reset queues 32..63 nearly empty status registers */ aqm_reg_write(sc, IX_QMGR_QUEUPPSTAT0_OFFSET, IX_QMGR_QUEUPPSTAT0_RESET_VALUE); /* Reset queues 32..63 full status registers */ aqm_reg_write(sc, IX_QMGR_QUEUPPSTAT1_OFFSET, IX_QMGR_QUEUPPSTAT1_RESET_VALUE); /* Reset int0 status flag source select registers 0..3 */ aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG0_OFFSET, IX_QMGR_INT0SRCSELREG_RESET_VALUE); aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG1_OFFSET, IX_QMGR_INT0SRCSELREG_RESET_VALUE); aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG2_OFFSET, IX_QMGR_INT0SRCSELREG_RESET_VALUE); aqm_reg_write(sc, IX_QMGR_INT0SRCSELREG3_OFFSET, IX_QMGR_INT0SRCSELREG_RESET_VALUE); /* Reset queue interrupt enable register 0..1 */ aqm_reg_write(sc, IX_QMGR_QUEIEREG0_OFFSET, IX_QMGR_QUEIEREG_RESET_VALUE); aqm_reg_write(sc, IX_QMGR_QUEIEREG1_OFFSET, IX_QMGR_QUEIEREG_RESET_VALUE); /* Reset queue interrupt register 0..1 */ aqm_reg_write(sc, IX_QMGR_QINTREG0_OFFSET, IX_QMGR_QINTREG_RESET_VALUE); aqm_reg_write(sc, IX_QMGR_QINTREG1_OFFSET, IX_QMGR_QINTREG_RESET_VALUE); /* Reset queue configuration words 0..63 */ for (i = 0; i < IX_QMGR_MAX_NUM_QUEUES; i++) aqm_reg_write(sc, sc->qinfo[i].qConfigRegAddr, IX_QMGR_QUECONFIG_RESET_VALUE); /* XXX zero SRAM to simplify debugging */ for (i = IX_QMGR_QUEBUFFER_SPACE_OFFSET; i < IX_QMGR_AQM_SRAM_SIZE_IN_BYTES; i += sizeof(uint32_t)) aqm_reg_write(sc, i, 0); } static device_method_t ixpqmgr_methods[] = { DEVMETHOD(device_probe, ixpqmgr_probe), DEVMETHOD(device_attach, ixpqmgr_attach), DEVMETHOD(device_detach, ixpqmgr_detach), { 0, 0 } }; static driver_t ixpqmgr_driver = { "ixpqmgr", ixpqmgr_methods, sizeof(struct ixpqmgr_softc), }; static devclass_t ixpqmgr_devclass; DRIVER_MODULE(ixpqmgr, ixp, ixpqmgr_driver, ixpqmgr_devclass, 0, 0); Index: head/sys/arm/xscale/ixp425/ixp425_qmgr.h =================================================================== --- head/sys/arm/xscale/ixp425/ixp425_qmgr.h (revision 193095) +++ head/sys/arm/xscale/ixp425/ixp425_qmgr.h (revision 193096) @@ -1,243 +1,245 @@ /*- * Copyright (c) 2006 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. 
* * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. * * $FreeBSD$ */ /*- * Copyright (c) 2001-2005, Intel Corporation. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the Intel Corporation nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifndef ARM_XSCALE_IXP425_QMGR_H #define ARM_XSCALE_IXP425_QMGR_H #define IX_QMGR_MAX_NUM_QUEUES 64 #define IX_QMGR_MIN_QUEUPP_QID 32 #define IX_QMGR_MIN_ENTRY_SIZE_IN_WORDS 16 /* Total size of SRAM */ #define IX_QMGR_AQM_SRAM_SIZE_IN_BYTES 0x4000 #define IX_QMGR_Q_PRIORITY_0 0 #define IX_QMGR_Q_PRIORITY_1 1 #define IX_QMGR_Q_PRIORITY_2 2 #define IX_QMGR_NUM_PRIORITY_LEVELS 3 /* number of priority levels */ #define IX_QMGR_Q_STATUS_E_BIT_MASK 0x1 /* Empty */ #define IX_QMGR_Q_STATUS_NE_BIT_MASK 0x2 /* Nearly Empty */ #define IX_QMGR_Q_STATUS_NF_BIT_MASK 0x4 /* Nearly Full */ #define IX_QMGR_Q_STATUS_F_BIT_MASK 0x8 /* Full */ #define IX_QMGR_Q_STATUS_UF_BIT_MASK 0x10 /* Underflow */ #define IX_QMGR_Q_STATUS_OF_BIT_MASK 0x20 /* Overflow */ #define IX_QMGR_Q_SOURCE_ID_E 0 /* Q Empty after last read */ #define IX_QMGR_Q_SOURCE_ID_NE 1 /* Q Nearly Empty after last read */ #define IX_QMGR_Q_SOURCE_ID_NF 2 /* Q Nearly Full after last write */ #define IX_QMGR_Q_SOURCE_ID_F 3 /* Q Full after last write */ #define IX_QMGR_Q_SOURCE_ID_NOT_E 4 /* Q !Empty after last write */ #define IX_QMGR_Q_SOURCE_ID_NOT_NE 5 /* Q !Nearly Empty after last write */ #define IX_QMGR_Q_SOURCE_ID_NOT_NF 6 /* Q !Nearly Full after last read */ #define IX_QMGR_Q_SOURCE_ID_NOT_F 7 /* Q !Full after last read */ #define IX_QMGR_UNDERFLOW_BIT_OFFSET 0x0 /* underflow bit mask */ #define IX_QMGR_OVERFLOW_BIT_OFFSET 0x1 /* overflow bit mask */ #define IX_QMGR_QUEACC0_OFFSET 0x0000 /* q 0 access register */ #define IX_QMGR_QUEACC_SIZE 0x4/*words*/ #define IX_QMGR_QUELOWSTAT0_OFFSET 0x400 /* Q status, q's 0-7 */ #define IX_QMGR_QUELOWSTAT1_OFFSET 0x404 /* Q status, q's 8-15 */ #define IX_QMGR_QUELOWSTAT2_OFFSET 0x408 /* Q status, q's 16-23 */ #define IX_QMGR_QUELOWSTAT3_OFFSET 0x40c /* Q status, q's 24-31 */ /* Queue status register Q status bits mask */ #define IX_QMGR_QUELOWSTAT_QUE_STS_BITS_MASK 0xF /* Size of queue 0-31 status register */ #define IX_QMGR_QUELOWSTAT_SIZE 0x4 /*words*/ #define IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD 8 /* # status/word */ #define IX_QMGR_QUEUOSTAT0_OFFSET 0x410 /* Q UF/OF status, q's 0-15 */ #define IX_QMGR_QUEUOSTAT1_OFFSET 0x414 /* Q UF/OF status, q's 16-31 */ #define IX_QMGR_QUEUOSTAT_NUM_QUE_PER_WORD 16 /* # UF/OF status/word */ #define IX_QMGR_QUEUPPSTAT0_OFFSET 0x418 /* NE status, q's 32-63 */ #define IX_QMGR_QUEUPPSTAT1_OFFSET 0x41c /* F status, q's 32-63 */ #define IX_QMGR_INT0SRCSELREG0_OFFSET 0x420 /* INT src select, q's 0-7 */ #define IX_QMGR_INT0SRCSELREG1_OFFSET 0x424 /* INT src select, q's 8-15 */ #define IX_QMGR_INT0SRCSELREG2_OFFSET 0x428 /* INT src select, q's 16-23 */ #define IX_QMGR_INT0SRCSELREG3_OFFSET 0x42c /* INT src select, q's 24-31 */ #define IX_QMGR_INTSRC_NUM_QUE_PER_WORD 8 /* # INT src select/word */ #define IX_QMGR_QUEIEREG0_OFFSET 0x430 /* INT enable, q's 0-31 */ #define IX_QMGR_QUEIEREG1_OFFSET 0x434 /* INT enable, q's 32-63 */ #define IX_QMGR_QINTREG0_OFFSET 0x438 /* INT status, q's 0-31 */ #define IX_QMGR_QINTREG1_OFFSET 0x43c /* INT status, q's 32-63 */ #define IX_QMGR_QUECONFIG_BASE_OFFSET 0x2000 /* Q config register, q 0 */ #define IX_QMGR_QUECONFIG_SIZE 0x100 /* total size of Q config regs*/ #define IX_QMGR_QUEBUFFER_SPACE_OFFSET 0x2100 /* start of SRAM */ /* Total bits in a word */ #define BITS_PER_WORD 32 /* Size of queue buffer space */ #define IX_QMGR_QUE_BUFFER_SPACE_SIZE 0x1F00 /* * This macro will return the address of the access register for the * queue specified by qId */ #define IX_QMGR_Q_ACCESS_ADDR_GET(qId)\ (((qId) * (IX_QMGR_QUEACC_SIZE * 
sizeof(uint32_t)))\ + IX_QMGR_QUEACC0_OFFSET) /* * Bit location of bit-3 of INT0SRCSELREG0 register to enabled * sticky interrupt register. */ #define IX_QMGR_INT0SRCSELREG0_BIT3 3 /* * These defines are the bit offsets of the various fields of * the queue configuration register. */ #if 0 #define IX_QMGR_Q_CONFIG_WRPTR_OFFSET 0x00 #define IX_QMGR_Q_CONFIG_RDPTR_OFFSET 0x07 #define IX_QMGR_Q_CONFIG_BADDR_OFFSET 0x0E #define IX_QMGR_Q_CONFIG_ESIZE_OFFSET 0x16 #define IX_QMGR_Q_CONFIG_BSIZE_OFFSET 0x18 #define IX_QMGR_Q_CONFIG_NE_OFFSET 0x1A #define IX_QMGR_Q_CONFIG_NF_OFFSET 0x1D #define IX_QMGR_NE_NF_CLEAR_MASK 0x03FFFFFF #define IX_QMGR_NE_MASK 0x7 #define IX_QMGR_NF_MASK 0x7 #define IX_QMGR_SIZE_MASK 0x3 #define IX_QMGR_ENTRY_SIZE_MASK 0x3 #define IX_QMGR_BADDR_MASK 0x003FC000 #define IX_QMGR_RDPTR_MASK 0x7F #define IX_QMGR_WRPTR_MASK 0x7F #define IX_QMGR_RDWRPTR_MASK 0x00003FFF #else #define IX_QMGR_Q_CONFIG_WRPTR_OFFSET 0 #define IX_QMGR_WRPTR_MASK 0x7F #define IX_QMGR_Q_CONFIG_RDPTR_OFFSET 7 #define IX_QMGR_RDPTR_MASK 0x7F #define IX_QMGR_Q_CONFIG_BADDR_OFFSET 14 #define IX_QMGR_BADDR_MASK 0x3FC000 /* XXX not used */ #define IX_QMGR_Q_CONFIG_ESIZE_OFFSET 22 #define IX_QMGR_ENTRY_SIZE_MASK 0x3 #define IX_QMGR_Q_CONFIG_BSIZE_OFFSET 24 #define IX_QMGR_SIZE_MASK 0x3 #define IX_QMGR_Q_CONFIG_NE_OFFSET 26 #define IX_QMGR_NE_MASK 0x7 #define IX_QMGR_Q_CONFIG_NF_OFFSET 29 #define IX_QMGR_NF_MASK 0x7 #define IX_QMGR_RDWRPTR_MASK 0x00003FFF #define IX_QMGR_NE_NF_CLEAR_MASK 0x03FFFFFF #endif #define IX_QMGR_BASE_ADDR_16_WORD_ALIGN 64 #define IX_QMGR_BASE_ADDR_16_WORD_SHIFT 6 #define IX_QMGR_AQM_ADDRESS_SPACE_SIZE_IN_WORDS 0x1000 /* Base address of AQM SRAM */ #define IX_QMGR_AQM_SRAM_BASE_ADDRESS_OFFSET \ ((IX_QMGR_QUECONFIG_BASE_OFFSET) + (IX_QMGR_QUECONFIG_SIZE)) /* Min buffer size used for generating buffer size in QUECONFIG */ #define IX_QMGR_MIN_BUFFER_SIZE 16 /* Reset values of QMgr hardware registers */ #define IX_QMGR_QUELOWSTAT_RESET_VALUE 0x33333333 #define IX_QMGR_QUEUOSTAT_RESET_VALUE 0x00000000 #define IX_QMGR_QUEUPPSTAT0_RESET_VALUE 0xFFFFFFFF #define IX_QMGR_QUEUPPSTAT1_RESET_VALUE 0x00000000 #define IX_QMGR_INT0SRCSELREG_RESET_VALUE 0x00000000 #define IX_QMGR_QUEIEREG_RESET_VALUE 0x00000000 #define IX_QMGR_QINTREG_RESET_VALUE 0xFFFFFFFF #define IX_QMGR_QUECONFIG_RESET_VALUE 0x00000000 #define IX_QMGR_QUELOWSTAT_BITS_PER_Q \ (BITS_PER_WORD/IX_QMGR_QUELOWSTAT_NUM_QUE_PER_WORD) #define IX_QMGR_QUELOWSTAT_QID_MASK 0x7 #define IX_QMGR_Q_CONFIG_ADDR_GET(qId)\ (((qId) * sizeof(uint32_t)) + IX_QMGR_QUECONFIG_BASE_OFFSET) #define IX_QMGR_ENTRY1_OFFSET 0 #define IX_QMGR_ENTRY2_OFFSET 1 #define IX_QMGR_ENTRY4_OFFSET 3 +typedef void qconfig_hand_t(int, void *); + int ixpqmgr_qconfig(int qId, int qSizeInWords, int ne, int nf, int srcSel, - void (*cb)(int, void *), void *cbarg); + qconfig_hand_t *cb, void *cbarg); int ixpqmgr_qwrite(int qId, uint32_t entry); int ixpqmgr_qread(int qId, uint32_t *entry); int ixpqmgr_qreadm(int qId, uint32_t n, uint32_t *p); uint32_t ixpqmgr_getqstatus(int qId); uint32_t ixpqmgr_getqconfig(int qId); void ixpqmgr_notify_enable(int qId, int srcSel); void ixpqmgr_notify_disable(int qId); void ixpqmgr_dump(void); #endif /* ARM_XSCALE_IXP425_QMGR_H */ Index: head/sys/dev/bge/if_bge.c =================================================================== --- head/sys/dev/bge/if_bge.c (revision 193095) +++ head/sys/dev/bge/if_bge.c (revision 193096) @@ -1,4829 +1,4833 @@ /*- * Copyright (c) 2001 Wind River Systems * Copyright (c) 1997, 1998, 1999, 2001 * Bill Paul . 
All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Broadcom BCM570x family gigabit ethernet driver for FreeBSD. * * The Broadcom BCM5700 is based on technology originally developed by * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has * two on-board MIPS R4000 CPUs and can have as much as 16MB of external * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo * frames, highly configurable RX filtering, and 16 RX and TX queues * (which, along with RX filter rules, can be used for QOS applications). * Other features, such as TCP segmentation, may be available as part * of value-added firmware updates. Unlike the Tigon I and Tigon II, * firmware images can be stored in hardware and need not be compiled * into the driver. * * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will * function in a 32-bit/64-bit 33/66Mhz bus, or a 64-bit/133Mhz bus. * * The BCM5701 is a single-chip solution incorporating both the BCM5700 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 * does not support external SSRAM. * * Broadcom also produces a variation of the BCM5700 under the "Altima" * brand name, which is functionally similar but lacks PCI-X support. * * Without external SSRAM, you can only have at most 4 TX rings, * and the use of the mini RX ring is disabled. This seems to imply * that these features are simply not available on the BCM5701. As a * result, this driver does not implement any support for the mini RX * ring. 
*/ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "miidevs.h" #include #ifdef __sparc64__ #include #include #include #include #endif #include #include #include #define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) #define ETHER_MIN_NOPAD (ETHER_MIN_LEN - ETHER_CRC_LEN) /* i.e., 60 */ MODULE_DEPEND(bge, pci, 1, 1, 1); MODULE_DEPEND(bge, ether, 1, 1, 1); MODULE_DEPEND(bge, miibus, 1, 1, 1); /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* * Various supported device vendors/types and their names. Note: the * spec seems to indicate that the hardware still has Alteon's vendor * ID burned into it, though it will always be overriden by the vendor * ID in the EEPROM. Just to be safe, we cover all possibilities. */ static const struct bge_type { uint16_t bge_vid; uint16_t bge_did; } bge_devs[] = { { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5700 }, { ALTEON_VENDORID, ALTEON_DEVICEID_BCM5701 }, { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000 }, { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002 }, { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100 }, { APPLE_VENDORID, APPLE_DEVICE_BCM5701 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5700 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5701 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5702 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5702_ALT }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5703 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5703_ALT }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S_ALT }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705F }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5714S }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5715 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5715S }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5720 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5721 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5722 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5750 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5751 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5751F }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5752 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5752M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5753 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5753F }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5753M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5754 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5754M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5755 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5755M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5780 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5780S }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5781 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5782 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5786 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5787 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5787M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5788 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5789 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5901 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2 }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5903M }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5906 }, { BCOM_VENDORID, 
BCOM_DEVICEID_BCM5906M }, { SK_VENDORID, SK_DEVICEID_ALTIMA }, { TC_VENDORID, TC_DEVICEID_3C996 }, { 0, 0 } }; static const struct bge_vendor { uint16_t v_id; const char *v_name; } bge_vendors[] = { { ALTEON_VENDORID, "Alteon" }, { ALTIMA_VENDORID, "Altima" }, { APPLE_VENDORID, "Apple" }, { BCOM_VENDORID, "Broadcom" }, { SK_VENDORID, "SysKonnect" }, { TC_VENDORID, "3Com" }, { 0, NULL } }; static const struct bge_revision { uint32_t br_chipid; const char *br_name; } bge_revisions[] = { { BGE_CHIPID_BCM5700_A0, "BCM5700 A0" }, { BGE_CHIPID_BCM5700_A1, "BCM5700 A1" }, { BGE_CHIPID_BCM5700_B0, "BCM5700 B0" }, { BGE_CHIPID_BCM5700_B1, "BCM5700 B1" }, { BGE_CHIPID_BCM5700_B2, "BCM5700 B2" }, { BGE_CHIPID_BCM5700_B3, "BCM5700 B3" }, { BGE_CHIPID_BCM5700_ALTIMA, "BCM5700 Altima" }, { BGE_CHIPID_BCM5700_C0, "BCM5700 C0" }, { BGE_CHIPID_BCM5701_A0, "BCM5701 A0" }, { BGE_CHIPID_BCM5701_B0, "BCM5701 B0" }, { BGE_CHIPID_BCM5701_B2, "BCM5701 B2" }, { BGE_CHIPID_BCM5701_B5, "BCM5701 B5" }, { BGE_CHIPID_BCM5703_A0, "BCM5703 A0" }, { BGE_CHIPID_BCM5703_A1, "BCM5703 A1" }, { BGE_CHIPID_BCM5703_A2, "BCM5703 A2" }, { BGE_CHIPID_BCM5703_A3, "BCM5703 A3" }, { BGE_CHIPID_BCM5703_B0, "BCM5703 B0" }, { BGE_CHIPID_BCM5704_A0, "BCM5704 A0" }, { BGE_CHIPID_BCM5704_A1, "BCM5704 A1" }, { BGE_CHIPID_BCM5704_A2, "BCM5704 A2" }, { BGE_CHIPID_BCM5704_A3, "BCM5704 A3" }, { BGE_CHIPID_BCM5704_B0, "BCM5704 B0" }, { BGE_CHIPID_BCM5705_A0, "BCM5705 A0" }, { BGE_CHIPID_BCM5705_A1, "BCM5705 A1" }, { BGE_CHIPID_BCM5705_A2, "BCM5705 A2" }, { BGE_CHIPID_BCM5705_A3, "BCM5705 A3" }, { BGE_CHIPID_BCM5750_A0, "BCM5750 A0" }, { BGE_CHIPID_BCM5750_A1, "BCM5750 A1" }, { BGE_CHIPID_BCM5750_A3, "BCM5750 A3" }, { BGE_CHIPID_BCM5750_B0, "BCM5750 B0" }, { BGE_CHIPID_BCM5750_B1, "BCM5750 B1" }, { BGE_CHIPID_BCM5750_C0, "BCM5750 C0" }, { BGE_CHIPID_BCM5750_C1, "BCM5750 C1" }, { BGE_CHIPID_BCM5750_C2, "BCM5750 C2" }, { BGE_CHIPID_BCM5714_A0, "BCM5714 A0" }, { BGE_CHIPID_BCM5752_A0, "BCM5752 A0" }, { BGE_CHIPID_BCM5752_A1, "BCM5752 A1" }, { BGE_CHIPID_BCM5752_A2, "BCM5752 A2" }, { BGE_CHIPID_BCM5714_B0, "BCM5714 B0" }, { BGE_CHIPID_BCM5714_B3, "BCM5714 B3" }, { BGE_CHIPID_BCM5715_A0, "BCM5715 A0" }, { BGE_CHIPID_BCM5715_A1, "BCM5715 A1" }, { BGE_CHIPID_BCM5715_A3, "BCM5715 A3" }, { BGE_CHIPID_BCM5755_A0, "BCM5755 A0" }, { BGE_CHIPID_BCM5755_A1, "BCM5755 A1" }, { BGE_CHIPID_BCM5755_A2, "BCM5755 A2" }, { BGE_CHIPID_BCM5722_A0, "BCM5722 A0" }, /* 5754 and 5787 share the same ASIC ID */ { BGE_CHIPID_BCM5787_A0, "BCM5754/5787 A0" }, { BGE_CHIPID_BCM5787_A1, "BCM5754/5787 A1" }, { BGE_CHIPID_BCM5787_A2, "BCM5754/5787 A2" }, { BGE_CHIPID_BCM5906_A1, "BCM5906 A1" }, { BGE_CHIPID_BCM5906_A2, "BCM5906 A2" }, { 0, NULL } }; /* * Some defaults for major revisions, so that newer steppings * that we don't know about have a shot at working. 
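 *
 * (Editor's note: bge_lookup_rev() first searches bge_revisions[] for an
 * exact chip id; when a new stepping is absent it falls back to this
 * table, matching on the ASIC major revision alone so the device still
 * attaches with a generic "unknown BCMxxxx" description.)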
*/ static const struct bge_revision bge_majorrevs[] = { { BGE_ASICREV_BCM5700, "unknown BCM5700" }, { BGE_ASICREV_BCM5701, "unknown BCM5701" }, { BGE_ASICREV_BCM5703, "unknown BCM5703" }, { BGE_ASICREV_BCM5704, "unknown BCM5704" }, { BGE_ASICREV_BCM5705, "unknown BCM5705" }, { BGE_ASICREV_BCM5750, "unknown BCM5750" }, { BGE_ASICREV_BCM5714_A0, "unknown BCM5714" }, { BGE_ASICREV_BCM5752, "unknown BCM5752" }, { BGE_ASICREV_BCM5780, "unknown BCM5780" }, { BGE_ASICREV_BCM5714, "unknown BCM5714" }, { BGE_ASICREV_BCM5755, "unknown BCM5755" }, /* 5754 and 5787 share the same ASIC ID */ { BGE_ASICREV_BCM5787, "unknown BCM5754/5787" }, { BGE_ASICREV_BCM5906, "unknown BCM5906" }, { 0, NULL } }; #define BGE_IS_JUMBO_CAPABLE(sc) ((sc)->bge_flags & BGE_FLAG_JUMBO) #define BGE_IS_5700_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5700_FAMILY) #define BGE_IS_5705_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_5705_PLUS) #define BGE_IS_5714_FAMILY(sc) ((sc)->bge_flags & BGE_FLAG_5714_FAMILY) #define BGE_IS_575X_PLUS(sc) ((sc)->bge_flags & BGE_FLAG_575X_PLUS) const struct bge_revision * bge_lookup_rev(uint32_t); const struct bge_vendor * bge_lookup_vendor(uint16_t); typedef int (*bge_eaddr_fcn_t)(struct bge_softc *, uint8_t[]); static int bge_probe(device_t); static int bge_attach(device_t); static int bge_detach(device_t); static int bge_suspend(device_t); static int bge_resume(device_t); static void bge_release_resources(struct bge_softc *); static void bge_dma_map_addr(void *, bus_dma_segment_t *, int, int); static int bge_dma_alloc(device_t); static void bge_dma_free(struct bge_softc *); static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]); static int bge_get_eaddr_mem(struct bge_softc *, uint8_t[]); static int bge_get_eaddr_nvram(struct bge_softc *, uint8_t[]); static int bge_get_eaddr_eeprom(struct bge_softc *, uint8_t[]); static int bge_get_eaddr(struct bge_softc *, uint8_t[]); static void bge_txeof(struct bge_softc *); -static void bge_rxeof(struct bge_softc *); +static int bge_rxeof(struct bge_softc *); static void bge_asf_driver_up (struct bge_softc *); static void bge_tick(void *); static void bge_stats_update(struct bge_softc *); static void bge_stats_update_regs(struct bge_softc *); static int bge_encap(struct bge_softc *, struct mbuf **, uint32_t *); static void bge_intr(void *); static void bge_start_locked(struct ifnet *); static void bge_start(struct ifnet *); static int bge_ioctl(struct ifnet *, u_long, caddr_t); static void bge_init_locked(struct bge_softc *); static void bge_init(void *); static void bge_stop(struct bge_softc *); static void bge_watchdog(struct bge_softc *); static int bge_shutdown(device_t); static int bge_ifmedia_upd_locked(struct ifnet *); static int bge_ifmedia_upd(struct ifnet *); static void bge_ifmedia_sts(struct ifnet *, struct ifmediareq *); static uint8_t bge_nvram_getbyte(struct bge_softc *, int, uint8_t *); static int bge_read_nvram(struct bge_softc *, caddr_t, int, int); static uint8_t bge_eeprom_getbyte(struct bge_softc *, int, uint8_t *); static int bge_read_eeprom(struct bge_softc *, caddr_t, int, int); static void bge_setpromisc(struct bge_softc *); static void bge_setmulti(struct bge_softc *); static void bge_setvlan(struct bge_softc *); static int bge_newbuf_std(struct bge_softc *, int, struct mbuf *); static int bge_newbuf_jumbo(struct bge_softc *, int, struct mbuf *); static int bge_init_rx_ring_std(struct bge_softc *); static void bge_free_rx_ring_std(struct bge_softc *); static int bge_init_rx_ring_jumbo(struct bge_softc *); static void 
bge_free_rx_ring_jumbo(struct bge_softc *); static void bge_free_tx_ring(struct bge_softc *); static int bge_init_tx_ring(struct bge_softc *); static int bge_chipinit(struct bge_softc *); static int bge_blockinit(struct bge_softc *); static int bge_has_eaddr(struct bge_softc *); static uint32_t bge_readmem_ind(struct bge_softc *, int); static void bge_writemem_ind(struct bge_softc *, int, int); static void bge_writembx(struct bge_softc *, int, int); #ifdef notdef static uint32_t bge_readreg_ind(struct bge_softc *, int); #endif static void bge_writemem_direct(struct bge_softc *, int, int); static void bge_writereg_ind(struct bge_softc *, int, int); static void bge_set_max_readrq(struct bge_softc *, int); static int bge_miibus_readreg(device_t, int, int); static int bge_miibus_writereg(device_t, int, int, int); static void bge_miibus_statchg(device_t); #ifdef DEVICE_POLLING -static void bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); +static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); #endif #define BGE_RESET_START 1 #define BGE_RESET_STOP 2 static void bge_sig_post_reset(struct bge_softc *, int); static void bge_sig_legacy(struct bge_softc *, int); static void bge_sig_pre_reset(struct bge_softc *, int); static int bge_reset(struct bge_softc *); static void bge_link_upd(struct bge_softc *); /* * The BGE_REGISTER_DEBUG option is only for low-level debugging. It may * leak information to untrusted users. It is also known to cause alignment * traps on certain architectures. */ #ifdef BGE_REGISTER_DEBUG static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS); static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS); static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS); #endif static void bge_add_sysctls(struct bge_softc *); static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS); static device_method_t bge_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bge_probe), DEVMETHOD(device_attach, bge_attach), DEVMETHOD(device_detach, bge_detach), DEVMETHOD(device_shutdown, bge_shutdown), DEVMETHOD(device_suspend, bge_suspend), DEVMETHOD(device_resume, bge_resume), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, bge_miibus_readreg), DEVMETHOD(miibus_writereg, bge_miibus_writereg), DEVMETHOD(miibus_statchg, bge_miibus_statchg), { 0, 0 } }; static driver_t bge_driver = { "bge", bge_methods, sizeof(struct bge_softc) }; static devclass_t bge_devclass; DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 0); DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0); static int bge_allow_asf = 1; TUNABLE_INT("hw.bge.allow_asf", &bge_allow_asf); SYSCTL_NODE(_hw, OID_AUTO, bge, CTLFLAG_RD, 0, "BGE driver parameters"); SYSCTL_INT(_hw_bge, OID_AUTO, allow_asf, CTLFLAG_RD, &bge_allow_asf, 0, "Allow ASF mode if available"); #define SPARC64_BLADE_1500_MODEL "SUNW,Sun-Blade-1500" #define SPARC64_BLADE_1500_PATH_BGE "/pci@1f,700000/network@2" #define SPARC64_BLADE_2500_MODEL "SUNW,Sun-Blade-2500" #define SPARC64_BLADE_2500_PATH_BGE "/pci@1c,600000/network@3" #define SPARC64_OFW_SUBVENDOR "subsystem-vendor-id" static int bge_has_eaddr(struct bge_softc *sc) { #ifdef __sparc64__ char buf[sizeof(SPARC64_BLADE_1500_PATH_BGE)]; device_t dev; uint32_t subvendor; dev = sc->bge_dev; /* * The on-board BGEs found in sun4u machines aren't fitted with * an EEPROM which means that we have to obtain the MAC address * via OFW and that some tests will always fail. 
We distinguish * such BGEs by the subvendor ID, which also has to be obtained * from OFW instead of the PCI configuration space as the latter * indicates Broadcom as the subvendor of the netboot interface. * For early Blade 1500 and 2500 we even have to check the OFW * device path as the subvendor ID always defaults to Broadcom * there. */ if (OF_getprop(ofw_bus_get_node(dev), SPARC64_OFW_SUBVENDOR, &subvendor, sizeof(subvendor)) == sizeof(subvendor) && subvendor == SUN_VENDORID) return (0); memset(buf, 0, sizeof(buf)); if (OF_package_to_path(ofw_bus_get_node(dev), buf, sizeof(buf)) > 0) { if (strcmp(sparc64_model, SPARC64_BLADE_1500_MODEL) == 0 && strcmp(buf, SPARC64_BLADE_1500_PATH_BGE) == 0) return (0); if (strcmp(sparc64_model, SPARC64_BLADE_2500_MODEL) == 0 && strcmp(buf, SPARC64_BLADE_2500_PATH_BGE) == 0) return (0); } #endif return (1); } static uint32_t bge_readmem_ind(struct bge_softc *sc, int off) { device_t dev; uint32_t val; dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); val = pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4); pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); return (val); } static void bge_writemem_ind(struct bge_softc *sc, int off, int val) { device_t dev; dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4); pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, 0, 4); } /* * PCI Express only */ static void bge_set_max_readrq(struct bge_softc *sc, int expr_ptr) { device_t dev; uint16_t val; KASSERT((sc->bge_flags & BGE_FLAG_PCIE) && expr_ptr != 0, ("%s: not applicable", __func__)); dev = sc->bge_dev; val = pci_read_config(dev, expr_ptr + BGE_PCIE_DEVCTL, 2); if ((val & BGE_PCIE_DEVCTL_MAX_READRQ_MASK) != BGE_PCIE_DEVCTL_MAX_READRQ_4096) { if (bootverbose) device_printf(dev, "adjust device control 0x%04x ", val); val &= ~BGE_PCIE_DEVCTL_MAX_READRQ_MASK; val |= BGE_PCIE_DEVCTL_MAX_READRQ_4096; pci_write_config(dev, expr_ptr + BGE_PCIE_DEVCTL, val, 2); if (bootverbose) printf("-> 0x%04x\n", val); } } #ifdef notdef static uint32_t bge_readreg_ind(struct bge_softc *sc, int off) { device_t dev; dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); return (pci_read_config(dev, BGE_PCI_REG_DATA, 4)); } #endif static void bge_writereg_ind(struct bge_softc *sc, int off, int val) { device_t dev; dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); pci_write_config(dev, BGE_PCI_REG_DATA, val, 4); } static void bge_writemem_direct(struct bge_softc *sc, int off, int val) { CSR_WRITE_4(sc, off, val); } static void bge_writembx(struct bge_softc *sc, int off, int val) { if (sc->bge_asicrev == BGE_ASICREV_BCM5906) off += BGE_LPMBX_IRQ0_HI - BGE_MBX_IRQ0_HI; CSR_WRITE_4(sc, off, val); } /* * Map a single buffer address. */ static void bge_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct bge_dmamap_arg *ctx; if (error) return; ctx = arg; if (nseg > ctx->bge_maxsegs) { ctx->bge_maxsegs = 0; return; } ctx->bge_busaddr = segs->ds_addr; } static uint8_t bge_nvram_getbyte(struct bge_softc *sc, int addr, uint8_t *dest) { uint32_t access, byte = 0; int i; /* Lock. */ CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_SET1); for (i = 0; i < 8000; i++) { if (CSR_READ_4(sc, BGE_NVRAM_SWARB) & BGE_NVRAMSWARB_GNT1) break; DELAY(20); } if (i == 8000) return (1); /* Enable access. 
	 */
	access = CSR_READ_4(sc, BGE_NVRAM_ACCESS);
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access | BGE_NVRAMACC_ENABLE);

	CSR_WRITE_4(sc, BGE_NVRAM_ADDR, addr & 0xfffffffc);
	CSR_WRITE_4(sc, BGE_NVRAM_CMD, BGE_NVRAM_READCMD);
	for (i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_NVRAM_CMD) & BGE_NVRAMCMD_DONE) {
			DELAY(10);
			break;
		}
	}

	if (i == BGE_TIMEOUT * 10) {
		if_printf(sc->bge_ifp, "nvram read timed out\n");
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_NVRAM_RDDATA);

	*dest = (bswap32(byte) >> ((addr % 4) * 8)) & 0xFF;

	/* Disable access. */
	CSR_WRITE_4(sc, BGE_NVRAM_ACCESS, access);

	/* Unlock. */
	CSR_WRITE_4(sc, BGE_NVRAM_SWARB, BGE_NVRAMSWARB_CLR1);
	CSR_READ_4(sc, BGE_NVRAM_SWARB);

	return (0);
}

/*
 * Read a sequence of bytes from NVRAM.
 */
static int
bge_read_nvram(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int err = 0, i;
	uint8_t byte = 0;

	if (sc->bge_asicrev != BGE_ASICREV_BCM5906)
		return (1);

	for (i = 0; i < cnt; i++) {
		err = bge_nvram_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return (err ? 1 : 0);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.' The
 * BCM570x supports both the traditional bitbang interface and an
 * auto access interface for reading the EEPROM. We use the auto
 * access method.
 */
static uint8_t
bge_eeprom_getbyte(struct bge_softc *sc, int addr, uint8_t *dest)
{
	int i;
	uint32_t byte = 0;

	/*
	 * Enable use of auto EEPROM access so we can avoid
	 * having to use the bitbang method.
	 */
	BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM);

	/* Reset the EEPROM, load the clock period. */
	CSR_WRITE_4(sc, BGE_EE_ADDR,
	    BGE_EEADDR_RESET | BGE_EEHALFCLK(BGE_HALFCLK_384SCL));
	DELAY(20);

	/* Issue the read EEPROM command. */
	CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr);

	/* Wait for completion */
	for(i = 0; i < BGE_TIMEOUT * 10; i++) {
		DELAY(10);
		if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE)
			break;
	}

	if (i == BGE_TIMEOUT * 10) {
		device_printf(sc->bge_dev, "EEPROM read timed out\n");
		return (1);
	}

	/* Get result. */
	byte = CSR_READ_4(sc, BGE_EE_DATA);

	*dest = (byte >> ((addr % 4) * 8)) & 0xFF;

	return (0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 */
static int
bge_read_eeprom(struct bge_softc *sc, caddr_t dest, int off, int cnt)
{
	int i, error = 0;
	uint8_t byte = 0;

	for (i = 0; i < cnt; i++) {
		error = bge_eeprom_getbyte(sc, off + i, &byte);
		if (error)
			break;
		*(dest + i) = byte;
	}

	return (error ? 1 : 0);
}

static int
bge_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bge_softc *sc;
	uint32_t val, autopoll;
	int i;

	sc = device_get_softc(dev);

	/*
	 * Broadcom's own driver always assumes the internal
	 * PHY is at GMII address 1. On some chips, the PHY responds
	 * to accesses at all addresses, which could cause us to
	 * bogusly attach the PHY 32 times at probe time. Always
	 * restricting the lookup to address 1 is simpler than
	 * trying to figure out which chip revisions should be
	 * special-cased.
	 */
	if (phy != 1)
		return (0);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg));

	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		val = CSR_READ_4(sc, BGE_MI_COMM);
		if (!(val & BGE_MICOMM_BUSY))
			break;
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev,
		    "PHY read timed out (phy %d, reg %d, val 0x%08x)\n",
		    phy, reg, val);
		val = 0;
		goto done;
	}

	DELAY(5);
	val = CSR_READ_4(sc, BGE_MI_COMM);

done:
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	if (val & BGE_MICOMM_READFAIL)
		return (0);

	return (val & 0xFFFF);
}

static int
bge_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bge_softc *sc;
	uint32_t autopoll;
	int i;

	sc = device_get_softc(dev);

	if (sc->bge_asicrev == BGE_ASICREV_BCM5906 &&
	    (reg == BRGPHY_MII_1000CTL || reg == BRGPHY_MII_AUXCTL))
		return (0);

	/* Reading with autopolling on may trigger PCI errors */
	autopoll = CSR_READ_4(sc, BGE_MI_MODE);
	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE | BGE_MICOMM_BUSY |
	    BGE_MIPHY(phy) | BGE_MIREG(reg) | val);

	for (i = 0; i < BGE_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) {
			DELAY(5);
			CSR_READ_4(sc, BGE_MI_COMM); /* dummy read */
			break;
		}
	}

	if (i == BGE_TIMEOUT) {
		device_printf(sc->bge_dev,
		    "PHY write timed out (phy %d, reg %d, val %d)\n",
		    phy, reg, val);
		return (0);
	}

	if (autopoll & BGE_MIMODE_AUTOPOLL) {
		BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL);
		DELAY(40);
	}

	return (0);
}

static void
bge_miibus_statchg(device_t dev)
{
	struct bge_softc *sc;
	struct mii_data *mii;
	sc = device_get_softc(dev);
	mii = device_get_softc(sc->bge_miibus);

	BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE);
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T)
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII);

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
	else
		BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX);
}

/*
 * Initialize a standard receive ring descriptor.
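 *
 * (Editor's note: when the chip lacks the RX alignment bug, the code
 * below calls m_adj(m, ETHER_ALIGN), shifting the payload 2 bytes so
 * that the 14-byte Ethernet header leaves the IP header 32-bit
 * aligned in the cluster.)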
 */
static int
bge_newbuf_std(struct bge_softc *sc, int i, struct mbuf *m)
{
	struct mbuf *m_new = NULL;
	struct bge_rx_bd *r;
	struct bge_dmamap_arg ctx;
	int error;

	if (m == NULL) {
		m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m_new == NULL)
			return (ENOBUFS);
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m_new, ETHER_ALIGN);
	sc->bge_cdata.bge_rx_std_chain[i] = m_new;
	r = &sc->bge_ldata.bge_rx_std_ring[i];
	ctx.bge_maxsegs = 1;
	ctx.sc = sc;
	error = bus_dmamap_load(sc->bge_cdata.bge_mtag,
	    sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *),
	    m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);
	if (error || ctx.bge_maxsegs == 0) {
		if (m == NULL) {
			sc->bge_cdata.bge_rx_std_chain[i] = NULL;
			m_freem(m_new);
		}
		return (ENOMEM);
	}
	r->bge_addr.bge_addr_lo = BGE_ADDR_LO(ctx.bge_busaddr);
	r->bge_addr.bge_addr_hi = BGE_ADDR_HI(ctx.bge_busaddr);
	r->bge_flags = BGE_RXBDFLAG_END;
	r->bge_len = m_new->m_len;
	r->bge_idx = i;

	bus_dmamap_sync(sc->bge_cdata.bge_mtag,
	    sc->bge_cdata.bge_rx_std_dmamap[i],
	    BUS_DMASYNC_PREREAD);

	return (0);
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a 9k jumbo mbuf cluster (MJUM9BYTES) to back the descriptor.
 */
static int
bge_newbuf_jumbo(struct bge_softc *sc, int i, struct mbuf *m)
{
	bus_dma_segment_t segs[BGE_NSEG_JUMBO];
	struct bge_extrx_bd *r;
	struct mbuf *m_new = NULL;
	int nsegs;
	int error;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);

		m_cljget(m_new, M_DONTWAIT, MJUM9BYTES);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MJUM9BYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	if ((sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) == 0)
		m_adj(m_new, ETHER_ALIGN);

	error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag_jumbo,
	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
	    m_new, segs, &nsegs, BUS_DMA_NOWAIT);
	if (error) {
		if (m == NULL)
			m_freem(m_new);
		return (error);
	}
	sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new;

	/*
	 * Fill in the extended RX buffer descriptor.
	 */
	r = &sc->bge_ldata.bge_rx_jumbo_ring[i];
	r->bge_flags = BGE_RXBDFLAG_JUMBO_RING | BGE_RXBDFLAG_END;
	r->bge_idx = i;
	r->bge_len3 = r->bge_len2 = r->bge_len1 = 0;
	switch (nsegs) {
	case 4:
		r->bge_addr3.bge_addr_lo = BGE_ADDR_LO(segs[3].ds_addr);
		r->bge_addr3.bge_addr_hi = BGE_ADDR_HI(segs[3].ds_addr);
		r->bge_len3 = segs[3].ds_len;
	case 3:
		r->bge_addr2.bge_addr_lo = BGE_ADDR_LO(segs[2].ds_addr);
		r->bge_addr2.bge_addr_hi = BGE_ADDR_HI(segs[2].ds_addr);
		r->bge_len2 = segs[2].ds_len;
	case 2:
		r->bge_addr1.bge_addr_lo = BGE_ADDR_LO(segs[1].ds_addr);
		r->bge_addr1.bge_addr_hi = BGE_ADDR_HI(segs[1].ds_addr);
		r->bge_len1 = segs[1].ds_len;
	case 1:
		r->bge_addr0.bge_addr_lo = BGE_ADDR_LO(segs[0].ds_addr);
		r->bge_addr0.bge_addr_hi = BGE_ADDR_HI(segs[0].ds_addr);
		r->bge_len0 = segs[0].ds_len;
		break;
	default:
		panic("%s: %d segments\n", __func__, nsegs);
	}

	bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo,
	    sc->bge_cdata.bge_rx_jumbo_dmamap[i],
	    BUS_DMASYNC_PREREAD);

	return (0);
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB of memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
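 *
 * (Editor's arithmetic: 512 entries * 2048 bytes = 1MB; filling only
 * BGE_SSLOTS (256) entries halves the standing allocation to 512KB.)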
/* * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, * that's 1MB of memory, which is a lot. For now, we fill only the first * 256 ring entries and hope that our CPU is fast enough to keep up with * the NIC. */ static int bge_init_rx_ring_std(struct bge_softc *sc) { int i; for (i = 0; i < BGE_SSLOTS; i++) { if (bge_newbuf_std(sc, i, NULL) == ENOBUFS) return (ENOBUFS); } bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); sc->bge_std = i - 1; bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); return (0); } static void bge_free_rx_ring_std(struct bge_softc *sc) { int i; for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { bus_dmamap_sync(sc->bge_cdata.bge_mtag, sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->bge_cdata.bge_mtag, sc->bge_cdata.bge_rx_std_dmamap[i]); m_freem(sc->bge_cdata.bge_rx_std_chain[i]); sc->bge_cdata.bge_rx_std_chain[i] = NULL; } bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i], sizeof(struct bge_rx_bd)); } } static int bge_init_rx_ring_jumbo(struct bge_softc *sc) { struct bge_rcb *rcb; int i; for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS) return (ENOBUFS); } bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); sc->bge_jumbo = i - 1; rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD); CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); return (0); } static void bge_free_rx_ring_jumbo(struct bge_softc *sc) { int i; for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i]); m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; } bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i], sizeof(struct bge_extrx_bd)); } } static void bge_free_tx_ring(struct bge_softc *sc) { int i; if (sc->bge_ldata.bge_tx_ring == NULL) return; for (i = 0; i < BGE_TX_RING_CNT; i++) { if (sc->bge_cdata.bge_tx_chain[i] != NULL) { bus_dmamap_sync(sc->bge_cdata.bge_mtag, sc->bge_cdata.bge_tx_dmamap[i], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->bge_cdata.bge_mtag, sc->bge_cdata.bge_tx_dmamap[i]); m_freem(sc->bge_cdata.bge_tx_chain[i]); sc->bge_cdata.bge_tx_chain[i] = NULL; } bzero((char *)&sc->bge_ldata.bge_tx_ring[i], sizeof(struct bge_tx_bd)); } } static int bge_init_tx_ring(struct bge_softc *sc) { sc->bge_txcnt = 0; sc->bge_tx_saved_considx = 0; /* Initialize transmit producer index for host-memory send ring. */ sc->bge_tx_prodidx = 0; bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); /* 5700 b2 errata */ if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, sc->bge_tx_prodidx); /* NIC-memory send ring not used; initialize to zero. */ bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); /* 5700 b2 errata */ if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) bge_writembx(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); return (0); } static void bge_setpromisc(struct bge_softc *sc) { struct ifnet *ifp; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; /* Enable or disable promiscuous mode as needed. */
if (ifp->if_flags & IFF_PROMISC) BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); else BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); } static void bge_setmulti(struct bge_softc *sc) { struct ifnet *ifp; struct ifmultiaddr *ifma; uint32_t hashes[4] = { 0, 0, 0, 0 }; int h, i; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { for (i = 0; i < 4; i++) CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF); return; } /* First, zot all the existing filters. */ for (i = 0; i < 4; i++) CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0); /* Now program new ones. */ IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = ether_crc32_le(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F; hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); } IF_ADDR_UNLOCK(ifp); for (i = 0; i < 4; i++) CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); } static void bge_setvlan(struct bge_softc *sc) { struct ifnet *ifp; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; /* Enable or disable VLAN tag stripping as needed. */ if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG); else BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_KEEP_VLAN_DIAG); } static void bge_sig_pre_reset(struct bge_softc *sc, int type) { /* * Some chips don't like this, so only do this if ASF is enabled. */ if (sc->bge_asf_mode) bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { switch (type) { case BGE_RESET_START: bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */ break; case BGE_RESET_STOP: bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */ break; } } } static void bge_sig_post_reset(struct bge_softc *sc, int type) { if (sc->bge_asf_mode & ASF_NEW_HANDSHAKE) { switch (type) { case BGE_RESET_START: bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000001); /* START DONE */ break; case BGE_RESET_STOP: bge_writemem_ind(sc, BGE_SDI_STATUS, 0x80000002); break; } } } static void bge_sig_legacy(struct bge_softc *sc, int type) { if (sc->bge_asf_mode) { switch (type) { case BGE_RESET_START: bge_writemem_ind(sc, BGE_SDI_STATUS, 0x1); /* START */ break; case BGE_RESET_STOP: bge_writemem_ind(sc, BGE_SDI_STATUS, 0x2); /* UNLOAD */ break; } } } void bge_stop_fw(struct bge_softc *); void bge_stop_fw(struct bge_softc *sc) { int i; if (sc->bge_asf_mode) { bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_PAUSE); CSR_WRITE_4(sc, BGE_CPU_EVENT, CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14)); for (i = 0; i < 100; i++) { if (!(CSR_READ_4(sc, BGE_CPU_EVENT) & (1 << 14))) break; DELAY(10); } } } /* * Do endian, PCI and DMA initialization. */ static int bge_chipinit(struct bge_softc *sc) { uint32_t dma_rw_ctl; int i; /* Set endianness before we access any non-PCI registers. */ pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_INIT, 4); /* Clear the MAC control register */ CSR_WRITE_4(sc, BGE_MAC_MODE, 0); /* * Clear the MAC statistics block in the NIC's * internal memory. */ for (i = BGE_STATS_BLOCK; i < BGE_STATS_BLOCK_END + 1; i += sizeof(uint32_t)) BGE_MEMWIN_WRITE(sc, i, 0); for (i = BGE_STATUS_BLOCK; i < BGE_STATUS_BLOCK_END + 1; i += sizeof(uint32_t)) BGE_MEMWIN_WRITE(sc, i, 0); /* * Set up the PCI DMA control register. */ dma_rw_ctl = BGE_PCIDMARWCTL_RD_CMD_SHIFT(6) | BGE_PCIDMARWCTL_WR_CMD_SHIFT(7); if (sc->bge_flags & BGE_FLAG_PCIE) { /* Read watermark not used, 128 bytes for write. */
dma_rw_ctl |= BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); } else if (sc->bge_flags & BGE_FLAG_PCIX) { if (BGE_IS_5714_FAMILY(sc)) { /* 256 bytes for read and write. */ dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(2) | BGE_PCIDMARWCTL_WR_WAT_SHIFT(2); dma_rw_ctl |= (sc->bge_asicrev == BGE_ASICREV_BCM5780) ? BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL : BGE_PCIDMARWCTL_ONEDMA_ATONCE_LOCAL; } else if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { /* 1536 bytes for read, 384 bytes for write. */ dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) | BGE_PCIDMARWCTL_WR_WAT_SHIFT(3); } else { /* 384 bytes for read and write. */ dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(3) | BGE_PCIDMARWCTL_WR_WAT_SHIFT(3) | 0x0F; } if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || sc->bge_asicrev == BGE_ASICREV_BCM5704) { uint32_t tmp; /* Set ONE_DMA_AT_ONCE for hardware workaround. */ tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1F; if (tmp == 6 || tmp == 7) dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE_GLOBAL; /* Set PCI-X DMA write workaround. */ dma_rw_ctl |= BGE_PCIDMARWCTL_ASRT_ALL_BE; } } else { /* Conventional PCI bus: 256 bytes for read and write. */ dma_rw_ctl |= BGE_PCIDMARWCTL_RD_WAT_SHIFT(7) | BGE_PCIDMARWCTL_WR_WAT_SHIFT(7); if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && sc->bge_asicrev != BGE_ASICREV_BCM5750) dma_rw_ctl |= 0x0F; } if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || sc->bge_asicrev == BGE_ASICREV_BCM5701) dma_rw_ctl |= BGE_PCIDMARWCTL_USE_MRM | BGE_PCIDMARWCTL_ASRT_ALL_BE; if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || sc->bge_asicrev == BGE_ASICREV_BCM5704) dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA; pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4); /* * Set up general mode register. */ CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS | BGE_MODECTL_MAC_ATTN_INTR | BGE_MODECTL_HOST_SEND_BDS | BGE_MODECTL_TX_NO_PHDR_CSUM); /* * The BCM5701 B5 has a bug causing data corruption when using * 64-bit DMA reads, which can be terminated early and then * completed later as 32-bit accesses, in combination with * certain bridges. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && sc->bge_chipid == BGE_CHIPID_BCM5701_B5) BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_FORCE_PCI32); /* * Tell the firmware the driver is running. */ if (sc->bge_asf_mode & ASF_STACKUP) BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); /* * Disable memory write invalidate. Apparently it is not supported * properly by these devices. Also ensure that INTx isn't disabled, * as these chips need it even when using MSI. */ PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_INTxDIS | PCIM_CMD_MWIEN, 4); /* Set the timer prescaler (always 66MHz) */ CSR_WRITE_4(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ); /* XXX: The Linux tg3 driver does this at the start of brgphy_reset. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { DELAY(40); /* XXX */ /* Put PHY into ready state */ BGE_CLRBIT(sc, BGE_MISC_CFG, BGE_MISCCFG_EPHY_IDDQ); CSR_READ_4(sc, BGE_MISC_CFG); /* Flush */ DELAY(40); } return (0); } static int bge_blockinit(struct bge_softc *sc) { struct bge_rcb *rcb; bus_size_t vrcb; bge_hostaddr taddr; uint32_t val; int i; /* * Initialize the memory window pointer register so that * we can access the first 32K of internal NIC RAM. This will * allow us to set up the TX send ring RCBs and the RX return * ring RCBs, plus other things which live in NIC memory. */ CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0); /* Note: the BCM5704 has a smaller mbuf space than other chips.
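The pool lengths configured just below reflect this: 0x10000 bytes on the BCM5704 versus 0x18000 on the other pre-5705 chips.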
*/ if (!(BGE_IS_5705_PLUS(sc))) { /* Configure mbuf memory pool */ CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1); if (sc->bge_asicrev == BGE_ASICREV_BCM5704) CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); else CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); /* Configure DMA resource pool */ CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, BGE_DMA_DESCRIPTORS); CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); } /* Configure mbuf pool watermarks */ if (!BGE_IS_5705_PLUS(sc)) { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); } else if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x04); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x10); } else { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); } /* Configure DMA resource watermarks */ CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); /* Enable buffer manager */ if (!(BGE_IS_5705_PLUS(sc))) { CSR_WRITE_4(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE | BGE_BMANMODE_LOMBUF_ATTN); /* Poll for buffer manager start indication */ for (i = 0; i < BGE_TIMEOUT; i++) { DELAY(10); if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) break; } if (i == BGE_TIMEOUT) { device_printf(sc->bge_dev, "buffer manager failed to start\n"); return (ENXIO); } } /* Enable flow-through queues */ CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); /* Wait until queue initialization is complete */ for (i = 0; i < BGE_TIMEOUT; i++) { DELAY(10); if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) break; } if (i == BGE_TIMEOUT) { device_printf(sc->bge_dev, "flow-through queue init failed\n"); return (ENXIO); } /* Initialize the standard RX ring control block */ rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb; rcb->bge_hostaddr.bge_addr_lo = BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr); rcb->bge_hostaddr.bge_addr_hi = BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr); bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD); if (BGE_IS_5705_PLUS(sc)) rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); else rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); rcb->bge_nicaddr = BGE_STD_RX_RINGS; CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); /* * Initialize the jumbo RX ring control block * We set the 'ring disabled' bit in the flags * field until we're actually ready to start * using this ring (i.e. once we set the MTU * high enough to require it). 
*/ if (BGE_IS_JUMBO_CAPABLE(sc)) { rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; rcb->bge_hostaddr.bge_addr_lo = BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr); rcb->bge_hostaddr.bge_addr_hi = BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr); bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREREAD); rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_USE_EXT_RX_BD | BGE_RCB_FLAG_RING_DISABLED); rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); /* Set up dummy disabled mini ring RCB */ rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb; rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED); CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); } /* * Set the BD ring replenish thresholds. The recommended * values are 1/8th the number of descriptors allocated to * each ring. * XXX The 5754 requires a lower threshold, so it might be a * requirement of all 575x family chips. The Linux driver sets * the lower threshold for all 5705 family chips as well, but there * are reports that it might not need to be so strict. * * XXX Linux does some extra fiddling here for the 5906 parts as * well. */ if (BGE_IS_5705_PLUS(sc)) val = 8; else val = BGE_STD_RX_RING_CNT / 8; CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, val); CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT / 8); /* * Disable all unused send rings by setting the 'ring disabled' * bit in the flags field of all the TX send ring control blocks. * These are located in NIC memory. */ vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) { RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED)); RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); vrcb += sizeof(struct bge_rcb); } /* Configure TX RCB 0 (we use only the first ring) */ vrcb = BGE_MEMWIN_START + BGE_SEND_RING_RCB; BGE_HOSTADDR(taddr, sc->bge_ldata.bge_tx_ring_paddr); RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); RCB_WRITE_4(sc, vrcb, bge_nicaddr, BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT)); if (!(BGE_IS_5705_PLUS(sc))) RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0)); /* Disable all unused RX return rings */ vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; for (i = 0; i < BGE_RX_RINGS_MAX; i++) { RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, 0); RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, 0); RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, BGE_RCB_FLAG_RING_DISABLED)); RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0); bge_writembx(sc, BGE_MBX_RX_CONS0_LO + (i * (sizeof(uint64_t))), 0); vrcb += sizeof(struct bge_rcb); } /* Initialize RX ring indexes */ bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, 0); bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); bge_writembx(sc, BGE_MBX_RX_MINI_PROD_LO, 0); /* * Set up RX return ring 0. * Note that the NIC address for RX return rings is 0x00000000. * The return rings live entirely within the host, so the * nicaddr field in the RCB isn't used. */
vrcb = BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB; BGE_HOSTADDR(taddr, sc->bge_ldata.bge_rx_return_ring_paddr); RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_hi, taddr.bge_addr_hi); RCB_WRITE_4(sc, vrcb, bge_hostaddr.bge_addr_lo, taddr.bge_addr_lo); RCB_WRITE_4(sc, vrcb, bge_nicaddr, 0x00000000); RCB_WRITE_4(sc, vrcb, bge_maxlen_flags, BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0)); /* Set random backoff seed for TX */ CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, IF_LLADDR(sc->bge_ifp)[0] + IF_LLADDR(sc->bge_ifp)[1] + IF_LLADDR(sc->bge_ifp)[2] + IF_LLADDR(sc->bge_ifp)[3] + IF_LLADDR(sc->bge_ifp)[4] + IF_LLADDR(sc->bge_ifp)[5] + BGE_TX_BACKOFF_SEED_MASK); /* Set inter-packet gap */ CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620); /* * Specify which ring to use for packets that don't match * any RX rules. */ CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); /* * Configure number of RX lists. One interrupt distribution * list, sixteen active lists, one bad frames class. */ CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); /* Initialize RX list placement stats mask. */ CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); /* Disable host coalescing until we get it set up */ CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); /* Poll to make sure it's shut down. */ for (i = 0; i < BGE_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) break; } if (i == BGE_TIMEOUT) { device_printf(sc->bge_dev, "host coalescing engine failed to idle\n"); return (ENXIO); } /* Set up host coalescing defaults */ CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); if (!(BGE_IS_5705_PLUS(sc))) { CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); } CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 1); CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 1); /* Set up address of statistics block */ if (!(BGE_IS_5705_PLUS(sc))) { CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr)); CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr)); CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); } /* Set up address of status block */ CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr)); CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr)); sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0; sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0; /* Turn on host coalescing state machine */ CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); /* Turn on RX BD completion state machine and enable attentions */ CSR_WRITE_4(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE | BGE_RBDCMODE_ATTN); /* Turn on RX list placement state machine */ CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); /* Turn on RX list selector state machine. */ if (!(BGE_IS_5705_PLUS(sc))) CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); /* Turn on DMA, clear stats */ CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB | BGE_MACMODE_RXDMA_ENB | BGE_MACMODE_RX_STATS_CLEAR | BGE_MACMODE_TX_STATS_CLEAR | BGE_MACMODE_RX_STATS_ENB | BGE_MACMODE_TX_STATS_ENB | BGE_MACMODE_FRMHDR_DMA_ENB | ((sc->bge_flags & BGE_FLAG_TBI) ?
BGE_PORTMODE_TBI : BGE_PORTMODE_MII)); /* Set misc. local control, enable interrupts on attentions */ CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); #ifdef notdef /* Assert GPIO pins for PHY reset */ BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0 | BGE_MLC_MISCIO_OUT1 | BGE_MLC_MISCIO_OUT2); BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0 | BGE_MLC_MISCIO_OUTEN1 | BGE_MLC_MISCIO_OUTEN2); #endif /* Turn on DMA completion state machine */ if (!(BGE_IS_5705_PLUS(sc))) CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); val = BGE_WDMAMODE_ENABLE | BGE_WDMAMODE_ALL_ATTNS; /* Enable host coalescing bug fix. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5755 || sc->bge_asicrev == BGE_ASICREV_BCM5787) val |= 1 << 29; /* Turn on write DMA state machine */ CSR_WRITE_4(sc, BGE_WDMA_MODE, val); DELAY(40); /* Turn on read DMA state machine */ val = BGE_RDMAMODE_ENABLE | BGE_RDMAMODE_ALL_ATTNS; if (sc->bge_flags & BGE_FLAG_PCIE) val |= BGE_RDMAMODE_FIFO_LONG_BURST; CSR_WRITE_4(sc, BGE_RDMA_MODE, val); DELAY(40); /* Turn on RX data completion state machine */ CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); /* Turn on RX BD initiator state machine */ CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); /* Turn on RX data and RX BD initiator state machine */ CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); /* Turn on Mbuf cluster free state machine */ if (!(BGE_IS_5705_PLUS(sc))) CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); /* Turn on send BD completion state machine */ CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); /* Turn on send data completion state machine */ CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); /* Turn on send data initiator state machine */ CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); /* Turn on send BD initiator state machine */ CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); /* Turn on send BD selector state machine */ CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, BGE_SDISTATSCTL_ENABLE | BGE_SDISTATSCTL_FASTER); /* ack/clear link change events */ CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | BGE_MACSTAT_LINK_CHANGED); CSR_WRITE_4(sc, BGE_MI_STS, 0); /* Enable PHY auto polling (for MII/GMII only) */ if (sc->bge_flags & BGE_FLAG_TBI) { CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); } else { BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL | (10 << 16)); if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_B2) CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_MI_INTERRUPT); } /* * Clear any pending link state attention. * Otherwise some link state change events may be lost until attention * is cleared by bge_intr() -> bge_link_upd() sequence. * It's not necessary on newer BCM chips - perhaps enabling link * state change attentions implies clearing pending attention. */ CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | BGE_MACSTAT_LINK_CHANGED); /* Enable link state change attentions. 
*/ BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); return (0); } const struct bge_revision * bge_lookup_rev(uint32_t chipid) { const struct bge_revision *br; for (br = bge_revisions; br->br_name != NULL; br++) { if (br->br_chipid == chipid) return (br); } for (br = bge_majorrevs; br->br_name != NULL; br++) { if (br->br_chipid == BGE_ASICREV(chipid)) return (br); } return (NULL); } const struct bge_vendor * bge_lookup_vendor(uint16_t vid) { const struct bge_vendor *v; for (v = bge_vendors; v->v_name != NULL; v++) if (v->v_id == vid) return (v); panic("%s: unknown vendor %d", __func__, vid); return (NULL); } /* * Probe for a Broadcom chip. Check the PCI vendor and device IDs * against our list and return its name if we find a match. * * Note that since the Broadcom controller contains VPD support, we * try to get the device name string from the controller itself instead * of the compiled-in string. It guarantees we'll always announce the * right product name. We fall back to the compiled-in string when * VPD is unavailable or corrupt. */ static int bge_probe(device_t dev) { const struct bge_type *t = bge_devs; struct bge_softc *sc = device_get_softc(dev); uint16_t vid, did; sc->bge_dev = dev; vid = pci_get_vendor(dev); did = pci_get_device(dev); while(t->bge_vid != 0) { if ((vid == t->bge_vid) && (did == t->bge_did)) { char model[64], buf[96]; const struct bge_revision *br; const struct bge_vendor *v; uint32_t id; id = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) & BGE_PCIMISCCTL_ASICREV; br = bge_lookup_rev(id); v = bge_lookup_vendor(vid); { #if __FreeBSD_version > 700024 const char *pname; if (bge_has_eaddr(sc) && pci_get_vpd_ident(dev, &pname) == 0) snprintf(model, 64, "%s", pname); else #endif snprintf(model, 64, "%s %s", v->v_name, br != NULL ? br->br_name : "NetXtreme Ethernet Controller"); } snprintf(buf, 96, "%s, %sASIC rev. %#04x", model, br != NULL ? "" : "unknown ", id >> 16); device_set_desc_copy(dev, buf); if (pci_get_subvendor(dev) == DELL_VENDORID) sc->bge_flags |= BGE_FLAG_NO_3LED; if (did == BCOM_DEVICEID_BCM5755M) sc->bge_flags |= BGE_FLAG_ADJUST_TRIM; return (0); } t++; } return (ENXIO); } static void bge_dma_free(struct bge_softc *sc) { int i; /* Destroy DMA maps for RX buffers. */ for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { if (sc->bge_cdata.bge_rx_std_dmamap[i]) bus_dmamap_destroy(sc->bge_cdata.bge_mtag, sc->bge_cdata.bge_rx_std_dmamap[i]); } /* Destroy DMA maps for jumbo RX buffers. */ for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { if (sc->bge_cdata.bge_rx_jumbo_dmamap[i]) bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i]); } /* Destroy DMA maps for TX buffers. */ for (i = 0; i < BGE_TX_RING_CNT; i++) { if (sc->bge_cdata.bge_tx_dmamap[i]) bus_dmamap_destroy(sc->bge_cdata.bge_mtag, sc->bge_cdata.bge_tx_dmamap[i]); } if (sc->bge_cdata.bge_mtag) bus_dma_tag_destroy(sc->bge_cdata.bge_mtag); /* Destroy standard RX ring. */ if (sc->bge_cdata.bge_rx_std_ring_map) bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map); if (sc->bge_cdata.bge_rx_std_ring_map && sc->bge_ldata.bge_rx_std_ring) bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_ldata.bge_rx_std_ring, sc->bge_cdata.bge_rx_std_ring_map); if (sc->bge_cdata.bge_rx_std_ring_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag); /* Destroy jumbo RX ring. 
*/ if (sc->bge_cdata.bge_rx_jumbo_ring_map) bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_cdata.bge_rx_jumbo_ring_map); if (sc->bge_cdata.bge_rx_jumbo_ring_map && sc->bge_ldata.bge_rx_jumbo_ring) bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_ldata.bge_rx_jumbo_ring, sc->bge_cdata.bge_rx_jumbo_ring_map); if (sc->bge_cdata.bge_rx_jumbo_ring_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag); /* Destroy RX return ring. */ if (sc->bge_cdata.bge_rx_return_ring_map) bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag, sc->bge_cdata.bge_rx_return_ring_map); if (sc->bge_cdata.bge_rx_return_ring_map && sc->bge_ldata.bge_rx_return_ring) bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag, sc->bge_ldata.bge_rx_return_ring, sc->bge_cdata.bge_rx_return_ring_map); if (sc->bge_cdata.bge_rx_return_ring_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag); /* Destroy TX ring. */ if (sc->bge_cdata.bge_tx_ring_map) bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag, sc->bge_cdata.bge_tx_ring_map); if (sc->bge_cdata.bge_tx_ring_map && sc->bge_ldata.bge_tx_ring) bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag, sc->bge_ldata.bge_tx_ring, sc->bge_cdata.bge_tx_ring_map); if (sc->bge_cdata.bge_tx_ring_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag); /* Destroy status block. */ if (sc->bge_cdata.bge_status_map) bus_dmamap_unload(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map); if (sc->bge_cdata.bge_status_map && sc->bge_ldata.bge_status_block) bus_dmamem_free(sc->bge_cdata.bge_status_tag, sc->bge_ldata.bge_status_block, sc->bge_cdata.bge_status_map); if (sc->bge_cdata.bge_status_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag); /* Destroy statistics block. */ if (sc->bge_cdata.bge_stats_map) bus_dmamap_unload(sc->bge_cdata.bge_stats_tag, sc->bge_cdata.bge_stats_map); if (sc->bge_cdata.bge_stats_map && sc->bge_ldata.bge_stats) bus_dmamem_free(sc->bge_cdata.bge_stats_tag, sc->bge_ldata.bge_stats, sc->bge_cdata.bge_stats_map); if (sc->bge_cdata.bge_stats_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag); /* Destroy the parent tag. */ if (sc->bge_cdata.bge_parent_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag); } static int bge_dma_alloc(device_t dev) { struct bge_dmamap_arg ctx; struct bge_softc *sc; int i, error; sc = device_get_softc(dev); /* * Allocate the parent bus DMA tag appropriate for PCI. */ error = bus_dma_tag_create(bus_get_dma_tag(sc->bge_dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->bge_cdata.bge_parent_tag); if (error != 0) { device_printf(sc->bge_dev, "could not allocate parent dma tag\n"); return (ENOMEM); } /* * Create tag for mbufs. */ error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * BGE_NSEG_NEW, BGE_NSEG_NEW, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag); if (error) { device_printf(sc->bge_dev, "could not allocate dma tag\n"); return (ENOMEM); } /* Create DMA maps for RX buffers. */ for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0, &sc->bge_cdata.bge_rx_std_dmamap[i]); if (error) { device_printf(sc->bge_dev, "can't create DMA map for RX\n"); return (ENOMEM); } } /* Create DMA maps for TX buffers. 
*/ for (i = 0; i < BGE_TX_RING_CNT; i++) { error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0, &sc->bge_cdata.bge_tx_dmamap[i]); if (error) { device_printf(sc->bge_dev, "can't create DMA map for TX\n"); return (ENOMEM); } } /* Create tag for standard RX ring. */ error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0, NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag); if (error) { device_printf(sc->bge_dev, "could not allocate dma tag\n"); return (ENOMEM); } /* Allocate DMA'able memory for standard RX ring. */ error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag, (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_std_ring_map); if (error) return (ENOMEM); bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ); /* Load the address of the standard RX ring. */ ctx.bge_maxsegs = 1; ctx.sc = sc; error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); if (error) return (ENOMEM); sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr; /* Create tags for jumbo mbufs. */ if (BGE_IS_JUMBO_CAPABLE(sc)) { error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MJUM9BYTES, BGE_NSEG_JUMBO, PAGE_SIZE, 0, NULL, NULL, &sc->bge_cdata.bge_mtag_jumbo); if (error) { device_printf(sc->bge_dev, "could not allocate jumbo dma tag\n"); return (ENOMEM); } /* Create tag for jumbo RX ring. */ error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0, NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag); if (error) { device_printf(sc->bge_dev, "could not allocate jumbo ring dma tag\n"); return (ENOMEM); } /* Allocate DMA'able memory for jumbo RX ring. */ error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag, (void **)&sc->bge_ldata.bge_rx_jumbo_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->bge_cdata.bge_rx_jumbo_ring_map); if (error) return (ENOMEM); /* Load the address of the jumbo RX ring. */ ctx.bge_maxsegs = 1; ctx.sc = sc; error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_cdata.bge_rx_jumbo_ring_map, sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); if (error) return (ENOMEM); sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr; /* Create DMA maps for jumbo RX buffers. */ for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo, 0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]); if (error) { device_printf(sc->bge_dev, "can't create DMA map for jumbo RX\n"); return (ENOMEM); } } } /* Create tag for RX return ring. */ error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0, NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag); if (error) { device_printf(sc->bge_dev, "could not allocate dma tag\n"); return (ENOMEM); }
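/*
 * Each ring set up in this function follows the same three-step bus_dma
 * idiom: create a tag describing the size and alignment constraints,
 * allocate DMA-safe memory against that tag, then load the map so the
 * callback (bge_dma_map_addr) can capture the resulting bus address
 * into ctx.bge_busaddr.
 */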
/* Allocate DMA'able memory for RX return ring. */ error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag, (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_return_ring_map); if (error) return (ENOMEM); bzero((char *)sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc)); /* Load the address of the RX return ring. */ ctx.bge_maxsegs = 1; ctx.sc = sc; error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag, sc->bge_cdata.bge_rx_return_ring_map, sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc), bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); if (error) return (ENOMEM); sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr; /* Create tag for TX ring. */ error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL, &sc->bge_cdata.bge_tx_ring_tag); if (error) { device_printf(sc->bge_dev, "could not allocate dma tag\n"); return (ENOMEM); } /* Allocate DMA'able memory for TX ring. */ error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag, (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT, &sc->bge_cdata.bge_tx_ring_map); if (error) return (ENOMEM); bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ); /* Load the address of the TX ring. */ ctx.bge_maxsegs = 1; ctx.sc = sc; error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag, sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); if (error) return (ENOMEM); sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr; /* Create tag for status block. */ error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0, NULL, NULL, &sc->bge_cdata.bge_status_tag); if (error) { device_printf(sc->bge_dev, "could not allocate dma tag\n"); return (ENOMEM); } /* Allocate DMA'able memory for status block. */ error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag, (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT, &sc->bge_cdata.bge_status_map); if (error) return (ENOMEM); bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ); /* Load the address of the status block. */ ctx.sc = sc; ctx.bge_maxsegs = 1; error = bus_dmamap_load(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); if (error) return (ENOMEM); sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr; /* Create tag for statistics block. */ error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL, &sc->bge_cdata.bge_stats_tag); if (error) { device_printf(sc->bge_dev, "could not allocate dma tag\n"); return (ENOMEM); } /* Allocate DMA'able memory for statistics block. */ error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag, (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT, &sc->bge_cdata.bge_stats_map); if (error) return (ENOMEM); bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ); /* Load the address of the statistics block. */ ctx.sc = sc; ctx.bge_maxsegs = 1; error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag, sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats, BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); if (error) return (ENOMEM); sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr; return (0); } #if __FreeBSD_version > 602105 /* * Return true if this device has more than one port.
*/ static int bge_has_multiple_ports(struct bge_softc *sc) { device_t dev = sc->bge_dev; u_int b, d, f, fscan, s; d = pci_get_domain(dev); b = pci_get_bus(dev); s = pci_get_slot(dev); f = pci_get_function(dev); for (fscan = 0; fscan <= PCI_FUNCMAX; fscan++) if (fscan != f && pci_find_dbsf(d, b, s, fscan) != NULL) return (1); return (0); } /* * Return true if MSI can be used with this device. */ static int bge_can_use_msi(struct bge_softc *sc) { int can_use_msi = 0; switch (sc->bge_asicrev) { case BGE_ASICREV_BCM5714_A0: case BGE_ASICREV_BCM5714: /* * Apparently, MSI doesn't work when these chips are * configured in single-port mode. */ if (bge_has_multiple_ports(sc)) can_use_msi = 1; break; case BGE_ASICREV_BCM5750: if (sc->bge_chiprev != BGE_CHIPREV_5750_AX && sc->bge_chiprev != BGE_CHIPREV_5750_BX) can_use_msi = 1; break; default: if (BGE_IS_575X_PLUS(sc)) can_use_msi = 1; } return (can_use_msi); } #endif static int bge_attach(device_t dev) { struct ifnet *ifp; struct bge_softc *sc; uint32_t hwcfg = 0, misccfg; u_char eaddr[ETHER_ADDR_LEN]; int error, reg, rid, trys; sc = device_get_softc(dev); sc->bge_dev = dev; /* * Map control/status registers. */ pci_enable_busmaster(dev); rid = BGE_PCI_BAR0; sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->bge_res == NULL) { device_printf (sc->bge_dev, "couldn't map memory\n"); error = ENXIO; goto fail; } /* Save various chip information. */ sc->bge_chipid = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) & BGE_PCIMISCCTL_ASICREV; sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid); sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid); /* * Don't enable Ethernet@WireSpeed for the 5700, 5906, or the * 5705 A0 and A1 chips. */ if (sc->bge_asicrev != BGE_ASICREV_BCM5700 && sc->bge_asicrev != BGE_ASICREV_BCM5906 && sc->bge_chipid != BGE_CHIPID_BCM5705_A0 && sc->bge_chipid != BGE_CHIPID_BCM5705_A1) sc->bge_flags |= BGE_FLAG_WIRESPEED; if (bge_has_eaddr(sc)) sc->bge_flags |= BGE_FLAG_EADDR; /* Save chipset family. */ switch (sc->bge_asicrev) { case BGE_ASICREV_BCM5700: case BGE_ASICREV_BCM5701: case BGE_ASICREV_BCM5703: case BGE_ASICREV_BCM5704: sc->bge_flags |= BGE_FLAG_5700_FAMILY | BGE_FLAG_JUMBO; break; case BGE_ASICREV_BCM5714_A0: case BGE_ASICREV_BCM5780: case BGE_ASICREV_BCM5714: sc->bge_flags |= BGE_FLAG_5714_FAMILY /* | BGE_FLAG_JUMBO */; /* FALLTHROUGH */ case BGE_ASICREV_BCM5750: case BGE_ASICREV_BCM5752: case BGE_ASICREV_BCM5755: case BGE_ASICREV_BCM5787: case BGE_ASICREV_BCM5906: sc->bge_flags |= BGE_FLAG_575X_PLUS; /* FALLTHROUGH */ case BGE_ASICREV_BCM5705: sc->bge_flags |= BGE_FLAG_5705_PLUS; break; } /* Set various bug flags. */ if (sc->bge_chipid == BGE_CHIPID_BCM5701_A0 || sc->bge_chipid == BGE_CHIPID_BCM5701_B0) sc->bge_flags |= BGE_FLAG_CRC_BUG; if (sc->bge_chiprev == BGE_CHIPREV_5703_AX || sc->bge_chiprev == BGE_CHIPREV_5704_AX) sc->bge_flags |= BGE_FLAG_ADC_BUG; if (sc->bge_chipid == BGE_CHIPID_BCM5704_A0) sc->bge_flags |= BGE_FLAG_5704_A0_BUG; if (BGE_IS_5705_PLUS(sc) && !(sc->bge_flags & BGE_FLAG_ADJUST_TRIM)) { if (sc->bge_asicrev == BGE_ASICREV_BCM5755 || sc->bge_asicrev == BGE_ASICREV_BCM5787) { if (sc->bge_chipid != BGE_CHIPID_BCM5722_A0) sc->bge_flags |= BGE_FLAG_JITTER_BUG; } else if (sc->bge_asicrev != BGE_ASICREV_BCM5906) sc->bge_flags |= BGE_FLAG_BER_BUG; } /* * We could possibly check for BCOM_DEVICEID_BCM5788 in bge_probe() * but I do not know the DEVICEID for the 5788M. 
*/ misccfg = CSR_READ_4(sc, BGE_MISC_CFG) & BGE_MISCCFG_BOARD_ID; if (misccfg == BGE_MISCCFG_BOARD_ID_5788 || misccfg == BGE_MISCCFG_BOARD_ID_5788M) sc->bge_flags |= BGE_FLAG_5788; /* * Check if this is a PCI-X or PCI Express device. */ #if __FreeBSD_version > 602101 if (pci_find_extcap(dev, PCIY_EXPRESS, &reg) == 0) { /* * Found a PCI Express capabilities register; this * must be a PCI Express device. */ if (reg != 0) { sc->bge_flags |= BGE_FLAG_PCIE; #else if (BGE_IS_5705_PLUS(sc)) { reg = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4); if ((reg & 0xFF) == BGE_PCIE_CAPID) { sc->bge_flags |= BGE_FLAG_PCIE; reg = BGE_PCIE_CAPID; #endif bge_set_max_readrq(sc, reg); } } else { /* * Check if the device is in PCI-X Mode. * (This bit is not valid on PCI Express controllers.) */ if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) & BGE_PCISTATE_PCI_BUSMODE) == 0) sc->bge_flags |= BGE_FLAG_PCIX; } #if __FreeBSD_version > 602105 { int msicount; /* * Allocate the interrupt, using MSI if possible. These devices * support 8 MSI messages, but only the first one is used in * normal operation. */ if (bge_can_use_msi(sc)) { msicount = pci_msi_count(dev); if (msicount > 1) msicount = 1; } else msicount = 0; if (msicount == 1 && pci_alloc_msi(dev, &msicount) == 0) { rid = 1; sc->bge_flags |= BGE_FLAG_MSI; } else rid = 0; } #else rid = 0; #endif sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->bge_irq == NULL) { device_printf(sc->bge_dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } if (bootverbose) device_printf(dev, "CHIP ID 0x%08x; ASIC REV 0x%02x; CHIP REV 0x%02x; %s\n", sc->bge_chipid, sc->bge_asicrev, sc->bge_chiprev, (sc->bge_flags & BGE_FLAG_PCIX) ? "PCI-X" : ((sc->bge_flags & BGE_FLAG_PCIE) ? "PCI-E" : "PCI")); BGE_LOCK_INIT(sc, device_get_nameunit(dev)); /* Try to reset the chip. */ if (bge_reset(sc)) { device_printf(sc->bge_dev, "chip reset failed\n"); error = ENXIO; goto fail; } sc->bge_asf_mode = 0; if (bge_allow_asf && (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER)) { if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG) & BGE_HWCFG_ASF) { sc->bge_asf_mode |= ASF_ENABLE; sc->bge_asf_mode |= ASF_STACKUP; if (sc->bge_asicrev == BGE_ASICREV_BCM5750) { sc->bge_asf_mode |= ASF_NEW_HANDSHAKE; } } } /* Try to reset the chip again the nice way. */ bge_stop_fw(sc); bge_sig_pre_reset(sc, BGE_RESET_STOP); if (bge_reset(sc)) { device_printf(sc->bge_dev, "chip reset failed\n"); error = ENXIO; goto fail; } bge_sig_legacy(sc, BGE_RESET_STOP); bge_sig_post_reset(sc, BGE_RESET_STOP); if (bge_chipinit(sc)) { device_printf(sc->bge_dev, "chip initialization failed\n"); error = ENXIO; goto fail; } error = bge_get_eaddr(sc, eaddr); if (error) { device_printf(sc->bge_dev, "failed to read station address\n"); error = ENXIO; goto fail; } /* 5705 limits RX return ring to 512 entries. */ if (BGE_IS_5705_PLUS(sc)) sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; else sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; if (bge_dma_alloc(dev)) { device_printf(sc->bge_dev, "failed to allocate DMA resources\n"); error = ENXIO; goto fail; } /* Set default tuneable values.
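These feed the host coalescing engine set up in bge_blockinit(): the controller defers a status update until roughly bge_rx_coal_ticks/bge_tx_coal_ticks has elapsed or bge_rx_max_coal_bds/bge_tx_max_coal_bds descriptors have accumulated, whichever threshold is crossed first.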
*/ sc->bge_stat_ticks = BGE_TICKS_PER_SEC; sc->bge_rx_coal_ticks = 150; sc->bge_tx_coal_ticks = 150; sc->bge_rx_max_coal_bds = 10; sc->bge_tx_max_coal_bds = 10; /* Set up ifnet structure */ ifp = sc->bge_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(sc->bge_dev, "failed to if_alloc()\n"); error = ENXIO; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = bge_ioctl; ifp->if_start = bge_start; ifp->if_init = bge_init; ifp->if_mtu = ETHERMTU; ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1; IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); IFQ_SET_READY(&ifp->if_snd); ifp->if_hwassist = BGE_CSUM_FEATURES; ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; #ifdef IFCAP_VLAN_HWCSUM ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; #endif ifp->if_capenable = ifp->if_capabilities; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif /* * 5700 B0 chips do not support checksumming correctly due * to hardware bugs. */ if (sc->bge_chipid == BGE_CHIPID_BCM5700_B0) { ifp->if_capabilities &= ~IFCAP_HWCSUM; ifp->if_capenable &= ~IFCAP_HWCSUM; ifp->if_hwassist = 0; } /* * Figure out what sort of media we have by checking the * hardware config word in the first 32k of NIC internal memory, * or fall back to examining the EEPROM if necessary. * Note: on some BCM5700 cards, this value appears to be unset. * If that's the case, we have to rely on identifying the NIC * by its PCI subsystem ID, as we do below for the SysKonnect * SK-9D41. */ if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); else if ((sc->bge_flags & BGE_FLAG_EADDR) && (sc->bge_asicrev != BGE_ASICREV_BCM5906)) { if (bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET, sizeof(hwcfg))) { device_printf(sc->bge_dev, "failed to read EEPROM\n"); error = ENXIO; goto fail; } hwcfg = ntohl(hwcfg); } if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) sc->bge_flags |= BGE_FLAG_TBI; /* The SysKonnect SK-9D41 is a 1000baseSX card. */ if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41) sc->bge_flags |= BGE_FLAG_TBI; if (sc->bge_flags & BGE_FLAG_TBI) { ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, bge_ifmedia_sts); ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX, 0, NULL); ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL); ifmedia_add(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&sc->bge_ifmedia, IFM_ETHER | IFM_AUTO); sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media; } else { /* * Do transceiver setup and tell the firmware the * driver is down so we can try to get access to the * PHY during the probe if ASF is running. Retry a couple * of times if we get a conflict with the ASF firmware * accessing the PHY.
*/ trys = 0; BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); again: bge_asf_driver_up(sc); if (mii_phy_probe(dev, &sc->bge_miibus, bge_ifmedia_upd, bge_ifmedia_sts)) { if (trys++ < 4) { device_printf(sc->bge_dev, "Try again\n"); bge_miibus_writereg(sc->bge_dev, 1, MII_BMCR, BMCR_RESET); goto again; } device_printf(sc->bge_dev, "MII without any PHY!\n"); error = ENXIO; goto fail; } /* * Now tell the firmware we are going up after probing the PHY. */ if (sc->bge_asf_mode & ASF_STACKUP) BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); } /* * When using the BCM5701 in PCI-X mode, data corruption has * been observed in the first few bytes of some received packets. * Aligning the packet buffer in memory eliminates the corruption. * Unfortunately, this misaligns the packet payloads. On platforms * which do not support unaligned accesses, we will realign the * payloads by copying the received packets. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5701 && sc->bge_flags & BGE_FLAG_PCIX) sc->bge_flags |= BGE_FLAG_RX_ALIGNBUG; /* * Call MI attach routine. */ ether_ifattach(ifp, eaddr); callout_init_mtx(&sc->bge_stat_ch, &sc->bge_mtx, 0); /* * Hook up IRQ last. */ #if __FreeBSD_version > 700030 error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, bge_intr, sc, &sc->bge_intrhand); #else error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE, bge_intr, sc, &sc->bge_intrhand); #endif if (error) { bge_detach(dev); device_printf(sc->bge_dev, "couldn't set up irq\n"); } bge_add_sysctls(sc); return (0); fail: bge_release_resources(sc); return (error); } static int bge_detach(device_t dev) { struct bge_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->bge_ifp; #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif BGE_LOCK(sc); bge_stop(sc); bge_reset(sc); BGE_UNLOCK(sc); callout_drain(&sc->bge_stat_ch); ether_ifdetach(ifp); if (sc->bge_flags & BGE_FLAG_TBI) { ifmedia_removeall(&sc->bge_ifmedia); } else { bus_generic_detach(dev); device_delete_child(dev, sc->bge_miibus); } bge_release_resources(sc); return (0); } static void bge_release_resources(struct bge_softc *sc) { device_t dev; dev = sc->bge_dev; if (sc->bge_intrhand != NULL) bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand); if (sc->bge_irq != NULL) bus_release_resource(dev, SYS_RES_IRQ, sc->bge_flags & BGE_FLAG_MSI ? 1 : 0, sc->bge_irq); #if __FreeBSD_version > 602105 if (sc->bge_flags & BGE_FLAG_MSI) pci_release_msi(dev); #endif if (sc->bge_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, BGE_PCI_BAR0, sc->bge_res); if (sc->bge_ifp != NULL) if_free(sc->bge_ifp); bge_dma_free(sc); if (mtx_initialized(&sc->bge_mtx)) /* XXX */ BGE_LOCK_DESTROY(sc); } static int bge_reset(struct bge_softc *sc) { device_t dev; uint32_t cachesize, command, pcistate, reset, val; void (*write_op)(struct bge_softc *, int, int); int i; dev = sc->bge_dev; if (BGE_IS_575X_PLUS(sc) && !BGE_IS_5714_FAMILY(sc) && (sc->bge_asicrev != BGE_ASICREV_BCM5906)) { if (sc->bge_flags & BGE_FLAG_PCIE) write_op = bge_writemem_direct; else write_op = bge_writemem_ind; } else write_op = bge_writereg_ind; /* Save some important PCI state.
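The global reset issued below wipes most of PCI config space, so the cache line size, command word, and PCISTATE registers are captured here and written back once the reset completes.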
*/ cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4); command = pci_read_config(dev, BGE_PCI_CMD, 4); pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4); pci_write_config(dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4); /* Disable fastboot on controllers that support it. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5752 || sc->bge_asicrev == BGE_ASICREV_BCM5755 || sc->bge_asicrev == BGE_ASICREV_BCM5787) { if (bootverbose) device_printf(sc->bge_dev, "Disabling fastboot\n"); CSR_WRITE_4(sc, BGE_FASTBOOT_PC, 0x0); } /* * Write the magic number to SRAM at offset 0xB50. * When firmware finishes its initialization it will * write ~BGE_MAGIC_NUMBER to the same location. */ bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER); reset = BGE_MISCCFG_RESET_CORE_CLOCKS | BGE_32BITTIME_66MHZ; /* XXX: Broadcom Linux driver. */ if (sc->bge_flags & BGE_FLAG_PCIE) { if (CSR_READ_4(sc, 0x7E2C) == 0x60) /* PCIE 1.0 */ CSR_WRITE_4(sc, 0x7E2C, 0x20); if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { /* Prevent PCIE link training during global reset */ CSR_WRITE_4(sc, BGE_MISC_CFG, 1 << 29); reset |= 1 << 29; } } /* * Set GPHY Power Down Override to leave GPHY * powered up in D0 uninitialized. */ if (BGE_IS_5705_PLUS(sc)) reset |= 0x04000000; /* Issue global reset */ write_op(sc, BGE_MISC_CFG, reset); if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { val = CSR_READ_4(sc, BGE_VCPU_STATUS); CSR_WRITE_4(sc, BGE_VCPU_STATUS, val | BGE_VCPU_STATUS_DRV_RESET); val = CSR_READ_4(sc, BGE_VCPU_EXT_CTRL); CSR_WRITE_4(sc, BGE_VCPU_EXT_CTRL, val & ~BGE_VCPU_EXT_CTRL_HALT_CPU); } DELAY(1000); /* XXX: Broadcom Linux driver. */ if (sc->bge_flags & BGE_FLAG_PCIE) { if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) { DELAY(500000); /* wait for link training to complete */ val = pci_read_config(dev, 0xC4, 4); pci_write_config(dev, 0xC4, val | (1 << 15), 4); } /* * Set PCIE max payload size to 128 bytes and clear error * status. */ pci_write_config(dev, 0xD8, 0xF5000, 4); } /* Reset some of the PCI state that got zapped by reset. */ pci_write_config(dev, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_INDIRECT_ACCESS | BGE_PCIMISCCTL_MASK_PCI_INTR | BGE_HIF_SWAP_OPTIONS | BGE_PCIMISCCTL_PCISTATE_RW, 4); pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4); pci_write_config(dev, BGE_PCI_CMD, command, 4); write_op(sc, BGE_MISC_CFG, BGE_32BITTIME_66MHZ); /* Re-enable MSI, if necessary, and enable the memory arbiter. */ if (BGE_IS_5714_FAMILY(sc)) { /* This chip disables MSI on reset. */ if (sc->bge_flags & BGE_FLAG_MSI) { val = pci_read_config(dev, BGE_PCI_MSI_CTL, 2); pci_write_config(dev, BGE_PCI_MSI_CTL, val | PCIM_MSICTRL_MSI_ENABLE, 2); val = CSR_READ_4(sc, BGE_MSI_MODE); CSR_WRITE_4(sc, BGE_MSI_MODE, val | BGE_MSIMODE_ENABLE); } val = CSR_READ_4(sc, BGE_MARB_MODE); CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE | val); } else CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); if (sc->bge_asicrev == BGE_ASICREV_BCM5906) { for (i = 0; i < BGE_TIMEOUT; i++) { val = CSR_READ_4(sc, BGE_VCPU_STATUS); if (val & BGE_VCPU_STATUS_INIT_DONE) break; DELAY(100); } if (i == BGE_TIMEOUT) { device_printf(sc->bge_dev, "reset timed out\n"); return (1); } } else { /* * Poll until we see the 1's complement of the magic number. * This indicates that the firmware initialization is complete. * We expect this to fail if no chip containing the Ethernet * address is fitted though.
*/ for (i = 0; i < BGE_TIMEOUT; i++) { DELAY(10); val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM); if (val == ~BGE_MAGIC_NUMBER) break; } if ((sc->bge_flags & BGE_FLAG_EADDR) && i == BGE_TIMEOUT) device_printf(sc->bge_dev, "firmware handshake timed out, " "found 0x%08x\n", val); } /* * XXX Wait for the value of the PCISTATE register to * return to its original pre-reset state. This is a * fairly good indicator of reset completion. If we don't * wait for the reset to fully complete, trying to read * from the device's non-PCI registers may yield garbage * results. */ for (i = 0; i < BGE_TIMEOUT; i++) { if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate) break; DELAY(10); } if (sc->bge_flags & BGE_FLAG_PCIE) { reset = bge_readmem_ind(sc, 0x7C00); bge_writemem_ind(sc, 0x7C00, reset | (1 << 25)); } /* Fix up byte swapping. */ CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_DMA_SWAP_OPTIONS | BGE_MODECTL_BYTESWAP_DATA); /* Tell the ASF firmware we are up */ if (sc->bge_asf_mode & ASF_STACKUP) BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); CSR_WRITE_4(sc, BGE_MAC_MODE, 0); /* * The 5704 in TBI mode apparently needs some special * adjustment to ensure the SERDES drive level is set * to 1.2V. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_flags & BGE_FLAG_TBI) { val = CSR_READ_4(sc, BGE_SERDES_CFG); val = (val & ~0xFFF) | 0x880; CSR_WRITE_4(sc, BGE_SERDES_CFG, val); } /* XXX: Broadcom Linux driver. */ if (sc->bge_flags & BGE_FLAG_PCIE && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) { val = CSR_READ_4(sc, 0x7C00); CSR_WRITE_4(sc, 0x7C00, val | (1 << 25)); } DELAY(10000); return (0); } /* * Frame reception handling. This is called if there's a frame * on the receive return list. * * Note: we have to be able to handle two possibilities here: * 1) the frame is from the jumbo receive ring * 2) the frame is from the standard receive ring */ -static void +static int bge_rxeof(struct bge_softc *sc) { struct ifnet *ifp; - int stdcnt = 0, jumbocnt = 0; + int rx_npkts = 0, stdcnt = 0, jumbocnt = 0; BGE_LOCK_ASSERT(sc); /* Nothing to do.
*/ if (sc->bge_rx_saved_considx == sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) - return; + return (rx_npkts); ifp = sc->bge_ifp; bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTREAD); bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD); if (BGE_IS_JUMBO_CAPABLE(sc)) bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTREAD); while (sc->bge_rx_saved_considx != sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) { struct bge_rx_bd *cur_rx; uint32_t rxidx; struct mbuf *m = NULL; uint16_t vlan_tag = 0; int have_tag = 0; #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { if (sc->rxcycles <= 0) break; sc->rxcycles--; } #endif cur_rx = &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx]; rxidx = cur_rx->bge_idx; BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt); if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING && cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { have_tag = 1; vlan_tag = cur_rx->bge_vlan_tag; } if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]); m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; jumbocnt++; if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { ifp->if_ierrors++; bge_newbuf_jumbo(sc, sc->bge_jumbo, m); continue; } if (bge_newbuf_jumbo(sc, sc->bge_jumbo, NULL) == ENOBUFS) { ifp->if_ierrors++; bge_newbuf_jumbo(sc, sc->bge_jumbo, m); continue; } } else { BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); bus_dmamap_sync(sc->bge_cdata.bge_mtag, sc->bge_cdata.bge_rx_std_dmamap[rxidx], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->bge_cdata.bge_mtag, sc->bge_cdata.bge_rx_std_dmamap[rxidx]); m = sc->bge_cdata.bge_rx_std_chain[rxidx]; sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; stdcnt++; if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { ifp->if_ierrors++; bge_newbuf_std(sc, sc->bge_std, m); continue; } if (bge_newbuf_std(sc, sc->bge_std, NULL) == ENOBUFS) { ifp->if_ierrors++; bge_newbuf_std(sc, sc->bge_std, m); continue; } } ifp->if_ipackets++; #ifndef __NO_STRICT_ALIGNMENT /* * For architectures with strict alignment we must make sure * the payload is aligned. */ if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) { bcopy(m->m_data, m->m_data + ETHER_ALIGN, cur_rx->bge_len); m->m_data += ETHER_ALIGN; } #endif m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; m->m_pkthdr.rcvif = ifp; if (ifp->if_capenable & IFCAP_RXCSUM) { if (cur_rx->bge_flags & BGE_RXBDFLAG_IP_CSUM) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if ((cur_rx->bge_ip_csum ^ 0xFFFF) == 0) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; } if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM && m->m_pkthdr.len >= ETHER_MIN_NOPAD) { m->m_pkthdr.csum_data = cur_rx->bge_tcp_udp_csum; m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; } } /* * If we received a packet with a vlan tag, * attach that information to the packet. 
*/ if (have_tag) { #if __FreeBSD_version > 700022 m->m_pkthdr.ether_vtag = vlan_tag; m->m_flags |= M_VLANTAG; #else VLAN_INPUT_TAG_NEW(ifp, m, vlan_tag); if (m == NULL) continue; #endif } BGE_UNLOCK(sc); (*ifp->if_input)(ifp, m); BGE_LOCK(sc); + rx_npkts++; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) - return; + return (rx_npkts); } if (stdcnt > 0) bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREWRITE); if (BGE_IS_JUMBO_CAPABLE(sc) && jumbocnt > 0) bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREWRITE); bge_writembx(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); if (stdcnt) bge_writembx(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); if (jumbocnt) bge_writembx(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); #ifdef notyet /* * This register wraps very quickly under heavy packet drops. * If you need correct statistics, you can enable this check. */ if (BGE_IS_5705_PLUS(sc)) ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS); #endif + return (rx_npkts); } static void bge_txeof(struct bge_softc *sc) { struct bge_tx_bd *cur_tx = NULL; struct ifnet *ifp; BGE_LOCK_ASSERT(sc); /* Nothing to do. */ if (sc->bge_tx_saved_considx == sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) return; ifp = sc->bge_ifp; bus_dmamap_sync(sc->bge_cdata.bge_tx_ring_tag, sc->bge_cdata.bge_tx_ring_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* * Go through our tx ring and free mbufs for those * frames that have been sent. */ while (sc->bge_tx_saved_considx != sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) { uint32_t idx = 0; idx = sc->bge_tx_saved_considx; cur_tx = &sc->bge_ldata.bge_tx_ring[idx]; if (cur_tx->bge_flags & BGE_TXBDFLAG_END) ifp->if_opackets++; if (sc->bge_cdata.bge_tx_chain[idx] != NULL) { bus_dmamap_sync(sc->bge_cdata.bge_mtag, sc->bge_cdata.bge_tx_dmamap[idx], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->bge_cdata.bge_mtag, sc->bge_cdata.bge_tx_dmamap[idx]); m_freem(sc->bge_cdata.bge_tx_chain[idx]); sc->bge_cdata.bge_tx_chain[idx] = NULL; } sc->bge_txcnt--; BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); } if (cur_tx != NULL) ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (sc->bge_txcnt == 0) sc->bge_timer = 0; } #ifdef DEVICE_POLLING -static void +static int bge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct bge_softc *sc = ifp->if_softc; uint32_t statusword; - + int rx_npkts = 0; + BGE_LOCK(sc); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { BGE_UNLOCK(sc); - return; + return (rx_npkts); } bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD); statusword = atomic_readandclear_32( &sc->bge_ldata.bge_status_block->bge_status); bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD); /* Note link event. It will be processed by POLL_AND_CHECK_STATUS.
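bge_poll() is being converted here to the new polling contract: every handler counts frames as it hands them to if_input() and returns that count from every exit path, which is why bge_rxeof() above now returns rx_npkts instead of void. A stand-alone, user-space model of the contract (demo_poll and its toy ring indexes are invented for illustration; only the poll_cmd names mirror the kernel's):

#include <stdio.h>

enum poll_cmd { POLL_ONLY, POLL_AND_CHECK_STATUS };

/* Toy consumer/producer indexes standing in for the status block. */
static int considx, prodidx = 3;

/* The new contract: consume at most 'count' frames, and report how
 * many were actually passed up, on every exit path. */
static int
demo_poll(enum poll_cmd cmd, int count)
{
	int rx_npkts = 0;

	(void)cmd;
	while (considx != prodidx && count-- > 0) {
		considx = (considx + 1) & 7;	/* "hand the frame to if_input" */
		rx_npkts++;
	}
	return (rx_npkts);
}

int
main(void)
{
	printf("processed %d frames\n", demo_poll(POLL_ONLY, 16));
	return (0);
}

The early-return paths matter as much as the loop: a handler that bails out before processing anything still reports zero, so the polling core's accounting stays consistent.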
*/ if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED) sc->bge_link_evt++; if (cmd == POLL_AND_CHECK_STATUS) if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_B2) || sc->bge_link_evt || (sc->bge_flags & BGE_FLAG_TBI)) bge_link_upd(sc); sc->rxcycles = count; - bge_rxeof(sc); + rx_npkts = bge_rxeof(sc); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { BGE_UNLOCK(sc); - return; + return (rx_npkts); } bge_txeof(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) bge_start_locked(ifp); BGE_UNLOCK(sc); + return (rx_npkts); } #endif /* DEVICE_POLLING */ static void bge_intr(void *xsc) { struct bge_softc *sc; struct ifnet *ifp; uint32_t statusword; sc = xsc; BGE_LOCK(sc); ifp = sc->bge_ifp; #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { BGE_UNLOCK(sc); return; } #endif /* * Ack the interrupt by writing something to BGE_MBX_IRQ0_LO. Don't * disable interrupts by writing nonzero like we used to, since with * our current organization this just gives complications and * pessimizations for re-enabling interrupts. We used to have races * instead of the necessary complications. Disabling interrupts * would just reduce the chance of a status update while we are * running (by switching to the interrupt-mode coalescence * parameters), but this chance is already very low so it is more * efficient to get another interrupt than prevent it. * * We do the ack first to ensure another interrupt if there is a * status update after the ack. We don't check for the status * changing later because it is more efficient to get another * interrupt than prevent it, not quite as above (not checking is * a smaller optimization than not toggling the interrupt enable, * since checking doesn't involve PCI accesses and toggling requires * the status check). So toggling would probably be a pessimization * even with MSI. It would only be needed for using a task queue. */ bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); /* * Do the mandatory PCI flush as well as get the link status. */ statusword = CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_LINK_CHANGED; /* Make sure the descriptor ring indexes are coherent. */ bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTREAD); bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREREAD); if ((sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_B2) || statusword || sc->bge_link_evt) bge_link_upd(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { /* Check RX return ring producer/consumer. */ bge_rxeof(sc); } if (ifp->if_drv_flags & IFF_DRV_RUNNING) { /* Check TX ring producer/consumer. */ bge_txeof(sc); } if (ifp->if_drv_flags & IFF_DRV_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) bge_start_locked(ifp); BGE_UNLOCK(sc); } static void bge_asf_driver_up(struct bge_softc *sc) { if (sc->bge_asf_mode & ASF_STACKUP) { /* Send ASF heartbeat approx. every 2s */ if (sc->bge_asf_count) sc->bge_asf_count --; else { sc->bge_asf_count = 5; bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM_FW, BGE_FW_DRV_ALIVE); bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_LEN, 4); bge_writemem_ind(sc, BGE_SOFTWARE_GENNCOMM_FW_DATA, 3); CSR_WRITE_4(sc, BGE_CPU_EVENT, CSR_READ_4(sc, BGE_CPU_EVENT) | (1 << 14)); } } } static void bge_tick(void *xsc) { struct bge_softc *sc = xsc; struct mii_data *mii = NULL; BGE_LOCK_ASSERT(sc); /* Synchronize with possible callout reset/stop.
*/ if (callout_pending(&sc->bge_stat_ch) || !callout_active(&sc->bge_stat_ch)) return; if (BGE_IS_5705_PLUS(sc)) bge_stats_update_regs(sc); else bge_stats_update(sc); if ((sc->bge_flags & BGE_FLAG_TBI) == 0) { mii = device_get_softc(sc->bge_miibus); /* * Do not touch PHY if we have link up. This could break * IPMI/ASF mode or produce extra input errors * (extra errors was reported for bcm5701 & bcm5704). */ if (!sc->bge_link) mii_tick(mii); } else { /* * Since in TBI mode auto-polling can't be used we should poll * link status manually. Here we register pending link event * and trigger interrupt. */ #ifdef DEVICE_POLLING /* In polling mode we poll link state in bge_poll(). */ if (!(sc->bge_ifp->if_capenable & IFCAP_POLLING)) #endif { sc->bge_link_evt++; if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || sc->bge_flags & BGE_FLAG_5788) BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); else BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); } } bge_asf_driver_up(sc); bge_watchdog(sc); callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc); } static void bge_stats_update_regs(struct bge_softc *sc) { struct ifnet *ifp; ifp = sc->bge_ifp; ifp->if_collisions += CSR_READ_4(sc, BGE_MAC_STATS + offsetof(struct bge_mac_stats_regs, etherStatsCollisions)); ifp->if_ierrors += CSR_READ_4(sc, BGE_RXLP_LOCSTAT_IFIN_DROPS); } static void bge_stats_update(struct bge_softc *sc) { struct ifnet *ifp; bus_size_t stats; uint32_t cnt; /* current register value */ ifp = sc->bge_ifp; stats = BGE_MEMWIN_START + BGE_STATS_BLOCK; #define READ_STAT(sc, stats, stat) \ CSR_READ_4(sc, stats + offsetof(struct bge_stats, stat)) cnt = READ_STAT(sc, stats, txstats.etherStatsCollisions.bge_addr_lo); ifp->if_collisions += (uint32_t)(cnt - sc->bge_tx_collisions); sc->bge_tx_collisions = cnt; cnt = READ_STAT(sc, stats, ifInDiscards.bge_addr_lo); ifp->if_ierrors += (uint32_t)(cnt - sc->bge_rx_discards); sc->bge_rx_discards = cnt; cnt = READ_STAT(sc, stats, txstats.ifOutDiscards.bge_addr_lo); ifp->if_oerrors += (uint32_t)(cnt - sc->bge_tx_discards); sc->bge_tx_discards = cnt; #undef READ_STAT } /* * Pad outbound frame to ETHER_MIN_NOPAD for an unusual reason. * The bge hardware will pad out Tx runts to ETHER_MIN_NOPAD, * but when such padded frames employ the bge IP/TCP checksum offload, * the hardware checksum assist gives incorrect results (possibly * from incorporating its own padding into the UDP/TCP checksum; who knows). * If we pad such runts with zeros, the onboard checksum comes out correct. */ static __inline int bge_cksum_pad(struct mbuf *m) { int padlen = ETHER_MIN_NOPAD - m->m_pkthdr.len; struct mbuf *last; /* If there's only the packet-header and we can pad there, use it. */ if (m->m_pkthdr.len == m->m_len && M_WRITABLE(m) && M_TRAILINGSPACE(m) >= padlen) { last = m; } else { /* * Walk packet chain to find last mbuf. We will either * pad there, or append a new mbuf and pad it. */ for (last = m; last->m_next != NULL; last = last->m_next); if (!(M_WRITABLE(last) && M_TRAILINGSPACE(last) >= padlen)) { /* Allocate new empty mbuf, pad it. Compact later. */ struct mbuf *n; MGET(n, M_DONTWAIT, MT_DATA); if (n == NULL) return (ENOBUFS); n->m_len = 0; last->m_next = n; last = n; } } /* Now zero the pad area, to avoid the bge cksum-assist bug. */ memset(mtod(last, caddr_t) + last->m_len, 0, padlen); last->m_len += padlen; m->m_pkthdr.len += padlen; return (0); } /* * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data * pointers to descriptors. 
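Before the full routine below: stripped to its essentials, bge_encap() walks the DMA segment list produced by bus_dmamap_load_mbuf_sg() and writes one ring descriptor per segment, marking only the last one with BGE_TXBDFLAG_END. A self-contained sketch of that fill loop (demo_encap, demo_bd, and DEMO_FLAG_END are invented stand-ins for the driver's types):

#include <stdint.h>
#include <stdio.h>

#define DEMO_RING_CNT	16
#define DEMO_FLAG_END	0x0004		/* stand-in for BGE_TXBDFLAG_END */

struct demo_bd {
	uint64_t	addr;		/* DMA segment address */
	uint16_t	len;		/* segment length */
	uint16_t	flags;
};

static struct demo_bd tx_ring[DEMO_RING_CNT];

/* Couple each DMA segment to one descriptor; only the final descriptor
 * of the frame carries the END flag, and the producer index advances
 * once per segment. */
static uint32_t
demo_encap(uint32_t idx, const uint64_t *addr, const uint16_t *len, int nsegs)
{
	struct demo_bd *d;
	int i;

	for (i = 0; ; i++) {
		d = &tx_ring[idx];
		d->addr = addr[i];
		d->len = len[i];
		d->flags = 0;
		if (i == nsegs - 1)
			break;
		idx = (idx + 1) % DEMO_RING_CNT;
	}
	d->flags |= DEMO_FLAG_END;
	return ((idx + 1) % DEMO_RING_CNT);	/* new producer index */
}

int
main(void)
{
	uint64_t addr[3] = { 0x1000, 0x2000, 0x3000 };
	uint16_t len[3] = { 64, 1448, 12 };

	printf("next producer index: %u\n", demo_encap(0, addr, len, 3));
	return (0);
}

The real routine layers the checksum-offload flags, the 16-descriptor headroom check, VLAN tag placement in the first descriptor, and the dmamap bookkeeping on top of this skeleton.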
*/ static int bge_encap(struct bge_softc *sc, struct mbuf **m_head, uint32_t *txidx) { bus_dma_segment_t segs[BGE_NSEG_NEW]; bus_dmamap_t map; struct bge_tx_bd *d; struct mbuf *m = *m_head; uint32_t idx = *txidx; uint16_t csum_flags; int nsegs, i, error; csum_flags = 0; if (m->m_pkthdr.csum_flags) { if (m->m_pkthdr.csum_flags & CSUM_IP) csum_flags |= BGE_TXBDFLAG_IP_CSUM; if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) { csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; if (m->m_pkthdr.len < ETHER_MIN_NOPAD && (error = bge_cksum_pad(m)) != 0) { m_freem(m); *m_head = NULL; return (error); } } if (m->m_flags & M_LASTFRAG) csum_flags |= BGE_TXBDFLAG_IP_FRAG_END; else if (m->m_flags & M_FRAG) csum_flags |= BGE_TXBDFLAG_IP_FRAG; } map = sc->bge_cdata.bge_tx_dmamap[idx]; error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { m = m_collapse(m, M_DONTWAIT, BGE_NSEG_NEW); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m; error = bus_dmamap_load_mbuf_sg(sc->bge_cdata.bge_mtag, map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error) { m_freem(m); *m_head = NULL; return (error); } } else if (error != 0) return (error); /* * Sanity check: avoid coming within 16 descriptors * of the end of the ring. */ if (nsegs > (BGE_TX_RING_CNT - sc->bge_txcnt - 16)) { bus_dmamap_unload(sc->bge_cdata.bge_mtag, map); return (ENOBUFS); } bus_dmamap_sync(sc->bge_cdata.bge_mtag, map, BUS_DMASYNC_PREWRITE); for (i = 0; ; i++) { d = &sc->bge_ldata.bge_tx_ring[idx]; d->bge_addr.bge_addr_lo = BGE_ADDR_LO(segs[i].ds_addr); d->bge_addr.bge_addr_hi = BGE_ADDR_HI(segs[i].ds_addr); d->bge_len = segs[i].ds_len; d->bge_flags = csum_flags; if (i == nsegs - 1) break; BGE_INC(idx, BGE_TX_RING_CNT); } /* Mark the last segment as end of packet... */ d->bge_flags |= BGE_TXBDFLAG_END; /* ... and put VLAN tag into first segment. */ d = &sc->bge_ldata.bge_tx_ring[*txidx]; #if __FreeBSD_version > 700022 if (m->m_flags & M_VLANTAG) { d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; d->bge_vlan_tag = m->m_pkthdr.ether_vtag; } else d->bge_vlan_tag = 0; #else { struct m_tag *mtag; if ((mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m)) != NULL) { d->bge_flags |= BGE_TXBDFLAG_VLAN_TAG; d->bge_vlan_tag = VLAN_TAG_VALUE(mtag); } else d->bge_vlan_tag = 0; } #endif /* * Insure that the map for this transmission * is placed at the array index of the last descriptor * in this chain. */ sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[idx]; sc->bge_cdata.bge_tx_dmamap[idx] = map; sc->bge_cdata.bge_tx_chain[idx] = m; sc->bge_txcnt += nsegs; BGE_INC(idx, BGE_TX_RING_CNT); *txidx = idx; return (0); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit descriptors. */ static void bge_start_locked(struct ifnet *ifp) { struct bge_softc *sc; struct mbuf *m_head = NULL; uint32_t prodidx; int count = 0; sc = ifp->if_softc; if (!sc->bge_link || IFQ_DRV_IS_EMPTY(&ifp->if_snd)) return; prodidx = sc->bge_tx_prodidx; while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* * XXX * The code inside the if() block is never reached since we * must mark CSUM_IP_FRAGS in our if_hwassist to start getting * requests to checksum TCP/UDP in a fragmented packet. * * XXX * safety overkill. If this is a fragmented packet chain * with delayed TCP/UDP checksums, then only encapsulate * it if we have enough descriptors to handle the entire * chain at once. 
* (paranoia -- may not actually be needed) */ if (m_head->m_flags & M_FIRSTFRAG && m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { if ((BGE_TX_RING_CNT - sc->bge_txcnt) < m_head->m_pkthdr.csum_data + 16) { IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } } /* * Pack the data into the transmit ring. If we * don't have room, set the OACTIVE flag and wait * for the NIC to drain the ring. */ if (bge_encap(sc, &m_head, &prodidx)) { if (m_head == NULL) break; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } ++count; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ #ifdef ETHER_BPF_MTAP ETHER_BPF_MTAP(ifp, m_head); #else BPF_MTAP(ifp, m_head); #endif } if (count == 0) /* No packets were dequeued. */ return; /* Transmit. */ bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); /* 5700 b2 errata */ if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) bge_writembx(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); sc->bge_tx_prodidx = prodidx; /* * Set a timeout in case the chip goes out to lunch. */ sc->bge_timer = 5; } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit descriptors. */ static void bge_start(struct ifnet *ifp) { struct bge_softc *sc; sc = ifp->if_softc; BGE_LOCK(sc); bge_start_locked(ifp); BGE_UNLOCK(sc); } static void bge_init_locked(struct bge_softc *sc) { struct ifnet *ifp; uint16_t *m; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; if (ifp->if_drv_flags & IFF_DRV_RUNNING) return; /* Cancel pending I/O and flush buffers. */ bge_stop(sc); bge_stop_fw(sc); bge_sig_pre_reset(sc, BGE_RESET_START); bge_reset(sc); bge_sig_legacy(sc, BGE_RESET_START); bge_sig_post_reset(sc, BGE_RESET_START); bge_chipinit(sc); /* * Init the various state machines, ring * control blocks and firmware. */ if (bge_blockinit(sc)) { device_printf(sc->bge_dev, "initialization failure\n"); return; } ifp = sc->bge_ifp; /* Specify MTU. */ CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + (ifp->if_capenable & IFCAP_VLAN_MTU ? ETHER_VLAN_ENCAP_LEN : 0)); /* Load our MAC address. */ m = (uint16_t *)IF_LLADDR(sc->bge_ifp); CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); /* Program promiscuous mode. */ bge_setpromisc(sc); /* Program multicast filter. */ bge_setmulti(sc); /* Program VLAN tag stripping. */ bge_setvlan(sc); /* Init RX ring. */ bge_init_rx_ring_std(sc); /* * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's * memory to insure that the chip has in fact read the first * entry of the ring. */ if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) { uint32_t v, i; for (i = 0; i < 10; i++) { DELAY(20); v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8); if (v == (MCLBYTES - ETHER_ALIGN)) break; } if (i == 10) device_printf (sc->bge_dev, "5705 A0 chip failed to load RX ring\n"); } /* Init jumbo RX ring. */ if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) bge_init_rx_ring_jumbo(sc); /* Init our RX return ring index. */ sc->bge_rx_saved_considx = 0; /* Init our RX/TX stat counters. */ sc->bge_rx_discards = sc->bge_tx_discards = sc->bge_tx_collisions = 0; /* Init TX ring. */ bge_init_tx_ring(sc); /* Turn on transmitter. */ BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); /* Turn on receiver. */ BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); /* Tell firmware we're alive. 
*/ BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); #ifdef DEVICE_POLLING /* Disable interrupts if we are polling. */ if (ifp->if_capenable & IFCAP_POLLING) { BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); } else #endif /* Enable host interrupts. */ { BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); } bge_ifmedia_upd_locked(ifp); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc); } static void bge_init(void *xsc) { struct bge_softc *sc = xsc; BGE_LOCK(sc); bge_init_locked(sc); BGE_UNLOCK(sc); } /* * Set media options. */ static int bge_ifmedia_upd(struct ifnet *ifp) { struct bge_softc *sc = ifp->if_softc; int res; BGE_LOCK(sc); res = bge_ifmedia_upd_locked(ifp); BGE_UNLOCK(sc); return (res); } static int bge_ifmedia_upd_locked(struct ifnet *ifp) { struct bge_softc *sc = ifp->if_softc; struct mii_data *mii; struct mii_softc *miisc; struct ifmedia *ifm; BGE_LOCK_ASSERT(sc); ifm = &sc->bge_ifmedia; /* If this is a 1000baseX NIC, enable the TBI port. */ if (sc->bge_flags & BGE_FLAG_TBI) { if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); switch(IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: /* * The BCM5704 ASIC appears to have a special * mechanism for programming the autoneg * advertisement registers in TBI mode. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { uint32_t sgdig; sgdig = CSR_READ_4(sc, BGE_SGDIG_STS); if (sgdig & BGE_SGDIGSTS_DONE) { CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0); sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG); sgdig |= BGE_SGDIGCFG_AUTO | BGE_SGDIGCFG_PAUSE_CAP | BGE_SGDIGCFG_ASYM_PAUSE; CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig | BGE_SGDIGCFG_SEND); DELAY(5); CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig); } } break; case IFM_1000_SX: if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); } else { BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); } break; default: return (EINVAL); } return (0); } sc->bge_link_evt++; mii = device_get_softc(sc->bge_miibus); if (mii->mii_instance) LIST_FOREACH(miisc, &mii->mii_phys, mii_list) mii_phy_reset(miisc); mii_mediachg(mii); /* * Force an interrupt so that we will call bge_link_upd * if needed and clear any pending link state attention. * Without this we are not getting any further interrupts * for link state changes and thus will not UP the link and * not be able to send in bge_start_locked. The only * way to get things working was to receive a packet and * get an RX intr. * bge_tick should help for fiber cards and we might not * need to do this here if BGE_FLAG_TBI is set but as * we poll for fiber anyway it should not harm. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5700 || sc->bge_flags & BGE_FLAG_5788) BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_SET); else BGE_SETBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_COAL_NOW); return (0); } /* * Report current media status. 
*/ static void bge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct bge_softc *sc = ifp->if_softc; struct mii_data *mii; BGE_LOCK(sc); if (sc->bge_flags & BGE_FLAG_TBI) { ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_TBI_PCS_SYNCHED) ifmr->ifm_status |= IFM_ACTIVE; else { ifmr->ifm_active |= IFM_NONE; BGE_UNLOCK(sc); return; } ifmr->ifm_active |= IFM_1000_SX; if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) ifmr->ifm_active |= IFM_HDX; else ifmr->ifm_active |= IFM_FDX; BGE_UNLOCK(sc); return; } mii = device_get_softc(sc->bge_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; BGE_UNLOCK(sc); } static int bge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct bge_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int flags, mask, error = 0; switch (command) { case SIOCSIFMTU: if (ifr->ifr_mtu < ETHERMIN || ((BGE_IS_JUMBO_CAPABLE(sc)) && ifr->ifr_mtu > BGE_JUMBO_MTU) || ((!BGE_IS_JUMBO_CAPABLE(sc)) && ifr->ifr_mtu > ETHERMTU)) error = EINVAL; else if (ifp->if_mtu != ifr->ifr_mtu) { ifp->if_mtu = ifr->ifr_mtu; ifp->if_drv_flags &= ~IFF_DRV_RUNNING; bge_init(sc); } break; case SIOCSIFFLAGS: BGE_LOCK(sc); if (ifp->if_flags & IFF_UP) { /* * If only the state of the PROMISC flag changed, * then just use the 'set promisc mode' command * instead of reinitializing the entire NIC. Doing * a full re-init means reloading the firmware and * waiting for it to start up, which may take a * second or two. Similarly for ALLMULTI. */ if (ifp->if_drv_flags & IFF_DRV_RUNNING) { flags = ifp->if_flags ^ sc->bge_if_flags; if (flags & IFF_PROMISC) bge_setpromisc(sc); if (flags & IFF_ALLMULTI) bge_setmulti(sc); } else bge_init_locked(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { bge_stop(sc); } } sc->bge_if_flags = ifp->if_flags; BGE_UNLOCK(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: if (ifp->if_drv_flags & IFF_DRV_RUNNING) { BGE_LOCK(sc); bge_setmulti(sc); BGE_UNLOCK(sc); error = 0; } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: if (sc->bge_flags & BGE_FLAG_TBI) { error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia, command); } else { mii = device_get_softc(sc->bge_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); } break; case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ ifp->if_capenable; #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(bge_poll, ifp); if (error) return (error); BGE_LOCK(sc); BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); ifp->if_capenable |= IFCAP_POLLING; BGE_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); /* Enable interrupt even in error case */ BGE_LOCK(sc); BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); bge_writembx(sc, BGE_MBX_IRQ0_LO, 0); ifp->if_capenable &= ~IFCAP_POLLING; BGE_UNLOCK(sc); } } #endif if (mask & IFCAP_HWCSUM) { ifp->if_capenable ^= IFCAP_HWCSUM; if (IFCAP_HWCSUM & ifp->if_capenable && IFCAP_HWCSUM & ifp->if_capabilities) ifp->if_hwassist = BGE_CSUM_FEATURES; else ifp->if_hwassist = 0; #ifdef VLAN_CAPABILITIES VLAN_CAPABILITIES(ifp); #endif } if (mask & IFCAP_VLAN_MTU) { ifp->if_capenable ^= IFCAP_VLAN_MTU; ifp->if_drv_flags &= ~IFF_DRV_RUNNING; bge_init(sc); } if (mask & IFCAP_VLAN_HWTAGGING) { ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; BGE_LOCK(sc); bge_setvlan(sc); BGE_UNLOCK(sc); #ifdef 
VLAN_CAPABILITIES VLAN_CAPABILITIES(ifp); #endif } break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void bge_watchdog(struct bge_softc *sc) { struct ifnet *ifp; BGE_LOCK_ASSERT(sc); if (sc->bge_timer == 0 || --sc->bge_timer) return; ifp = sc->bge_ifp; if_printf(ifp, "watchdog timeout -- resetting\n"); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; bge_init_locked(sc); ifp->if_oerrors++; } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void bge_stop(struct bge_softc *sc) { struct ifnet *ifp; struct ifmedia_entry *ifm; struct mii_data *mii = NULL; int mtmp, itmp; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; if ((sc->bge_flags & BGE_FLAG_TBI) == 0) mii = device_get_softc(sc->bge_miibus); callout_stop(&sc->bge_stat_ch); /* * Disable all of the receiver blocks. */ BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); if (!(BGE_IS_5705_PLUS(sc))) BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); /* * Disable all of the transmit blocks. */ BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); if (!(BGE_IS_5705_PLUS(sc))) BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); /* * Shut down all of the memory managers and related * state machines. */ BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); if (!(BGE_IS_5705_PLUS(sc))) BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); if (!(BGE_IS_5705_PLUS(sc))) { BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); } /* Disable host interrupts. */ BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); bge_writembx(sc, BGE_MBX_IRQ0_LO, 1); /* * Tell firmware we're shutting down. */ bge_stop_fw(sc); bge_sig_pre_reset(sc, BGE_RESET_STOP); bge_reset(sc); bge_sig_legacy(sc, BGE_RESET_STOP); bge_sig_post_reset(sc, BGE_RESET_STOP); /* * Keep the ASF firmware running if up. */ if (sc->bge_asf_mode & ASF_STACKUP) BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); else BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); /* Free the RX lists. */ bge_free_rx_ring_std(sc); /* Free jumbo RX list. */ if (BGE_IS_JUMBO_CAPABLE(sc)) bge_free_rx_ring_jumbo(sc); /* Free TX buffers. */ bge_free_tx_ring(sc); /* * Isolate/power down the PHY, but leave the media selection * unchanged so that things will be put back to normal when * we bring the interface back up. */ if ((sc->bge_flags & BGE_FLAG_TBI) == 0) { itmp = ifp->if_flags; ifp->if_flags |= IFF_UP; /* * If we are called from bge_detach(), mii is already NULL. */ if (mii != NULL) { ifm = mii->mii_media.ifm_cur; mtmp = ifm->ifm_media; ifm->ifm_media = IFM_ETHER | IFM_NONE; mii_mediachg(mii); ifm->ifm_media = mtmp; } ifp->if_flags = itmp; } sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; /* Clear MAC's link state (PHY may still have link UP). 
*/ if (bootverbose && sc->bge_link) if_printf(sc->bge_ifp, "link DOWN\n"); sc->bge_link = 0; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static int bge_shutdown(device_t dev) { struct bge_softc *sc; sc = device_get_softc(dev); BGE_LOCK(sc); bge_stop(sc); bge_reset(sc); BGE_UNLOCK(sc); return (0); } static int bge_suspend(device_t dev) { struct bge_softc *sc; sc = device_get_softc(dev); BGE_LOCK(sc); bge_stop(sc); BGE_UNLOCK(sc); return (0); } static int bge_resume(device_t dev) { struct bge_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); BGE_LOCK(sc); ifp = sc->bge_ifp; if (ifp->if_flags & IFF_UP) { bge_init_locked(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) bge_start_locked(ifp); } BGE_UNLOCK(sc); return (0); } static void bge_link_upd(struct bge_softc *sc) { struct mii_data *mii; uint32_t link, status; BGE_LOCK_ASSERT(sc); /* Clear 'pending link event' flag. */ sc->bge_link_evt = 0; /* * Process link state changes. * Grrr. The link status word in the status block does * not work correctly on the BCM5700 rev AX and BX chips, * according to all available information. Hence, we have * to enable MII interrupts in order to properly obtain * async link changes. Unfortunately, this also means that * we have to read the MAC status register to detect link * changes, thereby adding an additional register access to * the interrupt handler. * * XXX: perhaps link state detection procedure used for * BGE_CHIPID_BCM5700_B2 can be used for others BCM5700 revisions. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5700 && sc->bge_chipid != BGE_CHIPID_BCM5700_B2) { status = CSR_READ_4(sc, BGE_MAC_STS); if (status & BGE_MACSTAT_MI_INTERRUPT) { mii = device_get_softc(sc->bge_miibus); mii_pollstat(mii); if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { sc->bge_link++; if (bootverbose) if_printf(sc->bge_ifp, "link UP\n"); } else if (sc->bge_link && (!(mii->mii_media_status & IFM_ACTIVE) || IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) { sc->bge_link = 0; if (bootverbose) if_printf(sc->bge_ifp, "link DOWN\n"); } /* Clear the interrupt. */ CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_MI_INTERRUPT); bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR); bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, BRGPHY_INTRS); } return; } if (sc->bge_flags & BGE_FLAG_TBI) { status = CSR_READ_4(sc, BGE_MAC_STS); if (status & BGE_MACSTAT_TBI_PCS_SYNCHED) { if (!sc->bge_link) { sc->bge_link++; if (sc->bge_asicrev == BGE_ASICREV_BCM5704) BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_TBI_SEND_CFGS); CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); if (bootverbose) if_printf(sc->bge_ifp, "link UP\n"); if_link_state_change(sc->bge_ifp, LINK_STATE_UP); } } else if (sc->bge_link) { sc->bge_link = 0; if (bootverbose) if_printf(sc->bge_ifp, "link DOWN\n"); if_link_state_change(sc->bge_ifp, LINK_STATE_DOWN); } } else if (CSR_READ_4(sc, BGE_MI_MODE) & BGE_MIMODE_AUTOPOLL) { /* * Some broken BCM chips have BGE_STATFLAG_LINKSTATE_CHANGED bit * in status word always set. Workaround this bug by reading * PHY link status directly. */ link = (CSR_READ_4(sc, BGE_MI_STS) & BGE_MISTS_LINK) ? 
1 : 0; if (link != sc->bge_link || sc->bge_asicrev == BGE_ASICREV_BCM5700) { mii = device_get_softc(sc->bge_miibus); mii_pollstat(mii); if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { sc->bge_link++; if (bootverbose) if_printf(sc->bge_ifp, "link UP\n"); } else if (sc->bge_link && (!(mii->mii_media_status & IFM_ACTIVE) || IFM_SUBTYPE(mii->mii_media_active) == IFM_NONE)) { sc->bge_link = 0; if (bootverbose) if_printf(sc->bge_ifp, "link DOWN\n"); } } } else { /* * Discard link events for MII/GMII controllers * if MI auto-polling is disabled. */ } /* Clear the attention. */ CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED | BGE_MACSTAT_CFG_CHANGED | BGE_MACSTAT_MI_COMPLETE | BGE_MACSTAT_LINK_CHANGED); } #define BGE_SYSCTL_STAT(sc, ctx, desc, parent, node, oid) \ SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, oid, CTLTYPE_UINT|CTLFLAG_RD, \ sc, offsetof(struct bge_stats, node), bge_sysctl_stats, "IU", \ desc) static void bge_add_sysctls(struct bge_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children, *schildren; struct sysctl_oid *tree; ctx = device_get_sysctl_ctx(sc->bge_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->bge_dev)); #ifdef BGE_REGISTER_DEBUG SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "debug_info", CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_debug_info, "I", "Debug Information"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "reg_read", CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_reg_read, "I", "Register Read"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "mem_read", CTLTYPE_INT | CTLFLAG_RW, sc, 0, bge_sysctl_mem_read, "I", "Memory Read"); #endif if (BGE_IS_5705_PLUS(sc)) return; tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD, NULL, "BGE Statistics"); schildren = children = SYSCTL_CHILDREN(tree); BGE_SYSCTL_STAT(sc, ctx, "Frames Dropped Due To Filters", children, COSFramesDroppedDueToFilters, "FramesDroppedDueToFilters"); BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write Queue Full", children, nicDmaWriteQueueFull, "DmaWriteQueueFull"); BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Write High Priority Queue Full", children, nicDmaWriteHighPriQueueFull, "DmaWriteHighPriQueueFull"); BGE_SYSCTL_STAT(sc, ctx, "NIC No More RX Buffer Descriptors", children, nicNoMoreRxBDs, "NoMoreRxBDs"); BGE_SYSCTL_STAT(sc, ctx, "Discarded Input Frames", children, ifInDiscards, "InputDiscards"); BGE_SYSCTL_STAT(sc, ctx, "Input Errors", children, ifInErrors, "InputErrors"); BGE_SYSCTL_STAT(sc, ctx, "NIC Recv Threshold Hit", children, nicRecvThresholdHit, "RecvThresholdHit"); BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read Queue Full", children, nicDmaReadQueueFull, "DmaReadQueueFull"); BGE_SYSCTL_STAT(sc, ctx, "NIC DMA Read High Priority Queue Full", children, nicDmaReadHighPriQueueFull, "DmaReadHighPriQueueFull"); BGE_SYSCTL_STAT(sc, ctx, "NIC Send Data Complete Queue Full", children, nicSendDataCompQueueFull, "SendDataCompQueueFull"); BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Set Send Producer Index", children, nicRingSetSendProdIndex, "RingSetSendProdIndex"); BGE_SYSCTL_STAT(sc, ctx, "NIC Ring Status Update", children, nicRingStatusUpdate, "RingStatusUpdate"); BGE_SYSCTL_STAT(sc, ctx, "NIC Interrupts", children, nicInterrupts, "Interrupts"); BGE_SYSCTL_STAT(sc, ctx, "NIC Avoided Interrupts", children, nicAvoidedInterrupts, "AvoidedInterrupts"); BGE_SYSCTL_STAT(sc, ctx, "NIC Send Threshold Hit", children, nicSendThresholdHit, "SendThresholdHit"); tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "rx", CTLFLAG_RD, NULL, "BGE RX 
Statistics"); children = SYSCTL_CHILDREN(tree); BGE_SYSCTL_STAT(sc, ctx, "Inbound Octets", children, rxstats.ifHCInOctets, "Octets"); BGE_SYSCTL_STAT(sc, ctx, "Fragments", children, rxstats.etherStatsFragments, "Fragments"); BGE_SYSCTL_STAT(sc, ctx, "Inbound Unicast Packets", children, rxstats.ifHCInUcastPkts, "UcastPkts"); BGE_SYSCTL_STAT(sc, ctx, "Inbound Multicast Packets", children, rxstats.ifHCInMulticastPkts, "MulticastPkts"); BGE_SYSCTL_STAT(sc, ctx, "FCS Errors", children, rxstats.dot3StatsFCSErrors, "FCSErrors"); BGE_SYSCTL_STAT(sc, ctx, "Alignment Errors", children, rxstats.dot3StatsAlignmentErrors, "AlignmentErrors"); BGE_SYSCTL_STAT(sc, ctx, "XON Pause Frames Received", children, rxstats.xonPauseFramesReceived, "xonPauseFramesReceived"); BGE_SYSCTL_STAT(sc, ctx, "XOFF Pause Frames Received", children, rxstats.xoffPauseFramesReceived, "xoffPauseFramesReceived"); BGE_SYSCTL_STAT(sc, ctx, "MAC Control Frames Received", children, rxstats.macControlFramesReceived, "ControlFramesReceived"); BGE_SYSCTL_STAT(sc, ctx, "XOFF State Entered", children, rxstats.xoffStateEntered, "xoffStateEntered"); BGE_SYSCTL_STAT(sc, ctx, "Frames Too Long", children, rxstats.dot3StatsFramesTooLong, "FramesTooLong"); BGE_SYSCTL_STAT(sc, ctx, "Jabbers", children, rxstats.etherStatsJabbers, "Jabbers"); BGE_SYSCTL_STAT(sc, ctx, "Undersized Packets", children, rxstats.etherStatsUndersizePkts, "UndersizePkts"); BGE_SYSCTL_STAT(sc, ctx, "Inbound Range Length Errors", children, rxstats.inRangeLengthError, "inRangeLengthError"); BGE_SYSCTL_STAT(sc, ctx, "Outbound Range Length Errors", children, rxstats.outRangeLengthError, "outRangeLengthError"); tree = SYSCTL_ADD_NODE(ctx, schildren, OID_AUTO, "tx", CTLFLAG_RD, NULL, "BGE TX Statistics"); children = SYSCTL_CHILDREN(tree); BGE_SYSCTL_STAT(sc, ctx, "Outbound Octets", children, txstats.ifHCOutOctets, "Octets"); BGE_SYSCTL_STAT(sc, ctx, "TX Collisions", children, txstats.etherStatsCollisions, "Collisions"); BGE_SYSCTL_STAT(sc, ctx, "XON Sent", children, txstats.outXonSent, "XonSent"); BGE_SYSCTL_STAT(sc, ctx, "XOFF Sent", children, txstats.outXoffSent, "XoffSent"); BGE_SYSCTL_STAT(sc, ctx, "Flow Control Done", children, txstats.flowControlDone, "flowControlDone"); BGE_SYSCTL_STAT(sc, ctx, "Internal MAC TX errors", children, txstats.dot3StatsInternalMacTransmitErrors, "InternalMacTransmitErrors"); BGE_SYSCTL_STAT(sc, ctx, "Single Collision Frames", children, txstats.dot3StatsSingleCollisionFrames, "SingleCollisionFrames"); BGE_SYSCTL_STAT(sc, ctx, "Multiple Collision Frames", children, txstats.dot3StatsMultipleCollisionFrames, "MultipleCollisionFrames"); BGE_SYSCTL_STAT(sc, ctx, "Deferred Transmissions", children, txstats.dot3StatsDeferredTransmissions, "DeferredTransmissions"); BGE_SYSCTL_STAT(sc, ctx, "Excessive Collisions", children, txstats.dot3StatsExcessiveCollisions, "ExcessiveCollisions"); BGE_SYSCTL_STAT(sc, ctx, "Late Collisions", children, txstats.dot3StatsLateCollisions, "LateCollisions"); BGE_SYSCTL_STAT(sc, ctx, "Outbound Unicast Packets", children, txstats.ifHCOutUcastPkts, "UcastPkts"); BGE_SYSCTL_STAT(sc, ctx, "Outbound Multicast Packets", children, txstats.ifHCOutMulticastPkts, "MulticastPkts"); BGE_SYSCTL_STAT(sc, ctx, "Outbound Broadcast Packets", children, txstats.ifHCOutBroadcastPkts, "BroadcastPkts"); BGE_SYSCTL_STAT(sc, ctx, "Carrier Sense Errors", children, txstats.dot3StatsCarrierSenseErrors, "CarrierSenseErrors"); BGE_SYSCTL_STAT(sc, ctx, "Outbound Discards", children, txstats.ifOutDiscards, "Discards"); BGE_SYSCTL_STAT(sc, ctx, "Outbound 
Errors", children, txstats.ifOutErrors, "Errors"); } static int bge_sysctl_stats(SYSCTL_HANDLER_ARGS) { struct bge_softc *sc; uint32_t result; int offset; sc = (struct bge_softc *)arg1; offset = arg2; result = CSR_READ_4(sc, BGE_MEMWIN_START + BGE_STATS_BLOCK + offset + offsetof(bge_hostaddr, bge_addr_lo)); return (sysctl_handle_int(oidp, &result, 0, req)); } #ifdef BGE_REGISTER_DEBUG static int bge_sysctl_debug_info(SYSCTL_HANDLER_ARGS) { struct bge_softc *sc; uint16_t *sbdata; int error; int result; int i, j; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || (req->newptr == NULL)) return (error); if (result == 1) { sc = (struct bge_softc *)arg1; sbdata = (uint16_t *)sc->bge_ldata.bge_status_block; printf("Status Block:\n"); for (i = 0x0; i < (BGE_STATUS_BLK_SZ / 4); ) { printf("%06x:", i); for (j = 0; j < 8; j++) { printf(" %04x", sbdata[i]); i += 4; } printf("\n"); } printf("Registers:\n"); for (i = 0x800; i < 0xA00; ) { printf("%06x:", i); for (j = 0; j < 8; j++) { printf(" %08x", CSR_READ_4(sc, i)); i += 4; } printf("\n"); } printf("Hardware Flags:\n"); if (BGE_IS_575X_PLUS(sc)) printf(" - 575X Plus\n"); if (BGE_IS_5705_PLUS(sc)) printf(" - 5705 Plus\n"); if (BGE_IS_5714_FAMILY(sc)) printf(" - 5714 Family\n"); if (BGE_IS_5700_FAMILY(sc)) printf(" - 5700 Family\n"); if (sc->bge_flags & BGE_FLAG_JUMBO) printf(" - Supports Jumbo Frames\n"); if (sc->bge_flags & BGE_FLAG_PCIX) printf(" - PCI-X Bus\n"); if (sc->bge_flags & BGE_FLAG_PCIE) printf(" - PCI Express Bus\n"); if (sc->bge_flags & BGE_FLAG_NO_3LED) printf(" - No 3 LEDs\n"); if (sc->bge_flags & BGE_FLAG_RX_ALIGNBUG) printf(" - RX Alignment Bug\n"); } return (error); } static int bge_sysctl_reg_read(SYSCTL_HANDLER_ARGS) { struct bge_softc *sc; int error; uint16_t result; uint32_t val; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || (req->newptr == NULL)) return (error); if (result < 0x8000) { sc = (struct bge_softc *)arg1; val = CSR_READ_4(sc, result); printf("reg 0x%06X = 0x%08X\n", result, val); } return (error); } static int bge_sysctl_mem_read(SYSCTL_HANDLER_ARGS) { struct bge_softc *sc; int error; uint16_t result; uint32_t val; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || (req->newptr == NULL)) return (error); if (result < 0x8000) { sc = (struct bge_softc *)arg1; val = bge_readmem_ind(sc, result); printf("mem 0x%06X = 0x%08X\n", result, val); } return (error); } #endif static int bge_get_eaddr_fw(struct bge_softc *sc, uint8_t ether_addr[]) { if (sc->bge_flags & BGE_FLAG_EADDR) return (1); #ifdef __sparc64__ OF_getetheraddr(sc->bge_dev, ether_addr); return (0); #endif return (1); } static int bge_get_eaddr_mem(struct bge_softc *sc, uint8_t ether_addr[]) { uint32_t mac_addr; mac_addr = bge_readmem_ind(sc, 0x0c14); if ((mac_addr >> 16) == 0x484b) { ether_addr[0] = (uint8_t)(mac_addr >> 8); ether_addr[1] = (uint8_t)mac_addr; mac_addr = bge_readmem_ind(sc, 0x0c18); ether_addr[2] = (uint8_t)(mac_addr >> 24); ether_addr[3] = (uint8_t)(mac_addr >> 16); ether_addr[4] = (uint8_t)(mac_addr >> 8); ether_addr[5] = (uint8_t)mac_addr; return (0); } return (1); } static int bge_get_eaddr_nvram(struct bge_softc *sc, uint8_t ether_addr[]) { int mac_offset = BGE_EE_MAC_OFFSET; if (sc->bge_asicrev == BGE_ASICREV_BCM5906) mac_offset = BGE_EE_MAC_OFFSET_5906; return (bge_read_nvram(sc, ether_addr, mac_offset + 2, ETHER_ADDR_LEN)); } static int bge_get_eaddr_eeprom(struct bge_softc *sc, uint8_t ether_addr[]) { if (sc->bge_asicrev == BGE_ASICREV_BCM5906) return (1); 
return (bge_read_eeprom(sc, ether_addr, BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)); } static int bge_get_eaddr(struct bge_softc *sc, uint8_t eaddr[]) { static const bge_eaddr_fcn_t bge_eaddr_funcs[] = { /* NOTE: Order is critical */ bge_get_eaddr_fw, bge_get_eaddr_mem, bge_get_eaddr_nvram, bge_get_eaddr_eeprom, NULL }; const bge_eaddr_fcn_t *func; for (func = bge_eaddr_funcs; *func != NULL; ++func) { if ((*func)(sc, eaddr) == 0) break; } return (*func == NULL ? ENXIO : 0); } Index: head/sys/dev/dc/if_dc.c =================================================================== --- head/sys/dev/dc/if_dc.c (revision 193095) +++ head/sys/dev/dc/if_dc.c (revision 193096) @@ -1,3803 +1,3809 @@ /*- * Copyright (c) 1997, 1998, 1999 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * DEC "tulip" clone ethernet driver. Supports the DEC/Intel 21143 * series chips and several workalikes including the following: * * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com) * Macronix/Lite-On 82c115 PNIC II (www.macronix.com) * Lite-On 82c168/82c169 PNIC (www.litecom.com) * ASIX Electronics AX88140A (www.asix.com.tw) * ASIX Electronics AX88141 (www.asix.com.tw) * ADMtek AL981 (www.admtek.com.tw) * ADMtek AN985 (www.admtek.com.tw) * Netgear FA511 (www.netgear.com) Appears to be rebadged ADMTek AN985 * Davicom DM9100, DM9102, DM9102A (www.davicom8.com) * Accton EN1217 (www.accton.com) * Xircom X3201 (www.xircom.com) * Abocom FE2500 * Conexant LANfinity (www.conexant.com) * 3Com OfficeConnect 10/100B 3CSOHO100B (www.3com.com) * * Datasheets for the 21143 are available at developer.intel.com. * Datasheets for the clone parts can be found at their respective sites. * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.) 
* The PNIC II is essentially a Macronix 98715A chip; the only difference * worth noting is that its multicast hash table is only 128 bits wide * instead of 512. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The Intel 21143 is the successor to the DEC 21140. It is basically * the same as the 21140 but with a few new features. The 21143 supports * four kinds of media attachments: * * o MII port, for 10Mbps and 100Mbps support and NWAY * autonegotiation provided by an external PHY. * o SYM port, for symbol mode 100Mbps support. * o 10baseT port. * o AUI/BNC port. * * The 100Mbps SYM port and 10baseT port can be used together in * combination with the internal NWAY support to create a 10/100 * autosensing configuration. * * Note that not all tulip workalikes are handled in this driver: we only * deal with those which are relatively well behaved. The Winbond is * handled separately due to its different register offsets and the * special handling needed for its various bugs. The PNIC is handled * here, but I'm not thrilled about it. * * All of the workalike chips use some form of MII transceiver support * with the exception of the Macronix chips, which also have a SYM port. * The ASIX AX88140A is also documented to have a SYM port, but all * the cards I've seen use an MII transceiver, probably because the * AX88140A doesn't support internal NWAY. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define DC_USEIOSPACE #include #ifdef __sparc64__ #include #include #endif MODULE_DEPEND(dc, pci, 1, 1, 1); MODULE_DEPEND(dc, ether, 1, 1, 1); MODULE_DEPEND(dc, miibus, 1, 1, 1); /* * "device miibus" is required in kernel config. See GENERIC if you get * errors here. */ #include "miibus_if.h" /* * Various supported device vendors/types and their names.
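The dc_devs[] table that follows drives probing: the probe routine walks it and reports the first entry whose packed vendor/device word matches the card, with the revision field used to prefer more specific entries. A compilable sketch of the basic lookup, ignoring the revision refinement (DEMO_DEVID and demo_devtype are invented stand-ins for DC_DEVID and the driver's dc_devtype; the two table rows reuse real IDs from the list below):

#include <stdint.h>
#include <stdio.h>

#define DEMO_DEVID(vendor, device)	(((uint32_t)(device) << 16) | (vendor))

struct demo_type {
	uint32_t	id;
	const char	*name;
};

static const struct demo_type demo_devs[] = {
	{ DEMO_DEVID(0x1011, 0x0019), "Intel 21143 10/100BaseTX" },
	{ DEMO_DEVID(0x1282, 0x9102), "Davicom DM9102 10/100BaseTX" },
	{ 0, NULL }
};

/* Walk the table until the packed vendor/device word matches. */
static const char *
demo_devtype(uint16_t vendor, uint16_t device)
{
	const struct demo_type *t;

	for (t = demo_devs; t->name != NULL; t++)
		if (t->id == DEMO_DEVID(vendor, device))
			return (t->name);
	return (NULL);
}

int
main(void)
{
	printf("%s\n", demo_devtype(0x1011, 0x0019));
	return (0);
}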
*/ static const struct dc_type dc_devs[] = { { DC_DEVID(DC_VENDORID_DEC, DC_DEVICEID_21143), 0, "Intel 21143 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9009), 0, "Davicom DM9009 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9100), 0, "Davicom DM9100 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102), DC_REVISION_DM9102A, "Davicom DM9102A 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102), 0, "Davicom DM9102 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AL981), 0, "ADMtek AL981 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AN985), 0, "ADMtek AN985 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9511), 0, "ADMtek ADM9511 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9513), 0, "ADMtek ADM9513 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_FA511), 0, "Netgear FA511 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_ASIX, DC_DEVICEID_AX88140A), DC_REVISION_88141, "ASIX AX88141 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_ASIX, DC_DEVICEID_AX88140A), 0, "ASIX AX88140A 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98713), DC_REVISION_98713A, "Macronix 98713A 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98713), 0, "Macronix 98713 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_CP, DC_DEVICEID_98713_CP), DC_REVISION_98713A, "Compex RL100-TX 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_CP, DC_DEVICEID_98713_CP), 0, "Compex RL100-TX 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_987x5), DC_REVISION_98725, "Macronix 98725 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_987x5), DC_REVISION_98715AEC_C, "Macronix 98715AEC-C 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_987x5), 0, "Macronix 98715/98715A 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98727), 0, "Macronix 98727/98732 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C115), 0, "LC82C115 PNIC II 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C168), DC_REVISION_82C169, "82c169 PNIC 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C168), 0, "82c168 PNIC 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_ACCTON, DC_DEVICEID_EN1217), 0, "Accton EN1217 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_ACCTON, DC_DEVICEID_EN2242), 0, "Accton EN2242 MiniPCI 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_XIRCOM, DC_DEVICEID_X3201), 0, "Xircom X3201 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_DLINK, DC_DEVICEID_DRP32TXD), 0, "Neteasy DRP-32TXD Cardbus 10/100" }, { DC_DEVID(DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500), 0, "Abocom FE2500 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500MX), 0, "Abocom FE2500MX 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_CONEXANT, DC_DEVICEID_RS7112), 0, "Conexant LANfinity MiniPCI 10/100BaseTX" }, { DC_DEVID(DC_VENDORID_HAWKING, DC_DEVICEID_HAWKING_PN672TX), 0, "Hawking CB102 CardBus 10/100" }, { DC_DEVID(DC_VENDORID_PLANEX, DC_DEVICEID_FNW3602T), 0, "PlaneX FNW-3602-T CardBus 10/100" }, { DC_DEVID(DC_VENDORID_3COM, DC_DEVICEID_3CSOHOB), 0, "3Com OfficeConnect 10/100B" }, { DC_DEVID(DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN120), 0, "Microsoft MN-120 CardBus 10/100" }, { DC_DEVID(DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN130), 0, "Microsoft MN-130 10/100" }, { DC_DEVID(DC_VENDORID_LINKSYS, DC_DEVICEID_PCMPC200_AB08), 0, "Linksys PCMPC200 CardBus 10/100" }, { DC_DEVID(DC_VENDORID_LINKSYS, DC_DEVICEID_PCMPC200_AB09), 0, "Linksys PCMPC200 CardBus 10/100" }, { 0, 0, NULL } }; static int dc_probe(device_t); static int dc_attach(device_t); static int 
dc_detach(device_t); static int dc_suspend(device_t); static int dc_resume(device_t); static const struct dc_type *dc_devtype(device_t); static int dc_newbuf(struct dc_softc *, int, int); static int dc_encap(struct dc_softc *, struct mbuf **); static void dc_pnic_rx_bug_war(struct dc_softc *, int); static int dc_rx_resync(struct dc_softc *); -static void dc_rxeof(struct dc_softc *); +static int dc_rxeof(struct dc_softc *); static void dc_txeof(struct dc_softc *); static void dc_tick(void *); static void dc_tx_underrun(struct dc_softc *); static void dc_intr(void *); static void dc_start(struct ifnet *); static void dc_start_locked(struct ifnet *); static int dc_ioctl(struct ifnet *, u_long, caddr_t); static void dc_init(void *); static void dc_init_locked(struct dc_softc *); static void dc_stop(struct dc_softc *); static void dc_watchdog(void *); static int dc_shutdown(device_t); static int dc_ifmedia_upd(struct ifnet *); static void dc_ifmedia_sts(struct ifnet *, struct ifmediareq *); static void dc_delay(struct dc_softc *); static void dc_eeprom_idle(struct dc_softc *); static void dc_eeprom_putbyte(struct dc_softc *, int); static void dc_eeprom_getword(struct dc_softc *, int, u_int16_t *); static void dc_eeprom_getword_pnic(struct dc_softc *, int, u_int16_t *); static void dc_eeprom_getword_xircom(struct dc_softc *, int, u_int16_t *); static void dc_eeprom_width(struct dc_softc *); static void dc_read_eeprom(struct dc_softc *, caddr_t, int, int, int); static void dc_mii_writebit(struct dc_softc *, int); static int dc_mii_readbit(struct dc_softc *); static void dc_mii_sync(struct dc_softc *); static void dc_mii_send(struct dc_softc *, u_int32_t, int); static int dc_mii_readreg(struct dc_softc *, struct dc_mii_frame *); static int dc_mii_writereg(struct dc_softc *, struct dc_mii_frame *); static int dc_miibus_readreg(device_t, int, int); static int dc_miibus_writereg(device_t, int, int, int); static void dc_miibus_statchg(device_t); static void dc_miibus_mediainit(device_t); static void dc_setcfg(struct dc_softc *, int); static uint32_t dc_mchash_le(struct dc_softc *, const uint8_t *); static uint32_t dc_mchash_be(const uint8_t *); static void dc_setfilt_21143(struct dc_softc *); static void dc_setfilt_asix(struct dc_softc *); static void dc_setfilt_admtek(struct dc_softc *); static void dc_setfilt_xircom(struct dc_softc *); static void dc_setfilt(struct dc_softc *); static void dc_reset(struct dc_softc *); static int dc_list_rx_init(struct dc_softc *); static int dc_list_tx_init(struct dc_softc *); static void dc_read_srom(struct dc_softc *, int); static void dc_parse_21143_srom(struct dc_softc *); static void dc_decode_leaf_sia(struct dc_softc *, struct dc_eblock_sia *); static void dc_decode_leaf_mii(struct dc_softc *, struct dc_eblock_mii *); static void dc_decode_leaf_sym(struct dc_softc *, struct dc_eblock_sym *); static void dc_apply_fixup(struct dc_softc *, int); #ifdef DC_USEIOSPACE #define DC_RES SYS_RES_IOPORT #define DC_RID DC_PCI_CFBIO #else #define DC_RES SYS_RES_MEMORY #define DC_RID DC_PCI_CFBMA #endif static device_method_t dc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, dc_probe), DEVMETHOD(device_attach, dc_attach), DEVMETHOD(device_detach, dc_detach), DEVMETHOD(device_suspend, dc_suspend), DEVMETHOD(device_resume, dc_resume), DEVMETHOD(device_shutdown, dc_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, 
dc_miibus_readreg), DEVMETHOD(miibus_writereg, dc_miibus_writereg), DEVMETHOD(miibus_statchg, dc_miibus_statchg), DEVMETHOD(miibus_mediainit, dc_miibus_mediainit), { 0, 0 } }; static driver_t dc_driver = { "dc", dc_methods, sizeof(struct dc_softc) }; static devclass_t dc_devclass; DRIVER_MODULE(dc, pci, dc_driver, dc_devclass, 0, 0); DRIVER_MODULE(miibus, dc, miibus_driver, miibus_devclass, 0, 0); #define DC_SETBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x)) #define DC_CLRBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x)) #define SIO_SET(x) DC_SETBIT(sc, DC_SIO, (x)) #define SIO_CLR(x) DC_CLRBIT(sc, DC_SIO, (x)) static void dc_delay(struct dc_softc *sc) { int idx; for (idx = (300 / 33) + 1; idx > 0; idx--) CSR_READ_4(sc, DC_BUSCTL); } static void dc_eeprom_width(struct dc_softc *sc) { int i; /* Force EEPROM to idle state. */ dc_eeprom_idle(sc); /* Enter EEPROM access mode. */ CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); dc_delay(sc); DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); dc_delay(sc); for (i = 3; i--;) { if (6 & (1 << i)) DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); else DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); } for (i = 1; i <= 12; i++) { DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); if (!(CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)) { DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); break; } DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); } /* Turn off EEPROM access mode. */ dc_eeprom_idle(sc); if (i < 4 || i > 12) sc->dc_romwidth = 6; else sc->dc_romwidth = i; /* Enter EEPROM access mode. */ CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); dc_delay(sc); DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); dc_delay(sc); /* Turn off EEPROM access mode. */ dc_eeprom_idle(sc); } static void dc_eeprom_idle(struct dc_softc *sc) { int i; CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); dc_delay(sc); DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); dc_delay(sc); for (i = 0; i < 25; i++) { DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); } DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS); dc_delay(sc); CSR_WRITE_4(sc, DC_SIO, 0x00000000); } /* * Send a read command and address to the EEPROM, check for ACK. */ static void dc_eeprom_putbyte(struct dc_softc *sc, int addr) { int d, i; d = DC_EECMD_READ >> 6; for (i = 3; i--; ) { if (d & (1 << i)) DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); else DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); } /* * Feed in each bit and strobe the clock. */ for (i = sc->dc_romwidth; i--;) { if (addr & (1 << i)) { SIO_SET(DC_SIO_EE_DATAIN); } else { SIO_CLR(DC_SIO_EE_DATAIN); } dc_delay(sc); SIO_SET(DC_SIO_EE_CLK); dc_delay(sc); SIO_CLR(DC_SIO_EE_CLK); dc_delay(sc); } } /* * Read a word of data stored in the EEPROM at address 'addr.' * The PNIC 82c168/82c169 has its own non-standard way to read * the EEPROM. 
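The getword routines that follow all speak a MicroWire-style serial protocol: a read opcode and an address are shifted out MSB-first (the address width is whatever dc_eeprom_width() probed into dc_romwidth), then 16 data bits are clocked back in MSB-first. A compilable model of the transfer's shape (demo_eeprom_getword and its toy array are invented; on real hardware each bit here is one DC_SIO clock-and-sample cycle through the bit-bang helpers):

#include <stdint.h>
#include <stdio.h>

/* Toy 64x16 EEPROM contents. */
static const uint16_t eeprom[64] = { [10] = 0xBEEF };

static int romwidth = 6;	/* address bits, as probed at attach time */

static uint16_t
demo_eeprom_getword(int addr)
{
	uint16_t word = 0;
	int bit, i;

	/* Opcode and address go out MSB-first, one bit per clock. */
	for (i = romwidth - 1; i >= 0; i--) {
		bit = (addr >> i) & 1;	/* would be driven onto EE_DATAIN */
		(void)bit;
	}
	/* The part answers with the word, MSB first, on EE_DATAOUT. */
	for (i = 0x8000; i; i >>= 1)
		if (eeprom[addr & 63] & i)
			word |= i;
	return (word);
}

int
main(void)
{
	printf("word at 10: 0x%04X\n", demo_eeprom_getword(10));
	return (0);
}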
*/ static void dc_eeprom_getword_pnic(struct dc_softc *sc, int addr, u_int16_t *dest) { int i; u_int32_t r; CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ | addr); for (i = 0; i < DC_TIMEOUT; i++) { DELAY(1); r = CSR_READ_4(sc, DC_SIO); if (!(r & DC_PN_SIOCTL_BUSY)) { *dest = (u_int16_t)(r & 0xFFFF); return; } } } /* * Read a word of data stored in the EEPROM at address 'addr.' * The Xircom X3201 has its own non-standard way to read * the EEPROM, too. */ static void dc_eeprom_getword_xircom(struct dc_softc *sc, int addr, u_int16_t *dest) { SIO_SET(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ); addr *= 2; CSR_WRITE_4(sc, DC_ROM, addr | 0x160); *dest = (u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff; addr += 1; CSR_WRITE_4(sc, DC_ROM, addr | 0x160); *dest |= ((u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff) << 8; SIO_CLR(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ); } /* * Read a word of data stored in the EEPROM at address 'addr.' */ static void dc_eeprom_getword(struct dc_softc *sc, int addr, u_int16_t *dest) { int i; u_int16_t word = 0; /* Force EEPROM to idle state. */ dc_eeprom_idle(sc); /* Enter EEPROM access mode. */ CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); dc_delay(sc); DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); dc_delay(sc); /* * Send address of word we want to read. */ dc_eeprom_putbyte(sc, addr); /* * Start reading bits from EEPROM. */ for (i = 0x8000; i; i >>= 1) { SIO_SET(DC_SIO_EE_CLK); dc_delay(sc); if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT) word |= i; dc_delay(sc); SIO_CLR(DC_SIO_EE_CLK); dc_delay(sc); } /* Turn off EEPROM access mode. */ dc_eeprom_idle(sc); *dest = word; } /* * Read a sequence of words from the EEPROM. */ static void dc_read_eeprom(struct dc_softc *sc, caddr_t dest, int off, int cnt, int be) { int i; u_int16_t word = 0, *ptr; for (i = 0; i < cnt; i++) { if (DC_IS_PNIC(sc)) dc_eeprom_getword_pnic(sc, off + i, &word); else if (DC_IS_XIRCOM(sc)) dc_eeprom_getword_xircom(sc, off + i, &word); else dc_eeprom_getword(sc, off + i, &word); ptr = (u_int16_t *)(dest + (i * 2)); if (be) *ptr = be16toh(word); else *ptr = le16toh(word); } } /* * The following two routines are taken from the Macronix 98713 * Application Notes pp.19-21. */ /* * Write a bit to the MII bus. */ static void dc_mii_writebit(struct dc_softc *sc, int bit) { uint32_t reg; reg = DC_SIO_ROMCTL_WRITE | (bit != 0 ? DC_SIO_MII_DATAOUT : 0); CSR_WRITE_4(sc, DC_SIO, reg); CSR_BARRIER_4(sc, DC_SIO, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); DELAY(1); CSR_WRITE_4(sc, DC_SIO, reg | DC_SIO_MII_CLK); CSR_BARRIER_4(sc, DC_SIO, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); DELAY(1); CSR_WRITE_4(sc, DC_SIO, reg); CSR_BARRIER_4(sc, DC_SIO, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); DELAY(1); } /* * Read a bit from the MII bus. */ static int dc_mii_readbit(struct dc_softc *sc) { uint32_t reg; reg = DC_SIO_ROMCTL_READ | DC_SIO_MII_DIR; CSR_WRITE_4(sc, DC_SIO, reg); CSR_BARRIER_4(sc, DC_SIO, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); DELAY(1); (void)CSR_READ_4(sc, DC_SIO); CSR_WRITE_4(sc, DC_SIO, reg | DC_SIO_MII_CLK); CSR_BARRIER_4(sc, DC_SIO, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); DELAY(1); CSR_WRITE_4(sc, DC_SIO, reg); CSR_BARRIER_4(sc, DC_SIO, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); DELAY(1); if (CSR_READ_4(sc, DC_SIO) & DC_SIO_MII_DATAIN) return (1); return (0); } /* * Sync the PHYs by setting data bit and strobing the clock 32 times. 
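dc_mii_readreg() below bit-bangs a standard IEEE 802.3 clause-22 management frame: the 32-bit preamble written by dc_mii_sync(), a 01 start delimiter, a 10 read opcode, five PHY-address bits, five register-address bits, then a turnaround and 16 data bits driven by the PHY. A compilable sketch that prints the header bits with the same MSB-first loop shape as dc_mii_send() (demo_mii_send is an invented stand-in; the 0x1/0x2 values mirror DC_MII_STARTDELIM and DC_MII_READOP as the driver uses them):

#include <stdint.h>
#include <stdio.h>

/* Emit 'cnt' bits of 'bits' MSB-first, one per clock; here each bit
 * is just printed instead of toggled on the SIO pins. */
static void
demo_mii_send(uint32_t bits, int cnt)
{
	uint32_t i;

	for (i = 1u << (cnt - 1); i; i >>= 1)
		putchar(bits & i ? '1' : '0');
}

int
main(void)
{
	uint32_t phy = 1, reg = 2;	/* e.g. MII_PHYIDR1 on PHY 1 */

	demo_mii_send(0xFFFFFFFF, 32);	/* preamble, as in dc_mii_sync() */
	demo_mii_send(0x1, 2);		/* start delimiter, 01 */
	demo_mii_send(0x2, 2);		/* read opcode, 10 */
	demo_mii_send(phy, 5);		/* PHY address */
	demo_mii_send(reg, 5);		/* register address */
	putchar('\n');			/* PHY then drives TA + 16 data bits */
	return (0);
}

The turnaround bit is why the read path checks dc_mii_readbit() before collecting data: if the PHY fails to drive the bus low there, the routine still clocks 16 dummy cycles to keep the PHY state machine in sync.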
*/ static void dc_mii_sync(struct dc_softc *sc) { int i; CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE); CSR_BARRIER_4(sc, DC_SIO, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); DELAY(1); for (i = 0; i < 32; i++) dc_mii_writebit(sc, 1); } /* * Clock a series of bits through the MII. */ static void dc_mii_send(struct dc_softc *sc, u_int32_t bits, int cnt) { int i; for (i = (0x1 << (cnt - 1)); i; i >>= 1) dc_mii_writebit(sc, bits & i); } /* * Read a PHY register through the MII. */ static int dc_mii_readreg(struct dc_softc *sc, struct dc_mii_frame *frame) { int i; /* * Set up frame for RX. */ frame->mii_stdelim = DC_MII_STARTDELIM; frame->mii_opcode = DC_MII_READOP; /* * Sync the PHYs. */ dc_mii_sync(sc); /* * Send command/address info. */ dc_mii_send(sc, frame->mii_stdelim, 2); dc_mii_send(sc, frame->mii_opcode, 2); dc_mii_send(sc, frame->mii_phyaddr, 5); dc_mii_send(sc, frame->mii_regaddr, 5); /* * Now try reading data bits. If the turnaround failed, we still * need to clock through 16 cycles to keep the PHY(s) in sync. */ frame->mii_turnaround = dc_mii_readbit(sc); if (frame->mii_turnaround != 0) { for (i = 0; i < 16; i++) dc_mii_readbit(sc); goto fail; } for (i = 0x8000; i; i >>= 1) { if (dc_mii_readbit(sc)) frame->mii_data |= i; } fail: /* Clock the idle bits. */ dc_mii_writebit(sc, 0); dc_mii_writebit(sc, 0); if (frame->mii_turnaround != 0) return (1); return (0); } /* * Write to a PHY register through the MII. */ static int dc_mii_writereg(struct dc_softc *sc, struct dc_mii_frame *frame) { /* * Set up frame for TX. */ frame->mii_stdelim = DC_MII_STARTDELIM; frame->mii_opcode = DC_MII_WRITEOP; frame->mii_turnaround = DC_MII_TURNAROUND; /* * Sync the PHYs. */ dc_mii_sync(sc); dc_mii_send(sc, frame->mii_stdelim, 2); dc_mii_send(sc, frame->mii_opcode, 2); dc_mii_send(sc, frame->mii_phyaddr, 5); dc_mii_send(sc, frame->mii_regaddr, 5); dc_mii_send(sc, frame->mii_turnaround, 2); dc_mii_send(sc, frame->mii_data, 16); /* Clock the idle bits. */ dc_mii_writebit(sc, 0); dc_mii_writebit(sc, 0); return (0); } static int dc_miibus_readreg(device_t dev, int phy, int reg) { struct dc_mii_frame frame; struct dc_softc *sc; int i, rval, phy_reg = 0; sc = device_get_softc(dev); bzero(&frame, sizeof(frame)); /* * Note: both the AL981 and AN985 have internal PHYs, * however the AL981 provides direct access to the PHY * registers while the AN985 uses a serial MII interface. * The AN985's MII interface is also buggy in that you * can read from any MII address (0 to 31), but only address 1 * behaves normally. To deal with both cases, we pretend * that the PHY is at MII address 1. */ if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR) return (0); /* * Note: the ukphy probes of the RS7112 report a PHY at * MII address 0 (possibly HomePNA?) and 1 (ethernet), * so we only respond to the correct one. */ if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR) return (0); if (sc->dc_pmode != DC_PMODE_MII) { if (phy == (MII_NPHY - 1)) { switch (reg) { case MII_BMSR: /* * Fake something to make the probe * code think there's a PHY here.
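 * Returning BMSR_MEDIAMASK (the media-capability bits all set) is
 * enough for the miibus probe code to believe a PHY answers at this
 * address.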
*/ return (BMSR_MEDIAMASK); break; case MII_PHYIDR1: if (DC_IS_PNIC(sc)) return (DC_VENDORID_LO); return (DC_VENDORID_DEC); break; case MII_PHYIDR2: if (DC_IS_PNIC(sc)) return (DC_DEVICEID_82C168); return (DC_DEVICEID_21143); break; default: return (0); break; } } else return (0); } if (DC_IS_PNIC(sc)) { CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ | (phy << 23) | (reg << 18)); for (i = 0; i < DC_TIMEOUT; i++) { DELAY(1); rval = CSR_READ_4(sc, DC_PN_MII); if (!(rval & DC_PN_MII_BUSY)) { rval &= 0xFFFF; return (rval == 0xFFFF ? 0 : rval); } } return (0); } if (DC_IS_COMET(sc)) { switch (reg) { case MII_BMCR: phy_reg = DC_AL_BMCR; break; case MII_BMSR: phy_reg = DC_AL_BMSR; break; case MII_PHYIDR1: phy_reg = DC_AL_VENID; break; case MII_PHYIDR2: phy_reg = DC_AL_DEVID; break; case MII_ANAR: phy_reg = DC_AL_ANAR; break; case MII_ANLPAR: phy_reg = DC_AL_LPAR; break; case MII_ANER: phy_reg = DC_AL_ANER; break; default: device_printf(dev, "phy_read: bad phy register %x\n", reg); return (0); break; } rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF; if (rval == 0xFFFF) return (0); return (rval); } frame.mii_phyaddr = phy; frame.mii_regaddr = reg; if (sc->dc_type == DC_TYPE_98713) { phy_reg = CSR_READ_4(sc, DC_NETCFG); CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL); } dc_mii_readreg(sc, &frame); if (sc->dc_type == DC_TYPE_98713) CSR_WRITE_4(sc, DC_NETCFG, phy_reg); return (frame.mii_data); } static int dc_miibus_writereg(device_t dev, int phy, int reg, int data) { struct dc_softc *sc; struct dc_mii_frame frame; int i, phy_reg = 0; sc = device_get_softc(dev); bzero(&frame, sizeof(frame)); if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR) return (0); if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR) return (0); if (DC_IS_PNIC(sc)) { CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE | (phy << 23) | (reg << 10) | data); for (i = 0; i < DC_TIMEOUT; i++) { if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY)) break; } return (0); } if (DC_IS_COMET(sc)) { switch (reg) { case MII_BMCR: phy_reg = DC_AL_BMCR; break; case MII_BMSR: phy_reg = DC_AL_BMSR; break; case MII_PHYIDR1: phy_reg = DC_AL_VENID; break; case MII_PHYIDR2: phy_reg = DC_AL_DEVID; break; case MII_ANAR: phy_reg = DC_AL_ANAR; break; case MII_ANLPAR: phy_reg = DC_AL_LPAR; break; case MII_ANER: phy_reg = DC_AL_ANER; break; default: device_printf(dev, "phy_write: bad phy register %x\n", reg); return (0); break; } CSR_WRITE_4(sc, phy_reg, data); return (0); } frame.mii_phyaddr = phy; frame.mii_regaddr = reg; frame.mii_data = data; if (sc->dc_type == DC_TYPE_98713) { phy_reg = CSR_READ_4(sc, DC_NETCFG); CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL); } dc_mii_writereg(sc, &frame); if (sc->dc_type == DC_TYPE_98713) CSR_WRITE_4(sc, DC_NETCFG, phy_reg); return (0); } static void dc_miibus_statchg(device_t dev) { struct dc_softc *sc; struct mii_data *mii; struct ifmedia *ifm; sc = device_get_softc(dev); if (DC_IS_ADMTEK(sc)) return; mii = device_get_softc(sc->dc_miibus); ifm = &mii->mii_media; if (DC_IS_DAVICOM(sc) && IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) { dc_setcfg(sc, ifm->ifm_media); sc->dc_if_media = ifm->ifm_media; } else { dc_setcfg(sc, mii->mii_media_active); sc->dc_if_media = mii->mii_media_active; } } /* * Special support for DM9102A cards with HomePNA PHYs. Note: * with the Davicom DM9102A/DM9801 eval board that I have, it seems * to be impossible to talk to the management interface of the DM9801 * PHY (its MDIO pin is not connected to anything). 
Consequently, * the driver has to just 'know' about the additional mode and deal * with it itself. *sigh* */ static void dc_miibus_mediainit(device_t dev) { struct dc_softc *sc; struct mii_data *mii; struct ifmedia *ifm; int rev; rev = pci_get_revid(dev); sc = device_get_softc(dev); mii = device_get_softc(sc->dc_miibus); ifm = &mii->mii_media; if (DC_IS_DAVICOM(sc) && rev >= DC_REVISION_DM9102A) ifmedia_add(ifm, IFM_ETHER | IFM_HPNA_1, 0, NULL); } #define DC_BITS_512 9 #define DC_BITS_128 7 #define DC_BITS_64 6 static uint32_t dc_mchash_le(struct dc_softc *sc, const uint8_t *addr) { uint32_t crc; /* Compute CRC for the address value. */ crc = ether_crc32_le(addr, ETHER_ADDR_LEN); /* * The hash table on the PNIC II and the MX98715AEC-C/D/E * chips is only 128 bits wide. */ if (sc->dc_flags & DC_128BIT_HASH) return (crc & ((1 << DC_BITS_128) - 1)); /* The hash table on the MX98715BEC is only 64 bits wide. */ if (sc->dc_flags & DC_64BIT_HASH) return (crc & ((1 << DC_BITS_64) - 1)); /* Xircom's hash filtering table is different (read: weird) */ /* Xircom uses the LEAST significant bits */ if (DC_IS_XIRCOM(sc)) { if ((crc & 0x180) == 0x180) return ((crc & 0x0F) + (crc & 0x70) * 3 + (14 << 4)); else return ((crc & 0x1F) + ((crc >> 1) & 0xF0) * 3 + (12 << 4)); } return (crc & ((1 << DC_BITS_512) - 1)); } /* * Calculate CRC of a multicast group address, return the upper 6 bits. */ static uint32_t dc_mchash_be(const uint8_t *addr) { uint32_t crc; /* Compute CRC for the address value. */ crc = ether_crc32_be(addr, ETHER_ADDR_LEN); /* Return the filter bit position. */ return ((crc >> 26) & 0x0000003F); } /* * 21143-style RX filter setup routine. Filter programming is done by * downloading a special setup frame into the TX engine. 21143, Macronix, * PNIC, PNIC II and Davicom chips are programmed this way. * * We always program the chip using 'hash perfect' mode, i.e. one perfect * address (our node address) and a 512-bit hash filter for multicast * frames. We also sneak the broadcast address into the hash filter since * we need that too. */ static void dc_setfilt_21143(struct dc_softc *sc) { uint16_t eaddr[(ETHER_ADDR_LEN+1)/2]; struct dc_desc *sframe; u_int32_t h, *sp; struct ifmultiaddr *ifma; struct ifnet *ifp; int i; ifp = sc->dc_ifp; i = sc->dc_cdata.dc_tx_prod; DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT); sc->dc_cdata.dc_tx_cnt++; sframe = &sc->dc_ldata->dc_tx_list[i]; sp = sc->dc_cdata.dc_sbuf; bzero(sp, DC_SFRAME_LEN); sframe->dc_data = htole32(sc->dc_saddr); sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP | DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT); sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)sc->dc_cdata.dc_sbuf; /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); else DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); if (ifp->if_flags & IFF_ALLMULTI) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); else DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = dc_mchash_le(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); sp[h >> 4] |= htole32(1 << (h & 0xF)); } IF_ADDR_UNLOCK(ifp); if (ifp->if_flags & IFF_BROADCAST) { h = dc_mchash_le(sc, ifp->if_broadcastaddr); sp[h >> 4] |= htole32(1 << (h & 0xF)); } /* Set our MAC address.
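 * Layout note: in hash perfect mode the 512-bit multicast hash fills
 * the first 32 longwords of the setup frame (16 hash bits per entry,
 * hence the sp[h >> 4] indexing above), while the one perfect
 * (station) address occupies entries 39-41, 16 bits per longword via
 * DC_SP_MAC() below.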
*/ bcopy(IF_LLADDR(sc->dc_ifp), eaddr, ETHER_ADDR_LEN); sp[39] = DC_SP_MAC(eaddr[0]); sp[40] = DC_SP_MAC(eaddr[1]); sp[41] = DC_SP_MAC(eaddr[2]); sframe->dc_status = htole32(DC_TXSTAT_OWN); CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); /* * The PNIC takes an exceedingly long time to process its * setup frame; wait 10ms after posting the setup frame * before proceeding, just so it has time to swallow its * medicine. */ DELAY(10000); sc->dc_wdog_timer = 5; } static void dc_setfilt_admtek(struct dc_softc *sc) { uint8_t eaddr[ETHER_ADDR_LEN]; struct ifnet *ifp; struct ifmultiaddr *ifma; int h = 0; u_int32_t hashes[2] = { 0, 0 }; ifp = sc->dc_ifp; /* Init our MAC address. */ bcopy(IF_LLADDR(sc->dc_ifp), eaddr, ETHER_ADDR_LEN); CSR_WRITE_4(sc, DC_AL_PAR0, eaddr[3] << 24 | eaddr[2] << 16 | eaddr[1] << 8 | eaddr[0]); CSR_WRITE_4(sc, DC_AL_PAR1, eaddr[5] << 8 | eaddr[4]); /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); else DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); if (ifp->if_flags & IFF_ALLMULTI) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); else DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); /* First, zot all the existing hash bits. */ CSR_WRITE_4(sc, DC_AL_MAR0, 0); CSR_WRITE_4(sc, DC_AL_MAR1, 0); /* * If we're already in promisc or allmulti mode, we * don't have to bother programming the multicast filter. */ if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) return; /* Now program new ones. */ IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (DC_IS_CENTAUR(sc)) h = dc_mchash_le(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); else h = dc_mchash_be( LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); } IF_ADDR_UNLOCK(ifp); CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0]); CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1]); } static void dc_setfilt_asix(struct dc_softc *sc) { uint32_t eaddr[(ETHER_ADDR_LEN+3)/4]; struct ifnet *ifp; struct ifmultiaddr *ifma; int h = 0; u_int32_t hashes[2] = { 0, 0 }; ifp = sc->dc_ifp; /* Init our MAC address. */ bcopy(IF_LLADDR(sc->dc_ifp), eaddr, ETHER_ADDR_LEN); CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0); CSR_WRITE_4(sc, DC_AX_FILTDATA, eaddr[0]); CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1); CSR_WRITE_4(sc, DC_AX_FILTDATA, eaddr[1]); /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); else DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); if (ifp->if_flags & IFF_ALLMULTI) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); else DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); /* * The ASIX chip has a special bit to enable reception * of broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD); else DC_CLRBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD); /* first, zot all the existing hash bits */ CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0); CSR_WRITE_4(sc, DC_AX_FILTDATA, 0); CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1); CSR_WRITE_4(sc, DC_AX_FILTDATA, 0); /* * If we're already in promisc or allmulti mode, we * don't have to bother programming the multicast filter. 
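 * (Once RX_PROMISC or RX_ALLMULTI is set in DC_NETCFG the hash bits
 * are moot anyway, so filling them in would be wasted work.)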
*/ if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) return; /* now program new ones */ IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = dc_mchash_be(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); } IF_ADDR_UNLOCK(ifp); CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0); CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]); CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1); CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]); } static void dc_setfilt_xircom(struct dc_softc *sc) { uint16_t eaddr[(ETHER_ADDR_LEN+1)/2]; struct ifnet *ifp; struct ifmultiaddr *ifma; struct dc_desc *sframe; u_int32_t h, *sp; int i; ifp = sc->dc_ifp; DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON)); i = sc->dc_cdata.dc_tx_prod; DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT); sc->dc_cdata.dc_tx_cnt++; sframe = &sc->dc_ldata->dc_tx_list[i]; sp = sc->dc_cdata.dc_sbuf; bzero(sp, DC_SFRAME_LEN); sframe->dc_data = htole32(sc->dc_saddr); sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP | DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT); sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)sc->dc_cdata.dc_sbuf; /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); else DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); if (ifp->if_flags & IFF_ALLMULTI) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); else DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = dc_mchash_le(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); sp[h >> 4] |= htole32(1 << (h & 0xF)); } IF_ADDR_UNLOCK(ifp); if (ifp->if_flags & IFF_BROADCAST) { h = dc_mchash_le(sc, ifp->if_broadcastaddr); sp[h >> 4] |= htole32(1 << (h & 0xF)); } /* Set our MAC address. */ bcopy(IF_LLADDR(sc->dc_ifp), eaddr, ETHER_ADDR_LEN); sp[0] = DC_SP_MAC(eaddr[0]); sp[1] = DC_SP_MAC(eaddr[1]); sp[2] = DC_SP_MAC(eaddr[2]); DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON); ifp->if_drv_flags |= IFF_DRV_RUNNING; sframe->dc_status = htole32(DC_TXSTAT_OWN); CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); /* * Wait some time... */ DELAY(1000); sc->dc_wdog_timer = 5; } static void dc_setfilt(struct dc_softc *sc) { if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) || DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc) || DC_IS_CONEXANT(sc)) dc_setfilt_21143(sc); if (DC_IS_ASIX(sc)) dc_setfilt_asix(sc); if (DC_IS_ADMTEK(sc)) dc_setfilt_admtek(sc); if (DC_IS_XIRCOM(sc)) dc_setfilt_xircom(sc); } /* * In order to fiddle with the 'full-duplex' and '100Mbps' bits in * the netconfig register, we first have to put the transmit and/or * receive logic in the idle state. 
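 * dc_setcfg() below does exactly that: it clears DC_NETCFG_TX_ON and
 * DC_NETCFG_RX_ON, polls DC_ISR until the TX state machine reports
 * idle and the RX state machine reports stopped or waiting, rewrites
 * the media bits, and sets the ON bits again only if the chip had
 * been running when we started.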
*/ static void dc_setcfg(struct dc_softc *sc, int media) { int i, restart = 0, watchdogreg; u_int32_t isr; if (IFM_SUBTYPE(media) == IFM_NONE) return; if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON)) { restart = 1; DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON)); for (i = 0; i < DC_TIMEOUT; i++) { isr = CSR_READ_4(sc, DC_ISR); if (isr & DC_ISR_TX_IDLE && ((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED || (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT)) break; DELAY(10); } if (i == DC_TIMEOUT) { if (!(isr & DC_ISR_TX_IDLE) && !DC_IS_ASIX(sc)) device_printf(sc->dc_dev, "%s: failed to force tx to idle state\n", __func__); if (!((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED || (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT) && !DC_HAS_BROKEN_RXSTATE(sc)) device_printf(sc->dc_dev, "%s: failed to force rx to idle state\n", __func__); } } if (IFM_SUBTYPE(media) == IFM_100_TX) { DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL); DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT); if (sc->dc_pmode == DC_PMODE_MII) { if (DC_IS_INTEL(sc)) { /* There's a write enable bit here that reads as 1. */ watchdogreg = CSR_READ_4(sc, DC_WATCHDOG); watchdogreg &= ~DC_WDOG_CTLWREN; watchdogreg |= DC_WDOG_JABBERDIS; CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg); } else { DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS); } DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS | DC_NETCFG_PORTSEL | DC_NETCFG_SCRAMBLER)); if (sc->dc_type == DC_TYPE_98713) DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS | DC_NETCFG_SCRAMBLER)); if (!DC_IS_DAVICOM(sc)) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF); if (DC_IS_INTEL(sc)) dc_apply_fixup(sc, IFM_AUTO); } else { if (DC_IS_PNIC(sc)) { DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL); DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP); DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL); } DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS); DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER); if (DC_IS_INTEL(sc)) dc_apply_fixup(sc, (media & IFM_GMASK) == IFM_FDX ? IFM_100_TX | IFM_FDX : IFM_100_TX); } } if (IFM_SUBTYPE(media) == IFM_10_T) { DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL); DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT); if (sc->dc_pmode == DC_PMODE_MII) { /* There's a write enable bit here that reads as 1. */ if (DC_IS_INTEL(sc)) { watchdogreg = CSR_READ_4(sc, DC_WATCHDOG); watchdogreg &= ~DC_WDOG_CTLWREN; watchdogreg |= DC_WDOG_JABBERDIS; CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg); } else { DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS); } DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS | DC_NETCFG_PORTSEL | DC_NETCFG_SCRAMBLER)); if (sc->dc_type == DC_TYPE_98713) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS); if (!DC_IS_DAVICOM(sc)) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF); if (DC_IS_INTEL(sc)) dc_apply_fixup(sc, IFM_AUTO); } else { if (DC_IS_PNIC(sc)) { DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL); DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP); DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL); } DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS); DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER); if (DC_IS_INTEL(sc)) { DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET); DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF); if ((media & IFM_GMASK) == IFM_FDX) DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D); else DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F); DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET); DC_CLRBIT(sc, DC_10BTCTRL, DC_TCTL_AUTONEGENBL); dc_apply_fixup(sc, (media & IFM_GMASK) == IFM_FDX ? 
IFM_10_T | IFM_FDX : IFM_10_T); DELAY(20000); } } } /* * If this is a Davicom DM9102A card with a DM9801 HomePNA * PHY and we want HomePNA mode, set the portsel bit to turn * on the external MII port. */ if (DC_IS_DAVICOM(sc)) { if (IFM_SUBTYPE(media) == IFM_HPNA_1) { DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); sc->dc_link = 1; } else { DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL); } } if ((media & IFM_GMASK) == IFM_FDX) { DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX); if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc)) DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX); } else { DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX); if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc)) DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX); } if (restart) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON | DC_NETCFG_RX_ON); } static void dc_reset(struct dc_softc *sc) { int i; DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET); for (i = 0; i < DC_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET)) break; } if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_CONEXANT(sc) || DC_IS_XIRCOM(sc) || DC_IS_INTEL(sc)) { DELAY(10000); DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET); i = 0; } if (i == DC_TIMEOUT) device_printf(sc->dc_dev, "reset never completed!\n"); /* Wait a little while for the chip to get its brains in order. */ DELAY(1000); CSR_WRITE_4(sc, DC_IMR, 0x00000000); CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000); CSR_WRITE_4(sc, DC_NETCFG, 0x00000000); /* * Bring the SIA out of reset. In some cases, it looks * like failing to unreset the SIA soon enough gets it * into a state where it will never come out of reset * until we reset the whole chip again. */ if (DC_IS_INTEL(sc)) { DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET); CSR_WRITE_4(sc, DC_10BTCTRL, 0); CSR_WRITE_4(sc, DC_WATCHDOG, 0); } } static const struct dc_type * dc_devtype(device_t dev) { const struct dc_type *t; u_int32_t devid; u_int8_t rev; t = dc_devs; devid = pci_get_devid(dev); rev = pci_get_revid(dev); while (t->dc_name != NULL) { if (devid == t->dc_devid && rev >= t->dc_minrev) return (t); t++; } return (NULL); } /* * Probe for a 21143 or clone chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. * We do a little bit of extra work to identify the exact type of * chip. The MX98713 and MX98713A have the same PCI vendor/device ID, * but different revision IDs. The same is true for 98715/98715A * chips and the 98725, as well as the ASIX and ADMtek chips. In some * cases, the exact chip revision affects driver behavior. 
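 * dc_devtype() above implements this: a dc_devs entry matches only if
 * the PCI device ID is equal and the revision is at least the entry's
 * dc_minrev, and the first entry that satisfies both wins.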
*/ static int dc_probe(device_t dev) { const struct dc_type *t; t = dc_devtype(dev); if (t != NULL) { device_set_desc(dev, t->dc_name); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static void dc_apply_fixup(struct dc_softc *sc, int media) { struct dc_mediainfo *m; u_int8_t *p; int i; u_int32_t reg; m = sc->dc_mi; while (m != NULL) { if (m->dc_media == media) break; m = m->dc_next; } if (m == NULL) return; for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) { reg = (p[0] | (p[1] << 8)) << 16; CSR_WRITE_4(sc, DC_WATCHDOG, reg); } for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) { reg = (p[0] | (p[1] << 8)) << 16; CSR_WRITE_4(sc, DC_WATCHDOG, reg); } } static void dc_decode_leaf_sia(struct dc_softc *sc, struct dc_eblock_sia *l) { struct dc_mediainfo *m; m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO); switch (l->dc_sia_code & ~DC_SIA_CODE_EXT) { case DC_SIA_CODE_10BT: m->dc_media = IFM_10_T; break; case DC_SIA_CODE_10BT_FDX: m->dc_media = IFM_10_T | IFM_FDX; break; case DC_SIA_CODE_10B2: m->dc_media = IFM_10_2; break; case DC_SIA_CODE_10B5: m->dc_media = IFM_10_5; break; default: break; } /* * We need to ignore CSR13, CSR14, CSR15 for SIA mode. * Things apparently already work for cards that do * supply Media Specific Data. */ if (l->dc_sia_code & DC_SIA_CODE_EXT) { m->dc_gp_len = 2; m->dc_gp_ptr = (u_int8_t *)&l->dc_un.dc_sia_ext.dc_sia_gpio_ctl; } else { m->dc_gp_len = 2; m->dc_gp_ptr = (u_int8_t *)&l->dc_un.dc_sia_noext.dc_sia_gpio_ctl; } m->dc_next = sc->dc_mi; sc->dc_mi = m; sc->dc_pmode = DC_PMODE_SIA; } static void dc_decode_leaf_sym(struct dc_softc *sc, struct dc_eblock_sym *l) { struct dc_mediainfo *m; m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO); if (l->dc_sym_code == DC_SYM_CODE_100BT) m->dc_media = IFM_100_TX; if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX) m->dc_media = IFM_100_TX | IFM_FDX; m->dc_gp_len = 2; m->dc_gp_ptr = (u_int8_t *)&l->dc_sym_gpio_ctl; m->dc_next = sc->dc_mi; sc->dc_mi = m; sc->dc_pmode = DC_PMODE_SYM; } static void dc_decode_leaf_mii(struct dc_softc *sc, struct dc_eblock_mii *l) { struct dc_mediainfo *m; u_int8_t *p; m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT | M_ZERO); /* We abuse IFM_AUTO to represent MII. */ m->dc_media = IFM_AUTO; m->dc_gp_len = l->dc_gpr_len; p = (u_int8_t *)l; p += sizeof(struct dc_eblock_mii); m->dc_gp_ptr = p; p += 2 * l->dc_gpr_len; m->dc_reset_len = *p; p++; m->dc_reset_ptr = p; m->dc_next = sc->dc_mi; sc->dc_mi = m; } static void dc_read_srom(struct dc_softc *sc, int bits) { int size; size = 2 << bits; sc->dc_srom = malloc(size, M_DEVBUF, M_NOWAIT); dc_read_eeprom(sc, (caddr_t)sc->dc_srom, 0, (size / 2), 0); } static void dc_parse_21143_srom(struct dc_softc *sc) { struct dc_leaf_hdr *lhdr; struct dc_eblock_hdr *hdr; int have_mii, i, loff; char *ptr; have_mii = 0; loff = sc->dc_srom[27]; lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]); ptr = (char *)lhdr; ptr += sizeof(struct dc_leaf_hdr) - 1; /* * Look if we got a MII media block. */ for (i = 0; i < lhdr->dc_mcnt; i++) { hdr = (struct dc_eblock_hdr *)ptr; if (hdr->dc_type == DC_EBLOCK_MII) have_mii++; ptr += (hdr->dc_len & 0x7F); ptr++; } /* * Do the same thing again. Only use SIA and SYM media * blocks if no MII media block is available. 
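 * Each media block begins with a dc_eblock_hdr; only the low 7 bits
 * of dc_len are a length, which is why both passes advance with
 * ptr += (hdr->dc_len & 0x7F) followed by ptr++.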
*/ ptr = (char *)lhdr; ptr += sizeof(struct dc_leaf_hdr) - 1; for (i = 0; i < lhdr->dc_mcnt; i++) { hdr = (struct dc_eblock_hdr *)ptr; switch (hdr->dc_type) { case DC_EBLOCK_MII: dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr); break; case DC_EBLOCK_SIA: if (! have_mii) dc_decode_leaf_sia(sc, (struct dc_eblock_sia *)hdr); break; case DC_EBLOCK_SYM: if (! have_mii) dc_decode_leaf_sym(sc, (struct dc_eblock_sym *)hdr); break; default: /* Don't care. Yet. */ break; } ptr += (hdr->dc_len & 0x7F); ptr++; } } static void dc_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { u_int32_t *paddr; KASSERT(nseg == 1, ("%s: wrong number of segments (%d)", __func__, nseg)); paddr = arg; *paddr = segs->ds_addr; } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int dc_attach(device_t dev) { int tmp = 0; uint32_t eaddr[(ETHER_ADDR_LEN+3)/4]; u_int32_t command; struct dc_softc *sc; struct ifnet *ifp; u_int32_t reg, revision; int error = 0, rid, mac_offset; int i; u_int8_t *mac; sc = device_get_softc(dev); sc->dc_dev = dev; mtx_init(&sc->dc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); /* * Map control/status registers. */ pci_enable_busmaster(dev); rid = DC_RID; sc->dc_res = bus_alloc_resource_any(dev, DC_RES, &rid, RF_ACTIVE); if (sc->dc_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); error = ENXIO; goto fail; } sc->dc_btag = rman_get_bustag(sc->dc_res); sc->dc_bhandle = rman_get_bushandle(sc->dc_res); /* Allocate interrupt. */ rid = 0; sc->dc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->dc_irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } /* Need this info to decide on a chip type. */ sc->dc_info = dc_devtype(dev); revision = pci_get_revid(dev); /* Get the eeprom width, but PNIC and XIRCOM have diff eeprom */ if (sc->dc_info->dc_devid != DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C168) && sc->dc_info->dc_devid != DC_DEVID(DC_VENDORID_XIRCOM, DC_DEVICEID_X3201)) dc_eeprom_width(sc); switch (sc->dc_info->dc_devid) { case DC_DEVID(DC_VENDORID_DEC, DC_DEVICEID_21143): sc->dc_type = DC_TYPE_21143; sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR; sc->dc_flags |= DC_REDUCED_MII_POLL; /* Save EEPROM contents so we can parse them later. */ dc_read_srom(sc, sc->dc_romwidth); break; case DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9009): case DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9100): case DC_DEVID(DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102): sc->dc_type = DC_TYPE_DM9102; sc->dc_flags |= DC_TX_COALESCE | DC_TX_INTR_ALWAYS; sc->dc_flags |= DC_REDUCED_MII_POLL | DC_TX_STORENFWD; sc->dc_flags |= DC_TX_ALIGN; sc->dc_pmode = DC_PMODE_MII; /* Increase the latency timer value. 
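 * (NB: rationale inferred rather than documented here: a larger
 * PCIR_LATTIMER value lets the DM9102 hold the PCI bus for longer
 * bursts, which these parts seem to need to keep their FIFOs fed.)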
*/ pci_write_config(dev, PCIR_LATTIMER, 0x80, 1); break; case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AL981): sc->dc_type = DC_TYPE_AL981; sc->dc_flags |= DC_TX_USE_TX_INTR; sc->dc_flags |= DC_TX_ADMTEK_WAR; sc->dc_pmode = DC_PMODE_MII; dc_read_srom(sc, sc->dc_romwidth); break; case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_AN985): case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9511): case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9513): case DC_DEVID(DC_VENDORID_DLINK, DC_DEVICEID_DRP32TXD): case DC_DEVID(DC_VENDORID_ADMTEK, DC_DEVICEID_FA511): case DC_DEVID(DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500): case DC_DEVID(DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500MX): case DC_DEVID(DC_VENDORID_ACCTON, DC_DEVICEID_EN2242): case DC_DEVID(DC_VENDORID_HAWKING, DC_DEVICEID_HAWKING_PN672TX): case DC_DEVID(DC_VENDORID_PLANEX, DC_DEVICEID_FNW3602T): case DC_DEVID(DC_VENDORID_3COM, DC_DEVICEID_3CSOHOB): case DC_DEVID(DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN120): case DC_DEVID(DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN130): case DC_DEVID(DC_VENDORID_LINKSYS, DC_DEVICEID_PCMPC200_AB08): case DC_DEVID(DC_VENDORID_LINKSYS, DC_DEVICEID_PCMPC200_AB09): sc->dc_type = DC_TYPE_AN985; sc->dc_flags |= DC_64BIT_HASH; sc->dc_flags |= DC_TX_USE_TX_INTR; sc->dc_flags |= DC_TX_ADMTEK_WAR; sc->dc_pmode = DC_PMODE_MII; /* Don't read the SROM; it's auto-loaded on reset. */ break; case DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98713): case DC_DEVID(DC_VENDORID_CP, DC_DEVICEID_98713_CP): if (revision < DC_REVISION_98713A) { sc->dc_type = DC_TYPE_98713; } if (revision >= DC_REVISION_98713A) { sc->dc_type = DC_TYPE_98713A; sc->dc_flags |= DC_21143_NWAY; } sc->dc_flags |= DC_REDUCED_MII_POLL; sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR; break; case DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_987x5): case DC_DEVID(DC_VENDORID_ACCTON, DC_DEVICEID_EN1217): /* * Macronix MX98715AEC-C/D/E parts have only a * 128-bit hash table. We need to deal with these * in the same manner as the PNIC II so that we * get the right number of bits out of the * CRC routine. */ if (revision >= DC_REVISION_98715AEC_C && revision < DC_REVISION_98725) sc->dc_flags |= DC_128BIT_HASH; sc->dc_type = DC_TYPE_987x5; sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR; sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY; break; case DC_DEVID(DC_VENDORID_MX, DC_DEVICEID_98727): sc->dc_type = DC_TYPE_987x5; sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR; sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY; break; case DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C115): sc->dc_type = DC_TYPE_PNICII; sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR | DC_128BIT_HASH; sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY; break; case DC_DEVID(DC_VENDORID_LO, DC_DEVICEID_82C168): sc->dc_type = DC_TYPE_PNIC; sc->dc_flags |= DC_TX_STORENFWD | DC_TX_INTR_ALWAYS; sc->dc_flags |= DC_PNIC_RX_BUG_WAR; sc->dc_pnic_rx_buf = malloc(DC_RXLEN * 5, M_DEVBUF, M_NOWAIT); if (revision < DC_REVISION_82C169) sc->dc_pmode = DC_PMODE_SYM; break; case DC_DEVID(DC_VENDORID_ASIX, DC_DEVICEID_AX88140A): sc->dc_type = DC_TYPE_ASIX; sc->dc_flags |= DC_TX_USE_TX_INTR | DC_TX_INTR_FIRSTFRAG; sc->dc_flags |= DC_REDUCED_MII_POLL; sc->dc_pmode = DC_PMODE_MII; break; case DC_DEVID(DC_VENDORID_XIRCOM, DC_DEVICEID_X3201): sc->dc_type = DC_TYPE_XIRCOM; sc->dc_flags |= DC_TX_INTR_ALWAYS | DC_TX_COALESCE | DC_TX_ALIGN; /* * We don't actually need to coalesce, but we're doing * it to obtain a double word aligned buffer. * The DC_TX_COALESCE flag is required.
*/ sc->dc_pmode = DC_PMODE_MII; break; case DC_DEVID(DC_VENDORID_CONEXANT, DC_DEVICEID_RS7112): sc->dc_type = DC_TYPE_CONEXANT; sc->dc_flags |= DC_TX_INTR_ALWAYS; sc->dc_flags |= DC_REDUCED_MII_POLL; sc->dc_pmode = DC_PMODE_MII; dc_read_srom(sc, sc->dc_romwidth); break; default: device_printf(dev, "unknown device: %x\n", sc->dc_info->dc_devid); break; } /* Save the cache line size. */ if (DC_IS_DAVICOM(sc)) sc->dc_cachesize = 0; else sc->dc_cachesize = pci_get_cachelnsz(dev); /* Reset the adapter. */ dc_reset(sc); /* Take 21143 out of snooze mode */ if (DC_IS_INTEL(sc) || DC_IS_XIRCOM(sc)) { command = pci_read_config(dev, DC_PCI_CFDD, 4); command &= ~(DC_CFDD_SNOOZE_MODE | DC_CFDD_SLEEP_MODE); pci_write_config(dev, DC_PCI_CFDD, command, 4); } /* * Try to learn something about the supported media. * We know that ASIX and ADMtek and Davicom devices * will *always* be using MII media, so that's a no-brainer. * The tricky ones are the Macronix/PNIC II and the * Intel 21143. */ if (DC_IS_INTEL(sc)) dc_parse_21143_srom(sc); else if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) { if (sc->dc_type == DC_TYPE_98713) sc->dc_pmode = DC_PMODE_MII; else sc->dc_pmode = DC_PMODE_SYM; } else if (!sc->dc_pmode) sc->dc_pmode = DC_PMODE_MII; /* * Get station address from the EEPROM. */ switch(sc->dc_type) { case DC_TYPE_98713: case DC_TYPE_98713A: case DC_TYPE_987x5: case DC_TYPE_PNICII: dc_read_eeprom(sc, (caddr_t)&mac_offset, (DC_EE_NODEADDR_OFFSET / 2), 1, 0); dc_read_eeprom(sc, (caddr_t)&eaddr, (mac_offset / 2), 3, 0); break; case DC_TYPE_PNIC: dc_read_eeprom(sc, (caddr_t)&eaddr, 0, 3, 1); break; case DC_TYPE_DM9102: dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0); #ifdef __sparc64__ /* * If this is an onboard dc(4) the station address read from * the EEPROM is all zero and we have to get it from the FCode. */ if (eaddr[0] == 0 && (eaddr[1] & ~0xffff) == 0) OF_getetheraddr(dev, (caddr_t)&eaddr); #endif break; case DC_TYPE_21143: case DC_TYPE_ASIX: dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0); break; case DC_TYPE_AL981: case DC_TYPE_AN985: reg = CSR_READ_4(sc, DC_AL_PAR0); mac = (uint8_t *)&eaddr[0]; mac[0] = (reg >> 0) & 0xff; mac[1] = (reg >> 8) & 0xff; mac[2] = (reg >> 16) & 0xff; mac[3] = (reg >> 24) & 0xff; reg = CSR_READ_4(sc, DC_AL_PAR1); mac[4] = (reg >> 0) & 0xff; mac[5] = (reg >> 8) & 0xff; break; case DC_TYPE_CONEXANT: bcopy(sc->dc_srom + DC_CONEXANT_EE_NODEADDR, &eaddr, ETHER_ADDR_LEN); break; case DC_TYPE_XIRCOM: /* The MAC comes from the CIS. */ mac = pci_get_ether(dev); if (!mac) { device_printf(dev, "No station address in CIS!\n"); error = ENXIO; goto fail; } bcopy(mac, eaddr, ETHER_ADDR_LEN); break; default: dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0); break; } /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. 
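 * The descriptor lists must be visible to the chip as a single
 * physically contiguous block below 4GB, hence the
 * BUS_SPACE_MAXADDR_32BIT lowaddr and single-segment tag created
 * below.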
*/ error = bus_dma_tag_create(bus_get_dma_tag(dev), PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct dc_list_data), 1, sizeof(struct dc_list_data), 0, NULL, NULL, &sc->dc_ltag); if (error) { device_printf(dev, "failed to allocate busdma tag\n"); error = ENXIO; goto fail; } error = bus_dmamem_alloc(sc->dc_ltag, (void **)&sc->dc_ldata, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->dc_lmap); if (error) { device_printf(dev, "failed to allocate DMA safe memory\n"); error = ENXIO; goto fail; } error = bus_dmamap_load(sc->dc_ltag, sc->dc_lmap, sc->dc_ldata, sizeof(struct dc_list_data), dc_dma_map_addr, &sc->dc_laddr, BUS_DMA_NOWAIT); if (error) { device_printf(dev, "cannot get address of the descriptors\n"); error = ENXIO; goto fail; } /* * Allocate a busdma tag and DMA safe memory for the multicast * setup frame. */ error = bus_dma_tag_create(bus_get_dma_tag(dev), PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, DC_SFRAME_LEN + DC_MIN_FRAMELEN, 1, DC_SFRAME_LEN + DC_MIN_FRAMELEN, 0, NULL, NULL, &sc->dc_stag); if (error) { device_printf(dev, "failed to allocate busdma tag\n"); error = ENXIO; goto fail; } error = bus_dmamem_alloc(sc->dc_stag, (void **)&sc->dc_cdata.dc_sbuf, BUS_DMA_NOWAIT, &sc->dc_smap); if (error) { device_printf(dev, "failed to allocate DMA safe memory\n"); error = ENXIO; goto fail; } error = bus_dmamap_load(sc->dc_stag, sc->dc_smap, sc->dc_cdata.dc_sbuf, DC_SFRAME_LEN, dc_dma_map_addr, &sc->dc_saddr, BUS_DMA_NOWAIT); if (error) { device_printf(dev, "cannot get address of the descriptors\n"); error = ENXIO; goto fail; } /* Allocate a busdma tag for mbufs. */ error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * DC_MAXFRAGS, DC_MAXFRAGS, MCLBYTES, 0, NULL, NULL, &sc->dc_mtag); if (error) { device_printf(dev, "failed to allocate busdma tag\n"); error = ENXIO; goto fail; } /* Create the TX/RX busdma maps. */ for (i = 0; i < DC_TX_LIST_CNT; i++) { error = bus_dmamap_create(sc->dc_mtag, 0, &sc->dc_cdata.dc_tx_map[i]); if (error) { device_printf(dev, "failed to init TX ring\n"); error = ENXIO; goto fail; } } for (i = 0; i < DC_RX_LIST_CNT; i++) { error = bus_dmamap_create(sc->dc_mtag, 0, &sc->dc_cdata.dc_rx_map[i]); if (error) { device_printf(dev, "failed to init RX ring\n"); error = ENXIO; goto fail; } } error = bus_dmamap_create(sc->dc_mtag, 0, &sc->dc_sparemap); if (error) { device_printf(dev, "failed to init RX ring\n"); error = ENXIO; goto fail; } ifp = sc->dc_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = dc_ioctl; ifp->if_start = dc_start; ifp->if_init = dc_init; IFQ_SET_MAXLEN(&ifp->if_snd, DC_TX_LIST_CNT - 1); ifp->if_snd.ifq_drv_maxlen = DC_TX_LIST_CNT - 1; IFQ_SET_READY(&ifp->if_snd); /* * Do MII setup. If this is a 21143, check for a PHY on the * MII bus after applying any necessary fixups to twiddle the * GPIO bits. If we don't end up finding a PHY, restore the * old selection (SIA only or SIA/SYM) and attach the dcphy * driver instead. */ if (DC_IS_INTEL(sc)) { dc_apply_fixup(sc, IFM_AUTO); tmp = sc->dc_pmode; sc->dc_pmode = DC_PMODE_MII; } /* * Setup General Purpose port mode and data so the tulip can talk * to the MII. This needs to be done before mii_phy_probe so that * we can actually see them. 
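 * For the Xircom X3201 this means two writes to DC_SIAGP: the first
 * carries DC_SIAGP_WRITE_EN along with the GP0/GP2 output bits, the
 * second repeats the same bits with write-enable cleared.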
*/ if (DC_IS_XIRCOM(sc)) { CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN | DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); DELAY(10); CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN | DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); DELAY(10); } error = mii_phy_probe(dev, &sc->dc_miibus, dc_ifmedia_upd, dc_ifmedia_sts); if (error && DC_IS_INTEL(sc)) { sc->dc_pmode = tmp; if (sc->dc_pmode != DC_PMODE_SIA) sc->dc_pmode = DC_PMODE_SYM; sc->dc_flags |= DC_21143_NWAY; mii_phy_probe(dev, &sc->dc_miibus, dc_ifmedia_upd, dc_ifmedia_sts); /* * For non-MII cards, we need to have the 21143 * drive the LEDs. Except there are some systems * like the NEC VersaPro NoteBook PC which have no * LEDs, and twiddling these bits has adverse effects * on them. (I.e. you suddenly can't get a link.) */ if (!(pci_get_subvendor(dev) == 0x1033 && pci_get_subdevice(dev) == 0x8028)) sc->dc_flags |= DC_TULIP_LEDS; error = 0; } if (error) { device_printf(dev, "MII without any PHY!\n"); goto fail; } if (DC_IS_ADMTEK(sc)) { /* * Set automatic TX underrun recovery for the ADMtek chips */ DC_SETBIT(sc, DC_AL_CR, DC_AL_CR_ATUR); } /* * Tell the upper layer(s) we support long frames. */ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_VLAN_MTU; ifp->if_capenable = ifp->if_capabilities; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif callout_init_mtx(&sc->dc_stat_ch, &sc->dc_mtx, 0); callout_init_mtx(&sc->dc_wdog_ch, &sc->dc_mtx, 0); /* * Call MI attach routine. */ ether_ifattach(ifp, (caddr_t)eaddr); /* Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->dc_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, dc_intr, sc, &sc->dc_intrhand); if (error) { device_printf(dev, "couldn't set up irq\n"); ether_ifdetach(ifp); goto fail; } fail: if (error) dc_detach(dev); return (error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. 
*/ static int dc_detach(device_t dev) { struct dc_softc *sc; struct ifnet *ifp; struct dc_mediainfo *m; int i; sc = device_get_softc(dev); KASSERT(mtx_initialized(&sc->dc_mtx), ("dc mutex not initialized")); ifp = sc->dc_ifp; #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif /* These should only be active if attach succeeded */ if (device_is_attached(dev)) { DC_LOCK(sc); dc_stop(sc); DC_UNLOCK(sc); callout_drain(&sc->dc_stat_ch); callout_drain(&sc->dc_wdog_ch); ether_ifdetach(ifp); } if (sc->dc_miibus) device_delete_child(dev, sc->dc_miibus); bus_generic_detach(dev); if (sc->dc_intrhand) bus_teardown_intr(dev, sc->dc_irq, sc->dc_intrhand); if (sc->dc_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->dc_irq); if (sc->dc_res) bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res); if (ifp) if_free(ifp); if (sc->dc_cdata.dc_sbuf != NULL) bus_dmamem_free(sc->dc_stag, sc->dc_cdata.dc_sbuf, sc->dc_smap); if (sc->dc_ldata != NULL) bus_dmamem_free(sc->dc_ltag, sc->dc_ldata, sc->dc_lmap); if (sc->dc_mtag) { for (i = 0; i < DC_TX_LIST_CNT; i++) if (sc->dc_cdata.dc_tx_map[i] != NULL) bus_dmamap_destroy(sc->dc_mtag, sc->dc_cdata.dc_tx_map[i]); for (i = 0; i < DC_RX_LIST_CNT; i++) if (sc->dc_cdata.dc_rx_map[i] != NULL) bus_dmamap_destroy(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i]); bus_dmamap_destroy(sc->dc_mtag, sc->dc_sparemap); } if (sc->dc_stag) bus_dma_tag_destroy(sc->dc_stag); if (sc->dc_mtag) bus_dma_tag_destroy(sc->dc_mtag); if (sc->dc_ltag) bus_dma_tag_destroy(sc->dc_ltag); free(sc->dc_pnic_rx_buf, M_DEVBUF); while (sc->dc_mi != NULL) { m = sc->dc_mi->dc_next; free(sc->dc_mi, M_DEVBUF); sc->dc_mi = m; } free(sc->dc_srom, M_DEVBUF); mtx_destroy(&sc->dc_mtx); return (0); } /* * Initialize the transmit descriptors. */ static int dc_list_tx_init(struct dc_softc *sc) { struct dc_chain_data *cd; struct dc_list_data *ld; int i, nexti; cd = &sc->dc_cdata; ld = sc->dc_ldata; for (i = 0; i < DC_TX_LIST_CNT; i++) { if (i == DC_TX_LIST_CNT - 1) nexti = 0; else nexti = i + 1; ld->dc_tx_list[i].dc_next = htole32(DC_TXDESC(sc, nexti)); cd->dc_tx_chain[i] = NULL; ld->dc_tx_list[i].dc_data = 0; ld->dc_tx_list[i].dc_ctl = 0; } cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0; bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); return (0); } /* * Initialize the RX descriptors and allocate mbufs for them. Note that * we arrange the descriptors in a closed ring, so that the last descriptor * points back to the first. */ static int dc_list_rx_init(struct dc_softc *sc) { struct dc_chain_data *cd; struct dc_list_data *ld; int i, nexti; cd = &sc->dc_cdata; ld = sc->dc_ldata; for (i = 0; i < DC_RX_LIST_CNT; i++) { if (dc_newbuf(sc, i, 1) != 0) return (ENOBUFS); if (i == DC_RX_LIST_CNT - 1) nexti = 0; else nexti = i + 1; ld->dc_rx_list[i].dc_next = htole32(DC_RXDESC(sc, nexti)); } cd->dc_rx_prod = 0; bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); return (0); } /* * Initialize an RX descriptor and attach an MBUF cluster. */ static int dc_newbuf(struct dc_softc *sc, int i, int alloc) { struct mbuf *m_new; bus_dmamap_t tmp; bus_dma_segment_t segs[1]; int error, nseg; if (alloc) { m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (m_new == NULL) return (ENOBUFS); } else { m_new = sc->dc_cdata.dc_rx_chain[i]; m_new->m_data = m_new->m_ext.ext_buf; } m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; m_adj(m_new, sizeof(u_int64_t)); /* * If this is a PNIC chip, zero the buffer. 
This is part * of the workaround for the receive bug in the 82c168 and * 82c169 chips. */ if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) bzero(mtod(m_new, char *), m_new->m_len); /* No need to remap the mbuf if we're reusing it. */ if (alloc) { error = bus_dmamap_load_mbuf_sg(sc->dc_mtag, sc->dc_sparemap, m_new, segs, &nseg, 0); if (error) { m_freem(m_new); return (error); } KASSERT(nseg == 1, ("%s: wrong number of segments (%d)", __func__, nseg)); sc->dc_ldata->dc_rx_list[i].dc_data = htole32(segs->ds_addr); bus_dmamap_unload(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i]); tmp = sc->dc_cdata.dc_rx_map[i]; sc->dc_cdata.dc_rx_map[i] = sc->dc_sparemap; sc->dc_sparemap = tmp; sc->dc_cdata.dc_rx_chain[i] = m_new; } sc->dc_ldata->dc_rx_list[i].dc_ctl = htole32(DC_RXCTL_RLINK | DC_RXLEN); sc->dc_ldata->dc_rx_list[i].dc_status = htole32(DC_RXSTAT_OWN); bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i], BUS_DMASYNC_PREREAD); bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); return (0); } /* * Grrrrr. * The PNIC chip has a terrible bug in it that manifests itself during * periods of heavy activity. The exact mode of failure is difficult to * pinpoint: sometimes it only happens in promiscuous mode, sometimes it * will happen on slow machines. The bug is that sometimes instead of * uploading one complete frame during reception, it uploads what looks * like the entire contents of its FIFO memory. The frame we want is at * the end of the whole mess, but we never know exactly how much data has * been uploaded, so salvaging the frame is hard. * * There is only one way to do it reliably, and it's disgusting. * Here's what we know: * * - We know there will always be somewhere between one and three extra * descriptors uploaded. * * - We know the desired received frame will always be at the end of the * total data upload. * * - We know the size of the desired received frame because it will be * provided in the length field of the status word in the last descriptor. * * Here's what we do: * * - When we allocate buffers for the receive ring, we bzero() them. * This means that we know that the buffer contents should be all * zeros, except for data uploaded by the chip. * * - We also force the PNIC chip to upload frames that include the * ethernet CRC at the end. * * - We gather all of the bogus frame data into a single buffer. * * - We then position a pointer at the end of this buffer and scan * backwards until we encounter the first non-zero byte of data. * This is the end of the received frame. We know we will encounter * some data at the end of the frame because the CRC will always be * there, so even if the sender transmits a packet of all zeros, * we won't be fooled. * * - We know the size of the actual received frame, so we subtract * that value from the current pointer location. This brings us * to the start of the actual received packet. * * - We copy this into an mbuf and pass it on, along with the actual * frame length. * * The performance hit is tremendous, but it beats dropping frames all * the time. */ #define DC_WHOLEFRAME (DC_RXSTAT_FIRSTFRAG | DC_RXSTAT_LASTFRAG) static void dc_pnic_rx_bug_war(struct dc_softc *sc, int idx) { struct dc_desc *cur_rx; struct dc_desc *c = NULL; struct mbuf *m = NULL; unsigned char *ptr; int i, total_len; u_int32_t rxstat = 0; i = sc->dc_pnic_rx_bug_save; cur_rx = &sc->dc_ldata->dc_rx_list[idx]; ptr = sc->dc_pnic_rx_buf; bzero(ptr, DC_RXLEN * 5); /* Copy all the bytes from the bogus buffers.
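 * Each descriptor's mbuf is copied out in full (DC_RXLEN bytes per
 * buffer) since there is no way to tell how much of any one buffer
 * the chip actually wrote; the pre-zeroed clusters guarantee that
 * the unused tail is all zeros for the backwards scan that follows.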
*/ while (1) { c = &sc->dc_ldata->dc_rx_list[i]; rxstat = le32toh(c->dc_status); m = sc->dc_cdata.dc_rx_chain[i]; bcopy(mtod(m, char *), ptr, DC_RXLEN); ptr += DC_RXLEN; /* If this is the last buffer, break out. */ if (i == idx || rxstat & DC_RXSTAT_LASTFRAG) break; dc_newbuf(sc, i, 0); DC_INC(i, DC_RX_LIST_CNT); } /* Find the length of the actual receive frame. */ total_len = DC_RXBYTES(rxstat); /* Scan backwards until we hit a non-zero byte. */ while (*ptr == 0x00) ptr--; /* Round off. */ if ((uintptr_t)(ptr) & 0x3) ptr -= 1; /* Now find the start of the frame. */ ptr -= total_len; if (ptr < sc->dc_pnic_rx_buf) ptr = sc->dc_pnic_rx_buf; /* * Now copy the salvaged frame to the last mbuf and fake up * the status word to make it look like a successful * frame reception. */ dc_newbuf(sc, i, 0); bcopy(ptr, mtod(m, char *), total_len); cur_rx->dc_status = htole32(rxstat | DC_RXSTAT_FIRSTFRAG); } /* * This routine searches the RX ring for dirty descriptors in the * event that the rxeof routine falls out of sync with the chip's * current descriptor pointer. This may happen sometimes as a result * of a "no RX buffer available" condition that happens when the chip * consumes all of the RX buffers before the driver has a chance to * process the RX ring. This routine may need to be called more than * once to bring the driver back in sync with the chip; however, we * should still be getting RX DONE interrupts to drive the search * for new packets in the RX ring, so we should catch up eventually. */ static int dc_rx_resync(struct dc_softc *sc) { struct dc_desc *cur_rx; int i, pos; pos = sc->dc_cdata.dc_rx_prod; for (i = 0; i < DC_RX_LIST_CNT; i++) { cur_rx = &sc->dc_ldata->dc_rx_list[pos]; if (!(le32toh(cur_rx->dc_status) & DC_RXSTAT_OWN)) break; DC_INC(pos, DC_RX_LIST_CNT); } /* If the ring really is empty, then just return. */ if (i == DC_RX_LIST_CNT) return (0); /* We've fallen behind the chip: catch up to it. */ sc->dc_cdata.dc_rx_prod = pos; return (EAGAIN); } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. */ -static void +static int dc_rxeof(struct dc_softc *sc) { struct mbuf *m, *m0; struct ifnet *ifp; struct dc_desc *cur_rx; - int i, total_len = 0; + int i, total_len, rx_npkts; u_int32_t rxstat; DC_LOCK_ASSERT(sc); ifp = sc->dc_ifp; i = sc->dc_cdata.dc_rx_prod; + total_len = 0; + rx_npkts = 0; bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, BUS_DMASYNC_POSTREAD); while (!(le32toh(sc->dc_ldata->dc_rx_list[i].dc_status) & DC_RXSTAT_OWN)) { #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { if (sc->rxcycles <= 0) break; sc->rxcycles--; } #endif cur_rx = &sc->dc_ldata->dc_rx_list[i]; rxstat = le32toh(cur_rx->dc_status); m = sc->dc_cdata.dc_rx_chain[i]; bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i], BUS_DMASYNC_POSTREAD); total_len = DC_RXBYTES(rxstat); if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) { if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) { if (rxstat & DC_RXSTAT_FIRSTFRAG) sc->dc_pnic_rx_bug_save = i; if ((rxstat & DC_RXSTAT_LASTFRAG) == 0) { DC_INC(i, DC_RX_LIST_CNT); continue; } dc_pnic_rx_bug_war(sc, i); rxstat = le32toh(cur_rx->dc_status); total_len = DC_RXBYTES(rxstat); } } /* * If an error occurs, update stats, clear the * status word and leave the mbuf cluster in place: * it should simply get re-used next time this descriptor * comes up in the ring. However, don't report long * frames as errors since they could be VLANs.
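 * Concretely: a status word with DC_RXSTAT_RXERR and only
 * DC_RXSTAT_GIANT set (none of the CRC, dribble, MII, runt, collision
 * or descriptor-error bits) is still accepted below, since a
 * VLAN-tagged maximum-size frame runs 4 bytes past the untagged
 * Ethernet limit.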
*/ if ((rxstat & DC_RXSTAT_RXERR)) { if (!(rxstat & DC_RXSTAT_GIANT) || (rxstat & (DC_RXSTAT_CRCERR | DC_RXSTAT_DRIBBLE | DC_RXSTAT_MIIERE | DC_RXSTAT_COLLSEEN | DC_RXSTAT_RUNT | DC_RXSTAT_DE))) { ifp->if_ierrors++; if (rxstat & DC_RXSTAT_COLLSEEN) ifp->if_collisions++; dc_newbuf(sc, i, 0); if (rxstat & DC_RXSTAT_CRCERR) { DC_INC(i, DC_RX_LIST_CNT); continue; } else { dc_init_locked(sc); - return; + return (rx_npkts); } } } /* No errors; receive the packet. */ total_len -= ETHER_CRC_LEN; #ifdef __NO_STRICT_ALIGNMENT /* * On architectures without alignment problems we try to * allocate a new buffer for the receive ring, and pass up * the one where the packet is already, saving the expensive * copy done in m_devget(). * If we are on an architecture with alignment problems, or * if the allocation fails, then use m_devget and leave the * existing buffer in the receive ring. */ if (dc_newbuf(sc, i, 1) == 0) { m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = total_len; DC_INC(i, DC_RX_LIST_CNT); } else #endif { m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, ifp, NULL); dc_newbuf(sc, i, 0); DC_INC(i, DC_RX_LIST_CNT); if (m0 == NULL) { ifp->if_ierrors++; continue; } m = m0; } ifp->if_ipackets++; DC_UNLOCK(sc); (*ifp->if_input)(ifp, m); DC_LOCK(sc); + rx_npkts++; } sc->dc_cdata.dc_rx_prod = i; + return (rx_npkts); } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. */ static void dc_txeof(struct dc_softc *sc) { struct dc_desc *cur_tx = NULL; struct ifnet *ifp; int idx; u_int32_t ctl, txstat; ifp = sc->dc_ifp; /* * Go through our tx list and free mbufs for those * frames that have been transmitted. */ bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, BUS_DMASYNC_POSTREAD); idx = sc->dc_cdata.dc_tx_cons; while (idx != sc->dc_cdata.dc_tx_prod) { cur_tx = &sc->dc_ldata->dc_tx_list[idx]; txstat = le32toh(cur_tx->dc_status); ctl = le32toh(cur_tx->dc_ctl); if (txstat & DC_TXSTAT_OWN) break; if (!(ctl & DC_TXCTL_LASTFRAG) || ctl & DC_TXCTL_SETUP) { if (ctl & DC_TXCTL_SETUP) { /* * Yes, the PNIC is so brain damaged * that it will sometimes generate a TX * underrun error while DMAing the RX * filter setup frame. If we detect this, * we have to send the setup frame again, * or else the filter won't be programmed * correctly. */ if (DC_IS_PNIC(sc)) { if (txstat & DC_TXSTAT_ERRSUM) dc_setfilt(sc); } sc->dc_cdata.dc_tx_chain[idx] = NULL; } sc->dc_cdata.dc_tx_cnt--; DC_INC(idx, DC_TX_LIST_CNT); continue; } if (DC_IS_XIRCOM(sc) || DC_IS_CONEXANT(sc)) { /* * XXX: Why does my Xircom taunt me so? * For some reason it likes setting the CARRLOST flag * even when the carrier is there. wtf?!? * Who knows, but Conexant chips have the * same problem. Maybe they took lessons * from Xircom. 
*/ if (/*sc->dc_type == DC_TYPE_21143 &&*/ sc->dc_pmode == DC_PMODE_MII && ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM | DC_TXSTAT_NOCARRIER))) txstat &= ~DC_TXSTAT_ERRSUM; } else { if (/*sc->dc_type == DC_TYPE_21143 &&*/ sc->dc_pmode == DC_PMODE_MII && ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM | DC_TXSTAT_NOCARRIER | DC_TXSTAT_CARRLOST))) txstat &= ~DC_TXSTAT_ERRSUM; } if (txstat & DC_TXSTAT_ERRSUM) { ifp->if_oerrors++; if (txstat & DC_TXSTAT_EXCESSCOLL) ifp->if_collisions++; if (txstat & DC_TXSTAT_LATECOLL) ifp->if_collisions++; if (!(txstat & DC_TXSTAT_UNDERRUN)) { dc_init_locked(sc); return; } } ifp->if_collisions += (txstat & DC_TXSTAT_COLLCNT) >> 3; ifp->if_opackets++; if (sc->dc_cdata.dc_tx_chain[idx] != NULL) { bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_tx_map[idx], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->dc_mtag, sc->dc_cdata.dc_tx_map[idx]); m_freem(sc->dc_cdata.dc_tx_chain[idx]); sc->dc_cdata.dc_tx_chain[idx] = NULL; } sc->dc_cdata.dc_tx_cnt--; DC_INC(idx, DC_TX_LIST_CNT); } sc->dc_cdata.dc_tx_cons = idx; if (DC_TX_LIST_CNT - sc->dc_cdata.dc_tx_cnt > DC_TX_LIST_RSVD) ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (sc->dc_cdata.dc_tx_cnt == 0) sc->dc_wdog_timer = 0; } static void dc_tick(void *xsc) { struct dc_softc *sc; struct mii_data *mii; struct ifnet *ifp; u_int32_t r; sc = xsc; DC_LOCK_ASSERT(sc); ifp = sc->dc_ifp; mii = device_get_softc(sc->dc_miibus); if (sc->dc_flags & DC_REDUCED_MII_POLL) { if (sc->dc_flags & DC_21143_NWAY) { r = CSR_READ_4(sc, DC_10BTSTAT); if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX && (r & DC_TSTAT_LS100)) { sc->dc_link = 0; mii_mediachg(mii); } if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T && (r & DC_TSTAT_LS10)) { sc->dc_link = 0; mii_mediachg(mii); } if (sc->dc_link == 0) mii_tick(mii); } else { /* * For NICs which never report DC_RXSTATE_WAIT, we * have to bite the bullet... */ if ((DC_HAS_BROKEN_RXSTATE(sc) || (CSR_READ_4(sc, DC_ISR) & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT) && sc->dc_cdata.dc_tx_cnt == 0) { mii_tick(mii); if (!(mii->mii_media_status & IFM_ACTIVE)) sc->dc_link = 0; } } } else mii_tick(mii); /* * When the init routine completes, we expect to be able to send * packets right away, and in fact the network code will send a * gratuitous ARP the moment the init routine marks the interface * as running. However, even though the MAC may have been initialized, * there may be a delay of a few seconds before the PHY completes * autonegotiation and the link is brought up. Any transmissions * made during that delay will be lost. Dealing with this is tricky: * we can't just pause in the init routine while waiting for the * PHY to come ready since that would bring the whole system to * a screeching halt for several seconds. * * What we do here is prevent the TX start routine from sending * any packets until a link has been established. After the * interface has been initialized, the tick routine will poll * the state of the PHY until the IFM_ACTIVE flag is set. Until * that time, packets will stay in the send queue, and once the * link comes up, they will be flushed out to the wire. */ if (!sc->dc_link && mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { sc->dc_link++; if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) dc_start_locked(ifp); } if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link) callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc); else callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc); } /* * A transmit underrun has occurred. 
Back off the transmit threshold, * or switch to store and forward mode if we have to. */ static void dc_tx_underrun(struct dc_softc *sc) { u_int32_t isr; int i; if (DC_IS_DAVICOM(sc)) dc_init_locked(sc); if (DC_IS_INTEL(sc)) { /* * The real 21143 requires that the transmitter be idle * in order to change the transmit threshold or store * and forward state. */ DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); for (i = 0; i < DC_TIMEOUT; i++) { isr = CSR_READ_4(sc, DC_ISR); if (isr & DC_ISR_TX_IDLE) break; DELAY(10); } if (i == DC_TIMEOUT) { device_printf(sc->dc_dev, "%s: failed to force tx to idle state\n", __func__); dc_init_locked(sc); } } device_printf(sc->dc_dev, "TX underrun -- "); sc->dc_txthresh += DC_TXTHRESH_INC; if (sc->dc_txthresh > DC_TXTHRESH_MAX) { printf("using store and forward mode\n"); DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); } else { printf("increasing TX threshold\n"); DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH); DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh); } if (DC_IS_INTEL(sc)) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); } #ifdef DEVICE_POLLING static poll_handler_t dc_poll; -static void +static int dc_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct dc_softc *sc = ifp->if_softc; + int rx_npkts = 0; DC_LOCK(sc); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { DC_UNLOCK(sc); - return; + return (rx_npkts); } sc->rxcycles = count; - dc_rxeof(sc); + rx_npkts = dc_rxeof(sc); dc_txeof(sc); if (!IFQ_IS_EMPTY(&ifp->if_snd) && !(ifp->if_drv_flags & IFF_DRV_OACTIVE)) dc_start_locked(ifp); if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ u_int32_t status; status = CSR_READ_4(sc, DC_ISR); status &= (DC_ISR_RX_WATDOGTIMEO | DC_ISR_RX_NOBUF | DC_ISR_TX_NOBUF | DC_ISR_TX_IDLE | DC_ISR_TX_UNDERRUN | DC_ISR_BUS_ERR); if (!status) { DC_UNLOCK(sc); - return; + return (rx_npkts); } /* ack what we have */ CSR_WRITE_4(sc, DC_ISR, status); if (status & (DC_ISR_RX_WATDOGTIMEO | DC_ISR_RX_NOBUF)) { u_int32_t r = CSR_READ_4(sc, DC_FRAMESDISCARDED); ifp->if_ierrors += (r & 0xffff) + ((r >> 17) & 0x7ff); if (dc_rx_resync(sc)) dc_rxeof(sc); } /* restart transmit unit if necessary */ if (status & DC_ISR_TX_IDLE && sc->dc_cdata.dc_tx_cnt) CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); if (status & DC_ISR_TX_UNDERRUN) dc_tx_underrun(sc); if (status & DC_ISR_BUS_ERR) { if_printf(ifp, "%s: bus error\n", __func__); dc_reset(sc); dc_init_locked(sc); } } DC_UNLOCK(sc); + return (rx_npkts); } #endif /* DEVICE_POLLING */ static void dc_intr(void *arg) { struct dc_softc *sc; struct ifnet *ifp; u_int32_t status; sc = arg; if (sc->suspended) return; if ((CSR_READ_4(sc, DC_ISR) & DC_INTRS) == 0) return; DC_LOCK(sc); ifp = sc->dc_ifp; #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { DC_UNLOCK(sc); return; } #endif /* Suppress unwanted interrupts */ if (!(ifp->if_flags & IFF_UP)) { if (CSR_READ_4(sc, DC_ISR) & DC_INTRS) dc_stop(sc); DC_UNLOCK(sc); return; } /* Disable interrupts. 
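 * The handler follows the usual mask -> ack -> service -> unmask
 * shape; a condensed sketch of the loop below (illustrative only,
 * using this driver's register accessors; service() stands in for
 * the per-bit dispatch):
 *
 *	CSR_WRITE_4(sc, DC_IMR, 0);			-- mask all
 *	while ((status = CSR_READ_4(sc, DC_ISR)) & DC_INTRS) {
 *		CSR_WRITE_4(sc, DC_ISR, status);	-- ack
 *		service(status);			-- rx/tx/error work
 *	}
 *	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);		-- unmask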
*/ CSR_WRITE_4(sc, DC_IMR, 0x00000000); while (((status = CSR_READ_4(sc, DC_ISR)) & DC_INTRS) && status != 0xFFFFFFFF && (ifp->if_drv_flags & IFF_DRV_RUNNING)) { CSR_WRITE_4(sc, DC_ISR, status); if (status & DC_ISR_RX_OK) { int curpkts; curpkts = ifp->if_ipackets; dc_rxeof(sc); if (curpkts == ifp->if_ipackets) { while (dc_rx_resync(sc)) dc_rxeof(sc); } } if (status & (DC_ISR_TX_OK | DC_ISR_TX_NOBUF)) dc_txeof(sc); if (status & DC_ISR_TX_IDLE) { dc_txeof(sc); if (sc->dc_cdata.dc_tx_cnt) { DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); } } if (status & DC_ISR_TX_UNDERRUN) dc_tx_underrun(sc); if ((status & DC_ISR_RX_WATDOGTIMEO) || (status & DC_ISR_RX_NOBUF)) { int curpkts; curpkts = ifp->if_ipackets; dc_rxeof(sc); if (curpkts == ifp->if_ipackets) { while (dc_rx_resync(sc)) dc_rxeof(sc); } } if (status & DC_ISR_BUS_ERR) { dc_reset(sc); dc_init_locked(sc); } } /* Re-enable interrupts. */ CSR_WRITE_4(sc, DC_IMR, DC_INTRS); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) dc_start_locked(ifp); DC_UNLOCK(sc); } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. */ static int dc_encap(struct dc_softc *sc, struct mbuf **m_head) { bus_dma_segment_t segs[DC_MAXFRAGS]; struct dc_desc *f; struct mbuf *m; int cur, defragged, error, first, frag, i, idx, nseg; /* * If there's no way we can send any packets, return now. */ if (DC_TX_LIST_CNT - sc->dc_cdata.dc_tx_cnt <= DC_TX_LIST_RSVD) return (ENOBUFS); m = NULL; defragged = 0; if (sc->dc_flags & DC_TX_COALESCE && ((*m_head)->m_next != NULL || sc->dc_flags & DC_TX_ALIGN)) { m = m_defrag(*m_head, M_DONTWAIT); defragged = 1; } else { /* * Count the number of frags in this chain to see if we * need to m_collapse. Since the descriptor list is shared * by all packets, we'll m_collapse long chains so that they * do not use up the entire list, even if they would fit. */ i = 0; for (m = *m_head; m != NULL; m = m->m_next) i++; if (i > DC_TX_LIST_CNT / 4 || DC_TX_LIST_CNT - i + sc->dc_cdata.dc_tx_cnt <= DC_TX_LIST_RSVD) { m = m_collapse(*m_head, M_DONTWAIT, DC_MAXFRAGS); defragged = 1; } } if (defragged != 0) { if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m; } idx = sc->dc_cdata.dc_tx_prod; error = bus_dmamap_load_mbuf_sg(sc->dc_mtag, sc->dc_cdata.dc_tx_map[idx], *m_head, segs, &nseg, 0); if (error == EFBIG) { if (defragged != 0 || (m = m_collapse(*m_head, M_DONTWAIT, DC_MAXFRAGS)) == NULL) { m_freem(*m_head); *m_head = NULL; return (defragged != 0 ? 
error : ENOBUFS); } *m_head = m; error = bus_dmamap_load_mbuf_sg(sc->dc_mtag, sc->dc_cdata.dc_tx_map[idx], *m_head, segs, &nseg, 0); if (error != 0) { m_freem(*m_head); *m_head = NULL; return (error); } } else if (error != 0) return (error); KASSERT(nseg <= DC_MAXFRAGS, ("%s: wrong number of segments (%d)", __func__, nseg)); if (nseg == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } first = cur = frag = sc->dc_cdata.dc_tx_prod; for (i = 0; i < nseg; i++) { if ((sc->dc_flags & DC_TX_ADMTEK_WAR) && (frag == (DC_TX_LIST_CNT - 1)) && (first != sc->dc_cdata.dc_tx_first)) { bus_dmamap_unload(sc->dc_mtag, sc->dc_cdata.dc_tx_map[first]); m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } f = &sc->dc_ldata->dc_tx_list[frag]; f->dc_ctl = htole32(DC_TXCTL_TLINK | segs[i].ds_len); if (i == 0) { f->dc_status = 0; f->dc_ctl |= htole32(DC_TXCTL_FIRSTFRAG); } else f->dc_status = htole32(DC_TXSTAT_OWN); f->dc_data = htole32(segs[i].ds_addr); cur = frag; DC_INC(frag, DC_TX_LIST_CNT); } sc->dc_cdata.dc_tx_prod = frag; sc->dc_cdata.dc_tx_cnt += nseg; sc->dc_cdata.dc_tx_chain[cur] = *m_head; sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_LASTFRAG); if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG) sc->dc_ldata->dc_tx_list[first].dc_ctl |= htole32(DC_TXCTL_FINT); if (sc->dc_flags & DC_TX_INTR_ALWAYS) sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_FINT); if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64) sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_FINT); sc->dc_ldata->dc_tx_list[first].dc_status = htole32(DC_TXSTAT_OWN); bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_tx_map[idx], BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD); return (0); } static void dc_start(struct ifnet *ifp) { struct dc_softc *sc; sc = ifp->if_softc; DC_LOCK(sc); dc_start_locked(ifp); DC_UNLOCK(sc); } /* * Main transmit routine * To avoid having to do mbuf copies, we put pointers to the mbuf data * regions directly in the transmit lists. We also save a copy of the * pointers since the transmit list fragment pointers are physical * addresses. */ static void dc_start_locked(struct ifnet *ifp) { struct dc_softc *sc; struct mbuf *m_head = NULL; unsigned int queued = 0; int idx; sc = ifp->if_softc; DC_LOCK_ASSERT(sc); if (!sc->dc_link && ifp->if_snd.ifq_len < 10) return; if (ifp->if_drv_flags & IFF_DRV_OACTIVE) return; idx = sc->dc_cdata.dc_tx_first = sc->dc_cdata.dc_tx_prod; while (sc->dc_cdata.dc_tx_chain[idx] == NULL) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; if (dc_encap(sc, &m_head)) { if (m_head == NULL) break; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } idx = sc->dc_cdata.dc_tx_prod; queued++; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, m_head); if (sc->dc_flags & DC_TX_ONE) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } } if (queued > 0) { /* Transmit */ if (!(sc->dc_flags & DC_TX_POLL)) CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); /* * Set a timeout in case the chip goes out to lunch. */ sc->dc_wdog_timer = 5; } } static void dc_init(void *xsc) { struct dc_softc *sc = xsc; DC_LOCK(sc); dc_init_locked(sc); DC_UNLOCK(sc); } static void dc_init_locked(struct dc_softc *sc) { struct ifnet *ifp = sc->dc_ifp; struct mii_data *mii; DC_LOCK_ASSERT(sc); mii = device_get_softc(sc->dc_miibus); /* * Cancel pending I/O and free all RX/TX buffers. */ dc_stop(sc); dc_reset(sc); /* * Set cache alignment and burst length. 
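 * The settings below are applied with the driver's read-modify-write
 * helpers.  For reference, DC_SETBIT()/DC_CLRBIT() conventionally
 * expand to something like the following (they are defined in the
 * register header, not in this file):
 *
 *	#define DC_SETBIT(sc, reg, x) \
 *		CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))
 *	#define DC_CLRBIT(sc, reg, x) \
 *		CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))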
*/ if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc)) CSR_WRITE_4(sc, DC_BUSCTL, 0); else CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME | DC_BUSCTL_MRLE); /* * Evenly share the bus between receive and transmit process. */ if (DC_IS_INTEL(sc)) DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_ARBITRATION); if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) { DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA); } else { DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG); } if (sc->dc_flags & DC_TX_POLL) DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1); switch(sc->dc_cachesize) { case 32: DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG); break; case 16: DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG); break; case 8: DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG); break; case 0: default: DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE); break; } if (sc->dc_flags & DC_TX_STORENFWD) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); else { if (sc->dc_txthresh > DC_TXTHRESH_MAX) { DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); } else { DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh); } } DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC); DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF); if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) { /* * The app notes for the 98713 and 98715A say that * in order to have the chips operate properly, a magic * number must be written to CSR16. Macronix does not * document the meaning of these bits so there's no way * to know exactly what they do. The 98713 has a magic * number all its own; the rest all use a different one. */ DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000); if (sc->dc_type == DC_TYPE_98713) DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713); else DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715); } if (DC_IS_XIRCOM(sc)) { /* * setup General Purpose Port mode and data so the tulip * can talk to the MII. */ CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN | DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); DELAY(10); CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN | DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); DELAY(10); } DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH); DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_MIN); /* Init circular RX list. */ if (dc_list_rx_init(sc) == ENOBUFS) { device_printf(sc->dc_dev, "initialization failed: no memory for rx buffers\n"); dc_stop(sc); return; } /* * Init TX descriptors. */ dc_list_tx_init(sc); /* * Load the address of the RX list. */ CSR_WRITE_4(sc, DC_RXADDR, DC_RXDESC(sc, 0)); CSR_WRITE_4(sc, DC_TXADDR, DC_TXDESC(sc, 0)); /* * Enable interrupts. */ #ifdef DEVICE_POLLING /* * ... but only if we are not polling, and make sure they are off in * the case of polling. Some cards (e.g. fxp) turn interrupts on * after a reset. */ if (ifp->if_capenable & IFCAP_POLLING) CSR_WRITE_4(sc, DC_IMR, 0x00000000); else #endif CSR_WRITE_4(sc, DC_IMR, DC_INTRS); CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF); /* Enable transmitter. */ DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); /* * If this is an Intel 21143 and we're not using the * MII port, program the LED control pins so we get * link and activity indications. */ if (sc->dc_flags & DC_TULIP_LEDS) { CSR_WRITE_4(sc, DC_WATCHDOG, DC_WDOG_CTLWREN | DC_WDOG_LINK | DC_WDOG_ACTIVITY); CSR_WRITE_4(sc, DC_WATCHDOG, 0); } /* * Load the RX/multicast filter. We do this sort of late * because the filter programming scheme on the 21143 and * some clones requires DMAing a setup frame via the TX * engine, and we need the transmitter enabled for that. */ dc_setfilt(sc); /* Enable receiver. 
*/ DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON); CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF); mii_mediachg(mii); dc_setcfg(sc, sc->dc_if_media); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; /* Don't start the ticker if this is a homePNA link. */ if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1) sc->dc_link = 1; else { if (sc->dc_flags & DC_21143_NWAY) callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc); else callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc); } sc->dc_wdog_timer = 0; callout_reset(&sc->dc_wdog_ch, hz, dc_watchdog, sc); } /* * Set media options. */ static int dc_ifmedia_upd(struct ifnet *ifp) { struct dc_softc *sc; struct mii_data *mii; struct ifmedia *ifm; sc = ifp->if_softc; mii = device_get_softc(sc->dc_miibus); DC_LOCK(sc); mii_mediachg(mii); ifm = &mii->mii_media; if (DC_IS_DAVICOM(sc) && IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) dc_setcfg(sc, ifm->ifm_media); else sc->dc_link = 0; DC_UNLOCK(sc); return (0); } /* * Report current media status. */ static void dc_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct dc_softc *sc; struct mii_data *mii; struct ifmedia *ifm; sc = ifp->if_softc; mii = device_get_softc(sc->dc_miibus); DC_LOCK(sc); mii_pollstat(mii); ifm = &mii->mii_media; if (DC_IS_DAVICOM(sc)) { if (IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) { ifmr->ifm_active = ifm->ifm_media; ifmr->ifm_status = 0; DC_UNLOCK(sc); return; } } ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; DC_UNLOCK(sc); } static int dc_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct dc_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; struct mii_data *mii; int error = 0; switch (command) { case SIOCSIFFLAGS: DC_LOCK(sc); if (ifp->if_flags & IFF_UP) { int need_setfilt = (ifp->if_flags ^ sc->dc_if_flags) & (IFF_PROMISC | IFF_ALLMULTI); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { if (need_setfilt) dc_setfilt(sc); } else { sc->dc_txthresh = 0; dc_init_locked(sc); } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) dc_stop(sc); } sc->dc_if_flags = ifp->if_flags; DC_UNLOCK(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: DC_LOCK(sc); dc_setfilt(sc); DC_UNLOCK(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->dc_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: #ifdef DEVICE_POLLING if (ifr->ifr_reqcap & IFCAP_POLLING && !(ifp->if_capenable & IFCAP_POLLING)) { error = ether_poll_register(dc_poll, ifp); if (error) return(error); DC_LOCK(sc); /* Disable interrupts */ CSR_WRITE_4(sc, DC_IMR, 0x00000000); ifp->if_capenable |= IFCAP_POLLING; DC_UNLOCK(sc); return (error); } if (!(ifr->ifr_reqcap & IFCAP_POLLING) && ifp->if_capenable & IFCAP_POLLING) { error = ether_poll_deregister(ifp); /* Enable interrupts. 
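 * Note the symmetry of the two transitions above: interrupts are
 * masked before IFCAP_POLLING is set, and re-enabled here only
 * after ether_poll_deregister() has returned, so the chip is never
 * serviced by the poll handler and the ISR at the same time.  As
 * of this revision a poll handler also reports how much rx work it
 * did; the shape of the convention (hypothetical driver "xx", not
 * code from this file):
 *
 *	static int
 *	xx_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
 *	{
 *		int rx_npkts = 0;
 *		...
 *		rx_npkts = xx_rxeof(sc);	-- packets received
 *		return (rx_npkts);
 *	}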
*/ DC_LOCK(sc); CSR_WRITE_4(sc, DC_IMR, DC_INTRS); ifp->if_capenable &= ~IFCAP_POLLING; DC_UNLOCK(sc); return (error); } #endif /* DEVICE_POLLING */ break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void dc_watchdog(void *xsc) { struct dc_softc *sc = xsc; struct ifnet *ifp; DC_LOCK_ASSERT(sc); if (sc->dc_wdog_timer == 0 || --sc->dc_wdog_timer != 0) { callout_reset(&sc->dc_wdog_ch, hz, dc_watchdog, sc); return; } ifp = sc->dc_ifp; ifp->if_oerrors++; device_printf(sc->dc_dev, "watchdog timeout\n"); dc_stop(sc); dc_reset(sc); dc_init_locked(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) dc_start_locked(ifp); } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void dc_stop(struct dc_softc *sc) { struct ifnet *ifp; struct dc_list_data *ld; struct dc_chain_data *cd; int i; u_int32_t ctl; DC_LOCK_ASSERT(sc); ifp = sc->dc_ifp; ld = sc->dc_ldata; cd = &sc->dc_cdata; callout_stop(&sc->dc_stat_ch); callout_stop(&sc->dc_wdog_ch); sc->dc_wdog_timer = 0; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON | DC_NETCFG_TX_ON)); CSR_WRITE_4(sc, DC_IMR, 0x00000000); CSR_WRITE_4(sc, DC_TXADDR, 0x00000000); CSR_WRITE_4(sc, DC_RXADDR, 0x00000000); sc->dc_link = 0; /* * Free data in the RX lists. */ for (i = 0; i < DC_RX_LIST_CNT; i++) { if (cd->dc_rx_chain[i] != NULL) { m_freem(cd->dc_rx_chain[i]); cd->dc_rx_chain[i] = NULL; } } bzero(&ld->dc_rx_list, sizeof(ld->dc_rx_list)); /* * Free the TX list buffers. */ for (i = 0; i < DC_TX_LIST_CNT; i++) { if (cd->dc_tx_chain[i] != NULL) { ctl = le32toh(ld->dc_tx_list[i].dc_ctl); if ((ctl & DC_TXCTL_SETUP) || !(ctl & DC_TXCTL_LASTFRAG)) { cd->dc_tx_chain[i] = NULL; continue; } bus_dmamap_unload(sc->dc_mtag, cd->dc_tx_map[i]); m_freem(cd->dc_tx_chain[i]); cd->dc_tx_chain[i] = NULL; } } bzero(&ld->dc_tx_list, sizeof(ld->dc_tx_list)); } /* * Device suspend routine. Stop the interface and save some PCI * settings in case the BIOS doesn't restore them properly on * resume. */ static int dc_suspend(device_t dev) { struct dc_softc *sc; sc = device_get_softc(dev); DC_LOCK(sc); dc_stop(sc); sc->suspended = 1; DC_UNLOCK(sc); return (0); } /* * Device resume routine. Restore some PCI settings in case the BIOS * doesn't, re-enable busmastering, and restart the interface if * appropriate. */ static int dc_resume(device_t dev) { struct dc_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->dc_ifp; /* reinitialize interface if necessary */ DC_LOCK(sc); if (ifp->if_flags & IFF_UP) dc_init_locked(sc); sc->suspended = 0; DC_UNLOCK(sc); return (0); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static int dc_shutdown(device_t dev) { struct dc_softc *sc; sc = device_get_softc(dev); DC_LOCK(sc); dc_stop(sc); DC_UNLOCK(sc); return (0); } Index: head/sys/dev/e1000/if_em.c =================================================================== --- head/sys/dev/e1000/if_em.c (revision 193095) +++ head/sys/dev/e1000/if_em.c (revision 193096) @@ -1,5480 +1,5491 @@ /****************************************************************************** Copyright (c) 2001-2009, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #include "opt_inet.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef EM_TIMESYNC #include #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "e1000_api.h" #include "e1000_82571.h" #include "if_em.h" /********************************************************************* * Set this to one to display debug statistics *********************************************************************/ int em_display_debug_stats = 0; /********************************************************************* * Driver version: *********************************************************************/ char em_driver_version[] = "6.9.9"; /********************************************************************* * PCI Device ID Table * * Used by probe to select devices to load on * Last field stores an index into e1000_strings * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } *********************************************************************/ static em_vendor_info_t em_vendor_info_array[] = { /* Intel(R) PRO/1000 Network Connection */ { 0x8086, E1000_DEV_ID_82540EM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82540EM_LOM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82540EP, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82540EP_LOM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82540EP_LP, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82541EI, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82541ER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82541ER_LOM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82541EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82541GI, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82541GI_LF, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82541GI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82542, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82543GC_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, 
E1000_DEV_ID_82543GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82544EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82544EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82544GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82544GC_LOM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82545EM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82545EM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82545GM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82545GM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82545GM_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82546EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82546EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82546GB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82546GB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82546GB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82546GB_PCIE, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82547EI, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82547EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82547GI, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571EB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571EB_SERDES_DUAL, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571EB_SERDES_QUAD, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571EB_QUAD_COPPER_LP, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571EB_QUAD_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82571PT_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82572EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82572EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82572EI_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82572EI, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82573E, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82573E_IAMT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82573L, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_SPT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_SPT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_80003ES2LAN_COPPER_DPT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_80003ES2LAN_SERDES_DPT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_IGP_M_AMT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_IGP_AMT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_IGP_C, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_IFE, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_IFE_GT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH8_IGP_M, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IGP_M_AMT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IGP_AMT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IGP_C, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IGP_M, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, 
E1000_DEV_ID_ICH9_IGP_M_V, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IFE, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IFE_GT, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_IFE_G, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH9_BM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82574L, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH10_R_BM_LM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH10_R_BM_LF, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH10_R_BM_V, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH10_D_BM_LM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_ICH10_D_BM_LF, PCI_ANY_ID, PCI_ANY_ID, 0}, /* required last entry */ { 0, 0, 0, 0, 0} }; /********************************************************************* * Table of branding strings for all supported NICs. *********************************************************************/ static char *em_strings[] = { "Intel(R) PRO/1000 Network Connection" }; /********************************************************************* * Function prototypes *********************************************************************/ static int em_probe(device_t); static int em_attach(device_t); static int em_detach(device_t); static int em_shutdown(device_t); static int em_suspend(device_t); static int em_resume(device_t); static void em_start(struct ifnet *); static void em_start_locked(struct ifnet *ifp); static int em_ioctl(struct ifnet *, u_long, caddr_t); static void em_watchdog(struct adapter *); static void em_init(void *); static void em_init_locked(struct adapter *); static void em_stop(void *); static void em_media_status(struct ifnet *, struct ifmediareq *); static int em_media_change(struct ifnet *); static void em_identify_hardware(struct adapter *); static int em_allocate_pci_resources(struct adapter *); static int em_allocate_legacy(struct adapter *adapter); static int em_allocate_msix(struct adapter *adapter); static int em_setup_msix(struct adapter *); static void em_free_pci_resources(struct adapter *); static void em_local_timer(void *); static int em_hardware_init(struct adapter *); static void em_setup_interface(device_t, struct adapter *); static void em_setup_transmit_structures(struct adapter *); static void em_initialize_transmit_unit(struct adapter *); static int em_setup_receive_structures(struct adapter *); static void em_initialize_receive_unit(struct adapter *); static void em_enable_intr(struct adapter *); static void em_disable_intr(struct adapter *); static void em_free_transmit_structures(struct adapter *); static void em_free_receive_structures(struct adapter *); static void em_update_stats_counters(struct adapter *); static void em_txeof(struct adapter *); static void em_tx_purge(struct adapter *); static int em_allocate_receive_structures(struct adapter *); static int em_allocate_transmit_structures(struct adapter *); -static int em_rxeof(struct adapter *, int); +static int em_rxeof(struct adapter *, int, int *); #ifndef __NO_STRICT_ALIGNMENT static int em_fixup_rx(struct adapter *); #endif static void em_receive_checksum(struct adapter *, struct e1000_rx_desc *, struct mbuf *); static void em_transmit_checksum_setup(struct adapter *, struct mbuf *, u32 *, u32 *); #if __FreeBSD_version >= 700000 static bool em_tso_setup(struct adapter *, struct mbuf *, u32 *, u32 *); #endif /* FreeBSD_version >= 700000 */ static void em_set_promisc(struct adapter *); static void em_disable_promisc(struct adapter *); static void em_set_multi(struct 
adapter *); static void em_print_hw_stats(struct adapter *); static void em_update_link_status(struct adapter *); static int em_get_buf(struct adapter *, int); static void em_register_vlan(void *, struct ifnet *, u16); static void em_unregister_vlan(void *, struct ifnet *, u16); static int em_xmit(struct adapter *, struct mbuf **); static void em_smartspeed(struct adapter *); static int em_82547_fifo_workaround(struct adapter *, int); static void em_82547_update_fifo_head(struct adapter *, int); static int em_82547_tx_fifo_reset(struct adapter *); static void em_82547_move_tail(void *); static int em_dma_malloc(struct adapter *, bus_size_t, struct em_dma_alloc *, int); static void em_dma_free(struct adapter *, struct em_dma_alloc *); static void em_print_debug_info(struct adapter *); static void em_print_nvm_info(struct adapter *); static int em_is_valid_ether_addr(u8 *); static int em_sysctl_stats(SYSCTL_HANDLER_ARGS); static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS); static u32 em_fill_descriptors (bus_addr_t address, u32 length, PDESC_ARRAY desc_array); static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS); static void em_add_int_delay_sysctl(struct adapter *, const char *, const char *, struct em_int_delay_info *, int, int); /* Management and WOL Support */ static void em_init_manageability(struct adapter *); static void em_release_manageability(struct adapter *); static void em_get_hw_control(struct adapter *); static void em_release_hw_control(struct adapter *); static void em_enable_wakeup(device_t); #ifdef EM_TIMESYNC /* Precision Time sync support */ static int em_tsync_init(struct adapter *); static void em_tsync_disable(struct adapter *); #endif #ifdef EM_LEGACY_IRQ static void em_intr(void *); #else /* FAST IRQ */ #if __FreeBSD_version < 700000 static void em_irq_fast(void *); #else static int em_irq_fast(void *); #endif /* MSIX handlers */ static void em_msix_tx(void *); static void em_msix_rx(void *); static void em_msix_link(void *); static void em_handle_rx(void *context, int pending); static void em_handle_tx(void *context, int pending); static void em_handle_rxtx(void *context, int pending); static void em_handle_link(void *context, int pending); static void em_add_rx_process_limit(struct adapter *, const char *, const char *, int *, int); #endif /* ~EM_LEGACY_IRQ */ #ifdef DEVICE_POLLING static poll_handler_t em_poll; #endif /* POLLING */ /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ static device_method_t em_methods[] = { /* Device interface */ DEVMETHOD(device_probe, em_probe), DEVMETHOD(device_attach, em_attach), DEVMETHOD(device_detach, em_detach), DEVMETHOD(device_shutdown, em_shutdown), DEVMETHOD(device_suspend, em_suspend), DEVMETHOD(device_resume, em_resume), {0, 0} }; static driver_t em_driver = { "em", em_methods, sizeof(struct adapter), }; static devclass_t em_devclass; DRIVER_MODULE(em, pci, em_driver, em_devclass, 0, 0); MODULE_DEPEND(em, pci, 1, 1, 1); MODULE_DEPEND(em, ether, 1, 1, 1); /********************************************************************* * Tunable default values. 
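 *
 * The interrupt-delay tunables below are specified in microseconds
 * and converted to the adapter's 1.024 us tick granularity by the
 * EM_TICKS_TO_USECS()/EM_USECS_TO_TICKS() macros that follow; both
 * round to the nearest unit.  For example, EM_TICKS_TO_USECS(64)
 * evaluates to (1024 * 64 + 500) / 1000 = 66 microseconds.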
*********************************************************************/ #define EM_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000) #define EM_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024) #define M_TSO_LEN 66 /* Allow common code without TSO */ #ifndef CSUM_TSO #define CSUM_TSO 0 #endif static int em_tx_int_delay_dflt = EM_TICKS_TO_USECS(EM_TIDV); static int em_rx_int_delay_dflt = EM_TICKS_TO_USECS(EM_RDTR); static int em_tx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_TADV); static int em_rx_abs_int_delay_dflt = EM_TICKS_TO_USECS(EM_RADV); static int em_rxd = EM_DEFAULT_RXD; static int em_txd = EM_DEFAULT_TXD; static int em_smart_pwr_down = FALSE; /* Controls whether promiscuous also shows bad packets */ static int em_debug_sbp = FALSE; /* Local switch for MSI/MSIX */ static int em_enable_msi = TRUE; TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt); TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt); TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt); TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt); TUNABLE_INT("hw.em.rxd", &em_rxd); TUNABLE_INT("hw.em.txd", &em_txd); TUNABLE_INT("hw.em.smart_pwr_down", &em_smart_pwr_down); TUNABLE_INT("hw.em.sbp", &em_debug_sbp); TUNABLE_INT("hw.em.enable_msi", &em_enable_msi); #ifndef EM_LEGACY_IRQ /* How many packets rxeof tries to clean at a time */ static int em_rx_process_limit = 100; TUNABLE_INT("hw.em.rx_process_limit", &em_rx_process_limit); #endif /* Global used in WOL setup with multiport cards */ static int global_quad_port_a = 0; /********************************************************************* * Device identification routine * * em_probe determines if the driver should be loaded on * adapter based on PCI vendor/device id of the adapter. * * return BUS_PROBE_DEFAULT on success, positive on failure *********************************************************************/ static int em_probe(device_t dev) { char adapter_name[60]; u16 pci_vendor_id = 0; u16 pci_device_id = 0; u16 pci_subvendor_id = 0; u16 pci_subdevice_id = 0; em_vendor_info_t *ent; INIT_DEBUGOUT("em_probe: begin"); pci_vendor_id = pci_get_vendor(dev); if (pci_vendor_id != EM_VENDOR_ID) return (ENXIO); pci_device_id = pci_get_device(dev); pci_subvendor_id = pci_get_subvendor(dev); pci_subdevice_id = pci_get_subdevice(dev); ent = em_vendor_info_array; while (ent->vendor_id != 0) { if ((pci_vendor_id == ent->vendor_id) && (pci_device_id == ent->device_id) && ((pci_subvendor_id == ent->subvendor_id) || (ent->subvendor_id == PCI_ANY_ID)) && ((pci_subdevice_id == ent->subdevice_id) || (ent->subdevice_id == PCI_ANY_ID))) { sprintf(adapter_name, "%s %s", em_strings[ent->index], em_driver_version); device_set_desc_copy(dev, adapter_name); return (BUS_PROBE_DEFAULT); } ent++; } return (ENXIO); } /********************************************************************* * Device initialization routine * * The attach entry point is called when the driver is being loaded. * This routine identifies the type of hardware, allocates all resources * and initializes the hardware. 
* * return 0 on success, positive on failure *********************************************************************/ static int em_attach(device_t dev) { struct adapter *adapter; int tsize, rsize; int error = 0; u16 eeprom_data, device_id; INIT_DEBUGOUT("em_attach: begin"); adapter = device_get_softc(dev); adapter->dev = adapter->osdep.dev = dev; EM_CORE_LOCK_INIT(adapter, device_get_nameunit(dev)); EM_TX_LOCK_INIT(adapter, device_get_nameunit(dev)); EM_RX_LOCK_INIT(adapter, device_get_nameunit(dev)); /* SYSCTL stuff */ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, em_sysctl_debug_info, "I", "Debug Information"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, em_sysctl_stats, "I", "Statistics"); callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0); callout_init_mtx(&adapter->tx_fifo_timer, &adapter->tx_mtx, 0); /* Determine hardware and mac info */ em_identify_hardware(adapter); /* Setup PCI resources */ if (em_allocate_pci_resources(adapter)) { device_printf(dev, "Allocation of PCI resources failed\n"); error = ENXIO; goto err_pci; } /* ** For ICH8 and family we need to ** map the flash memory, and this ** must happen after the MAC is ** identified */ if ((adapter->hw.mac.type == e1000_ich8lan) || (adapter->hw.mac.type == e1000_ich9lan) || (adapter->hw.mac.type == e1000_ich10lan)) { int rid = EM_BAR_TYPE_FLASH; adapter->flash = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (adapter->flash == NULL) { device_printf(dev, "Mapping of Flash failed\n"); error = ENXIO; goto err_pci; } /* This is used in the shared code */ adapter->hw.flash_address = (u8 *)adapter->flash; adapter->osdep.flash_bus_space_tag = rman_get_bustag(adapter->flash); adapter->osdep.flash_bus_space_handle = rman_get_bushandle(adapter->flash); } /* Do Shared Code initialization */ if (e1000_setup_init_funcs(&adapter->hw, TRUE)) { device_printf(dev, "Setup of Shared code failed\n"); error = ENXIO; goto err_pci; } e1000_get_bus_info(&adapter->hw); /* Set up some sysctls for the tunable interrupt delays */ em_add_int_delay_sysctl(adapter, "rx_int_delay", "receive interrupt delay in usecs", &adapter->rx_int_delay, E1000_REGISTER(&adapter->hw, E1000_RDTR), em_rx_int_delay_dflt); em_add_int_delay_sysctl(adapter, "tx_int_delay", "transmit interrupt delay in usecs", &adapter->tx_int_delay, E1000_REGISTER(&adapter->hw, E1000_TIDV), em_tx_int_delay_dflt); if (adapter->hw.mac.type >= e1000_82540) { em_add_int_delay_sysctl(adapter, "rx_abs_int_delay", "receive interrupt delay limit in usecs", &adapter->rx_abs_int_delay, E1000_REGISTER(&adapter->hw, E1000_RADV), em_rx_abs_int_delay_dflt); em_add_int_delay_sysctl(adapter, "tx_abs_int_delay", "transmit interrupt delay limit in usecs", &adapter->tx_abs_int_delay, E1000_REGISTER(&adapter->hw, E1000_TADV), em_tx_abs_int_delay_dflt); } #ifndef EM_LEGACY_IRQ /* Sysctls for limiting the amount of work done in the taskqueue */ em_add_rx_process_limit(adapter, "rx_processing_limit", "max number of rx packets to process", &adapter->rx_process_limit, em_rx_process_limit); #endif /* * Validate number of transmit and receive descriptors. It * must not exceed hardware maximum, and must be multiple * of E1000_DBA_ALIGN. 
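 * As a concrete example (descriptor and alignment sizes assumed
 * from the e1000 headers, not restated here): a legacy descriptor
 * is 16 bytes, so a ring of 256 entries occupies 4096 bytes, a
 * multiple of the 128-byte alignment, and passes the check below.
 * A count whose ring size fails either test is replaced with
 * EM_DEFAULT_TXD/EM_DEFAULT_RXD and noted on the console.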
 */ if (((em_txd * sizeof(struct e1000_tx_desc)) % EM_DBA_ALIGN) != 0 || (adapter->hw.mac.type >= e1000_82544 && em_txd > EM_MAX_TXD) || (adapter->hw.mac.type < e1000_82544 && em_txd > EM_MAX_TXD_82543) || (em_txd < EM_MIN_TXD)) { device_printf(dev, "Using %d TX descriptors instead of %d!\n", EM_DEFAULT_TXD, em_txd); adapter->num_tx_desc = EM_DEFAULT_TXD; } else adapter->num_tx_desc = em_txd; if (((em_rxd * sizeof(struct e1000_rx_desc)) % EM_DBA_ALIGN) != 0 || (adapter->hw.mac.type >= e1000_82544 && em_rxd > EM_MAX_RXD) || (adapter->hw.mac.type < e1000_82544 && em_rxd > EM_MAX_RXD_82543) || (em_rxd < EM_MIN_RXD)) { device_printf(dev, "Using %d RX descriptors instead of %d!\n", EM_DEFAULT_RXD, em_rxd); adapter->num_rx_desc = EM_DEFAULT_RXD; } else adapter->num_rx_desc = em_rxd; adapter->hw.mac.autoneg = DO_AUTO_NEG; adapter->hw.phy.autoneg_wait_to_complete = FALSE; adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; adapter->rx_buffer_len = 2048; e1000_init_script_state_82541(&adapter->hw, TRUE); e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE); /* Copper options */ if (adapter->hw.phy.media_type == e1000_media_type_copper) { adapter->hw.phy.mdix = AUTO_ALL_MODES; adapter->hw.phy.disable_polarity_correction = FALSE; adapter->hw.phy.ms_type = EM_MASTER_SLAVE; } /* * Set the frame limits assuming * standard Ethernet-sized frames. */ adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE; adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE; /* * This controls when the hardware reports transmit completion * status. */ adapter->hw.mac.report_tx_early = 1; tsize = roundup2(adapter->num_tx_desc * sizeof(struct e1000_tx_desc), EM_DBA_ALIGN); /* Allocate Transmit Descriptor ring */ if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) { device_printf(dev, "Unable to allocate tx_desc memory\n"); error = ENOMEM; goto err_tx_desc; } adapter->tx_desc_base = (struct e1000_tx_desc *)adapter->txdma.dma_vaddr; rsize = roundup2(adapter->num_rx_desc * sizeof(struct e1000_rx_desc), EM_DBA_ALIGN); /* Allocate Receive Descriptor ring */ if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) { device_printf(dev, "Unable to allocate rx_desc memory\n"); error = ENOMEM; goto err_rx_desc; } adapter->rx_desc_base = (struct e1000_rx_desc *)adapter->rxdma.dma_vaddr; /* ** Start from a known state; this is ** important for reading the NVM and ** MAC address reliably. */ e1000_reset_hw(&adapter->hw); /* Make sure we have a good EEPROM before we read from it */ if (e1000_validate_nvm_checksum(&adapter->hw) < 0) { /* ** Some PCI-E parts fail the first check due to ** the link being in a sleep state; call it again and, ** if it fails a second time, it's a real issue.
*/ if (e1000_validate_nvm_checksum(&adapter->hw) < 0) { device_printf(dev, "The EEPROM Checksum Is Not Valid\n"); error = EIO; goto err_hw_init; } } /* Copy the permanent MAC address out of the EEPROM */ if (e1000_read_mac_addr(&adapter->hw) < 0) { device_printf(dev, "EEPROM read error while reading MAC" " address\n"); error = EIO; goto err_hw_init; } if (!em_is_valid_ether_addr(adapter->hw.mac.addr)) { device_printf(dev, "Invalid MAC address\n"); error = EIO; goto err_hw_init; } /* Initialize the hardware */ if (em_hardware_init(adapter)) { device_printf(dev, "Unable to initialize the hardware\n"); error = EIO; goto err_hw_init; } /* Allocate transmit descriptors and buffers */ if (em_allocate_transmit_structures(adapter)) { device_printf(dev, "Could not setup transmit structures\n"); error = ENOMEM; goto err_tx_struct; } /* Allocate receive descriptors and buffers */ if (em_allocate_receive_structures(adapter)) { device_printf(dev, "Could not setup receive structures\n"); error = ENOMEM; goto err_rx_struct; } /* ** Do interrupt configuration */ if (adapter->msi > 1) /* Do MSI/X */ error = em_allocate_msix(adapter); else /* MSI or Legacy */ error = em_allocate_legacy(adapter); if (error) goto err_rx_struct; /* Setup OS specific network interface */ em_setup_interface(dev, adapter); /* Initialize statistics */ em_update_stats_counters(adapter); adapter->hw.mac.get_link_status = 1; em_update_link_status(adapter); /* Indicate SOL/IDER usage */ if (e1000_check_reset_block(&adapter->hw)) device_printf(dev, "PHY reset is blocked due to SOL/IDER session.\n"); /* Determine if we have to control management hardware */ adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw); /* * Setup Wake-on-Lan */ switch (adapter->hw.mac.type) { case e1000_82542: case e1000_82543: break; case e1000_82546: case e1000_82546_rev_3: case e1000_82571: case e1000_80003es2lan: if (adapter->hw.bus.func == 1) e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); else e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); eeprom_data &= EM_EEPROM_APME; break; default: /* APME bit in EEPROM is mapped to WUC.APME */ eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC) & E1000_WUC_APME; break; } if (eeprom_data) adapter->wol = E1000_WUFC_MAG; /* * We have the eeprom settings, now apply the special cases * where the eeprom may be wrong or the board won't support * wake on lan on a particular port */ device_id = pci_get_device(dev); switch (device_id) { case E1000_DEV_ID_82546GB_PCIE: adapter->wol = 0; break; case E1000_DEV_ID_82546EB_FIBER: case E1000_DEV_ID_82546GB_FIBER: case E1000_DEV_ID_82571EB_FIBER: /* Wake events only supported on port A for dual fiber * regardless of eeprom setting */ if (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_FUNC_1) adapter->wol = 0; break; case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: case E1000_DEV_ID_82571EB_QUAD_COPPER: case E1000_DEV_ID_82571EB_QUAD_FIBER: case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: /* if quad port adapter, disable WoL on all but port A */ if (global_quad_port_a != 0) adapter->wol = 0; /* Reset for multiple quad port adapters */ if (++global_quad_port_a == 4) global_quad_port_a = 0; break; } /* Do we need workaround for 82544 PCI-X adapter? 
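 * The answer is just the conjunction of the two tests below; an
 * equivalent one-liner (assuming the usual 1/0 TRUE/FALSE values)
 * would be:
 *
 *	adapter->pcix_82544 = (adapter->hw.bus.type == e1000_bus_type_pcix &&
 *	    adapter->hw.mac.type == e1000_82544);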
*/ if (adapter->hw.bus.type == e1000_bus_type_pcix && adapter->hw.mac.type == e1000_82544) adapter->pcix_82544 = TRUE; else adapter->pcix_82544 = FALSE; /* Register for VLAN events */ adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, em_register_vlan, 0, EVENTHANDLER_PRI_FIRST); adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, em_unregister_vlan, 0, EVENTHANDLER_PRI_FIRST); /* Tell the stack that the interface is not active */ adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); INIT_DEBUGOUT("em_attach: end"); return (0); err_rx_struct: em_free_transmit_structures(adapter); err_tx_struct: err_hw_init: em_release_hw_control(adapter); em_dma_free(adapter, &adapter->rxdma); err_rx_desc: em_dma_free(adapter, &adapter->txdma); err_tx_desc: err_pci: em_free_pci_resources(adapter); EM_TX_LOCK_DESTROY(adapter); EM_RX_LOCK_DESTROY(adapter); EM_CORE_LOCK_DESTROY(adapter); return (error); } /********************************************************************* * Device removal routine * * The detach entry point is called when the driver is being removed. * This routine stops the adapter and deallocates all the resources * that were allocated for driver operation. * * return 0 on success, positive on failure *********************************************************************/ static int em_detach(device_t dev) { struct adapter *adapter = device_get_softc(dev); struct ifnet *ifp = adapter->ifp; INIT_DEBUGOUT("em_detach: begin"); /* Make sure VLANS are not using driver */ #if __FreeBSD_version >= 700000 if (adapter->ifp->if_vlantrunk != NULL) { #else if (adapter->ifp->if_nvlans != 0) { #endif device_printf(dev,"Vlan in use, detach first\n"); return (EBUSY); } #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif EM_CORE_LOCK(adapter); EM_TX_LOCK(adapter); adapter->in_detach = 1; em_stop(adapter); e1000_phy_hw_reset(&adapter->hw); em_release_manageability(adapter); if (((adapter->hw.mac.type == e1000_82573) || (adapter->hw.mac.type == e1000_ich8lan) || (adapter->hw.mac.type == e1000_ich10lan) || (adapter->hw.mac.type == e1000_ich9lan)) && e1000_check_mng_mode(&adapter->hw)) em_release_hw_control(adapter); if (adapter->wol) { E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN); E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol); em_enable_wakeup(dev); } EM_TX_UNLOCK(adapter); EM_CORE_UNLOCK(adapter); /* Unregister VLAN events */ if (adapter->vlan_attach != NULL) EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach); if (adapter->vlan_detach != NULL) EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach); ether_ifdetach(adapter->ifp); callout_drain(&adapter->timer); callout_drain(&adapter->tx_fifo_timer); em_free_pci_resources(adapter); bus_generic_detach(dev); if_free(ifp); drbr_free(adapter->br, M_DEVBUF); em_free_transmit_structures(adapter); em_free_receive_structures(adapter); /* Free Transmit Descriptor ring */ if (adapter->tx_desc_base) { em_dma_free(adapter, &adapter->txdma); adapter->tx_desc_base = NULL; } /* Free Receive Descriptor ring */ if (adapter->rx_desc_base) { em_dma_free(adapter, &adapter->rxdma); adapter->rx_desc_base = NULL; } EM_TX_LOCK_DESTROY(adapter); EM_RX_LOCK_DESTROY(adapter); EM_CORE_LOCK_DESTROY(adapter); return (0); } /********************************************************************* * * Shutdown entry point * **********************************************************************/ static int em_shutdown(device_t dev) { return em_suspend(dev); } /* * Suspend/resume device 
methods. */ static int em_suspend(device_t dev) { struct adapter *adapter = device_get_softc(dev); EM_CORE_LOCK(adapter); EM_TX_LOCK(adapter); em_stop(adapter); EM_TX_UNLOCK(adapter); em_release_manageability(adapter); if (((adapter->hw.mac.type == e1000_82573) || (adapter->hw.mac.type == e1000_ich8lan) || (adapter->hw.mac.type == e1000_ich10lan) || (adapter->hw.mac.type == e1000_ich9lan)) && e1000_check_mng_mode(&adapter->hw)) em_release_hw_control(adapter); if (adapter->wol) { E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN); E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol); em_enable_wakeup(dev); } EM_CORE_UNLOCK(adapter); return bus_generic_suspend(dev); } static int em_resume(device_t dev) { struct adapter *adapter = device_get_softc(dev); struct ifnet *ifp = adapter->ifp; EM_CORE_LOCK(adapter); em_init_locked(adapter); em_init_manageability(adapter); EM_CORE_UNLOCK(adapter); em_start(ifp); return bus_generic_resume(dev); } /********************************************************************* * Transmit entry point * * em_start is called by the stack to initiate a transmit. * The driver will remain in this routine as long as there are * packets to transmit and transmit resources are available. * In case resources are not available, the stack is notified and * the packet is requeued. **********************************************************************/ #ifdef IFNET_BUF_RING static int em_transmit_locked(struct ifnet *ifp, struct mbuf *m) { struct adapter *adapter = ifp->if_softc; int error; EM_TX_LOCK_ASSERT(adapter); if (((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) || (!adapter->link_active)) { error = drbr_enqueue(ifp, adapter->br, m); return (error); } else if (ADAPTER_RING_EMPTY(adapter) && (adapter->num_tx_desc_avail > EM_TX_OP_THRESHOLD)) { if (em_xmit(adapter, &m)) { if (m && (error = drbr_enqueue(ifp, adapter->br, m)) != 0) return (error); } else { /* * We've bypassed the buf ring so we need to update * ifp directly */ drbr_stats_update(ifp, m->m_pkthdr.len, m->m_flags); /* ** Send a copy of the frame to the BPF ** listener and set the watchdog on. */ ETHER_BPF_MTAP(ifp, m); adapter->watchdog_timer = EM_TX_TIMEOUT; } } else if ((error = drbr_enqueue(ifp, adapter->br, m)) != 0) return (error); if (!ADAPTER_RING_EMPTY(adapter)) em_start_locked(ifp); return (0); } static int em_transmit(struct ifnet *ifp, struct mbuf *m) { struct adapter *adapter = ifp->if_softc; int error = 0; if (EM_TX_TRYLOCK(adapter)) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) error = em_transmit_locked(ifp, m); EM_TX_UNLOCK(adapter); } else error = drbr_enqueue(ifp, adapter->br, m); return (error); } static void em_qflush(struct ifnet *ifp) { struct mbuf *m; struct adapter *adapter = (struct adapter *)ifp->if_softc; EM_TX_LOCK(adapter); while ((m = buf_ring_dequeue_sc(adapter->br)) != NULL) m_freem(m); if_qflush(ifp); EM_TX_UNLOCK(adapter); } #endif static void em_start_locked(struct ifnet *ifp) { struct adapter *adapter = ifp->if_softc; struct mbuf *m_head; EM_TX_LOCK_ASSERT(adapter); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; if (!adapter->link_active) return; while ((adapter->num_tx_desc_avail > EM_TX_OP_THRESHOLD) && (!ADAPTER_RING_EMPTY(adapter))) { m_head = em_dequeue(ifp, adapter->br); if (m_head == NULL) break; /* * Encapsulation can modify our pointer and/or make it * NULL on failure. In that event, we can't requeue.
 */ if (em_xmit(adapter, &m_head)) { if (m_head == NULL) break; #ifndef IFNET_BUF_RING ifp->if_drv_flags |= IFF_DRV_OACTIVE; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); #endif break; } /* Send a copy of the frame to the BPF listener */ ETHER_BPF_MTAP(ifp, m_head); /* Set timeout in case hardware has problems transmitting. */ adapter->watchdog_timer = EM_TX_TIMEOUT; } if ((adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD)) ifp->if_drv_flags |= IFF_DRV_OACTIVE; } static void em_start(struct ifnet *ifp) { struct adapter *adapter = ifp->if_softc; EM_TX_LOCK(adapter); if (ifp->if_drv_flags & IFF_DRV_RUNNING) em_start_locked(ifp); EM_TX_UNLOCK(adapter); } /********************************************************************* * Ioctl entry point * * em_ioctl is called when the user wants to configure the * interface. * * return 0 on success, positive on failure **********************************************************************/ static int em_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct adapter *adapter = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; #ifdef INET struct ifaddr *ifa = (struct ifaddr *)data; #endif int error = 0; if (adapter->in_detach) return (error); switch (command) { case SIOCSIFADDR: #ifdef INET if (ifa->ifa_addr->sa_family == AF_INET) { /* * XXX * Since resetting the hardware takes a very long time * and results in link renegotiation, we * initialize the hardware only when it is absolutely * required. */ ifp->if_flags |= IFF_UP; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { EM_CORE_LOCK(adapter); em_init_locked(adapter); EM_CORE_UNLOCK(adapter); } arp_ifinit(ifp, ifa); } else #endif error = ether_ioctl(ifp, command, data); break; case SIOCSIFMTU: { int max_frame_size; u16 eeprom_data = 0; IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)"); EM_CORE_LOCK(adapter); switch (adapter->hw.mac.type) { case e1000_82573: /* * 82573 only supports jumbo frames * if ASPM is disabled.
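 * Condensed, the test that follows reads init-control word 3 from
 * the NVM and backs off to the standard 1518-byte limit when any
 * ASPM bit is set:
 *
 *	e1000_read_nvm(&adapter->hw, NVM_INIT_3GIO_3, 1, &eeprom_data);
 *	if (eeprom_data & NVM_WORD1A_ASPM_MASK)
 *		max_frame_size = ETHER_MAX_LEN;	-- no jumbos with ASPM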
*/ e1000_read_nvm(&adapter->hw, NVM_INIT_3GIO_3, 1, &eeprom_data); if (eeprom_data & NVM_WORD1A_ASPM_MASK) { max_frame_size = ETHER_MAX_LEN; break; } /* Allow Jumbo frames - fall thru */ case e1000_82571: case e1000_82572: case e1000_ich9lan: case e1000_ich10lan: case e1000_82574: case e1000_80003es2lan: /* Limit Jumbo Frame size */ max_frame_size = 9234; break; /* Adapters that do not support jumbo frames */ case e1000_82542: case e1000_ich8lan: max_frame_size = ETHER_MAX_LEN; break; default: max_frame_size = MAX_JUMBO_FRAME_SIZE; } if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN - ETHER_CRC_LEN) { EM_CORE_UNLOCK(adapter); error = EINVAL; break; } ifp->if_mtu = ifr->ifr_mtu; adapter->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; em_init_locked(adapter); EM_CORE_UNLOCK(adapter); break; } case SIOCSIFFLAGS: IOCTL_DEBUGOUT("ioctl rcv'd:\ SIOCSIFFLAGS (Set Interface Flags)"); EM_CORE_LOCK(adapter); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { if ((ifp->if_flags ^ adapter->if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) { em_disable_promisc(adapter); em_set_promisc(adapter); } } else em_init_locked(adapter); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) { EM_TX_LOCK(adapter); em_stop(adapter); EM_TX_UNLOCK(adapter); } adapter->if_flags = ifp->if_flags; EM_CORE_UNLOCK(adapter); break; case SIOCADDMULTI: case SIOCDELMULTI: IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI"); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { EM_CORE_LOCK(adapter); em_disable_intr(adapter); em_set_multi(adapter); if (adapter->hw.mac.type == e1000_82542 && adapter->hw.revision_id == E1000_REVISION_2) { em_initialize_receive_unit(adapter); } #ifdef DEVICE_POLLING if (!(ifp->if_capenable & IFCAP_POLLING)) #endif em_enable_intr(adapter); EM_CORE_UNLOCK(adapter); } break; case SIOCSIFMEDIA: /* Check SOL/IDER usage */ EM_CORE_LOCK(adapter); if (e1000_check_reset_block(&adapter->hw)) { EM_CORE_UNLOCK(adapter); device_printf(adapter->dev, "Media change is" " blocked due to SOL/IDER session.\n"); break; } EM_CORE_UNLOCK(adapter); case SIOCGIFMEDIA: IOCTL_DEBUGOUT("ioctl rcv'd: \ SIOCxIFMEDIA (Get/Set Interface Media)"); error = ifmedia_ioctl(ifp, ifr, &adapter->media, command); break; case SIOCSIFCAP: { int mask, reinit; IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)"); reinit = 0; mask = ifr->ifr_reqcap ^ ifp->if_capenable; #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(em_poll, ifp); if (error) return (error); EM_CORE_LOCK(adapter); em_disable_intr(adapter); ifp->if_capenable |= IFCAP_POLLING; EM_CORE_UNLOCK(adapter); } else { error = ether_poll_deregister(ifp); /* Enable interrupt even in error case */ EM_CORE_LOCK(adapter); em_enable_intr(adapter); ifp->if_capenable &= ~IFCAP_POLLING; EM_CORE_UNLOCK(adapter); } } #endif if (mask & IFCAP_HWCSUM) { ifp->if_capenable ^= IFCAP_HWCSUM; reinit = 1; } #if __FreeBSD_version >= 700000 if (mask & IFCAP_TSO4) { ifp->if_capenable ^= IFCAP_TSO4; reinit = 1; } #endif if (mask & IFCAP_VLAN_HWTAGGING) { ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; reinit = 1; } if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) em_init(adapter); #if __FreeBSD_version >= 700000 VLAN_CAPABILITIES(ifp); #endif break; } #ifdef EM_TIMESYNC /* ** IOCTL support for Precision Time (IEEE 1588) Support */ case EM_TIMESYNC_READTS: { u32 rx_ctl, tx_ctl; struct em_tsync_read *tdata; tdata = (struct em_tsync_read *) ifr->ifr_data; IOCTL_DEBUGOUT("Reading Timestamp\n"); if (tdata->read_current_time) { 
getnanotime(&tdata->system_time); tdata->network_time = E1000_READ_REG(&adapter->hw, E1000_SYSTIML); tdata->network_time |= (u64)E1000_READ_REG(&adapter->hw, E1000_SYSTIMH) << 32; } rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL); tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL); IOCTL_DEBUGOUT1("RX_CTL value = %u\n", rx_ctl); IOCTL_DEBUGOUT1("TX_CTL value = %u\n", tx_ctl); if (rx_ctl & 0x1) { IOCTL_DEBUGOUT("RX timestamp is valid\n"); u32 tmp; unsigned char *tmp_cp; tdata->rx_valid = 1; tdata->rx_stamp = E1000_READ_REG(&adapter->hw, E1000_RXSTMPL); tdata->rx_stamp |= (u64)E1000_READ_REG(&adapter->hw, E1000_RXSTMPH) << 32; tmp = E1000_READ_REG(&adapter->hw, E1000_RXSATRL); tmp_cp = (unsigned char *) &tmp; tdata->srcid[0] = tmp_cp[0]; tdata->srcid[1] = tmp_cp[1]; tdata->srcid[2] = tmp_cp[2]; tdata->srcid[3] = tmp_cp[3]; tmp = E1000_READ_REG(&adapter->hw, E1000_RXSATRH); tmp_cp = (unsigned char *) &tmp; tdata->srcid[4] = tmp_cp[0]; tdata->srcid[5] = tmp_cp[1]; tdata->seqid = tmp >> 16; tdata->seqid = htons(tdata->seqid); } else tdata->rx_valid = 0; if (tx_ctl & 0x1) { IOCTL_DEBUGOUT("TX timestamp is valid\n"); tdata->tx_valid = 1; tdata->tx_stamp = E1000_READ_REG(&adapter->hw, E1000_TXSTMPL); tdata->tx_stamp |= (u64) E1000_READ_REG(&adapter->hw, E1000_TXSTMPH) << 32; } else tdata->tx_valid = 0; return (0); } #endif /* EM_TIMESYNC */ default: error = ether_ioctl(ifp, command, data); break; } return (error); } /********************************************************************* * Watchdog timer: * * This routine is called from the local timer every second. * As long as transmit descriptors are being cleaned the value * is non-zero and we do nothing. Reaching 0 indicates a tx hang * and we then reset the device. * **********************************************************************/ static void em_watchdog(struct adapter *adapter) { EM_CORE_LOCK_ASSERT(adapter); /* ** The timer is set to 5 every time start queues a packet. ** Then txeof keeps resetting it as long as it cleans at ** least one descriptor. ** Finally, anytime all descriptors are clean the timer is ** set to 0. */ EM_TX_LOCK(adapter); if ((adapter->watchdog_timer == 0) || (--adapter->watchdog_timer)) { EM_TX_UNLOCK(adapter); return; } /* If we are in this routine because of pause frames, then * don't reset the hardware. */ if (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_TXOFF) { adapter->watchdog_timer = EM_TX_TIMEOUT; EM_TX_UNLOCK(adapter); return; } if (e1000_check_for_link(&adapter->hw) == 0) device_printf(adapter->dev, "watchdog timeout -- resetting\n"); adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING; adapter->watchdog_events++; EM_TX_UNLOCK(adapter); em_init_locked(adapter); } /********************************************************************* * Init entry point * * This routine is used in two ways. It is used by the stack as * init entry point in network interface structure. It is also used * by the driver as a hw/sw initialization routine to get to a * consistent state. * **********************************************************************/ static void em_init_locked(struct adapter *adapter) { struct ifnet *ifp = adapter->ifp; device_t dev = adapter->dev; u32 pba; INIT_DEBUGOUT("em_init: begin"); EM_CORE_LOCK_ASSERT(adapter); EM_TX_LOCK(adapter); em_stop(adapter); EM_TX_UNLOCK(adapter); /* * Packet Buffer Allocation (PBA) * Writing PBA sets the receive portion of the buffer * the remainder is used for the transmit buffer.
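* (The PBA value is in kilobytes: the Rx share is later read back as * rx_buffer_size = (PBA & 0xffff) << 10 bytes in em_hardware_init(), * so e.g. pba = 30 means 30K of Rx buffer on a 40K part.)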
* * Devices before the 82547 had a Packet Buffer of 64K. * Default allocation: PBA=48K for Rx, leaving 16K for Tx. * After the 82547 the buffer was reduced to 40K. * Default allocation: PBA=30K for Rx, leaving 10K for Tx. * Note: default does not leave enough room for Jumbo Frame >10k. */ switch (adapter->hw.mac.type) { case e1000_82547: case e1000_82547_rev_2: /* 82547: Total Packet Buffer is 40K */ if (adapter->max_frame_size > 8192) pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */ else pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */ adapter->tx_fifo_head = 0; adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT; adapter->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT; break; /* Total Packet Buffer on these is 48K */ case e1000_82571: case e1000_82572: case e1000_80003es2lan: pba = E1000_PBA_32K; /* 32K for Rx, 16K for Tx */ break; case e1000_82573: /* 82573: Total Packet Buffer is 32K */ pba = E1000_PBA_12K; /* 12K for Rx, 20K for Tx */ break; case e1000_82574: pba = E1000_PBA_20K; /* 20K for Rx, 20K for Tx */ break; case e1000_ich9lan: case e1000_ich10lan: #define E1000_PBA_10K 0x000A pba = E1000_PBA_10K; break; case e1000_ich8lan: pba = E1000_PBA_8K; break; default: /* Devices before 82547 had a Packet Buffer of 64K. */ if (adapter->max_frame_size > 8192) pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */ else pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */ } INIT_DEBUGOUT1("em_init: pba=%dK",pba); E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba); /* Get the latest mac address, User can use a LAA */ bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr, ETHER_ADDR_LEN); /* Put the address into the Receive Address Array */ e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); /* * With the 82571 adapter, RAR[0] may be overwritten * when the other port is reset, we make a duplicate * in RAR[14] for that eventuality, this assures * the interface continues to function. 
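* (RAR[14] being E1000_RAR_ENTRIES - 1, i.e. the last * receive-address slot on these parts.)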
*/ if (adapter->hw.mac.type == e1000_82571) { e1000_set_laa_state_82571(&adapter->hw, TRUE); e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, E1000_RAR_ENTRIES - 1); } /* Initialize the hardware */ if (em_hardware_init(adapter)) { device_printf(dev, "Unable to initialize the hardware\n"); return; } em_update_link_status(adapter); /* Setup VLAN support, basic and offload if available */ E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN); if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) && ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)) { u32 ctrl; ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL); ctrl |= E1000_CTRL_VME; E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl); } /* Set hardware offload abilities */ ifp->if_hwassist = 0; if (adapter->hw.mac.type >= e1000_82543) { if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); #if __FreeBSD_version >= 700000 if (ifp->if_capenable & IFCAP_TSO4) ifp->if_hwassist |= CSUM_TSO; #endif } /* Configure for OS presence */ em_init_manageability(adapter); /* Prepare transmit descriptors and buffers */ em_setup_transmit_structures(adapter); em_initialize_transmit_unit(adapter); /* Setup Multicast table */ em_set_multi(adapter); /* Prepare receive descriptors and buffers */ if (em_setup_receive_structures(adapter)) { device_printf(dev, "Could not setup receive structures\n"); EM_TX_LOCK(adapter); em_stop(adapter); EM_TX_UNLOCK(adapter); return; } em_initialize_receive_unit(adapter); /* Don't lose promiscuous settings */ em_set_promisc(adapter); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&adapter->timer, hz, em_local_timer, adapter); e1000_clear_hw_cntrs_base_generic(&adapter->hw); /* MSI/X configuration for 82574 */ if (adapter->hw.mac.type == e1000_82574) { int tmp; tmp = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); tmp |= E1000_CTRL_EXT_PBA_CLR; E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, tmp); /* ** Set the IVAR - interrupt vector routing. ** Each nibble represents a vector, high bit ** is enable, other 3 bits are the MSIX table ** entry, we map RXQ0 to 0, TXQ0 to 1, and ** Link (other) to 2, hence the magic number. */ E1000_WRITE_REG(&adapter->hw, E1000_IVAR, 0x800A0908); } #ifdef DEVICE_POLLING /* * Only enable interrupts if we are not polling, make sure * they are off otherwise. 
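* (When IFCAP_POLLING is set the polling framework drives em_poll() * on a schedule instead, so the hardware interrupt must stay masked; * it is re-enabled below only in the non-polling path.)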
*/ if (ifp->if_capenable & IFCAP_POLLING) em_disable_intr(adapter); else #endif /* DEVICE_POLLING */ em_enable_intr(adapter); #ifdef EM_TIMESYNC /* Initialize IEEE 1588 Precision Time hardware */ if ((adapter->hw.mac.type == e1000_82574) || (adapter->hw.mac.type == e1000_ich10lan)) em_tsync_init(adapter); #endif /* Don't reset the phy next time init gets called */ adapter->hw.phy.reset_disable = TRUE; } static void em_init(void *arg) { struct adapter *adapter = arg; EM_CORE_LOCK(adapter); em_init_locked(adapter); EM_CORE_UNLOCK(adapter); } #ifdef DEVICE_POLLING /********************************************************************* * * Legacy polling routine * *********************************************************************/ -static void +static int em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { - struct adapter *adapter = ifp->if_softc; + struct adapter *adapter; u32 reg_icr; + int rx_npkts; + adapter = ifp->if_softc; + rx_npkts = 0; + EM_CORE_LOCK(adapter); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { EM_CORE_UNLOCK(adapter); - return; + return (rx_npkts); } if (cmd == POLL_AND_CHECK_STATUS) { reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR); if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { callout_stop(&adapter->timer); adapter->hw.mac.get_link_status = 1; em_update_link_status(adapter); callout_reset(&adapter->timer, hz, em_local_timer, adapter); } } EM_CORE_UNLOCK(adapter); - em_rxeof(adapter, count); + em_rxeof(adapter, count, &rx_npkts); EM_TX_LOCK(adapter); em_txeof(adapter); if (!ADAPTER_RING_EMPTY(adapter)) em_start_locked(ifp); EM_TX_UNLOCK(adapter); + return (rx_npkts); } #endif /* DEVICE_POLLING */ #ifdef EM_LEGACY_IRQ /********************************************************************* * * Legacy Interrupt Service routine * *********************************************************************/ static void em_intr(void *arg) { struct adapter *adapter = arg; struct ifnet *ifp = adapter->ifp; u32 reg_icr; if (ifp->if_capenable & IFCAP_POLLING) return; EM_CORE_LOCK(adapter); reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR); if ((reg_icr == 0xffffffff) || (reg_icr == 0) || (adapter->hw.mac.type >= e1000_82571 && (reg_icr & E1000_ICR_INT_ASSERTED) == 0)) goto out; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) goto out; EM_TX_LOCK(adapter); em_txeof(adapter); - em_rxeof(adapter, -1); + em_rxeof(adapter, -1, NULL); em_txeof(adapter); EM_TX_UNLOCK(adapter); if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { callout_stop(&adapter->timer); adapter->hw.mac.get_link_status = 1; em_update_link_status(adapter); /* Deal with TX cruft when link lost */ em_tx_purge(adapter); callout_reset(&adapter->timer, hz, em_local_timer, adapter); } if (reg_icr & E1000_ICR_RXO) adapter->rx_overruns++; out: EM_CORE_UNLOCK(adapter); if (ifp->if_drv_flags & IFF_DRV_RUNNING && !ADAPTER_RING_EMPTY(adapter)) em_start(ifp); } #else /* EM_FAST_IRQ, then fast interrupt routines only */ static void em_handle_link(void *context, int pending) { struct adapter *adapter = context; struct ifnet *ifp = adapter->ifp; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) return; EM_CORE_LOCK(adapter); callout_stop(&adapter->timer); em_update_link_status(adapter); /* Deal with TX cruft when link lost */ em_tx_purge(adapter); callout_reset(&adapter->timer, hz, em_local_timer, adapter); EM_CORE_UNLOCK(adapter); } /* Combined RX/TX handler, used by Legacy and MSI */ static void em_handle_rxtx(void *context, int pending) { struct adapter *adapter = context; struct ifnet *ifp = adapter->ifp; if
(ifp->if_drv_flags & IFF_DRV_RUNNING) { - if (em_rxeof(adapter, adapter->rx_process_limit) != 0) + if (em_rxeof(adapter, adapter->rx_process_limit, NULL) != 0) taskqueue_enqueue(adapter->tq, &adapter->rxtx_task); EM_TX_LOCK(adapter); em_txeof(adapter); if (!ADAPTER_RING_EMPTY(adapter)) em_start_locked(ifp); EM_TX_UNLOCK(adapter); } em_enable_intr(adapter); } /********************************************************************* * * Fast Legacy/MSI Combined Interrupt Service routine * *********************************************************************/ #if __FreeBSD_version < 700000 #define FILTER_STRAY #define FILTER_HANDLED static void #else static int #endif em_irq_fast(void *arg) { struct adapter *adapter = arg; struct ifnet *ifp; u32 reg_icr; ifp = adapter->ifp; reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR); /* Hot eject? */ if (reg_icr == 0xffffffff) return FILTER_STRAY; /* Definitely not our interrupt. */ if (reg_icr == 0x0) return FILTER_STRAY; /* * Starting with the 82571 chip, bit 31 should be used to * determine whether the interrupt belongs to us. */ if (adapter->hw.mac.type >= e1000_82571 && (reg_icr & E1000_ICR_INT_ASSERTED) == 0) return FILTER_STRAY; /* * Mask interrupts until the taskqueue is finished running. This is * cheap, just assume that it is needed. This also works around the * MSI message reordering errata on certain systems. */ em_disable_intr(adapter); taskqueue_enqueue(adapter->tq, &adapter->rxtx_task); /* Link status change */ if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { adapter->hw.mac.get_link_status = 1; taskqueue_enqueue(taskqueue_fast, &adapter->link_task); } if (reg_icr & E1000_ICR_RXO) adapter->rx_overruns++; return FILTER_HANDLED; } /********************************************************************* * * MSIX Interrupt Service Routines * **********************************************************************/ #define EM_MSIX_TX 0x00040000 #define EM_MSIX_RX 0x00010000 #define EM_MSIX_LINK 0x00100000 static void em_msix_tx(void *arg) { struct adapter *adapter = arg; struct ifnet *ifp = adapter->ifp; ++adapter->tx_irq; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { EM_TX_LOCK(adapter); em_txeof(adapter); EM_TX_UNLOCK(adapter); taskqueue_enqueue(adapter->tq, &adapter->tx_task); } /* Reenable this interrupt */ E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_TX); return; } /********************************************************************* * * MSIX RX Interrupt Service routine * **********************************************************************/ static void em_msix_rx(void *arg) { struct adapter *adapter = arg; struct ifnet *ifp = adapter->ifp; ++adapter->rx_irq; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && - (em_rxeof(adapter, adapter->rx_process_limit) != 0)) + (em_rxeof(adapter, adapter->rx_process_limit, NULL) != 0)) taskqueue_enqueue(adapter->tq, &adapter->rx_task); /* Reenable this interrupt */ E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_RX); return; } /********************************************************************* * * MSIX Link Fast Interrupt Service routine * **********************************************************************/ static void em_msix_link(void *arg) { struct adapter *adapter = arg; u32 reg_icr; ++adapter->link_irq; reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR); if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { adapter->hw.mac.get_link_status = 1; taskqueue_enqueue(taskqueue_fast, &adapter->link_task); } E1000_WRITE_REG(&adapter->hw, E1000_IMS, EM_MSIX_LINK | E1000_IMS_LSC); return; } static void 
em_handle_rx(void *context, int pending) { struct adapter *adapter = context; struct ifnet *ifp = adapter->ifp; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && - (em_rxeof(adapter, adapter->rx_process_limit) != 0)) + (em_rxeof(adapter, adapter->rx_process_limit, NULL) != 0)) taskqueue_enqueue(adapter->tq, &adapter->rx_task); } static void em_handle_tx(void *context, int pending) { struct adapter *adapter = context; struct ifnet *ifp = adapter->ifp; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { if (!EM_TX_TRYLOCK(adapter)) return; em_txeof(adapter); if (!ADAPTER_RING_EMPTY(adapter)) em_start_locked(ifp); EM_TX_UNLOCK(adapter); } } #endif /* EM_FAST_IRQ */ /********************************************************************* * * Media Ioctl callback * * This routine is called whenever the user queries the status of * the interface using ifconfig. * **********************************************************************/ static void em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) { struct adapter *adapter = ifp->if_softc; u_char fiber_type = IFM_1000_SX; INIT_DEBUGOUT("em_media_status: begin"); EM_CORE_LOCK(adapter); em_update_link_status(adapter); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!adapter->link_active) { EM_CORE_UNLOCK(adapter); return; } ifmr->ifm_status |= IFM_ACTIVE; if ((adapter->hw.phy.media_type == e1000_media_type_fiber) || (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) { if (adapter->hw.mac.type == e1000_82545) fiber_type = IFM_1000_LX; ifmr->ifm_active |= fiber_type | IFM_FDX; } else { switch (adapter->link_speed) { case 10: ifmr->ifm_active |= IFM_10_T; break; case 100: ifmr->ifm_active |= IFM_100_TX; break; case 1000: ifmr->ifm_active |= IFM_1000_T; break; } if (adapter->link_duplex == FULL_DUPLEX) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; } EM_CORE_UNLOCK(adapter); } /********************************************************************* * * Media Ioctl callback * * This routine is called when the user changes speed/duplex using * media/mediaopt option with ifconfig. * **********************************************************************/ static int em_media_change(struct ifnet *ifp) { struct adapter *adapter = ifp->if_softc; struct ifmedia *ifm = &adapter->media; INIT_DEBUGOUT("em_media_change: begin"); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); EM_CORE_LOCK(adapter); switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: adapter->hw.mac.autoneg = DO_AUTO_NEG; adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; break; case IFM_1000_LX: case IFM_1000_SX: case IFM_1000_T: adapter->hw.mac.autoneg = DO_AUTO_NEG; adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; break; case IFM_100_TX: adapter->hw.mac.autoneg = FALSE; adapter->hw.phy.autoneg_advertised = 0; if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL; else adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF; break; case IFM_10_T: adapter->hw.mac.autoneg = FALSE; adapter->hw.phy.autoneg_advertised = 0; if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL; else adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF; break; default: device_printf(adapter->dev, "Unsupported media type\n"); } /* As the speed/duplex settings may have changed we need to * reset the PHY.
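* (Clearing phy.reset_disable below is what makes the subsequent * em_init_locked() perform a full PHY reset, so the new forced * speed/duplex selection actually takes effect.)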
*/ adapter->hw.phy.reset_disable = FALSE; em_init_locked(adapter); EM_CORE_UNLOCK(adapter); return (0); } /********************************************************************* * * This routine maps the mbufs to tx descriptors. * * return 0 on success, positive on failure **********************************************************************/ static int em_xmit(struct adapter *adapter, struct mbuf **m_headp) { bus_dma_segment_t segs[EM_MAX_SCATTER]; bus_dmamap_t map; struct em_buffer *tx_buffer, *tx_buffer_mapped; struct e1000_tx_desc *ctxd = NULL; struct mbuf *m_head; u32 txd_upper, txd_lower, txd_used, txd_saved; int nsegs, i, j, first, last = 0; int error, do_tso, tso_desc = 0; #if __FreeBSD_version < 700000 struct m_tag *mtag; #endif m_head = *m_headp; txd_upper = txd_lower = txd_used = txd_saved = 0; #if __FreeBSD_version >= 700000 do_tso = ((m_head->m_pkthdr.csum_flags & CSUM_TSO) != 0); #else do_tso = 0; #endif /* * Force a cleanup if number of TX descriptors * available hits the threshold */ if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) { em_txeof(adapter); /* Now do we at least have a minimal? */ if (adapter->num_tx_desc_avail <= EM_TX_OP_THRESHOLD) { adapter->no_tx_desc_avail1++; return (ENOBUFS); } } /* * TSO workaround: * If an mbuf is only header we need * to pull 4 bytes of data into it. */ if (do_tso && (m_head->m_len <= M_TSO_LEN)) { m_head = m_pullup(m_head, M_TSO_LEN + 4); *m_headp = m_head; if (m_head == NULL) return (ENOBUFS); } /* * Map the packet for DMA * * Capture the first descriptor index, * this descriptor will have the index * of the EOP which is the only one that * now gets a DONE bit writeback. */ first = adapter->next_avail_tx_desc; tx_buffer = &adapter->tx_buffer_area[first]; tx_buffer_mapped = tx_buffer; map = tx_buffer->map; error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); /* * There are two types of errors we can (try) to handle: * - EFBIG means the mbuf chain was too long and bus_dma ran * out of segments. Defragment the mbuf chain and try again. * - ENOMEM means bus_dma could not obtain enough bounce buffers * at this point in time. Defer sending and try again later. * All other errors, in particular EINVAL, are fatal and prevent the * mbuf chain from ever going through. Drop it and report error. */ if (error == EFBIG) { struct mbuf *m; m = m_defrag(*m_headp, M_DONTWAIT); if (m == NULL) { adapter->mbuf_alloc_failed++; m_freem(*m_headp); *m_headp = NULL; return (ENOBUFS); } *m_headp = m; /* Try it again */ error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); if (error) { adapter->no_tx_dma_setup++; m_freem(*m_headp); *m_headp = NULL; return (error); } } else if (error != 0) { adapter->no_tx_dma_setup++; return (error); } /* * TSO Hardware workaround, if this packet is not * TSO, and is only a single descriptor long, and * it follows a TSO burst, then we need to add a * sentinel descriptor to prevent premature writeback. 
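* (Concretely: in the descriptor loop below the final segment is * shortened by 4 bytes and those 4 bytes are queued as a separate * trailing descriptor, so the DONE writeback lands on the sentinel.)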
*/ if ((do_tso == 0) && (adapter->tx_tso == TRUE)) { if (nsegs == 1) tso_desc = TRUE; adapter->tx_tso = FALSE; } if (nsegs > (adapter->num_tx_desc_avail - 2)) { adapter->no_tx_desc_avail2++; bus_dmamap_unload(adapter->txtag, map); return (ENOBUFS); } m_head = *m_headp; /* Do hardware assists */ #if __FreeBSD_version >= 700000 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { error = em_tso_setup(adapter, m_head, &txd_upper, &txd_lower); if (error != TRUE) return (ENXIO); /* something foobar */ /* we need to make a final sentinel transmit desc */ tso_desc = TRUE; } else #endif #ifndef EM_TIMESYNC /* ** Timesync needs to check the packet header ** so call checksum code to do so, but don't ** penalize the code if not defined. */ if (m_head->m_pkthdr.csum_flags & CSUM_OFFLOAD) #endif em_transmit_checksum_setup(adapter, m_head, &txd_upper, &txd_lower); i = adapter->next_avail_tx_desc; if (adapter->pcix_82544) txd_saved = i; /* Set up our transmit descriptors */ for (j = 0; j < nsegs; j++) { bus_size_t seg_len; bus_addr_t seg_addr; /* If adapter is 82544 and on PCIX bus */ if(adapter->pcix_82544) { DESC_ARRAY desc_array; u32 array_elements, counter; /* * Check the Address and Length combination and * split the data accordingly */ array_elements = em_fill_descriptors(segs[j].ds_addr, segs[j].ds_len, &desc_array); for (counter = 0; counter < array_elements; counter++) { if (txd_used == adapter->num_tx_desc_avail) { adapter->next_avail_tx_desc = txd_saved; adapter->no_tx_desc_avail2++; bus_dmamap_unload(adapter->txtag, map); return (ENOBUFS); } tx_buffer = &adapter->tx_buffer_area[i]; ctxd = &adapter->tx_desc_base[i]; ctxd->buffer_addr = htole64( desc_array.descriptor[counter].address); ctxd->lower.data = htole32( (adapter->txd_cmd | txd_lower | (u16) desc_array.descriptor[counter].length)); ctxd->upper.data = htole32((txd_upper)); last = i; if (++i == adapter->num_tx_desc) i = 0; tx_buffer->m_head = NULL; tx_buffer->next_eop = -1; txd_used++; } } else { tx_buffer = &adapter->tx_buffer_area[i]; ctxd = &adapter->tx_desc_base[i]; seg_addr = segs[j].ds_addr; seg_len = segs[j].ds_len; /* ** TSO Workaround: ** If this is the last descriptor, we want to ** split it so we have a small final sentinel */ if (tso_desc && (j == (nsegs -1)) && (seg_len > 8)) { seg_len -= 4; ctxd->buffer_addr = htole64(seg_addr); ctxd->lower.data = htole32( adapter->txd_cmd | txd_lower | seg_len); ctxd->upper.data = htole32(txd_upper); if (++i == adapter->num_tx_desc) i = 0; /* Now make the sentinel */ ++txd_used; /* using an extra txd */ ctxd = &adapter->tx_desc_base[i]; tx_buffer = &adapter->tx_buffer_area[i]; ctxd->buffer_addr = htole64(seg_addr + seg_len); ctxd->lower.data = htole32( adapter->txd_cmd | txd_lower | 4); ctxd->upper.data = htole32(txd_upper); last = i; if (++i == adapter->num_tx_desc) i = 0; } else { ctxd->buffer_addr = htole64(seg_addr); ctxd->lower.data = htole32( adapter->txd_cmd | txd_lower | seg_len); ctxd->upper.data = htole32(txd_upper); last = i; if (++i == adapter->num_tx_desc) i = 0; } tx_buffer->m_head = NULL; tx_buffer->next_eop = -1; } } adapter->next_avail_tx_desc = i; if (adapter->pcix_82544) adapter->num_tx_desc_avail -= txd_used; else { adapter->num_tx_desc_avail -= nsegs; if (tso_desc) /* TSO used an extra for sentinel */ adapter->num_tx_desc_avail -= txd_used; } /* ** Handle VLAN tag, this is the ** biggest difference between ** 6.x and 7 */ #if __FreeBSD_version < 700000 /* Find out if we are in vlan mode. 
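* On 6.x the tag travels as an mbuf m_tag (VLAN_OUTPUT_TAG); on 7.x * it is carried in m_pkthdr.ether_vtag when M_VLANTAG is set.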
*/ mtag = VLAN_OUTPUT_TAG(ifp, m_head); if (mtag != NULL) { ctxd->upper.fields.special = htole16(VLAN_TAG_VALUE(mtag)); #else /* FreeBSD 7 */ if (m_head->m_flags & M_VLANTAG) { /* Set the vlan id. */ ctxd->upper.fields.special = htole16(m_head->m_pkthdr.ether_vtag); #endif /* Tell hardware to add tag */ ctxd->lower.data |= htole32(E1000_TXD_CMD_VLE); } tx_buffer->m_head = m_head; tx_buffer_mapped->map = tx_buffer->map; tx_buffer->map = map; bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE); /* * Last Descriptor of Packet * needs End Of Packet (EOP) * and Report Status (RS) */ ctxd->lower.data |= htole32(E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS); /* * Keep track in the first buffer which * descriptor will be written back */ tx_buffer = &adapter->tx_buffer_area[first]; tx_buffer->next_eop = last; /* * Advance the Transmit Descriptor Tail (TDT), this tells the E1000 * that this frame is available to transmit. */ bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if (adapter->hw.mac.type == e1000_82547 && adapter->link_duplex == HALF_DUPLEX) em_82547_move_tail(adapter); else { E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), i); if (adapter->hw.mac.type == e1000_82547) em_82547_update_fifo_head(adapter, m_head->m_pkthdr.len); } #ifdef EM_TIMESYNC if (ctxd->upper.data & E1000_TXD_EXTCMD_TSTAMP) { HW_DEBUGOUT( "@@@ Timestamp bit is set in transmit descriptor\n" ); } #endif return (0); } /********************************************************************* * * 82547 workaround to avoid controller hang in half-duplex environment. * The workaround is to avoid queuing a large packet that would span * the internal Tx FIFO ring boundary. We need to reset the FIFO pointers * in this case. We do that only when FIFO is quiescent. 
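* (Quiescent here means TDT == TDH, the internal FIFO head/tail * register pairs match, and TDFPC reads zero -- the exact test made * by em_82547_tx_fifo_reset() below.)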
* **********************************************************************/ static void em_82547_move_tail(void *arg) { struct adapter *adapter = arg; struct e1000_tx_desc *tx_desc; u16 hw_tdt, sw_tdt, length = 0; bool eop = 0; EM_TX_LOCK_ASSERT(adapter); hw_tdt = E1000_READ_REG(&adapter->hw, E1000_TDT(0)); sw_tdt = adapter->next_avail_tx_desc; while (hw_tdt != sw_tdt) { tx_desc = &adapter->tx_desc_base[hw_tdt]; length += tx_desc->lower.flags.length; eop = tx_desc->lower.data & E1000_TXD_CMD_EOP; if (++hw_tdt == adapter->num_tx_desc) hw_tdt = 0; if (eop) { if (em_82547_fifo_workaround(adapter, length)) { adapter->tx_fifo_wrk_cnt++; callout_reset(&adapter->tx_fifo_timer, 1, em_82547_move_tail, adapter); break; } E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), hw_tdt); em_82547_update_fifo_head(adapter, length); length = 0; } } } static int em_82547_fifo_workaround(struct adapter *adapter, int len) { int fifo_space, fifo_pkt_len; fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR); if (adapter->link_duplex == HALF_DUPLEX) { fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) { if (em_82547_tx_fifo_reset(adapter)) return (0); else return (1); } } return (0); } static void em_82547_update_fifo_head(struct adapter *adapter, int len) { int fifo_pkt_len = roundup2(len + EM_FIFO_HDR, EM_FIFO_HDR); /* tx_fifo_head is always 16 byte aligned */ adapter->tx_fifo_head += fifo_pkt_len; if (adapter->tx_fifo_head >= adapter->tx_fifo_size) { adapter->tx_fifo_head -= adapter->tx_fifo_size; } } static int em_82547_tx_fifo_reset(struct adapter *adapter) { u32 tctl; if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) == E1000_READ_REG(&adapter->hw, E1000_TDH(0))) && (E1000_READ_REG(&adapter->hw, E1000_TDFT) == E1000_READ_REG(&adapter->hw, E1000_TDFH)) && (E1000_READ_REG(&adapter->hw, E1000_TDFTS) == E1000_READ_REG(&adapter->hw, E1000_TDFHS)) && (E1000_READ_REG(&adapter->hw, E1000_TDFPC) == 0)) { /* Disable TX unit */ tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL); E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl & ~E1000_TCTL_EN); /* Reset FIFO pointers */ E1000_WRITE_REG(&adapter->hw, E1000_TDFT, adapter->tx_head_addr); E1000_WRITE_REG(&adapter->hw, E1000_TDFH, adapter->tx_head_addr); E1000_WRITE_REG(&adapter->hw, E1000_TDFTS, adapter->tx_head_addr); E1000_WRITE_REG(&adapter->hw, E1000_TDFHS, adapter->tx_head_addr); /* Re-enable TX unit */ E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl); E1000_WRITE_FLUSH(&adapter->hw); adapter->tx_fifo_head = 0; adapter->tx_fifo_reset_cnt++; return (TRUE); } else { return (FALSE); } } static void em_set_promisc(struct adapter *adapter) { struct ifnet *ifp = adapter->ifp; u32 reg_rctl; reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); if (ifp->if_flags & IFF_PROMISC) { reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); /* Turn this on if you want to see bad packets */ if (em_debug_sbp) reg_rctl |= E1000_RCTL_SBP; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); } else if (ifp->if_flags & IFF_ALLMULTI) { reg_rctl |= E1000_RCTL_MPE; reg_rctl &= ~E1000_RCTL_UPE; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); } } static void em_disable_promisc(struct adapter *adapter) { u32 reg_rctl; reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); reg_rctl &= (~E1000_RCTL_UPE); reg_rctl &= (~E1000_RCTL_MPE); reg_rctl &= (~E1000_RCTL_SBP); E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); } /********************************************************************* * Multicast Update * * This routine is called whenever multicast 
address list is updated. * **********************************************************************/ static void em_set_multi(struct adapter *adapter) { struct ifnet *ifp = adapter->ifp; struct ifmultiaddr *ifma; u32 reg_rctl = 0; u8 *mta; /* Multicast array memory */ int mcnt = 0; IOCTL_DEBUGOUT("em_set_multi: begin"); if (adapter->hw.mac.type == e1000_82542 && adapter->hw.revision_id == E1000_REVISION_2) { reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) e1000_pci_clear_mwi(&adapter->hw); reg_rctl |= E1000_RCTL_RST; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); msec_delay(5); } /* Allocate temporary memory to setup array */ mta = malloc(sizeof(u8) * (ETH_ADDR_LEN * MAX_NUM_MULTICAST_ADDRESSES), M_DEVBUF, M_NOWAIT | M_ZERO); if (mta == NULL) panic("em_set_multi memory failure\n"); IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) break; bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN); mcnt++; } IF_ADDR_UNLOCK(ifp); if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) { reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); reg_rctl |= E1000_RCTL_MPE; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); } else e1000_update_mc_addr_list(&adapter->hw, mta, mcnt); if (adapter->hw.mac.type == e1000_82542 && adapter->hw.revision_id == E1000_REVISION_2) { reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); reg_rctl &= ~E1000_RCTL_RST; E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); msec_delay(5); if (adapter->hw.bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) e1000_pci_set_mwi(&adapter->hw); } free(mta, M_DEVBUF); } /********************************************************************* * Timer routine * * This routine checks for link status and updates statistics. * **********************************************************************/ static void em_local_timer(void *arg) { struct adapter *adapter = arg; struct ifnet *ifp = adapter->ifp; EM_CORE_LOCK_ASSERT(adapter); taskqueue_enqueue(adapter->tq, &adapter->rxtx_task); em_update_link_status(adapter); em_update_stats_counters(adapter); /* Reset LAA into RAR[0] on 82571 */ if (e1000_get_laa_state_82571(&adapter->hw) == TRUE) e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0); if (em_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) em_print_hw_stats(adapter); em_smartspeed(adapter); /* * Each second we check the watchdog to * protect against hardware hangs. 
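* (em_watchdog() runs under the core lock from this once-a-second * callout; it resets the chip only after the counter maintained by * em_start()/em_txeof() has run down to zero.)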
*/ em_watchdog(adapter); callout_reset(&adapter->timer, hz, em_local_timer, adapter); } static void em_update_link_status(struct adapter *adapter) { struct e1000_hw *hw = &adapter->hw; struct ifnet *ifp = adapter->ifp; device_t dev = adapter->dev; u32 link_check = 0; /* Get the cached link value or read phy for real */ switch (hw->phy.media_type) { case e1000_media_type_copper: if (hw->mac.get_link_status) { /* Do the work to read phy */ e1000_check_for_link(hw); link_check = !hw->mac.get_link_status; if (link_check) /* ESB2 fix */ e1000_cfg_on_link_up(hw); } else link_check = TRUE; break; case e1000_media_type_fiber: e1000_check_for_link(hw); link_check = (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU); break; case e1000_media_type_internal_serdes: e1000_check_for_link(hw); link_check = adapter->hw.mac.serdes_has_link; break; default: case e1000_media_type_unknown: break; } /* Now check for a transition */ if (link_check && (adapter->link_active == 0)) { e1000_get_speed_and_duplex(hw, &adapter->link_speed, &adapter->link_duplex); /* Check if we must disable SPEED_MODE bit on PCI-E */ if ((adapter->link_speed != SPEED_1000) && ((hw->mac.type == e1000_82571) || (hw->mac.type == e1000_82572))) { int tarc0; tarc0 = E1000_READ_REG(hw, E1000_TARC(0)); tarc0 &= ~SPEED_MODE_BIT; E1000_WRITE_REG(hw, E1000_TARC(0), tarc0); } if (bootverbose) device_printf(dev, "Link is up %d Mbps %s\n", adapter->link_speed, ((adapter->link_duplex == FULL_DUPLEX) ? "Full Duplex" : "Half Duplex")); adapter->link_active = 1; adapter->smartspeed = 0; ifp->if_baudrate = adapter->link_speed * 1000000; if_link_state_change(ifp, LINK_STATE_UP); } else if (!link_check && (adapter->link_active == 1)) { ifp->if_baudrate = adapter->link_speed = 0; adapter->link_duplex = 0; if (bootverbose) device_printf(dev, "Link is Down\n"); adapter->link_active = 0; /* Link down, disable watchdog */ adapter->watchdog_timer = FALSE; if_link_state_change(ifp, LINK_STATE_DOWN); } } /********************************************************************* * * This routine disables all traffic on the adapter by issuing a * global reset on the MAC and deallocates TX/RX buffers. * * This routine should always be called with BOTH the CORE * and TX locks. **********************************************************************/ static void em_stop(void *arg) { struct adapter *adapter = arg; struct ifnet *ifp = adapter->ifp; EM_CORE_LOCK_ASSERT(adapter); EM_TX_LOCK_ASSERT(adapter); INIT_DEBUGOUT("em_stop: begin"); em_disable_intr(adapter); callout_stop(&adapter->timer); callout_stop(&adapter->tx_fifo_timer); /* Tell the stack that the interface is no longer active */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); #ifdef EM_TIMESYNC /* Disable IEEE 1588 Time hardware */ if ((adapter->hw.mac.type == e1000_82574) || (adapter->hw.mac.type == e1000_ich10lan)) em_tsync_disable(adapter); #endif e1000_reset_hw(&adapter->hw); if (adapter->hw.mac.type >= e1000_82544) E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0); } /********************************************************************* * * Determine hardware revision. 
* **********************************************************************/ static void em_identify_hardware(struct adapter *adapter) { device_t dev = adapter->dev; /* Make sure our PCI config space has the necessary stuff set */ adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) && (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) { device_printf(dev, "Memory Access and/or Bus Master bits " "were not set!\n"); adapter->hw.bus.pci_cmd_word |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN); pci_write_config(dev, PCIR_COMMAND, adapter->hw.bus.pci_cmd_word, 2); } /* Save off the information about this board */ adapter->hw.vendor_id = pci_get_vendor(dev); adapter->hw.device_id = pci_get_device(dev); adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2); adapter->hw.subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); /* Do Shared Code Init and Setup */ if (e1000_set_mac_type(&adapter->hw)) { device_printf(dev, "Setup init failure\n"); return; } } static int em_allocate_pci_resources(struct adapter *adapter) { device_t dev = adapter->dev; int val, rid, error = E1000_SUCCESS; rid = PCIR_BAR(0); adapter->memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (adapter->memory == NULL) { device_printf(dev, "Unable to allocate bus resource: memory\n"); return (ENXIO); } adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->memory); adapter->osdep.mem_bus_space_handle = rman_get_bushandle(adapter->memory); adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle; /* Only older adapters use IO mapping */ if ((adapter->hw.mac.type > e1000_82543) && (adapter->hw.mac.type < e1000_82571)) { /* Figure out where our IO BAR is.
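* Scan the BARs from PCIR_BAR(0) toward PCIR_CIS for an I/O-type * BAR, stepping an extra dword past 64-bit memory BARs (e.g. a * 64-bit BAR at 0x10 puts the next candidate at 0x18).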
*/ for (rid = PCIR_BAR(0); rid < PCIR_CIS;) { val = pci_read_config(dev, rid, 4); if (EM_BAR_TYPE(val) == EM_BAR_TYPE_IO) { adapter->io_rid = rid; break; } rid += 4; /* check for 64bit BAR */ if (EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT) rid += 4; } if (rid >= PCIR_CIS) { device_printf(dev, "Unable to locate IO BAR\n"); return (ENXIO); } adapter->ioport = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &adapter->io_rid, RF_ACTIVE); if (adapter->ioport == NULL) { device_printf(dev, "Unable to allocate bus resource: " "ioport\n"); return (ENXIO); } adapter->hw.io_base = 0; adapter->osdep.io_bus_space_tag = rman_get_bustag(adapter->ioport); adapter->osdep.io_bus_space_handle = rman_get_bushandle(adapter->ioport); } /* ** Init the resource arrays ** used by MSIX setup */ for (int i = 0; i < 3; i++) { adapter->rid[i] = i + 1; /* MSI/X RID starts at 1 */ adapter->tag[i] = NULL; adapter->res[i] = NULL; } /* * Setup MSI/X or MSI if PCI Express */ if (em_enable_msi) adapter->msi = em_setup_msix(adapter); adapter->hw.back = &adapter->osdep; return (error); } /********************************************************************* * * Setup the Legacy or MSI Interrupt handler * **********************************************************************/ int em_allocate_legacy(struct adapter *adapter) { device_t dev = adapter->dev; int error; /* Manually turn off all interrupts */ E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff); /* Legacy RID is 0 */ if (adapter->msi == 0) adapter->rid[0] = 0; /* We allocate a single interrupt resource */ adapter->res[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &adapter->rid[0], RF_SHAREABLE | RF_ACTIVE); if (adapter->res[0] == NULL) { device_printf(dev, "Unable to allocate bus resource: " "interrupt\n"); return (ENXIO); } #ifdef EM_LEGACY_IRQ /* We do Legacy setup */ if ((error = bus_setup_intr(dev, adapter->res[0], #if __FreeBSD_version > 700000 INTR_TYPE_NET | INTR_MPSAFE, NULL, em_intr, adapter, #else /* 6.X */ INTR_TYPE_NET | INTR_MPSAFE, em_intr, adapter, #endif &adapter->tag[0])) != 0) { device_printf(dev, "Failed to register interrupt handler"); return (error); } #else /* FAST_IRQ */ /* * Try allocating a fast interrupt and the associated deferred * processing contexts. */ TASK_INIT(&adapter->rxtx_task, 0, em_handle_rxtx, adapter); TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter); adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT, taskqueue_thread_enqueue, &adapter->tq); taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq", device_get_nameunit(adapter->dev)); #if __FreeBSD_version < 700000 if ((error = bus_setup_intr(dev, adapter->res[0], INTR_TYPE_NET | INTR_FAST, em_irq_fast, adapter, #else if ((error = bus_setup_intr(dev, adapter->res[0], INTR_TYPE_NET, em_irq_fast, NULL, adapter, #endif &adapter->tag[0])) != 0) { device_printf(dev, "Failed to register fast interrupt " "handler: %d\n", error); taskqueue_free(adapter->tq); adapter->tq = NULL; return (error); } #endif /* EM_LEGACY_IRQ */ return (0); } /********************************************************************* * * Setup the MSIX Interrupt handlers * This is not really Multiqueue, rather * it's just multiple interrupt vectors.
* **********************************************************************/ int em_allocate_msix(struct adapter *adapter) { device_t dev = adapter->dev; int error; /* Make sure all interrupts are disabled */ E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff); /* First get the resources */ for (int i = 0; i < adapter->msi; i++) { adapter->res[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &adapter->rid[i], RF_ACTIVE); if (adapter->res[i] == NULL) { device_printf(dev, "Unable to allocate bus resource: " "MSIX Interrupt\n"); return (ENXIO); } } /* * Now allocate deferred processing contexts. */ TASK_INIT(&adapter->rx_task, 0, em_handle_rx, adapter); TASK_INIT(&adapter->tx_task, 0, em_handle_tx, adapter); /* * Handle compatibility for msi case for deferral due to * trylock failure */ TASK_INIT(&adapter->rxtx_task, 0, em_handle_tx, adapter); TASK_INIT(&adapter->link_task, 0, em_handle_link, adapter); adapter->tq = taskqueue_create_fast("em_taskq", M_NOWAIT, taskqueue_thread_enqueue, &adapter->tq); taskqueue_start_threads(&adapter->tq, 1, PI_NET, "%s taskq", device_get_nameunit(adapter->dev)); /* * And setup the interrupt handlers */ /* First slot to RX */ if ((error = bus_setup_intr(dev, adapter->res[0], #if __FreeBSD_version > 700000 INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_rx, adapter, #else /* 6.X */ INTR_TYPE_NET | INTR_MPSAFE, em_msix_rx, adapter, #endif &adapter->tag[0])) != 0) { device_printf(dev, "Failed to register RX handler"); return (error); } /* Next TX */ if ((error = bus_setup_intr(dev, adapter->res[1], #if __FreeBSD_version > 700000 INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_tx, adapter, #else /* 6.X */ INTR_TYPE_NET | INTR_MPSAFE, em_msix_tx, adapter, #endif &adapter->tag[1])) != 0) { device_printf(dev, "Failed to register TX handler"); return (error); } /* And Link */ if ((error = bus_setup_intr(dev, adapter->res[2], #if __FreeBSD_version > 700000 INTR_TYPE_NET | INTR_MPSAFE, NULL, em_msix_link, adapter, #else /* 6.X */ INTR_TYPE_NET | INTR_MPSAFE, em_msix_link, adapter, #endif &adapter->tag[2])) != 0) { device_printf(dev, "Failed to register LINK handler"); return (error); } return (0); } static void em_free_pci_resources(struct adapter *adapter) { device_t dev = adapter->dev; /* Make sure the for loop below runs once */ if (adapter->msi == 0) adapter->msi = 1; /* * First release all the interrupt resources: * notice that since these are just kept * in an array we can do the same logic * whether it's MSIX or just legacy.
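* (adapter->msi holds the vector count; it was forced to 1 above for * the INTx case precisely so this loop still visits res[0]/tag[0].)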
*/ for (int i = 0; i < adapter->msi; i++) { if (adapter->tag[i] != NULL) { bus_teardown_intr(dev, adapter->res[i], adapter->tag[i]); adapter->tag[i] = NULL; } if (adapter->res[i] != NULL) { bus_release_resource(dev, SYS_RES_IRQ, adapter->rid[i], adapter->res[i]); } } if (adapter->msi) pci_release_msi(dev); if (adapter->msix != NULL) bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(EM_MSIX_BAR), adapter->msix); if (adapter->memory != NULL) bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), adapter->memory); if (adapter->flash != NULL) bus_release_resource(dev, SYS_RES_MEMORY, EM_FLASH, adapter->flash); if (adapter->ioport != NULL) bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid, adapter->ioport); } /* * Setup MSI or MSI/X */ static int em_setup_msix(struct adapter *adapter) { device_t dev = adapter->dev; int val = 0; if (adapter->hw.mac.type < e1000_82571) return (0); /* Setup MSI/X for Hartwell */ if (adapter->hw.mac.type == e1000_82574) { /* Map the MSIX BAR */ int rid = PCIR_BAR(EM_MSIX_BAR); adapter->msix = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!adapter->msix) { /* May not be enabled */ device_printf(adapter->dev, "Unable to map MSIX table\n"); goto msi; } val = pci_msix_count(dev); /* ** 82574 can be configured for 5 but ** we limit use to 3. */ if (val > 3) val = 3; if ((val) && pci_alloc_msix(dev, &val) == 0) { device_printf(adapter->dev,"Using MSIX interrupts\n"); return (val); } } msi: val = pci_msi_count(dev); if (val == 1 && pci_alloc_msi(dev, &val) == 0) { adapter->msi = 1; device_printf(adapter->dev,"Using MSI interrupt\n"); return (val); } return (0); } /********************************************************************* * * Initialize the hardware to a configuration * as specified by the adapter structure. * **********************************************************************/ static int em_hardware_init(struct adapter *adapter) { device_t dev = adapter->dev; u16 rx_buffer_size; INIT_DEBUGOUT("em_hardware_init: begin"); /* Issue a global reset */ e1000_reset_hw(&adapter->hw); /* Get control from any management/hw control */ if (((adapter->hw.mac.type == e1000_82573) || (adapter->hw.mac.type == e1000_ich8lan) || (adapter->hw.mac.type == e1000_ich10lan) || (adapter->hw.mac.type == e1000_ich9lan)) && e1000_check_mng_mode(&adapter->hw)) em_get_hw_control(adapter); /* When hardware is reset, fifo_head is also reset */ adapter->tx_fifo_head = 0; /* Set up smart power down as default off on newer adapters. */ if (!em_smart_pwr_down && (adapter->hw.mac.type == e1000_82571 || adapter->hw.mac.type == e1000_82572)) { u16 phy_tmp = 0; /* Speed up time to link by disabling smart power down. */ e1000_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT, &phy_tmp); phy_tmp &= ~IGP02E1000_PM_SPD; e1000_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT, phy_tmp); } /* * These parameters control the automatic generation (Tx) and * response (Rx) to Ethernet PAUSE frames. * - High water mark should allow for at least two frames to be * received after sending an XOFF. * - Low water mark works best when it is very near the high water mark. * This allows the receiver to restart by sending XON when it has * drained a bit. Here we use an arbitrary value of 1500 which will * restart after one full frame is pulled from the buffer. There * could be several smaller frames in the buffer and if so they will * not trigger the XON until their total number reduces the buffer * by 1500. * - The pause time is fairly large at 1000 x 512ns = 512 usec.
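* A rough worked example, assuming a 30K Rx allocation and a * full-sized 1518-byte frame: rx_buffer_size = 30 << 10 = 30720, * high_water = 30720 - roundup2(1518, 1024) = 28672 and * low_water = 28672 - 1500 = 27172 bytes.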
*/ rx_buffer_size = ((E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) << 10); adapter->hw.fc.high_water = rx_buffer_size - roundup2(adapter->max_frame_size, 1024); adapter->hw.fc.low_water = adapter->hw.fc.high_water - 1500; if (adapter->hw.mac.type == e1000_80003es2lan) adapter->hw.fc.pause_time = 0xFFFF; else adapter->hw.fc.pause_time = EM_FC_PAUSE_TIME; adapter->hw.fc.send_xon = TRUE; adapter->hw.fc.requested_mode = e1000_fc_full; if (e1000_init_hw(&adapter->hw) < 0) { device_printf(dev, "Hardware Initialization Failed\n"); return (EIO); } e1000_check_for_link(&adapter->hw); return (0); } /********************************************************************* * * Setup networking device structure and register an interface. * **********************************************************************/ static void em_setup_interface(device_t dev, struct adapter *adapter) { struct ifnet *ifp; INIT_DEBUGOUT("em_setup_interface: begin"); ifp = adapter->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) panic("%s: can not if_alloc()", device_get_nameunit(dev)); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_init = em_init; ifp->if_softc = adapter; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = em_ioctl; ifp->if_start = em_start; IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1); ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1; IFQ_SET_READY(&ifp->if_snd); ether_ifattach(ifp, adapter->hw.mac.addr); ifp->if_capabilities = ifp->if_capenable = 0; #ifdef IFNET_BUF_RING ifp->if_transmit = em_transmit; ifp->if_qflush = em_qflush; adapter->br = buf_ring_alloc(2048, M_DEVBUF, M_WAITOK, &adapter->tx_mtx); #endif if (adapter->hw.mac.type >= e1000_82543) { int version_cap; #if __FreeBSD_version < 700000 version_cap = IFCAP_HWCSUM; #else version_cap = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM; #endif ifp->if_capabilities |= version_cap; ifp->if_capenable |= version_cap; } #if __FreeBSD_version >= 700000 /* Identify TSO capable adapters */ if ((adapter->hw.mac.type > e1000_82544) && (adapter->hw.mac.type != e1000_82547)) ifp->if_capabilities |= IFCAP_TSO4; /* * By default only enable on PCI-E, this * can be overridden by ifconfig. */ if (adapter->hw.mac.type >= e1000_82571) ifp->if_capenable |= IFCAP_TSO4; #endif /* * Tell the upper layer(s) we support long frames.
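* (Setting if_data.ifi_hdrlen to sizeof(struct ether_vlan_header) * below is the hint vlan(4) is assumed to use to keep a full-sized * MTU on its interfaces rather than shrinking them by the tag length.)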
*/ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif /* * Specify the media types supported by this adapter and register * callbacks to update media and link information */ ifmedia_init(&adapter->media, IFM_IMASK, em_media_change, em_media_status); if ((adapter->hw.phy.media_type == e1000_media_type_fiber) || (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) { u_char fiber_type = IFM_1000_SX; /* default type */ if (adapter->hw.mac.type == e1000_82545) fiber_type = IFM_1000_LX; ifmedia_add(&adapter->media, IFM_ETHER | fiber_type | IFM_FDX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | fiber_type, 0, NULL); } else { ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); if (adapter->hw.phy.type != e1000_phy_ife) { ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL); } } ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); } /********************************************************************* * * Workaround for SmartSpeed on 82541 and 82547 controllers * **********************************************************************/ static void em_smartspeed(struct adapter *adapter) { u16 phy_tmp; if (adapter->link_active || (adapter->hw.phy.type != e1000_phy_igp) || adapter->hw.mac.autoneg == 0 || (adapter->hw.phy.autoneg_advertised & ADVERTISE_1000_FULL) == 0) return; if (adapter->smartspeed == 0) { /* If Master/Slave config fault is asserted twice, * we assume back-to-back */ e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp); if (!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) return; e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp); if (phy_tmp & SR_1000T_MS_CONFIG_FAULT) { e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp); if(phy_tmp & CR_1000T_MS_ENABLE) { phy_tmp &= ~CR_1000T_MS_ENABLE; e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp); adapter->smartspeed++; if(adapter->hw.mac.autoneg && !e1000_phy_setup_autoneg(&adapter->hw) && !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) { phy_tmp |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp); } } } return; } else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) { /* If still no link, perhaps using 2/3 pair cable */ e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp); phy_tmp |= CR_1000T_MS_ENABLE; e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp); if(adapter->hw.mac.autoneg && !e1000_phy_setup_autoneg(&adapter->hw) && !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_tmp)) { phy_tmp |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_tmp); } } /* Restart process after EM_SMARTSPEED_MAX iterations */ if(adapter->smartspeed++ == EM_SMARTSPEED_MAX) adapter->smartspeed = 0; } /* * Manage DMA'able memory. 
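* The helpers below follow the usual newbus pattern, roughly: create * a tag, allocate coherent memory, then bus_dmamap_load() it with * em_dmamap_cb() capturing the single segment's bus address, e.g. * em_dma_malloc(adapter, size, &dma, BUS_DMA_NOWAIT); * ... use dma.dma_vaddr / dma.dma_paddr ... * em_dma_free(adapter, &dma);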
*/ static void em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error) return; *(bus_addr_t *) arg = segs[0].ds_addr; } static int em_dma_malloc(struct adapter *adapter, bus_size_t size, struct em_dma_alloc *dma, int mapflags) { int error; #if __FreeBSD_version >= 700000 error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */ #else error = bus_dma_tag_create(NULL, /* parent */ #endif EM_DBA_ALIGN, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &dma->dma_tag); if (error) { device_printf(adapter->dev, "%s: bus_dma_tag_create failed: %d\n", __func__, error); goto fail_0; } error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dma->dma_map); if (error) { device_printf(adapter->dev, "%s: bus_dmamem_alloc(%ju) failed: %d\n", __func__, (uintmax_t)size, error); goto fail_2; } dma->dma_paddr = 0; error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size, em_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT); if (error || dma->dma_paddr == 0) { device_printf(adapter->dev, "%s: bus_dmamap_load failed: %d\n", __func__, error); goto fail_3; } return (0); fail_3: bus_dmamap_unload(dma->dma_tag, dma->dma_map); fail_2: bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); bus_dma_tag_destroy(dma->dma_tag); fail_0: dma->dma_map = NULL; dma->dma_tag = NULL; return (error); } static void em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma) { if (dma->dma_tag == NULL) return; if (dma->dma_map != NULL) { bus_dmamap_sync(dma->dma_tag, dma->dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(dma->dma_tag, dma->dma_map); bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); dma->dma_map = NULL; } bus_dma_tag_destroy(dma->dma_tag); dma->dma_tag = NULL; } /********************************************************************* * * Allocate memory for tx_buffer structures. The tx_buffer stores all * the information needed to transmit a packet on the wire. 
* **********************************************************************/ static int em_allocate_transmit_structures(struct adapter *adapter) { device_t dev = adapter->dev; struct em_buffer *tx_buffer; int error; /* * Create DMA tags for tx descriptors */ #if __FreeBSD_version >= 700000 if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ #else if ((error = bus_dma_tag_create(NULL, /* parent */ #endif 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ EM_TSO_SIZE, /* maxsize */ EM_MAX_SCATTER, /* nsegments */ EM_TSO_SEG_SIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &adapter->txtag)) != 0) { device_printf(dev, "Unable to allocate TX DMA tag\n"); goto fail; } adapter->tx_buffer_area = malloc(sizeof(struct em_buffer) * adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO); if (adapter->tx_buffer_area == NULL) { device_printf(dev, "Unable to allocate tx_buffer memory\n"); error = ENOMEM; goto fail; } /* Create the descriptor buffer dma maps */ for (int i = 0; i < adapter->num_tx_desc; i++) { tx_buffer = &adapter->tx_buffer_area[i]; error = bus_dmamap_create(adapter->txtag, 0, &tx_buffer->map); if (error != 0) { device_printf(dev, "Unable to create TX DMA map\n"); goto fail; } tx_buffer->next_eop = -1; } return (0); fail: em_free_transmit_structures(adapter); return (error); } /********************************************************************* * * (Re)Initialize transmit structures. * **********************************************************************/ static void em_setup_transmit_structures(struct adapter *adapter) { struct em_buffer *tx_buffer; /* Clear the old ring contents */ bzero(adapter->tx_desc_base, (sizeof(struct e1000_tx_desc)) * adapter->num_tx_desc); /* Free any existing TX buffers */ for (int i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) { tx_buffer = &adapter->tx_buffer_area[i]; bus_dmamap_sync(adapter->txtag, tx_buffer->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(adapter->txtag, tx_buffer->map); m_freem(tx_buffer->m_head); tx_buffer->m_head = NULL; tx_buffer->next_eop = -1; } /* Reset state */ adapter->next_avail_tx_desc = 0; adapter->next_tx_to_clean = 0; adapter->num_tx_desc_avail = adapter->num_tx_desc; bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return; } /********************************************************************* * * Enable transmit unit. 
* **********************************************************************/ static void em_initialize_transmit_unit(struct adapter *adapter) { u32 tctl, tarc, tipg = 0; u64 bus_addr; INIT_DEBUGOUT("em_initialize_transmit_unit: begin"); /* Setup the Base and Length of the Tx Descriptor Ring */ bus_addr = adapter->txdma.dma_paddr; E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0), adapter->num_tx_desc * sizeof(struct e1000_tx_desc)); E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0), (u32)(bus_addr >> 32)); E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0), (u32)bus_addr); /* Setup the HW Tx Head and Tail descriptor pointers */ E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0); E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0); HW_DEBUGOUT2("Base = %x, Length = %x\n", E1000_READ_REG(&adapter->hw, E1000_TDBAL(0)), E1000_READ_REG(&adapter->hw, E1000_TDLEN(0))); /* Set the default values for the Tx Inter Packet Gap timer */ switch (adapter->hw.mac.type) { case e1000_82542: tipg = DEFAULT_82542_TIPG_IPGT; tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; break; case e1000_80003es2lan: tipg = DEFAULT_82543_TIPG_IPGR1; tipg |= DEFAULT_80003ES2LAN_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; break; default: if ((adapter->hw.phy.media_type == e1000_media_type_fiber) || (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) tipg = DEFAULT_82543_TIPG_IPGT_FIBER; else tipg = DEFAULT_82543_TIPG_IPGT_COPPER; tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; } E1000_WRITE_REG(&adapter->hw, E1000_TIPG, tipg); E1000_WRITE_REG(&adapter->hw, E1000_TIDV, adapter->tx_int_delay.value); if(adapter->hw.mac.type >= e1000_82540) E1000_WRITE_REG(&adapter->hw, E1000_TADV, adapter->tx_abs_int_delay.value); if ((adapter->hw.mac.type == e1000_82571) || (adapter->hw.mac.type == e1000_82572)) { tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0)); tarc |= SPEED_MODE_BIT; E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc); } else if (adapter->hw.mac.type == e1000_80003es2lan) { tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(0)); tarc |= 1; E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc); tarc = E1000_READ_REG(&adapter->hw, E1000_TARC(1)); tarc |= 1; E1000_WRITE_REG(&adapter->hw, E1000_TARC(1), tarc); } /* Program the Transmit Control Register */ tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL); tctl &= ~E1000_TCTL_CT; tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN | (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT)); if (adapter->hw.mac.type >= e1000_82571) tctl |= E1000_TCTL_MULR; /* This write will effectively turn on the transmit unit. */ E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl); /* Setup Transmit Descriptor Base Settings */ adapter->txd_cmd = E1000_TXD_CMD_IFCS; if (adapter->tx_int_delay.value > 0) adapter->txd_cmd |= E1000_TXD_CMD_IDE; } /********************************************************************* * * Free all transmit related data structures. 
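 *
 *  Teardown runs in the reverse order of setup: sync and unload each
 *  per-packet DMA map, destroy the maps, free the tx_buffer array, and
 *  only then destroy the DMA tag the maps were created from.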
* **********************************************************************/ static void em_free_transmit_structures(struct adapter *adapter) { struct em_buffer *tx_buffer; INIT_DEBUGOUT("free_transmit_structures: begin"); if (adapter->tx_buffer_area != NULL) { for (int i = 0; i < adapter->num_tx_desc; i++) { tx_buffer = &adapter->tx_buffer_area[i]; if (tx_buffer->m_head != NULL) { bus_dmamap_sync(adapter->txtag, tx_buffer->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(adapter->txtag, tx_buffer->map); m_freem(tx_buffer->m_head); tx_buffer->m_head = NULL; } else if (tx_buffer->map != NULL) bus_dmamap_unload(adapter->txtag, tx_buffer->map); if (tx_buffer->map != NULL) { bus_dmamap_destroy(adapter->txtag, tx_buffer->map); tx_buffer->map = NULL; } } } if (adapter->tx_buffer_area != NULL) { free(adapter->tx_buffer_area, M_DEVBUF); adapter->tx_buffer_area = NULL; } if (adapter->txtag != NULL) { bus_dma_tag_destroy(adapter->txtag); adapter->txtag = NULL; } } /********************************************************************* * * The offload context needs to be set when we transfer the first * packet of a particular protocol (TCP/UDP). This routine has been * enhanced to deal with inserted VLAN headers, and IPV6 (not complete) * **********************************************************************/ static void em_transmit_checksum_setup(struct adapter *adapter, struct mbuf *mp, u32 *txd_upper, u32 *txd_lower) { struct e1000_context_desc *TXD; struct em_buffer *tx_buffer; struct ether_vlan_header *eh; struct ip *ip = NULL; struct ip6_hdr *ip6; struct tcp_hdr *th; int curr_txd, ehdrlen; u32 cmd, hdr_len, ip_hlen; u16 etype; u8 ipproto; cmd = hdr_len = ipproto = 0; /* Setup checksum offload context. */ curr_txd = adapter->next_avail_tx_desc; tx_buffer = &adapter->tx_buffer_area[curr_txd]; TXD = (struct e1000_context_desc *) &adapter->tx_desc_base[curr_txd]; /* * Determine where frame payload starts. * Jump over vlan headers if already present, * helpful for QinQ too. */ eh = mtod(mp, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { etype = ntohs(eh->evl_proto); ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; } else { etype = ntohs(eh->evl_encap_proto); ehdrlen = ETHER_HDR_LEN; } /* * We only support TCP/UDP for IPv4 and IPv6 for the moment. * TODO: Support SCTP too when it hits the tree. */ switch (etype) { case ETHERTYPE_IP: ip = (struct ip *)(mp->m_data + ehdrlen); ip_hlen = ip->ip_hl << 2; /* Setup of IP header checksum. */ if (mp->m_pkthdr.csum_flags & CSUM_IP) { /* * Start offset for header checksum calculation. * End offset for header checksum calculation. * Offset of place to put the checksum. */ TXD->lower_setup.ip_fields.ipcss = ehdrlen; TXD->lower_setup.ip_fields.ipcse = htole16(ehdrlen + ip_hlen); TXD->lower_setup.ip_fields.ipcso = ehdrlen + offsetof(struct ip, ip_sum); cmd |= E1000_TXD_CMD_IP; *txd_upper |= E1000_TXD_POPTS_IXSM << 8; } if (mp->m_len < ehdrlen + ip_hlen) return; /* failure */ hdr_len = ehdrlen + ip_hlen; ipproto = ip->ip_p; break; case ETHERTYPE_IPV6: ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen); ip_hlen = sizeof(struct ip6_hdr); /* XXX: No header stacking. */ if (mp->m_len < ehdrlen + ip_hlen) return; /* failure */ /* IPv6 doesn't have a header checksum. 
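		 * Unlike the IPv4 case above, no IP-checksum context is
		 * set up here; only the TCP/UDP payload checksum context
		 * below applies.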
		 */
		hdr_len = ehdrlen + ip_hlen;
		ipproto = ip6->ip6_nxt;
		break;
#ifdef EM_TIMESYNC
	case ETHERTYPE_IEEE1588:
		*txd_upper |= E1000_TXD_EXTCMD_TSTAMP;
		break;
#endif
	default:
		*txd_upper = 0;
		*txd_lower = 0;
		return;
	}

	switch (ipproto) {
	case IPPROTO_TCP:
		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
			/*
			 * Start offset for payload checksum calculation.
			 * End offset for payload checksum calculation.
			 * Offset of place to put the checksum.
			 */
			th = (struct tcp_hdr *)(mp->m_data + hdr_len);
			TXD->upper_setup.tcp_fields.tucss = hdr_len;
			TXD->upper_setup.tcp_fields.tucse = htole16(0);
			TXD->upper_setup.tcp_fields.tucso = hdr_len +
			    offsetof(struct tcphdr, th_sum);
			cmd |= E1000_TXD_CMD_TCP;
			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
		}
		break;
	case IPPROTO_UDP:
	{
#ifdef EM_TIMESYNC
		void *hdr = (caddr_t) ip + ip_hlen;
		struct udphdr *uh = (struct udphdr *)hdr;

		if (uh->uh_dport == htons(TSYNC_PORT)) {
			*txd_upper |= E1000_TXD_EXTCMD_TSTAMP;
			IOCTL_DEBUGOUT("@@@ Sending Event Packet\n");
		}
#endif
		if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
			/*
			 * Start offset for payload checksum calculation.
			 * End offset for payload checksum calculation.
			 * Offset of place to put the checksum.
			 */
			TXD->upper_setup.tcp_fields.tucss = hdr_len;
			TXD->upper_setup.tcp_fields.tucse = htole16(0);
			TXD->upper_setup.tcp_fields.tucso = hdr_len +
			    offsetof(struct udphdr, uh_sum);
			*txd_upper |= E1000_TXD_POPTS_TXSM << 8;
		}
		/* Fall Thru */
	}
	default:
		break;
	}

#ifdef EM_TIMESYNC
	/*
	** We might be here just for TIMESYNC
	** which means we don't need the context
	** descriptor.
	*/
	if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
		return;
#endif

	*txd_lower = E1000_TXD_CMD_DEXT |	/* Extended descr type */
		     E1000_TXD_DTYP_D;		/* Data descr */
	TXD->tcp_seg_setup.data = htole32(0);
	TXD->cmd_and_length = htole32(adapter->txd_cmd |
	    E1000_TXD_CMD_DEXT | cmd);
	tx_buffer->m_head = NULL;
	tx_buffer->next_eop = -1;

	if (++curr_txd == adapter->num_tx_desc)
		curr_txd = 0;

	adapter->num_tx_desc_avail--;
	adapter->next_avail_tx_desc = curr_txd;
}


#if __FreeBSD_version >= 700000
/**********************************************************************
 *
 *  Setup work for hardware segmentation offload (TSO)
 *
 **********************************************************************/
static bool
em_tso_setup(struct adapter *adapter, struct mbuf *mp, u32 *txd_upper,
   u32 *txd_lower)
{
	struct e1000_context_desc *TXD;
	struct em_buffer *tx_buffer;
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct tcphdr *th;
	int curr_txd, ehdrlen, hdr_len, ip_hlen, isip6;
	u16 etype;

	/*
	 * This function could/should be extended to support IP/IPv6
	 * fragmentation as well. But as they say, one step at a time.
	 */

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Ensure we have at least the IP+TCP header in the first mbuf. */
	if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
		return FALSE;	/* -1 */

	/*
	 * We only support TCP for IPv4 and IPv6 (notyet) for the moment.
	 * TODO: Support SCTP too when it hits the tree.
	 */
	switch (etype) {
	case ETHERTYPE_IP:
		isip6 = 0;
		ip = (struct ip *)(mp->m_data + ehdrlen);
		if (ip->ip_p != IPPROTO_TCP)
			return FALSE;	/* 0 */
		ip->ip_len = 0;
		ip->ip_sum = 0;
		ip_hlen = ip->ip_hl << 2;
		if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
			return FALSE;	/* -1 */
		th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
#if 1
		th->th_sum = in_pseudo(ip->ip_src.s_addr,
		    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
#else
		th->th_sum = mp->m_pkthdr.csum_data;
#endif
		break;
	case ETHERTYPE_IPV6:
		isip6 = 1;
		return FALSE;	/* Not supported yet; the code below is
				   unreachable until IPv6 TSO is done. */
		ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
		if (ip6->ip6_nxt != IPPROTO_TCP)
			return FALSE;	/* 0 */
		ip6->ip6_plen = 0;
		ip_hlen = sizeof(struct ip6_hdr); /* XXX: no header stacking. */
		if (mp->m_len < ehdrlen + ip_hlen + sizeof(struct tcphdr))
			return FALSE;	/* -1 */
		th = (struct tcphdr *)((caddr_t)ip6 + ip_hlen);
#if 0
		th->th_sum = in6_pseudo(ip6->ip6_src, ip6->ip6_dst,
		    htons(IPPROTO_TCP));	/* XXX: function notyet. */
#else
		th->th_sum = mp->m_pkthdr.csum_data;
#endif
		break;
	default:
		return FALSE;
	}

	hdr_len = ehdrlen + ip_hlen + (th->th_off << 2);

	*txd_lower = (E1000_TXD_CMD_DEXT |	/* Extended descr type */
		      E1000_TXD_DTYP_D |	/* Data descr type */
		      E1000_TXD_CMD_TSE);	/* Do TSE on this packet */

	/* IP and/or TCP header checksum calculation and insertion. */
	*txd_upper = ((isip6 ? 0 : E1000_TXD_POPTS_IXSM) |
		      E1000_TXD_POPTS_TXSM) << 8;

	curr_txd = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[curr_txd];
	TXD = (struct e1000_context_desc *) &adapter->tx_desc_base[curr_txd];

	/* IPv6 doesn't have a header checksum. */
	if (!isip6) {
		/*
		 * Start offset for header checksum calculation.
		 * End offset for header checksum calculation.
		 * Offset of place to put the checksum.
		 */
		TXD->lower_setup.ip_fields.ipcss = ehdrlen;
		TXD->lower_setup.ip_fields.ipcse =
		    htole16(ehdrlen + ip_hlen - 1);
		TXD->lower_setup.ip_fields.ipcso =
		    ehdrlen + offsetof(struct ip, ip_sum);
	}
	/*
	 * Start offset for payload checksum calculation.
	 * End offset for payload checksum calculation.
	 * Offset of place to put the checksum.
	 */
	TXD->upper_setup.tcp_fields.tucss = ehdrlen + ip_hlen;
	TXD->upper_setup.tcp_fields.tucse = 0;
	TXD->upper_setup.tcp_fields.tucso =
	    ehdrlen + ip_hlen + offsetof(struct tcphdr, th_sum);
	/*
	 * Payload size per packet w/o any headers.
	 * Length of all headers up to payload.
	 */
	TXD->tcp_seg_setup.fields.mss = htole16(mp->m_pkthdr.tso_segsz);
	TXD->tcp_seg_setup.fields.hdr_len = hdr_len;

	TXD->cmd_and_length = htole32(adapter->txd_cmd |
				E1000_TXD_CMD_DEXT |	/* Extended descr */
				E1000_TXD_CMD_TSE |	/* TSE context */
				(isip6 ? 0 : E1000_TXD_CMD_IP) | /* Do IP csum */
				E1000_TXD_CMD_TCP |	/* Do TCP checksum */
				(mp->m_pkthdr.len - (hdr_len))); /* Total len */

	tx_buffer->m_head = NULL;
	tx_buffer->next_eop = -1;

	if (++curr_txd == adapter->num_tx_desc)
		curr_txd = 0;

	adapter->num_tx_desc_avail--;
	adapter->next_avail_tx_desc = curr_txd;
	adapter->tx_tso = TRUE;

	return TRUE;
}
#endif /* __FreeBSD_version >= 700000 */

/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet then free associated resources. The
 *  tx_buffer is put back on the free queue.
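 *
 *  Worked example (ring size hypothetical): with 256 descriptors, if a
 *  packet occupies slots 10..12 then next_eop is 12, so the loop below
 *  runs with first = 10 and done = 13, i.e. it stops one slot past the
 *  EOP descriptor.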
 *
 **********************************************************************/
static void
em_txeof(struct adapter *adapter)
{
	int first, last, done, num_avail;
	u32 cleaned = 0;
	struct em_buffer *tx_buffer;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct ifnet *ifp = adapter->ifp;

	EM_TX_LOCK_ASSERT(adapter);

	if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
		return;

	num_avail = adapter->num_tx_desc_avail;
	first = adapter->next_tx_to_clean;
	tx_desc = &adapter->tx_desc_base[first];
	tx_buffer = &adapter->tx_buffer_area[first];
	last = tx_buffer->next_eop;
	eop_desc = &adapter->tx_desc_base[last];

	/*
	 * What this does is get the index of the
	 * first descriptor AFTER the EOP of the
	 * first packet, that way we can do the
	 * simple comparison on the inner while loop.
	 */
	if (++last == adapter->num_tx_desc)
		last = 0;
	done = last;

	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
		/* We clean the range of the packet */
		while (first != done) {
			tx_desc->upper.data = 0;
			tx_desc->lower.data = 0;
			tx_desc->buffer_addr = 0;
			++num_avail;
			++cleaned;

			if (tx_buffer->m_head) {
				ifp->if_opackets++;
				bus_dmamap_sync(adapter->txtag,
				    tx_buffer->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(adapter->txtag,
				    tx_buffer->map);

				m_freem(tx_buffer->m_head);
				tx_buffer->m_head = NULL;
			}
			tx_buffer->next_eop = -1;

			if (++first == adapter->num_tx_desc)
				first = 0;

			tx_buffer = &adapter->tx_buffer_area[first];
			tx_desc = &adapter->tx_desc_base[first];
		}
		/* See if we can continue to the next packet */
		last = tx_buffer->next_eop;
		if (last != -1) {
			eop_desc = &adapter->tx_desc_base[last];
			/* Get new done point */
			if (++last == adapter->num_tx_desc)
				last = 0;
			done = last;
		} else
			break;
	}
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	adapter->next_tx_to_clean = first;

	/*
	 * If we have enough room, clear IFF_DRV_OACTIVE to
	 * tell the stack that it is OK to send packets.
	 * If there are no pending descriptors, clear the timeout.
	 */
	if (num_avail > EM_TX_CLEANUP_THRESHOLD) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		if (num_avail == adapter->num_tx_desc) {
			adapter->watchdog_timer = 0;
			adapter->num_tx_desc_avail = num_avail;
			return;
		}
	}

	/* If any descriptors were cleaned, reset the watchdog */
	if (cleaned)
		adapter->watchdog_timer = EM_TX_TIMEOUT;
	adapter->num_tx_desc_avail = num_avail;
	return;
}

/*********************************************************************
 *
 *  When link is lost there is sometimes work still pending in the TX
 *  ring, which would result in a watchdog reset; rather than allow
 *  that, attempt a cleanup and then reinit here. Note that this has
 *  been seen mostly with fiber adapters.
 *
 **********************************************************************/
static void
em_tx_purge(struct adapter *adapter)
{
	if ((!adapter->link_active) && (adapter->watchdog_timer)) {
		EM_TX_LOCK(adapter);
		em_txeof(adapter);
		EM_TX_UNLOCK(adapter);
		if (adapter->watchdog_timer) { /* Still not clean? */
			adapter->watchdog_timer = 0;
			em_init_locked(adapter);
		}
	}
}

/*********************************************************************
 *
 *  Get a buffer from system mbuf buffer pool.
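 *
 *  Note the spare-map trick used below: the new mbuf is loaded into
 *  rx_sparemap first and, only once that succeeds, the spare map and
 *  the ring slot's map are swapped, so a failed allocation never
 *  disturbs the still-loaded buffer at that slot.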
* **********************************************************************/ static int em_get_buf(struct adapter *adapter, int i) { struct mbuf *m; bus_dma_segment_t segs[1]; bus_dmamap_t map; struct em_buffer *rx_buffer; int error, nsegs; m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { adapter->mbuf_cluster_failed++; return (ENOBUFS); } m->m_len = m->m_pkthdr.len = MCLBYTES; if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN)) m_adj(m, ETHER_ALIGN); /* * Using memory from the mbuf cluster pool, invoke the * bus_dma machinery to arrange the memory mapping. */ error = bus_dmamap_load_mbuf_sg(adapter->rxtag, adapter->rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { m_free(m); return (error); } /* If nsegs is wrong then the stack is corrupt. */ KASSERT(nsegs == 1, ("Too many segments returned!")); rx_buffer = &adapter->rx_buffer_area[i]; if (rx_buffer->m_head != NULL) bus_dmamap_unload(adapter->rxtag, rx_buffer->map); map = rx_buffer->map; rx_buffer->map = adapter->rx_sparemap; adapter->rx_sparemap = map; bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD); rx_buffer->m_head = m; adapter->rx_desc_base[i].buffer_addr = htole64(segs[0].ds_addr); return (0); } /********************************************************************* * * Allocate memory for rx_buffer structures. Since we use one * rx_buffer per received packet, the maximum number of rx_buffer's * that we'll need is equal to the number of receive descriptors * that we've allocated. * **********************************************************************/ static int em_allocate_receive_structures(struct adapter *adapter) { device_t dev = adapter->dev; struct em_buffer *rx_buffer; int i, error; adapter->rx_buffer_area = malloc(sizeof(struct em_buffer) * adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO); if (adapter->rx_buffer_area == NULL) { device_printf(dev, "Unable to allocate rx_buffer memory\n"); return (ENOMEM); } #if __FreeBSD_version >= 700000 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ #else error = bus_dma_tag_create(NULL, /* parent */ #endif 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, /* maxsize */ 1, /* nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &adapter->rxtag); if (error) { device_printf(dev, "%s: bus_dma_tag_create failed %d\n", __func__, error); goto fail; } /* Create the spare map (used by getbuf) */ error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT, &adapter->rx_sparemap); if (error) { device_printf(dev, "%s: bus_dmamap_create failed: %d\n", __func__, error); goto fail; } rx_buffer = adapter->rx_buffer_area; for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) { error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT, &rx_buffer->map); if (error) { device_printf(dev, "%s: bus_dmamap_create failed: %d\n", __func__, error); goto fail; } } return (0); fail: em_free_receive_structures(adapter); return (error); } /********************************************************************* * * (Re)initialize receive structures. * **********************************************************************/ static int em_setup_receive_structures(struct adapter *adapter) { struct em_buffer *rx_buffer; int i, error; /* Reset descriptor ring */ bzero(adapter->rx_desc_base, (sizeof(struct e1000_rx_desc)) * adapter->num_rx_desc); /* Free current RX buffers. 
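	 * (sync each loaded map POSTREAD, unload it and free the mbuf;
	 * the maps themselves are kept and reused by em_get_buf() above)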
	 */
	rx_buffer = adapter->rx_buffer_area;
	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
		if (rx_buffer->m_head != NULL) {
			bus_dmamap_sync(adapter->rxtag, rx_buffer->map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
			m_freem(rx_buffer->m_head);
			rx_buffer->m_head = NULL;
		}
	}

	/* Allocate new ones. */
	for (i = 0; i < adapter->num_rx_desc; i++) {
		error = em_get_buf(adapter, i);
		if (error)
			return (error);
	}

	/* Setup our descriptor pointers */
	adapter->next_rx_desc_to_check = 0;
	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*********************************************************************
 *
 *  Enable receive unit.
 *
 **********************************************************************/
#define MAX_INTS_PER_SEC	8000
#define DEFAULT_ITR	     (1000000000/(MAX_INTS_PER_SEC * 256))

static void
em_initialize_receive_unit(struct adapter *adapter)
{
	struct ifnet	*ifp = adapter->ifp;
	u64	bus_addr;
	u32	rctl, rxcsum;

	INIT_DEBUGOUT("em_initialize_receive_unit: begin");

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	if (adapter->hw.mac.type >= e1000_82540) {
		E1000_WRITE_REG(&adapter->hw, E1000_RADV,
		    adapter->rx_abs_int_delay.value);
		/*
		 * Set the interrupt throttling rate. Value is calculated
		 * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns)
		 */
		E1000_WRITE_REG(&adapter->hw, E1000_ITR, DEFAULT_ITR);
	}

	/*
	** When using MSIX interrupts we need to throttle
	** using the EITR register (82574 only)
	*/
	if (adapter->msix)
		for (int i = 0; i < 4; i++)
			E1000_WRITE_REG(&adapter->hw,
			    E1000_EITR_82574(i), DEFAULT_ITR);

	/* Disable accelerated acknowledge */
	if (adapter->hw.mac.type == e1000_82574)
		E1000_WRITE_REG(&adapter->hw,
		    E1000_RFCTL, E1000_RFCTL_ACK_DIS);

	/* Setup the Base and Length of the Rx Descriptor Ring */
	bus_addr = adapter->rxdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0),
	    adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0), (u32)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0), (u32)bus_addr);

	/* Setup the Receive Control Register */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
		   E1000_RCTL_RDMTS_HALF |
		   (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* Make sure VLAN Filters are off */
	rctl &= ~E1000_RCTL_VFE;

	if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
		rctl |= E1000_RCTL_SBP;
	else
		rctl &= ~E1000_RCTL_SBP;

	switch (adapter->rx_buffer_len) {
	default:
	case 2048:
		rctl |= E1000_RCTL_SZ_2048;
		break;
	case 4096:
		rctl |= E1000_RCTL_SZ_4096 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 8192:
		rctl |= E1000_RCTL_SZ_8192 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	case 16384:
		rctl |= E1000_RCTL_SZ_16384 |
		    E1000_RCTL_BSEX | E1000_RCTL_LPE;
		break;
	}

	if (ifp->if_mtu > ETHERMTU)
		rctl |= E1000_RCTL_LPE;
	else
		rctl &= ~E1000_RCTL_LPE;

	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
	if ((adapter->hw.mac.type >= e1000_82543) &&
	    (ifp->if_capenable & IFCAP_RXCSUM)) {
		rxcsum = E1000_READ_REG(&adapter->hw, E1000_RXCSUM);
		rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL);
		E1000_WRITE_REG(&adapter->hw, E1000_RXCSUM, rxcsum);
	}

	/*
	** XXX TEMPORARY WORKAROUND: on some systems with 82573
	** long latencies are observed, like Lenovo X60. This
	** change eliminates the problem, but since having positive
	** values in RDTR is a known source of problems on other
	** platforms another solution is being sought.
	*/
	if (adapter->hw.mac.type == e1000_82573)
		E1000_WRITE_REG(&adapter->hw, E1000_RDTR, 0x20);

	/* Enable Receives */
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);

	/*
	 * Setup the HW Rx Head and
	 * Tail Descriptor Pointers
	 */
	E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), adapter->num_rx_desc - 1);

	return;
}

/*********************************************************************
 *
 *  Free receive related data structures.
 *
 **********************************************************************/
static void
em_free_receive_structures(struct adapter *adapter)
{
	struct em_buffer *rx_buffer;
	int i;

	INIT_DEBUGOUT("free_receive_structures: begin");

	if (adapter->rx_sparemap) {
		bus_dmamap_destroy(adapter->rxtag, adapter->rx_sparemap);
		adapter->rx_sparemap = NULL;
	}

	/* Cleanup any existing buffers */
	if (adapter->rx_buffer_area != NULL) {
		rx_buffer = adapter->rx_buffer_area;
		for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
			if (rx_buffer->m_head != NULL) {
				bus_dmamap_sync(adapter->rxtag,
				    rx_buffer->map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(adapter->rxtag,
				    rx_buffer->map);
				m_freem(rx_buffer->m_head);
				rx_buffer->m_head = NULL;
			} else if (rx_buffer->map != NULL)
				bus_dmamap_unload(adapter->rxtag,
				    rx_buffer->map);
			if (rx_buffer->map != NULL) {
				bus_dmamap_destroy(adapter->rxtag,
				    rx_buffer->map);
				rx_buffer->map = NULL;
			}
		}
	}

	if (adapter->rx_buffer_area != NULL) {
		free(adapter->rx_buffer_area, M_DEVBUF);
		adapter->rx_buffer_area = NULL;
	}

	if (adapter->rxtag != NULL) {
		bus_dma_tag_destroy(adapter->rxtag);
		adapter->rxtag = NULL;
	}
}

/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor ring and passes data which has been
 *  dma'ed into host memory up to the upper layer.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
 *
 *********************************************************************/
static int
-em_rxeof(struct adapter *adapter, int count)
+em_rxeof(struct adapter *adapter, int count, int *rx_npktsp)
{
	struct ifnet	*ifp = adapter->ifp;
	struct mbuf	*mp;
	u8		status, accept_frame = 0, eop = 0;
	u16		len, desc_len, prev_len_adj;
-	int		i;
+	int		i, rx_npkts;
	struct e1000_rx_desc   *current_desc;

	EM_RX_LOCK(adapter);
	i = adapter->next_rx_desc_to_check;
+	rx_npkts = 0;
	current_desc = &adapter->rx_desc_base[i];
	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
	    BUS_DMASYNC_POSTREAD);

	if (!((current_desc->status) & E1000_RXD_STAT_DD)) {
		EM_RX_UNLOCK(adapter);
+		if (rx_npktsp != NULL)
+			*rx_npktsp = rx_npkts;
		return (0);
	}

	while ((current_desc->status & E1000_RXD_STAT_DD) &&
	    (count != 0) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		struct mbuf *m = NULL;

		mp = adapter->rx_buffer_area[i].m_head;
		/*
		 * Can't defer bus_dmamap_sync(9) because TBI_ACCEPT
		 * needs to access the last received byte in the mbuf.
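		 * (The 82543 TBI workaround inspects that last byte to
		 * decide whether a frame the MAC flagged as bad should
		 * be accepted anyway.)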
*/ bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map, BUS_DMASYNC_POSTREAD); accept_frame = 1; prev_len_adj = 0; desc_len = le16toh(current_desc->length); status = current_desc->status; if (status & E1000_RXD_STAT_EOP) { count--; eop = 1; if (desc_len < ETHER_CRC_LEN) { len = 0; prev_len_adj = ETHER_CRC_LEN - desc_len; } else len = desc_len - ETHER_CRC_LEN; } else { eop = 0; len = desc_len; } if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) { u8 last_byte; u32 pkt_len = desc_len; if (adapter->fmp != NULL) pkt_len += adapter->fmp->m_pkthdr.len; last_byte = *(mtod(mp, caddr_t) + desc_len - 1); if (TBI_ACCEPT(&adapter->hw, status, current_desc->errors, pkt_len, last_byte, adapter->min_frame_size, adapter->max_frame_size)) { e1000_tbi_adjust_stats_82543(&adapter->hw, &adapter->stats, pkt_len, adapter->hw.mac.addr, adapter->max_frame_size); if (len > 0) len--; } else accept_frame = 0; } if (accept_frame) { if (em_get_buf(adapter, i) != 0) { ifp->if_iqdrops++; goto discard; } /* Assign correct length to the current fragment */ mp->m_len = len; if (adapter->fmp == NULL) { mp->m_pkthdr.len = len; adapter->fmp = mp; /* Store the first mbuf */ adapter->lmp = mp; } else { /* Chain mbuf's together */ mp->m_flags &= ~M_PKTHDR; /* * Adjust length of previous mbuf in chain if * we received less than 4 bytes in the last * descriptor. */ if (prev_len_adj > 0) { adapter->lmp->m_len -= prev_len_adj; adapter->fmp->m_pkthdr.len -= prev_len_adj; } adapter->lmp->m_next = mp; adapter->lmp = adapter->lmp->m_next; adapter->fmp->m_pkthdr.len += len; } if (eop) { adapter->fmp->m_pkthdr.rcvif = ifp; ifp->if_ipackets++; em_receive_checksum(adapter, current_desc, adapter->fmp); #ifndef __NO_STRICT_ALIGNMENT if (adapter->max_frame_size > (MCLBYTES - ETHER_ALIGN) && em_fixup_rx(adapter) != 0) goto skip; #endif if (status & E1000_RXD_STAT_VP) { #if __FreeBSD_version < 700000 VLAN_INPUT_TAG_NEW(ifp, adapter->fmp, (le16toh(current_desc->special) & E1000_RXD_SPC_VLAN_MASK)); #else adapter->fmp->m_pkthdr.ether_vtag = (le16toh(current_desc->special) & E1000_RXD_SPC_VLAN_MASK); adapter->fmp->m_flags |= M_VLANTAG; #endif } #ifndef __NO_STRICT_ALIGNMENT skip: #endif m = adapter->fmp; adapter->fmp = NULL; adapter->lmp = NULL; } } else { ifp->if_ierrors++; discard: /* Reuse loaded DMA map and just update mbuf chain */ mp = adapter->rx_buffer_area[i].m_head; mp->m_len = mp->m_pkthdr.len = MCLBYTES; mp->m_data = mp->m_ext.ext_buf; mp->m_next = NULL; if (adapter->max_frame_size <= (MCLBYTES - ETHER_ALIGN)) m_adj(mp, ETHER_ALIGN); if (adapter->fmp != NULL) { m_freem(adapter->fmp); adapter->fmp = NULL; adapter->lmp = NULL; } m = NULL; } /* Zero out the receive descriptors status. */ current_desc->status = 0; bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Advance our pointers to the next descriptor. */ if (++i == adapter->num_rx_desc) i = 0; if (m != NULL) { adapter->next_rx_desc_to_check = i; /* Unlock for call into stack */ EM_RX_UNLOCK(adapter); (*ifp->if_input)(ifp, m); EM_RX_LOCK(adapter); + rx_npkts++; i = adapter->next_rx_desc_to_check; } current_desc = &adapter->rx_desc_base[i]; } adapter->next_rx_desc_to_check = i; /* Advance the E1000's Receive Queue #0 "Tail Pointer". 
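	 * RDT is left pointing one descriptor behind the slot software
	 * will examine next (i == 0 wraps to num_rx_desc - 1), so the
	 * hardware never overwrites a descriptor the driver still owns.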
	 */
	if (--i < 0)
		i = adapter->num_rx_desc - 1;
	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), i);

	EM_RX_UNLOCK(adapter);
+	if (rx_npktsp != NULL)
+		*rx_npktsp = rx_npkts;
	if (!((current_desc->status) & E1000_RXD_STAT_DD))
		return (0);

	return (1);
}

#ifndef __NO_STRICT_ALIGNMENT
/*
 * When jumbo frames are enabled we should realign the entire payload on
 * architectures with strict alignment. This is a serious design mistake
 * of the 8254x as it nullifies DMA operations. The 8254x just allows RX
 * buffer size to be 2048/4096/8192/16384. What we really want is
 * 2048 - ETHER_ALIGN to align its payload. On architectures without
 * strict alignment restrictions the 8254x still performs unaligned
 * memory accesses, which reduce performance too.
 * To avoid copying over an entire frame to align, we allocate a new mbuf
 * and copy the ethernet header to the new mbuf. The new mbuf is
 * prepended into the existing mbuf chain.
 *
 * Be aware, best performance of the 8254x is achieved only when jumbo
 * frames are not used at all on architectures with strict alignment.
 */
static int
em_fixup_rx(struct adapter *adapter)
{
	struct mbuf *m, *n;
	int error;

	error = 0;
	m = adapter->fmp;
	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
	} else {
		MGETHDR(n, M_DONTWAIT, MT_DATA);
		if (n != NULL) {
			bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
			m->m_data += ETHER_HDR_LEN;
			m->m_len -= ETHER_HDR_LEN;
			n->m_len = ETHER_HDR_LEN;
			M_MOVE_PKTHDR(n, m);
			n->m_next = m;
			adapter->fmp = n;
		} else {
			adapter->dropped_pkts++;
			m_freem(adapter->fmp);
			adapter->fmp = NULL;
			error = ENOMEM;
		}
	}

	return (error);
}
#endif

/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of the checksum so that it
 *  doesn't spend time verifying it.
 *
 *********************************************************************/
static void
em_receive_checksum(struct adapter *adapter,
    struct e1000_rx_desc *rx_desc, struct mbuf *mp)
{
	/* 82543 or newer only */
	if ((adapter->hw.mac.type < e1000_82543) ||
	    /* Ignore Checksum bit is set */
	    (rx_desc->status & E1000_RXD_STAT_IXSM)) {
		mp->m_pkthdr.csum_flags = 0;
		return;
	}

	if (rx_desc->status & E1000_RXD_STAT_IPCS) {
		/* Did it pass? */
		if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) {
			/* IP Checksum Good */
			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		} else {
			mp->m_pkthdr.csum_flags = 0;
		}
	}

	if (rx_desc->status & E1000_RXD_STAT_TCPCS) {
		/* Did it pass?
		 */
		if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) {
			mp->m_pkthdr.csum_flags |=
			    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
			mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}
}

/*
 * This routine is run via a vlan
 * config EVENT
 */
static void
em_register_vlan(void *unused, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u32		ctrl, rctl, index, vfta;

	ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
	ctrl |= E1000_CTRL_VME;
	E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);

	/* Setup for Hardware Filter */
	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
	rctl |= E1000_RCTL_VFE;
	rctl &= ~E1000_RCTL_CFIEN;
	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);

	/* Make entry in the hardware filter table */
	index = ((vtag >> 5) & 0x7F);
	vfta = E1000_READ_REG_ARRAY(&adapter->hw, E1000_VFTA, index);
	vfta |= (1 << (vtag & 0x1F));
	E1000_WRITE_REG_ARRAY(&adapter->hw, E1000_VFTA, index, vfta);

	/* Update the frame size */
	E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
	    adapter->max_frame_size + VLAN_TAG_SIZE);
}

/*
 * This routine is run via a vlan
 * unconfig EVENT
 */
static void
em_unregister_vlan(void *unused, struct ifnet *ifp, u16 vtag)
{
	struct adapter	*adapter = ifp->if_softc;
	u32		index, vfta;

	/* Remove entry in the hardware filter table */
	index = ((vtag >> 5) & 0x7F);
	vfta = E1000_READ_REG_ARRAY(&adapter->hw, E1000_VFTA, index);
	vfta &= ~(1 << (vtag & 0x1F));
	E1000_WRITE_REG_ARRAY(&adapter->hw, E1000_VFTA, index, vfta);

	/* Have all vlans unregistered? */
	if (adapter->ifp->if_vlantrunk == NULL) {
		u32 rctl;
		/* Turn off the filter table */
		rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
		rctl &= ~E1000_RCTL_VFE;
		rctl |= E1000_RCTL_CFIEN;
		E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
		/* Reset the frame size */
		E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
		    adapter->max_frame_size);
	}
}

static void
em_enable_intr(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ims_mask = IMS_ENABLE_MASK;

	if (adapter->msix) {
		E1000_WRITE_REG(hw, EM_EIAC, EM_MSIX_MASK);
		ims_mask |= EM_MSIX_MASK;
	}
	E1000_WRITE_REG(hw, E1000_IMS, ims_mask);
}

static void
em_disable_intr(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->msix)
		E1000_WRITE_REG(hw, EM_EIAC, 0);
	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff);
}

/*
 * Bit of a misnomer, what this really means is
 * to enable OS management of the system... aka
 * to disable special hardware management features
 */
static void
em_init_manageability(struct adapter *adapter)
{
	/* A shared code workaround */
#define E1000_82542_MANC2H E1000_MANC2H
	if (adapter->has_manage) {
		int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H);
		int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);

		/* disable hardware interception of ARP */
		manc &= ~(E1000_MANC_ARP_EN);

		/* enable receiving management packets to the host */
		if (adapter->hw.mac.type >= e1000_82571) {
			manc |= E1000_MANC_EN_MNG2HOST;
#define E1000_MNG2HOST_PORT_623 (1 << 5)
#define E1000_MNG2HOST_PORT_664 (1 << 6)
			manc2h |= E1000_MNG2HOST_PORT_623;
			manc2h |= E1000_MNG2HOST_PORT_664;
			E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h);
		}

		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
	}
}

/*
 * Give control back to hardware management
 * controller if there is one.
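 * (This undoes em_init_manageability() above: ARP interception is
 * handed back to the firmware and management-to-host forwarding is
 * switched off.)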
 */
static void
em_release_manageability(struct adapter *adapter)
{
	if (adapter->has_manage) {
		int manc = E1000_READ_REG(&adapter->hw, E1000_MANC);

		/* re-enable hardware interception of ARP */
		manc |= E1000_MANC_ARP_EN;

		if (adapter->hw.mac.type >= e1000_82571)
			manc &= ~E1000_MANC_EN_MNG2HOST;

		E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc);
	}
}

/*
 * em_get_hw_control sets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded. For AMT versions (only with 82573)
 * of the f/w this means that the network i/f is open.
 */
static void
em_get_hw_control(struct adapter *adapter)
{
	u32 ctrl_ext, swsm;

	/* Let firmware know the driver has taken over */
	switch (adapter->hw.mac.type) {
	case e1000_82573:
		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
		    swsm | E1000_SWSM_DRV_LOAD);
		break;
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
		    ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
		break;
	default:
		break;
	}
}

/*
 * em_release_hw_control resets the {CTRL_EXT|FWSM}:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT versions (only with 82573)
 * of the f/w this means that the network i/f is closed.
 */
static void
em_release_hw_control(struct adapter *adapter)
{
	u32 ctrl_ext, swsm;

	/* Let firmware take over control of h/w */
	switch (adapter->hw.mac.type) {
	case e1000_82573:
		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
		    swsm & ~E1000_SWSM_DRV_LOAD);
		break;
	case e1000_82571:
	case e1000_82572:
	case e1000_80003es2lan:
	case e1000_ich8lan:
	case e1000_ich9lan:
	case e1000_ich10lan:
		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
		    ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
		break;
	default:
		break;
	}
}

static int
em_is_valid_ether_addr(u8 *addr)
{
	char zero_addr[6] = { 0, 0, 0, 0, 0, 0 };

	if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) {
		return (FALSE);
	}

	return (TRUE);
}

/*
 * Enable PCI Wake On LAN capability
 */
void
em_enable_wakeup(device_t dev)
{
	u16	cap, status;
	u8	id;

	/* First find the capabilities pointer */
	cap = pci_read_config(dev, PCIR_CAP_PTR, 2);

	/* Read the PM Capabilities */
	id = pci_read_config(dev, cap, 1);
	if (id != PCIY_PMG)	/* Something wrong */
		return;
	/*
	 * OK, we have the power capabilities,
	 * so now get the status register
	 */
	cap += PCIR_POWER_STATUS;
	status = pci_read_config(dev, cap, 2);
	status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE;
	pci_write_config(dev, cap, status, 2);
	return;
}

/*********************************************************************
 * 82544 Coexistence issue workaround.
 *    There are 2 issues.
 *      1. Transmit Hang issue.
 *    To detect this issue, the following equation can be used:
 *          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
 *    If SUM[3:0] is between 1 and 4, we will have this issue.
 *
 *      2. DAC issue.
 *    To detect this issue, the following equation can be used:
 *          SIZE[3:0] + ADDR[2:0] = SUM[3:0].
 *    If SUM[3:0] is between 9 and c, we will have this issue.
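 *
 *    Worked example (hypothetical segment): a buffer at bus address
 *    0x1003 with length 0x11 gives ADDR[2:0] = 3 and SIZE[3:0] = 1,
 *    so SUM[3:0] = 4, which falls in the hang range; the code below
 *    then splits it into (length - 4) bytes plus a 4-byte terminator
 *    descriptor.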
* * * WORKAROUND: * Make sure we do not have ending address * as 1,2,3,4(Hang) or 9,a,b,c (DAC) * *************************************************************************/ static u32 em_fill_descriptors (bus_addr_t address, u32 length, PDESC_ARRAY desc_array) { u32 safe_terminator; /* Since issue is sensitive to length and address.*/ /* Let us first check the address...*/ if (length <= 4) { desc_array->descriptor[0].address = address; desc_array->descriptor[0].length = length; desc_array->elements = 1; return (desc_array->elements); } safe_terminator = (u32)((((u32)address & 0x7) + (length & 0xF)) & 0xF); /* if it does not fall between 0x1 to 0x4 and 0x9 to 0xC then return */ if (safe_terminator == 0 || (safe_terminator > 4 && safe_terminator < 9) || (safe_terminator > 0xC && safe_terminator <= 0xF)) { desc_array->descriptor[0].address = address; desc_array->descriptor[0].length = length; desc_array->elements = 1; return (desc_array->elements); } desc_array->descriptor[0].address = address; desc_array->descriptor[0].length = length - 4; desc_array->descriptor[1].address = address + (length - 4); desc_array->descriptor[1].length = 4; desc_array->elements = 2; return (desc_array->elements); } /********************************************************************** * * Update the board statistics counters. * **********************************************************************/ static void em_update_stats_counters(struct adapter *adapter) { struct ifnet *ifp; if(adapter->hw.phy.media_type == e1000_media_type_copper || (E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) { adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, E1000_SYMERRS); adapter->stats.sec += E1000_READ_REG(&adapter->hw, E1000_SEC); } adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, E1000_CRCERRS); adapter->stats.mpc += E1000_READ_REG(&adapter->hw, E1000_MPC); adapter->stats.scc += E1000_READ_REG(&adapter->hw, E1000_SCC); adapter->stats.ecol += E1000_READ_REG(&adapter->hw, E1000_ECOL); adapter->stats.mcc += E1000_READ_REG(&adapter->hw, E1000_MCC); adapter->stats.latecol += E1000_READ_REG(&adapter->hw, E1000_LATECOL); adapter->stats.colc += E1000_READ_REG(&adapter->hw, E1000_COLC); adapter->stats.dc += E1000_READ_REG(&adapter->hw, E1000_DC); adapter->stats.rlec += E1000_READ_REG(&adapter->hw, E1000_RLEC); adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, E1000_XONRXC); adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, E1000_XONTXC); adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, E1000_XOFFRXC); adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, E1000_XOFFTXC); adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, E1000_FCRUC); adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, E1000_PRC64); adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, E1000_PRC127); adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, E1000_PRC255); adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, E1000_PRC511); adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, E1000_PRC1023); adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, E1000_PRC1522); adapter->stats.gprc += E1000_READ_REG(&adapter->hw, E1000_GPRC); adapter->stats.bprc += E1000_READ_REG(&adapter->hw, E1000_BPRC); adapter->stats.mprc += E1000_READ_REG(&adapter->hw, E1000_MPRC); adapter->stats.gptc += E1000_READ_REG(&adapter->hw, E1000_GPTC); /* For the 64-bit byte counters the low dword must be read first. 
*/ /* Both registers clear on the read of the high dword */ adapter->stats.gorc += E1000_READ_REG(&adapter->hw, E1000_GORCH); adapter->stats.gotc += E1000_READ_REG(&adapter->hw, E1000_GOTCH); adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, E1000_RNBC); adapter->stats.ruc += E1000_READ_REG(&adapter->hw, E1000_RUC); adapter->stats.rfc += E1000_READ_REG(&adapter->hw, E1000_RFC); adapter->stats.roc += E1000_READ_REG(&adapter->hw, E1000_ROC); adapter->stats.rjc += E1000_READ_REG(&adapter->hw, E1000_RJC); adapter->stats.tor += E1000_READ_REG(&adapter->hw, E1000_TORH); adapter->stats.tot += E1000_READ_REG(&adapter->hw, E1000_TOTH); adapter->stats.tpr += E1000_READ_REG(&adapter->hw, E1000_TPR); adapter->stats.tpt += E1000_READ_REG(&adapter->hw, E1000_TPT); adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, E1000_PTC64); adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, E1000_PTC127); adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, E1000_PTC255); adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, E1000_PTC511); adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, E1000_PTC1023); adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, E1000_PTC1522); adapter->stats.mptc += E1000_READ_REG(&adapter->hw, E1000_MPTC); adapter->stats.bptc += E1000_READ_REG(&adapter->hw, E1000_BPTC); if (adapter->hw.mac.type >= e1000_82543) { adapter->stats.algnerrc += E1000_READ_REG(&adapter->hw, E1000_ALGNERRC); adapter->stats.rxerrc += E1000_READ_REG(&adapter->hw, E1000_RXERRC); adapter->stats.tncrs += E1000_READ_REG(&adapter->hw, E1000_TNCRS); adapter->stats.cexterr += E1000_READ_REG(&adapter->hw, E1000_CEXTERR); adapter->stats.tsctc += E1000_READ_REG(&adapter->hw, E1000_TSCTC); adapter->stats.tsctfc += E1000_READ_REG(&adapter->hw, E1000_TSCTFC); } ifp = adapter->ifp; ifp->if_collisions = adapter->stats.colc; /* Rx Errors */ ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc + adapter->stats.crcerrs + adapter->stats.algnerrc + adapter->stats.ruc + adapter->stats.roc + adapter->stats.mpc + adapter->stats.cexterr; /* Tx Errors */ ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol + adapter->watchdog_events; } /********************************************************************** * * This routine is called only when em_display_debug_stats is enabled. * This routine provides a way to take a look at important statistics * maintained by the driver and hardware. 
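 *
 *  It is normally triggered through the driver's debug sysctl handler
 *  further below; assuming unit 0 and the sysctl name registered at
 *  attach time, something like:
 *
 *	sysctl dev.em.0.debug_info=1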
* **********************************************************************/ static void em_print_debug_info(struct adapter *adapter) { device_t dev = adapter->dev; u8 *hw_addr = adapter->hw.hw_addr; device_printf(dev, "Adapter hardware address = %p \n", hw_addr); device_printf(dev, "CTRL = 0x%x RCTL = 0x%x \n", E1000_READ_REG(&adapter->hw, E1000_CTRL), E1000_READ_REG(&adapter->hw, E1000_RCTL)); device_printf(dev, "Packet buffer = Tx=%dk Rx=%dk \n", ((E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff0000) >> 16),\ (E1000_READ_REG(&adapter->hw, E1000_PBA) & 0xffff) ); device_printf(dev, "Flow control watermarks high = %d low = %d\n", adapter->hw.fc.high_water, adapter->hw.fc.low_water); device_printf(dev, "tx_int_delay = %d, tx_abs_int_delay = %d\n", E1000_READ_REG(&adapter->hw, E1000_TIDV), E1000_READ_REG(&adapter->hw, E1000_TADV)); device_printf(dev, "rx_int_delay = %d, rx_abs_int_delay = %d\n", E1000_READ_REG(&adapter->hw, E1000_RDTR), E1000_READ_REG(&adapter->hw, E1000_RADV)); device_printf(dev, "fifo workaround = %lld, fifo_reset_count = %lld\n", (long long)adapter->tx_fifo_wrk_cnt, (long long)adapter->tx_fifo_reset_cnt); device_printf(dev, "hw tdh = %d, hw tdt = %d\n", E1000_READ_REG(&adapter->hw, E1000_TDH(0)), E1000_READ_REG(&adapter->hw, E1000_TDT(0))); device_printf(dev, "hw rdh = %d, hw rdt = %d\n", E1000_READ_REG(&adapter->hw, E1000_RDH(0)), E1000_READ_REG(&adapter->hw, E1000_RDT(0))); device_printf(dev, "Num Tx descriptors avail = %d\n", adapter->num_tx_desc_avail); device_printf(dev, "Tx Descriptors not avail1 = %ld\n", adapter->no_tx_desc_avail1); device_printf(dev, "Tx Descriptors not avail2 = %ld\n", adapter->no_tx_desc_avail2); device_printf(dev, "Std mbuf failed = %ld\n", adapter->mbuf_alloc_failed); device_printf(dev, "Std mbuf cluster failed = %ld\n", adapter->mbuf_cluster_failed); device_printf(dev, "Driver dropped packets = %ld\n", adapter->dropped_pkts); device_printf(dev, "Driver tx dma failure in encap = %ld\n", adapter->no_tx_dma_setup); } static void em_print_hw_stats(struct adapter *adapter) { device_t dev = adapter->dev; device_printf(dev, "Excessive collisions = %lld\n", (long long)adapter->stats.ecol); #if (DEBUG_HW > 0) /* Dont output these errors normally */ device_printf(dev, "Symbol errors = %lld\n", (long long)adapter->stats.symerrs); #endif device_printf(dev, "Sequence errors = %lld\n", (long long)adapter->stats.sec); device_printf(dev, "Defer count = %lld\n", (long long)adapter->stats.dc); device_printf(dev, "Missed Packets = %lld\n", (long long)adapter->stats.mpc); device_printf(dev, "Receive No Buffers = %lld\n", (long long)adapter->stats.rnbc); /* RLEC is inaccurate on some hardware, calculate our own. 
*/ device_printf(dev, "Receive Length Errors = %lld\n", ((long long)adapter->stats.roc + (long long)adapter->stats.ruc)); device_printf(dev, "Receive errors = %lld\n", (long long)adapter->stats.rxerrc); device_printf(dev, "Crc errors = %lld\n", (long long)adapter->stats.crcerrs); device_printf(dev, "Alignment errors = %lld\n", (long long)adapter->stats.algnerrc); device_printf(dev, "Collision/Carrier extension errors = %lld\n", (long long)adapter->stats.cexterr); device_printf(dev, "RX overruns = %ld\n", adapter->rx_overruns); device_printf(dev, "watchdog timeouts = %ld\n", adapter->watchdog_events); device_printf(dev, "RX MSIX IRQ = %ld TX MSIX IRQ = %ld" " LINK MSIX IRQ = %ld\n", adapter->rx_irq, adapter->tx_irq , adapter->link_irq); device_printf(dev, "XON Rcvd = %lld\n", (long long)adapter->stats.xonrxc); device_printf(dev, "XON Xmtd = %lld\n", (long long)adapter->stats.xontxc); device_printf(dev, "XOFF Rcvd = %lld\n", (long long)adapter->stats.xoffrxc); device_printf(dev, "XOFF Xmtd = %lld\n", (long long)adapter->stats.xofftxc); device_printf(dev, "Good Packets Rcvd = %lld\n", (long long)adapter->stats.gprc); device_printf(dev, "Good Packets Xmtd = %lld\n", (long long)adapter->stats.gptc); device_printf(dev, "TSO Contexts Xmtd = %lld\n", (long long)adapter->stats.tsctc); device_printf(dev, "TSO Contexts Failed = %lld\n", (long long)adapter->stats.tsctfc); } /********************************************************************** * * This routine provides a way to dump out the adapter eeprom, * often a useful debug/service tool. This only dumps the first * 32 words, stuff that matters is in that extent. * **********************************************************************/ static void em_print_nvm_info(struct adapter *adapter) { u16 eeprom_data; int i, j, row = 0; /* Its a bit crude, but it gets the job done */ printf("\nInterface EEPROM Dump:\n"); printf("Offset\n0x0000 "); for (i = 0, j = 0; i < 32; i++, j++) { if (j == 8) { /* Make the offset block */ j = 0; ++row; printf("\n0x00%x0 ",row); } e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data); printf("%04x ", eeprom_data); } printf("\n"); } static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS) { struct adapter *adapter; int error; int result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { adapter = (struct adapter *)arg1; em_print_debug_info(adapter); } /* * This value will cause a hex dump of the * first 32 16-bit words of the EEPROM to * the screen. 
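	 * (e.g. "sysctl dev.em.0.debug_info=2", assuming unit 0 and the
	 * sysctl name registered at attach time)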
*/ if (result == 2) { adapter = (struct adapter *)arg1; em_print_nvm_info(adapter); } return (error); } static int em_sysctl_stats(SYSCTL_HANDLER_ARGS) { struct adapter *adapter; int error; int result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { adapter = (struct adapter *)arg1; em_print_hw_stats(adapter); } return (error); } static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS) { struct em_int_delay_info *info; struct adapter *adapter; u32 regval; int error; int usecs; int ticks; info = (struct em_int_delay_info *)arg1; usecs = info->value; error = sysctl_handle_int(oidp, &usecs, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (usecs < 0 || usecs > EM_TICKS_TO_USECS(65535)) return (EINVAL); info->value = usecs; ticks = EM_USECS_TO_TICKS(usecs); adapter = info->adapter; EM_CORE_LOCK(adapter); regval = E1000_READ_OFFSET(&adapter->hw, info->offset); regval = (regval & ~0xffff) | (ticks & 0xffff); /* Handle a few special cases. */ switch (info->offset) { case E1000_RDTR: break; case E1000_TIDV: if (ticks == 0) { adapter->txd_cmd &= ~E1000_TXD_CMD_IDE; /* Don't write 0 into the TIDV register. */ regval++; } else adapter->txd_cmd |= E1000_TXD_CMD_IDE; break; } E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval); EM_CORE_UNLOCK(adapter); return (0); } static void em_add_int_delay_sysctl(struct adapter *adapter, const char *name, const char *description, struct em_int_delay_info *info, int offset, int value) { info->adapter = adapter; info->offset = offset; info->value = value; SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev), SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, info, 0, em_sysctl_int_delay, "I", description); } #ifndef EM_LEGACY_IRQ static void em_add_rx_process_limit(struct adapter *adapter, const char *name, const char *description, int *limit, int value) { *limit = value; SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev), SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description); } #endif #ifdef EM_TIMESYNC /* * Initialize the Time Sync Feature */ static int em_tsync_init(struct adapter *adapter) { device_t dev = adapter->dev; u32 tx_ctl, rx_ctl; E1000_WRITE_REG(&adapter->hw, E1000_TIMINCA, (1<<24) | 20833/PICOSECS_PER_TICK); adapter->last_stamp = E1000_READ_REG(&adapter->hw, E1000_SYSTIML); adapter->last_stamp |= (u64)E1000_READ_REG(&adapter->hw, E1000_SYSTIMH) << 32ULL; /* Enable the TX side */ tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL); tx_ctl |= 0x10; E1000_WRITE_REG(&adapter->hw, E1000_TSYNCTXCTL, tx_ctl); E1000_WRITE_FLUSH(&adapter->hw); tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL); if ((tx_ctl & 0x10) == 0) { device_printf(dev, "Failed to enable TX timestamping\n"); return (ENXIO); } /* Enable RX */ rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL); rx_ctl |= 0x10; /* Enable the feature */ rx_ctl |= 0x0a; /* This value turns on Ver 1 and 2 */ E1000_WRITE_REG(&adapter->hw, E1000_TSYNCRXCTL, rx_ctl); /* * Ethertype Stamping (Ethertype = 0x88F7) */ E1000_WRITE_REG(&adapter->hw, E1000_RXMTRL, htonl(0x440088f7)); /* * Source Port Queue Filter Setup: * this is for UDP port filtering */ E1000_WRITE_REG(&adapter->hw, E1000_RXUDP, htons(TSYNC_PORT)); /* Protocol = UDP, enable Timestamp, and filter on source/protocol */ E1000_WRITE_FLUSH(&adapter->hw); rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL); if ((rx_ctl & 0x10) == 0) { 
device_printf(dev, "Failed to enable RX timestamping\n"); return (ENXIO); } device_printf(dev, "IEEE 1588 Precision Time Protocol enabled\n"); return (0); } /* * Disable the Time Sync Feature */ static void em_tsync_disable(struct adapter *adapter) { u32 tx_ctl, rx_ctl; tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL); tx_ctl &= ~0x10; E1000_WRITE_REG(&adapter->hw, E1000_TSYNCTXCTL, tx_ctl); E1000_WRITE_FLUSH(&adapter->hw); /* Invalidate TX Timestamp */ E1000_READ_REG(&adapter->hw, E1000_TXSTMPH); tx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCTXCTL); if (tx_ctl & 0x10) HW_DEBUGOUT("Failed to disable TX timestamping\n"); rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL); rx_ctl &= ~0x10; E1000_WRITE_REG(&adapter->hw, E1000_TSYNCRXCTL, rx_ctl); E1000_WRITE_FLUSH(&adapter->hw); /* Invalidate RX Timestamp */ E1000_READ_REG(&adapter->hw, E1000_RXSATRH); rx_ctl = E1000_READ_REG(&adapter->hw, E1000_TSYNCRXCTL); if (rx_ctl & 0x10) HW_DEBUGOUT("Failed to disable RX timestamping\n"); return; } #endif /* EM_TIMESYNC */ Index: head/sys/dev/firewire/if_fwe.c =================================================================== --- head/sys/dev/firewire/if_fwe.c (revision 193095) +++ head/sys/dev/firewire/if_fwe.c (revision 193096) @@ -1,733 +1,736 @@ /*- * Copyright (c) 2002-2003 * Hidetoshi Shimokawa. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * * This product includes software developed by Hidetoshi Shimokawa. * * 4. Neither the name of the author nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #include "opt_inet.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __DragonFly__ #include #include #include #include "if_fwevar.h" #else #include #include #include #include #endif #define FWEDEBUG if (fwedebug) if_printf #define TX_MAX_QUEUE (FWMAXQUEUE - 1) /* network interface */ static void fwe_start (struct ifnet *); static int fwe_ioctl (struct ifnet *, u_long, caddr_t); static void fwe_init (void *); static void fwe_output_callback (struct fw_xfer *); static void fwe_as_output (struct fwe_softc *, struct ifnet *); static void fwe_as_input (struct fw_xferq *); static int fwedebug = 0; static int stream_ch = 1; static int tx_speed = 2; static int rx_queue_len = FWMAXQUEUE; MALLOC_DEFINE(M_FWE, "if_fwe", "Ethernet over FireWire interface"); SYSCTL_INT(_debug, OID_AUTO, if_fwe_debug, CTLFLAG_RW, &fwedebug, 0, ""); SYSCTL_DECL(_hw_firewire); SYSCTL_NODE(_hw_firewire, OID_AUTO, fwe, CTLFLAG_RD, 0, "Ethernet emulation subsystem"); SYSCTL_INT(_hw_firewire_fwe, OID_AUTO, stream_ch, CTLFLAG_RW, &stream_ch, 0, "Stream channel to use"); SYSCTL_INT(_hw_firewire_fwe, OID_AUTO, tx_speed, CTLFLAG_RW, &tx_speed, 0, "Transmission speed"); SYSCTL_INT(_hw_firewire_fwe, OID_AUTO, rx_queue_len, CTLFLAG_RW, &rx_queue_len, 0, "Length of the receive queue"); TUNABLE_INT("hw.firewire.fwe.stream_ch", &stream_ch); TUNABLE_INT("hw.firewire.fwe.tx_speed", &tx_speed); TUNABLE_INT("hw.firewire.fwe.rx_queue_len", &rx_queue_len); #ifdef DEVICE_POLLING static poll_handler_t fwe_poll; -static void +static int fwe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct fwe_softc *fwe; struct firewire_comm *fc; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) - return; + return (0); fwe = ((struct fwe_eth_softc *)ifp->if_softc)->fwe; fc = fwe->fd.fc; fc->poll(fc, (cmd == POLL_AND_CHECK_STATUS)?0:1, count); + return (0); } #endif /* DEVICE_POLLING */ static void fwe_identify(driver_t *driver, device_t parent) { BUS_ADD_CHILD(parent, 0, "fwe", device_get_unit(parent)); } static int fwe_probe(device_t dev) { device_t pa; pa = device_get_parent(dev); if(device_get_unit(dev) != device_get_unit(pa)){ return(ENXIO); } device_set_desc(dev, "Ethernet over FireWire"); return (0); } static int fwe_attach(device_t dev) { struct fwe_softc *fwe; struct ifnet *ifp; int unit, s; #if defined(__DragonFly__) || __FreeBSD_version < 500000 u_char *eaddr; #else u_char eaddr[6]; #endif struct fw_eui64 *eui; fwe = ((struct fwe_softc *)device_get_softc(dev)); unit = device_get_unit(dev); bzero(fwe, sizeof(struct fwe_softc)); mtx_init(&fwe->mtx, "fwe", NULL, MTX_DEF); /* XXX */ fwe->stream_ch = stream_ch; fwe->dma_ch = -1; fwe->fd.fc = device_get_ivars(dev); if (tx_speed < 0) tx_speed = fwe->fd.fc->speed; fwe->fd.dev = dev; fwe->fd.post_explore = NULL; fwe->eth_softc.fwe = fwe; fwe->pkt_hdr.mode.stream.tcode = FWTCODE_STREAM; fwe->pkt_hdr.mode.stream.sy = 0; fwe->pkt_hdr.mode.stream.chtag = fwe->stream_ch; /* generate fake MAC address: first and last 3bytes from eui64 */ #define LOCAL (0x02) #define GROUP (0x01) #if defined(__DragonFly__) || __FreeBSD_version < 500000 eaddr = &IFP2ENADDR(fwe->eth_softc.ifp)[0]; #endif eui = &fwe->fd.fc->eui; eaddr[0] = (FW_EUI64_BYTE(eui, 0) | LOCAL) & ~GROUP; eaddr[1] = FW_EUI64_BYTE(eui, 1); eaddr[2] = FW_EUI64_BYTE(eui, 2); eaddr[3] = FW_EUI64_BYTE(eui, 5); eaddr[4] = FW_EUI64_BYTE(eui, 6); eaddr[5] = 
FW_EUI64_BYTE(eui, 7); printf("if_fwe%d: Fake Ethernet address: " "%02x:%02x:%02x:%02x:%02x:%02x\n", unit, eaddr[0], eaddr[1], eaddr[2], eaddr[3], eaddr[4], eaddr[5]); /* fill the rest and attach interface */ ifp = fwe->eth_softc.ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); return (ENOSPC); } ifp->if_softc = &fwe->eth_softc; #if __FreeBSD_version >= 501113 || defined(__DragonFly__) if_initname(ifp, device_get_name(dev), unit); #else ifp->if_unit = unit; ifp->if_name = "fwe"; #endif ifp->if_init = fwe_init; #if defined(__DragonFly__) || __FreeBSD_version < 500000 ifp->if_output = ether_output; #endif ifp->if_start = fwe_start; ifp->if_ioctl = fwe_ioctl; ifp->if_mtu = ETHERMTU; ifp->if_flags = (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST); ifp->if_snd.ifq_maxlen = TX_MAX_QUEUE; s = splimp(); #if defined(__DragonFly__) || __FreeBSD_version < 500000 ether_ifattach(ifp, 1); #else ether_ifattach(ifp, eaddr); #endif splx(s); /* Tell the upper layer(s) we support long frames. */ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); #if defined(__FreeBSD__) && __FreeBSD_version >= 500000 ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_POLLING; ifp->if_capenable |= IFCAP_VLAN_MTU; #endif FWEDEBUG(ifp, "interface created\n"); return 0; } static void fwe_stop(struct fwe_softc *fwe) { struct firewire_comm *fc; struct fw_xferq *xferq; struct ifnet *ifp = fwe->eth_softc.ifp; struct fw_xfer *xfer, *next; int i; fc = fwe->fd.fc; if (fwe->dma_ch >= 0) { xferq = fc->ir[fwe->dma_ch]; if (xferq->flag & FWXFERQ_RUNNING) fc->irx_disable(fc, fwe->dma_ch); xferq->flag &= ~(FWXFERQ_MODEMASK | FWXFERQ_OPEN | FWXFERQ_STREAM | FWXFERQ_EXTBUF | FWXFERQ_HANDLER | FWXFERQ_CHTAGMASK); xferq->hand = NULL; for (i = 0; i < xferq->bnchunk; i ++) m_freem(xferq->bulkxfer[i].mbuf); free(xferq->bulkxfer, M_FWE); for (xfer = STAILQ_FIRST(&fwe->xferlist); xfer != NULL; xfer = next) { next = STAILQ_NEXT(xfer, link); fw_xfer_free(xfer); } STAILQ_INIT(&fwe->xferlist); xferq->bulkxfer = NULL; fwe->dma_ch = -1; } #if defined(__FreeBSD__) ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); #else ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); #endif } static int fwe_detach(device_t dev) { struct fwe_softc *fwe; struct ifnet *ifp; int s; fwe = device_get_softc(dev); ifp = fwe->eth_softc.ifp; #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif s = splimp(); fwe_stop(fwe); #if defined(__DragonFly__) || __FreeBSD_version < 500000 ether_ifdetach(ifp, 1); #else ether_ifdetach(ifp); if_free(ifp); #endif splx(s); mtx_destroy(&fwe->mtx); return 0; } static void fwe_init(void *arg) { struct fwe_softc *fwe = ((struct fwe_eth_softc *)arg)->fwe; struct firewire_comm *fc; struct ifnet *ifp = fwe->eth_softc.ifp; struct fw_xferq *xferq; struct fw_xfer *xfer; struct mbuf *m; int i; FWEDEBUG(ifp, "initializing\n"); /* XXX keep promiscuous mode */ ifp->if_flags |= IFF_PROMISC; fc = fwe->fd.fc; if (fwe->dma_ch < 0) { fwe->dma_ch = fw_open_isodma(fc, /* tx */0); if (fwe->dma_ch < 0) return; xferq = fc->ir[fwe->dma_ch]; xferq->flag |= FWXFERQ_EXTBUF | FWXFERQ_HANDLER | FWXFERQ_STREAM; fwe->stream_ch = stream_ch; fwe->pkt_hdr.mode.stream.chtag = fwe->stream_ch; xferq->flag &= ~0xff; xferq->flag |= fwe->stream_ch & 0xff; /* register fwe_input handler */ xferq->sc = (caddr_t) fwe; xferq->hand = fwe_as_input; xferq->bnchunk = rx_queue_len; xferq->bnpacket = 1; xferq->psize = MCLBYTES; xferq->queued = 0; xferq->buf = NULL; xferq->bulkxfer = (struct fw_bulkxfer *) malloc(
sizeof(struct fw_bulkxfer) * xferq->bnchunk, M_FWE, M_WAITOK); if (xferq->bulkxfer == NULL) { printf("if_fwe: malloc failed\n"); return; } STAILQ_INIT(&xferq->stvalid); STAILQ_INIT(&xferq->stfree); STAILQ_INIT(&xferq->stdma); xferq->stproc = NULL; for (i = 0; i < xferq->bnchunk; i ++) { m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR); xferq->bulkxfer[i].mbuf = m; m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; STAILQ_INSERT_TAIL(&xferq->stfree, &xferq->bulkxfer[i], link); } STAILQ_INIT(&fwe->xferlist); for (i = 0; i < TX_MAX_QUEUE; i++) { xfer = fw_xfer_alloc(M_FWE); if (xfer == NULL) break; xfer->send.spd = tx_speed; xfer->fc = fwe->fd.fc; xfer->sc = (caddr_t)fwe; xfer->hand = fwe_output_callback; STAILQ_INSERT_TAIL(&fwe->xferlist, xfer, link); } } else xferq = fc->ir[fwe->dma_ch]; /* start dma */ if ((xferq->flag & FWXFERQ_RUNNING) == 0) fc->irx_enable(fc, fwe->dma_ch); #if defined(__FreeBSD__) ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; #else ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; #endif #if 0 /* attempt to start output */ fwe_start(ifp); #endif } static int fwe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct fwe_softc *fwe = ((struct fwe_eth_softc *)ifp->if_softc)->fwe; struct ifstat *ifs = NULL; int s, error, len; switch (cmd) { case SIOCSIFFLAGS: s = splimp(); if (ifp->if_flags & IFF_UP) { #if defined(__FreeBSD__) if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) #else if (!(ifp->if_flags & IFF_RUNNING)) #endif fwe_init(&fwe->eth_softc); } else { #if defined(__FreeBSD__) if (ifp->if_drv_flags & IFF_DRV_RUNNING) #else if (ifp->if_flags & IFF_RUNNING) #endif fwe_stop(fwe); } /* XXX keep promiscuous mode */ ifp->if_flags |= IFF_PROMISC; splx(s); break; case SIOCADDMULTI: case SIOCDELMULTI: break; case SIOCGIFSTATUS: s = splimp(); ifs = (struct ifstat *)data; len = strlen(ifs->ascii); if (len < sizeof(ifs->ascii)) snprintf(ifs->ascii + len, sizeof(ifs->ascii) - len, "\tch %d dma %d\n", fwe->stream_ch, fwe->dma_ch); splx(s); break; case SIOCSIFCAP: #ifdef DEVICE_POLLING { struct ifreq *ifr = (struct ifreq *) data; struct firewire_comm *fc = fwe->fd.fc; if (ifr->ifr_reqcap & IFCAP_POLLING && !(ifp->if_capenable & IFCAP_POLLING)) { error = ether_poll_register(fwe_poll, ifp); if (error) return(error); /* Disable interrupts */ fc->set_intr(fc, 0); ifp->if_capenable |= IFCAP_POLLING; + ifp->if_capenable |= IFCAP_POLLING_NOCOUNT; return (error); } if (!(ifr->ifr_reqcap & IFCAP_POLLING) && ifp->if_capenable & IFCAP_POLLING) { error = ether_poll_deregister(ifp); /* Enable interrupts.
*/ fc->set_intr(fc, 1); ifp->if_capenable &= ~IFCAP_POLLING; + ifp->if_capenable &= ~IFCAP_POLLING_NOCOUNT; return (error); } } #endif /* DEVICE_POLLING */ break; #if defined(__FreeBSD__) && __FreeBSD_version >= 500000 default: #else case SIOCSIFADDR: case SIOCGIFADDR: case SIOCSIFMTU: #endif s = splimp(); error = ether_ioctl(ifp, cmd, data); splx(s); return (error); #if defined(__DragonFly__) || __FreeBSD_version < 500000 default: return (EINVAL); #endif } return (0); } static void fwe_output_callback(struct fw_xfer *xfer) { struct fwe_softc *fwe; struct ifnet *ifp; int s; fwe = (struct fwe_softc *)xfer->sc; ifp = fwe->eth_softc.ifp; /* XXX error check */ FWEDEBUG(ifp, "resp = %d\n", xfer->resp); if (xfer->resp != 0) ifp->if_oerrors ++; m_freem(xfer->mbuf); fw_xfer_unload(xfer); s = splimp(); FWE_LOCK(fwe); STAILQ_INSERT_TAIL(&fwe->xferlist, xfer, link); FWE_UNLOCK(fwe); splx(s); /* for queue full */ if (ifp->if_snd.ifq_head != NULL) fwe_start(ifp); } static void fwe_start(struct ifnet *ifp) { struct fwe_softc *fwe = ((struct fwe_eth_softc *)ifp->if_softc)->fwe; int s; FWEDEBUG(ifp, "starting\n"); if (fwe->dma_ch < 0) { struct mbuf *m = NULL; FWEDEBUG(ifp, "not ready\n"); s = splimp(); do { IF_DEQUEUE(&ifp->if_snd, m); if (m != NULL) m_freem(m); ifp->if_oerrors ++; } while (m != NULL); splx(s); return; } s = splimp(); #if defined(__FreeBSD__) ifp->if_drv_flags |= IFF_DRV_OACTIVE; #else ifp->if_flags |= IFF_OACTIVE; #endif if (ifp->if_snd.ifq_len != 0) fwe_as_output(fwe, ifp); #if defined(__FreeBSD__) ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; #else ifp->if_flags &= ~IFF_OACTIVE; #endif splx(s); } #define HDR_LEN 4 #ifndef ETHER_ALIGN #define ETHER_ALIGN 2 #endif /* Async. stream output */ static void fwe_as_output(struct fwe_softc *fwe, struct ifnet *ifp) { struct mbuf *m; struct fw_xfer *xfer; struct fw_xferq *xferq; struct fw_pkt *fp; int i = 0; xfer = NULL; xferq = fwe->fd.fc->atq; while ((xferq->queued < xferq->maxq - 1) && (ifp->if_snd.ifq_head != NULL)) { FWE_LOCK(fwe); xfer = STAILQ_FIRST(&fwe->xferlist); if (xfer == NULL) { #if 0 printf("if_fwe: lack of xfer\n"); #endif FWE_UNLOCK(fwe); break; } STAILQ_REMOVE_HEAD(&fwe->xferlist, link); FWE_UNLOCK(fwe); IF_DEQUEUE(&ifp->if_snd, m); if (m == NULL) { FWE_LOCK(fwe); STAILQ_INSERT_HEAD(&fwe->xferlist, xfer, link); FWE_UNLOCK(fwe); break; } #if defined(__DragonFly__) || __FreeBSD_version < 500000 if (ifp->if_bpf != NULL) bpf_mtap(ifp, m); #else BPF_MTAP(ifp, m); #endif /* keep ip packet alignment for alpha */ M_PREPEND(m, ETHER_ALIGN, M_DONTWAIT); fp = &xfer->send.hdr; *(uint32_t *)&xfer->send.hdr = *(int32_t *)&fwe->pkt_hdr; fp->mode.stream.len = m->m_pkthdr.len; xfer->mbuf = m; xfer->send.pay_len = m->m_pkthdr.len; if (fw_asyreq(fwe->fd.fc, -1, xfer) != 0) { /* error */ ifp->if_oerrors ++; /* XXX set error code */ fwe_output_callback(xfer); } else { ifp->if_opackets ++; i++; } } #if 0 if (i > 1) printf("%d queued\n", i); #endif if (i > 0) xferq->start(fwe->fd.fc); } /* Async. 
stream input */ static void fwe_as_input(struct fw_xferq *xferq) { struct mbuf *m, *m0; struct ifnet *ifp; struct fwe_softc *fwe; struct fw_bulkxfer *sxfer; struct fw_pkt *fp; u_char *c; #if defined(__DragonFly__) || __FreeBSD_version < 500000 struct ether_header *eh; #endif fwe = (struct fwe_softc *)xferq->sc; ifp = fwe->eth_softc.ifp; /* We do not need a lock here because the bottom half is serialized */ while ((sxfer = STAILQ_FIRST(&xferq->stvalid)) != NULL) { STAILQ_REMOVE_HEAD(&xferq->stvalid, link); fp = mtod(sxfer->mbuf, struct fw_pkt *); if (fwe->fd.fc->irx_post != NULL) fwe->fd.fc->irx_post(fwe->fd.fc, fp->mode.ld); m = sxfer->mbuf; /* insert new rbuf */ sxfer->mbuf = m0 = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (m0 != NULL) { m0->m_len = m0->m_pkthdr.len = m0->m_ext.ext_size; STAILQ_INSERT_TAIL(&xferq->stfree, sxfer, link); } else printf("%s: m_getcl failed\n", __FUNCTION__); if (sxfer->resp != 0 || fp->mode.stream.len < ETHER_ALIGN + sizeof(struct ether_header)) { m_freem(m); ifp->if_ierrors ++; continue; } m->m_data += HDR_LEN + ETHER_ALIGN; c = mtod(m, u_char *); #if defined(__DragonFly__) || __FreeBSD_version < 500000 eh = (struct ether_header *)c; m->m_data += sizeof(struct ether_header); m->m_len = m->m_pkthdr.len = fp->mode.stream.len - ETHER_ALIGN - sizeof(struct ether_header); #else m->m_len = m->m_pkthdr.len = fp->mode.stream.len - ETHER_ALIGN; #endif m->m_pkthdr.rcvif = ifp; #if 0 FWEDEBUG(ifp, "%02x %02x %02x %02x %02x %02x\n" "%02x %02x %02x %02x %02x %02x\n" "%02x %02x %02x %02x\n" "%02x %02x %02x %02x\n" "%02x %02x %02x %02x\n" "%02x %02x %02x %02x\n", c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7], c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15], c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23], c[20], c[21], c[22], c[23] ); #endif #if defined(__DragonFly__) || __FreeBSD_version < 500000 ether_input(ifp, eh, m); #else (*ifp->if_input)(ifp, m); #endif ifp->if_ipackets ++; } if (STAILQ_FIRST(&xferq->stfree) != NULL) fwe->fd.fc->irx_enable(fwe->fd.fc, fwe->dma_ch); } static devclass_t fwe_devclass; static device_method_t fwe_methods[] = { /* device interface */ DEVMETHOD(device_identify, fwe_identify), DEVMETHOD(device_probe, fwe_probe), DEVMETHOD(device_attach, fwe_attach), DEVMETHOD(device_detach, fwe_detach), { 0, 0 } }; static driver_t fwe_driver = { "fwe", fwe_methods, sizeof(struct fwe_softc), }; #ifdef __DragonFly__ DECLARE_DUMMY_MODULE(fwe); #endif DRIVER_MODULE(fwe, firewire, fwe_driver, fwe_devclass, 0, 0); MODULE_VERSION(fwe, 1); MODULE_DEPEND(fwe, firewire, 1, 1, 1); Index: head/sys/dev/firewire/if_fwip.c =================================================================== --- head/sys/dev/firewire/if_fwip.c (revision 193095) +++ head/sys/dev/firewire/if_fwip.c (revision 193096) @@ -1,983 +1,986 @@ /*- * Copyright (c) 2004 * Doug Rabson * Copyright (c) 2002-2003 * Hidetoshi Shimokawa. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3.
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * * This product includes software developed by Hidetoshi Shimokawa. * * 4. Neither the name of the author nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #include "opt_inet.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef __DragonFly__ #include #include #include "if_fwipvar.h" #else #include #include #include #include #endif /* * We really need a mechanism for allocating regions in the FIFO * address space. We pick an address in the OHCI controller's 'middle' * address space. This means that the controller will automatically * send responses for us, which is fine since we don't have any * important information to put in the response anyway.
*/ #define INET_FIFO 0xfffe00000000LL #define FWIPDEBUG if (fwipdebug) if_printf #define TX_MAX_QUEUE (FWMAXQUEUE - 1) /* network interface */ static void fwip_start (struct ifnet *); static int fwip_ioctl (struct ifnet *, u_long, caddr_t); static void fwip_init (void *); static void fwip_post_busreset (void *); static void fwip_output_callback (struct fw_xfer *); static void fwip_async_output (struct fwip_softc *, struct ifnet *); static void fwip_start_send (void *, int); static void fwip_stream_input (struct fw_xferq *); static void fwip_unicast_input(struct fw_xfer *); static int fwipdebug = 0; static int broadcast_channel = 0xc0 | 0x1f; /* tag | channel(XXX) */ static int tx_speed = 2; static int rx_queue_len = FWMAXQUEUE; MALLOC_DEFINE(M_FWIP, "if_fwip", "IP over FireWire interface"); SYSCTL_INT(_debug, OID_AUTO, if_fwip_debug, CTLFLAG_RW, &fwipdebug, 0, ""); SYSCTL_DECL(_hw_firewire); SYSCTL_NODE(_hw_firewire, OID_AUTO, fwip, CTLFLAG_RD, 0, "Firewire ip subsystem"); SYSCTL_INT(_hw_firewire_fwip, OID_AUTO, rx_queue_len, CTLFLAG_RW, &rx_queue_len, 0, "Length of the receive queue"); TUNABLE_INT("hw.firewire.fwip.rx_queue_len", &rx_queue_len); #ifdef DEVICE_POLLING static poll_handler_t fwip_poll; -static void +static int fwip_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct fwip_softc *fwip; struct firewire_comm *fc; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) - return; + return (0); fwip = ((struct fwip_eth_softc *)ifp->if_softc)->fwip; fc = fwip->fd.fc; fc->poll(fc, (cmd == POLL_AND_CHECK_STATUS)?0:1, count); + return (0); } #endif /* DEVICE_POLLING */ static void fwip_identify(driver_t *driver, device_t parent) { BUS_ADD_CHILD(parent, 0, "fwip", device_get_unit(parent)); } static int fwip_probe(device_t dev) { device_t pa; pa = device_get_parent(dev); if(device_get_unit(dev) != device_get_unit(pa)){ return(ENXIO); } device_set_desc(dev, "IP over FireWire"); return (0); } static int fwip_attach(device_t dev) { struct fwip_softc *fwip; struct ifnet *ifp; int unit, s; struct fw_hwaddr *hwaddr; fwip = ((struct fwip_softc *)device_get_softc(dev)); unit = device_get_unit(dev); ifp = fwip->fw_softc.fwip_ifp = if_alloc(IFT_IEEE1394); if (ifp == NULL) return (ENOSPC); mtx_init(&fwip->mtx, "fwip", NULL, MTX_DEF); /* XXX */ fwip->dma_ch = -1; fwip->fd.fc = device_get_ivars(dev); if (tx_speed < 0) tx_speed = fwip->fd.fc->speed; fwip->fd.dev = dev; fwip->fd.post_explore = NULL; fwip->fd.post_busreset = fwip_post_busreset; fwip->fw_softc.fwip = fwip; TASK_INIT(&fwip->start_send, 0, fwip_start_send, fwip); /* * Encode our hardware the way that arp likes it. 
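 * (That is, in network byte order: the EUI-64 words and the unicast
 * FIFO address below are stored with htonl/htons so that the firewire
 * ARP code can copy the fields verbatim; fwip_async_output makes the
 * same point where it consumes them.)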
*/ hwaddr = &IFP2FWC(fwip->fw_softc.fwip_ifp)->fc_hwaddr; hwaddr->sender_unique_ID_hi = htonl(fwip->fd.fc->eui.hi); hwaddr->sender_unique_ID_lo = htonl(fwip->fd.fc->eui.lo); hwaddr->sender_max_rec = fwip->fd.fc->maxrec; hwaddr->sspd = fwip->fd.fc->speed; hwaddr->sender_unicast_FIFO_hi = htons((uint16_t)(INET_FIFO >> 32)); hwaddr->sender_unicast_FIFO_lo = htonl((uint32_t)INET_FIFO); /* fill the rest and attach interface */ ifp->if_softc = &fwip->fw_softc; #if __FreeBSD_version >= 501113 || defined(__DragonFly__) if_initname(ifp, device_get_name(dev), unit); #else ifp->if_unit = unit; ifp->if_name = "fwip"; #endif ifp->if_init = fwip_init; ifp->if_start = fwip_start; ifp->if_ioctl = fwip_ioctl; ifp->if_flags = (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST); ifp->if_snd.ifq_maxlen = TX_MAX_QUEUE; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif s = splimp(); firewire_ifattach(ifp, hwaddr); splx(s); FWIPDEBUG(ifp, "interface created\n"); return 0; } static void fwip_stop(struct fwip_softc *fwip) { struct firewire_comm *fc; struct fw_xferq *xferq; struct ifnet *ifp = fwip->fw_softc.fwip_ifp; struct fw_xfer *xfer, *next; int i; fc = fwip->fd.fc; if (fwip->dma_ch >= 0) { xferq = fc->ir[fwip->dma_ch]; if (xferq->flag & FWXFERQ_RUNNING) fc->irx_disable(fc, fwip->dma_ch); xferq->flag &= ~(FWXFERQ_MODEMASK | FWXFERQ_OPEN | FWXFERQ_STREAM | FWXFERQ_EXTBUF | FWXFERQ_HANDLER | FWXFERQ_CHTAGMASK); xferq->hand = NULL; for (i = 0; i < xferq->bnchunk; i ++) m_freem(xferq->bulkxfer[i].mbuf); free(xferq->bulkxfer, M_FWIP); fw_bindremove(fc, &fwip->fwb); for (xfer = STAILQ_FIRST(&fwip->fwb.xferlist); xfer != NULL; xfer = next) { next = STAILQ_NEXT(xfer, link); fw_xfer_free(xfer); } for (xfer = STAILQ_FIRST(&fwip->xferlist); xfer != NULL; xfer = next) { next = STAILQ_NEXT(xfer, link); fw_xfer_free(xfer); } STAILQ_INIT(&fwip->xferlist); xferq->bulkxfer = NULL; fwip->dma_ch = -1; } #if defined(__FreeBSD__) ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); #else ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); #endif } static int fwip_detach(device_t dev) { struct fwip_softc *fwip; struct ifnet *ifp; int s; fwip = (struct fwip_softc *)device_get_softc(dev); ifp = fwip->fw_softc.fwip_ifp; #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif s = splimp(); fwip_stop(fwip); firewire_ifdetach(ifp); if_free(ifp); mtx_destroy(&fwip->mtx); splx(s); return 0; } static void fwip_init(void *arg) { struct fwip_softc *fwip = ((struct fwip_eth_softc *)arg)->fwip; struct firewire_comm *fc; struct ifnet *ifp = fwip->fw_softc.fwip_ifp; struct fw_xferq *xferq; struct fw_xfer *xfer; struct mbuf *m; int i; FWIPDEBUG(ifp, "initializing\n"); fc = fwip->fd.fc; #define START 0 if (fwip->dma_ch < 0) { fwip->dma_ch = fw_open_isodma(fc, /* tx */0); if (fwip->dma_ch < 0) return; xferq = fc->ir[fwip->dma_ch]; xferq->flag |= FWXFERQ_EXTBUF | FWXFERQ_HANDLER | FWXFERQ_STREAM; xferq->flag &= ~0xff; xferq->flag |= broadcast_channel & 0xff; /* register fwip_input handler */ xferq->sc = (caddr_t) fwip; xferq->hand = fwip_stream_input; xferq->bnchunk = rx_queue_len; xferq->bnpacket = 1; xferq->psize = MCLBYTES; xferq->queued = 0; xferq->buf = NULL; xferq->bulkxfer = (struct fw_bulkxfer *) malloc( sizeof(struct fw_bulkxfer) * xferq->bnchunk, M_FWIP, M_WAITOK); if (xferq->bulkxfer == NULL) { printf("if_fwip: malloc failed\n"); return; } STAILQ_INIT(&xferq->stvalid); STAILQ_INIT(&xferq->stfree); STAILQ_INIT(&xferq->stdma); xferq->stproc = NULL; for (i = 0; i < xferq->bnchunk; i ++) { m = 
m_getcl(M_WAIT, MT_DATA, M_PKTHDR); xferq->bulkxfer[i].mbuf = m; m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; STAILQ_INSERT_TAIL(&xferq->stfree, &xferq->bulkxfer[i], link); } fwip->fwb.start = INET_FIFO; fwip->fwb.end = INET_FIFO + 16384; /* S3200 packet size */ /* pre-allocate xfer */ STAILQ_INIT(&fwip->fwb.xferlist); for (i = 0; i < rx_queue_len; i ++) { xfer = fw_xfer_alloc(M_FWIP); if (xfer == NULL) break; m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR); xfer->recv.payload = mtod(m, uint32_t *); xfer->recv.pay_len = MCLBYTES; xfer->hand = fwip_unicast_input; xfer->fc = fc; xfer->sc = (caddr_t)fwip; xfer->mbuf = m; STAILQ_INSERT_TAIL(&fwip->fwb.xferlist, xfer, link); } fw_bindadd(fc, &fwip->fwb); STAILQ_INIT(&fwip->xferlist); for (i = 0; i < TX_MAX_QUEUE; i++) { xfer = fw_xfer_alloc(M_FWIP); if (xfer == NULL) break; xfer->send.spd = tx_speed; xfer->fc = fwip->fd.fc; xfer->sc = (caddr_t)fwip; xfer->hand = fwip_output_callback; STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link); } } else xferq = fc->ir[fwip->dma_ch]; fwip->last_dest.hi = 0; fwip->last_dest.lo = 0; /* start dma */ if ((xferq->flag & FWXFERQ_RUNNING) == 0) fc->irx_enable(fc, fwip->dma_ch); #if defined(__FreeBSD__) ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; #else ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; #endif #if 0 /* attempt to start output */ fwip_start(ifp); #endif } static int fwip_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct fwip_softc *fwip = ((struct fwip_eth_softc *)ifp->if_softc)->fwip; int s, error; switch (cmd) { case SIOCSIFFLAGS: s = splimp(); if (ifp->if_flags & IFF_UP) { #if defined(__FreeBSD__) if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) #else if (!(ifp->if_flags & IFF_RUNNING)) #endif fwip_init(&fwip->fw_softc); } else { #if defined(__FreeBSD__) if (ifp->if_drv_flags & IFF_DRV_RUNNING) #else if (ifp->if_flags & IFF_RUNNING) #endif fwip_stop(fwip); } splx(s); break; case SIOCADDMULTI: case SIOCDELMULTI: break; case SIOCSIFCAP: #ifdef DEVICE_POLLING { struct ifreq *ifr = (struct ifreq *) data; struct firewire_comm *fc = fwip->fd.fc; if (ifr->ifr_reqcap & IFCAP_POLLING && !(ifp->if_capenable & IFCAP_POLLING)) { error = ether_poll_register(fwip_poll, ifp); if (error) return(error); /* Disable interrupts */ fc->set_intr(fc, 0); - ifp->if_capenable |= IFCAP_POLLING; + ifp->if_capenable |= IFCAP_POLLING | + IFCAP_POLLING_NOCOUNT; return (error); } if (!(ifr->ifr_reqcap & IFCAP_POLLING) && ifp->if_capenable & IFCAP_POLLING) { error = ether_poll_deregister(ifp); /* Enable interrupts. 
*/ fc->set_intr(fc, 1); ifp->if_capenable &= ~IFCAP_POLLING; + ifp->if_capenable &= ~IFCAP_POLLING_NOCOUNT; return (error); } } #endif /* DEVICE_POLLING */ break; #if defined(__FreeBSD__) && __FreeBSD_version >= 500000 default: #else case SIOCSIFADDR: case SIOCGIFADDR: case SIOCSIFMTU: #endif s = splimp(); error = firewire_ioctl(ifp, cmd, data); splx(s); return (error); #if defined(__DragonFly__) || __FreeBSD_version < 500000 default: return (EINVAL); #endif } return (0); } static void fwip_post_busreset(void *arg) { struct fwip_softc *fwip = arg; struct crom_src *src; struct crom_chunk *root; src = fwip->fd.fc->crom_src; root = fwip->fd.fc->crom_root; /* RFC2734 IPv4 over IEEE1394 */ bzero(&fwip->unit4, sizeof(struct crom_chunk)); crom_add_chunk(src, root, &fwip->unit4, CROM_UDIR); crom_add_entry(&fwip->unit4, CSRKEY_SPEC, CSRVAL_IETF); crom_add_simple_text(src, &fwip->unit4, &fwip->spec4, "IANA"); crom_add_entry(&fwip->unit4, CSRKEY_VER, 1); crom_add_simple_text(src, &fwip->unit4, &fwip->ver4, "IPv4"); /* RFC3146 IPv6 over IEEE1394 */ bzero(&fwip->unit6, sizeof(struct crom_chunk)); crom_add_chunk(src, root, &fwip->unit6, CROM_UDIR); crom_add_entry(&fwip->unit6, CSRKEY_SPEC, CSRVAL_IETF); crom_add_simple_text(src, &fwip->unit6, &fwip->spec6, "IANA"); crom_add_entry(&fwip->unit6, CSRKEY_VER, 2); crom_add_simple_text(src, &fwip->unit6, &fwip->ver6, "IPv6"); fwip->last_dest.hi = 0; fwip->last_dest.lo = 0; firewire_busreset(fwip->fw_softc.fwip_ifp); } static void fwip_output_callback(struct fw_xfer *xfer) { struct fwip_softc *fwip; struct ifnet *ifp; int s; fwip = (struct fwip_softc *)xfer->sc; ifp = fwip->fw_softc.fwip_ifp; /* XXX error check */ FWIPDEBUG(ifp, "resp = %d\n", xfer->resp); if (xfer->resp != 0) ifp->if_oerrors ++; m_freem(xfer->mbuf); fw_xfer_unload(xfer); s = splimp(); FWIP_LOCK(fwip); STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link); FWIP_UNLOCK(fwip); splx(s); /* for queue full */ if (ifp->if_snd.ifq_head != NULL) { fwip_start(ifp); } } static void fwip_start(struct ifnet *ifp) { struct fwip_softc *fwip = ((struct fwip_eth_softc *)ifp->if_softc)->fwip; int s; FWIPDEBUG(ifp, "starting\n"); if (fwip->dma_ch < 0) { struct mbuf *m = NULL; FWIPDEBUG(ifp, "not ready\n"); s = splimp(); do { IF_DEQUEUE(&ifp->if_snd, m); if (m != NULL) m_freem(m); ifp->if_oerrors ++; } while (m != NULL); splx(s); return; } s = splimp(); #if defined(__FreeBSD__) ifp->if_drv_flags |= IFF_DRV_OACTIVE; #else ifp->if_flags |= IFF_OACTIVE; #endif if (ifp->if_snd.ifq_len != 0) fwip_async_output(fwip, ifp); #if defined(__FreeBSD__) ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; #else ifp->if_flags &= ~IFF_OACTIVE; #endif splx(s); } /* Async. stream output */ static void fwip_async_output(struct fwip_softc *fwip, struct ifnet *ifp) { struct firewire_comm *fc = fwip->fd.fc; struct mbuf *m; struct m_tag *mtag; struct fw_hwaddr *destfw; struct fw_xfer *xfer; struct fw_xferq *xferq; struct fw_pkt *fp; uint16_t nodeid; int error; int i = 0; xfer = NULL; xferq = fc->atq; while ((xferq->queued < xferq->maxq - 1) && (ifp->if_snd.ifq_head != NULL)) { FWIP_LOCK(fwip); xfer = STAILQ_FIRST(&fwip->xferlist); if (xfer == NULL) { FWIP_UNLOCK(fwip); #if 0 printf("if_fwip: lack of xfer\n"); #endif break; } STAILQ_REMOVE_HEAD(&fwip->xferlist, link); FWIP_UNLOCK(fwip); IF_DEQUEUE(&ifp->if_snd, m); if (m == NULL) { FWIP_LOCK(fwip); STAILQ_INSERT_HEAD(&fwip->xferlist, xfer, link); FWIP_UNLOCK(fwip); break; } /* * Dig out the link-level address which * firewire_output got via arp or neighbour * discovery. 
If we don't have a link-level address, * just stick the thing on the broadcast channel. */ mtag = m_tag_locate(m, MTAG_FIREWIRE, MTAG_FIREWIRE_HWADDR, 0); if (mtag == NULL) destfw = 0; else destfw = (struct fw_hwaddr *) (mtag + 1); /* * We don't do any bpf stuff here - the generic code * in firewire_output gives the packet to bpf before * it adds the link-level encapsulation. */ /* * Put the mbuf in the xfer early in case we hit an * error case below - fwip_output_callback will free * the mbuf. */ xfer->mbuf = m; /* * We use the arp result (if any) to add a suitable firewire * packet header before handing off to the bus. */ fp = &xfer->send.hdr; nodeid = FWLOCALBUS | fc->nodeid; if ((m->m_flags & M_BCAST) || !destfw) { /* * Broadcast packets are sent as GASP packets with * specifier ID 0x00005e, version 1 on the broadcast * channel. To be conservative, we send at the * slowest possible speed. */ uint32_t *p; M_PREPEND(m, 2*sizeof(uint32_t), M_DONTWAIT); p = mtod(m, uint32_t *); fp->mode.stream.len = m->m_pkthdr.len; fp->mode.stream.chtag = broadcast_channel; fp->mode.stream.tcode = FWTCODE_STREAM; fp->mode.stream.sy = 0; xfer->send.spd = 0; p[0] = htonl(nodeid << 16); p[1] = htonl((0x5e << 24) | 1); } else { /* * Unicast packets are sent as block writes to the * target's unicast fifo address. If we can't * find the node address, we just give up. We * could broadcast it but that might overflow * the packet size limitations due to the * extra GASP header. Note: the hardware * address is stored in network byte order to * make life easier for ARP. */ struct fw_device *fd; struct fw_eui64 eui; eui.hi = ntohl(destfw->sender_unique_ID_hi); eui.lo = ntohl(destfw->sender_unique_ID_lo); if (fwip->last_dest.hi != eui.hi || fwip->last_dest.lo != eui.lo) { fd = fw_noderesolve_eui64(fc, &eui); if (!fd) { /* error */ ifp->if_oerrors ++; /* XXX set error code */ fwip_output_callback(xfer); continue; } fwip->last_hdr.mode.wreqb.dst = FWLOCALBUS | fd->dst; fwip->last_hdr.mode.wreqb.tlrt = 0; fwip->last_hdr.mode.wreqb.tcode = FWTCODE_WREQB; fwip->last_hdr.mode.wreqb.pri = 0; fwip->last_hdr.mode.wreqb.src = nodeid; fwip->last_hdr.mode.wreqb.dest_hi = ntohs(destfw->sender_unicast_FIFO_hi); fwip->last_hdr.mode.wreqb.dest_lo = ntohl(destfw->sender_unicast_FIFO_lo); fwip->last_hdr.mode.wreqb.extcode = 0; fwip->last_dest = eui; } fp->mode.wreqb = fwip->last_hdr.mode.wreqb; fp->mode.wreqb.len = m->m_pkthdr.len; xfer->send.spd = min(destfw->sspd, fc->speed); } xfer->send.pay_len = m->m_pkthdr.len; error = fw_asyreq(fc, -1, xfer); if (error == EAGAIN) { /* * We ran out of tlabels - requeue the packet * for later transmission. */ xfer->mbuf = 0; FWIP_LOCK(fwip); STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link); FWIP_UNLOCK(fwip); IF_PREPEND(&ifp->if_snd, m); break; } if (error) { /* error */ ifp->if_oerrors ++; /* XXX set error code */ fwip_output_callback(xfer); continue; } else { ifp->if_opackets ++; i++; } } #if 0 if (i > 1) printf("%d queued\n", i); #endif if (i > 0) xferq->start(fc); } static void fwip_start_send (void *arg, int count) { struct fwip_softc *fwip = arg; fwip->fd.fc->atq->start(fwip->fd.fc); } /* Async. 
stream input */ static void fwip_stream_input(struct fw_xferq *xferq) { struct mbuf *m, *m0; struct m_tag *mtag; struct ifnet *ifp; struct fwip_softc *fwip; struct fw_bulkxfer *sxfer; struct fw_pkt *fp; uint16_t src; uint32_t *p; fwip = (struct fwip_softc *)xferq->sc; ifp = fwip->fw_softc.fwip_ifp; while ((sxfer = STAILQ_FIRST(&xferq->stvalid)) != NULL) { STAILQ_REMOVE_HEAD(&xferq->stvalid, link); fp = mtod(sxfer->mbuf, struct fw_pkt *); if (fwip->fd.fc->irx_post != NULL) fwip->fd.fc->irx_post(fwip->fd.fc, fp->mode.ld); m = sxfer->mbuf; /* insert new rbuf */ sxfer->mbuf = m0 = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (m0 != NULL) { m0->m_len = m0->m_pkthdr.len = m0->m_ext.ext_size; STAILQ_INSERT_TAIL(&xferq->stfree, sxfer, link); } else printf("fwip_as_input: m_getcl failed\n"); /* * We must have a GASP header - leave the * encapsulation sanity checks to the generic * code. Remember that we also have the firewire async * stream header even though that isn't accounted for * in mode.stream.len. */ if (sxfer->resp != 0 || fp->mode.stream.len < 2*sizeof(uint32_t)) { m_freem(m); ifp->if_ierrors ++; continue; } m->m_len = m->m_pkthdr.len = fp->mode.stream.len + sizeof(fp->mode.stream); /* * If we received the packet on the broadcast channel, * mark it as broadcast, otherwise we assume it must * be multicast. */ if (fp->mode.stream.chtag == broadcast_channel) m->m_flags |= M_BCAST; else m->m_flags |= M_MCAST; /* * Make sure we recognise the GASP specifier and * version. */ p = mtod(m, uint32_t *); if ((((ntohl(p[1]) & 0xffff) << 8) | ntohl(p[2]) >> 24) != 0x00005e || (ntohl(p[2]) & 0xffffff) != 1) { FWIPDEBUG(ifp, "Unrecognised GASP header %#08x %#08x\n", ntohl(p[1]), ntohl(p[2])); m_freem(m); ifp->if_ierrors ++; continue; } /* * Record the sender ID for possible BPF usage. */ src = ntohl(p[1]) >> 16; if (bpf_peers_present(ifp->if_bpf)) { mtag = m_tag_alloc(MTAG_FIREWIRE, MTAG_FIREWIRE_SENDER_EUID, 2*sizeof(uint32_t), M_NOWAIT); if (mtag) { /* bpf wants it in network byte order */ struct fw_device *fd; uint32_t *p = (uint32_t *) (mtag + 1); fd = fw_noderesolve_nodeid(fwip->fd.fc, src & 0x3f); if (fd) { p[0] = htonl(fd->eui.hi); p[1] = htonl(fd->eui.lo); } else { p[0] = 0; p[1] = 0; } m_tag_prepend(m, mtag); } } /* * Trim off the GASP header */ m_adj(m, 3*sizeof(uint32_t)); m->m_pkthdr.rcvif = ifp; firewire_input(ifp, m, src); ifp->if_ipackets ++; } if (STAILQ_FIRST(&xferq->stfree) != NULL) fwip->fd.fc->irx_enable(fwip->fd.fc, fwip->dma_ch); } static __inline void fwip_unicast_input_recycle(struct fwip_softc *fwip, struct fw_xfer *xfer) { struct mbuf *m; /* * We have finished with a unicast xfer. Allocate a new * cluster and stick it on the back of the input queue. */ m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR); xfer->mbuf = m; xfer->recv.payload = mtod(m, uint32_t *); xfer->recv.pay_len = MCLBYTES; xfer->mbuf = m; STAILQ_INSERT_TAIL(&fwip->fwb.xferlist, xfer, link); } static void fwip_unicast_input(struct fw_xfer *xfer) { uint64_t address; struct mbuf *m; struct m_tag *mtag; struct ifnet *ifp; struct fwip_softc *fwip; struct fw_pkt *fp; //struct fw_pkt *sfp; int rtcode; fwip = (struct fwip_softc *)xfer->sc; ifp = fwip->fw_softc.fwip_ifp; m = xfer->mbuf; xfer->mbuf = 0; fp = &xfer->recv.hdr; /* * Check the fifo address - we only accept addresses of * exactly INET_FIFO.
*/ address = ((uint64_t)fp->mode.wreqb.dest_hi << 32) | fp->mode.wreqb.dest_lo; if (fp->mode.wreqb.tcode != FWTCODE_WREQB) { rtcode = FWRCODE_ER_TYPE; } else if (address != INET_FIFO) { rtcode = FWRCODE_ER_ADDR; } else { rtcode = FWRCODE_COMPLETE; } /* * Pick up a new mbuf and stick it on the back of the receive * queue. */ fwip_unicast_input_recycle(fwip, xfer); /* * If we've already rejected the packet, give up now. */ if (rtcode != FWRCODE_COMPLETE) { m_freem(m); ifp->if_ierrors ++; return; } if (bpf_peers_present(ifp->if_bpf)) { /* * Record the sender ID for possible BPF usage. */ mtag = m_tag_alloc(MTAG_FIREWIRE, MTAG_FIREWIRE_SENDER_EUID, 2*sizeof(uint32_t), M_NOWAIT); if (mtag) { /* bpf wants it in network byte order */ struct fw_device *fd; uint32_t *p = (uint32_t *) (mtag + 1); fd = fw_noderesolve_nodeid(fwip->fd.fc, fp->mode.wreqb.src & 0x3f); if (fd) { p[0] = htonl(fd->eui.hi); p[1] = htonl(fd->eui.lo); } else { p[0] = 0; p[1] = 0; } m_tag_prepend(m, mtag); } } /* * Hand off to the generic encapsulation code. We don't use * ifp->if_input so that we can pass the source nodeid as an * argument to facilitate link-level fragment reassembly. */ m->m_len = m->m_pkthdr.len = fp->mode.wreqb.len; m->m_pkthdr.rcvif = ifp; firewire_input(ifp, m, fp->mode.wreqb.src); ifp->if_ipackets ++; } static devclass_t fwip_devclass; static device_method_t fwip_methods[] = { /* device interface */ DEVMETHOD(device_identify, fwip_identify), DEVMETHOD(device_probe, fwip_probe), DEVMETHOD(device_attach, fwip_attach), DEVMETHOD(device_detach, fwip_detach), { 0, 0 } }; static driver_t fwip_driver = { "fwip", fwip_methods, sizeof(struct fwip_softc), }; #ifdef __DragonFly__ DECLARE_DUMMY_MODULE(fwip); #endif DRIVER_MODULE(fwip, firewire, fwip_driver, fwip_devclass, 0, 0); MODULE_VERSION(fwip, 1); MODULE_DEPEND(fwip, firewire, 1, 1, 1); Index: head/sys/dev/fxp/if_fxp.c =================================================================== --- head/sys/dev/fxp/if_fxp.c (revision 193095) +++ head/sys/dev/fxp/if_fxp.c (revision 193096) @@ -1,3051 +1,3058 @@ /*- * Copyright (c) 1995, David Greenman * Copyright (c) 2001 Jonathan Lemon * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include __FBSDID("$FreeBSD$"); /* * Intel EtherExpress Pro/100B PCI Fast Ethernet driver */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for PCIM_CMD_xxx */ #include #include #include #include #include MODULE_DEPEND(fxp, pci, 1, 1, 1); MODULE_DEPEND(fxp, ether, 1, 1, 1); MODULE_DEPEND(fxp, miibus, 1, 1, 1); #include "miibus_if.h" /* * NOTE! On the Alpha, we have an alignment constraint. The * card DMAs the packet immediately following the RFA. However, * the first thing in the packet is a 14-byte Ethernet header. * This means that the packet is misaligned. To compensate, * we actually offset the RFA 2 bytes into the cluster. This * aligns the packet after the Ethernet header at a 32-bit * boundary. HOWEVER! This means that the RFA is misaligned! */ #define RFA_ALIGNMENT_FUDGE 2 /* * Set initial transmit threshold at 64 (512 bytes). This is * increased by 64 (512 bytes) at a time, to a maximum of 192 * (1536 bytes), if an underrun occurs. */ static int tx_threshold = 64; /* * The configuration byte map has several undefined fields which * must be one or must be zero. Set up a template for these bits * only, (assuming a 82557 chip) leaving the actual configuration * to fxp_init. * * See struct fxp_cb_config for the bit definitions. */ static u_char fxp_cb_config_template[] = { 0x0, 0x0, /* cb_status */ 0x0, 0x0, /* cb_command */ 0x0, 0x0, 0x0, 0x0, /* link_addr */ 0x0, /* 0 */ 0x0, /* 1 */ 0x0, /* 2 */ 0x0, /* 3 */ 0x0, /* 4 */ 0x0, /* 5 */ 0x32, /* 6 */ 0x0, /* 7 */ 0x0, /* 8 */ 0x0, /* 9 */ 0x6, /* 10 */ 0x0, /* 11 */ 0x0, /* 12 */ 0x0, /* 13 */ 0xf2, /* 14 */ 0x48, /* 15 */ 0x0, /* 16 */ 0x40, /* 17 */ 0xf0, /* 18 */ 0x0, /* 19 */ 0x3f, /* 20 */ 0x5 /* 21 */ }; struct fxp_ident { uint16_t devid; int16_t revid; /* -1 matches anything */ char *name; }; /* * Claim various Intel PCI device identifiers for this driver. The * sub-vendor and sub-device fields are extensively used to identify * particular variants, but we don't currently differentiate between * them.
*/ static struct fxp_ident fxp_ident_table[] = { { 0x1029, -1, "Intel 82559 PCI/CardBus Pro/100" }, { 0x1030, -1, "Intel 82559 Pro/100 Ethernet" }, { 0x1031, -1, "Intel 82801CAM (ICH3) Pro/100 VE Ethernet" }, { 0x1032, -1, "Intel 82801CAM (ICH3) Pro/100 VE Ethernet" }, { 0x1033, -1, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" }, { 0x1034, -1, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" }, { 0x1035, -1, "Intel 82801CAM (ICH3) Pro/100 Ethernet" }, { 0x1036, -1, "Intel 82801CAM (ICH3) Pro/100 Ethernet" }, { 0x1037, -1, "Intel 82801CAM (ICH3) Pro/100 Ethernet" }, { 0x1038, -1, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" }, { 0x1039, -1, "Intel 82801DB (ICH4) Pro/100 VE Ethernet" }, { 0x103A, -1, "Intel 82801DB (ICH4) Pro/100 Ethernet" }, { 0x103B, -1, "Intel 82801DB (ICH4) Pro/100 VM Ethernet" }, { 0x103C, -1, "Intel 82801DB (ICH4) Pro/100 Ethernet" }, { 0x103D, -1, "Intel 82801DB (ICH4) Pro/100 VE Ethernet" }, { 0x103E, -1, "Intel 82801DB (ICH4) Pro/100 VM Ethernet" }, { 0x1050, -1, "Intel 82801BA (D865) Pro/100 VE Ethernet" }, { 0x1051, -1, "Intel 82562ET (ICH5/ICH5R) Pro/100 VE Ethernet" }, { 0x1059, -1, "Intel 82551QM Pro/100 M Mobile Connection" }, { 0x1064, -1, "Intel 82562EZ (ICH6)" }, { 0x1065, -1, "Intel 82562ET/EZ/GT/GZ PRO/100 VE Ethernet" }, { 0x1068, -1, "Intel 82801FBM (ICH6-M) Pro/100 VE Ethernet" }, { 0x1069, -1, "Intel 82562EM/EX/GX Pro/100 Ethernet" }, { 0x1091, -1, "Intel 82562GX Pro/100 Ethernet" }, { 0x1092, -1, "Intel Pro/100 VE Network Connection" }, { 0x1093, -1, "Intel Pro/100 VM Network Connection" }, { 0x1094, -1, "Intel Pro/100 946GZ (ICH7) Network Connection" }, { 0x1209, -1, "Intel 82559ER Embedded 10/100 Ethernet" }, { 0x1229, 0x01, "Intel 82557 Pro/100 Ethernet" }, { 0x1229, 0x02, "Intel 82557 Pro/100 Ethernet" }, { 0x1229, 0x03, "Intel 82557 Pro/100 Ethernet" }, { 0x1229, 0x04, "Intel 82558 Pro/100 Ethernet" }, { 0x1229, 0x05, "Intel 82558 Pro/100 Ethernet" }, { 0x1229, 0x06, "Intel 82559 Pro/100 Ethernet" }, { 0x1229, 0x07, "Intel 82559 Pro/100 Ethernet" }, { 0x1229, 0x08, "Intel 82559 Pro/100 Ethernet" }, { 0x1229, 0x09, "Intel 82559ER Pro/100 Ethernet" }, { 0x1229, 0x0c, "Intel 82550 Pro/100 Ethernet" }, { 0x1229, 0x0d, "Intel 82550 Pro/100 Ethernet" }, { 0x1229, 0x0e, "Intel 82550 Pro/100 Ethernet" }, { 0x1229, 0x0f, "Intel 82551 Pro/100 Ethernet" }, { 0x1229, 0x10, "Intel 82551 Pro/100 Ethernet" }, { 0x1229, -1, "Intel 82557/8/9 Pro/100 Ethernet" }, { 0x2449, -1, "Intel 82801BA/CAM (ICH2/3) Pro/100 Ethernet" }, { 0x27dc, -1, "Intel 82801GB (ICH7) 10/100 Ethernet" }, { 0, -1, NULL }, }; #ifdef FXP_IP_CSUM_WAR #define FXP_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) #else #define FXP_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) #endif static int fxp_probe(device_t dev); static int fxp_attach(device_t dev); static int fxp_detach(device_t dev); static int fxp_shutdown(device_t dev); static int fxp_suspend(device_t dev); static int fxp_resume(device_t dev); static void fxp_intr(void *xsc); static void fxp_rxcsum(struct fxp_softc *sc, struct ifnet *ifp, struct mbuf *m, uint16_t status, int pos); -static void fxp_intr_body(struct fxp_softc *sc, struct ifnet *ifp, +static int fxp_intr_body(struct fxp_softc *sc, struct ifnet *ifp, uint8_t statack, int count); static void fxp_init(void *xsc); static void fxp_init_body(struct fxp_softc *sc); static void fxp_tick(void *xsc); static void fxp_start(struct ifnet *ifp); static void fxp_start_body(struct ifnet *ifp); static int fxp_encap(struct fxp_softc *sc, struct mbuf **m_head); static void fxp_txeof(struct fxp_softc *sc); 
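(Context for the prototype change above: under the new polling KPI a poll_handler_t returns an int, the number of packets the handler processed during the call, so that the polling core can adapt its per-pass budget. Devices that cannot produce a meaningful count, such as the firewire pseudo-interfaces earlier in this change, additionally set IFCAP_POLLING_NOCOUNT and simply return 0. A minimal sketch of the new handler shape, using hypothetical xx_* names rather than any driver in this commit:

static int
xx_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct xx_softc *sc = ifp->if_softc;	/* hypothetical softc */
	int rx_npkts = 0;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return (rx_npkts);
	/* Process at most 'count' received packets, counting each one. */
	rx_npkts = xx_rxeof(sc, count);		/* hypothetical rx helper */
	if (cmd == POLL_AND_CHECK_STATUS) {
		/* Infrequent slow-path work: link state, error counters. */
	}
	return (rx_npkts);
}

fxp_intr_body above is converted in the same spirit, returning its receive count to its callers.)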
static void fxp_stop(struct fxp_softc *sc); static void fxp_release(struct fxp_softc *sc); static int fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data); static void fxp_watchdog(struct fxp_softc *sc); static void fxp_add_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp); static void fxp_discard_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp); static int fxp_new_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp); static int fxp_mc_addrs(struct fxp_softc *sc); static void fxp_mc_setup(struct fxp_softc *sc); static uint16_t fxp_eeprom_getword(struct fxp_softc *sc, int offset, int autosize); static void fxp_eeprom_putword(struct fxp_softc *sc, int offset, uint16_t data); static void fxp_autosize_eeprom(struct fxp_softc *sc); static void fxp_read_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words); static void fxp_write_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words); static int fxp_ifmedia_upd(struct ifnet *ifp); static void fxp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr); static int fxp_serial_ifmedia_upd(struct ifnet *ifp); static void fxp_serial_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr); static int fxp_miibus_readreg(device_t dev, int phy, int reg); static int fxp_miibus_writereg(device_t dev, int phy, int reg, int value); static void fxp_load_ucode(struct fxp_softc *sc); static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high); static int sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS); static int sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS); static void fxp_scb_wait(struct fxp_softc *sc); static void fxp_scb_cmd(struct fxp_softc *sc, int cmd); static void fxp_dma_wait(struct fxp_softc *sc, volatile uint16_t *status, bus_dma_tag_t dmat, bus_dmamap_t map); static device_method_t fxp_methods[] = { /* Device interface */ DEVMETHOD(device_probe, fxp_probe), DEVMETHOD(device_attach, fxp_attach), DEVMETHOD(device_detach, fxp_detach), DEVMETHOD(device_shutdown, fxp_shutdown), DEVMETHOD(device_suspend, fxp_suspend), DEVMETHOD(device_resume, fxp_resume), /* MII interface */ DEVMETHOD(miibus_readreg, fxp_miibus_readreg), DEVMETHOD(miibus_writereg, fxp_miibus_writereg), { 0, 0 } }; static driver_t fxp_driver = { "fxp", fxp_methods, sizeof(struct fxp_softc), }; static devclass_t fxp_devclass; DRIVER_MODULE(fxp, pci, fxp_driver, fxp_devclass, 0, 0); DRIVER_MODULE(miibus, fxp, miibus_driver, miibus_devclass, 0, 0); static struct resource_spec fxp_res_spec_mem[] = { { SYS_RES_MEMORY, FXP_PCI_MMBA, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; static struct resource_spec fxp_res_spec_io[] = { { SYS_RES_IOPORT, FXP_PCI_IOBA, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; /* * Wait for the previous command to be accepted (but not necessarily * completed). 
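 * The SCB command byte reads back as zero once the previous command
 * has been accepted, so the loop below spins on it, bounding the wait
 * at roughly 20 ms (10000 iterations of DELAY(2)) before reporting a
 * timeout.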
*/ static void fxp_scb_wait(struct fxp_softc *sc) { union { uint16_t w; uint8_t b[2]; } flowctl; int i = 10000; while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i) DELAY(2); if (i == 0) { flowctl.b[0] = CSR_READ_1(sc, FXP_CSR_FLOWCONTROL); flowctl.b[1] = CSR_READ_1(sc, FXP_CSR_FLOWCONTROL + 1); device_printf(sc->dev, "SCB timeout: 0x%x 0x%x 0x%x 0x%x\n", CSR_READ_1(sc, FXP_CSR_SCB_COMMAND), CSR_READ_1(sc, FXP_CSR_SCB_STATACK), CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS), flowctl.w); } } static void fxp_scb_cmd(struct fxp_softc *sc, int cmd) { if (cmd == FXP_SCB_COMMAND_CU_RESUME && sc->cu_resume_bug) { CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_CB_COMMAND_NOP); fxp_scb_wait(sc); } CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, cmd); } static void fxp_dma_wait(struct fxp_softc *sc, volatile uint16_t *status, bus_dma_tag_t dmat, bus_dmamap_t map) { int i = 10000; bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD); while (!(le16toh(*status) & FXP_CB_STATUS_C) && --i) { DELAY(2); bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD); } if (i == 0) device_printf(sc->dev, "DMA timeout\n"); } /* * Return identification string if this device is ours. */ static int fxp_probe(device_t dev) { uint16_t devid; uint8_t revid; struct fxp_ident *ident; if (pci_get_vendor(dev) == FXP_VENDORID_INTEL) { devid = pci_get_device(dev); revid = pci_get_revid(dev); for (ident = fxp_ident_table; ident->name != NULL; ident++) { if (ident->devid == devid && (ident->revid == revid || ident->revid == -1)) { device_set_desc(dev, ident->name); return (BUS_PROBE_DEFAULT); } } } return (ENXIO); } static void fxp_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { uint32_t *addr; if (error) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); addr = arg; *addr = segs->ds_addr; } static int fxp_attach(device_t dev) { struct fxp_softc *sc; struct fxp_cb_tx *tcbp; struct fxp_tx *txp; struct fxp_rx *rxp; struct ifnet *ifp; uint32_t val; uint16_t data, myea[ETHER_ADDR_LEN / 2]; u_char eaddr[ETHER_ADDR_LEN]; int i, pmc, prefer_iomap; int error; error = 0; sc = device_get_softc(dev); sc->dev = dev; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->stat_ch, &sc->sc_mtx, 0); ifmedia_init(&sc->sc_media, 0, fxp_serial_ifmedia_upd, fxp_serial_ifmedia_sts); ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } /* * Enable bus mastering. */ pci_enable_busmaster(dev); val = pci_read_config(dev, PCIR_COMMAND, 2); /* * Figure out which we should try first - memory mapping or i/o mapping? * We default to memory mapping. Then we accept an override from the * command line. Then we check to see which one is enabled. */ prefer_iomap = 0; resource_int_value(device_get_name(dev), device_get_unit(dev), "prefer_iomap", &prefer_iomap); if (prefer_iomap) sc->fxp_spec = fxp_res_spec_io; else sc->fxp_spec = fxp_res_spec_mem; error = bus_alloc_resources(dev, sc->fxp_spec, sc->fxp_res); if (error) { if (sc->fxp_spec == fxp_res_spec_mem) sc->fxp_spec = fxp_res_spec_io; else sc->fxp_spec = fxp_res_spec_mem; error = bus_alloc_resources(dev, sc->fxp_spec, sc->fxp_res); } if (error) { device_printf(dev, "could not allocate resources\n"); error = ENXIO; goto fail; } if (bootverbose) { device_printf(dev, "using %s space register mapping\n", sc->fxp_spec == fxp_res_spec_mem ? "memory" : "I/O"); } /* * Reset to a stable state. 
CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET); DELAY(10); /* * Find out how large of an SEEPROM we have. */ fxp_autosize_eeprom(sc); /* * Find out the chip revision; lump all 82557 revs together. */ fxp_read_eeprom(sc, &data, 5, 1); if ((data >> 8) == 1) sc->revision = FXP_REV_82557; else sc->revision = pci_get_revid(dev); /* * Check availability of WOL. 82559ER does not support WOL. */ if (sc->revision >= FXP_REV_82558_A4 && sc->revision != FXP_REV_82559S_A) { fxp_read_eeprom(sc, &data, 10, 1); if ((data & 0x20) != 0 && pci_find_extcap(sc->dev, PCIY_PMG, &pmc) == 0) sc->flags |= FXP_FLAG_WOLCAP; } /* * Determine whether we must use the 503 serial interface. */ fxp_read_eeprom(sc, &data, 6, 1); if (sc->revision == FXP_REV_82557 && (data & FXP_PHY_DEVICE_MASK) != 0 && (data & FXP_PHY_SERIAL_ONLY)) sc->flags |= FXP_FLAG_SERIAL_MEDIA; SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "int_delay", CTLTYPE_INT | CTLFLAG_RW, &sc->tunable_int_delay, 0, sysctl_hw_fxp_int_delay, "I", "FXP driver receive interrupt microcode bundling delay"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "bundle_max", CTLTYPE_INT | CTLFLAG_RW, &sc->tunable_bundle_max, 0, sysctl_hw_fxp_bundle_max, "I", "FXP driver receive interrupt microcode bundle size limit"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rnr", CTLFLAG_RD, &sc->rnr, 0, "FXP RNR events"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "noflow", CTLFLAG_RW, &sc->tunable_noflow, 0, "FXP flow control disabled"); /* * Pull in device tunables. */ sc->tunable_int_delay = TUNABLE_INT_DELAY; sc->tunable_bundle_max = TUNABLE_BUNDLE_MAX; sc->tunable_noflow = 1; (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "int_delay", &sc->tunable_int_delay); (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "bundle_max", &sc->tunable_bundle_max); (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "noflow", &sc->tunable_noflow); sc->rnr = 0; /* * Enable workarounds for certain chip revision deficiencies. * * Systems based on the ICH2/ICH2-M chip from Intel, and possibly * some systems based on a normal 82559 design, have a defect where * the chip can cause a PCI protocol violation if it receives * a CU_RESUME command when it is entering the IDLE state. The * workaround is to disable Dynamic Standby Mode, so the chip never * deasserts CLKRUN#, and always remains in an active state. * * See Intel 82801BA/82801BAM Specification Update, Errata #30. */ i = pci_get_device(dev); if (i == 0x2449 || (i > 0x1030 && i < 0x1039) || sc->revision >= FXP_REV_82559_A0) { fxp_read_eeprom(sc, &data, 10, 1); if (data & 0x02) { /* STB enable */ uint16_t cksum; int i; device_printf(dev, "Disabling dynamic standby mode in EEPROM\n"); data &= ~0x02; fxp_write_eeprom(sc, &data, 10, 1); device_printf(dev, "New EEPROM ID: 0x%x\n", data); cksum = 0; for (i = 0; i < (1 << sc->eeprom_size) - 1; i++) { fxp_read_eeprom(sc, &data, i, 1); cksum += data; } i = (1 << sc->eeprom_size) - 1; cksum = 0xBABA - cksum; fxp_read_eeprom(sc, &data, i, 1); fxp_write_eeprom(sc, &cksum, i, 1); device_printf(dev, "EEPROM checksum @ 0x%x: 0x%x -> 0x%x\n", i, data, cksum); #if 1 /* * If the user elects to continue, try the software * workaround, as it is better than nothing.
*/ sc->flags |= FXP_FLAG_CU_RESUME_BUG; #endif } } /* * If we are not a 82557 chip, we can enable extended features. */ if (sc->revision != FXP_REV_82557) { /* * If MWI is enabled in the PCI configuration, and there * is a valid cacheline size (8 or 16 dwords), then tell * the board to turn on MWI. */ if (val & PCIM_CMD_MWRICEN && pci_read_config(dev, PCIR_CACHELNSZ, 1) != 0) sc->flags |= FXP_FLAG_MWI_ENABLE; /* turn on the extended TxCB feature */ sc->flags |= FXP_FLAG_EXT_TXCB; /* enable reception of long frames for VLAN */ sc->flags |= FXP_FLAG_LONG_PKT_EN; } else { /* a hack to get long VLAN frames on a 82557 */ sc->flags |= FXP_FLAG_SAVE_BAD; } /* For 82559 or later chips, Rx checksum offload is supported. */ if (sc->revision >= FXP_REV_82559_A0) sc->flags |= FXP_FLAG_82559_RXCSUM; /* * Enable use of extended RFDs and TCBs for 82550 * and later chips. Note: we need extended TXCB support * too, but that's already enabled by the code above. * Be careful to do this only on the right devices. */ if (sc->revision == FXP_REV_82550 || sc->revision == FXP_REV_82550_C || sc->revision == FXP_REV_82551_E || sc->revision == FXP_REV_82551_F || sc->revision == FXP_REV_82551_10) { sc->rfa_size = sizeof (struct fxp_rfa); sc->tx_cmd = FXP_CB_COMMAND_IPCBXMIT; sc->flags |= FXP_FLAG_EXT_RFA; /* Use extended RFA instead of 82559 checksum mode. */ sc->flags &= ~FXP_FLAG_82559_RXCSUM; } else { sc->rfa_size = sizeof (struct fxp_rfa) - FXP_RFAX_LEN; sc->tx_cmd = FXP_CB_COMMAND_XMIT; } /* * Allocate DMA tags and DMA safe memory. */ sc->maxtxseg = FXP_NTXSEG; sc->maxsegsize = MCLBYTES; if (sc->flags & FXP_FLAG_EXT_RFA) { sc->maxtxseg--; sc->maxsegsize = FXP_TSO_SEGSIZE; } error = bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sc->maxsegsize * sc->maxtxseg + sizeof(struct ether_vlan_header), sc->maxtxseg, sc->maxsegsize, 0, busdma_lock_mutex, &Giant, &sc->fxp_mtag); if (error) { device_printf(dev, "could not allocate dma tag\n"); goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct fxp_stats), 1, sizeof(struct fxp_stats), 0, busdma_lock_mutex, &Giant, &sc->fxp_stag); if (error) { device_printf(dev, "could not allocate dma tag\n"); goto fail; } error = bus_dmamem_alloc(sc->fxp_stag, (void **)&sc->fxp_stats, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->fxp_smap); if (error) goto fail; error = bus_dmamap_load(sc->fxp_stag, sc->fxp_smap, sc->fxp_stats, sizeof(struct fxp_stats), fxp_dma_map_addr, &sc->stats_addr, 0); if (error) { device_printf(dev, "could not map the stats buffer\n"); goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, FXP_TXCB_SZ, 1, FXP_TXCB_SZ, 0, busdma_lock_mutex, &Giant, &sc->cbl_tag); if (error) { device_printf(dev, "could not allocate dma tag\n"); goto fail; } error = bus_dmamem_alloc(sc->cbl_tag, (void **)&sc->fxp_desc.cbl_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->cbl_map); if (error) goto fail; error = bus_dmamap_load(sc->cbl_tag, sc->cbl_map, sc->fxp_desc.cbl_list, FXP_TXCB_SZ, fxp_dma_map_addr, &sc->fxp_desc.cbl_addr, 0); if (error) { device_printf(dev, "could not map DMA memory\n"); goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct fxp_cb_mcs), 1, sizeof(struct fxp_cb_mcs), 0, busdma_lock_mutex, &Giant, &sc->mcs_tag); if (error) { device_printf(dev, "could not allocate dma tag\n"); goto fail; } error 
= bus_dmamem_alloc(sc->mcs_tag, (void **)&sc->mcsp, BUS_DMA_NOWAIT, &sc->mcs_map); if (error) goto fail; error = bus_dmamap_load(sc->mcs_tag, sc->mcs_map, sc->mcsp, sizeof(struct fxp_cb_mcs), fxp_dma_map_addr, &sc->mcs_addr, 0); if (error) { device_printf(dev, "can't map the multicast setup command\n"); goto fail; } /* * Pre-allocate the TX DMA maps and setup the pointers to * the TX command blocks. */ txp = sc->fxp_desc.tx_list; tcbp = sc->fxp_desc.cbl_list; for (i = 0; i < FXP_NTXCB; i++) { txp[i].tx_cb = tcbp + i; error = bus_dmamap_create(sc->fxp_mtag, 0, &txp[i].tx_map); if (error) { device_printf(dev, "can't create DMA map for TX\n"); goto fail; } } error = bus_dmamap_create(sc->fxp_mtag, 0, &sc->spare_map); if (error) { device_printf(dev, "can't create spare DMA map\n"); goto fail; } /* * Pre-allocate our receive buffers. */ sc->fxp_desc.rx_head = sc->fxp_desc.rx_tail = NULL; for (i = 0; i < FXP_NRFABUFS; i++) { rxp = &sc->fxp_desc.rx_list[i]; error = bus_dmamap_create(sc->fxp_mtag, 0, &rxp->rx_map); if (error) { device_printf(dev, "can't create DMA map for RX\n"); goto fail; } if (fxp_new_rfabuf(sc, rxp) != 0) { error = ENOMEM; goto fail; } fxp_add_rfabuf(sc, rxp); } /* * Read MAC address. */ fxp_read_eeprom(sc, myea, 0, 3); eaddr[0] = myea[0] & 0xff; eaddr[1] = myea[0] >> 8; eaddr[2] = myea[1] & 0xff; eaddr[3] = myea[1] >> 8; eaddr[4] = myea[2] & 0xff; eaddr[5] = myea[2] >> 8; if (bootverbose) { device_printf(dev, "PCI IDs: %04x %04x %04x %04x %04x\n", pci_get_vendor(dev), pci_get_device(dev), pci_get_subvendor(dev), pci_get_subdevice(dev), pci_get_revid(dev)); fxp_read_eeprom(sc, &data, 10, 1); device_printf(dev, "Dynamic Standby mode is %s\n", data & 0x02 ? "enabled" : "disabled"); } /* * If this is only a 10Mbps device, then there is no MII, and * the PHY will use a serial interface instead. * * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter * doesn't have a programming interface of any sort. The * media is sensed automatically based on how the link partner * is configured. This is, in essence, manual configuration. */ if (sc->flags & FXP_FLAG_SERIAL_MEDIA) { ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL); ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL); } else { if (mii_phy_probe(dev, &sc->miibus, fxp_ifmedia_upd, fxp_ifmedia_sts)) { device_printf(dev, "MII without any PHY!\n"); error = ENXIO; goto fail; } } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_init = fxp_init; ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = fxp_ioctl; ifp->if_start = fxp_start; ifp->if_capabilities = ifp->if_capenable = 0; /* Enable checksum offload/TSO for 82550 or better chips */ if (sc->flags & FXP_FLAG_EXT_RFA) { ifp->if_hwassist = FXP_CSUM_FEATURES | CSUM_TSO; ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4; ifp->if_capenable |= IFCAP_HWCSUM | IFCAP_TSO4; } if (sc->flags & FXP_FLAG_82559_RXCSUM) { ifp->if_capabilities |= IFCAP_RXCSUM; ifp->if_capenable |= IFCAP_RXCSUM; } if (sc->flags & FXP_FLAG_WOLCAP) { ifp->if_capabilities |= IFCAP_WOL_MAGIC; ifp->if_capenable |= IFCAP_WOL_MAGIC; } #ifdef DEVICE_POLLING /* Inform the world we support polling. */ ifp->if_capabilities |= IFCAP_POLLING; #endif /* * Attach the interface. */ ether_ifattach(ifp, eaddr); /* * Tell the upper layer(s) we support long frames. * Must appear after the call to ether_ifattach() because * ether_ifattach() sets ifi_hdrlen to the default value. 
*/ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_VLAN_MTU; ifp->if_capenable |= IFCAP_VLAN_MTU; /* the hw bits already set */ if ((sc->flags & FXP_FLAG_EXT_RFA) != 0) { ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM; ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM; } /* * Let the system queue as many packets as we have available * TX descriptors. */ IFQ_SET_MAXLEN(&ifp->if_snd, FXP_NTXCB - 1); ifp->if_snd.ifq_drv_maxlen = FXP_NTXCB - 1; IFQ_SET_READY(&ifp->if_snd); /* * Hook our interrupt after all initialization is complete. */ error = bus_setup_intr(dev, sc->fxp_res[1], INTR_TYPE_NET | INTR_MPSAFE, NULL, fxp_intr, sc, &sc->ih); if (error) { device_printf(dev, "could not setup irq\n"); ether_ifdetach(sc->ifp); goto fail; } /* * Configure hardware to reject magic frames, otherwise the * system will hang on receipt of magic frames. */ if ((sc->flags & FXP_FLAG_WOLCAP) != 0) { FXP_LOCK(sc); /* Clear wakeup events. */ CSR_WRITE_1(sc, FXP_CSR_PMDR, CSR_READ_1(sc, FXP_CSR_PMDR)); fxp_init_body(sc); fxp_stop(sc); FXP_UNLOCK(sc); } fail: if (error) fxp_release(sc); return (error); } /* * Release all resources. The softc lock should not be held and the * interrupt should already be torn down. */ static void fxp_release(struct fxp_softc *sc) { struct fxp_rx *rxp; struct fxp_tx *txp; int i; FXP_LOCK_ASSERT(sc, MA_NOTOWNED); KASSERT(sc->ih == NULL, ("fxp_release() called with intr handle still active")); if (sc->miibus) device_delete_child(sc->dev, sc->miibus); bus_generic_detach(sc->dev); ifmedia_removeall(&sc->sc_media); if (sc->fxp_desc.cbl_list) { bus_dmamap_unload(sc->cbl_tag, sc->cbl_map); bus_dmamem_free(sc->cbl_tag, sc->fxp_desc.cbl_list, sc->cbl_map); } if (sc->fxp_stats) { bus_dmamap_unload(sc->fxp_stag, sc->fxp_smap); bus_dmamem_free(sc->fxp_stag, sc->fxp_stats, sc->fxp_smap); } if (sc->mcsp) { bus_dmamap_unload(sc->mcs_tag, sc->mcs_map); bus_dmamem_free(sc->mcs_tag, sc->mcsp, sc->mcs_map); } bus_release_resources(sc->dev, sc->fxp_spec, sc->fxp_res); if (sc->fxp_mtag) { for (i = 0; i < FXP_NRFABUFS; i++) { rxp = &sc->fxp_desc.rx_list[i]; if (rxp->rx_mbuf != NULL) { bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map); m_freem(rxp->rx_mbuf); } bus_dmamap_destroy(sc->fxp_mtag, rxp->rx_map); } bus_dmamap_destroy(sc->fxp_mtag, sc->spare_map); for (i = 0; i < FXP_NTXCB; i++) { txp = &sc->fxp_desc.tx_list[i]; if (txp->tx_mbuf != NULL) { bus_dmamap_sync(sc->fxp_mtag, txp->tx_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->fxp_mtag, txp->tx_map); m_freem(txp->tx_mbuf); } bus_dmamap_destroy(sc->fxp_mtag, txp->tx_map); } bus_dma_tag_destroy(sc->fxp_mtag); } if (sc->fxp_stag) bus_dma_tag_destroy(sc->fxp_stag); if (sc->cbl_tag) bus_dma_tag_destroy(sc->cbl_tag); if (sc->mcs_tag) bus_dma_tag_destroy(sc->mcs_tag); if (sc->ifp) if_free(sc->ifp); mtx_destroy(&sc->sc_mtx); } /* * Detach interface. */ static int fxp_detach(device_t dev) { struct fxp_softc *sc = device_get_softc(dev); #ifdef DEVICE_POLLING if (sc->ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(sc->ifp); #endif FXP_LOCK(sc); sc->suspended = 1; /* Do the same thing as we do for suspend */ /* * Stop DMA and drop transmit queue, but disable interrupts first. */ CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE); fxp_stop(sc); FXP_UNLOCK(sc); callout_drain(&sc->stat_ch); /* * Close down routes etc. */ ether_ifdetach(sc->ifp); /* * Unhook interrupt before dropping lock.
This is to prevent * races with fxp_intr(). */ bus_teardown_intr(sc->dev, sc->fxp_res[1], sc->ih); sc->ih = NULL; /* Release our allocated resources. */ fxp_release(sc); return (0); } /* * Device shutdown routine. Called at system shutdown after sync. The * main purpose of this routine is to shut off receiver DMA so that * kernel memory doesn't get clobbered during warmboot. */ static int fxp_shutdown(device_t dev) { /* * Make sure that DMA is disabled prior to reboot. Not doing * so could allow DMA to corrupt kernel memory during the * reboot before the driver initializes. */ return (fxp_suspend(dev)); } /* * Device suspend routine. Stop the interface and save some PCI * settings in case the BIOS doesn't restore them properly on * resume. */ static int fxp_suspend(device_t dev) { struct fxp_softc *sc = device_get_softc(dev); struct ifnet *ifp; int pmc; uint16_t pmstat; FXP_LOCK(sc); ifp = sc->ifp; if (pci_find_extcap(sc->dev, PCIY_PMG, &pmc) == 0) { pmstat = pci_read_config(sc->dev, pmc + PCIR_POWER_STATUS, 2); pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) { /* Request PME. */ pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; sc->flags |= FXP_FLAG_WOL; /* Reconfigure hardware to accept magic frames. */ fxp_init_body(sc); } pci_write_config(sc->dev, pmc + PCIR_POWER_STATUS, pmstat, 2); } fxp_stop(sc); sc->suspended = 1; FXP_UNLOCK(sc); return (0); } /* * Device resume routine. Re-enable busmastering and restart the interface if * appropriate. */ static int fxp_resume(device_t dev) { struct fxp_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->ifp; int pmc; uint16_t pmstat; FXP_LOCK(sc); if (pci_find_extcap(sc->dev, PCIY_PMG, &pmc) == 0) { sc->flags &= ~FXP_FLAG_WOL; pmstat = pci_read_config(sc->dev, pmc + PCIR_POWER_STATUS, 2); /* Disable PME and clear PME status. */ pmstat &= ~PCIM_PSTAT_PMEENABLE; pci_write_config(sc->dev, pmc + PCIR_POWER_STATUS, pmstat, 2); if ((sc->flags & FXP_FLAG_WOLCAP) != 0) CSR_WRITE_1(sc, FXP_CSR_PMDR, CSR_READ_1(sc, FXP_CSR_PMDR)); } CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET); DELAY(10); /* reinitialize interface if necessary */ if (ifp->if_flags & IFF_UP) fxp_init_body(sc); sc->suspended = 0; FXP_UNLOCK(sc); return (0); } static void fxp_eeprom_shiftin(struct fxp_softc *sc, int data, int length) { uint16_t reg; int x; /* * Shift in data. */ for (x = 1 << (length - 1); x; x >>= 1) { if (data & x) reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI; else reg = FXP_EEPROM_EECS; CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); DELAY(1); CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK); DELAY(1); CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); DELAY(1); } } /* * Read from the serial EEPROM. Basically, you manually shift in * the read opcode (one bit at a time) and then shift in the address, * and then you shift out the data (all of this one bit at a time). * The word size is 16 bits, so you have to provide the address for * every 16 bits of data. */ static uint16_t fxp_eeprom_getword(struct fxp_softc *sc, int offset, int autosize) { uint16_t reg, data; int x; CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); /* * Shift in read opcode. */ fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_READ, 3); /* * Shift in address.
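* The address is clocked out msb-first. As a hypothetical example, a 64-word part has eeprom_size 6, so offset 0x2a (binary 101010) goes out as 1,0,1,0,1,0. The same loop doubles as the autosizer: the EEPROM drives EEDO low after the last address bit it accepts (see the NetBSD note below).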
*/ data = 0; for (x = 1 << (sc->eeprom_size - 1); x; x >>= 1) { if (offset & x) reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI; else reg = FXP_EEPROM_EECS; CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); DELAY(1); CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK); DELAY(1); CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); DELAY(1); reg = CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO; data++; if (autosize && reg == 0) { sc->eeprom_size = data; break; } } /* * Shift out data. */ data = 0; reg = FXP_EEPROM_EECS; for (x = 1 << 15; x; x >>= 1) { CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK); DELAY(1); if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO) data |= x; CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); DELAY(1); } CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); DELAY(1); return (data); } static void fxp_eeprom_putword(struct fxp_softc *sc, int offset, uint16_t data) { int i; /* * Erase/write enable. */ CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); fxp_eeprom_shiftin(sc, 0x4, 3); fxp_eeprom_shiftin(sc, 0x03 << (sc->eeprom_size - 2), sc->eeprom_size); CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); DELAY(1); /* * Shift in write opcode, address, data. */ CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_WRITE, 3); fxp_eeprom_shiftin(sc, offset, sc->eeprom_size); fxp_eeprom_shiftin(sc, data, 16); CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); DELAY(1); /* * Wait for EEPROM to finish up. */ CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); DELAY(1); for (i = 0; i < 1000; i++) { if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO) break; DELAY(50); } CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); DELAY(1); /* * Erase/write disable. */ CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); fxp_eeprom_shiftin(sc, 0x4, 3); fxp_eeprom_shiftin(sc, 0, sc->eeprom_size); CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); DELAY(1); } /* * From NetBSD: * * Figure out EEPROM size. * * 559's can have either 64-word or 256-word EEPROMs, the 558 * datasheet only talks about 64-word EEPROMs, and the 557 datasheet * talks about the existence of 16 to 256 word EEPROMs. * * The only known sizes are 64 and 256, where the 256 version is used * by CardBus cards to store CIS information. * * The address is shifted in msb-to-lsb, and after the last * address-bit the EEPROM is supposed to output a `dummy zero' bit, * after which follows the actual data. We try to detect this zero by * probing the data-out bit in the EEPROM control register just after * having shifted in a bit. If the bit is zero, we assume we've * shifted enough address bits. The data-out should be tri-state * before this, which should translate to a logical one. */ static void fxp_autosize_eeprom(struct fxp_softc *sc) { /* guess maximum size of 256 words (2^8) */ sc->eeprom_size = 8; /* autosize */ (void) fxp_eeprom_getword(sc, 0, 1); } static void fxp_read_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words) { int i; for (i = 0; i < words; i++) data[i] = fxp_eeprom_getword(sc, offset + i, 0); } static void fxp_write_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words) { int i; for (i = 0; i < words; i++) fxp_eeprom_putword(sc, offset + i, data[i]); } /* * Grab the softc lock and call the real fxp_start_body() routine. */ static void fxp_start(struct ifnet *ifp) { struct fxp_softc *sc = ifp->if_softc; FXP_LOCK(sc); fxp_start_body(ifp); FXP_UNLOCK(sc); } /* * Start packet transmission on the interface.
* This routine must be called with the softc lock held, and is an * internal entry point only. */ static void fxp_start_body(struct ifnet *ifp) { struct fxp_softc *sc = ifp->if_softc; struct mbuf *mb_head; int txqueued; FXP_LOCK_ASSERT(sc, MA_OWNED); /* * See if we need to suspend xmit until the multicast filter * has been reprogrammed (which can only be done at the head * of the command chain). */ if (sc->need_mcsetup) return; if (sc->tx_queued > FXP_NTXCB_HIWAT) fxp_txeof(sc); /* * We're finished if there is nothing more to add to the list or if * we're all filled up with buffers to transmit. * NOTE: One TxCB is reserved to guarantee that fxp_mc_setup() can add * a NOP command when needed. */ txqueued = 0; while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->tx_queued < FXP_NTXCB - 1) { /* * Grab a packet to transmit. */ IFQ_DRV_DEQUEUE(&ifp->if_snd, mb_head); if (mb_head == NULL) break; if (fxp_encap(sc, &mb_head)) { if (mb_head == NULL) break; IFQ_DRV_PREPEND(&ifp->if_snd, mb_head); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } txqueued++; /* * Pass packet to bpf if there is a listener. */ BPF_MTAP(ifp, mb_head); } /* * We're finished. If we added to the list, issue a RESUME to get DMA * going again if suspended. */ if (txqueued > 0) { bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE); fxp_scb_wait(sc); fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME); /* * Set a 5 second timer just in case we don't hear * from the card again. */ sc->watchdog_timer = 5; } } static int fxp_encap(struct fxp_softc *sc, struct mbuf **m_head) { struct ifnet *ifp; struct mbuf *m; struct fxp_tx *txp; struct fxp_cb_tx *cbp; struct tcphdr *tcp; bus_dma_segment_t segs[FXP_NTXSEG]; int error, i, nseg, tcp_payload; FXP_LOCK_ASSERT(sc, MA_OWNED); ifp = sc->ifp; tcp_payload = 0; tcp = NULL; /* * Get pointer to next available tx desc. */ txp = sc->fxp_desc.tx_last->tx_next; /* * A note in Appendix B of the Intel 8255x 10/100 Mbps * Ethernet Controller Family Open Source Software * Developer Manual says: * Using software parsing is only allowed with legal * TCP/IP or UDP/IP packets. * ... * For all other datagrams, hardware parsing must * be used. * Software parsing appears to truncate ICMP and * fragmented UDP packets that contain one to three * bytes in the second (and final) mbuf of the packet. */ if (sc->flags & FXP_FLAG_EXT_RFA) txp->tx_cb->ipcb_ip_activation_high = FXP_IPCB_HARDWAREPARSING_ENABLE; m = *m_head; /* * Deal with TCP/IP checksum offload. Note that * in order for TCP checksum offload to work, * the pseudo header checksum must have already * been computed and stored in the checksum field * in the TCP header. The stack should have * already done this for us. */ if (m->m_pkthdr.csum_flags & FXP_CSUM_FEATURES) { txp->tx_cb->ipcb_ip_schedule = FXP_IPCB_TCPUDP_CHECKSUM_ENABLE; if (m->m_pkthdr.csum_flags & CSUM_TCP) txp->tx_cb->ipcb_ip_schedule |= FXP_IPCB_TCP_PACKET; #ifdef FXP_IP_CSUM_WAR /* * XXX The 82550 chip appears to have trouble * dealing with IP header checksums in very small * datagrams, namely fragments from 1 to 3 bytes * in size. For example, say you want to transmit * a UDP packet of 1473 bytes. The packet will be * fragmented over two IP datagrams, the latter * containing only one byte of data. The 82550 will * botch the header checksum on the 1-byte fragment. * As long as the datagram contains 4 or more bytes * of data, you're ok.
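* * (To spell out the example above: 1473 bytes of UDP payload plus the 8-byte UDP header is 1481 bytes of IP payload; with a 1500-byte MTU the first fragment carries 1480 bytes and the second carries the single remaining byte, squarely in the 1-3 byte range the chip mishandles.)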
* * The following code attempts to work around this * problem: if the datagram is less than 38 bytes * in size (14 bytes ether header, 20 bytes IP header, * plus 4 bytes of data), we punt and compute the IP * header checksum by hand. This workaround doesn't * work very well, however, since it can be fooled * by things like VLAN tags and IP options that make * the header sizes/offsets vary. */ if (m->m_pkthdr.csum_flags & CSUM_IP) { if (m->m_pkthdr.len < 38) { struct ip *ip; m->m_data += ETHER_HDR_LEN; ip = mtod(m, struct ip *); ip->ip_sum = in_cksum(m, ip->ip_hl << 2); m->m_data -= ETHER_HDR_LEN; m->m_pkthdr.csum_flags &= ~CSUM_IP; } else { txp->tx_cb->ipcb_ip_activation_high = FXP_IPCB_HARDWAREPARSING_ENABLE; txp->tx_cb->ipcb_ip_schedule |= FXP_IPCB_IP_CHECKSUM_ENABLE; } } #endif } if (m->m_pkthdr.csum_flags & CSUM_TSO) { /* * The 82550/82551 requires that the ethernet/IP/TCP headers * be contained in the first active transmit buffer. */ struct ether_header *eh; struct ip *ip; uint32_t ip_off, poff; if (M_WRITABLE(*m_head) == 0) { /* Get a writable copy. */ m = m_dup(*m_head, M_DONTWAIT); m_freem(*m_head); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } *m_head = m; } ip_off = sizeof(struct ether_header); m = m_pullup(*m_head, ip_off); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } eh = mtod(m, struct ether_header *); /* Check for the existence of a VLAN tag. */ if (eh->ether_type == htons(ETHERTYPE_VLAN)) { ip_off = sizeof(struct ether_vlan_header); m = m_pullup(m, ip_off); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } } m = m_pullup(m, ip_off + sizeof(struct ip)); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } ip = (struct ip *)(mtod(m, char *) + ip_off); poff = ip_off + (ip->ip_hl << 2); m = m_pullup(m, poff + sizeof(struct tcphdr)); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } tcp = (struct tcphdr *)(mtod(m, char *) + poff); m = m_pullup(m, poff + sizeof(struct tcphdr) + tcp->th_off); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } /* * Since the 82550/82551 doesn't modify the IP length and pseudo * checksum in the first frame, the driver should compute them. */ ip->ip_sum = 0; ip->ip_len = htons(m->m_pkthdr.tso_segsz + (ip->ip_hl << 2) + (tcp->th_off << 2)); tcp->th_sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP + (tcp->th_off << 2) + m->m_pkthdr.tso_segsz)); /* Compute total TCP payload. */ tcp_payload = m->m_pkthdr.len - ip_off - (ip->ip_hl << 2); tcp_payload -= tcp->th_off << 2; *m_head = m; } error = bus_dmamap_load_mbuf_sg(sc->fxp_mtag, txp->tx_map, *m_head, segs, &nseg, 0); if (error == EFBIG) { m = m_collapse(*m_head, M_DONTWAIT, sc->maxtxseg); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOMEM); } *m_head = m; error = bus_dmamap_load_mbuf_sg(sc->fxp_mtag, txp->tx_map, *m_head, segs, &nseg, 0); if (error != 0) { m_freem(*m_head); *m_head = NULL; return (ENOMEM); } } else if (error != 0) return (error); if (nseg == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } KASSERT(nseg <= sc->maxtxseg, ("too many DMA segments")); bus_dmamap_sync(sc->fxp_mtag, txp->tx_map, BUS_DMASYNC_PREWRITE); cbp = txp->tx_cb; for (i = 0; i < nseg; i++) { /* * If this is an 82550/82551, then we're using extended * TxCBs _and_ we're using checksum offload. This means * that the TxCB is really an IPCB.
One major difference * between the two is that with plain extended TxCBs, * the bottom half of the TxCB contains two entries from * the TBD array, whereas IPCBs contain just one entry: * one entry (8 bytes) has been sacrificed for the TCP/IP * checksum offload control bits. So to make things work * right, we have to start filling in the TBD array * starting from a different place depending on whether * the chip is an 82550/82551 or not. */ if (sc->flags & FXP_FLAG_EXT_RFA) { cbp->tbd[i + 1].tb_addr = htole32(segs[i].ds_addr); cbp->tbd[i + 1].tb_size = htole32(segs[i].ds_len); } else { cbp->tbd[i].tb_addr = htole32(segs[i].ds_addr); cbp->tbd[i].tb_size = htole32(segs[i].ds_len); } } if (sc->flags & FXP_FLAG_EXT_RFA) { /* Configure dynamic TBD for 82550/82551. */ cbp->tbd_number = 0xFF; cbp->tbd[nseg].tb_size |= htole32(0x8000); } else cbp->tbd_number = nseg; /* Configure TSO. */ if (m->m_pkthdr.csum_flags & CSUM_TSO) { cbp->tbd[-1].tb_size = htole32(m->m_pkthdr.tso_segsz << 16); cbp->tbd[1].tb_size |= htole32(tcp_payload << 16); cbp->ipcb_ip_schedule |= FXP_IPCB_LARGESEND_ENABLE | FXP_IPCB_IP_CHECKSUM_ENABLE | FXP_IPCB_TCP_PACKET | FXP_IPCB_TCPUDP_CHECKSUM_ENABLE; } /* Configure VLAN hardware tag insertion. */ if ((m->m_flags & M_VLANTAG) != 0) { cbp->ipcb_vlan_id = htons(m->m_pkthdr.ether_vtag); txp->tx_cb->ipcb_ip_activation_high |= FXP_IPCB_INSERTVLAN_ENABLE; } txp->tx_mbuf = m; txp->tx_cb->cb_status = 0; txp->tx_cb->byte_count = 0; if (sc->tx_queued != FXP_CXINT_THRESH - 1) txp->tx_cb->cb_command = htole16(sc->tx_cmd | FXP_CB_COMMAND_SF | FXP_CB_COMMAND_S); else txp->tx_cb->cb_command = htole16(sc->tx_cmd | FXP_CB_COMMAND_SF | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I); if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) txp->tx_cb->tx_threshold = tx_threshold; /* * Advance the end of list forward. */ #ifdef __alpha__ /* * On platforms which can't access memory in 16-bit * granularities, we must prevent the card from DMA'ing * up the status while we update the command field. * This could cause us to overwrite the completion status. * XXX This is probably bogus and we're _not_ looking * for atomicity here. */ atomic_clear_16(&sc->fxp_desc.tx_last->tx_cb->cb_command, htole16(FXP_CB_COMMAND_S)); #else sc->fxp_desc.tx_last->tx_cb->cb_command &= htole16(~FXP_CB_COMMAND_S); #endif /*__alpha__*/ sc->fxp_desc.tx_last = txp; /* * Advance the beginning of the list forward if there are * no other packets queued (when nothing is queued, tx_first * sits on the last TxCB that was sent out). */ if (sc->tx_queued == 0) sc->fxp_desc.tx_first = txp; sc->tx_queued++; return (0); } #ifdef DEVICE_POLLING static poll_handler_t fxp_poll; -static void +static int fxp_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct fxp_softc *sc = ifp->if_softc; uint8_t statack; + int rx_npkts = 0; FXP_LOCK(sc); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { FXP_UNLOCK(sc); - return; + return (rx_npkts); } statack = FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA | FXP_SCB_STATACK_FR; if (cmd == POLL_AND_CHECK_STATUS) { uint8_t tmp; tmp = CSR_READ_1(sc, FXP_CSR_SCB_STATACK); if (tmp == 0xff || tmp == 0) { FXP_UNLOCK(sc); - return; /* nothing to do */ + return (rx_npkts); /* nothing to do */ } tmp &= ~statack; /* ack what we can */ if (tmp != 0) CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, tmp); statack |= tmp; } - fxp_intr_body(sc, ifp, statack, count); + rx_npkts = fxp_intr_body(sc, ifp, statack, count); FXP_UNLOCK(sc); + return (rx_npkts); } #endif /* DEVICE_POLLING */ /* * Process interface interrupts. 
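* fxp_intr() acks whatever SCB status bits are set and hands them to fxp_intr_body() with count == -1, i.e. no RX quota; the DEVICE_POLLING path above passes its quota instead and, with this change, returns the number of received frames via rx_npkts.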
*/ static void fxp_intr(void *xsc) { struct fxp_softc *sc = xsc; struct ifnet *ifp = sc->ifp; uint8_t statack; FXP_LOCK(sc); if (sc->suspended) { FXP_UNLOCK(sc); return; } #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { FXP_UNLOCK(sc); return; } #endif while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) { /* * It should not be possible to have all bits set; the * FXP_SCB_INTR_SWI bit always returns 0 on a read. If * all bits are set, this may indicate that the card has * been physically ejected, so ignore it. */ if (statack == 0xff) { FXP_UNLOCK(sc); return; } /* * First ACK all the interrupts in this pass. */ CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack); fxp_intr_body(sc, ifp, statack, -1); } FXP_UNLOCK(sc); } static void fxp_txeof(struct fxp_softc *sc) { struct ifnet *ifp; struct fxp_tx *txp; ifp = sc->ifp; bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREREAD); for (txp = sc->fxp_desc.tx_first; sc->tx_queued && (le16toh(txp->tx_cb->cb_status) & FXP_CB_STATUS_C) != 0; txp = txp->tx_next) { if (txp->tx_mbuf != NULL) { bus_dmamap_sync(sc->fxp_mtag, txp->tx_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->fxp_mtag, txp->tx_map); m_freem(txp->tx_mbuf); txp->tx_mbuf = NULL; /* clear this to reset csum offload bits */ txp->tx_cb->tbd[0].tb_addr = 0; } sc->tx_queued--; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } sc->fxp_desc.tx_first = txp; bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE); if (sc->tx_queued == 0) { sc->watchdog_timer = 0; if (sc->need_mcsetup) fxp_mc_setup(sc); } } static void fxp_rxcsum(struct fxp_softc *sc, struct ifnet *ifp, struct mbuf *m, uint16_t status, int pos) { struct ether_header *eh; struct ip *ip; struct udphdr *uh; int32_t hlen, len, pktlen, temp32; uint16_t csum, *opts; if ((sc->flags & FXP_FLAG_82559_RXCSUM) == 0) { if ((status & FXP_RFA_STATUS_PARSE) != 0) { if (status & FXP_RFDX_CS_IP_CSUM_BIT_VALID) m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if (status & FXP_RFDX_CS_IP_CSUM_VALID) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; if ((status & FXP_RFDX_CS_TCPUDP_CSUM_BIT_VALID) && (status & FXP_RFDX_CS_TCPUDP_CSUM_VALID)) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } return; } pktlen = m->m_pkthdr.len; if (pktlen < sizeof(struct ether_header) + sizeof(struct ip)) return; eh = mtod(m, struct ether_header *); if (eh->ether_type != htons(ETHERTYPE_IP)) return; ip = (struct ip *)(eh + 1); if (ip->ip_v != IPVERSION) return; hlen = ip->ip_hl << 2; pktlen -= sizeof(struct ether_header); if (hlen < sizeof(struct ip)) return; if (ntohs(ip->ip_len) < hlen) return; if (ntohs(ip->ip_len) != pktlen) return; if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) return; /* can't handle fragmented packet */ switch (ip->ip_p) { case IPPROTO_TCP: if (pktlen < (hlen + sizeof(struct tcphdr))) return; break; case IPPROTO_UDP: if (pktlen < (hlen + sizeof(struct udphdr))) return; uh = (struct udphdr *)((caddr_t)ip + hlen); if (uh->uh_sum == 0) return; /* no checksum */ break; default: return; } /* Extract computed checksum. 
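* The 82559 appends it to the received data, so it is read from offset `pos' (the adjusted frame length) rather than from a descriptor field.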
*/ csum = be16dec(mtod(m, char *) + pos); /* checksum fixup for IP options */ len = hlen - sizeof(struct ip); if (len > 0) { opts = (uint16_t *)(ip + 1); for (; len > 0; len -= sizeof(uint16_t), opts++) { temp32 = csum - *opts; temp32 = (temp32 >> 16) + (temp32 & 65535); csum = temp32 & 65535; } } m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; m->m_pkthdr.csum_data = csum; } -static void +static int fxp_intr_body(struct fxp_softc *sc, struct ifnet *ifp, uint8_t statack, int count) { struct mbuf *m; struct fxp_rx *rxp; struct fxp_rfa *rfa; int rnr = (statack & FXP_SCB_STATACK_RNR) ? 1 : 0; + int rx_npkts; uint16_t status; + rx_npkts = 0; FXP_LOCK_ASSERT(sc, MA_OWNED); + if (rnr) sc->rnr++; #ifdef DEVICE_POLLING /* Pick up a deferred RNR condition if `count' ran out last time. */ if (sc->flags & FXP_FLAG_DEFERRED_RNR) { sc->flags &= ~FXP_FLAG_DEFERRED_RNR; rnr = 1; } #endif /* * Free any finished transmit mbuf chains. * * Handle the CNA event like a CXTNO event. It used to * be that this event (control unit not ready) was not * encountered, but it is now with the SMPng modifications. * The exact sequence of events that occur when the interface * is brought up is different now, and if this event * goes unhandled, the configuration/rxfilter setup sequence * can stall for several seconds. The result is that no * packets go out onto the wire for about 5 to 10 seconds * after the interface is ifconfig'ed for the first time. */ if (statack & (FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA)) fxp_txeof(sc); /* * Try to start more packets transmitting. */ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) fxp_start_body(ifp); /* * Just return if nothing happened on the receive side. */ if (!rnr && (statack & FXP_SCB_STATACK_FR) == 0) - return; + return (rx_npkts); /* * Process receiver interrupts. If a no-resource (RNR) * condition exists, get whatever packets we can and * re-start the receiver. * * When using polling, we do not process the list to completion, * so when we get an RNR interrupt we must defer the restart * until we hit the last buffer with the C bit set. * If we run out of cycles and rfa_headm has the C bit set, * record the pending RNR in the FXP_FLAG_DEFERRED_RNR flag so * that the info will be used in the subsequent polling cycle. */ for (;;) { rxp = sc->fxp_desc.rx_head; m = rxp->rx_mbuf; rfa = (struct fxp_rfa *)(m->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE); bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map, BUS_DMASYNC_POSTREAD); #ifdef DEVICE_POLLING /* loop at most count times if count >=0 */ if (count >= 0 && count-- == 0) { if (rnr) { /* Defer RNR processing until the next time. */ sc->flags |= FXP_FLAG_DEFERRED_RNR; rnr = 0; } break; } #endif /* DEVICE_POLLING */ status = le16toh(rfa->rfa_status); if ((status & FXP_RFA_STATUS_C) == 0) break; /* * Advance head forward. */ sc->fxp_desc.rx_head = rxp->rx_next; /* * Add a new buffer to the receive chain. * If this fails, the old buffer is recycled * instead. */ if (fxp_new_rfabuf(sc, rxp) == 0) { int total_len; /* * Fetch packet length (the top 2 bits of * actual_size are flags set by the controller * upon completion), and drop the packet in case * of bogus length or CRC errors. */ total_len = le16toh(rfa->actual_size) & 0x3fff; if ((sc->flags & FXP_FLAG_82559_RXCSUM) != 0 && (ifp->if_capenable & IFCAP_RXCSUM) != 0) { /* Adjust for appended checksum bytes.
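* The controller counts the 2-byte RX checksum it appends in actual_size, so trim it off before the length checks below.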
*/ total_len -= 2; } if (total_len < sizeof(struct ether_header) || total_len > MCLBYTES - RFA_ALIGNMENT_FUDGE - sc->rfa_size || status & FXP_RFA_STATUS_CRC) { m_freem(m); continue; } m->m_pkthdr.len = m->m_len = total_len; m->m_pkthdr.rcvif = ifp; /* Do IP checksum checking. */ if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) fxp_rxcsum(sc, ifp, m, status, total_len); if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 && (status & FXP_RFA_STATUS_VLAN) != 0) { m->m_pkthdr.ether_vtag = ntohs(rfa->rfax_vlan_id); m->m_flags |= M_VLANTAG; } /* * Drop locks before calling if_input() since it * may re-enter fxp_start() in the netisr case. * This would result in a lock reversal. Better * performance might be obtained by chaining all * packets received, dropping the lock, and then * calling if_input() on each one. */ FXP_UNLOCK(sc); (*ifp->if_input)(ifp, m); FXP_LOCK(sc); + rx_npkts++; } else { /* Reuse RFA and loaded DMA map. */ ifp->if_iqdrops++; fxp_discard_rfabuf(sc, rxp); } fxp_add_rfabuf(sc, rxp); } if (rnr) { fxp_scb_wait(sc); CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.rx_head->rx_addr); fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START); } + return (rx_npkts); } /* * Update packet in/out/collision statistics. The i82557 doesn't * allow you to access these counters without doing a fairly * expensive DMA to get _all_ of the statistics it maintains, so * we do this operation here only once per second. The statistics * counters in the kernel are updated from the previous dump-stats * DMA and then a new dump-stats DMA is started. The on-chip * counters are zeroed when the DMA completes. If we can't start * the DMA immediately, we don't wait - we just prepare to read * them again next time. */ static void fxp_tick(void *xsc) { struct fxp_softc *sc = xsc; struct ifnet *ifp = sc->ifp; struct fxp_stats *sp = sc->fxp_stats; FXP_LOCK_ASSERT(sc, MA_OWNED); bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap, BUS_DMASYNC_POSTREAD); ifp->if_opackets += le32toh(sp->tx_good); ifp->if_collisions += le32toh(sp->tx_total_collisions); if (sp->rx_good) { ifp->if_ipackets += le32toh(sp->rx_good); sc->rx_idle_secs = 0; } else { /* * Receiver's been idle for another second. */ sc->rx_idle_secs++; } ifp->if_ierrors += le32toh(sp->rx_crc_errors) + le32toh(sp->rx_alignment_errors) + le32toh(sp->rx_rnr_errors) + le32toh(sp->rx_overrun_errors); /* * If any transmit underruns occurred, bump up the transmit * threshold by another 512 bytes (64 * 8). */ if (sp->tx_underruns) { ifp->if_oerrors += le32toh(sp->tx_underruns); if (tx_threshold < 192) tx_threshold += 64; } /* * Release any xmit buffers that have completed DMA. This isn't * strictly necessary to do here, but it's advantageous for mbufs * with external storage to be released in a timely manner rather * than being deferred for a potentially long time. This limits * the delay to a maximum of one second. */ fxp_txeof(sc); /* * If we haven't received any packets in FXP_MAX_RX_IDLE seconds, * then assume the receiver has locked up and attempt to clear * the condition by reprogramming the multicast filter. This is * a work-around for a bug in the 82557 where the receiver locks * up if it gets certain types of garbage in the synchronization * bits prior to the packet header. This bug is supposed to only * occur in 10Mbps mode, but has been seen to occur in 100Mbps * mode as well (perhaps due to a 10/100 speed transition). */ if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) { sc->rx_idle_secs = 0; fxp_mc_setup(sc); } /* * If there is no pending command, start another stats * dump.
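* (A pending command shows up as a non-zero SCB command byte; it reads back zero once the chip has accepted the previous command.)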
Otherwise punt for now. */ if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) { /* * Start another stats dump. */ bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap, BUS_DMASYNC_PREREAD); fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMPRESET); } else { /* * A previous command is still waiting to be accepted. * Just zero our copy of the stats and wait for the * next timer event to update them. */ sp->tx_good = 0; sp->tx_underruns = 0; sp->tx_total_collisions = 0; sp->rx_good = 0; sp->rx_crc_errors = 0; sp->rx_alignment_errors = 0; sp->rx_rnr_errors = 0; sp->rx_overrun_errors = 0; } if (sc->miibus != NULL) mii_tick(device_get_softc(sc->miibus)); /* * Check that chip hasn't hung. */ fxp_watchdog(sc); /* * Schedule another timeout one second from now. */ callout_reset(&sc->stat_ch, hz, fxp_tick, sc); } /* * Stop the interface. Cancels the statistics updater and resets * the interface. */ static void fxp_stop(struct fxp_softc *sc) { struct ifnet *ifp = sc->ifp; struct fxp_tx *txp; int i; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); sc->watchdog_timer = 0; /* * Cancel stats updater. */ callout_stop(&sc->stat_ch); /* * A selective reset preserves the PCI configuration and the * configure and IA/multicast setup state, and puts the RU and * CU into the idle state. */ CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET); DELAY(50); /* Disable interrupts. */ CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE); /* * Release any xmit buffers. */ txp = sc->fxp_desc.tx_list; if (txp != NULL) { for (i = 0; i < FXP_NTXCB; i++) { if (txp[i].tx_mbuf != NULL) { bus_dmamap_sync(sc->fxp_mtag, txp[i].tx_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->fxp_mtag, txp[i].tx_map); m_freem(txp[i].tx_mbuf); txp[i].tx_mbuf = NULL; /* clear this to reset csum offload bits */ txp[i].tx_cb->tbd[0].tb_addr = 0; } } } bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE); sc->tx_queued = 0; } /* * Watchdog/transmit timeout handler. Called when a * transmission is started on the interface, but no interrupt is * received before the timeout. This usually indicates that the * card has wedged for some reason. */ static void fxp_watchdog(struct fxp_softc *sc) { FXP_LOCK_ASSERT(sc, MA_OWNED); if (sc->watchdog_timer == 0 || --sc->watchdog_timer) return; device_printf(sc->dev, "device timeout\n"); sc->ifp->if_oerrors++; fxp_init_body(sc); } /* * Acquire locks and then call the real initialization function. This * is necessary because ether_ioctl() calls if_init() and this would * result in mutex recursion if the mutex was held. */ static void fxp_init(void *xsc) { struct fxp_softc *sc = xsc; FXP_LOCK(sc); fxp_init_body(sc); FXP_UNLOCK(sc); } /* * Perform device initialization. This routine must be called with the * softc lock held. */ static void fxp_init_body(struct fxp_softc *sc) { struct ifnet *ifp = sc->ifp; struct fxp_cb_config *cbp; struct fxp_cb_ias *cb_ias; struct fxp_cb_tx *tcbp; struct fxp_tx *txp; struct fxp_cb_mcs *mcsp; int i, prm; FXP_LOCK_ASSERT(sc, MA_OWNED); /* * Cancel any pending I/O */ fxp_stop(sc); /* * Issue software reset, which also unloads the microcode. */ sc->flags &= ~FXP_FLAG_UCODE; CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET); DELAY(50); prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0; /* * Initialize base of CBL and RFA memory. Loading with zero * sets it up for regular linear addressing. */ CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0); fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_BASE); fxp_scb_wait(sc); fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_BASE); /* * Initialize base of dump-stats buffer.
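* The chip DMAs its counters to this address on every CU_DUMPRESET issued from fxp_tick().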
*/ fxp_scb_wait(sc); bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap, BUS_DMASYNC_PREREAD); CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->stats_addr); fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMP_ADR); /* * Attempt to load microcode if requested. */ if (ifp->if_flags & IFF_LINK0 && (sc->flags & FXP_FLAG_UCODE) == 0) fxp_load_ucode(sc); /* * Initialize the multicast address list. */ if (fxp_mc_addrs(sc)) { mcsp = sc->mcsp; mcsp->cb_status = 0; mcsp->cb_command = htole16(FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_EL); mcsp->link_addr = 0xffffffff; /* * Start the multicast setup command. */ fxp_scb_wait(sc); bus_dmamap_sync(sc->mcs_tag, sc->mcs_map, BUS_DMASYNC_PREWRITE); CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->mcs_addr); fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); /* ...and wait for it to complete. */ fxp_dma_wait(sc, &mcsp->cb_status, sc->mcs_tag, sc->mcs_map); bus_dmamap_sync(sc->mcs_tag, sc->mcs_map, BUS_DMASYNC_POSTWRITE); } /* * We temporarily use memory that contains the TxCB list to * construct the config CB. The TxCB list memory is rebuilt * later. */ cbp = (struct fxp_cb_config *)sc->fxp_desc.cbl_list; /* * This bcopy is kind of disgusting, but there are a bunch of must be * zero and must be one bits in this structure and this is the easiest * way to initialize them all to proper values. */ bcopy(fxp_cb_config_template, cbp, sizeof(fxp_cb_config_template)); cbp->cb_status = 0; cbp->cb_command = htole16(FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL); cbp->link_addr = 0xffffffff; /* (no) next command */ cbp->byte_count = sc->flags & FXP_FLAG_EXT_RFA ? 32 : 22; cbp->rx_fifo_limit = 8; /* rx fifo threshold (32 bytes) */ cbp->tx_fifo_limit = 0; /* tx fifo threshold (0 bytes) */ cbp->adaptive_ifs = 0; /* (no) adaptive interframe spacing */ cbp->mwi_enable = sc->flags & FXP_FLAG_MWI_ENABLE ? 1 : 0; cbp->type_enable = 0; /* actually reserved */ cbp->read_align_en = sc->flags & FXP_FLAG_READ_ALIGN ? 1 : 0; cbp->end_wr_on_cl = sc->flags & FXP_FLAG_WRITE_ALIGN ? 1 : 0; cbp->rx_dma_bytecount = 0; /* (no) rx DMA max */ cbp->tx_dma_bytecount = 0; /* (no) tx DMA max */ cbp->dma_mbce = 0; /* (disable) dma max counters */ cbp->late_scb = 0; /* (don't) defer SCB update */ cbp->direct_dma_dis = 1; /* disable direct rcv dma mode */ cbp->tno_int_or_tco_en =0; /* (disable) tx not okay interrupt */ cbp->ci_int = 1; /* interrupt on CU idle */ cbp->ext_txcb_dis = sc->flags & FXP_FLAG_EXT_TXCB ? 0 : 1; cbp->ext_stats_dis = 1; /* disable extended counters */ cbp->keep_overrun_rx = 0; /* don't pass overrun frames to host */ cbp->save_bf = sc->flags & FXP_FLAG_SAVE_BAD ? 1 : prm; cbp->disc_short_rx = !prm; /* discard short packets */ cbp->underrun_retry = 1; /* retry mode (once) on DMA underrun */ cbp->two_frames = 0; /* do not limit FIFO to 2 frames */ cbp->dyn_tbd = sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0; cbp->ext_rfa = sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0; cbp->mediatype = sc->flags & FXP_FLAG_SERIAL_MEDIA ? 0 : 1; cbp->csma_dis = 0; /* (don't) disable link */ cbp->tcp_udp_cksum = ((sc->flags & FXP_FLAG_82559_RXCSUM) != 0 && (ifp->if_capenable & IFCAP_RXCSUM) != 0) ? 
1 : 0; cbp->vlan_tco = 0; /* (don't) enable vlan wakeup */ cbp->link_wake_en = 0; /* (don't) assert PME# on link change */ cbp->arp_wake_en = 0; /* (don't) assert PME# on arp */ cbp->mc_wake_en = 0; /* (don't) enable PME# on mcmatch */ cbp->nsai = 1; /* (don't) disable source addr insert */ cbp->preamble_length = 2; /* (7 byte) preamble */ cbp->loopback = 0; /* (don't) loopback */ cbp->linear_priority = 0; /* (normal CSMA/CD operation) */ cbp->linear_pri_mode = 0; /* (wait after xmit only) */ cbp->interfrm_spacing = 6; /* (96 bits of) interframe spacing */ cbp->promiscuous = prm; /* promiscuous mode */ cbp->bcast_disable = 0; /* (don't) disable broadcasts */ cbp->wait_after_win = 0; /* (don't) enable modified backoff alg*/ cbp->ignore_ul = 0; /* consider U/L bit in IA matching */ cbp->crc16_en = 0; /* (don't) enable crc-16 algorithm */ cbp->crscdt = sc->flags & FXP_FLAG_SERIAL_MEDIA ? 1 : 0; cbp->stripping = !prm; /* truncate rx packet to byte count */ cbp->padding = 1; /* (do) pad short tx packets */ cbp->rcv_crc_xfer = 0; /* (don't) xfer CRC to host */ cbp->long_rx_en = sc->flags & FXP_FLAG_LONG_PKT_EN ? 1 : 0; cbp->ia_wake_en = 0; /* (don't) wake up on address match */ cbp->magic_pkt_dis = sc->flags & FXP_FLAG_WOL ? 0 : 1; cbp->force_fdx = 0; /* (don't) force full duplex */ cbp->fdx_pin_en = 1; /* (enable) FDX# pin */ cbp->multi_ia = 0; /* (don't) accept multiple IAs */ cbp->mc_all = sc->flags & FXP_FLAG_ALL_MCAST ? 1 : 0; cbp->gamla_rx = sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0; cbp->vlan_strip_en = ((sc->flags & FXP_FLAG_EXT_RFA) != 0 && (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) ? 1 : 0; if (sc->tunable_noflow || sc->revision == FXP_REV_82557) { /* * The 82557 has no hardware flow control, the values * below are the defaults for the chip. */ cbp->fc_delay_lsb = 0; cbp->fc_delay_msb = 0x40; cbp->pri_fc_thresh = 3; cbp->tx_fc_dis = 0; cbp->rx_fc_restop = 0; cbp->rx_fc_restart = 0; cbp->fc_filter = 0; cbp->pri_fc_loc = 1; } else { cbp->fc_delay_lsb = 0x1f; cbp->fc_delay_msb = 0x01; cbp->pri_fc_thresh = 3; cbp->tx_fc_dis = 0; /* enable transmit FC */ cbp->rx_fc_restop = 1; /* enable FC restop frames */ cbp->rx_fc_restart = 1; /* enable FC restart frames */ cbp->fc_filter = !prm; /* drop FC frames to host */ cbp->pri_fc_loc = 1; /* FC pri location (byte31) */ } /* * Start the config command/DMA. */ fxp_scb_wait(sc); bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE); CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr); fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); /* ...and wait for it to complete. */ fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map); bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE); /* * Now initialize the station address. Temporarily use the TxCB * memory area like we did above for the config CB. */ cb_ias = (struct fxp_cb_ias *)sc->fxp_desc.cbl_list; cb_ias->cb_status = 0; cb_ias->cb_command = htole16(FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL); cb_ias->link_addr = 0xffffffff; bcopy(IF_LLADDR(sc->ifp), cb_ias->macaddr, ETHER_ADDR_LEN); /* * Start the IAS (Individual Address Setup) command/DMA. */ fxp_scb_wait(sc); bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE); fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); /* ...and wait for it to complete. */ fxp_dma_wait(sc, &cb_ias->cb_status, sc->cbl_tag, sc->cbl_map); bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE); /* * Initialize transmit control block (TxCB) list. 
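* Each TxCB's link_addr points at the next one, so the FXP_NTXCB commands form a ring. A minimal sketch of the wrap arithmetic (assuming here that FXP_NTXCB is a power of two and FXP_TXCB_MASK == FXP_NTXCB - 1, per the loop below):
*
*	next = (i + 1) & FXP_TXCB_MASK;
*	link_addr = cbl_addr + next * sizeof(struct fxp_cb_tx);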
*/ txp = sc->fxp_desc.tx_list; tcbp = sc->fxp_desc.cbl_list; bzero(tcbp, FXP_TXCB_SZ); for (i = 0; i < FXP_NTXCB; i++) { txp[i].tx_mbuf = NULL; tcbp[i].cb_status = htole16(FXP_CB_STATUS_C | FXP_CB_STATUS_OK); tcbp[i].cb_command = htole16(FXP_CB_COMMAND_NOP); tcbp[i].link_addr = htole32(sc->fxp_desc.cbl_addr + (((i + 1) & FXP_TXCB_MASK) * sizeof(struct fxp_cb_tx))); if (sc->flags & FXP_FLAG_EXT_TXCB) tcbp[i].tbd_array_addr = htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[2])); else tcbp[i].tbd_array_addr = htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[0])); txp[i].tx_next = &txp[(i + 1) & FXP_TXCB_MASK]; } /* * Set the suspend flag on the first TxCB and start the control * unit. It will execute the NOP and then suspend. */ tcbp->cb_command = htole16(FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S); bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE); sc->fxp_desc.tx_first = sc->fxp_desc.tx_last = txp; sc->tx_queued = 1; fxp_scb_wait(sc); fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); /* * Initialize receiver buffer area - RFA. */ fxp_scb_wait(sc); CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.rx_head->rx_addr); fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START); /* * Set current media. */ if (sc->miibus != NULL) mii_mediachg(device_get_softc(sc->miibus)); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; /* * Enable interrupts. */ #ifdef DEVICE_POLLING /* * ... but only do that if we are not polling. And because (presumably) * the default is interrupts on, we need to disable them explicitly! */ if (ifp->if_capenable & IFCAP_POLLING ) CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE); else #endif /* DEVICE_POLLING */ CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0); /* * Start stats updater. */ callout_reset(&sc->stat_ch, hz, fxp_tick, sc); } static int fxp_serial_ifmedia_upd(struct ifnet *ifp) { return (0); } static void fxp_serial_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { ifmr->ifm_active = IFM_ETHER|IFM_MANUAL; } /* * Change media according to request. */ static int fxp_ifmedia_upd(struct ifnet *ifp) { struct fxp_softc *sc = ifp->if_softc; struct mii_data *mii; mii = device_get_softc(sc->miibus); FXP_LOCK(sc); if (mii->mii_instance) { struct mii_softc *miisc; LIST_FOREACH(miisc, &mii->mii_phys, mii_list) mii_phy_reset(miisc); } mii_mediachg(mii); FXP_UNLOCK(sc); return (0); } /* * Notify the world which media we're using. */ static void fxp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct fxp_softc *sc = ifp->if_softc; struct mii_data *mii; mii = device_get_softc(sc->miibus); FXP_LOCK(sc); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; if (IFM_SUBTYPE(ifmr->ifm_active) == IFM_10_T && sc->flags & FXP_FLAG_CU_RESUME_BUG) sc->cu_resume_bug = 1; else sc->cu_resume_bug = 0; FXP_UNLOCK(sc); } /* * Add a buffer to the end of the RFA buffer list. * Return 0 if successful, an error code for failure. A failure results in * reusing the RFA buffer. * The RFA struct is stuck at the beginning of the mbuf cluster and the * data pointer is fixed up to point just past it. */ static int fxp_new_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp) { struct mbuf *m; struct fxp_rfa *rfa; bus_dmamap_t tmp_map; int error; m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); /* * Move the data pointer up so that the incoming data packet * will be 32-bit aligned. */ m->m_data += RFA_ALIGNMENT_FUDGE; /* * Get a pointer to the base of the mbuf cluster and move * data start past it.
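* The resulting cluster layout is:
*
*	[RFA_ALIGNMENT_FUDGE pad | struct fxp_rfa | packet data]
*
* which is why the usable length below is MCLBYTES less the fudge and the RFA.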
*/ rfa = mtod(m, struct fxp_rfa *); m->m_data += sc->rfa_size; rfa->size = htole16(MCLBYTES - sc->rfa_size - RFA_ALIGNMENT_FUDGE); rfa->rfa_status = 0; rfa->rfa_control = htole16(FXP_RFA_CONTROL_EL); rfa->actual_size = 0; m->m_len = m->m_pkthdr.len = MCLBYTES - RFA_ALIGNMENT_FUDGE - sc->rfa_size; /* * Initialize the rest of the RFA. Note that since the RFA * is misaligned, we cannot store values directly. We're thus * using the le32enc() function which handles endianness and * is also alignment-safe. */ le32enc(&rfa->link_addr, 0xffffffff); le32enc(&rfa->rbd_addr, 0xffffffff); /* Map the RFA into DMA memory. */ error = bus_dmamap_load(sc->fxp_mtag, sc->spare_map, rfa, MCLBYTES - RFA_ALIGNMENT_FUDGE, fxp_dma_map_addr, &rxp->rx_addr, 0); if (error) { m_freem(m); return (error); } if (rxp->rx_mbuf != NULL) bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map); tmp_map = sc->spare_map; sc->spare_map = rxp->rx_map; rxp->rx_map = tmp_map; rxp->rx_mbuf = m; bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } static void fxp_add_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp) { struct fxp_rfa *p_rfa; struct fxp_rx *p_rx; /* * If there are other buffers already on the list, attach this * one to the end by fixing up the tail to point to this one. */ if (sc->fxp_desc.rx_head != NULL) { p_rx = sc->fxp_desc.rx_tail; p_rfa = (struct fxp_rfa *) (p_rx->rx_mbuf->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE); p_rx->rx_next = rxp; le32enc(&p_rfa->link_addr, rxp->rx_addr); p_rfa->rfa_control = 0; bus_dmamap_sync(sc->fxp_mtag, p_rx->rx_map, BUS_DMASYNC_PREWRITE); } else { rxp->rx_next = NULL; sc->fxp_desc.rx_head = rxp; } sc->fxp_desc.rx_tail = rxp; } static void fxp_discard_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp) { struct mbuf *m; struct fxp_rfa *rfa; m = rxp->rx_mbuf; m->m_data = m->m_ext.ext_buf; /* * Move the data pointer up so that the incoming data packet * will be 32-bit aligned. */ m->m_data += RFA_ALIGNMENT_FUDGE; /* * Get a pointer to the base of the mbuf cluster and move * data start past it. */ rfa = mtod(m, struct fxp_rfa *); m->m_data += sc->rfa_size; rfa->size = htole16(MCLBYTES - sc->rfa_size - RFA_ALIGNMENT_FUDGE); rfa->rfa_status = 0; rfa->rfa_control = htole16(FXP_RFA_CONTROL_EL); rfa->actual_size = 0; /* * Initialize the rest of the RFA. Note that since the RFA * is misaligned, we cannot store values directly. We're thus * using the le32enc() function which handles endianness and * is also alignment-safe. 
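* (le32enc() stores the 32-bit value one byte at a time, so no word-sized store ever touches the misaligned fields.)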
*/ le32enc(&rfa->link_addr, 0xffffffff); le32enc(&rfa->rbd_addr, 0xffffffff); bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } static int fxp_miibus_readreg(device_t dev, int phy, int reg) { struct fxp_softc *sc = device_get_softc(dev); int count = 10000; int value; CSR_WRITE_4(sc, FXP_CSR_MDICONTROL, (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21)); while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0 && count--) DELAY(10); if (count <= 0) device_printf(dev, "fxp_miibus_readreg: timed out\n"); return (value & 0xffff); } static int fxp_miibus_writereg(device_t dev, int phy, int reg, int value) { struct fxp_softc *sc = device_get_softc(dev); int count = 10000; CSR_WRITE_4(sc, FXP_CSR_MDICONTROL, (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) | (value & 0xffff)); while ((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 && count--) DELAY(10); if (count <= 0) device_printf(dev, "fxp_miibus_writereg: timed out\n"); return (0); } static int fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct fxp_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; struct mii_data *mii; int flag, mask, error = 0, reinit; switch (command) { case SIOCSIFFLAGS: FXP_LOCK(sc); if (ifp->if_flags & IFF_ALLMULTI) sc->flags |= FXP_FLAG_ALL_MCAST; else sc->flags &= ~FXP_FLAG_ALL_MCAST; /* * If interface is marked up and not running, then start it. * If it is marked down and running, stop it. * XXX If it's up then re-initialize it. This is so flags * such as IFF_PROMISC are handled. */ if (ifp->if_flags & IFF_UP) { fxp_init_body(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) fxp_stop(sc); } FXP_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: FXP_LOCK(sc); if (ifp->if_flags & IFF_ALLMULTI) sc->flags |= FXP_FLAG_ALL_MCAST; else sc->flags &= ~FXP_FLAG_ALL_MCAST; /* * Multicast list has changed; set the hardware filter * accordingly. */ if ((sc->flags & FXP_FLAG_ALL_MCAST) == 0) fxp_mc_setup(sc); /* * fxp_mc_setup() can set FXP_FLAG_ALL_MCAST, so check it * again rather than else {}. 
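* (That happens via fxp_mc_addrs(), which falls back to FXP_FLAG_ALL_MCAST when more than MAXMCADDR addresses are configured.)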
*/ if (sc->flags & FXP_FLAG_ALL_MCAST) fxp_init_body(sc); FXP_UNLOCK(sc); error = 0; break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: if (sc->miibus != NULL) { mii = device_get_softc(sc->miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); } else { error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command); } break; case SIOCSIFCAP: reinit = 0; mask = ifp->if_capenable ^ ifr->ifr_reqcap; #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(fxp_poll, ifp); if (error) return(error); FXP_LOCK(sc); CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE); ifp->if_capenable |= IFCAP_POLLING; FXP_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); /* Enable interrupts in any case */ FXP_LOCK(sc); CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0); ifp->if_capenable &= ~IFCAP_POLLING; FXP_UNLOCK(sc); } } #endif FXP_LOCK(sc); if ((mask & IFCAP_TXCSUM) != 0 && (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { ifp->if_capenable ^= IFCAP_TXCSUM; if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) ifp->if_hwassist |= FXP_CSUM_FEATURES; else ifp->if_hwassist &= ~FXP_CSUM_FEATURES; } if ((mask & IFCAP_RXCSUM) != 0 && (ifp->if_capabilities & IFCAP_RXCSUM) != 0) { ifp->if_capenable ^= IFCAP_RXCSUM; if ((sc->flags & FXP_FLAG_82559_RXCSUM) != 0) reinit++; } if ((mask & IFCAP_TSO4) != 0 && (ifp->if_capabilities & IFCAP_TSO4) != 0) { ifp->if_capenable ^= IFCAP_TSO4; if ((ifp->if_capenable & IFCAP_TSO4) != 0) ifp->if_hwassist |= CSUM_TSO; else ifp->if_hwassist &= ~CSUM_TSO; } if ((mask & IFCAP_WOL_MAGIC) != 0 && (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) ifp->if_capenable ^= IFCAP_WOL_MAGIC; if ((mask & IFCAP_VLAN_MTU) != 0 && (ifp->if_capabilities & IFCAP_VLAN_MTU) != 0) { ifp->if_capenable ^= IFCAP_VLAN_MTU; if (sc->revision != FXP_REV_82557) flag = FXP_FLAG_LONG_PKT_EN; else /* a hack to get long frames on the old chip */ flag = FXP_FLAG_SAVE_BAD; sc->flags ^= flag; if (ifp->if_flags & IFF_UP) reinit++; } if ((mask & IFCAP_VLAN_HWTAGGING) != 0 && (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING) != 0) { ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; reinit++; } if (reinit > 0 && ifp->if_flags & IFF_UP) fxp_init_body(sc); FXP_UNLOCK(sc); VLAN_CAPABILITIES(ifp); break; default: error = ether_ioctl(ifp, command, data); } return (error); } /* * Fill in the multicast address list and return number of entries. */ static int fxp_mc_addrs(struct fxp_softc *sc) { struct fxp_cb_mcs *mcsp = sc->mcsp; struct ifnet *ifp = sc->ifp; struct ifmultiaddr *ifma; int nmcasts; nmcasts = 0; if ((sc->flags & FXP_FLAG_ALL_MCAST) == 0) { IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (nmcasts >= MAXMCADDR) { sc->flags |= FXP_FLAG_ALL_MCAST; nmcasts = 0; break; } bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), &sc->mcsp->mc_addr[nmcasts][0], ETHER_ADDR_LEN); nmcasts++; } IF_ADDR_UNLOCK(ifp); } mcsp->mc_cnt = htole16(nmcasts * ETHER_ADDR_LEN); return (nmcasts); } /* * Program the multicast filter. * * We have an artificial restriction that the multicast setup command * must be the first command in the chain, so we take steps to ensure * this. By requiring this, it allows us to keep up the performance of * the pre-initialized command ring (esp. link pointers) by not actually * inserting the mcsetup command in the ring - i.e. its link pointer * points to the TxCB ring, but the mcsetup descriptor itself is not part * of it. 
We then can do 'CU_START' on the mcsetup descriptor and have it * lead into the regular TxCB ring when it completes. * * This function must be called at splimp. */ static void fxp_mc_setup(struct fxp_softc *sc) { struct fxp_cb_mcs *mcsp = sc->mcsp; struct fxp_tx *txp; int count; FXP_LOCK_ASSERT(sc, MA_OWNED); /* * If there are queued commands, we must wait until they are all * completed. If we are already waiting, then add a NOP command * with interrupt option so that we're notified when all commands * have been completed - fxp_start() ensures that no additional * TX commands will be added when need_mcsetup is true. */ if (sc->tx_queued) { /* * need_mcsetup will be true if we are already waiting for the * NOP command to be completed (see below). In this case, bail. */ if (sc->need_mcsetup) return; sc->need_mcsetup = 1; /* * Add a NOP command with interrupt so that we are notified * when all TX commands have been processed. */ txp = sc->fxp_desc.tx_last->tx_next; txp->tx_mbuf = NULL; txp->tx_cb->cb_status = 0; txp->tx_cb->cb_command = htole16(FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I); /* * Advance the end of list forward. */ sc->fxp_desc.tx_last->tx_cb->cb_command &= htole16(~FXP_CB_COMMAND_S); bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE); sc->fxp_desc.tx_last = txp; sc->tx_queued++; /* * Issue a resume in case the CU has just suspended. */ fxp_scb_wait(sc); fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME); /* * Set a 5 second timer just in case we don't hear from the * card again. */ sc->watchdog_timer = 5; return; } sc->need_mcsetup = 0; /* * Initialize multicast setup descriptor. */ mcsp->cb_status = 0; mcsp->cb_command = htole16(FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I); mcsp->link_addr = htole32(sc->fxp_desc.cbl_addr); txp = &sc->fxp_desc.mcs_tx; txp->tx_mbuf = NULL; txp->tx_cb = (struct fxp_cb_tx *)sc->mcsp; txp->tx_next = sc->fxp_desc.tx_list; (void) fxp_mc_addrs(sc); sc->fxp_desc.tx_first = sc->fxp_desc.tx_last = txp; sc->tx_queued = 1; /* * Wait until command unit is not active. This should never * be the case when nothing is queued, but make sure anyway. */ count = 100; while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) == FXP_SCB_CUS_ACTIVE && --count) DELAY(10); if (count == 0) { device_printf(sc->dev, "command queue timeout\n"); return; } /* * Start the multicast setup command. 
*/ fxp_scb_wait(sc); bus_dmamap_sync(sc->mcs_tag, sc->mcs_map, BUS_DMASYNC_PREWRITE); CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->mcs_addr); fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); sc->watchdog_timer = 2; return; } static uint32_t fxp_ucode_d101a[] = D101_A_RCVBUNDLE_UCODE; static uint32_t fxp_ucode_d101b0[] = D101_B0_RCVBUNDLE_UCODE; static uint32_t fxp_ucode_d101ma[] = D101M_B_RCVBUNDLE_UCODE; static uint32_t fxp_ucode_d101s[] = D101S_RCVBUNDLE_UCODE; static uint32_t fxp_ucode_d102[] = D102_B_RCVBUNDLE_UCODE; static uint32_t fxp_ucode_d102c[] = D102_C_RCVBUNDLE_UCODE; static uint32_t fxp_ucode_d102e[] = D102_E_RCVBUNDLE_UCODE; #define UCODE(x) x, sizeof(x)/sizeof(uint32_t) struct ucode { uint32_t revision; uint32_t *ucode; int length; u_short int_delay_offset; u_short bundle_max_offset; } ucode_table[] = { { FXP_REV_82558_A4, UCODE(fxp_ucode_d101a), D101_CPUSAVER_DWORD, 0 }, { FXP_REV_82558_B0, UCODE(fxp_ucode_d101b0), D101_CPUSAVER_DWORD, 0 }, { FXP_REV_82559_A0, UCODE(fxp_ucode_d101ma), D101M_CPUSAVER_DWORD, D101M_CPUSAVER_BUNDLE_MAX_DWORD }, { FXP_REV_82559S_A, UCODE(fxp_ucode_d101s), D101S_CPUSAVER_DWORD, D101S_CPUSAVER_BUNDLE_MAX_DWORD }, { FXP_REV_82550, UCODE(fxp_ucode_d102), D102_B_CPUSAVER_DWORD, D102_B_CPUSAVER_BUNDLE_MAX_DWORD }, { FXP_REV_82550_C, UCODE(fxp_ucode_d102c), D102_C_CPUSAVER_DWORD, D102_C_CPUSAVER_BUNDLE_MAX_DWORD }, { FXP_REV_82551_F, UCODE(fxp_ucode_d102e), D102_E_CPUSAVER_DWORD, D102_E_CPUSAVER_BUNDLE_MAX_DWORD }, { 0, NULL, 0, 0, 0 } }; static void fxp_load_ucode(struct fxp_softc *sc) { struct ucode *uc; struct fxp_cb_ucode *cbp; int i; for (uc = ucode_table; uc->ucode != NULL; uc++) if (sc->revision == uc->revision) break; if (uc->ucode == NULL) return; cbp = (struct fxp_cb_ucode *)sc->fxp_desc.cbl_list; cbp->cb_status = 0; cbp->cb_command = htole16(FXP_CB_COMMAND_UCODE | FXP_CB_COMMAND_EL); cbp->link_addr = 0xffffffff; /* (no) next command */ for (i = 0; i < uc->length; i++) cbp->ucode[i] = htole32(uc->ucode[i]); if (uc->int_delay_offset) *(uint16_t *)&cbp->ucode[uc->int_delay_offset] = htole16(sc->tunable_int_delay + sc->tunable_int_delay / 2); if (uc->bundle_max_offset) *(uint16_t *)&cbp->ucode[uc->bundle_max_offset] = htole16(sc->tunable_bundle_max); /* * Download the ucode to the chip. */ fxp_scb_wait(sc); bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE); CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr); fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); /* ...and wait for it to complete. */ fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map); bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE); device_printf(sc->dev, "Microcode loaded, int_delay: %d usec bundle_max: %d\n", sc->tunable_int_delay, uc->bundle_max_offset == 0 ? 0 : sc->tunable_bundle_max); sc->flags |= FXP_FLAG_UCODE; } static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) { int error, value; value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if (error || !req->newptr) return (error); if (value < low || value > high) return (EINVAL); *(int *)arg1 = value; return (0); } /* * Interrupt delay is expressed in microseconds, a multiplier is used * to convert this to the appropriate clock ticks before using. 
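* fxp_load_ucode() above writes int_delay + int_delay / 2 into the microcode, i.e. a 1.5x multiplier: a tunable of, say, 1000 usec becomes 1500 ticks. The handler below constrains the tunable to the 300-3000 usec range.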
*/ static int sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, 300, 3000)); } static int sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, 1, 0xffff)); } Index: head/sys/dev/ixgb/if_ixgb.c =================================================================== --- head/sys/dev/ixgb/if_ixgb.c (revision 193095) +++ head/sys/dev/ixgb/if_ixgb.c (revision 193096) @@ -1,2526 +1,2532 @@ /******************************************************************************* Copyright (c) 2001-2004, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
***************************************************************************/ /*$FreeBSD$*/ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include /********************************************************************* * Set this to one to display debug statistics *********************************************************************/ int ixgb_display_debug_stats = 0; /********************************************************************* * Linked list of board private structures for all NICs found *********************************************************************/ struct adapter *ixgb_adapter_list = NULL; /********************************************************************* * Driver version *********************************************************************/ char ixgb_driver_version[] = "1.0.6"; char ixgb_copyright[] = "Copyright (c) 2001-2004 Intel Corporation."; /********************************************************************* * PCI Device ID Table * * Used by probe to select devices to load on * Last field stores an index into ixgb_strings * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } *********************************************************************/ static ixgb_vendor_info_t ixgb_vendor_info_array[] = { /* Intel(R) PRO/10000 Network Connection */ {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX, PCI_ANY_ID, PCI_ANY_ID, 0}, {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR, PCI_ANY_ID, PCI_ANY_ID, 0}, /* required last entry */ {0, 0, 0, 0, 0} }; /********************************************************************* * Table of branding strings for all supported NICs. *********************************************************************/ static char *ixgb_strings[] = { "Intel(R) PRO/10GbE Network Driver" }; /********************************************************************* * Function prototypes *********************************************************************/ static int ixgb_probe(device_t); static int ixgb_attach(device_t); static int ixgb_detach(device_t); static int ixgb_shutdown(device_t); static void ixgb_intr(void *); static void ixgb_start(struct ifnet *); static void ixgb_start_locked(struct ifnet *); static int ixgb_ioctl(struct ifnet *, IOCTL_CMD_TYPE, caddr_t); static void ixgb_watchdog(struct ifnet *); static void ixgb_init(void *); static void ixgb_init_locked(struct adapter *); static void ixgb_stop(void *); static void ixgb_media_status(struct ifnet *, struct ifmediareq *); static int ixgb_media_change(struct ifnet *); static void ixgb_identify_hardware(struct adapter *); static int ixgb_allocate_pci_resources(struct adapter *); static void ixgb_free_pci_resources(struct adapter *); static void ixgb_local_timer(void *); static int ixgb_hardware_init(struct adapter *); static void ixgb_setup_interface(device_t, struct adapter *); static int ixgb_setup_transmit_structures(struct adapter *); static void ixgb_initialize_transmit_unit(struct adapter *); static int ixgb_setup_receive_structures(struct adapter *); static void ixgb_initialize_receive_unit(struct adapter *); static void ixgb_enable_intr(struct adapter *); static void ixgb_disable_intr(struct adapter *); static void ixgb_free_transmit_structures(struct adapter *); static void ixgb_free_receive_structures(struct adapter *); static void ixgb_update_stats_counters(struct adapter *); static void ixgb_clean_transmit_interrupts(struct adapter *); static int ixgb_allocate_receive_structures(struct adapter *); static int 
ixgb_allocate_transmit_structures(struct adapter *); -static void ixgb_process_receive_interrupts(struct adapter *, int); +static int ixgb_process_receive_interrupts(struct adapter *, int); static void ixgb_receive_checksum(struct adapter *, struct ixgb_rx_desc * rx_desc, struct mbuf *); static void ixgb_transmit_checksum_setup(struct adapter *, struct mbuf *, u_int8_t *); static void ixgb_set_promisc(struct adapter *); static void ixgb_disable_promisc(struct adapter *); static void ixgb_set_multi(struct adapter *); static void ixgb_print_hw_stats(struct adapter *); static void ixgb_print_link_status(struct adapter *); static int ixgb_get_buf(int i, struct adapter *, struct mbuf *); static void ixgb_enable_vlans(struct adapter * adapter); static int ixgb_encap(struct adapter * adapter, struct mbuf * m_head); static int ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS); static int ixgb_dma_malloc(struct adapter *, bus_size_t, struct ixgb_dma_alloc *, int); static void ixgb_dma_free(struct adapter *, struct ixgb_dma_alloc *); #ifdef DEVICE_POLLING static poll_handler_t ixgb_poll; #endif /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ static device_method_t ixgb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ixgb_probe), DEVMETHOD(device_attach, ixgb_attach), DEVMETHOD(device_detach, ixgb_detach), DEVMETHOD(device_shutdown, ixgb_shutdown), {0, 0} }; static driver_t ixgb_driver = { "ixgb", ixgb_methods, sizeof(struct adapter), }; static devclass_t ixgb_devclass; DRIVER_MODULE(ixgb, pci, ixgb_driver, ixgb_devclass, 0, 0); MODULE_DEPEND(ixgb, pci, 1, 1, 1); MODULE_DEPEND(ixgb, ether, 1, 1, 1); /* some defines for controlling descriptor fetches in h/w */ #define RXDCTL_PTHRESH_DEFAULT 128 /* chip considers prefetch below this */ #define RXDCTL_HTHRESH_DEFAULT 16 /* chip will only prefetch if tail is * pushed this many descriptors from * head */ #define RXDCTL_WTHRESH_DEFAULT 0 /* chip writes back at this many or RXT0 */ /********************************************************************* * Device identification routine * * ixgb_probe determines if the driver should be loaded on * an adapter based on the PCI vendor/device id of the adapter. * * return BUS_PROBE_DEFAULT on success, positive on failure *********************************************************************/ static int ixgb_probe(device_t dev) { ixgb_vendor_info_t *ent; u_int16_t pci_vendor_id = 0; u_int16_t pci_device_id = 0; u_int16_t pci_subvendor_id = 0; u_int16_t pci_subdevice_id = 0; char adapter_name[60]; INIT_DEBUGOUT("ixgb_probe: begin"); pci_vendor_id = pci_get_vendor(dev); if (pci_vendor_id != IXGB_VENDOR_ID) return (ENXIO); pci_device_id = pci_get_device(dev); pci_subvendor_id = pci_get_subvendor(dev); pci_subdevice_id = pci_get_subdevice(dev); ent = ixgb_vendor_info_array; while (ent->vendor_id != 0) { if ((pci_vendor_id == ent->vendor_id) && (pci_device_id == ent->device_id) && ((pci_subvendor_id == ent->subvendor_id) || (ent->subvendor_id == PCI_ANY_ID)) && ((pci_subdevice_id == ent->subdevice_id) || (ent->subdevice_id == PCI_ANY_ID))) { sprintf(adapter_name, "%s, Version - %s", ixgb_strings[ent->index], ixgb_driver_version); device_set_desc_copy(dev, adapter_name); return (BUS_PROBE_DEFAULT); } ent++; } return (ENXIO); } /********************************************************************* * Device initialization routine * * The attach entry point is called when the driver is being loaded.
* This routine identifies the type of hardware, allocates all resources * and initializes the hardware. * * return 0 on success, positive on failure *********************************************************************/ static int ixgb_attach(device_t dev) { struct adapter *adapter; int tsize, rsize; int error = 0; printf("ixgb%d: %s\n", device_get_unit(dev), ixgb_copyright); INIT_DEBUGOUT("ixgb_attach: begin"); /* Allocate, clear, and link in our adapter structure */ if (!(adapter = device_get_softc(dev))) { printf("ixgb: adapter structure allocation failed\n"); return (ENOMEM); } bzero(adapter, sizeof(struct adapter)); adapter->dev = dev; adapter->osdep.dev = dev; adapter->unit = device_get_unit(dev); IXGB_LOCK_INIT(adapter, device_get_nameunit(dev)); if (ixgb_adapter_list != NULL) ixgb_adapter_list->prev = adapter; adapter->next = ixgb_adapter_list; ixgb_adapter_list = adapter; /* SYSCTL APIs */ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, (void *)adapter, 0, ixgb_sysctl_stats, "I", "Statistics"); callout_init(&adapter->timer, CALLOUT_MPSAFE); /* Determine hardware revision */ ixgb_identify_hardware(adapter); /* Parameters (to be read from user) */ adapter->num_tx_desc = IXGB_MAX_TXD; adapter->num_rx_desc = IXGB_MAX_RXD; adapter->tx_int_delay = TIDV; adapter->rx_int_delay = RDTR; adapter->rx_buffer_len = IXGB_RXBUFFER_2048; adapter->hw.fc.high_water = FCRTH; adapter->hw.fc.low_water = FCRTL; adapter->hw.fc.pause_time = FCPAUSE; adapter->hw.fc.send_xon = TRUE; adapter->hw.fc.type = FLOW_CONTROL; /* Set the max frame size assuming standard ethernet sized frames */ adapter->hw.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN; if (ixgb_allocate_pci_resources(adapter)) { printf("ixgb%d: Allocation of PCI resources failed\n", adapter->unit); error = ENXIO; goto err_pci; } tsize = IXGB_ROUNDUP(adapter->num_tx_desc * sizeof(struct ixgb_tx_desc), 4096); /* Allocate Transmit Descriptor ring */ if (ixgb_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) { printf("ixgb%d: Unable to allocate TxDescriptor memory\n", adapter->unit); error = ENOMEM; goto err_tx_desc; } adapter->tx_desc_base = (struct ixgb_tx_desc *) adapter->txdma.dma_vaddr; rsize = IXGB_ROUNDUP(adapter->num_rx_desc * sizeof(struct ixgb_rx_desc), 4096); /* Allocate Receive Descriptor ring */ if (ixgb_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) { printf("ixgb%d: Unable to allocate rx_desc memory\n", adapter->unit); error = ENOMEM; goto err_rx_desc; } adapter->rx_desc_base = (struct ixgb_rx_desc *) adapter->rxdma.dma_vaddr; /* Initialize the hardware */ if (ixgb_hardware_init(adapter)) { printf("ixgb%d: Unable to initialize the hardware\n", adapter->unit); error = EIO; goto err_hw_init; } /* Setup OS specific network interface */ ixgb_setup_interface(dev, adapter); /* Initialize statistics */ ixgb_clear_hw_cntrs(&adapter->hw); ixgb_update_stats_counters(adapter); INIT_DEBUGOUT("ixgb_attach: end"); return (0); err_hw_init: ixgb_dma_free(adapter, &adapter->rxdma); err_rx_desc: ixgb_dma_free(adapter, &adapter->txdma); err_tx_desc: err_pci: ixgb_free_pci_resources(adapter); sysctl_ctx_free(&adapter->sysctl_ctx); return (error); } /********************************************************************* * Device removal routine * * The detach entry point is called when the driver is being removed. * This routine stops the adapter and deallocates all the resources * that were allocated for driver operation. 
* * return 0 on success, positive on failure *********************************************************************/ static int ixgb_detach(device_t dev) { struct adapter *adapter = device_get_softc(dev); struct ifnet *ifp = adapter->ifp; INIT_DEBUGOUT("ixgb_detach: begin"); #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif IXGB_LOCK(adapter); adapter->in_detach = 1; ixgb_stop(adapter); IXGB_UNLOCK(adapter); #if __FreeBSD_version < 500000 ether_ifdetach(adapter->ifp, ETHER_BPF_SUPPORTED); #else ether_ifdetach(adapter->ifp); #endif ixgb_free_pci_resources(adapter); #if __FreeBSD_version >= 500000 if_free(adapter->ifp); #endif /* Free Transmit Descriptor ring */ if (adapter->tx_desc_base) { ixgb_dma_free(adapter, &adapter->txdma); adapter->tx_desc_base = NULL; } /* Free Receive Descriptor ring */ if (adapter->rx_desc_base) { ixgb_dma_free(adapter, &adapter->rxdma); adapter->rx_desc_base = NULL; } /* Remove from the adapter list */ if (ixgb_adapter_list == adapter) ixgb_adapter_list = adapter->next; if (adapter->next != NULL) adapter->next->prev = adapter->prev; if (adapter->prev != NULL) adapter->prev->next = adapter->next; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); ifp->if_timer = 0; IXGB_LOCK_DESTROY(adapter); return (0); } /********************************************************************* * * Shutdown entry point * **********************************************************************/ static int ixgb_shutdown(device_t dev) { struct adapter *adapter = device_get_softc(dev); IXGB_LOCK(adapter); ixgb_stop(adapter); IXGB_UNLOCK(adapter); return (0); } /********************************************************************* * Transmit entry point * * ixgb_start is called by the stack to initiate a transmit. * The driver will remain in this routine as long as there are * packets to transmit and transmit resources are available. * In case resources are not available stack is notified and * the packet is requeued. **********************************************************************/ static void ixgb_start_locked(struct ifnet * ifp) { struct mbuf *m_head; struct adapter *adapter = ifp->if_softc; IXGB_LOCK_ASSERT(adapter); if (!adapter->link_active) return; while (ifp->if_snd.ifq_head != NULL) { IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; if (ixgb_encap(adapter, m_head)) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; IF_PREPEND(&ifp->if_snd, m_head); break; } /* Send a copy of the frame to the BPF listener */ #if __FreeBSD_version < 500000 if (ifp->if_bpf) bpf_mtap(ifp, m_head); #else ETHER_BPF_MTAP(ifp, m_head); #endif /* Set timeout in case hardware has problems transmitting */ ifp->if_timer = IXGB_TX_TIMEOUT; } return; } static void ixgb_start(struct ifnet *ifp) { struct adapter *adapter = ifp->if_softc; IXGB_LOCK(adapter); ixgb_start_locked(ifp); IXGB_UNLOCK(adapter); return; } /********************************************************************* * Ioctl entry point * * ixgb_ioctl is called when the user wants to configure the * interface. 
* * return 0 on success, positive on failure **********************************************************************/ static int ixgb_ioctl(struct ifnet * ifp, IOCTL_CMD_TYPE command, caddr_t data) { int mask, error = 0; struct ifreq *ifr = (struct ifreq *) data; struct adapter *adapter = ifp->if_softc; if (adapter->in_detach) goto out; switch (command) { case SIOCSIFADDR: case SIOCGIFADDR: IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)"); ether_ioctl(ifp, command, data); break; case SIOCSIFMTU: IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)"); if (ifr->ifr_mtu > IXGB_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) { error = EINVAL; } else { IXGB_LOCK(adapter); ifp->if_mtu = ifr->ifr_mtu; adapter->hw.max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; ixgb_init_locked(adapter); IXGB_UNLOCK(adapter); } break; case SIOCSIFFLAGS: IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)"); IXGB_LOCK(adapter); if (ifp->if_flags & IFF_UP) { if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { ixgb_init_locked(adapter); } ixgb_disable_promisc(adapter); ixgb_set_promisc(adapter); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ixgb_stop(adapter); } } IXGB_UNLOCK(adapter); break; case SIOCADDMULTI: case SIOCDELMULTI: IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI"); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { IXGB_LOCK(adapter); ixgb_disable_intr(adapter); ixgb_set_multi(adapter); ixgb_enable_intr(adapter); IXGB_UNLOCK(adapter); } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)"); error = ifmedia_ioctl(ifp, ifr, &adapter->media, command); break; case SIOCSIFCAP: IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)"); mask = ifr->ifr_reqcap ^ ifp->if_capenable; #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(ixgb_poll, ifp); if (error) return(error); IXGB_LOCK(adapter); ixgb_disable_intr(adapter); ifp->if_capenable |= IFCAP_POLLING; IXGB_UNLOCK(adapter); } else { error = ether_poll_deregister(ifp); /* Enable interrupt even in error case */ IXGB_LOCK(adapter); ixgb_enable_intr(adapter); ifp->if_capenable &= ~IFCAP_POLLING; IXGB_UNLOCK(adapter); } } #endif /* DEVICE_POLLING */ if (mask & IFCAP_HWCSUM) { if (IFCAP_HWCSUM & ifp->if_capenable) ifp->if_capenable &= ~IFCAP_HWCSUM; else ifp->if_capenable |= IFCAP_HWCSUM; if (ifp->if_drv_flags & IFF_DRV_RUNNING) ixgb_init(adapter); } break; default: IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%X)\n", (int)command); error = EINVAL; } out: return (error); } /********************************************************************* * Watchdog entry point * * This routine is called whenever hardware quits transmitting. * **********************************************************************/ static void ixgb_watchdog(struct ifnet * ifp) { struct adapter *adapter; adapter = ifp->if_softc; /* * If we are in this routine because of pause frames, then don't * reset the hardware. */ if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_TXOFF) { ifp->if_timer = IXGB_TX_TIMEOUT; return; } printf("ixgb%d: watchdog timeout -- resetting\n", adapter->unit); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ixgb_stop(adapter); ixgb_init(adapter); ifp->if_oerrors++; return; } /********************************************************************* * Init entry point * * This routine is used in two ways. It is used by the stack as * init entry point in network interface structure. 
It is also used * by the driver as a hw/sw initialization routine to get to a * consistent state. * **********************************************************************/ static void ixgb_init_locked(struct adapter *adapter) { struct ifnet *ifp; INIT_DEBUGOUT("ixgb_init: begin"); IXGB_LOCK_ASSERT(adapter); ixgb_stop(adapter); /* Get the latest MAC address; the user can use a LAA */ bcopy(IF_LLADDR(adapter->ifp), adapter->hw.curr_mac_addr, IXGB_ETH_LENGTH_OF_ADDRESS); /* Initialize the hardware */ if (ixgb_hardware_init(adapter)) { printf("ixgb%d: Unable to initialize the hardware\n", adapter->unit); return; } ixgb_enable_vlans(adapter); /* Prepare transmit descriptors and buffers */ if (ixgb_setup_transmit_structures(adapter)) { printf("ixgb%d: Could not setup transmit structures\n", adapter->unit); ixgb_stop(adapter); return; } ixgb_initialize_transmit_unit(adapter); /* Setup Multicast table */ ixgb_set_multi(adapter); /* Prepare receive descriptors and buffers */ if (ixgb_setup_receive_structures(adapter)) { printf("ixgb%d: Could not setup receive structures\n", adapter->unit); ixgb_stop(adapter); return; } ixgb_initialize_receive_unit(adapter); /* Don't lose promiscuous settings */ ixgb_set_promisc(adapter); ifp = adapter->ifp; ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist = IXGB_CHECKSUM_FEATURES; else ifp->if_hwassist = 0; /* Enable jumbo frames */ if (ifp->if_mtu > ETHERMTU) { uint32_t temp_reg; IXGB_WRITE_REG(&adapter->hw, MFS, adapter->hw.max_frame_size << IXGB_MFS_SHIFT); temp_reg = IXGB_READ_REG(&adapter->hw, CTRL0); temp_reg |= IXGB_CTRL0_JFE; IXGB_WRITE_REG(&adapter->hw, CTRL0, temp_reg); } callout_reset(&adapter->timer, 2 * hz, ixgb_local_timer, adapter); ixgb_clear_hw_cntrs(&adapter->hw); #ifdef DEVICE_POLLING /* * Only disable interrupts if we are polling; make sure they are on * otherwise.
*/ if (ifp->if_capenable & IFCAP_POLLING) ixgb_disable_intr(adapter); else #endif ixgb_enable_intr(adapter); return; } static void ixgb_init(void *arg) { struct adapter *adapter = arg; IXGB_LOCK(adapter); ixgb_init_locked(adapter); IXGB_UNLOCK(adapter); return; } #ifdef DEVICE_POLLING -static void +static int ixgb_poll_locked(struct ifnet * ifp, enum poll_cmd cmd, int count) { struct adapter *adapter = ifp->if_softc; u_int32_t reg_icr; + int rx_npkts; IXGB_LOCK_ASSERT(adapter); if (cmd == POLL_AND_CHECK_STATUS) { reg_icr = IXGB_READ_REG(&adapter->hw, ICR); if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) { callout_stop(&adapter->timer); ixgb_check_for_link(&adapter->hw); ixgb_print_link_status(adapter); callout_reset(&adapter->timer, 2 * hz, ixgb_local_timer, adapter); } } - ixgb_process_receive_interrupts(adapter, count); + rx_npkts = ixgb_process_receive_interrupts(adapter, count); ixgb_clean_transmit_interrupts(adapter); if (ifp->if_snd.ifq_head != NULL) ixgb_start_locked(ifp); + return (rx_npkts); } -static void +static int ixgb_poll(struct ifnet * ifp, enum poll_cmd cmd, int count) { struct adapter *adapter = ifp->if_softc; + int rx_npkts = 0; IXGB_LOCK(adapter); if (ifp->if_drv_flags & IFF_DRV_RUNNING) - ixgb_poll_locked(ifp, cmd, count); + rx_npkts = ixgb_poll_locked(ifp, cmd, count); IXGB_UNLOCK(adapter); + return (rx_npkts); } #endif /* DEVICE_POLLING */ /********************************************************************* * * Interrupt Service routine * **********************************************************************/ static void ixgb_intr(void *arg) { u_int32_t loop_cnt = IXGB_MAX_INTR; u_int32_t reg_icr; struct ifnet *ifp; struct adapter *adapter = arg; boolean_t rxdmt0 = FALSE; IXGB_LOCK(adapter); ifp = adapter->ifp; #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { IXGB_UNLOCK(adapter); return; } #endif reg_icr = IXGB_READ_REG(&adapter->hw, ICR); if (reg_icr == 0) { IXGB_UNLOCK(adapter); return; } if (reg_icr & IXGB_INT_RXDMT0) rxdmt0 = TRUE; #ifdef _SV_ if (reg_icr & IXGB_INT_RXDMT0) adapter->sv_stats.icr_rxdmt0++; if (reg_icr & IXGB_INT_RXO) adapter->sv_stats.icr_rxo++; if (reg_icr & IXGB_INT_RXT0) adapter->sv_stats.icr_rxt0++; if (reg_icr & IXGB_INT_TXDW) adapter->sv_stats.icr_TXDW++; #endif /* _SV_ */ /* Link status change */ if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) { callout_stop(&adapter->timer); ixgb_check_for_link(&adapter->hw); ixgb_print_link_status(adapter); callout_reset(&adapter->timer, 2 * hz, ixgb_local_timer, adapter); } while (loop_cnt > 0) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ixgb_process_receive_interrupts(adapter, -1); ixgb_clean_transmit_interrupts(adapter); } loop_cnt--; } if (rxdmt0 && adapter->raidc) { IXGB_WRITE_REG(&adapter->hw, IMC, IXGB_INT_RXDMT0); IXGB_WRITE_REG(&adapter->hw, IMS, IXGB_INT_RXDMT0); } if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_snd.ifq_head != NULL) ixgb_start_locked(ifp); IXGB_UNLOCK(adapter); return; } /********************************************************************* * * Media Ioctl callback * * This routine is called whenever the user queries the status of * the interface using ifconfig. 
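 * (For example, "ifconfig ixgb0" issues a SIOCGIFMEDIA ioctl;
 * ixgb_ioctl() hands it to ifmedia_ioctl(), which invokes this
 * callback registered in ixgb_setup_interface().)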
* **********************************************************************/ static void ixgb_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) { struct adapter *adapter = ifp->if_softc; INIT_DEBUGOUT("ixgb_media_status: begin"); ixgb_check_for_link(&adapter->hw); ixgb_print_link_status(adapter); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!adapter->hw.link_up) return; ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; return; } /********************************************************************* * * Media Ioctl callback * * This routine is called when the user changes speed/duplex using * the media/mediaopt option with ifconfig. * **********************************************************************/ static int ixgb_media_change(struct ifnet * ifp) { struct adapter *adapter = ifp->if_softc; struct ifmedia *ifm = &adapter->media; INIT_DEBUGOUT("ixgb_media_change: begin"); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); return (0); } /********************************************************************* * * This routine maps the mbufs to tx descriptors. * * return 0 on success, positive on failure **********************************************************************/ static int ixgb_encap(struct adapter * adapter, struct mbuf * m_head) { u_int8_t txd_popts; int i, j, error, nsegs; #if __FreeBSD_version < 500000 struct ifvlan *ifv = NULL; #elif __FreeBSD_version < 700000 struct m_tag *mtag = NULL; /* declared here so the VLAN_OUTPUT_TAG() branch below compiles */ #endif bus_dma_segment_t segs[IXGB_MAX_SCATTER]; bus_dmamap_t map; struct ixgb_buffer *tx_buffer = NULL; struct ixgb_tx_desc *current_tx_desc = NULL; struct ifnet *ifp = adapter->ifp; /* * Force a cleanup if the number of TX descriptors available hits the * threshold */ if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) { ixgb_clean_transmit_interrupts(adapter); } if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) { adapter->no_tx_desc_avail1++; return (ENOBUFS); } /* * Map the packet for DMA.
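 * bus_dmamap_load_mbuf_sg() below walks the mbuf chain and fills
 * segs[] with up to IXGB_MAX_SCATTER physical segments; the loop
 * that follows converts each segment into one tx descriptor.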
*/ if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &map)) { adapter->no_tx_map_avail++; return (ENOMEM); } error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, m_head, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { adapter->no_tx_dma_setup++; printf("ixgb%d: ixgb_encap: bus_dmamap_load_mbuf failed; " "error %u\n", adapter->unit, error); bus_dmamap_destroy(adapter->txtag, map); return (error); } KASSERT(nsegs != 0, ("ixgb_encap: empty packet")); if (nsegs > adapter->num_tx_desc_avail) { adapter->no_tx_desc_avail2++; bus_dmamap_destroy(adapter->txtag, map); return (ENOBUFS); } if (ifp->if_hwassist > 0) { ixgb_transmit_checksum_setup(adapter, m_head, &txd_popts); } else txd_popts = 0; /* Find out if we are in vlan mode */ #if __FreeBSD_version < 500000 if ((m_head->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) && m_head->m_pkthdr.rcvif != NULL && m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN) ifv = m_head->m_pkthdr.rcvif->if_softc; #elif __FreeBSD_version < 700000 mtag = VLAN_OUTPUT_TAG(ifp, m_head); #endif i = adapter->next_avail_tx_desc; for (j = 0; j < nsegs; j++) { tx_buffer = &adapter->tx_buffer_area[i]; current_tx_desc = &adapter->tx_desc_base[i]; current_tx_desc->buff_addr = htole64(segs[j].ds_addr); current_tx_desc->cmd_type_len = (adapter->txd_cmd | segs[j].ds_len); current_tx_desc->popts = txd_popts; if (++i == adapter->num_tx_desc) i = 0; tx_buffer->m_head = NULL; } adapter->num_tx_desc_avail -= nsegs; adapter->next_avail_tx_desc = i; #if __FreeBSD_version < 500000 if (ifv != NULL) { /* Set the vlan id */ current_tx_desc->vlan = ifv->ifv_tag; #elif __FreeBSD_version < 700000 if (mtag != NULL) { /* Set the vlan id */ current_tx_desc->vlan = VLAN_TAG_VALUE(mtag); #else if (m_head->m_flags & M_VLANTAG) { current_tx_desc->vlan = m_head->m_pkthdr.ether_vtag; #endif /* Tell hardware to add tag */ current_tx_desc->cmd_type_len |= IXGB_TX_DESC_CMD_VLE; } tx_buffer->m_head = m_head; tx_buffer->map = map; bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE); /* * Last Descriptor of Packet needs End Of Packet (EOP) */ current_tx_desc->cmd_type_len |= (IXGB_TX_DESC_CMD_EOP); /* * Advance the Transmit Descriptor Tail (Tdt); this tells the hardware * that this frame is available to transmit. */ IXGB_WRITE_REG(&adapter->hw, TDT, i); return (0); } static void ixgb_set_promisc(struct adapter * adapter) { u_int32_t reg_rctl; struct ifnet *ifp = adapter->ifp; reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL); if (ifp->if_flags & IFF_PROMISC) { reg_rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE); IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl); } else if (ifp->if_flags & IFF_ALLMULTI) { reg_rctl |= IXGB_RCTL_MPE; reg_rctl &= ~IXGB_RCTL_UPE; IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl); } return; } static void ixgb_disable_promisc(struct adapter * adapter) { u_int32_t reg_rctl; reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL); reg_rctl &= (~IXGB_RCTL_UPE); reg_rctl &= (~IXGB_RCTL_MPE); IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl); return; } /********************************************************************* * Multicast Update * * This routine is called whenever the multicast address list is updated.
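 * If the list grows beyond MAX_NUM_MULTICAST_ADDRESSES, the code
 * below gives up on the hardware filter and sets RCTL.MPE instead,
 * accepting all multicast frames and leaving the filtering to the
 * stack.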
* **********************************************************************/ static void ixgb_set_multi(struct adapter * adapter) { u_int32_t reg_rctl = 0; u_int8_t mta[MAX_NUM_MULTICAST_ADDRESSES * IXGB_ETH_LENGTH_OF_ADDRESS]; struct ifmultiaddr *ifma; int mcnt = 0; struct ifnet *ifp = adapter->ifp; IOCTL_DEBUGOUT("ixgb_set_multi: begin"); IF_ADDR_LOCK(ifp); #if __FreeBSD_version < 500000 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { #else TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { #endif if (ifma->ifma_addr->sa_family != AF_LINK) continue; bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), &mta[mcnt * IXGB_ETH_LENGTH_OF_ADDRESS], IXGB_ETH_LENGTH_OF_ADDRESS); mcnt++; } IF_ADDR_UNLOCK(ifp); if (mcnt > MAX_NUM_MULTICAST_ADDRESSES) { reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL); reg_rctl |= IXGB_RCTL_MPE; IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl); } else ixgb_mc_addr_list_update(&adapter->hw, mta, mcnt, 0); return; } /********************************************************************* * Timer routine * * This routine checks for link status and updates statistics. * **********************************************************************/ static void ixgb_local_timer(void *arg) { struct ifnet *ifp; struct adapter *adapter = arg; ifp = adapter->ifp; IXGB_LOCK(adapter); ixgb_check_for_link(&adapter->hw); ixgb_print_link_status(adapter); ixgb_update_stats_counters(adapter); if (ixgb_display_debug_stats && ifp->if_drv_flags & IFF_DRV_RUNNING) { ixgb_print_hw_stats(adapter); } callout_reset(&adapter->timer, 2 * hz, ixgb_local_timer, adapter); IXGB_UNLOCK(adapter); return; } static void ixgb_print_link_status(struct adapter * adapter) { if (adapter->hw.link_up) { if (!adapter->link_active) { printf("ixgb%d: Link is up %d Mbps %s \n", adapter->unit, 10000, "Full Duplex"); adapter->link_active = 1; } } else { if (adapter->link_active) { printf("ixgb%d: Link is Down \n", adapter->unit); adapter->link_active = 0; } } return; } /********************************************************************* * * This routine disables all traffic on the adapter by issuing a * global reset on the MAC and deallocates TX/RX buffers. * **********************************************************************/ static void ixgb_stop(void *arg) { struct ifnet *ifp; struct adapter *adapter = arg; ifp = adapter->ifp; IXGB_LOCK_ASSERT(adapter); INIT_DEBUGOUT("ixgb_stop: begin\n"); ixgb_disable_intr(adapter); adapter->hw.adapter_stopped = FALSE; ixgb_adapter_stop(&adapter->hw); callout_stop(&adapter->timer); ixgb_free_transmit_structures(adapter); ixgb_free_receive_structures(adapter); /* Tell the stack that the interface is no longer active */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); return; } /********************************************************************* * * Determine hardware revision. 
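 * Bus mastering and memory-space access must both be enabled in the
 * PCI command word before the MMIO register accesses elsewhere in
 * this driver can work; the code below re-enables them if the boot
 * firmware left them off.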
* **********************************************************************/ static void ixgb_identify_hardware(struct adapter * adapter) { device_t dev = adapter->dev; /* Make sure our PCI config space has the necessary stuff set */ adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) && (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) { printf("ixgb%d: Memory Access and/or Bus Master bits were not set!\n", adapter->unit); adapter->hw.pci_cmd_word |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN); pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2); } /* Save off the information about this board */ adapter->hw.vendor_id = pci_get_vendor(dev); adapter->hw.device_id = pci_get_device(dev); adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2); adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); /* Set MacType, etc. based on this PCI info */ switch (adapter->hw.device_id) { case IXGB_DEVICE_ID_82597EX: case IXGB_DEVICE_ID_82597EX_SR: adapter->hw.mac_type = ixgb_82597; break; default: INIT_DEBUGOUT1("Unknown device if 0x%x", adapter->hw.device_id); printf("ixgb%d: unsupported device id 0x%x\n", adapter->unit, adapter->hw.device_id); } return; } static int ixgb_allocate_pci_resources(struct adapter * adapter) { int rid; device_t dev = adapter->dev; rid = IXGB_MMBA; adapter->res_memory = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1, RF_ACTIVE); if (!(adapter->res_memory)) { printf("ixgb%d: Unable to allocate bus resource: memory\n", adapter->unit); return (ENXIO); } adapter->osdep.mem_bus_space_tag = rman_get_bustag(adapter->res_memory); adapter->osdep.mem_bus_space_handle = rman_get_bushandle(adapter->res_memory); adapter->hw.hw_addr = (uint8_t *) & adapter->osdep.mem_bus_space_handle; rid = 0x0; adapter->res_interrupt = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE); if (!(adapter->res_interrupt)) { printf("ixgb%d: Unable to allocate bus resource: interrupt\n", adapter->unit); return (ENXIO); } if (bus_setup_intr(dev, adapter->res_interrupt, INTR_TYPE_NET | INTR_MPSAFE, NULL, (void (*) (void *))ixgb_intr, adapter, &adapter->int_handler_tag)) { printf("ixgb%d: Error registering interrupt handler!\n", adapter->unit); return (ENXIO); } adapter->hw.back = &adapter->osdep; return (0); } static void ixgb_free_pci_resources(struct adapter * adapter) { device_t dev = adapter->dev; if (adapter->res_interrupt != NULL) { bus_teardown_intr(dev, adapter->res_interrupt, adapter->int_handler_tag); bus_release_resource(dev, SYS_RES_IRQ, 0, adapter->res_interrupt); } if (adapter->res_memory != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, IXGB_MMBA, adapter->res_memory); } if (adapter->res_ioport != NULL) { bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid, adapter->res_ioport); } return; } /********************************************************************* * * Initialize the hardware to a configuration as specified by the * adapter structure. The controller is reset, the EEPROM is * verified, the MAC address is set, then the shared initialization * routines are called. 
* **********************************************************************/ static int ixgb_hardware_init(struct adapter * adapter) { /* Issue a global reset */ adapter->hw.adapter_stopped = FALSE; ixgb_adapter_stop(&adapter->hw); /* Make sure we have a good EEPROM before we read from it */ if (!ixgb_validate_eeprom_checksum(&adapter->hw)) { printf("ixgb%d: The EEPROM Checksum Is Not Valid\n", adapter->unit); return (EIO); } if (!ixgb_init_hw(&adapter->hw)) { printf("ixgb%d: Hardware Initialization Failed", adapter->unit); return (EIO); } return (0); } /********************************************************************* * * Setup networking device structure and register an interface. * **********************************************************************/ static void ixgb_setup_interface(device_t dev, struct adapter * adapter) { struct ifnet *ifp; INIT_DEBUGOUT("ixgb_setup_interface: begin"); ifp = adapter->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) panic("%s: can not if_alloc()\n", device_get_nameunit(dev)); #if __FreeBSD_version >= 502000 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); #else ifp->if_unit = adapter->unit; ifp->if_name = "ixgb"; #endif ifp->if_mtu = ETHERMTU; ifp->if_baudrate = 1000000000; ifp->if_init = ixgb_init; ifp->if_softc = adapter; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = ixgb_ioctl; ifp->if_start = ixgb_start; ifp->if_watchdog = ixgb_watchdog; ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1; #if __FreeBSD_version < 500000 ether_ifattach(ifp, ETHER_BPF_SUPPORTED); #else ether_ifattach(ifp, adapter->hw.curr_mac_addr); #endif ifp->if_capabilities = IFCAP_HWCSUM; /* * Tell the upper layer(s) we support long frames. */ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); #if __FreeBSD_version >= 500000 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; #endif ifp->if_capenable = ifp->if_capabilities; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif /* * Specify the media types supported by this adapter and register * callbacks to update media and link information */ ifmedia_init(&adapter->media, IFM_IMASK, ixgb_media_change, ixgb_media_status); ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL); ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); return; } /******************************************************************** * Manage DMA'able memory. 
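 * The helpers below follow the usual three-step busdma sequence:
 * bus_dma_tag_create() describes the constraints, bus_dmamem_alloc()
 * obtains the memory, and bus_dmamap_load() fires ixgb_dmamap_cb()
 * to record the bus address.  A minimal usage sketch (the 'ring'
 * variable is hypothetical):
 *
 *	struct ixgb_dma_alloc ring;
 *
 *	if (ixgb_dma_malloc(adapter, size, &ring, BUS_DMA_NOWAIT) == 0) {
 *		... use ring.dma_vaddr (kernel VA) and ring.dma_paddr ...
 *		ixgb_dma_free(adapter, &ring);
 *	}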
*******************************************************************/ static void ixgb_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error) { if (error) return; *(bus_addr_t *) arg = segs->ds_addr; return; } static int ixgb_dma_malloc(struct adapter * adapter, bus_size_t size, struct ixgb_dma_alloc * dma, int mapflags) { int r; r = bus_dma_tag_create(NULL, /* parent */ PAGE_SIZE, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ #if __FreeBSD_version >= 502000 NULL, /* lockfunc */ NULL, /* lockfuncarg */ #endif &dma->dma_tag); if (r != 0) { printf("ixgb%d: ixgb_dma_malloc: bus_dma_tag_create failed; " "error %u\n", adapter->unit, r); goto fail_0; } r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr, BUS_DMA_NOWAIT, &dma->dma_map); if (r != 0) { printf("ixgb%d: ixgb_dma_malloc: bus_dmamem_alloc failed; " "error %u\n", adapter->unit, r); goto fail_1; } r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size, ixgb_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT); if (r != 0) { printf("ixgb%d: ixgb_dma_malloc: bus_dmamap_load failed; " "error %u\n", adapter->unit, r); goto fail_2; } dma->dma_size = size; return (0); fail_2: bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); fail_1: bus_dma_tag_destroy(dma->dma_tag); fail_0: dma->dma_map = NULL; dma->dma_tag = NULL; return (r); } static void ixgb_dma_free(struct adapter * adapter, struct ixgb_dma_alloc * dma) { bus_dmamap_unload(dma->dma_tag, dma->dma_map); bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); bus_dma_tag_destroy(dma->dma_tag); } /********************************************************************* * * Allocate memory for tx_buffer structures. The tx_buffer stores all * the information needed to transmit a packet on the wire. * **********************************************************************/ static int ixgb_allocate_transmit_structures(struct adapter * adapter) { if (!(adapter->tx_buffer_area = (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) * adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) { printf("ixgb%d: Unable to allocate tx_buffer memory\n", adapter->unit); return ENOMEM; } bzero(adapter->tx_buffer_area, sizeof(struct ixgb_buffer) * adapter->num_tx_desc); return 0; } /********************************************************************* * * Allocate and initialize transmit structures. * **********************************************************************/ static int ixgb_setup_transmit_structures(struct adapter * adapter) { /* * Setup DMA descriptor areas. 
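 * The tx tag created below lets a single frame span up to
 * IXGB_MAX_SCATTER segments of at most MCLBYTES each, matching the
 * segs[] array sized in ixgb_encap().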
*/ if (bus_dma_tag_create(NULL, /* parent */ PAGE_SIZE, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES * IXGB_MAX_SCATTER, /* maxsize */ IXGB_MAX_SCATTER, /* nsegments */ MCLBYTES, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ #if __FreeBSD_version >= 502000 NULL, /* lockfunc */ NULL, /* lockfuncarg */ #endif &adapter->txtag)) { printf("ixgb%d: Unable to allocate TX DMA tag\n", adapter->unit); return (ENOMEM); } if (ixgb_allocate_transmit_structures(adapter)) return ENOMEM; bzero((void *)adapter->tx_desc_base, (sizeof(struct ixgb_tx_desc)) * adapter->num_tx_desc); adapter->next_avail_tx_desc = 0; adapter->oldest_used_tx_desc = 0; /* Set number of descriptors available */ adapter->num_tx_desc_avail = adapter->num_tx_desc; /* Set checksum context */ adapter->active_checksum_context = OFFLOAD_NONE; return 0; } /********************************************************************* * * Enable transmit unit. * **********************************************************************/ static void ixgb_initialize_transmit_unit(struct adapter * adapter) { u_int32_t reg_tctl; u_int64_t tdba = adapter->txdma.dma_paddr; /* Setup the Base and Length of the Tx Descriptor Ring */ IXGB_WRITE_REG(&adapter->hw, TDBAL, (tdba & 0x00000000ffffffffULL)); IXGB_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32)); IXGB_WRITE_REG(&adapter->hw, TDLEN, adapter->num_tx_desc * sizeof(struct ixgb_tx_desc)); /* Setup the HW Tx Head and Tail descriptor pointers */ IXGB_WRITE_REG(&adapter->hw, TDH, 0); IXGB_WRITE_REG(&adapter->hw, TDT, 0); HW_DEBUGOUT2("Base = %x, Length = %x\n", IXGB_READ_REG(&adapter->hw, TDBAL), IXGB_READ_REG(&adapter->hw, TDLEN)); IXGB_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay); /* Program the Transmit Control Register */ reg_tctl = IXGB_READ_REG(&adapter->hw, TCTL); reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE; IXGB_WRITE_REG(&adapter->hw, TCTL, reg_tctl); /* Setup Transmit Descriptor Settings for this adapter */ adapter->txd_cmd = IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS; if (adapter->tx_int_delay > 0) adapter->txd_cmd |= IXGB_TX_DESC_CMD_IDE; return; } /********************************************************************* * * Free all transmit related data structures. * **********************************************************************/ static void ixgb_free_transmit_structures(struct adapter * adapter) { struct ixgb_buffer *tx_buffer; int i; INIT_DEBUGOUT("free_transmit_structures: begin"); if (adapter->tx_buffer_area != NULL) { tx_buffer = adapter->tx_buffer_area; for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) { if (tx_buffer->m_head != NULL) { bus_dmamap_unload(adapter->txtag, tx_buffer->map); bus_dmamap_destroy(adapter->txtag, tx_buffer->map); m_freem(tx_buffer->m_head); } tx_buffer->m_head = NULL; } } if (adapter->tx_buffer_area != NULL) { free(adapter->tx_buffer_area, M_DEVBUF); adapter->tx_buffer_area = NULL; } if (adapter->txtag != NULL) { bus_dma_tag_destroy(adapter->txtag); adapter->txtag = NULL; } return; } /********************************************************************* * * The offload context needs to be set when we transfer the first * packet of a particular protocol (TCP/UDP). We change the * context only if the protocol type changes. 
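 * For example, a burst of TCP packets pays for one context
 * descriptor up front; the first UDP packet that follows spends one
 * more descriptor to switch the context, after which further UDP
 * packets are again free of that overhead.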
* **********************************************************************/ static void ixgb_transmit_checksum_setup(struct adapter * adapter, struct mbuf * mp, u_int8_t * txd_popts) { struct ixgb_context_desc *TXD; struct ixgb_buffer *tx_buffer; int curr_txd; if (mp->m_pkthdr.csum_flags) { if (mp->m_pkthdr.csum_flags & CSUM_TCP) { *txd_popts = IXGB_TX_DESC_POPTS_TXSM; if (adapter->active_checksum_context == OFFLOAD_TCP_IP) return; else adapter->active_checksum_context = OFFLOAD_TCP_IP; } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) { *txd_popts = IXGB_TX_DESC_POPTS_TXSM; if (adapter->active_checksum_context == OFFLOAD_UDP_IP) return; else adapter->active_checksum_context = OFFLOAD_UDP_IP; } else { *txd_popts = 0; return; } } else { *txd_popts = 0; return; } /* * If we reach this point, the checksum offload context needs to be * reset. */ curr_txd = adapter->next_avail_tx_desc; tx_buffer = &adapter->tx_buffer_area[curr_txd]; TXD = (struct ixgb_context_desc *) & adapter->tx_desc_base[curr_txd]; TXD->tucss = ENET_HEADER_SIZE + sizeof(struct ip); TXD->tucse = 0; TXD->mss = 0; if (adapter->active_checksum_context == OFFLOAD_TCP_IP) { TXD->tucso = ENET_HEADER_SIZE + sizeof(struct ip) + offsetof(struct tcphdr, th_sum); } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) { TXD->tucso = ENET_HEADER_SIZE + sizeof(struct ip) + offsetof(struct udphdr, uh_sum); } TXD->cmd_type_len = IXGB_CONTEXT_DESC_CMD_TCP | IXGB_TX_DESC_CMD_RS | IXGB_CONTEXT_DESC_CMD_IDE; tx_buffer->m_head = NULL; if (++curr_txd == adapter->num_tx_desc) curr_txd = 0; adapter->num_tx_desc_avail--; adapter->next_avail_tx_desc = curr_txd; return; } /********************************************************************** * * Examine each tx_buffer in the used queue. If the hardware is done * processing the packet then free associated resources. The * tx_buffer is put back on the free queue. * **********************************************************************/ static void ixgb_clean_transmit_interrupts(struct adapter * adapter) { int i, num_avail; struct ixgb_buffer *tx_buffer; struct ixgb_tx_desc *tx_desc; IXGB_LOCK_ASSERT(adapter); if (adapter->num_tx_desc_avail == adapter->num_tx_desc) return; #ifdef _SV_ adapter->clean_tx_interrupts++; #endif num_avail = adapter->num_tx_desc_avail; i = adapter->oldest_used_tx_desc; tx_buffer = &adapter->tx_buffer_area[i]; tx_desc = &adapter->tx_desc_base[i]; while (tx_desc->status & IXGB_TX_DESC_STATUS_DD) { tx_desc->status = 0; num_avail++; if (tx_buffer->m_head) { bus_dmamap_sync(adapter->txtag, tx_buffer->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(adapter->txtag, tx_buffer->map); bus_dmamap_destroy(adapter->txtag, tx_buffer->map); m_freem(tx_buffer->m_head); tx_buffer->m_head = NULL; } if (++i == adapter->num_tx_desc) i = 0; tx_buffer = &adapter->tx_buffer_area[i]; tx_desc = &adapter->tx_desc_base[i]; } adapter->oldest_used_tx_desc = i; /* * If we have enough room, clear IFF_DRV_OACTIVE to tell the stack that * it is OK to send packets. If there are no pending descriptors, * clear the timeout. Otherwise, if some descriptors have been freed, * restart the timeout. 
*/ if (num_avail > IXGB_TX_CLEANUP_THRESHOLD) { struct ifnet *ifp = adapter->ifp; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (num_avail == adapter->num_tx_desc) ifp->if_timer = 0; else if (num_avail == adapter->num_tx_desc_avail) ifp->if_timer = IXGB_TX_TIMEOUT; } adapter->num_tx_desc_avail = num_avail; return; } /********************************************************************* * * Get a buffer from system mbuf buffer pool. * **********************************************************************/ static int ixgb_get_buf(int i, struct adapter * adapter, struct mbuf * nmp) { register struct mbuf *mp = nmp; struct ixgb_buffer *rx_buffer; struct ifnet *ifp; bus_addr_t paddr; int error; ifp = adapter->ifp; if (mp == NULL) { mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (mp == NULL) { adapter->mbuf_alloc_failed++; return (ENOBUFS); } mp->m_len = mp->m_pkthdr.len = MCLBYTES; } else { mp->m_len = mp->m_pkthdr.len = MCLBYTES; mp->m_data = mp->m_ext.ext_buf; mp->m_next = NULL; } if (ifp->if_mtu <= ETHERMTU) { m_adj(mp, ETHER_ALIGN); } rx_buffer = &adapter->rx_buffer_area[i]; /* * Using memory from the mbuf cluster pool, invoke the bus_dma * machinery to arrange the memory mapping. */ error = bus_dmamap_load(adapter->rxtag, rx_buffer->map, mtod(mp, void *), mp->m_len, ixgb_dmamap_cb, &paddr, 0); if (error) { m_free(mp); return (error); } rx_buffer->m_head = mp; adapter->rx_desc_base[i].buff_addr = htole64(paddr); bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD); return (0); } /********************************************************************* * * Allocate memory for rx_buffer structures. Since we use one * rx_buffer per received packet, the maximum number of rx_buffer's * that we'll need is equal to the number of receive descriptors * that we've allocated. 
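 * i.e. num_rx_desc entries, each pairing one mbuf cluster with a
 * bus_dmamap_t created from the rx tag below.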
* **********************************************************************/ static int ixgb_allocate_receive_structures(struct adapter * adapter) { int i, error; struct ixgb_buffer *rx_buffer; if (!(adapter->rx_buffer_area = (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) * adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) { printf("ixgb%d: Unable to allocate rx_buffer memory\n", adapter->unit); return (ENOMEM); } bzero(adapter->rx_buffer_area, sizeof(struct ixgb_buffer) * adapter->num_rx_desc); error = bus_dma_tag_create(NULL, /* parent */ PAGE_SIZE, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, /* maxsize */ 1, /* nsegments */ MCLBYTES, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ #if __FreeBSD_version >= 502000 NULL, /* lockfunc */ NULL, /* lockfuncarg */ #endif &adapter->rxtag); if (error != 0) { printf("ixgb%d: ixgb_allocate_receive_structures: " "bus_dma_tag_create failed; error %u\n", adapter->unit, error); goto fail_0; } rx_buffer = adapter->rx_buffer_area; for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) { error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT, &rx_buffer->map); if (error != 0) { printf("ixgb%d: ixgb_allocate_receive_structures: " "bus_dmamap_create failed; error %u\n", adapter->unit, error); goto fail_1; } } for (i = 0; i < adapter->num_rx_desc; i++) { if (ixgb_get_buf(i, adapter, NULL) == ENOBUFS) { adapter->rx_buffer_area[i].m_head = NULL; adapter->rx_desc_base[i].buff_addr = 0; return (ENOBUFS); } } return (0); fail_1: bus_dma_tag_destroy(adapter->rxtag); fail_0: adapter->rxtag = NULL; free(adapter->rx_buffer_area, M_DEVBUF); adapter->rx_buffer_area = NULL; return (error); } /********************************************************************* * * Allocate and initialize receive structures. * **********************************************************************/ static int ixgb_setup_receive_structures(struct adapter * adapter) { bzero((void *)adapter->rx_desc_base, (sizeof(struct ixgb_rx_desc)) * adapter->num_rx_desc); if (ixgb_allocate_receive_structures(adapter)) return ENOMEM; /* Setup our descriptor pointers */ adapter->next_rx_desc_to_check = 0; adapter->next_rx_desc_to_use = 0; return (0); } /********************************************************************* * * Enable receive unit. 
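 * The ring is primed below with head (RDH) at 0 and tail (RDT) at
 * num_rx_desc - 1; ixgb_process_receive_interrupts() later returns
 * buffers to the hardware by advancing RDT.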
* **********************************************************************/ static void ixgb_initialize_receive_unit(struct adapter * adapter) { u_int32_t reg_rctl; u_int32_t reg_rxcsum; u_int32_t reg_rxdctl; struct ifnet *ifp; u_int64_t rdba = adapter->rxdma.dma_paddr; ifp = adapter->ifp; /* * Make sure receives are disabled while setting up the descriptor * ring */ reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL); IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl & ~IXGB_RCTL_RXEN); /* Set the Receive Delay Timer Register */ IXGB_WRITE_REG(&adapter->hw, RDTR, adapter->rx_int_delay); /* Setup the Base and Length of the Rx Descriptor Ring */ IXGB_WRITE_REG(&adapter->hw, RDBAL, (rdba & 0x00000000ffffffffULL)); IXGB_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32)); IXGB_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc * sizeof(struct ixgb_rx_desc)); /* Setup the HW Rx Head and Tail Descriptor Pointers */ IXGB_WRITE_REG(&adapter->hw, RDH, 0); IXGB_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1); reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT | RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT | RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT; IXGB_WRITE_REG(&adapter->hw, RXDCTL, reg_rxdctl); adapter->raidc = 1; if (adapter->raidc) { uint32_t raidc; uint8_t poll_threshold; #define IXGB_RAIDC_POLL_DEFAULT 120 poll_threshold = ((adapter->num_rx_desc - 1) >> 3); poll_threshold >>= 1; poll_threshold &= 0x3F; raidc = IXGB_RAIDC_EN | IXGB_RAIDC_RXT_GATE | (IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT) | (adapter->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT) | poll_threshold; IXGB_WRITE_REG(&adapter->hw, RAIDC, raidc); } /* Enable Receive Checksum Offload for TCP and UDP ? */ if (ifp->if_capenable & IFCAP_RXCSUM) { reg_rxcsum = IXGB_READ_REG(&adapter->hw, RXCSUM); reg_rxcsum |= IXGB_RXCSUM_TUOFL; IXGB_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum); } /* Setup the Receive Control Register */ reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL); reg_rctl &= ~(3 << IXGB_RCTL_MO_SHIFT); reg_rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | IXGB_RCTL_SECRC | IXGB_RCTL_CFF | (adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT); switch (adapter->rx_buffer_len) { default: case IXGB_RXBUFFER_2048: reg_rctl |= IXGB_RCTL_BSIZE_2048; break; case IXGB_RXBUFFER_4096: reg_rctl |= IXGB_RCTL_BSIZE_4096; break; case IXGB_RXBUFFER_8192: reg_rctl |= IXGB_RCTL_BSIZE_8192; break; case IXGB_RXBUFFER_16384: reg_rctl |= IXGB_RCTL_BSIZE_16384; break; } reg_rctl |= IXGB_RCTL_RXEN; /* Enable Receives */ IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl); return; } /********************************************************************* * * Free receive related data structures. 
* **********************************************************************/ static void ixgb_free_receive_structures(struct adapter * adapter) { struct ixgb_buffer *rx_buffer; int i; INIT_DEBUGOUT("free_receive_structures: begin"); if (adapter->rx_buffer_area != NULL) { rx_buffer = adapter->rx_buffer_area; for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) { if (rx_buffer->map != NULL) { bus_dmamap_unload(adapter->rxtag, rx_buffer->map); bus_dmamap_destroy(adapter->rxtag, rx_buffer->map); } if (rx_buffer->m_head != NULL) m_freem(rx_buffer->m_head); rx_buffer->m_head = NULL; } } if (adapter->rx_buffer_area != NULL) { free(adapter->rx_buffer_area, M_DEVBUF); adapter->rx_buffer_area = NULL; } if (adapter->rxtag != NULL) { bus_dma_tag_destroy(adapter->rxtag); adapter->rxtag = NULL; } return; } /********************************************************************* * * This routine executes in interrupt context. It replenishes * the mbufs in the descriptor and sends data which has been * dma'ed into host memory to upper layer. * * We loop at most count times if count is > 0, or until done if * count < 0. * *********************************************************************/ -static void +static int ixgb_process_receive_interrupts(struct adapter * adapter, int count) { struct ifnet *ifp; struct mbuf *mp; #if __FreeBSD_version < 500000 struct ether_header *eh; #endif int eop = 0; int len; u_int8_t accept_frame = 0; int i; int next_to_use = 0; int eop_desc; + int rx_npkts = 0; /* Pointer to the receive descriptor being examined. */ struct ixgb_rx_desc *current_desc; IXGB_LOCK_ASSERT(adapter); ifp = adapter->ifp; i = adapter->next_rx_desc_to_check; next_to_use = adapter->next_rx_desc_to_use; eop_desc = adapter->next_rx_desc_to_check; current_desc = &adapter->rx_desc_base[i]; if (!((current_desc->status) & IXGB_RX_DESC_STATUS_DD)) { #ifdef _SV_ adapter->no_pkts_avail++; #endif - return; + return (rx_npkts); } while ((current_desc->status & IXGB_RX_DESC_STATUS_DD) && (count != 0)) { mp = adapter->rx_buffer_area[i].m_head; bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map, BUS_DMASYNC_POSTREAD); accept_frame = 1; if (current_desc->status & IXGB_RX_DESC_STATUS_EOP) { count--; eop = 1; } else { eop = 0; } len = current_desc->length; if (current_desc->errors & (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE)) { accept_frame = 0; } if (accept_frame) { /* Assign correct length to the current fragment */ mp->m_len = len; if (adapter->fmp == NULL) { mp->m_pkthdr.len = len; adapter->fmp = mp; /* Store the first mbuf */ adapter->lmp = mp; } else { /* Chain mbuf's together */ mp->m_flags &= ~M_PKTHDR; adapter->lmp->m_next = mp; adapter->lmp = adapter->lmp->m_next; adapter->fmp->m_pkthdr.len += len; } if (eop) { eop_desc = i; adapter->fmp->m_pkthdr.rcvif = ifp; #if __FreeBSD_version < 500000 eh = mtod(adapter->fmp, struct ether_header *); /* Remove ethernet header from mbuf */ m_adj(adapter->fmp, sizeof(struct ether_header)); ixgb_receive_checksum(adapter, current_desc, adapter->fmp); if (current_desc->status & IXGB_RX_DESC_STATUS_VP) VLAN_INPUT_TAG(eh, adapter->fmp, current_desc->special); else ether_input(ifp, eh, adapter->fmp); #else ixgb_receive_checksum(adapter, current_desc, adapter->fmp); #if __FreeBSD_version < 700000 if (current_desc->status & IXGB_RX_DESC_STATUS_VP) VLAN_INPUT_TAG(ifp, adapter->fmp, current_desc->special); #else if (current_desc->status & IXGB_RX_DESC_STATUS_VP) { adapter->fmp->m_pkthdr.ether_vtag = 
current_desc->special; adapter->fmp->m_flags |= M_VLANTAG; } #endif if (adapter->fmp != NULL) { IXGB_UNLOCK(adapter); (*ifp->if_input) (ifp, adapter->fmp); IXGB_LOCK(adapter); + rx_npkts++; } #endif adapter->fmp = NULL; adapter->lmp = NULL; } adapter->rx_buffer_area[i].m_head = NULL; } else { adapter->dropped_pkts++; if (adapter->fmp != NULL) m_freem(adapter->fmp); adapter->fmp = NULL; adapter->lmp = NULL; } /* Zero out the receive descriptors status */ current_desc->status = 0; /* Advance our pointers to the next descriptor */ if (++i == adapter->num_rx_desc) { i = 0; current_desc = adapter->rx_desc_base; } else current_desc++; } adapter->next_rx_desc_to_check = i; if (--i < 0) i = (adapter->num_rx_desc - 1); /* * 82597EX: Workaround for redundant write back in receive descriptor ring (causes * memory corruption). Avoid using and re-submitting the most recently received RX * descriptor back to hardware. * * if(Last written back descriptor == EOP bit set descriptor) * then avoid re-submitting the most recently received RX descriptor * back to hardware. * if(Last written back descriptor != EOP bit set descriptor) * then avoid re-submitting the most recently received RX descriptors * till last EOP bit set descriptor. */ if (eop_desc != i) { if (++eop_desc == adapter->num_rx_desc) eop_desc = 0; i = eop_desc; } /* Replenish the descriptors with new mbufs till last EOP bit set descriptor */ while (next_to_use != i) { current_desc = &adapter->rx_desc_base[next_to_use]; if ((current_desc->errors & (IXGB_RX_DESC_ERRORS_CE | IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P | IXGB_RX_DESC_ERRORS_RXE))) { mp = adapter->rx_buffer_area[next_to_use].m_head; ixgb_get_buf(next_to_use, adapter, mp); } else { if (ixgb_get_buf(next_to_use, adapter, NULL) == ENOBUFS) break; } /* Advance our pointers to the next descriptor */ if (++next_to_use == adapter->num_rx_desc) { next_to_use = 0; current_desc = adapter->rx_desc_base; } else current_desc++; } adapter->next_rx_desc_to_use = next_to_use; if (--next_to_use < 0) next_to_use = (adapter->num_rx_desc - 1); /* Advance the IXGB's Receive Queue #0 "Tail Pointer" */ IXGB_WRITE_REG(&adapter->hw, RDT, next_to_use); - return; + return (rx_npkts); } /********************************************************************* * * Verify that the hardware indicated that the checksum is valid. * Inform the stack about the status of the checksum so that the stack * doesn't spend time verifying it again. * *********************************************************************/ static void ixgb_receive_checksum(struct adapter * adapter, struct ixgb_rx_desc * rx_desc, struct mbuf * mp) { if (rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) { mp->m_pkthdr.csum_flags = 0; return; } if (rx_desc->status & IXGB_RX_DESC_STATUS_IPCS) { /* Did it pass? */ if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_IPE)) { /* IP Checksum Good */ mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED; mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; } else { mp->m_pkthdr.csum_flags = 0; } } if (rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS) { /* Did it pass?
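If so, hand the stack a pre-verified result: setting CSUM_DATA_VALID |
CSUM_PSEUDO_HDR with csum_data = 0xffff below tells the stack that the
TCP/UDP checksum (pseudo-header included) already checked out, so it
skips its own verification pass.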
*/ if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE)) { mp->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); mp->m_pkthdr.csum_data = htons(0xffff); } } return; } static void ixgb_enable_vlans(struct adapter * adapter) { uint32_t ctrl; ctrl = IXGB_READ_REG(&adapter->hw, CTRL0); ctrl |= IXGB_CTRL0_VME; IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl); return; } static void ixgb_enable_intr(struct adapter * adapter) { IXGB_WRITE_REG(&adapter->hw, IMS, (IXGB_INT_RXT0 | IXGB_INT_TXDW | IXGB_INT_RXDMT0 | IXGB_INT_LSC | IXGB_INT_RXO)); return; } static void ixgb_disable_intr(struct adapter * adapter) { IXGB_WRITE_REG(&adapter->hw, IMC, ~0); return; } void ixgb_write_pci_cfg(struct ixgb_hw * hw, uint32_t reg, uint16_t * value) { pci_write_config(((struct ixgb_osdep *) hw->back)->dev, reg, *value, 2); } /********************************************************************** * * Update the board statistics counters. * **********************************************************************/ static void ixgb_update_stats_counters(struct adapter * adapter) { struct ifnet *ifp; adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS); adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL); adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH); adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL); adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH); adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL); adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH); adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL); adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH); adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC); adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC); adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC); adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC); adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC); adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC); adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC); adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC); adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL); adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH); adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL); adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH); adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC); adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC); adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC); adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL); adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH); adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL); adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH); adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL); adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH); adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL); adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH); adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C); adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL); adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH); adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL); adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH); adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL); adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH); adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL); adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH); adapter->stats.jprcl += 
IXGB_READ_REG(&adapter->hw, JPRCL); adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH); adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC); adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC); adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC); adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL); adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH); adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL); adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH); adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL); adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH); adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC); adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC); adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC); adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC); adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC); adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC); adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC); ifp = adapter->ifp; /* Fill out the OS statistics structure */ ifp->if_ipackets = adapter->stats.gprcl; ifp->if_opackets = adapter->stats.gptcl; ifp->if_ibytes = adapter->stats.gorcl; ifp->if_obytes = adapter->stats.gotcl; ifp->if_imcasts = adapter->stats.mprcl; ifp->if_collisions = 0; /* Rx Errors */ ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.crcerrs + adapter->stats.rnbc + adapter->stats.mpc + adapter->stats.rlec; } /********************************************************************** * * This routine is called only when ixgb_display_debug_stats is enabled. * This routine provides a way to take a look at important statistics * maintained by the driver and hardware. * **********************************************************************/ static void ixgb_print_hw_stats(struct adapter * adapter) { char buf_speed[100], buf_type[100]; ixgb_bus_speed bus_speed; ixgb_bus_type bus_type; int unit = adapter->unit; #ifdef _SV_ printf("ixgb%d: Packets not Avail = %ld\n", unit, adapter->no_pkts_avail); printf("ixgb%d: CleanTxInterrupts = %ld\n", unit, adapter->clean_tx_interrupts); printf("ixgb%d: ICR RXDMT0 = %lld\n", unit, (long long)adapter->sv_stats.icr_rxdmt0); printf("ixgb%d: ICR RXO = %lld\n", unit, (long long)adapter->sv_stats.icr_rxo); printf("ixgb%d: ICR RXT0 = %lld\n", unit, (long long)adapter->sv_stats.icr_rxt0); printf("ixgb%d: ICR TXDW = %lld\n", unit, (long long)adapter->sv_stats.icr_TXDW); #endif /* _SV_ */ bus_speed = adapter->hw.bus.speed; bus_type = adapter->hw.bus.type; sprintf(buf_speed, bus_speed == ixgb_bus_speed_33 ? "33MHz" : bus_speed == ixgb_bus_speed_66 ? "66MHz" : bus_speed == ixgb_bus_speed_100 ? "100MHz" : bus_speed == ixgb_bus_speed_133 ? "133MHz" : "UNKNOWN"); printf("ixgb%d: PCI_Bus_Speed = %s\n", unit, buf_speed); sprintf(buf_type, bus_type == ixgb_bus_type_pci ? "PCI" : bus_type == ixgb_bus_type_pcix ? 
"PCI-X" : "UNKNOWN"); printf("ixgb%d: PCI_Bus_Type = %s\n", unit, buf_type); printf("ixgb%d: Tx Descriptors not Avail1 = %ld\n", unit, adapter->no_tx_desc_avail1); printf("ixgb%d: Tx Descriptors not Avail2 = %ld\n", unit, adapter->no_tx_desc_avail2); printf("ixgb%d: Std Mbuf Failed = %ld\n", unit, adapter->mbuf_alloc_failed); printf("ixgb%d: Std Cluster Failed = %ld\n", unit, adapter->mbuf_cluster_failed); printf("ixgb%d: Defer count = %lld\n", unit, (long long)adapter->stats.dc); printf("ixgb%d: Missed Packets = %lld\n", unit, (long long)adapter->stats.mpc); printf("ixgb%d: Receive No Buffers = %lld\n", unit, (long long)adapter->stats.rnbc); printf("ixgb%d: Receive length errors = %lld\n", unit, (long long)adapter->stats.rlec); printf("ixgb%d: Crc errors = %lld\n", unit, (long long)adapter->stats.crcerrs); printf("ixgb%d: Driver dropped packets = %ld\n", unit, adapter->dropped_pkts); printf("ixgb%d: XON Rcvd = %lld\n", unit, (long long)adapter->stats.xonrxc); printf("ixgb%d: XON Xmtd = %lld\n", unit, (long long)adapter->stats.xontxc); printf("ixgb%d: XOFF Rcvd = %lld\n", unit, (long long)adapter->stats.xoffrxc); printf("ixgb%d: XOFF Xmtd = %lld\n", unit, (long long)adapter->stats.xofftxc); printf("ixgb%d: Good Packets Rcvd = %lld\n", unit, (long long)adapter->stats.gprcl); printf("ixgb%d: Good Packets Xmtd = %lld\n", unit, (long long)adapter->stats.gptcl); printf("ixgb%d: Jumbo frames recvd = %lld\n", unit, (long long)adapter->stats.jprcl); printf("ixgb%d: Jumbo frames Xmtd = %lld\n", unit, (long long)adapter->stats.jptcl); return; } static int ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS) { int error; int result; struct adapter *adapter; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { adapter = (struct adapter *) arg1; ixgb_print_hw_stats(adapter); } return error; } Index: head/sys/dev/lmc/if_lmc.c =================================================================== --- head/sys/dev/lmc/if_lmc.c (revision 193095) +++ head/sys/dev/lmc/if_lmc.c (revision 193096) @@ -1,7041 +1,7042 @@ /* * $FreeBSD$ * * Copyright (c) 2002-2004 David Boggs. * All rights reserved. * * BSD License: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * GNU General Public License: * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * Description: * * This is an open-source Unix device driver for PCI-bus WAN interface cards. * It sends and receives packets in HDLC frames over synchronous links. * A generic PC plus Unix plus some SBE/LMC cards makes an OPEN router. * This driver works with FreeBSD, NetBSD, OpenBSD, BSD/OS and Linux. * It has been tested on i386 (32-bit little-end), Sparc (64-bit big-end), * and Alpha (64-bit little-end) architectures. * * History and Authors: * * Ron Crane had the neat idea to use a Fast Ethernet chip as a PCI * interface and add an Ethernet-to-HDLC gate array to make a WAN card. * David Boggs designed the Ethernet-to-HDLC gate arrays and PC cards. * We did this at our company, LAN Media Corporation (LMC). * SBE Corp acquired LMC and continues to make the cards. * * Since the cards use Tulip Ethernet chips, we started with Matt Thomas' * ubiquitous "de" driver. Michael Graff stripped out the Ethernet stuff * and added HSSI stuff. Basil Gunn ported it to Solaris (lost) and * Rob Braun ported it to Linux. Andrew Stanley-Jones added support * for three more cards and wrote the first version of lmcconfig. * During 2002-5 David Boggs rewrote it and now feels responsible for it. * * Responsible Individual: * * Send bug reports and improvements to . */ #ifdef __FreeBSD__ # include /* OS version */ # define IFNET 1 # include "opt_inet.h" /* INET */ # include "opt_inet6.h" /* INET6 */ # include "opt_netgraph.h" /* NETGRAPH */ # ifdef HAVE_KERNEL_OPTION_HEADERS # include "opt_device_polling.h" /* DEVICE_POLLING */ # endif # ifndef INET # define INET 0 # endif # ifndef INET6 # define INET6 0 # endif # ifndef NETGRAPH # define NETGRAPH 0 # endif # define P2P 0 /* not in FreeBSD */ # if (__FreeBSD_version >= 500000) # define NSPPP 1 /* No count devices in FreeBSD 5 */ # include "opt_bpf.h" /* DEV_BPF */ # define NBPFILTER DEV_BPF # else /* FreeBSD-4 */ # include "sppp.h" /* NSPPP */ # include "bpf.h" /* NBPF */ # define NBPFILTER NBPF # endif # define GEN_HDLC 0 /* not in FreeBSD */ # # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # if (__FreeBSD_version >= 700000) # include # endif # if (__FreeBSD_version >= 500000) # include # include # else /* FreeBSD-4 */ # include # include # endif # if NETGRAPH # include # include # endif # if (INET || INET6) # include # include # endif # if NSPPP # include # endif # if NBPFILTER # include # endif /* and finally... 
*/ # include #endif /*__FreeBSD__*/ #ifdef __NetBSD__ # include /* OS version */ # define IFNET 1 # include "opt_inet.h" /* INET6, INET */ # define NETGRAPH 0 /* not in NetBSD */ # include "sppp.h" /* NSPPP */ # define P2P 0 /* not in NetBSD */ # include "opt_altq_enabled.h" /* ALTQ */ # include "bpfilter.h" /* NBPFILTER */ # define GEN_HDLC 0 /* not in NetBSD */ # # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # if (__NetBSD_Version__ >= 106000000) # include # else # include # endif # if (INET || INET6) # include # include # endif # if NSPPP # if (__NetBSD_Version__ >= 106000000) # include # else # include # endif # endif # if NBPFILTER # include # endif /* and finally... */ # include "if_lmc.h" #endif /*__NetBSD__*/ #ifdef __OpenBSD__ # include /* OS version */ # define IFNET 1 /* -DINET is passed on the compiler command line */ /* -DINET6 is passed on the compiler command line */ # define NETGRAPH 0 /* not in OpenBSD */ # include "sppp.h" /* NSPPP */ # define P2P 0 /* not in OpenBSD */ /* -DALTQ is passed on the compiler command line */ # include "bpfilter.h" /* NBPFILTER */ # define GEN_HDLC 0 /* not in OpenBSD */ # # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # if (OpenBSD >= 200206) # include # else # include # endif # if (INET || INET6) # include # include # endif # if NSPPP # include # endif # if NBPFILTER # include # endif /* and finally... */ # include "if_lmc.h" #endif /*__OpenBSD__*/ #ifdef __bsdi__ # include /* OS version */ # define IFNET 1 /* -DINET is passed on the compiler command line */ /* -DINET6 is passed on the compiler command line */ # define NETGRAPH 0 /* not in BSD/OS */ # define NSPPP 0 /* not in BSD/OS */ /* -DPPP is passed on the compiler command line */ /* -DCISCO_HDLC is passed on the compiler command line */ /* -DFR is passed on the compiler command line */ # if (PPP || CISCO_HDLC || FR) # define P2P 1 # else # define P2P 0 # endif # define ALTQ 0 /* not in BSD/OS */ # include "bpfilter.h" /* NBPFILTER */ # define GEN_HDLC 0 /* not in BSD/OS */ # # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # include # if (INET || INET6) # include # include # endif # if P2P # include # include # endif # if NBPFILTER # include # endif /* and finally... */ # include "if_lmc.h" #endif /*__bsdi__*/ #ifdef __linux__ # include # if (CONFIG_HDLC || CONFIG_HDLC_MODULE) # define GEN_HDLC 1 # else # define GEN_HDLC 0 # endif # define IFNET 0 /* different in Linux */ # define NETGRAPH 0 /* not in Linux */ # define NSPPP 0 /* different in Linux */ # define P2P 0 /* not in Linux */ # define ALTQ 0 /* different in Linux */ # define NBPFILTER 0 /* different in Linux */ # # include # include # include # include # if GEN_HDLC # include # endif /* and finally... */ # include "if_lmc.h" #endif /* __linux__ */ /* The SROM is a generic 93C46 serial EEPROM (64 words by 16 bits). */ /* Data is set up before the RISING edge of CLK; CLK is parked low. 
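A standalone sketch of the MSB-first shift that follows, with
hypothetical set_din()/pulse_clk() helpers standing in for the
TLP_SROM_MII register writes:

	void shift_bits(uint32_t data, uint32_t len)
	{
		for (; len > 0; len--) {
			set_din((data >> (len - 1)) & 1);  // DIN setup while CLK is low
			pulse_clk();                       // rising edge clocks it in
		}
	}

Note the read routine below clocks 17 bits, not 16: a 93C46 emits a
dummy 0 ahead of the 16 data bits, so its loop runs i=16..0.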
*/ static void shift_srom_bits(softc_t *sc, u_int32_t data, u_int32_t len) { u_int32_t csr = READ_CSR(TLP_SROM_MII); for (; len>0; len--) { /* MSB first */ if (data & (1<<(len-1))) csr |= TLP_SROM_DIN; /* DIN setup */ else csr &= ~TLP_SROM_DIN; /* DIN setup */ WRITE_CSR(TLP_SROM_MII, csr); csr |= TLP_SROM_CLK; /* CLK rising edge */ WRITE_CSR(TLP_SROM_MII, csr); csr &= ~TLP_SROM_CLK; /* CLK falling edge */ WRITE_CSR(TLP_SROM_MII, csr); } } /* Data is sampled on the RISING edge of CLK; CLK is parked low. */ static u_int16_t read_srom(softc_t *sc, u_int8_t addr) { int i; u_int32_t csr; u_int16_t data; /* Enable SROM access. */ csr = (TLP_SROM_SEL | TLP_SROM_RD | TLP_MII_MDOE); WRITE_CSR(TLP_SROM_MII, csr); /* CS rising edge prepares SROM for a new cycle. */ csr |= TLP_SROM_CS; WRITE_CSR(TLP_SROM_MII, csr); /* assert CS */ shift_srom_bits(sc, 6, 4); /* issue read cmd */ shift_srom_bits(sc, addr, 6); /* issue address */ for (data=0, i=16; i>=0; i--) /* read ->17<- bits of data */ { /* MSB first */ csr = READ_CSR(TLP_SROM_MII); /* DOUT sampled */ data = (data<<1) | ((csr & TLP_SROM_DOUT) ? 1:0); csr |= TLP_SROM_CLK; /* CLK rising edge */ WRITE_CSR(TLP_SROM_MII, csr); csr &= ~TLP_SROM_CLK; /* CLK falling edge */ WRITE_CSR(TLP_SROM_MII, csr); } /* Disable SROM access. */ WRITE_CSR(TLP_SROM_MII, TLP_MII_MDOE); return data; } /* The SROM is formatted by the mfgr and should NOT be written! */ /* But lmcconfig can rewrite it in case it gets overwritten somehow. */ /* IOCTL SYSCALL: can sleep. */ static void write_srom(softc_t *sc, u_int8_t addr, u_int16_t data) { u_int32_t csr; int i; /* Enable SROM access. */ csr = (TLP_SROM_SEL | TLP_SROM_RD | TLP_MII_MDOE); WRITE_CSR(TLP_SROM_MII, csr); /* Issue write-enable command. */ csr |= TLP_SROM_CS; WRITE_CSR(TLP_SROM_MII, csr); /* assert CS */ shift_srom_bits(sc, 4, 4); /* issue write enable cmd */ shift_srom_bits(sc, 63, 6); /* issue address */ csr &= ~TLP_SROM_CS; WRITE_CSR(TLP_SROM_MII, csr); /* deassert CS */ /* Issue erase command. */ csr |= TLP_SROM_CS; WRITE_CSR(TLP_SROM_MII, csr); /* assert CS */ shift_srom_bits(sc, 7, 4); /* issue erase cmd */ shift_srom_bits(sc, addr, 6); /* issue address */ csr &= ~TLP_SROM_CS; WRITE_CSR(TLP_SROM_MII, csr); /* deassert CS */ /* Issue write command. */ csr |= TLP_SROM_CS; WRITE_CSR(TLP_SROM_MII, csr); /* assert CS */ for (i=0; i<10; i++) /* 100 ms max wait */ if ((READ_CSR(TLP_SROM_MII) & TLP_SROM_DOUT)==0) SLEEP(10000); shift_srom_bits(sc, 5, 4); /* issue write cmd */ shift_srom_bits(sc, addr, 6); /* issue address */ shift_srom_bits(sc, data, 16); /* issue data */ csr &= ~TLP_SROM_CS; WRITE_CSR(TLP_SROM_MII, csr); /* deassert CS */ /* Issue write-disable command. */ csr |= TLP_SROM_CS; WRITE_CSR(TLP_SROM_MII, csr); /* assert CS */ for (i=0; i<10; i++) /* 100 ms max wait */ if ((READ_CSR(TLP_SROM_MII) & TLP_SROM_DOUT)==0) SLEEP(10000); shift_srom_bits(sc, 4, 4); /* issue write disable cmd */ shift_srom_bits(sc, 0, 6); /* issue address */ csr &= ~TLP_SROM_CS; WRITE_CSR(TLP_SROM_MII, csr); /* deassert CS */ /* Disable SROM access. */ WRITE_CSR(TLP_SROM_MII, TLP_MII_MDOE); } /* Not all boards have BIOS roms. */ /* The BIOS ROM is an AMD 29F010 1Mbit (128K by 8) EEPROM. */ static u_int8_t read_bios(softc_t *sc, u_int32_t addr) { u_int32_t srom_mii; /* Load the BIOS rom address register. */ WRITE_CSR(TLP_BIOS_ROM, addr); /* Enable the BIOS rom. */ srom_mii = TLP_BIOS_SEL | TLP_BIOS_RD | TLP_MII_MDOE; WRITE_CSR(TLP_SROM_MII, srom_mii); /* Wait at least 20 PCI cycles. */ DELAY(20); /* Read the BIOS rom data. 
*/ srom_mii = READ_CSR(TLP_SROM_MII); /* Disable the BIOS rom. */ WRITE_CSR(TLP_SROM_MII, TLP_MII_MDOE); return (u_int8_t)srom_mii & 0xFF; } static void write_bios_phys(softc_t *sc, u_int32_t addr, u_int8_t data) { u_int32_t srom_mii; /* Load the BIOS rom address register. */ WRITE_CSR(TLP_BIOS_ROM, addr); /* Enable the BIOS rom. */ srom_mii = TLP_BIOS_SEL | TLP_BIOS_WR | TLP_MII_MDOE; /* Load the data into the data register. */ srom_mii = (srom_mii & 0xFFFFFF00) | (data & 0xFF); WRITE_CSR(TLP_SROM_MII, srom_mii); /* Wait at least 20 PCI cycles. */ DELAY(20); /* Disable the BIOS rom. */ WRITE_CSR(TLP_SROM_MII, TLP_MII_MDOE); } /* IOCTL SYSCALL: can sleep. */ static void write_bios(softc_t *sc, u_int32_t addr, u_int8_t data) { u_int8_t read_data; /* this sequence enables writing */ write_bios_phys(sc, 0x5555, 0xAA); write_bios_phys(sc, 0x2AAA, 0x55); write_bios_phys(sc, 0x5555, 0xA0); write_bios_phys(sc, addr, data); /* Wait for the write to complete: DQ7 reads back inverted until done; DQ5 set means the part timed out. */ for (;;) /* interruptible syscall */ { for (;;) { read_data = read_bios(sc, addr); if ((read_data & 0x80) == (data & 0x80)) break; if (read_data & 0x20) { /* Data sheet says read it again. */ read_data = read_bios(sc, addr); if ((read_data & 0x80) == (data & 0x80)) break; if (DRIVER_DEBUG) printf("%s: write_bios() failed; rom addr=0x%x\n", NAME_UNIT, addr); return; } } read_data = read_bios(sc, addr); if (read_data == data) break; } } /* IOCTL SYSCALL: can sleep. */ static void erase_bios(softc_t *sc) { unsigned char read_data; /* This sequence enables erasing: */ write_bios_phys(sc, 0x5555, 0xAA); write_bios_phys(sc, 0x2AAA, 0x55); write_bios_phys(sc, 0x5555, 0x80); write_bios_phys(sc, 0x5555, 0xAA); write_bios_phys(sc, 0x2AAA, 0x55); write_bios_phys(sc, 0x5555, 0x10); /* Wait for the erase operation to complete. */ for (;;) /* interruptible syscall */ { for (;;) { read_data = read_bios(sc, 0); if (read_data & 0x80) break; if (read_data & 0x20) { /* Data sheet says read it again. */ read_data = read_bios(sc, 0); if (read_data & 0x80) break; if (DRIVER_DEBUG) printf("%s: erase_bios() failed\n", NAME_UNIT); return; } } read_data = read_bios(sc, 0); if (read_data == 0xFF) break; } } /* MDIO is 3-stated between transactions. */ /* MDIO is set up before the RISING edge of MDC; MDC is parked low. */ static void shift_mii_bits(softc_t *sc, u_int32_t data, u_int32_t len) { u_int32_t csr = READ_CSR(TLP_SROM_MII); for (; len>0; len--) { /* MSB first */ if (data & (1<<(len-1))) csr |= TLP_MII_MDOUT; /* MDOUT setup */ else csr &= ~TLP_MII_MDOUT; /* MDOUT setup */ WRITE_CSR(TLP_SROM_MII, csr); csr |= TLP_MII_MDC; /* MDC rising edge */ WRITE_CSR(TLP_SROM_MII, csr); csr &= ~TLP_MII_MDC; /* MDC falling edge */ WRITE_CSR(TLP_SROM_MII, csr); } } /* The specification for the MII is IEEE Std 802.3 clause 22. */ /* MDIO is sampled on the RISING edge of MDC; MDC is parked low. */ static u_int16_t read_mii(softc_t *sc, u_int8_t regad) { int i; u_int32_t csr; u_int16_t data = 0; WRITE_CSR(TLP_SROM_MII, TLP_MII_MDOUT); shift_mii_bits(sc, 0xFFFFF, 20); /* preamble */ shift_mii_bits(sc, 0xFFFFF, 20); /* preamble */ shift_mii_bits(sc, 1, 2); /* start symbol */ shift_mii_bits(sc, 2, 2); /* read op */ shift_mii_bits(sc, 0, 5); /* phyad=0 */ shift_mii_bits(sc, regad, 5); /* regad */ csr = READ_CSR(TLP_SROM_MII); csr |= TLP_MII_MDOE; WRITE_CSR(TLP_SROM_MII, csr); shift_mii_bits(sc, 0, 2); /* turn-around */ for (i=15; i>=0; i--) /* data */ { /* MSB first */ csr = READ_CSR(TLP_SROM_MII); /* MDIN sampled */ data = (data<<1) | ((csr & TLP_MII_MDIN) ?
1:0); csr |= TLP_MII_MDC; /* MDC rising edge */ WRITE_CSR(TLP_SROM_MII, csr); csr &= ~TLP_MII_MDC; /* MDC falling edge */ WRITE_CSR(TLP_SROM_MII, csr); } return data; } static void write_mii(softc_t *sc, u_int8_t regad, u_int16_t data) { WRITE_CSR(TLP_SROM_MII, TLP_MII_MDOUT); shift_mii_bits(sc, 0xFFFFF, 20); /* preamble */ shift_mii_bits(sc, 0xFFFFF, 20); /* preamble */ shift_mii_bits(sc, 1, 2); /* start symbol */ shift_mii_bits(sc, 1, 2); /* write op */ shift_mii_bits(sc, 0, 5); /* phyad=0 */ shift_mii_bits(sc, regad, 5); /* regad */ shift_mii_bits(sc, 2, 2); /* turn-around */ shift_mii_bits(sc, data, 16); /* data */ WRITE_CSR(TLP_SROM_MII, TLP_MII_MDOE); if (regad == 16) sc->led_state = data; /* a small optimization */ } static void set_mii16_bits(softc_t *sc, u_int16_t bits) { u_int16_t mii16 = read_mii(sc, 16); mii16 |= bits; write_mii(sc, 16, mii16); } static void clr_mii16_bits(softc_t *sc, u_int16_t bits) { u_int16_t mii16 = read_mii(sc, 16); mii16 &= ~bits; write_mii(sc, 16, mii16); } static void set_mii17_bits(softc_t *sc, u_int16_t bits) { u_int16_t mii17 = read_mii(sc, 17); mii17 |= bits; write_mii(sc, 17, mii17); } static void clr_mii17_bits(softc_t *sc, u_int16_t bits) { u_int16_t mii17 = read_mii(sc, 17); mii17 &= ~bits; write_mii(sc, 17, mii17); } /* * Watchdog code is more readable if it refreshes LEDs * once a second whether they need it or not. * But MII refs take 150 uSecs each, so remember the last value * written to MII16 and avoid LED writes that do nothing. */ static void led_off(softc_t *sc, u_int16_t led) { if ((led & sc->led_state) == led) return; set_mii16_bits(sc, led); } static void led_on(softc_t *sc, u_int16_t led) { if ((led & sc->led_state) == 0) return; clr_mii16_bits(sc, led); } static void led_inv(softc_t *sc, u_int16_t led) { u_int16_t mii16 = read_mii(sc, 16); mii16 ^= led; write_mii(sc, 16, mii16); } /* * T1 & T3 framer registers are accessed through MII regs 17 & 18. * Write the address to MII reg 17 then R/W data through MII reg 18. * The hardware interface is an Intel-style 8-bit muxed A/D bus. */ static void write_framer(softc_t *sc, u_int16_t addr, u_int8_t data) { write_mii(sc, 17, addr); write_mii(sc, 18, data); } static u_int8_t read_framer(softc_t *sc, u_int16_t addr) { write_mii(sc, 17, addr); return (u_int8_t)read_mii(sc, 18); } /* Tulip's hardware implementation of General Purpose IO * (GPIO) pins makes life difficult for software. * Bits 7-0 in the Tulip GPIO CSR are used for two purposes * depending on the state of bit 8. * If bit 8 is 0 then bits 7-0 are "data" bits. * If bit 8 is 1 then bits 7-0 are "direction" bits. * If a direction bit is one, the data bit is an output. * The problem is that the direction bits are WRITE-ONLY. * Software must remember the direction bits in a shadow copy. * (sc->gpio_dir) in order to change some but not all of the bits. * All accesses to the Tulip GPIO register use these five procedures. 
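 *
 * [Illustrative sketch, not part of the driver] The shadow-copy pattern
 * in miniature, with a hypothetical reg_write() standing in for the
 * CSR access:
 *
 *	sc->gpio_dir |= out_bits;	// these pins now drive out
 *	reg_write(TLP_GPIO, TLP_GPIO_DIR | sc->gpio_dir);
 *
 *	sc->gpio_dir &= ~in_bits;	// and these float as inputs
 *	reg_write(TLP_GPIO, TLP_GPIO_DIR | sc->gpio_dir);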
*/ static void make_gpio_input(softc_t *sc, u_int32_t bits) { sc->gpio_dir &= ~bits; WRITE_CSR(TLP_GPIO, TLP_GPIO_DIR | (sc->gpio_dir)); } static void make_gpio_output(softc_t *sc, u_int32_t bits) { sc->gpio_dir |= bits; WRITE_CSR(TLP_GPIO, TLP_GPIO_DIR | (sc->gpio_dir)); } static u_int32_t read_gpio(softc_t *sc) { return READ_CSR(TLP_GPIO); } static void set_gpio_bits(softc_t *sc, u_int32_t bits) { WRITE_CSR(TLP_GPIO, (read_gpio(sc) | bits) & 0xFF); } static void clr_gpio_bits(softc_t *sc, u_int32_t bits) { WRITE_CSR(TLP_GPIO, (read_gpio(sc) & ~bits) & 0xFF); } /* Reset ALL of the flip-flops in the gate array to zero. */ /* This does NOT change the gate array programming. */ /* Called during initialization so it must not sleep. */ static void reset_xilinx(softc_t *sc) { /* Drive RESET low to force initialization. */ clr_gpio_bits(sc, GPIO_RESET); make_gpio_output(sc, GPIO_RESET); /* Hold RESET low for more than 10 uSec. */ DELAY(50); /* Done with RESET; make it an input. */ make_gpio_input(sc, GPIO_RESET); } /* Load Xilinx gate array program from on-board rom. */ /* This changes the gate array programming. */ /* IOCTL SYSCALL: can sleep. */ static void load_xilinx_from_rom(softc_t *sc) { int i; /* Drive MODE low to load from ROM rather than GPIO. */ clr_gpio_bits(sc, GPIO_MODE); make_gpio_output(sc, GPIO_MODE); /* Drive DP & RESET low to force configuration. */ clr_gpio_bits(sc, GPIO_RESET | GPIO_DP); make_gpio_output(sc, GPIO_RESET | GPIO_DP); /* Hold RESET & DP low for more than 10 uSec. */ DELAY(50); /* Done with RESET & DP; make them inputs. */ make_gpio_input(sc, GPIO_DP | GPIO_RESET); /* BUSY-WAIT for Xilinx chip to configure itself from ROM bits. */ for (i=0; i<100; i++) /* 1 sec max delay */ if ((read_gpio(sc) & GPIO_DP) == 0) SLEEP(10000); /* Done with MODE; make it an input. */ make_gpio_input(sc, GPIO_MODE); } /* Load the Xilinx gate array program from userland bits. */ /* This changes the gate array programming. */ /* IOCTL SYSCALL: can sleep. */ static int load_xilinx_from_file(softc_t *sc, char *addr, u_int32_t len) { char *data; int i, j, error; /* Get some pages to hold the Xilinx bits; biggest file is < 6 KB. */ if (len > 8192) return EFBIG; /* too big */ data = malloc(len, M_DEVBUF, M_WAITOK); if (data == NULL) return ENOMEM; /* Copy the Xilinx bits from userland. */ if ((error = copyin(addr, data, len))) { free(data, M_DEVBUF); return error; } /* Drive MODE high to load from GPIO rather than ROM. */ set_gpio_bits(sc, GPIO_MODE); make_gpio_output(sc, GPIO_MODE); /* Drive DP & RESET low to force configuration. */ clr_gpio_bits(sc, GPIO_RESET | GPIO_DP); make_gpio_output(sc, GPIO_RESET | GPIO_DP); /* Hold RESET & DP low for more than 10 uSec. */ DELAY(50); /* Done with RESET & DP; make them inputs. */ make_gpio_input(sc, GPIO_RESET | GPIO_DP); /* BUSY-WAIT for Xilinx chip to clear its config memory. */ make_gpio_input(sc, GPIO_INIT); for (i=0; i<10000; i++) /* 1 sec max delay */ if ((read_gpio(sc) & GPIO_INIT)==0) SLEEP(10000); /* Configure CLK and DATA as outputs. */ set_gpio_bits(sc, GPIO_CLK); /* park CLK high */ make_gpio_output(sc, GPIO_CLK | GPIO_DATA); /* Write bits to Xilinx; CLK is parked HIGH. */ /* DATA is set up before the RISING edge of CLK. 
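Per that timing, each bit goes out as: CLK falling edge, DATA setup,
then a rising edge on which the FPGA samples.  A sketch with
hypothetical clk_lo()/set_data()/clk_hi() helpers:

	clk_lo();		// CLK falling edge
	set_data(bit);		// DATA setup while CLK is low
	clk_hi();		// FPGA samples DATA on this rising edge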
*/ for (i=0; istatus.card_type == TLP_CSID_SSI) { if (synth->prescale == 9) /* divide by 512 */ set_mii17_bits(sc, MII17_SSI_PRESCALE); else /* divide by 32 */ clr_mii17_bits(sc, MII17_SSI_PRESCALE); } clr_gpio_bits(sc, GPIO_DATA | GPIO_CLK); make_gpio_output(sc, GPIO_DATA | GPIO_CLK); /* SYNTH is a low-true chip enable for the AV9110 chip. */ set_gpio_bits(sc, GPIO_SSI_SYNTH); make_gpio_output(sc, GPIO_SSI_SYNTH); clr_gpio_bits(sc, GPIO_SSI_SYNTH); /* Serially shift the command into the AV9110 chip. */ shift_synth_bits(sc, synth->n, 7); shift_synth_bits(sc, synth->m, 7); shift_synth_bits(sc, synth->v, 1); shift_synth_bits(sc, synth->x, 2); shift_synth_bits(sc, synth->r, 2); shift_synth_bits(sc, 0x16, 5); /* enable clk/x output */ /* SYNTH (chip enable) going high ends the command. */ set_gpio_bits(sc, GPIO_SSI_SYNTH); make_gpio_input(sc, GPIO_SSI_SYNTH); /* Stop driving serial-related signals; pullups/pulldowns take over. */ make_gpio_input(sc, GPIO_DATA | GPIO_CLK); /* remember the new synthesizer parameters */ if (&sc->config.synth != synth) sc->config.synth = *synth; } /* Write a command to the DAC controlling the VCXO on some T3 adapters. */ /* The DAC is a TI-TLV5636: 12-bit resolution and a serial interface. */ /* DATA is set up before the FALLING edge of CLK. CLK is parked HIGH. */ static void write_dac(softc_t *sc, u_int16_t data) { int i; /* Prepare to use DATA and CLK. */ set_gpio_bits(sc, GPIO_DATA | GPIO_CLK); make_gpio_output(sc, GPIO_DATA | GPIO_CLK); /* High-to-low transition prepares DAC for new value. */ set_gpio_bits(sc, GPIO_T3_DAC); make_gpio_output(sc, GPIO_T3_DAC); clr_gpio_bits(sc, GPIO_T3_DAC); /* Serially shift command bits into DAC. */ for (i=0; i<16; i++) { /* MSB first */ if ((data & (1<<(15-i))) != 0) set_gpio_bits(sc, GPIO_DATA); /* DATA setup */ else clr_gpio_bits(sc, GPIO_DATA); /* DATA setup */ clr_gpio_bits(sc, GPIO_CLK); /* CLK falling edge */ set_gpio_bits(sc, GPIO_CLK); /* CLK rising edge */ } /* Done with DAC; make it an input; loads new value into DAC. */ set_gpio_bits(sc, GPIO_T3_DAC); make_gpio_input(sc, GPIO_T3_DAC); /* Stop driving serial-related signals; pullups/pulldowns take over. */ make_gpio_input(sc, GPIO_DATA | GPIO_CLK); } /* begin HSSI card code */ /* Must not sleep. */ static void hssi_config(softc_t *sc) { if (sc->status.card_type == 0) { /* defaults */ sc->status.card_type = READ_PCI_CFG(sc, TLP_CSID); sc->config.crc_len = CFG_CRC_16; sc->config.loop_back = CFG_LOOP_NONE; sc->config.tx_clk_src = CFG_CLKMUX_ST; sc->config.dte_dce = CFG_DTE; sc->config.synth.n = 52; /* 52.000 Mbs */ sc->config.synth.m = 5; sc->config.synth.v = 0; sc->config.synth.x = 0; sc->config.synth.r = 0; sc->config.synth.prescale = 2; } /* set CRC length */ if (sc->config.crc_len == CFG_CRC_32) set_mii16_bits(sc, MII16_HSSI_CRC32); else clr_mii16_bits(sc, MII16_HSSI_CRC32); /* Assert pin LA in HSSI conn: ask modem for local loop. */ if (sc->config.loop_back == CFG_LOOP_LL) set_mii16_bits(sc, MII16_HSSI_LA); else clr_mii16_bits(sc, MII16_HSSI_LA); /* Assert pin LB in HSSI conn: ask modem for remote loop. */ if (sc->config.loop_back == CFG_LOOP_RL) set_mii16_bits(sc, MII16_HSSI_LB); else clr_mii16_bits(sc, MII16_HSSI_LB); if (sc->status.card_type == TLP_CSID_HSSI) { /* set TXCLK src */ if (sc->config.tx_clk_src == CFG_CLKMUX_ST) set_gpio_bits(sc, GPIO_HSSI_TXCLK); else clr_gpio_bits(sc, GPIO_HSSI_TXCLK); make_gpio_output(sc, GPIO_HSSI_TXCLK); } else if (sc->status.card_type == TLP_CSID_HSSIc) { /* cPCI HSSI rev C has extra features */ /* Set TXCLK source. 
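The two-bit CLKMUX field is updated read-modify-write; the general
shape, with hypothetical FIELD_MASK/FIELD_SHIFT names:

	reg = (reg & ~FIELD_MASK) | ((val << FIELD_SHIFT) & FIELD_MASK);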
*/ u_int16_t mii16 = read_mii(sc, 16); mii16 &= ~MII16_HSSI_CLKMUX; mii16 |= (sc->config.tx_clk_src&3)<<13; write_mii(sc, 16, mii16); /* cPCI HSSI implements loopback towards the net. */ if (sc->config.loop_back == CFG_LOOP_LINE) set_mii16_bits(sc, MII16_HSSI_LOOP); else clr_mii16_bits(sc, MII16_HSSI_LOOP); /* Set DTE/DCE mode. */ if (sc->config.dte_dce == CFG_DCE) set_gpio_bits(sc, GPIO_HSSI_DCE); else clr_gpio_bits(sc, GPIO_HSSI_DCE); make_gpio_output(sc, GPIO_HSSI_DCE); /* Program the synthesized oscillator. */ write_synth(sc, &sc->config.synth); } } static void hssi_ident(softc_t *sc) { } /* Called once a second; must not sleep. */ static int hssi_watchdog(softc_t *sc) { u_int16_t mii16 = read_mii(sc, 16) & MII16_HSSI_MODEM; int link_status = STATUS_UP; led_inv(sc, MII16_HSSI_LED_UL); /* Software is alive. */ led_on(sc, MII16_HSSI_LED_LL); /* always on (SSI cable) */ /* Check the transmit clock. */ if (sc->status.tx_speed == 0) { led_on(sc, MII16_HSSI_LED_UR); link_status = STATUS_DOWN; } else led_off(sc, MII16_HSSI_LED_UR); /* Is the modem ready? */ if ((mii16 & MII16_HSSI_CA) == 0) { led_off(sc, MII16_HSSI_LED_LR); link_status = STATUS_DOWN; } else led_on(sc, MII16_HSSI_LED_LR); /* Print the modem control signals if they changed. */ if ((DRIVER_DEBUG) && (mii16 != sc->last_mii16)) { char *on = "ON ", *off = "OFF"; printf("%s: TA=%s CA=%s LA=%s LB=%s LC=%s TM=%s\n", NAME_UNIT, (mii16 & MII16_HSSI_TA) ? on : off, (mii16 & MII16_HSSI_CA) ? on : off, (mii16 & MII16_HSSI_LA) ? on : off, (mii16 & MII16_HSSI_LB) ? on : off, (mii16 & MII16_HSSI_LC) ? on : off, (mii16 & MII16_HSSI_TM) ? on : off); } /* SNMP one-second-report */ sc->status.snmp.hssi.sigs = mii16 & MII16_HSSI_MODEM; /* Remember this state until next time. */ sc->last_mii16 = mii16; /* If a loop back is in effect, link status is UP */ if (sc->config.loop_back != CFG_LOOP_NONE) link_status = STATUS_UP; return link_status; } /* IOCTL SYSCALL: can sleep (but doesn't). */ static int hssi_ioctl(softc_t *sc, struct ioctl *ioctl) { int error = 0; if (ioctl->cmd == IOCTL_SNMP_SIGS) { u_int16_t mii16 = read_mii(sc, 16); mii16 &= ~MII16_HSSI_MODEM; mii16 |= (MII16_HSSI_MODEM & ioctl->data); write_mii(sc, 16, mii16); } else if (ioctl->cmd == IOCTL_SET_STATUS) { if (ioctl->data != 0) set_mii16_bits(sc, MII16_HSSI_TA); else clr_mii16_bits(sc, MII16_HSSI_TA); } else error = EINVAL; return error; } /* begin DS3 card code */ /* Must not sleep. */ static void t3_config(softc_t *sc) { int i; u_int8_t ctl1; if (sc->status.card_type == 0) { /* defaults */ sc->status.card_type = TLP_CSID_T3; sc->config.crc_len = CFG_CRC_16; sc->config.loop_back = CFG_LOOP_NONE; sc->config.format = CFG_FORMAT_T3CPAR; sc->config.cable_len = 10; /* meters */ sc->config.scrambler = CFG_SCRAM_DL_KEN; sc->config.tx_clk_src = CFG_CLKMUX_INT; /* Center the VCXO -- get within 20 PPM of 44736000. */ write_dac(sc, 0x9002); /* set Vref = 2.048 volts */ write_dac(sc, 2048); /* range is 0..4095 */ } /* Set cable length. */ if (sc->config.cable_len > 30) clr_mii16_bits(sc, MII16_DS3_ZERO); else set_mii16_bits(sc, MII16_DS3_ZERO); /* Set payload scrambler polynomial. */ if (sc->config.scrambler == CFG_SCRAM_LARS) set_mii16_bits(sc, MII16_DS3_POLY); else clr_mii16_bits(sc, MII16_DS3_POLY); /* Set payload scrambler on/off. */ if (sc->config.scrambler == CFG_SCRAM_OFF) clr_mii16_bits(sc, MII16_DS3_SCRAM); else set_mii16_bits(sc, MII16_DS3_SCRAM); /* Set CRC length. 
*/ if (sc->config.crc_len == CFG_CRC_32) set_mii16_bits(sc, MII16_DS3_CRC32); else clr_mii16_bits(sc, MII16_DS3_CRC32); /* Loopback towards host thru the line interface. */ if (sc->config.loop_back == CFG_LOOP_OTHER) set_mii16_bits(sc, MII16_DS3_TRLBK); else clr_mii16_bits(sc, MII16_DS3_TRLBK); /* Loopback towards network thru the line interface. */ if (sc->config.loop_back == CFG_LOOP_LINE) set_mii16_bits(sc, MII16_DS3_LNLBK); else if (sc->config.loop_back == CFG_LOOP_DUAL) set_mii16_bits(sc, MII16_DS3_LNLBK); else clr_mii16_bits(sc, MII16_DS3_LNLBK); /* Configure T3 framer chip; write EVERY writeable register. */ ctl1 = CTL1_SER | CTL1_XTX; if (sc->config.loop_back == CFG_LOOP_INWARD) ctl1 |= CTL1_3LOOP; if (sc->config.loop_back == CFG_LOOP_DUAL) ctl1 |= CTL1_3LOOP; if (sc->config.format == CFG_FORMAT_T3M13) ctl1 |= CTL1_M13MODE; write_framer(sc, T3CSR_CTL1, ctl1); write_framer(sc, T3CSR_TX_FEAC, CTL5_EMODE); write_framer(sc, T3CSR_CTL8, CTL8_FBEC); write_framer(sc, T3CSR_CTL12, CTL12_DLCB1 | CTL12_C21 | CTL12_MCB1); write_framer(sc, T3CSR_DBL_FEAC, 0); write_framer(sc, T3CSR_CTL14, CTL14_RGCEN | CTL14_TGCEN); write_framer(sc, T3CSR_INTEN, 0); write_framer(sc, T3CSR_CTL20, CTL20_CVEN); /* Clear error counters and latched error bits */ /* that may have happened while initializing. */ for (i=0; i<21; i++) read_framer(sc, i); } static void t3_ident(softc_t *sc) { printf(", TXC03401 rev B"); } /* Called once a second; must not sleep. */ static int t3_watchdog(softc_t *sc) { u_int16_t CV; u_int8_t CERR, PERR, MERR, FERR, FEBE; u_int8_t ctl1, stat16, feac; int link_status = STATUS_UP; u_int16_t mii16; /* Read the alarm registers. */ ctl1 = read_framer(sc, T3CSR_CTL1); stat16 = read_framer(sc, T3CSR_STAT16); mii16 = read_mii(sc, 16); /* Always ignore the RTLOC alarm bit. */ stat16 &= ~STAT16_RTLOC; /* Software is alive. */ led_inv(sc, MII16_DS3_LED_GRN); /* Receiving Alarm Indication Signal (AIS). */ if ((stat16 & STAT16_RAIS) != 0) /* receiving ais */ led_on(sc, MII16_DS3_LED_BLU); else if (ctl1 & CTL1_TXAIS) /* sending ais */ led_inv(sc, MII16_DS3_LED_BLU); else led_off(sc, MII16_DS3_LED_BLU); /* Receiving Remote Alarm Indication (RAI). */ if ((stat16 & STAT16_XERR) != 0) /* receiving rai */ led_on(sc, MII16_DS3_LED_YEL); else if ((ctl1 & CTL1_XTX) == 0) /* sending rai */ led_inv(sc, MII16_DS3_LED_YEL); else led_off(sc, MII16_DS3_LED_YEL); /* If certain status bits are set then the link is 'down'. */ /* The bad bits are: rxlos rxoof rxais rxidl xerr. */ if ((stat16 & ~(STAT16_FEAC | STAT16_SEF)) != 0) link_status = STATUS_DOWN; /* Declare local Red Alarm if the link is down. */ if (link_status == STATUS_DOWN) led_on(sc, MII16_DS3_LED_RED); else if (sc->loop_timer != 0) /* loopback is active */ led_inv(sc, MII16_DS3_LED_RED); else led_off(sc, MII16_DS3_LED_RED); /* Print latched error bits if they changed. */ if ((DRIVER_DEBUG) && ((stat16 & ~STAT16_FEAC) != sc->last_stat16)) { char *on = "ON ", *off = "OFF"; printf("%s: RLOS=%s ROOF=%s RAIS=%s RIDL=%s SEF=%s XERR=%s\n", NAME_UNIT, (stat16 & STAT16_RLOS) ? on : off, (stat16 & STAT16_ROOF) ? on : off, (stat16 & STAT16_RAIS) ? on : off, (stat16 & STAT16_RIDL) ? on : off, (stat16 & STAT16_SEF) ? on : off, (stat16 & STAT16_XERR) ? on : off); } /* Check and print error counters if non-zero. 
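The 16-bit code-violation count spans two 8-bit framer registers and
is reassembled high byte first, equivalent to:

	CV = (read_framer(sc, T3CSR_CVHI) << 8) | read_framer(sc, T3CSR_CVLO);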
*/ CV = read_framer(sc, T3CSR_CVHI)<<8; CV += read_framer(sc, T3CSR_CVLO); PERR = read_framer(sc, T3CSR_PERR); CERR = read_framer(sc, T3CSR_CERR); FERR = read_framer(sc, T3CSR_FERR); MERR = read_framer(sc, T3CSR_MERR); FEBE = read_framer(sc, T3CSR_FEBE); /* CV is invalid during LOS. */ if ((stat16 & STAT16_RLOS)!=0) CV = 0; /* CERR & FEBE are invalid in M13 mode */ if (sc->config.format == CFG_FORMAT_T3M13) CERR = FEBE = 0; /* FEBE is invalid during AIS. */ if ((stat16 & STAT16_RAIS)!=0) FEBE = 0; if (DRIVER_DEBUG && (CV || PERR || CERR || FERR || MERR || FEBE)) printf("%s: CV=%u PERR=%u CERR=%u FERR=%u MERR=%u FEBE=%u\n", NAME_UNIT, CV, PERR, CERR, FERR, MERR, FEBE); /* Driver keeps crude link-level error counters (SNMP is better). */ sc->status.cntrs.lcv_errs += CV; sc->status.cntrs.par_errs += PERR; sc->status.cntrs.cpar_errs += CERR; sc->status.cntrs.frm_errs += FERR; sc->status.cntrs.mfrm_errs += MERR; sc->status.cntrs.febe_errs += FEBE; /* Check for FEAC messages (FEAC not defined in M13 mode). */ if (FORMAT_T3CPAR && (stat16 & STAT16_FEAC)) do { feac = read_framer(sc, T3CSR_FEAC_STK); if ((feac & FEAC_STK_VALID)==0) break; /* Ignore RxFEACs while a far end loopback has been requested. */ if ((sc->status.snmp.t3.line & TLOOP_FAR_LINE)!=0) continue; switch (feac & FEAC_STK_FEAC) { case T3BOP_LINE_UP: break; case T3BOP_LINE_DOWN: break; case T3BOP_LOOP_DS3: { if (sc->last_FEAC == T3BOP_LINE_DOWN) { if (DRIVER_DEBUG) printf("%s: Received a 'line loopback deactivate' FEAC msg\n", NAME_UNIT); clr_mii16_bits(sc, MII16_DS3_LNLBK); sc->loop_timer = 0; } if (sc->last_FEAC == T3BOP_LINE_UP) { if (DRIVER_DEBUG) printf("%s: Received a 'line loopback activate' FEAC msg\n", NAME_UNIT); set_mii16_bits(sc, MII16_DS3_LNLBK); sc->loop_timer = 300; } break; } case T3BOP_OOF: { if (DRIVER_DEBUG) printf("%s: Received a 'far end LOF' FEAC msg\n", NAME_UNIT); break; } case T3BOP_IDLE: { if (DRIVER_DEBUG) printf("%s: Received a 'far end IDL' FEAC msg\n", NAME_UNIT); break; } case T3BOP_AIS: { if (DRIVER_DEBUG) printf("%s: Received a 'far end AIS' FEAC msg\n", NAME_UNIT); break; } case T3BOP_LOS: { if (DRIVER_DEBUG) printf("%s: Received a 'far end LOS' FEAC msg\n", NAME_UNIT); break; } default: { if (DRIVER_DEBUG) printf("%s: Received a 'type 0x%02X' FEAC msg\n", NAME_UNIT, feac & FEAC_STK_FEAC); break; } } sc->last_FEAC = feac & FEAC_STK_FEAC; } while ((feac & FEAC_STK_MORE) != 0); stat16 &= ~STAT16_FEAC; /* Send Service-Affecting priority FEAC messages */ if (((sc->last_stat16 ^ stat16) & 0xF0) && (FORMAT_T3CPAR)) { /* Transmit continuous FEACs */ write_framer(sc, T3CSR_CTL14, read_framer(sc, T3CSR_CTL14) & ~CTL14_FEAC10); if ((stat16 & STAT16_RLOS)!=0) write_framer(sc, T3CSR_TX_FEAC, 0xC0 + T3BOP_LOS); else if ((stat16 & STAT16_ROOF)!=0) write_framer(sc, T3CSR_TX_FEAC, 0xC0 + T3BOP_OOF); else if ((stat16 & STAT16_RAIS)!=0) write_framer(sc, T3CSR_TX_FEAC, 0xC0 + T3BOP_AIS); else if ((stat16 & STAT16_RIDL)!=0) write_framer(sc, T3CSR_TX_FEAC, 0xC0 + T3BOP_IDLE); else write_framer(sc, T3CSR_TX_FEAC, CTL5_EMODE); } /* Start sending RAI, Remote Alarm Indication. */ if (((stat16 & STAT16_ROOF)!=0) && ((stat16 & STAT16_RLOS)==0) && ((sc->last_stat16 & STAT16_ROOF)==0)) write_framer(sc, T3CSR_CTL1, ctl1 &= ~CTL1_XTX); /* Stop sending RAI, Remote Alarm Indication. 
*/ else if (((stat16 & STAT16_ROOF)==0) && ((sc->last_stat16 & STAT16_ROOF)!=0)) write_framer(sc, T3CSR_CTL1, ctl1 |= CTL1_XTX); /* Start sending AIS, Alarm Indication Signal */ if (((stat16 & STAT16_RLOS)!=0) && ((sc->last_stat16 & STAT16_RLOS)==0)) { set_mii16_bits(sc, MII16_DS3_FRAME); write_framer(sc, T3CSR_CTL1, ctl1 | CTL1_TXAIS); } /* Stop sending AIS, Alarm Indication Signal */ else if (((stat16 & STAT16_RLOS)==0) && ((sc->last_stat16 & STAT16_RLOS)!=0)) { clr_mii16_bits(sc, MII16_DS3_FRAME); write_framer(sc, T3CSR_CTL1, ctl1 & ~CTL1_TXAIS); } /* Time out loopback requests. */ if (sc->loop_timer != 0) if (--sc->loop_timer == 0) if ((mii16 & MII16_DS3_LNLBK)!=0) { if (DRIVER_DEBUG) printf("%s: Timeout: Loop Down after 300 seconds\n", NAME_UNIT); clr_mii16_bits(sc, MII16_DS3_LNLBK); /* line loopback off */ } /* SNMP error counters */ sc->status.snmp.t3.lcv = CV; sc->status.snmp.t3.pcv = PERR; sc->status.snmp.t3.ccv = CERR; sc->status.snmp.t3.febe = FEBE; /* SNMP Line Status */ sc->status.snmp.t3.line = 0; if ((ctl1 & CTL1_XTX)==0) sc->status.snmp.t3.line |= TLINE_TX_RAI; if (stat16 & STAT16_XERR) sc->status.snmp.t3.line |= TLINE_RX_RAI; if (ctl1 & CTL1_TXAIS) sc->status.snmp.t3.line |= TLINE_TX_AIS; if (stat16 & STAT16_RAIS) sc->status.snmp.t3.line |= TLINE_RX_AIS; if (stat16 & STAT16_ROOF) sc->status.snmp.t3.line |= TLINE_LOF; if (stat16 & STAT16_RLOS) sc->status.snmp.t3.line |= TLINE_LOS; if (stat16 & STAT16_SEF) sc->status.snmp.t3.line |= T3LINE_SEF; /* SNMP Loopback Status */ sc->status.snmp.t3.loop &= ~TLOOP_FAR_LINE; if (sc->config.loop_back == CFG_LOOP_TULIP) sc->status.snmp.t3.loop |= TLOOP_NEAR_OTHER; if (ctl1 & CTL1_3LOOP) sc->status.snmp.t3.loop |= TLOOP_NEAR_INWARD; if (mii16 & MII16_DS3_TRLBK) sc->status.snmp.t3.loop |= TLOOP_NEAR_OTHER; if (mii16 & MII16_DS3_LNLBK) sc->status.snmp.t3.loop |= TLOOP_NEAR_LINE; /*if (ctl12 & CTL12_RTPLOOP) sc->status.snmp.t3.loop |= TLOOP_NEAR_PAYLOAD; */ /* Remember this state until next time. */ sc->last_stat16 = stat16; /* If an INWARD loopback is in effect, link status is UP */ if (sc->config.loop_back != CFG_LOOP_NONE) /* XXX INWARD ONLY */ link_status = STATUS_UP; return link_status; } /* IOCTL SYSCALL: can sleep. */ static void t3_send_dbl_feac(softc_t *sc, int feac1, int feac2) { u_int8_t tx_feac; int i; /* The FEAC transmitter could be sending a continuous */ /* FEAC msg when told to send a double FEAC message. */ /* So save the current state of the FEAC transmitter. */ tx_feac = read_framer(sc, T3CSR_TX_FEAC); /* Load second FEAC code and stop FEAC transmitter. */ write_framer(sc, T3CSR_TX_FEAC, CTL5_EMODE + feac2); /* FEAC transmitter sends 10 more FEACs and then stops. */ SLEEP(20000); /* sending one FEAC takes 1700 uSecs */ /* Load first FEAC code and start FEAC transmitter. */ write_framer(sc, T3CSR_DBL_FEAC, CTL13_DFEXEC + feac1); /* Wait for double FEAC sequence to complete -- about 70 ms. */ for (i=0; i<10; i++) /* max delay 100 ms */ if (read_framer(sc, T3CSR_DBL_FEAC) & CTL13_DFEXEC) SLEEP(10000); /* Flush received FEACs; don't respond to our own loop cmd! */ while (read_framer(sc, T3CSR_FEAC_STK) & FEAC_STK_VALID) DELAY(1); /* XXX HANG */ /* Restore previous state of the FEAC transmitter. */ /* If it was sending a continuous FEAC, it will resume. */ write_framer(sc, T3CSR_TX_FEAC, tx_feac); } /* IOCTL SYSCALL: can sleep. */ static int t3_ioctl(softc_t *sc, struct ioctl *ioctl) { int error = 0; switch (ioctl->cmd) { case IOCTL_SNMP_SEND: /* set opstatus?
*/ { if (sc->config.format != CFG_FORMAT_T3CPAR) error = EINVAL; else if (ioctl->data == TSEND_LINE) { sc->status.snmp.t3.loop |= TLOOP_FAR_LINE; t3_send_dbl_feac(sc, T3BOP_LINE_UP, T3BOP_LOOP_DS3); } else if (ioctl->data == TSEND_RESET) { t3_send_dbl_feac(sc, T3BOP_LINE_DOWN, T3BOP_LOOP_DS3); sc->status.snmp.t3.loop &= ~TLOOP_FAR_LINE; } else error = EINVAL; break; } case IOCTL_SNMP_LOOP: /* set opstatus = test? */ { if (ioctl->data == CFG_LOOP_NONE) { clr_mii16_bits(sc, MII16_DS3_FRAME); clr_mii16_bits(sc, MII16_DS3_TRLBK); clr_mii16_bits(sc, MII16_DS3_LNLBK); write_framer(sc, T3CSR_CTL1, read_framer(sc, T3CSR_CTL1) & ~CTL1_3LOOP); write_framer(sc, T3CSR_CTL12, read_framer(sc, T3CSR_CTL12) & ~(CTL12_RTPLOOP | CTL12_RTPLLEN)); } else if (ioctl->data == CFG_LOOP_LINE) set_mii16_bits(sc, MII16_DS3_LNLBK); else if (ioctl->data == CFG_LOOP_OTHER) set_mii16_bits(sc, MII16_DS3_TRLBK); else if (ioctl->data == CFG_LOOP_INWARD) write_framer(sc, T3CSR_CTL1, read_framer(sc, T3CSR_CTL1) | CTL1_3LOOP); else if (ioctl->data == CFG_LOOP_DUAL) { set_mii16_bits(sc, MII16_DS3_LNLBK); write_framer(sc, T3CSR_CTL1, read_framer(sc, T3CSR_CTL1) | CTL1_3LOOP); } else if (ioctl->data == CFG_LOOP_PAYLOAD) { set_mii16_bits(sc, MII16_DS3_FRAME); write_framer(sc, T3CSR_CTL12, read_framer(sc, T3CSR_CTL12) | CTL12_RTPLOOP); write_framer(sc, T3CSR_CTL12, read_framer(sc, T3CSR_CTL12) | CTL12_RTPLLEN); DELAY(25); /* at least two frames (22 uS) */ write_framer(sc, T3CSR_CTL12, read_framer(sc, T3CSR_CTL12) & ~CTL12_RTPLLEN); } else error = EINVAL; break; } default: error = EINVAL; break; } return error; } /* begin SSI card code */ /* Must not sleep. */ static void ssi_config(softc_t *sc) { if (sc->status.card_type == 0) { /* defaults */ sc->status.card_type = TLP_CSID_SSI; sc->config.crc_len = CFG_CRC_16; sc->config.loop_back = CFG_LOOP_NONE; sc->config.tx_clk_src = CFG_CLKMUX_ST; sc->config.dte_dce = CFG_DTE; sc->config.synth.n = 51; /* 1.536 MHz */ sc->config.synth.m = 83; sc->config.synth.v = 1; sc->config.synth.x = 1; sc->config.synth.r = 1; sc->config.synth.prescale = 4; } /* Disable the TX clock driver while programming the oscillator. */ clr_gpio_bits(sc, GPIO_SSI_DCE); make_gpio_output(sc, GPIO_SSI_DCE); /* Program the synthesized oscillator. */ write_synth(sc, &sc->config.synth); /* Set DTE/DCE mode. */ /* If DTE mode then DCD & TXC are received. */ /* If DCE mode then DCD & TXC are driven. */ /* Boards with MII rev=4.0 don't drive DCD. */ if (sc->config.dte_dce == CFG_DCE) set_gpio_bits(sc, GPIO_SSI_DCE); else clr_gpio_bits(sc, GPIO_SSI_DCE); make_gpio_output(sc, GPIO_SSI_DCE); /* Set CRC length. */ if (sc->config.crc_len == CFG_CRC_32) set_mii16_bits(sc, MII16_SSI_CRC32); else clr_mii16_bits(sc, MII16_SSI_CRC32); /* Loop towards host thru cable drivers and receivers. */ /* Asserts DCD at the far end of a null modem cable. */ if (sc->config.loop_back == CFG_LOOP_PINS) set_mii16_bits(sc, MII16_SSI_LOOP); else clr_mii16_bits(sc, MII16_SSI_LOOP); /* Assert pin LL in modem conn: ask modem for local loop. */ /* Asserts TM at the far end of a null modem cable. */ if (sc->config.loop_back == CFG_LOOP_LL) set_mii16_bits(sc, MII16_SSI_LL); else clr_mii16_bits(sc, MII16_SSI_LL); /* Assert pin RL in modem conn: ask modem for remote loop. */ if (sc->config.loop_back == CFG_LOOP_RL) set_mii16_bits(sc, MII16_SSI_RL); else clr_mii16_bits(sc, MII16_SSI_RL); } static void ssi_ident(softc_t *sc) { printf(", LTC1343/44"); } /* Called once a second; must not sleep. 
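As on the other cards, the watchdog refreshes the LEDs and folds the
modem signals into a single link verdict.  The "peer ready" test below
depends on which end of the cable we are; as a one-line predicate:

	ready = (sc->config.dte_dce == CFG_DTE) ? (mii16 & MII16_SSI_DCD) != 0
	                                        : (mii16 & MII16_SSI_DSR) != 0;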
*/ static int ssi_watchdog(softc_t *sc) { u_int16_t cable; u_int16_t mii16 = read_mii(sc, 16) & MII16_SSI_MODEM; int link_status = STATUS_UP; /* Software is alive. */ led_inv(sc, MII16_SSI_LED_UL); /* Check the transmit clock. */ if (sc->status.tx_speed == 0) { led_on(sc, MII16_SSI_LED_UR); link_status = STATUS_DOWN; } else led_off(sc, MII16_SSI_LED_UR); /* Check the external cable. */ cable = read_mii(sc, 17); cable = cable & MII17_SSI_CABLE_MASK; cable = cable >> MII17_SSI_CABLE_SHIFT; if (cable == 7) { led_off(sc, MII16_SSI_LED_LL); /* no cable */ link_status = STATUS_DOWN; } else led_on(sc, MII16_SSI_LED_LL); /* The unit at the other end of the cable is ready if: */ /* DTE mode and DCD pin is asserted */ /* DCE mode and DSR pin is asserted */ if (((sc->config.dte_dce == CFG_DTE) && ((mii16 & MII16_SSI_DCD)==0)) || ((sc->config.dte_dce == CFG_DCE) && ((mii16 & MII16_SSI_DSR)==0))) { led_off(sc, MII16_SSI_LED_LR); link_status = STATUS_DOWN; } else led_on(sc, MII16_SSI_LED_LR); if (DRIVER_DEBUG && (cable != sc->status.cable_type)) printf("%s: SSI cable type changed to '%s'\n", NAME_UNIT, ssi_cables[cable]); sc->status.cable_type = cable; /* Print the modem control signals if they changed. */ if ((DRIVER_DEBUG) && (mii16 != sc->last_mii16)) { char *on = "ON ", *off = "OFF"; printf("%s: DTR=%s DSR=%s RTS=%s CTS=%s DCD=%s RI=%s LL=%s RL=%s TM=%s\n", NAME_UNIT, (mii16 & MII16_SSI_DTR) ? on : off, (mii16 & MII16_SSI_DSR) ? on : off, (mii16 & MII16_SSI_RTS) ? on : off, (mii16 & MII16_SSI_CTS) ? on : off, (mii16 & MII16_SSI_DCD) ? on : off, (mii16 & MII16_SSI_RI) ? on : off, (mii16 & MII16_SSI_LL) ? on : off, (mii16 & MII16_SSI_RL) ? on : off, (mii16 & MII16_SSI_TM) ? on : off); } /* SNMP one-second report */ sc->status.snmp.ssi.sigs = mii16 & MII16_SSI_MODEM; /* Remember this state until next time. */ sc->last_mii16 = mii16; /* If a loop back is in effect, link status is UP */ if (sc->config.loop_back != CFG_LOOP_NONE) link_status = STATUS_UP; return link_status; } /* IOCTL SYSCALL: can sleep (but doesn't). */ static int ssi_ioctl(softc_t *sc, struct ioctl *ioctl) { int error = 0; if (ioctl->cmd == IOCTL_SNMP_SIGS) { u_int16_t mii16 = read_mii(sc, 16); mii16 &= ~MII16_SSI_MODEM; mii16 |= (MII16_SSI_MODEM & ioctl->data); write_mii(sc, 16, mii16); } else if (ioctl->cmd == IOCTL_SET_STATUS) { if (ioctl->data != 0) set_mii16_bits(sc, (MII16_SSI_DTR | MII16_SSI_RTS | MII16_SSI_DCD)); else clr_mii16_bits(sc, (MII16_SSI_DTR | MII16_SSI_RTS | MII16_SSI_DCD)); } else error = EINVAL; return error; } /* begin T1E1 card code */ /* Must not sleep. */ static void t1_config(softc_t *sc) { int i; u_int8_t pulse, lbo, gain; if (sc->status.card_type == 0) { /* defaults */ sc->status.card_type = TLP_CSID_T1E1; sc->config.crc_len = CFG_CRC_16; sc->config.loop_back = CFG_LOOP_NONE; sc->config.tx_clk_src = CFG_CLKMUX_INT; sc->config.format = CFG_FORMAT_T1ESF; sc->config.cable_len = 10; sc->config.time_slots = 0x01FFFFFE; sc->config.tx_pulse = CFG_PULSE_AUTO; sc->config.rx_gain = CFG_GAIN_AUTO; sc->config.tx_lbo = CFG_LBO_AUTO; /* Bt8370 occasionally powers up in a loopback mode. */ /* Data sheet says zero LOOP reg and do a s/w reset. */ write_framer(sc, Bt8370_LOOP, 0x00); /* no loopback */ write_framer(sc, Bt8370_CR0, 0x80); /* s/w reset */ for (i=0; i<10; i++) /* max delay 10 ms */ if (read_framer(sc, Bt8370_CR0) & 0x80) DELAY(1000); } /* Set CRC length. */ if (sc->config.crc_len == CFG_CRC_32) set_mii16_bits(sc, MII16_T1_CRC32); else clr_mii16_bits(sc, MII16_T1_CRC32); /* Invert HDLC payload data in SF/AMI mode. 
*/ /* HDLC stuffs a 0 after five consecutive 1s, so the inverted payload carries a 1 at least every six bits: the stuff bits satisfy T1 pulse density. */ if (FORMAT_T1SF) set_mii16_bits(sc, MII16_T1_INVERT); else clr_mii16_bits(sc, MII16_T1_INVERT); /* Set the transmitter output impedance. */ if (FORMAT_E1ANY) set_mii16_bits(sc, MII16_T1_Z); /* 001:CR0 -- Control Register 0 - T1/E1 and frame format */ write_framer(sc, Bt8370_CR0, sc->config.format); /* 002:JAT_CR -- Jitter Attenuator Control Register */ if (sc->config.tx_clk_src == CFG_CLKMUX_RT) /* loop timing */ write_framer(sc, Bt8370_JAT_CR, 0xA3); /* JAT in RX path */ else { /* 64-bit elastic store; free-running JCLK and CLADO */ write_framer(sc, Bt8370_JAT_CR, 0x4B); /* assert jcenter */ write_framer(sc, Bt8370_JAT_CR, 0x43); /* release jcenter */ } /* 00C-013:IERn -- Interrupt Enable Registers */ for (i=Bt8370_IER7; i<=Bt8370_IER0; i++) write_framer(sc, i, 0); /* no interrupts; polled */ /* 014:LOOP -- loopbacks */ if (sc->config.loop_back == CFG_LOOP_PAYLOAD) write_framer(sc, Bt8370_LOOP, LOOP_PAYLOAD); else if (sc->config.loop_back == CFG_LOOP_LINE) write_framer(sc, Bt8370_LOOP, LOOP_LINE); else if (sc->config.loop_back == CFG_LOOP_OTHER) write_framer(sc, Bt8370_LOOP, LOOP_ANALOG); else if (sc->config.loop_back == CFG_LOOP_INWARD) write_framer(sc, Bt8370_LOOP, LOOP_FRAMER); else if (sc->config.loop_back == CFG_LOOP_DUAL) write_framer(sc, Bt8370_LOOP, LOOP_DUAL); else write_framer(sc, Bt8370_LOOP, 0x00); /* no loopback */ /* 015:DL3_TS -- Data Link 3 */ write_framer(sc, Bt8370_DL3_TS, 0x00); /* disabled */ /* 018:PIO -- Programmable I/O */ write_framer(sc, Bt8370_PIO, 0xFF); /* all pins are outputs */ /* 019:POE -- Programmable Output Enable */ write_framer(sc, Bt8370_POE, 0x00); /* all outputs are enabled */ /* 01A:CMUX -- Clock Input Mux */ if (sc->config.tx_clk_src == CFG_CLKMUX_EXT) write_framer(sc, Bt8370_CMUX, 0x0C); /* external timing */ else write_framer(sc, Bt8370_CMUX, 0x0F); /* internal timing */ /* 020:LIU_CR -- Line Interface Unit Config Register */ write_framer(sc, Bt8370_LIU_CR, 0xC1); /* reset LIU, squelch */ /* 022:RLIU_CR -- RX Line Interface Unit Config Reg */ /* Errata sheet says don't use freeze-short, but we do anyway! */ write_framer(sc, Bt8370_RLIU_CR, 0xB1); /* AGC=2048, Long Eye */ /* Select Rx sensitivity based on cable length.
*/ if ((gain = sc->config.rx_gain) == CFG_GAIN_AUTO) { if (sc->config.cable_len > 2000) gain = CFG_GAIN_EXTEND; else if (sc->config.cable_len > 1000) gain = CFG_GAIN_LONG; else if (sc->config.cable_len > 100) gain = CFG_GAIN_MEDIUM; else gain = CFG_GAIN_SHORT; } /* 024:VGA_MAX -- Variable Gain Amplifier Max gain */ write_framer(sc, Bt8370_VGA_MAX, gain); /* 028:PRE_EQ -- Pre Equalizer */ if (gain == CFG_GAIN_EXTEND) write_framer(sc, Bt8370_PRE_EQ, 0xE6); /* ON; thresh 6 */ else write_framer(sc, Bt8370_PRE_EQ, 0xA6); /* OFF; thresh 6 */ /* 038-03C:GAINn -- RX Equalizer gain thresholds */ write_framer(sc, Bt8370_GAIN0, 0x24); write_framer(sc, Bt8370_GAIN1, 0x28); write_framer(sc, Bt8370_GAIN2, 0x2C); write_framer(sc, Bt8370_GAIN3, 0x30); write_framer(sc, Bt8370_GAIN4, 0x34); /* 040:RCR0 -- Receiver Control Register 0 */ if (FORMAT_T1ESF) write_framer(sc, Bt8370_RCR0, 0x05); /* B8ZS, 2/5 FErrs */ else if (FORMAT_T1SF) write_framer(sc, Bt8370_RCR0, 0x84); /* AMI, 2/5 FErrs */ else if (FORMAT_E1NONE) write_framer(sc, Bt8370_RCR0, 0x41); /* HDB3, rabort */ else if (FORMAT_E1CRC) write_framer(sc, Bt8370_RCR0, 0x09); /* HDB3, 3 FErrs or 915 CErrs */ else /* E1 no CRC */ write_framer(sc, Bt8370_RCR0, 0x19); /* HDB3, 3 FErrs */ /* 041:RPATT -- Receive Test Pattern configuration */ write_framer(sc, Bt8370_RPATT, 0x3E); /* looking for framed QRSS */ /* 042:RLB -- Receive Loop Back code detector config */ write_framer(sc, Bt8370_RLB, 0x09); /* 6 bits down; 5 bits up */ /* 043:LBA -- Loop Back Activate code */ write_framer(sc, Bt8370_LBA, 0x08); /* 10000 10000 10000 ... */ /* 044:LBD -- Loop Back Deactivate code */ write_framer(sc, Bt8370_LBD, 0x24); /* 100100 100100 100100 ... */ /* 045:RALM -- Receive Alarm signal configuration */ write_framer(sc, Bt8370_RALM, 0x0C); /* yel_intg rlof_intg */ /* 046:LATCH -- Alarm/Error/Counter Latch register */ write_framer(sc, Bt8370_LATCH, 0x1F); /* stop_cnt latch_{cnt,err,alm} */ /* Select Pulse Shape based on cable length (T1 only). */ if ((pulse = sc->config.tx_pulse) == CFG_PULSE_AUTO) { if (FORMAT_T1ANY) { if (sc->config.cable_len > 200) pulse = CFG_PULSE_T1CSU; else if (sc->config.cable_len > 160) pulse = CFG_PULSE_T1DSX4; else if (sc->config.cable_len > 120) pulse = CFG_PULSE_T1DSX3; else if (sc->config.cable_len > 80) pulse = CFG_PULSE_T1DSX2; else if (sc->config.cable_len > 40) pulse = CFG_PULSE_T1DSX1; else pulse = CFG_PULSE_T1DSX0; } else pulse = CFG_PULSE_E1TWIST; } /* Select Line Build Out based on cable length (T1CSU only). 
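(Worked example of the AUTO selections here, values hypothetical: with tx_pulse == CFG_PULSE_AUTO and a T1 format, cable_len = 100 falls through the 200/160/120 tests above, matches "greater than 80", and selects CFG_PULSE_T1DSX2. The LBO chain below works the same way; written as if the two chains were helper functions:

    // auto_pulse(250) == CFG_PULSE_T1CSU   since 250 is greater than 200
    // auto_lbo(250)   == CFG_LBO_22DB      since 250 is not greater than 500

End of aside.)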
*/ if ((lbo = sc->config.tx_lbo) == CFG_LBO_AUTO) { if (pulse == CFG_PULSE_T1CSU) { if (sc->config.cable_len > 1500) lbo = CFG_LBO_0DB; else if (sc->config.cable_len > 1000) lbo = CFG_LBO_7DB; else if (sc->config.cable_len > 500) lbo = CFG_LBO_15DB; else lbo = CFG_LBO_22DB; } else lbo = 0; } /* 068:TLIU_CR -- Transmit LIU Control Register */ write_framer(sc, Bt8370_TLIU_CR, (0x40 | (lbo & 0x30) | (pulse & 0x0E))); /* 070:TCR0 -- Transmit Framer Configuration */ write_framer(sc, Bt8370_TCR0, sc->config.format>>1); /* 071:TCR1 -- Transmitter Configuration */ if (FORMAT_T1SF) write_framer(sc, Bt8370_TCR1, 0x43); /* tabort, AMI PDV enforced */ else write_framer(sc, Bt8370_TCR1, 0x41); /* tabort, B8ZS or HDB3 */ /* 072:TFRM -- Transmit Frame format MYEL YEL MF FE CRC FBIT */ if (sc->config.format == CFG_FORMAT_T1ESF) write_framer(sc, Bt8370_TFRM, 0x0B); /* - YEL MF - CRC FBIT */ else if (sc->config.format == CFG_FORMAT_T1SF) write_framer(sc, Bt8370_TFRM, 0x19); /* - YEL MF - - FBIT */ else if (sc->config.format == CFG_FORMAT_E1FAS) write_framer(sc, Bt8370_TFRM, 0x11); /* - YEL - - - FBIT */ else if (sc->config.format == CFG_FORMAT_E1FASCRC) write_framer(sc, Bt8370_TFRM, 0x1F); /* - YEL MF FE CRC FBIT */ else if (sc->config.format == CFG_FORMAT_E1FASCAS) write_framer(sc, Bt8370_TFRM, 0x31); /* MYEL YEL - - - FBIT */ else if (sc->config.format == CFG_FORMAT_E1FASCRCCAS) write_framer(sc, Bt8370_TFRM, 0x3F); /* MYEL YEL MF FE CRC FBIT */ else if (sc->config.format == CFG_FORMAT_E1NONE) write_framer(sc, Bt8370_TFRM, 0x00); /* NO FRAMING BITS AT ALL! */ /* 073:TERROR -- Transmit Error Insert */ write_framer(sc, Bt8370_TERROR, 0x00); /* no errors, please! */ /* 074:TMAN -- Transmit Manual Sa-byte/FEBE configuration */ write_framer(sc, Bt8370_TMAN, 0x00); /* none */ /* 075:TALM -- Transmit Alarm Signal Configuration */ if (FORMAT_E1ANY) write_framer(sc, Bt8370_TALM, 0x38); /* auto_myel auto_yel auto_ais */ else if (FORMAT_T1ANY) write_framer(sc, Bt8370_TALM, 0x18); /* auto_yel auto_ais */ /* 076:TPATT -- Transmit Test Pattern Configuration */ write_framer(sc, Bt8370_TPATT, 0x00); /* disabled */ /* 077:TLB -- Transmit Inband Loopback Code Configuration */ write_framer(sc, Bt8370_TLB, 0x00); /* disabled */ /* 090:CLAD_CR -- Clock Rate Adapter Configuration */ if (FORMAT_T1ANY) write_framer(sc, Bt8370_CLAD_CR, 0x06); /* loop filter gain 1/2^6 */ else write_framer(sc, Bt8370_CLAD_CR, 0x08); /* loop filter gain 1/2^8 */ /* 091:CSEL -- CLAD frequency Select */ if (FORMAT_T1ANY) write_framer(sc, Bt8370_CSEL, 0x55); /* 1544 kHz */ else write_framer(sc, Bt8370_CSEL, 0x11); /* 2048 kHz */ /* 092:CPHASE -- CLAD Phase detector */ if (FORMAT_T1ANY) write_framer(sc, Bt8370_CPHASE, 0x22); /* phase compare @ 386 kHz */ else write_framer(sc, Bt8370_CPHASE, 0x00); /* phase compare @ 2048 kHz */ if (FORMAT_T1ESF) /* BOP & PRM are enabled in T1ESF mode only.
*/ { /* 0A0:BOP -- Bit Oriented Protocol messages */ write_framer(sc, Bt8370_BOP, RBOP_25 | TBOP_OFF); /* 0A4:DL1_TS -- Data Link 1 Time Slot Enable */ write_framer(sc, Bt8370_DL1_TS, 0x40); /* FDL bits in odd frames */ /* 0A6:DL1_CTL -- Data Link 1 Control */ write_framer(sc, Bt8370_DL1_CTL, 0x03); /* FCS mode, TX on, RX on */ /* 0A7:RDL1_FFC -- Rx Data Link 1 Fifo Fill Control */ write_framer(sc, Bt8370_RDL1_FFC, 0x30); /* assert "near full" at 48 */ /* 0AA:PRM -- Performance Report Messages */ write_framer(sc, Bt8370_PRM, 0x80); } /* 0D0:SBI_CR -- System Bus Interface Configuration Register */ if (FORMAT_T1ANY) write_framer(sc, Bt8370_SBI_CR, 0x47); /* 1.544 with 24 TS +Fbits */ else write_framer(sc, Bt8370_SBI_CR, 0x46); /* 2.048 with 32 TS */ /* 0D1:RSB_CR -- Receive System Bus Configuration Register */ /* Change RINDO & RFSYNC on falling edge of RSBCLKI. */ write_framer(sc, Bt8370_RSB_CR, 0x70); /* 0D2,0D3:RSYNC_{TS,BIT} -- Receive frame Sync offset */ write_framer(sc, Bt8370_RSYNC_BIT, 0x00); write_framer(sc, Bt8370_RSYNC_TS, 0x00); /* 0D4:TSB_CR -- Transmit System Bus Configuration Register */ /* Change TINDO & TFSYNC on falling edge of TSBCLKI. */ write_framer(sc, Bt8370_TSB_CR, 0x30); /* 0D5,0D6:TSYNC_{TS,BIT} -- Transmit frame Sync offset */ write_framer(sc, Bt8370_TSYNC_BIT, 0x00); write_framer(sc, Bt8370_TSYNC_TS, 0x00); /* 0D7:RSIG_CR -- Receive SIGnalling Configuration Register */ write_framer(sc, Bt8370_RSIG_CR, 0x00); /* Assign and configure 64Kb TIME SLOTS. */ /* TS24..TS1 must be assigned for T1, TS31..TS0 for E1. */ /* Timeslots with no user data have RINDO and TINDO off. */ for (i=0; i<32; i++) { /* 0E0-0FF:SBCn -- System Bus Per-Channel Control */ if (FORMAT_T1ANY && (i==0 || i>24)) write_framer(sc, Bt8370_SBCn +i, 0x00); /* not assigned in T1 mode */ else if (FORMAT_E1ANY && (i==0) && !FORMAT_E1NONE) write_framer(sc, Bt8370_SBCn +i, 0x01); /* assigned, TS0 o/h bits */ else if (FORMAT_E1CAS && (i==16) && !FORMAT_E1NONE) write_framer(sc, Bt8370_SBCn +i, 0x01); /* assigned, TS16 o/h bits */ else if ((sc->config.time_slots & (1<<i)) != 0) write_framer(sc, Bt8370_SBCn +i, 0x0D); /* assigned; RINDO and TINDO on */ else write_framer(sc, Bt8370_SBCn +i, 0x00); /* not assigned */ /* 100-11F:TPCn -- Transmit Per-Channel Control */ if ((sc->config.time_slots & (1<<i)) != 0) write_framer(sc, Bt8370_TPCn +i, 0x00); /* tx user data unchanged */ else write_framer(sc, Bt8370_TPCn +i, 0x20); /* tx idle code */ } } static void t1_ident(softc_t *sc) { printf(", Bt837%x rev %x", read_framer(sc, Bt8370_DID)>>4, read_framer(sc, Bt8370_DID)&0x0F); }
*/ if (link_status == STATUS_DOWN) led_on(sc, MII16_T1_LED_RED); else if (sc->loop_timer != 0) /* loopback is active */ led_inv(sc, MII16_T1_LED_RED); else led_off(sc, MII16_T1_LED_RED); /* Print latched error bits if they changed. */ if ((DRIVER_DEBUG) && (alm1 != sc->last_alm1)) { char *on = "ON ", *off = "OFF"; printf("%s: RLOF=%s RLOS=%s RALOS=%s RAIS=%s RYEL=%s RMYEL=%s\n", NAME_UNIT, (alm1 & ALM1_RLOF) ? on : off, (alm1 & ALM1_RLOS) ? on : off, (alm1 & ALM1_RALOS) ? on : off, (alm1 & ALM1_RAIS) ? on : off, (alm1 & ALM1_RYEL) ? on : off, (alm1 & ALM1_RMYEL) ? on : off); } /* Check and print error counters if non-zero. */ LCV = read_framer(sc, Bt8370_LCV_LO) + (read_framer(sc, Bt8370_LCV_HI)<<8); if (!FORMAT_E1NONE) FERR = read_framer(sc, Bt8370_FERR_LO) + (read_framer(sc, Bt8370_FERR_HI)<<8); if (FORMAT_E1CRC || FORMAT_T1ESF) CRC = read_framer(sc, Bt8370_CRC_LO) + (read_framer(sc, Bt8370_CRC_HI)<<8); if (FORMAT_E1CRC) FEBE = read_framer(sc, Bt8370_FEBE_LO) + (read_framer(sc, Bt8370_FEBE_HI)<<8); /* Only LCV is valid if Out-Of-Frame */ if (FORMAT_E1NONE) FERR = CRC = FEBE = 0; if ((DRIVER_DEBUG) && (LCV || FERR || CRC || FEBE)) printf("%s: LCV=%u FERR=%u CRC=%u FEBE=%u\n", NAME_UNIT, LCV, FERR, CRC, FEBE); /* Driver keeps crude link-level error counters (SNMP is better). */ sc->status.cntrs.lcv_errs += LCV; sc->status.cntrs.frm_errs += FERR; sc->status.cntrs.crc_errs += CRC; sc->status.cntrs.febe_errs += FEBE; /* Check for BOP messages in the ESF Facility Data Link. */ if ((FORMAT_T1ESF) && (read_framer(sc, Bt8370_ISR1) & 0x80)) { u_int8_t bop_code = read_framer(sc, Bt8370_RBOP) & 0x3F; switch (bop_code) { case T1BOP_OOF: { if ((DRIVER_DEBUG) && ((sc->last_alm1 & ALM1_RMYEL)==0)) printf("%s: Receiving a 'yellow alarm' BOP msg\n", NAME_UNIT); break; } case T1BOP_LINE_UP: { if (DRIVER_DEBUG) printf("%s: Received a 'line loopback activate' BOP msg\n", NAME_UNIT); write_framer(sc, Bt8370_LOOP, LOOP_LINE); sc->loop_timer = 305; break; } case T1BOP_LINE_DOWN: { if (DRIVER_DEBUG) printf("%s: Received a 'line loopback deactivate' BOP msg\n", NAME_UNIT); write_framer(sc, Bt8370_LOOP, read_framer(sc, Bt8370_LOOP) & ~LOOP_LINE); sc->loop_timer = 0; break; } case T1BOP_PAY_UP: { if (DRIVER_DEBUG) printf("%s: Received a 'payload loopback activate' BOP msg\n", NAME_UNIT); write_framer(sc, Bt8370_LOOP, LOOP_PAYLOAD); sc->loop_timer = 305; break; } case T1BOP_PAY_DOWN: { if (DRIVER_DEBUG) printf("%s: Received a 'payload loopback deactivate' BOP msg\n", NAME_UNIT); write_framer(sc, Bt8370_LOOP, read_framer(sc, Bt8370_LOOP) & ~LOOP_PAYLOAD); sc->loop_timer = 0; break; } default: { if (DRIVER_DEBUG) printf("%s: Received a type 0x%02X BOP msg\n", NAME_UNIT, bop_code); break; } } } /* Check for HDLC pkts in the ESF Facility Data Link. */ if ((FORMAT_T1ESF) && (read_framer(sc, Bt8370_ISR2) & 0x70)) { /* while (not fifo-empty && not start-of-msg) flush fifo */ while ((read_framer(sc, Bt8370_RDL1_STAT) & 0x0C) == 0) read_framer(sc, Bt8370_RDL1); /* If (not fifo-empty), then begin processing fifo contents. */ if ((read_framer(sc, Bt8370_RDL1_STAT) & 0x0C) == 0x08) { u_int8_t msg[64]; u_int8_t stat = read_framer(sc, Bt8370_RDL1); sc->status.cntrs.fdl_pkts++; for (i=0; i<(stat & 0x3F); i++) msg[i] = read_framer(sc, Bt8370_RDL1); /* Is this FDL message a T1.403 performance report? */ if (((stat & 0x3F)==11) && ((msg[0]==0x38) || (msg[0]==0x3A)) && (msg[1]==1) && (msg[2]==3)) /* Copy 4 PRs from FDL pkt to SNMP struct. */ memcpy(sc->status.snmp.t1.prm, msg+3, 8); } } /* Check for inband loop up/down commands. 
*/ if (FORMAT_T1ANY) { u_int8_t isr6 = read_framer(sc, Bt8370_ISR6); u_int8_t alarm2 = read_framer(sc, Bt8370_ALM2); u_int8_t tlb = read_framer(sc, Bt8370_TLB); /* Inband Code == Loop Up && On Transition && Inband Tx Inactive */ if ((isr6 & 0x40) && (alarm2 & 0x40) && ((tlb & 1)==0)) { /* CSU loop up is 10000 10000 ... */ if (DRIVER_DEBUG) printf("%s: Received a 'CSU Loop Up' inband msg\n", NAME_UNIT); write_framer(sc, Bt8370_LOOP, LOOP_LINE); /* Loop up */ sc->loop_timer = 305; } /* Inband Code == Loop Down && On Transition && Inband Tx Inactive */ if ((isr6 & 0x80) && (alarm2 & 0x80) && ((tlb & 1)==0)) { /* CSU loop down is 100 100 100 ... */ if (DRIVER_DEBUG) printf("%s: Received a 'CSU Loop Down' inband msg\n", NAME_UNIT); write_framer(sc, Bt8370_LOOP, read_framer(sc, Bt8370_LOOP) & ~LOOP_LINE); /* loop down */ sc->loop_timer = 0; } } /* Manually send Yellow Alarm BOP msgs. */ if (FORMAT_T1ESF) { u_int8_t isr7 = read_framer(sc, Bt8370_ISR7); if ((isr7 & 0x02) && (alm1 & 0x02)) /* RLOF on-transition */ { /* Start sending continuous Yellow Alarm BOP messages. */ write_framer(sc, Bt8370_BOP, RBOP_25 | TBOP_CONT); write_framer(sc, Bt8370_TBOP, 0x00); /* send BOP; order matters */ } else if ((isr7 & 0x02) && ((alm1 & 0x02)==0)) /* RLOF off-transition */ { /* Stop sending continuous Yellow Alarm BOP messages. */ write_framer(sc, Bt8370_BOP, RBOP_25 | TBOP_OFF); } } /* Time out loopback requests. */ if (sc->loop_timer != 0) if (--sc->loop_timer == 0) if (loop != 0) { if (DRIVER_DEBUG) printf("%s: Timeout: Loop Down after 300 seconds\n", NAME_UNIT); write_framer(sc, Bt8370_LOOP, loop & ~(LOOP_PAYLOAD | LOOP_LINE)); } /* RX Test Pattern status */ if ((DRIVER_DEBUG) && (isr0 & 0x10)) printf("%s: RX Test Pattern Sync\n", NAME_UNIT); /* SNMP Error Counters */ sc->status.snmp.t1.lcv = LCV; sc->status.snmp.t1.fe = FERR; sc->status.snmp.t1.crc = CRC; sc->status.snmp.t1.febe = FEBE; /* SNMP Line Status */ sc->status.snmp.t1.line = 0; if (alm1 & ALM1_RMYEL) sc->status.snmp.t1.line |= TLINE_RX_RAI; if (alm1 & ALM1_RYEL) sc->status.snmp.t1.line |= TLINE_RX_RAI; if (alm1 & ALM1_RLOF) sc->status.snmp.t1.line |= TLINE_TX_RAI; if (alm1 & ALM1_RAIS) sc->status.snmp.t1.line |= TLINE_RX_AIS; if (alm1 & ALM1_RLOS) sc->status.snmp.t1.line |= TLINE_TX_AIS; if (alm1 & ALM1_RLOF) sc->status.snmp.t1.line |= TLINE_LOF; if (alm1 & ALM1_RLOS) sc->status.snmp.t1.line |= TLINE_LOS; if (alm3 & ALM3_RMAIS) sc->status.snmp.t1.line |= T1LINE_RX_TS16_AIS; if (alm3 & ALM3_SRED) sc->status.snmp.t1.line |= T1LINE_TX_TS16_LOMF; if (alm3 & ALM3_SEF) sc->status.snmp.t1.line |= T1LINE_SEF; if (isr0 & 0x10) sc->status.snmp.t1.line |= T1LINE_RX_TEST; if ((alm1 & ALM1_RMYEL) && (FORMAT_E1CAS)) sc->status.snmp.t1.line |= T1LINE_RX_TS16_LOMF; /* SNMP Loopback Status */ sc->status.snmp.t1.loop &= ~(TLOOP_FAR_LINE | TLOOP_FAR_PAYLOAD); if (sc->config.loop_back == CFG_LOOP_TULIP) sc->status.snmp.t1.loop |= TLOOP_NEAR_OTHER; if (loop & LOOP_PAYLOAD) sc->status.snmp.t1.loop |= TLOOP_NEAR_PAYLOAD; if (loop & LOOP_LINE) sc->status.snmp.t1.loop |= TLOOP_NEAR_LINE; if (loop & LOOP_ANALOG) sc->status.snmp.t1.loop |= TLOOP_NEAR_OTHER; if (loop & LOOP_FRAMER) sc->status.snmp.t1.loop |= TLOOP_NEAR_INWARD; /* Remember this state until next time. */ sc->last_alm1 = alm1; /* If an INWARD loopback is in effect, link status is UP */ if (sc->config.loop_back != CFG_LOOP_NONE) /* XXX INWARD ONLY */ link_status = STATUS_UP; return link_status; } /* IOCTL SYSCALL: can sleep. 
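(Aside on the loop_timer logic above: loopback requests arm a one-shot countdown that the once-a-second watchdog decrements, so a remote loopback left in place self-clears after about five minutes. The pattern, in sketch form:

    sc->loop_timer = 305;                     // arm: 300 s plus slop
    // ...then once per second in the watchdog:
    if (sc->loop_timer != 0 && --sc->loop_timer == 0)
      write_framer(sc, Bt8370_LOOP,
       loop & ~(LOOP_PAYLOAD | LOOP_LINE));   // automatic loop-down

End of aside.)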
*/ static void t1_send_bop(softc_t *sc, int bop_code) { u_int8_t bop; int i; /* The BOP transmitter could be sending a continuous */ /* BOP msg when told to send this BOP_25 message. */ /* So save and restore the state of the BOP machine. */ bop = read_framer(sc, Bt8370_BOP); write_framer(sc, Bt8370_BOP, RBOP_OFF | TBOP_OFF); for (i=0; i<40; i++) /* max delay 400 ms. */ if (read_framer(sc, Bt8370_BOP_STAT) & 0x80) SLEEP(10000); /* send 25 repetitions of bop_code */ write_framer(sc, Bt8370_BOP, RBOP_OFF | TBOP_25); write_framer(sc, Bt8370_TBOP, bop_code); /* order matters */ /* wait for tx to stop */ for (i=0; i<40; i++) /* max delay 400 ms. */ if (read_framer(sc, Bt8370_BOP_STAT) & 0x80) SLEEP(10000); /* Restore previous state of the BOP machine. */ write_framer(sc, Bt8370_BOP, bop); } /* IOCTL SYSCALL: can sleep. */ static int t1_ioctl(softc_t *sc, struct ioctl *ioctl) { int error = 0; switch (ioctl->cmd) { case IOCTL_SNMP_SEND: /* set opstatus? */ { switch (ioctl->data) { case TSEND_NORMAL: { write_framer(sc, Bt8370_TPATT, 0x00); /* tx pattern generator off */ write_framer(sc, Bt8370_RPATT, 0x00); /* rx pattern detector off */ write_framer(sc, Bt8370_TLB, 0x00); /* tx inband generator off */ break; } case TSEND_LINE: { if (FORMAT_T1ESF) t1_send_bop(sc, T1BOP_LINE_UP); else if (FORMAT_T1SF) { write_framer(sc, Bt8370_LBP, 0x08); /* 10000 10000 ... */ write_framer(sc, Bt8370_TLB, 0x05); /* 5 bits, framed, start */ } sc->status.snmp.t1.loop |= TLOOP_FAR_LINE; break; } case TSEND_PAYLOAD: { t1_send_bop(sc, T1BOP_PAY_UP); sc->status.snmp.t1.loop |= TLOOP_FAR_PAYLOAD; break; } case TSEND_RESET: { if (sc->status.snmp.t1.loop == TLOOP_FAR_LINE) { if (FORMAT_T1ESF) t1_send_bop(sc, T1BOP_LINE_DOWN); else if (FORMAT_T1SF) { write_framer(sc, Bt8370_LBP, 0x24); /* 100100 100100 ... */ write_framer(sc, Bt8370_TLB, 0x09); /* 6 bits, framed, start */ } sc->status.snmp.t1.loop &= ~TLOOP_FAR_LINE; } if (sc->status.snmp.t1.loop == TLOOP_FAR_PAYLOAD) { t1_send_bop(sc, T1BOP_PAY_DOWN); sc->status.snmp.t1.loop &= ~TLOOP_FAR_PAYLOAD; } break; } case TSEND_QRS: { write_framer(sc, Bt8370_TPATT, 0x1E); /* framed QRSS */ break; } default: { error = EINVAL; break; } } break; } case IOCTL_SNMP_LOOP: /* set opstatus = test? */ { u_int8_t new_loop = 0; if (ioctl->data == CFG_LOOP_NONE) new_loop = 0; else if (ioctl->data == CFG_LOOP_PAYLOAD) new_loop = LOOP_PAYLOAD; else if (ioctl->data == CFG_LOOP_LINE) new_loop = LOOP_LINE; else if (ioctl->data == CFG_LOOP_OTHER) new_loop = LOOP_ANALOG; else if (ioctl->data == CFG_LOOP_INWARD) new_loop = LOOP_FRAMER; else if (ioctl->data == CFG_LOOP_DUAL) new_loop = LOOP_DUAL; else error = EINVAL; if (error == 0) { write_framer(sc, Bt8370_LOOP, new_loop); sc->config.loop_back = ioctl->data; } break; } default: error = EINVAL; break; } return error; } static struct card hssi_card = { .config = hssi_config, .ident = hssi_ident, .watchdog = hssi_watchdog, .ioctl = hssi_ioctl, }; static struct card t3_card = { .config = t3_config, .ident = t3_ident, .watchdog = t3_watchdog, .ioctl = t3_ioctl, }; static struct card ssi_card = { .config = ssi_config, .ident = ssi_ident, .watchdog = ssi_watchdog, .ioctl = ssi_ioctl, }; static struct card t1_card = { .config = t1_config, .ident = t1_ident, .watchdog = t1_watchdog, .ioctl = t1_ioctl, }; /* RAWIP is raw IP packets (v4 or v6) in HDLC frames with NO HEADERS. */ /* No HDLC Address/Control fields! No line control protocol at all! */ /* This code is BSD/ifnet-specific; Linux and Netgraph also do RAWIP. 
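(Aside on the four struct card tables above: they act as a small vtable so shared code can drive any card type without switch statements. A hypothetical call site -- the softc member name "card" is assumed here for illustration only:

    struct card *card = sc->card;          // selected at probe time
    card->config(sc);                      // program the framer
    link_status = card->watchdog(sc);      // once-a-second link check
    error = card->ioctl(sc, ioctl);        // user requests

End of aside.)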
*/ #if IFNET # if ((defined(__FreeBSD__) && (__FreeBSD_version < 500000)) ||\ defined(__NetBSD__) || defined(__OpenBSD__) || defined(__bsdi__)) static void netisr_dispatch(int isr, struct mbuf *mbuf) { struct ifqueue *intrq = NULL; int qfull = 0; #if INET if (isr == NETISR_IP) intrq = &ipintrq; #endif #if INET6 if (isr == NETISR_IPV6) intrq = &ip6intrq; #endif if ((intrq != NULL) && ((qfull = IF_QFULL(intrq)) == 0)) { /* rxintr_cleanup() ENQUEUES in a hard interrupt. */ /* networking code DEQUEUES in a soft interrupt. */ /* Some BSD QUEUE routines are not interrupt-safe. */ DISABLE_INTR; /* noop in FreeBSD */ IF_ENQUEUE(intrq, mbuf); ENABLE_INTR; schednetisr(isr); /* schedule a soft interrupt */ } else { m_freem(mbuf); if ((intrq != NULL) && (qfull != 0)) IF_DROP(intrq); } } # endif /* ((__FreeBSD__ && (__FreeBSD_version < 500000)) || */ /* __NetBSD__ || __OpenBSD__ || __bsdi__) */ /* rxintr_cleanup calls this to give a newly arrived pkt to higher levels. */ static void lmc_raw_input(struct ifnet *ifp, struct mbuf *mbuf) { softc_t *sc = IFP2SC(ifp); # if INET if (mbuf->m_data[0]>>4 == 4) netisr_dispatch(NETISR_IP, mbuf); else # endif # if INET6 if (mbuf->m_data[0]>>4 == 6) netisr_dispatch(NETISR_IPV6, mbuf); else # endif { m_freem(mbuf); sc->status.cntrs.idiscards++; if (DRIVER_DEBUG) printf("%s: lmc_raw_input: rx pkt discarded: not IPv4 or IPv6\n", NAME_UNIT); } } #endif /* IFNET */ /* There are TWO VERSIONS of interrupt/DMA code: Linux & BSD. * Handling Linux and the BSDs with CPP directives would * make the code unreadable, so there are two versions. * Conceptually, the two versions do the same thing and * core_interrupt() doesn't know they are different. * * We are "standing on the head of a pin" in these routines. * Tulip CSRs can be accessed, but nothing else is interrupt-safe! * Do NOT access: MII, GPIO, SROM, BIOSROM, XILINX, SYNTH, or DAC. */ #if BSD /* BSD version of interrupt/DMA code */ /* Singly-linked tail-queues hold mbufs with active DMA. * For RX, single mbuf clusters; for TX, mbuf chains are queued. * NB: mbufs are linked through their m_nextpkt field. * Callers must hold sc->bottom_lock; not otherwise locked. */ /* Put an mbuf (chain) on the tail of the descriptor ring queue. */ static void /* BSD version */ mbuf_enqueue(struct desc_ring *ring, struct mbuf *m) { m->m_nextpkt = NULL; if (ring->tail == NULL) ring->head = m; else ring->tail->m_nextpkt = m; ring->tail = m; } /* Get an mbuf (chain) from the head of the descriptor ring queue. */ static struct mbuf* /* BSD version */ mbuf_dequeue(struct desc_ring *ring) { struct mbuf *m = ring->head; if (m != NULL) if ((ring->head = m->m_nextpkt) == NULL) ring->tail = NULL; return m; } # ifdef __FreeBSD__ static void /* *** FreeBSD ONLY *** Callout from bus_dmamap_load() */ fbsd_dmamap_load(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct desc_ring *ring = arg; ring->nsegs = error ? 0 : nsegs; ring->segs[0] = segs[0]; ring->segs[1] = segs[1]; } # endif /* Initialize a DMA descriptor ring. */ static int /* BSD version */ create_ring(softc_t *sc, struct desc_ring *ring, int num_descs) { struct dma_desc *descs; int size_descs = sizeof(struct dma_desc)*num_descs; int i, error = 0; /* The DMA descriptor array must not cross a page boundary. */ if (size_descs > PAGE_SIZE) { printf("%s: DMA descriptor array > PAGE_SIZE (%d)\n", NAME_UNIT, (u_int)PAGE_SIZE); return EINVAL; } #ifdef __FreeBSD__ /* Create a DMA tag for descriptors and buffers. 
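(Aside on mbuf_enqueue and mbuf_dequeue above: head and tail are both NULL when the queue is empty, enqueue appends at the tail, dequeue pops at the head, so buffers come back in exactly the order their DMA descriptors were posted. Illustrative use with a hypothetical ring r:

    mbuf_enqueue(&r, m1);
    mbuf_enqueue(&r, m2);
    m = mbuf_dequeue(&r);   // returns m1
    m = mbuf_dequeue(&r);   // returns m2
    m = mbuf_dequeue(&r);   // returns NULL; queue empty

End of aside.)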
*/ if ((error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, PAGE_SIZE, 2, PAGE_SIZE, BUS_DMA_ALLOCNOW, # if (__FreeBSD_version >= 502000) NULL, NULL, # endif &ring->tag))) { printf("%s: bus_dma_tag_create() failed: error %d\n", NAME_UNIT, error); return error; } /* Allocate wired physical memory for DMA descriptor array */ /* and map physical address to kernel virtual address. */ if ((error = bus_dmamem_alloc(ring->tag, (void**)&ring->first, BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->map))) { printf("%s: bus_dmamem_alloc() failed; error %d\n", NAME_UNIT, error); return error; } descs = ring->first; /* Map kernel virtual address to PCI address for DMA descriptor array. */ if ((error = bus_dmamap_load(ring->tag, ring->map, descs, size_descs, fbsd_dmamap_load, ring, 0))) { printf("%s: bus_dmamap_load() failed; error %d\n", NAME_UNIT, error); return error; } ring->dma_addr = ring->segs[0].ds_addr; /* Allocate dmamaps for each DMA descriptor. */ for (i=0; i<num_descs; i++) if ((error = bus_dmamap_create(ring->tag, 0, &descs[i].map))) { printf("%s: bus_dmamap_create() failed; error %d\n", NAME_UNIT, error); return error; } #elif (defined(__NetBSD__) || defined(__OpenBSD__)) /* Use the DMA tag passed to attach() for descriptors and buffers. */ ring->tag = sc->pa_dmat; /* Allocate wired physical memory for DMA descriptor array. */ if ((error = bus_dmamem_alloc(ring->tag, size_descs, PAGE_SIZE, 0, ring->segs, 1, &ring->nsegs, BUS_DMA_NOWAIT))) { printf("%s: bus_dmamem_alloc() failed; error %d\n", NAME_UNIT, error); return error; } /* Map physical address to kernel virtual address. */ if ((error = bus_dmamem_map(ring->tag, ring->segs, ring->nsegs, size_descs, (caddr_t *)&ring->first, BUS_DMA_NOWAIT | BUS_DMA_COHERENT))) { printf("%s: bus_dmamem_map() failed; error %d\n", NAME_UNIT, error); return error; } descs = ring->first; /* suppress compiler warning about aliasing */ memset(descs, 0, size_descs); /* Allocate dmamap for PCI access to DMA descriptor array. */ if ((error = bus_dmamap_create(ring->tag, size_descs, 1, size_descs, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ring->map))) { printf("%s: bus_dmamap_create() failed; error %d\n", NAME_UNIT, error); return error; } /* Map kernel virtual address to PCI address for DMA descriptor array. */ if ((error = bus_dmamap_load(ring->tag, ring->map, descs, size_descs, 0, BUS_DMA_NOWAIT))) { printf("%s: bus_dmamap_load() failed; error %d\n", NAME_UNIT, error); return error; } ring->dma_addr = ring->map->dm_segs[0].ds_addr; /* Allocate dmamaps for each DMA descriptor. */ for (i=0; i<num_descs; i++) if ((error = bus_dmamap_create(ring->tag, MAX_DESC_LEN, 2, MAX_CHUNK_LEN, 0, BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &descs[i].map))) { printf("%s: bus_dmamap_create() failed; error %d\n", NAME_UNIT, error); return error; } #elif defined(__bsdi__) /* Allocate wired physical memory for DMA descriptor array. */ if ((ring->first = malloc(size_descs, M_DEVBUF, M_NOWAIT)) == NULL) { printf("%s: malloc() failed for DMA descriptor array\n", NAME_UNIT); return ENOMEM; } descs = ring->first; memset(descs, 0, size_descs); /* Map kernel virtual address to PCI address for DMA descriptor array. */ ring->dma_addr = vtophys(descs); /* Relax! BSD/OS only.
*/ #endif ring->read = descs; ring->write = descs; ring->first = descs; ring->last = descs + num_descs -1; ring->last->control = TLP_DCTL_END_RING; ring->num_descs = num_descs; ring->size_descs = size_descs; ring->head = NULL; ring->tail = NULL; return 0; } /* Destroy a DMA descriptor ring */ static void /* BSD version */ destroy_ring(softc_t *sc, struct desc_ring *ring) { struct dma_desc *desc; struct mbuf *m; /* Free queued mbufs. */ while ((m = mbuf_dequeue(ring)) != NULL) m_freem(m); /* TX may have one pkt that is not on any queue. */ if (sc->tx_mbuf != NULL) { m_freem(sc->tx_mbuf); sc->tx_mbuf = NULL; } /* Unmap active DMA descriptors. */ while (ring->read != ring->write) { bus_dmamap_unload(ring->tag, ring->read->map); if (ring->read++ == ring->last) ring->read = ring->first; } #ifdef __FreeBSD__ /* Free the dmamaps of all DMA descriptors. */ for (desc=ring->first; desc!=ring->last+1; desc++) if (desc->map != NULL) bus_dmamap_destroy(ring->tag, desc->map); /* Unmap PCI address for DMA descriptor array. */ if (ring->dma_addr != 0) bus_dmamap_unload(ring->tag, ring->map); /* Free kernel memory for DMA descriptor array. */ if (ring->first != NULL) bus_dmamem_free(ring->tag, ring->first, ring->map); /* Free the DMA tag created for this ring. */ if (ring->tag != NULL) bus_dma_tag_destroy(ring->tag); #elif (defined(__NetBSD__) || defined(__OpenBSD__)) /* Free the dmamaps of all DMA descriptors. */ for (desc=ring->first; desc!=ring->last+1; desc++) if (desc->map != NULL) bus_dmamap_destroy(ring->tag, desc->map); /* Unmap PCI address for DMA descriptor array. */ if (ring->dma_addr != 0) bus_dmamap_unload(ring->tag, ring->map); /* Free dmamap for DMA descriptor array. */ if (ring->map != NULL) bus_dmamap_destroy(ring->tag, ring->map); /* Unmap kernel address for DMA descriptor array. */ if (ring->first != NULL) bus_dmamem_unmap(ring->tag, (caddr_t)ring->first, ring->size_descs); /* Free kernel memory for DMA descriptor array. */ if (ring->segs[0].ds_addr != 0) bus_dmamem_free(ring->tag, ring->segs, ring->nsegs); #elif defined(__bsdi__) /* Free kernel memory for DMA descriptor array. */ if (ring->first != NULL) free(ring->first, M_DEVBUF); #endif } /* Clean up after a packet has been received. */ static int /* BSD version */ rxintr_cleanup(softc_t *sc) { struct desc_ring *ring = &sc->rxring; struct dma_desc *first_desc, *last_desc; struct mbuf *first_mbuf=NULL, *last_mbuf=NULL; struct mbuf *new_mbuf; int pkt_len, desc_len; #if (defined(__FreeBSD__) && defined(DEVICE_POLLING)) /* Input packet flow control (livelock prevention): */ /* Give pkts to higher levels only if quota is > 0. */ if (sc->quota <= 0) return 0; #endif /* This looks complicated, but remember: typically packets up */ /* to 2048 bytes long fit in one mbuf and use one descriptor. */ first_desc = last_desc = ring->read; /* ASSERTION: If there is a descriptor in the ring and the hardware has */ /* finished with it, then that descriptor will have RX_FIRST_DESC set. */ if ((ring->read != ring->write) && /* descriptor ring not empty */ ((ring->read->status & TLP_DSTS_OWNER) == 0) && /* hardware done */ ((ring->read->status & TLP_DSTS_RX_FIRST_DESC) == 0)) /* should be set */ panic("%s: rxintr_cleanup: rx-first-descriptor not set.\n", NAME_UNIT); /* First decide if a complete packet has arrived. */ /* Run down DMA descriptors looking for one marked "last". */ /* Bail out if an active descriptor is encountered. */ /* Accumulate most significant bits of packet length. 
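(Worked example of the length reconstruction below: the status field yields only 12 usable bits of RX length, so the high-order bits come from counting completely-filled descriptors. A 5000-byte frame in 2048-byte buffers fills two descriptors, so pkt_len accumulates 4096; the last descriptor's status supplies 5000 mod 4096 = 904; and (4096 & 0xF000) + 904 == 5000. When the low 12 bits are exactly zero, the carry into bit 12 was lost, hence the special-case +4096.)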
*/ pkt_len = 0; for (;;) { if (last_desc == ring->write) return 0; /* no more descs */ if (last_desc->status & TLP_DSTS_OWNER) return 0; /* still active */ if (last_desc->status & TLP_DSTS_RX_LAST_DESC) break; /* end of packet */ pkt_len += last_desc->length1 + last_desc->length2; /* entire desc filled */ if (last_desc++->control & TLP_DCTL_END_RING) last_desc = ring->first; /* ring wrap */ } /* A complete packet has arrived; how long is it? */ /* H/w ref man shows RX pkt length as a 14-bit field. */ /* An experiment found that only the 12 LSBs work. */ if (((last_desc->status>>16)&0xFFF) == 0) pkt_len += 4096; /* carry-bit */ pkt_len = (pkt_len & 0xF000) + ((last_desc->status>>16) & 0x0FFF); /* Subtract the CRC length unless doing so would underflow. */ if (pkt_len >= sc->config.crc_len) pkt_len -= sc->config.crc_len; /* Run down DMA descriptors again doing the following: * 1) put pkt info in pkthdr of first mbuf, * 2) link mbufs, * 3) set mbuf lengths. */ first_desc = ring->read; do { /* Read a DMA descriptor from the ring. */ last_desc = ring->read; /* Advance the ring read pointer. */ if (ring->read++ == ring->last) ring->read = ring->first; /* Dequeue the corresponding cluster mbuf. */ new_mbuf = mbuf_dequeue(ring); if (new_mbuf == NULL) panic("%s: rxintr_cleanup: expected an mbuf\n", NAME_UNIT); desc_len = last_desc->length1 + last_desc->length2; /* If bouncing, copy bounce buf to mbuf. */ DMA_SYNC(last_desc->map, desc_len, BUS_DMASYNC_POSTREAD); /* Unmap kernel virtual address to PCI address. */ bus_dmamap_unload(ring->tag, last_desc->map); /* 1) Put pkt info in pkthdr of first mbuf. */ if (last_desc == first_desc) { first_mbuf = new_mbuf; first_mbuf->m_pkthdr.len = pkt_len; /* total pkt length */ #if IFNET first_mbuf->m_pkthdr.rcvif = sc->ifp; /* how it got here */ #else first_mbuf->m_pkthdr.rcvif = NULL; #endif } else /* 2) link mbufs. */ { last_mbuf->m_next = new_mbuf; /* M_PKTHDR should be set in the first mbuf only. */ new_mbuf->m_flags &= ~M_PKTHDR; } last_mbuf = new_mbuf; /* 3) Set mbuf lengths. */ new_mbuf->m_len = (pkt_len >= desc_len) ? desc_len : pkt_len; pkt_len -= new_mbuf->m_len; } while ((last_desc->status & TLP_DSTS_RX_LAST_DESC) == 0); /* Decide whether to accept or to discard this packet. */ /* RxHDLC sets MIIERR for bad CRC, abort and partial byte at pkt end. */ if (((last_desc->status & TLP_DSTS_RX_BAD) == 0) && (sc->status.oper_status == STATUS_UP) && (first_mbuf->m_pkthdr.len > 0)) { /* Optimization: copy a small pkt into a small mbuf. */ if (first_mbuf->m_pkthdr.len <= COPY_BREAK) { MGETHDR(new_mbuf, M_DONTWAIT, MT_DATA); if (new_mbuf != NULL) { new_mbuf->m_pkthdr.rcvif = first_mbuf->m_pkthdr.rcvif; new_mbuf->m_pkthdr.len = first_mbuf->m_pkthdr.len; new_mbuf->m_len = first_mbuf->m_len; memcpy(new_mbuf->m_data, first_mbuf->m_data, first_mbuf->m_pkthdr.len); m_freem(first_mbuf); first_mbuf = new_mbuf; } } /* Include CRC and one flag byte in input byte count. */ sc->status.cntrs.ibytes += first_mbuf->m_pkthdr.len + sc->config.crc_len +1; sc->status.cntrs.ipackets++; #if IFNET sc->ifp->if_ipackets++; LMC_BPF_MTAP(first_mbuf); #endif #if (defined(__FreeBSD__) && defined(DEVICE_POLLING)) sc->quota--; #endif /* Give this good packet to the network stacks. */ #if NETGRAPH if (sc->ng_hook != NULL) /* is hook connected? 
*/ { # if (__FreeBSD_version >= 500000) int error; /* ignore error */ NG_SEND_DATA_ONLY(error, sc->ng_hook, first_mbuf); # else /* FreeBSD-4 */ ng_queue_data(sc->ng_hook, first_mbuf, NULL); # endif return 1; /* did something */ } #endif /* NETGRAPH */ if (sc->config.line_pkg == PKG_RAWIP) lmc_raw_input(sc->ifp, first_mbuf); else { #if NSPPP sppp_input(sc->ifp, first_mbuf); #elif P2P new_mbuf = first_mbuf; while (new_mbuf != NULL) { sc->p2p->p2p_hdrinput(sc->p2p, new_mbuf->m_data, new_mbuf->m_len); new_mbuf = new_mbuf->m_next; } sc->p2p->p2p_input(sc->p2p, NULL); m_freem(first_mbuf); #else m_freem(first_mbuf); sc->status.cntrs.idiscards++; #endif } } else if (sc->status.oper_status != STATUS_UP) { /* If the link is down, this packet is probably noise. */ m_freem(first_mbuf); sc->status.cntrs.idiscards++; if (DRIVER_DEBUG) printf("%s: rxintr_cleanup: rx pkt discarded: link down\n", NAME_UNIT); } else /* Log and discard this bad packet. */ { if (DRIVER_DEBUG) printf("%s: RX bad pkt; len=%d %s%s%s%s\n", NAME_UNIT, first_mbuf->m_pkthdr.len, (last_desc->status & TLP_DSTS_RX_MII_ERR) ? " miierr" : "", (last_desc->status & TLP_DSTS_RX_DRIBBLE) ? " dribble" : "", (last_desc->status & TLP_DSTS_RX_DESC_ERR) ? " descerr" : "", (last_desc->status & TLP_DSTS_RX_OVERRUN) ? " overrun" : ""); if (last_desc->status & TLP_DSTS_RX_OVERRUN) sc->status.cntrs.fifo_over++; else sc->status.cntrs.ierrors++; m_freem(first_mbuf); } return 1; /* did something */ } /* Setup (prepare) to receive a packet. */ /* Try to keep the RX descriptor ring full of empty buffers. */ static int /* BSD version */ rxintr_setup(softc_t *sc) { struct desc_ring *ring = &sc->rxring; struct dma_desc *desc; struct mbuf *m; int desc_len; int error; /* Ring is full if (wrap(write+1)==read) */ if (((ring->write == ring->last) ? ring->first : ring->write+1) == ring->read) return 0; /* ring is full; nothing to do */ /* Allocate a small mbuf and attach an mbuf cluster. */ MGETHDR(m, M_DONTWAIT, MT_DATA); if (m == NULL) { sc->status.cntrs.rxdma++; if (DRIVER_DEBUG) printf("%s: rxintr_setup: MGETHDR() failed\n", NAME_UNIT); return 0; } MCLGET(m, M_DONTWAIT); if ((m->m_flags & M_EXT) == 0) { m_freem(m); sc->status.cntrs.rxdma++; if (DRIVER_DEBUG) printf("%s: rxintr_setup: MCLGET() failed\n", NAME_UNIT); return 0; } /* Queue the mbuf for later processing by rxintr_cleanup. */ mbuf_enqueue(ring, m); /* Write a DMA descriptor into the ring. */ /* Hardware won't see it until the OWNER bit is set. */ desc = ring->write; /* Advance the ring write pointer. */ if (ring->write++ == ring->last) ring->write = ring->first; desc_len = (MCLBYTES < MAX_DESC_LEN) ? MCLBYTES : MAX_DESC_LEN; /* Map kernel virtual address to PCI address. */ if ((error = DMA_LOAD(desc->map, m->m_data, desc_len))) printf("%s: bus_dmamap_load(rx) failed; error %d\n", NAME_UNIT, error); /* Invalidate the cache for this mbuf. */ DMA_SYNC(desc->map, desc_len, BUS_DMASYNC_PREREAD); /* Set up the DMA descriptor. */ #ifdef __FreeBSD__ desc->address1 = ring->segs[0].ds_addr; #elif (defined(__NetBSD__) || defined(__OpenBSD__)) desc->address1 = desc->map->dm_segs[0].ds_addr; #elif defined(__bsdi__) desc->address1 = vtophys(m->m_data); /* Relax! BSD/OS only. */ #endif desc->length1 = desc_len>>1; desc->address2 = desc->address1 + desc->length1; desc->length2 = desc_len>>1; /* Before setting the OWNER bit, flush the cache (memory barrier). */ DMA_SYNC(ring->map, ring->size_descs, BUS_DMASYNC_PREWRITE); /* Commit the DMA descriptor to the hardware. 
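(Aside on the ordering at this point: every other descriptor field must be visible to the device before the OWNER bit is, because the Tulip may fetch a descriptor the instant it owns it. The commit sequence, in sketch form:

    desc->address1 = ...;  desc->length1 = ...;   // fill in all fields first
    DMA_SYNC(ring->map, ring->size_descs,
     BUS_DMASYNC_PREWRITE);                       // flush cache: memory barrier
    desc->status = TLP_DSTS_OWNER;                // single-word handoff to h/w
    WRITE_CSR(TLP_RX_POLL, 1);                    // wake the receive engine

End of aside.)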
*/ desc->status = TLP_DSTS_OWNER; /* Notify the receiver that there is another buffer available. */ WRITE_CSR(TLP_RX_POLL, 1); return 1; /* did something */ } /* Clean up after a packet has been transmitted. */ /* Free the mbuf chain and update the DMA descriptor ring. */ static int /* BSD version */ txintr_cleanup(softc_t *sc) { struct desc_ring *ring = &sc->txring; struct dma_desc *desc; while ((ring->read != ring->write) && /* while ring is not empty */ ((ring->read->status & TLP_DSTS_OWNER) == 0)) { /* Read a DMA descriptor from the ring. */ desc = ring->read; /* Advance the ring read pointer. */ if (ring->read++ == ring->last) ring->read = ring->first; /* This is a no-op on most architectures. */ DMA_SYNC(desc->map, desc->length1 + desc->length2, BUS_DMASYNC_POSTWRITE); /* Unmap kernel virtual address to PCI address. */ bus_dmamap_unload(ring->tag, desc->map); /* If this descriptor is the last segment of a packet, */ /* then dequeue and free the corresponding mbuf chain. */ if ((desc->control & TLP_DCTL_TX_LAST_SEG) != 0) { struct mbuf *m; if ((m = mbuf_dequeue(ring)) == NULL) panic("%s: txintr_cleanup: expected an mbuf\n", NAME_UNIT); /* Include CRC and one flag byte in output byte count. */ sc->status.cntrs.obytes += m->m_pkthdr.len + sc->config.crc_len +1; sc->status.cntrs.opackets++; #if IFNET sc->ifp->if_opackets++; LMC_BPF_MTAP(m); #endif /* The only bad TX status is fifo underrun. */ if ((desc->status & TLP_DSTS_TX_UNDERRUN) != 0) sc->status.cntrs.fifo_under++; m_freem(m); return 1; /* did something */ } } return 0; } /* Build DMA descriptors for a transmit packet mbuf chain. */ static int /* 0=success; 1=error */ /* BSD version */ txintr_setup_mbuf(softc_t *sc, struct mbuf *m) { struct desc_ring *ring = &sc->txring; struct dma_desc *desc; unsigned int desc_len; /* build DMA descriptors for a chain of mbufs. */ while (m != NULL) { char *data = m->m_data; int length = m->m_len; /* zero length mbufs happen! */ /* Build DMA descriptors for one mbuf. */ while (length > 0) { int error; /* Ring is full if (wrap(write+1)==read) */ if (((ring->temp==ring->last) ? ring->first : ring->temp+1) == ring->read) { /* Not enough DMA descriptors; try later. */ for (; ring->temp!=ring->write; ring->temp = (ring->temp==ring->first)? ring->last : ring->temp-1) bus_dmamap_unload(ring->tag, ring->temp->map); sc->status.cntrs.txdma++; return 1; } /* Provisionally, write a descriptor into the ring. */ /* But don't change the REAL ring write pointer. */ /* Hardware won't see it until the OWNER bit is set. */ desc = ring->temp; /* Advance the temporary ring write pointer. */ if (ring->temp++ == ring->last) ring->temp = ring->first; /* Clear all control bits except the END_RING bit. */ desc->control &= TLP_DCTL_END_RING; /* Don't pad short packets up to 64 bytes. */ desc->control |= TLP_DCTL_TX_NO_PAD; /* Use Tulip's CRC-32 generator, if appropriate. */ if (sc->config.crc_len != CFG_CRC_32) desc->control |= TLP_DCTL_TX_NO_CRC; /* Set the OWNER bit, except in the first descriptor. */ if (desc != ring->write) desc->status = TLP_DSTS_OWNER; desc_len = (length > MAX_CHUNK_LEN) ? MAX_CHUNK_LEN : length; /* Map kernel virtual address to PCI address. */ if ((error = DMA_LOAD(desc->map, data, desc_len))) printf("%s: bus_dmamap_load(tx) failed; error %d\n", NAME_UNIT, error); /* Flush the cache and if bouncing, copy mbuf to bounce buf. */ DMA_SYNC(desc->map, desc_len, BUS_DMASYNC_PREWRITE); /* Prevent wild fetches if mapping fails (nsegs==0). 
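(Aside on ring->temp used above: descriptors are staged through a private copy of the write pointer, so a chain that runs out of descriptors can be unwound before the hardware ever sees a partial packet; only txintr_setup's later "ring->write = ring->temp" commits the batch. All-or-nothing, in outline:

    ring->temp = ring->write;            // begin the transaction
    // stage one descriptor per chunk; on overflow, unwind and fail
    ring->write = ring->temp;            // commit the whole chain
    first_desc->status = TLP_DSTS_OWNER; // then hand it to the hardware

End of aside.)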
*/ desc->length1 = desc->length2 = 0; desc->address1 = desc->address2 = 0; #if (defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__)) { # ifdef __FreeBSD__ bus_dma_segment_t *segs = ring->segs; int nsegs = ring->nsegs; # elif (defined(__NetBSD__) || defined(__OpenBSD__)) bus_dma_segment_t *segs = desc->map->dm_segs; int nsegs = desc->map->dm_nsegs; # endif if (nsegs >= 1) { desc->address1 = segs[0].ds_addr; desc->length1 = segs[0].ds_len; } if (nsegs == 2) { desc->address2 = segs[1].ds_addr; desc->length2 = segs[1].ds_len; } } #elif defined(__bsdi__) desc->address1 = vtophys(data); /* Relax! BSD/OS only. */ desc->length1 = desc_len; #endif data += desc_len; length -= desc_len; } /* while (length > 0) */ m = m->m_next; } /* while (m != NULL) */ return 0; /* success */ } /* Setup (prepare) to transmit a packet. */ /* Select a packet, build DMA descriptors and give packet to hardware. */ /* If DMA descriptors run out, abandon the attempt and return 0. */ static int /* BSD version */ txintr_setup(softc_t *sc) { struct desc_ring *ring = &sc->txring; struct dma_desc *first_desc, *last_desc; /* Protect against half-up links: Don't transmit */ /* if the receiver can't hear the far end. */ if (sc->status.oper_status != STATUS_UP) return 0; /* Pick a packet to transmit. */ #if NETGRAPH if ((sc->ng_hook != NULL) && (sc->tx_mbuf == NULL)) { if (!IFQ_IS_EMPTY(&sc->ng_fastq)) IFQ_DEQUEUE(&sc->ng_fastq, sc->tx_mbuf); else IFQ_DEQUEUE(&sc->ng_sndq, sc->tx_mbuf); } else #endif if (sc->tx_mbuf == NULL) { if (sc->config.line_pkg == PKG_RAWIP) IFQ_DEQUEUE(&sc->ifp->if_snd, sc->tx_mbuf); else { #if NSPPP sc->tx_mbuf = sppp_dequeue(sc->ifp); #elif P2P if (!IFQ_IS_EMPTY(&sc->p2p->p2p_isnd)) IFQ_DEQUEUE(&sc->p2p->p2p_isnd, sc->tx_mbuf); else IFQ_DEQUEUE(&sc->ifp->if_snd, sc->tx_mbuf); #endif } } if (sc->tx_mbuf == NULL) return 0; /* no pkt to transmit */ /* Build DMA descriptors for an outgoing mbuf chain. */ ring->temp = ring->write; /* temporary ring write pointer */ if (txintr_setup_mbuf(sc, sc->tx_mbuf) != 0) return 0; /* Enqueue the mbuf; txintr_cleanup will free it. */ mbuf_enqueue(ring, sc->tx_mbuf); /* The transmitter has room for another packet. */ sc->tx_mbuf = NULL; /* Set first & last segment bits. */ /* last_desc is the desc BEFORE the one pointed to by ring->temp. */ first_desc = ring->write; first_desc->control |= TLP_DCTL_TX_FIRST_SEG; last_desc = (ring->temp==ring->first)? ring->last : ring->temp-1; last_desc->control |= TLP_DCTL_TX_LAST_SEG; /* Interrupt at end-of-transmission? Why bother the poor computer! */ /* last_desc->control |= TLP_DCTL_TX_INTERRUPT; */ /* Make sure the OWNER bit is not set in the next descriptor. */ /* The OWNER bit may have been set if a previous call aborted. */ ring->temp->status = 0; /* Commit the DMA descriptors to the software. */ ring->write = ring->temp; /* Before setting the OWNER bit, flush the cache (memory barrier). */ DMA_SYNC(ring->map, ring->size_descs, BUS_DMASYNC_PREWRITE); /* Commit the DMA descriptors to the hardware. */ first_desc->status = TLP_DSTS_OWNER; /* Notify the transmitter that there is another packet to send. */ WRITE_CSR(TLP_TX_POLL, 1); return 1; /* did something */ } #endif /* BSD */ #ifdef __linux__ /* NOTE: this is the LINUX version of the interrupt/DMA code, */ /* Singly-linked tail-queues hold sk_buffs with active DMA. * skbuffs are linked through their sk_buff.next field. * Callers must hold sc->bottom_lock; not otherwise locked. */ /* Put an skbuff on the tail of the descriptor ring queue. 
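(Aside: the Linux queue below mirrors the BSD mbuf queue above field for field -- sk_buff.next plays the role of mbuf.m_nextpkt and the head/tail invariants are identical, e.g.

    skbuff_enqueue(&r, skb1);
    skbuff_enqueue(&r, skb2);
    skb = skbuff_dequeue(&r);   // skb1 first: strict FIFO order

End of aside.)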
*/ static void /* Linux version */ skbuff_enqueue(struct desc_ring *ring, struct sk_buff *skb) { skb->next = NULL; if (ring->tail == NULL) ring->head = skb; else ring->tail->next = skb; ring->tail = skb; } /* Get an skbuff from the head of the descriptor ring queue. */ static struct sk_buff* /* Linux version */ skbuff_dequeue(struct desc_ring *ring) { struct sk_buff *skb = ring->head; if (skb != NULL) if ((ring->head = skb->next) == NULL) ring->tail = NULL; return skb; } /* Initialize a DMA descriptor ring. */ static int /* Linux version */ create_ring(softc_t *sc, struct desc_ring *ring, int num_descs) { struct dma_desc *descs; int size_descs = sizeof(struct dma_desc)*num_descs; /* Allocate and map memory for DMA descriptor array. */ if ((descs = pci_alloc_consistent(sc->pci_dev, size_descs, &ring->dma_addr)) == NULL) { printk("%s: pci_alloc_consistent() failed\n", NAME_UNIT); return ENOMEM; } memset(descs, 0, size_descs); ring->read = descs; ring->write = descs; ring->first = descs; ring->last = descs + num_descs -1; ring->last->control = TLP_DCTL_END_RING; ring->num_descs = num_descs; ring->size_descs = size_descs; ring->head = NULL; ring->tail = NULL; return 0; } /* Destroy a DMA descriptor ring */ static void /* Linux version */ destroy_ring(softc_t *sc, struct desc_ring *ring) { struct sk_buff *skb; /* Free queued skbuffs. */ while ((skb = skbuff_dequeue(ring)) != NULL) dev_kfree_skb(skb); /* TX may have one pkt that is not on any queue. */ if (sc->tx_skb != NULL) { dev_kfree_skb(sc->tx_skb); sc->tx_skb = NULL; } if (ring->first != NULL) { /* Unmap active DMA descriptors. */ while (ring->read != ring->write) { pci_unmap_single(sc->pci_dev, ring->read->address1, ring->read->length1 + ring->read->length2, PCI_DMA_BIDIRECTIONAL); if (ring->read++ == ring->last) ring->read = ring->first; } /* Unmap and free memory for DMA descriptor array. */ pci_free_consistent(sc->pci_dev, ring->size_descs, ring->first, ring->dma_addr); } } static int /* Linux version */ rxintr_cleanup(softc_t *sc) { struct desc_ring *ring = &sc->rxring; struct dma_desc *first_desc, *last_desc; struct sk_buff *first_skb=NULL, *last_skb=NULL; struct sk_buff *new_skb; int pkt_len, desc_len; /* Input packet flow control (livelock prevention): */ /* Give pkts to higher levels only if quota is > 0. */ if (sc->quota <= 0) return 0; /* This looks complicated, but remember: packets up to 4032 */ /* bytes long fit in one skbuff and use one DMA descriptor. */ first_desc = last_desc = ring->read; /* ASSERTION: If there is a descriptor in the ring and the hardware has */ /* finished with it, then that descriptor will have RX_FIRST_DESC set. */ if ((ring->read != ring->write) && /* descriptor ring not empty */ ((ring->read->status & TLP_DSTS_OWNER) == 0) && /* hardware done */ ((ring->read->status & TLP_DSTS_RX_FIRST_DESC) == 0)) /* should be set */ panic("%s: rxintr_cleanup: rx-first-descriptor not set.\n", NAME_UNIT); /* First decide if a complete packet has arrived. */ /* Run down DMA descriptors looking for one marked "last". */ /* Bail out if an active descriptor is encountered. */ /* Accumulate most significant bits of packet length. 
*/ pkt_len = 0; for (;;) { if (last_desc == ring->write) return 0; /* no more descs */ if (last_desc->status & TLP_DSTS_OWNER) return 0; /* still active */ if (last_desc->status & TLP_DSTS_RX_LAST_DESC) break; /* end of packet */ pkt_len += last_desc->length1 + last_desc->length2; /* entire desc filled */ if (last_desc++->control & TLP_DCTL_END_RING) last_desc = ring->first; /* ring wrap */ } /* A complete packet has arrived; how long is it? */ /* H/w ref man shows RX pkt length as a 14-bit field. */ /* An experiment found that only the 12 LSBs work. */ if (((last_desc->status>>16)&0xFFF) == 0) pkt_len += 4096; /* carry-bit */ pkt_len = (pkt_len & 0xF000) + ((last_desc->status>>16) & 0x0FFF); /* Subtract the CRC length unless doing so would underflow. */ if (pkt_len >= sc->config.crc_len) pkt_len -= sc->config.crc_len; /* Run down DMA descriptors again doing the following: * 1) put pkt info in hdr of first skbuff. * 2) put additional skbuffs on frag_list. * 3) set skbuff lengths. */ first_desc = ring->read; do { /* Read a DMA descriptor from the ring. */ last_desc = ring->read; /* Advance the ring read pointer. */ if (ring->read++ == ring->last) ring->read = ring->first; /* Dequeue the corresponding skbuff. */ new_skb = skbuff_dequeue(ring); if (new_skb == NULL) panic("%s: rxintr_cleanup: expected an skbuff\n", NAME_UNIT); desc_len = last_desc->length1 + last_desc->length2; /* Unmap kernel virtual address to PCI address. */ pci_unmap_single(sc->pci_dev, last_desc->address1, desc_len, PCI_DMA_FROMDEVICE); /* Set skbuff length. */ skb_put(new_skb, (pkt_len >= desc_len) ? desc_len : pkt_len); pkt_len -= new_skb->len; /* 1) Put pkt info in hdr of first skbuff. */ if (last_desc == first_desc) { first_skb = new_skb; if (sc->config.line_pkg == PKG_RAWIP) { if (first_skb->data[0]>>4 == 4) first_skb->protocol = htons(ETH_P_IP); else if (first_skb->data[0]>>4 == 6) first_skb->protocol = htons(ETH_P_IPV6); } else #if GEN_HDLC first_skb->protocol = hdlc_type_trans(first_skb, sc->net_dev); #else first_skb->protocol = htons(ETH_P_HDLC); #endif first_skb->mac.raw = first_skb->data; first_skb->dev = sc->net_dev; do_gettimeofday(&first_skb->stamp); sc->net_dev->last_rx = jiffies; } else /* 2) link skbuffs. */ { /* Put this skbuff on the frag_list of the first skbuff. */ new_skb->next = NULL; if (skb_shinfo(first_skb)->frag_list == NULL) skb_shinfo(first_skb)->frag_list = new_skb; else last_skb->next = new_skb; /* 3) set skbuff lengths. */ first_skb->len += new_skb->len; first_skb->data_len += new_skb->len; } last_skb = new_skb; } while ((last_desc->status & TLP_DSTS_RX_LAST_DESC) == 0); /* Decide whether to accept or to discard this packet. */ /* RxHDLC sets MIIERR for bad CRC, abort and partial byte at pkt end. */ if (((last_desc->status & TLP_DSTS_RX_BAD) == 0) && (sc->status.oper_status == STATUS_UP) && (first_skb->len > 0)) { /* Optimization: copy a small pkt into a small skbuff. */ if (first_skb->len <= COPY_BREAK) if ((new_skb = skb_copy(first_skb, GFP_ATOMIC)) != NULL) { dev_kfree_skb_any(first_skb); first_skb = new_skb; } /* Include CRC and one flag byte in input byte count. */ sc->status.cntrs.ibytes += first_skb->len + sc->config.crc_len +1; sc->status.cntrs.ipackets++; /* Give this good packet to the network stacks. */ netif_receive_skb(first_skb); /* NAPI */ sc->quota--; } else if (sc->status.oper_status != STATUS_UP) { /* If the link is down, this packet is probably noise.
*/ sc->status.cntrs.idiscards++; dev_kfree_skb_any(first_skb); if (DRIVER_DEBUG) printk("%s: rxintr_cleanup: rx pkt discarded: link down\n", NAME_UNIT); } else /* Log and discard this bad packet. */ { if (DRIVER_DEBUG) printk("%s: RX bad pkt; len=%d %s%s%s%s\n", NAME_UNIT, first_skb->len, (last_desc->status & TLP_DSTS_RX_MII_ERR) ? " miierr" : "", (last_desc->status & TLP_DSTS_RX_DRIBBLE) ? " dribble" : "", (last_desc->status & TLP_DSTS_RX_DESC_ERR) ? " descerr" : "", (last_desc->status & TLP_DSTS_RX_OVERRUN) ? " overrun" : ""); if (last_desc->status & TLP_DSTS_RX_OVERRUN) sc->status.cntrs.fifo_over++; else sc->status.cntrs.ierrors++; dev_kfree_skb_any(first_skb); } return 1; /* did something */ } /* Setup (prepare) to receive a packet. */ /* Try to keep the RX descriptor ring full of empty buffers. */ static int /* Linux version */ rxintr_setup(softc_t *sc) { struct desc_ring *ring = &sc->rxring; struct dma_desc *desc; struct sk_buff *skb; u_int32_t dma_addr; /* Ring is full if (wrap(write+1)==read) */ if (((ring->write == ring->last) ? ring->first : ring->write+1) == ring->read) return 0; /* ring is full; nothing to do */ /* Allocate an skbuff. */ if ((skb = dev_alloc_skb(MAX_DESC_LEN)) == NULL) { sc->status.cntrs.rxdma++; if (DRIVER_DEBUG) printk("%s: rxintr_setup: dev_alloc_skb() failed\n", NAME_UNIT); return 0; } skb->dev = sc->net_dev; /* Queue the skbuff for later processing by rxintr_cleanup. */ skbuff_enqueue(ring, skb); /* Write a DMA descriptor into the ring. */ /* Hardware won't see it until the OWNER bit is set. */ desc = ring->write; /* Advance the ring write pointer. */ if (ring->write++ == ring->last) ring->write = ring->first; /* Map kernel virtual addresses to PCI addresses. */ dma_addr = pci_map_single(sc->pci_dev, skb->data, MAX_DESC_LEN, PCI_DMA_FROMDEVICE); /* Set up the DMA descriptor. */ desc->address1 = dma_addr; desc->length1 = MAX_CHUNK_LEN; desc->address2 = desc->address1 + desc->length1; desc->length2 = MAX_CHUNK_LEN; /* Before setting the OWNER bit, flush the cache (memory barrier). */ wmb(); /* write memory barrier */ /* Commit the DMA descriptor to the hardware. */ desc->status = TLP_DSTS_OWNER; /* Notify the receiver that there is another buffer available. */ WRITE_CSR(TLP_RX_POLL, 1); return 1; /* did something */ } /* Clean up after a packet has been transmitted. */ /* Free the sk_buff and update the DMA descriptor ring. */ static int /* Linux version */ txintr_cleanup(softc_t *sc) { struct desc_ring *ring = &sc->txring; struct dma_desc *desc; while ((ring->read != ring->write) && /* ring is not empty */ ((ring->read->status & TLP_DSTS_OWNER) == 0)) { /* Read a DMA descriptor from the ring. */ desc = ring->read; /* Advance the ring read pointer. */ if (ring->read++ == ring->last) ring->read = ring->first; /* Unmap kernel virtual address to PCI address. */ pci_unmap_single(sc->pci_dev, desc->address1, desc->length1 + desc->length2, PCI_DMA_TODEVICE); /* If this descriptor is the last segment of a packet, */ /* then dequeue and free the corresponding skbuff. */ if ((desc->control & TLP_DCTL_TX_LAST_SEG) != 0) { struct sk_buff *skb; if ((skb = skbuff_dequeue(ring)) == NULL) panic("%s: txintr_cleanup: expected an sk_buff\n", NAME_UNIT); /* Include CRC and one flag byte in output byte count. */ sc->status.cntrs.obytes += skb->len + sc->config.crc_len +1; sc->status.cntrs.opackets++; /* The only bad TX status is fifo underrun. */ if ((desc->status & TLP_DSTS_TX_UNDERRUN) != 0) { sc->status.cntrs.fifo_under++; /* also increment oerrors? 
*/ if (DRIVER_DEBUG) printk("%s: txintr_cleanup: tx fifo underrun\n", NAME_UNIT); } dev_kfree_skb_any(skb); return 1; /* did something */ } } return 0; } /* Build DMA descriptors for a transmit packet fragment. */ /* Assertion: fragment is contiguous in physical memory. */ static int /* 0=success; 1=error */ /* Linux version */ txintr_setup_frag(softc_t *sc, char *data, int length) { struct desc_ring *ring = &sc->txring; struct dma_desc *desc; unsigned int desc_len; u_int32_t dma_addr; while (length > 0) { /* Ring is full if (wrap(write+1)==read) */ if (((ring->temp==ring->last) ? ring->first : ring->temp+1) == ring->read) { /* Not enough DMA descriptors; try later. */ for (; ring->temp!=ring->write; ring->temp = (ring->temp==ring->first)? ring->last : ring->temp-1) pci_unmap_single(sc->pci_dev, ring->temp->address1, ring->temp->length1 + ring->temp->length2, PCI_DMA_FROMDEVICE); sc->status.cntrs.txdma++; return 1; } /* Provisionally, write a DMA descriptor into the ring. */ /* But don't change the REAL ring write pointer. */ /* Hardware won't see it until the OWNER bit is set. */ desc = ring->temp; /* Advance the temporary ring write pointer. */ if (ring->temp++ == ring->last) ring->temp = ring->first; /* Clear all control bits except the END_RING bit. */ desc->control &= TLP_DCTL_END_RING; /* Don't pad short packets up to 64 bytes */ desc->control |= TLP_DCTL_TX_NO_PAD; /* Use Tulip's CRC-32 generator, if appropriate. */ if (sc->config.crc_len != CFG_CRC_32) desc->control |= TLP_DCTL_TX_NO_CRC; /* Set the OWNER bit, except in the first descriptor. */ if (desc != ring->write) desc->status = TLP_DSTS_OWNER; desc_len = (length >= MAX_DESC_LEN) ? MAX_DESC_LEN : length; /* Map kernel virtual address to PCI address. */ dma_addr = pci_map_single(sc->pci_dev, data, desc_len, PCI_DMA_TODEVICE); /* If it will fit in one chunk, do so, otherwise split it. */ if (desc_len <= MAX_CHUNK_LEN) { desc->address1 = dma_addr; desc->length1 = desc_len; desc->address2 = 0; desc->length2 = 0; } else { desc->address1 = dma_addr; desc->length1 = desc_len>>1; desc->address2 = desc->address1 + desc->length1; desc->length2 = desc_len>>1; if (desc_len & 1) desc->length2++; } data += desc_len; length -= desc_len; } /* while (length > 0) */ return 0; /* success */ } /* NB: this procedure is recursive! */ static int /* 0=success; 1=error */ txintr_setup_skb(softc_t *sc, struct sk_buff *skb) { struct sk_buff *list; int i; /* First, handle the data in the skbuff itself. */ if (txintr_setup_frag(sc, skb->data, skb_headlen(skb))) return 1; /* Next, handle the VM pages in the Scatter/Gather list. */ if (skb_shinfo(skb)->nr_frags != 0) for (i=0; i<skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; if (txintr_setup_frag(sc, page_address(frag->page) + frag->page_offset, frag->size)) return 1; } /* Finally, handle the skbuffs in the frag_list. */ if ((list = skb_shinfo(skb)->frag_list) != NULL) for (; list; list=list->next) if (txintr_setup_skb(sc, list)) /* recursive! */ return 1; return 0; } /* Setup (prepare) to transmit a packet. */ /* Select a packet, build DMA descriptors and give packet to hardware. */ /* If DMA descriptors run out, abandon the attempt and return 0. */ static int /* Linux version */ txintr_setup(softc_t *sc) { struct desc_ring *ring = &sc->txring; struct dma_desc *first_desc, *last_desc; /* Protect against half-up links: Don't transmit */ /* if the receiver can't hear the far end. */ if (sc->status.oper_status != STATUS_UP) return 0; /* Pick a packet to transmit.
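(Aside on txintr_setup_skb above: it maps the skbuff's own data area, then each page in the scatter/gather frags array, then recurses once per skbuff on frag_list. Skbuffs queued on a frag_list rarely carry a frag_list of their own, so in practice the recursion stays shallow. Traversal order, in outline:

    skb->data                          // linear header area
    frags[0] .. frags[nr_frags-1]      // mapped VM pages
    frag_list: recurse on each chained skbuff

End of aside.)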
*/ /* linux_start() puts packets in sc->tx_skb. */ if (sc->tx_skb == NULL) { if (netif_queue_stopped(sc->net_dev) != 0) netif_wake_queue(sc->net_dev); return 0; /* no pkt to transmit */ } /* Build DMA descriptors for an outgoing skbuff. */ ring->temp = ring->write; /* temporary ring write pointer */ if (txintr_setup_skb(sc, sc->tx_skb) != 0) return 0; /* Enqueue the skbuff; txintr_cleanup will free it. */ skbuff_enqueue(ring, sc->tx_skb); /* The transmitter has room for another packet. */ sc->tx_skb = NULL; /* Set first & last segment bits. */ /* last_desc is the desc BEFORE the one pointed to by ring->temp. */ first_desc = ring->write; first_desc->control |= TLP_DCTL_TX_FIRST_SEG; last_desc = (ring->temp==ring->first)? ring->last : ring->temp-1; last_desc->control |= TLP_DCTL_TX_LAST_SEG; /* Interrupt at end-of-transmission? Why bother the poor computer! */ /* last_desc->control |= TLP_DCTL_TX_INTERRUPT; */ /* Make sure the OWNER bit is not set in the next descriptor. */ /* The OWNER bit may have been set if a previous call aborted. */ ring->temp->status = 0; /* Commit the DMA descriptors to the software. */ ring->write = ring->temp; /* Before setting the OWNER bit, flush the cache (memory barrier). */ wmb(); /* write memory barrier */ /* Commit the DMA descriptors to the hardware. */ first_desc->status = TLP_DSTS_OWNER; /* Notify the transmitter that there is another packet to send. */ WRITE_CSR(TLP_TX_POLL, 1); sc->net_dev->trans_start = jiffies; return 1; /* did something */ } #endif /* __linux__ */ static void check_intr_status(softc_t *sc) { u_int32_t status, cfcs, op_mode; u_int32_t missed, overruns; /* Check for four unusual events: * 1) fatal PCI bus errors - some are recoverable * 2) transmitter FIFO underruns - increase fifo threshold * 3) receiver FIFO overruns - clear potential hangup * 4) no receive descs or bufs - count missed packets */ /* 1) A fatal bus error causes a Tulip to stop initiating bus cycles. */ /* Module unload/load or boot are the only fixes for Parity Errors. */ /* Master and Target Aborts can be cleared and life may continue. */ status = READ_CSR(TLP_STATUS); if ((status & TLP_STAT_FATAL_ERROR) != 0) { u_int32_t fatal = (status & TLP_STAT_FATAL_BITS)>>TLP_STAT_FATAL_SHIFT; printf("%s: FATAL PCI BUS ERROR: %s%s%s%s\n", NAME_UNIT, (fatal == 0) ? "PARITY ERROR" : "", (fatal == 1) ? "MASTER ABORT" : "", (fatal == 2) ? "TARGET ABORT" : "", (fatal >= 3) ? "RESERVED (?)" : ""); cfcs = READ_PCI_CFG(sc, TLP_CFCS); /* try to clear it */ cfcs &= ~(TLP_CFCS_MSTR_ABORT | TLP_CFCS_TARG_ABORT); WRITE_PCI_CFG(sc, TLP_CFCS, cfcs); } /* 2) If the transmitter fifo underruns, increase the transmit fifo */ /* threshold: the number of bytes required to be in the fifo */ /* before starting the transmitter (cost: increased tx delay). */ /* The TX_FSM must be stopped to change this parameter. */ if ((status & TLP_STAT_TX_UNDERRUN) != 0) { op_mode = READ_CSR(TLP_OP_MODE); /* enable store-and-forward mode if tx_threshold tops out? */ if ((op_mode & TLP_OP_TX_THRESH) < TLP_OP_TX_THRESH) { op_mode += 0x4000; /* increment TX_THRESH field; can't overflow */ WRITE_CSR(TLP_OP_MODE, op_mode & ~TLP_OP_TX_RUN); /* Wait for the TX FSM to stop; it might be processing a pkt. 
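*/
/* Worked example of the threshold arithmetic: TX_THRESH is a two-bit
 * field at bits 15:14 of the operating mode register, which is why
 * adding 0x4000 bumps the field by one; the threshold in bytes is
 * 128<<TX_THRESH, per the printf below: 0=128, 1=256, 2=512, 3=1024.
 * A card underrunning at TX_THRESH=1 is restarted at TX_THRESH=2, so
 * the transmitter now waits for 512 bytes in the fifo instead of 256
 * before starting -- trading transmit latency for underrun immunity.
 */
/* Wait for the TX FSM to stop; it might be processing a pkt.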
*/ while (READ_CSR(TLP_STATUS) & TLP_STAT_TX_FSM); /* XXX HANG */ WRITE_CSR(TLP_OP_MODE, op_mode); /* restart tx */ if (DRIVER_DEBUG) printf("%s: tx underrun; tx fifo threshold now %d bytes\n", NAME_UNIT, 128<<((op_mode>>TLP_OP_TR_SHIFT)&3)); } } /* 3) Errata memo from Digital Equipment Corp warns that 21140A */ /* receivers through rev 2.2 can hang if the fifo overruns. */ /* Recommended fix: stop and start the RX FSM after an overrun. */ missed = READ_CSR(TLP_MISSED); if ((overruns = ((missed & TLP_MISS_OVERRUN)>>TLP_OVERRUN_SHIFT)) != 0) { if (DRIVER_DEBUG) printf("%s: rx overrun cntr=%d\n", NAME_UNIT, overruns); sc->status.cntrs.overruns += overruns; if ((READ_PCI_CFG(sc, TLP_CFRV) & 0xFF) <= 0x22) { op_mode = READ_CSR(TLP_OP_MODE); WRITE_CSR(TLP_OP_MODE, op_mode & ~TLP_OP_RX_RUN); /* Wait for the RX FSM to stop; it might be processing a pkt. */ while (READ_CSR(TLP_STATUS) & TLP_STAT_RX_FSM); /* XXX HANG */ WRITE_CSR(TLP_OP_MODE, op_mode); /* restart rx */ } } /* 4) When the receiver is enabled and a packet arrives, but no DMA */ /* descriptor is available, the packet is counted as 'missed'. */ /* The receiver should never miss packets; warn if it happens. */ if ((missed = (missed & TLP_MISS_MISSED)) != 0) { if (DRIVER_DEBUG) printf("%s: rx missed %d pkts\n", NAME_UNIT, missed); sc->status.cntrs.missed += missed; } } static void /* This is where the work gets done. */ core_interrupt(void *arg, int check_status) { softc_t *sc = arg; int activity; /* If any CPU is inside this critical section, then */ /* other CPUs should go away without doing anything. */ if (BOTTOM_TRYLOCK == 0) { sc->status.cntrs.lck_intr++; return; } /* Clear pending card interrupts. */ WRITE_CSR(TLP_STATUS, READ_CSR(TLP_STATUS)); /* In Linux, pci_alloc_consistent() means DMA descriptors */ /* don't need explicit syncing. */ #if BSD { struct desc_ring *ring = &sc->txring; DMA_SYNC(sc->txring.map, sc->txring.size_descs, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); ring = &sc->rxring; DMA_SYNC(sc->rxring.map, sc->rxring.size_descs, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); } #endif do /* This is the main loop for interrupt processing. */ { activity = txintr_cleanup(sc); activity += txintr_setup(sc); activity += rxintr_cleanup(sc); activity += rxintr_setup(sc); } while (activity); #if BSD { struct desc_ring *ring = &sc->txring; DMA_SYNC(sc->txring.map, sc->txring.size_descs, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); ring = &sc->rxring; DMA_SYNC(sc->rxring.map, sc->rxring.size_descs, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } #endif /* As the interrupt is dismissed, check for four unusual events. */ if (check_status) check_intr_status(sc); BOTTOM_UNLOCK; } /* user_interrupt() may be called from a syscall or a softirq */ static void user_interrupt(softc_t *sc, int check_status) { DISABLE_INTR; /* noop on FreeBSD-5 and Linux */ core_interrupt(sc, check_status); ENABLE_INTR; /* noop on FreeBSD-5 and Linux */ } #if BSD # if (defined(__FreeBSD__) && defined(DEVICE_POLLING)) /* Service the card from the kernel idle loop without interrupts. */ -static void +static int fbsd_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { softc_t *sc = IFP2SC(ifp); #if (__FreeBSD_version < 700000) if ((ifp->if_capenable & IFCAP_POLLING) == 0) { ether_poll_deregister(ifp); cmd = POLL_DEREGISTER; } if (cmd == POLL_DEREGISTER) { /* Last call -- reenable card interrupts. 
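*/
/* The poll handler's signature change (void to int, in the diff above)
 * tracks the kernel's polling interface: a handler now returns how many
 * rx packets it processed so the polling framework can account for
 * useful work.  A sketch of the contract, with an illustrative name:
 *
 *   static int
 *   xx_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
 *     {
 *     int rx_npkts = 0;
 *     ... process at most count rx packets, counting each one ...
 *     if (cmd == POLL_AND_CHECK_STATUS)
 *       ... also check error/status registers ...
 *     return rx_npkts;
 *     }
 *
 * This driver does not count packets per poll; it sets
 * IFCAP_POLLING_NOCOUNT (see setup_ifnet() below) and returns 0.
 */
/* Last call -- reenable card interrupts.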
*/ WRITE_CSR(TLP_INT_ENBL, TLP_INT_TXRX); return 0; } #endif sc->quota = count; core_interrupt(sc, (cmd==POLL_AND_CHECK_STATUS)); return 0; /* rx pkts not counted; see IFCAP_POLLING_NOCOUNT in setup_ifnet() */ } # endif /* (__FreeBSD__ && DEVICE_POLLING) */ /* BSD kernels call this procedure when an interrupt happens. */ static intr_return_t bsd_interrupt(void *arg) { softc_t *sc = arg; /* Cut losses early if this is not our interrupt. */ if ((READ_CSR(TLP_STATUS) & TLP_INT_TXRX) == 0) return IRQ_NONE; # if (defined(__FreeBSD__) && defined(DEVICE_POLLING)) if (sc->ifp->if_capenable & IFCAP_POLLING) return IRQ_NONE; if ((sc->ifp->if_capabilities & IFCAP_POLLING) && (ether_poll_register(fbsd_poll, sc->ifp))) { WRITE_CSR(TLP_INT_ENBL, TLP_INT_DISABLE); return IRQ_NONE; } else sc->quota = sc->rxring.num_descs; /* input flow control */ # endif /* (__FreeBSD__ && DEVICE_POLLING) */ /* Disable card interrupts. */ WRITE_CSR(TLP_INT_ENBL, TLP_INT_DISABLE); core_interrupt(sc, 0); /* Enable card interrupts. */ WRITE_CSR(TLP_INT_ENBL, TLP_INT_TXRX); return IRQ_HANDLED; } #endif /* BSD */ /* Administrative status of the driver (UP or DOWN) has changed. */ /* A card-specific action may be required: T1 and T3 cards: no-op. */ /* HSSI and SSI cards change the state of modem ready signals. */ static void set_status(softc_t *sc, int status) { struct ioctl ioctl; ioctl.cmd = IOCTL_SET_STATUS; ioctl.data = status; sc->card->ioctl(sc, &ioctl); } #if P2P /* Callout from P2P: */ /* Get the state of DCD (Data Carrier Detect). */ static int p2p_getmdm(struct p2pcom *p2p, caddr_t result) { softc_t *sc = IFP2SC(&p2p->p2p_if); /* Non-zero isn't good enough; TIOCM_CAR is 0x40. */ *(int *)result = (sc->status.oper_status==STATUS_UP) ? TIOCM_CAR : 0; return 0; } /* Callout from P2P: */ /* Set the state of DTR (Data Terminal Ready). */ static int p2p_mdmctl(struct p2pcom *p2p, int flag) { softc_t *sc = IFP2SC(&p2p->p2p_if); set_status(sc, flag); return 0; } #endif /* P2P */ #if NSPPP # ifndef PP_FR # define PP_FR 0 # endif /* Callout from SPPP: */ static void sppp_tls(struct sppp *sppp) { # ifdef __FreeBSD__ if (!(sppp->pp_mode & IFF_LINK2) && !(sppp->pp_flags & PP_FR)) # elif defined(__NetBSD__) || defined(__OpenBSD__) if (!(sppp->pp_flags & PP_CISCO)) # endif sppp->pp_up(sppp); } /* Callout from SPPP: */ static void sppp_tlf(struct sppp *sppp) { # ifdef __FreeBSD__ if (!(sppp->pp_mode & IFF_LINK2) && !(sppp->pp_flags & PP_FR)) # elif defined(__NetBSD__) || defined(__OpenBSD__) if (!(sppp->pp_flags & PP_CISCO)) # endif sppp->pp_down(sppp); } #endif /* NSPPP */ /* Configure line protocol stuff. * Called by attach_card() during module init. * Called by core_ioctl() when lmcconfig writes sc->config. * Called by detach_card() during module shutdown. */ static void config_proto(softc_t *sc, struct config *config) { /* Use line protocol stack instead of RAWIP mode. */ if ((sc->config.line_pkg == PKG_RAWIP) && (config->line_pkg != PKG_RAWIP)) { #if NSPPP LMC_BPF_DETACH; sppp_attach(sc->ifp); LMC_BPF_ATTACH(DLT_PPP, 4); sc->sppp->pp_tls = sppp_tls; sc->sppp->pp_tlf = sppp_tlf; /* Force reconfiguration of SPPP params. */ sc->config.line_prot = 0; sc->config.keep_alive = config->keep_alive ?
0:1; #elif P2P int error = 0; sc->p2p->p2p_proto = 0; /* force p2p_attach */ if ((error = p2p_attach(sc->p2p))) /* calls bpfattach() */ { printf("%s: p2p_attach() failed; error %d\n", NAME_UNIT, error); config->line_pkg = PKG_RAWIP; /* still in RAWIP mode */ } else { sc->p2p->p2p_mdmctl = p2p_mdmctl; /* set DTR */ sc->p2p->p2p_getmdm = p2p_getmdm; /* get DCD */ } #elif GEN_HDLC int error = 0; sc->net_dev->mtu = HDLC_MAX_MTU; if ((error = hdlc_open(sc->net_dev))) { printf("%s: hdlc_open() failed; error %d\n", NAME_UNIT, error); printf("%s: Try 'sethdlc %s ppp'\n", NAME_UNIT, NAME_UNIT); config->line_pkg = PKG_RAWIP; /* still in RAWIP mode */ } #else /* no line protocol stack was configured */ config->line_pkg = PKG_RAWIP; /* still in RAWIP mode */ #endif } /* Bypass line protocol stack and return to RAWIP mode. */ if ((sc->config.line_pkg != PKG_RAWIP) && (config->line_pkg == PKG_RAWIP)) { #if NSPPP LMC_BPF_DETACH; sppp_flush(sc->ifp); sppp_detach(sc->ifp); setup_ifnet(sc->ifp); LMC_BPF_ATTACH(DLT_RAW, 0); #elif P2P int error = 0; if_qflush(&sc->p2p->p2p_isnd); if ((error = p2p_detach(sc->p2p))) { printf("%s: p2p_detach() failed; error %d\n", NAME_UNIT, error); printf("%s: Try 'ifconfig %s down -remove'\n", NAME_UNIT, NAME_UNIT); config->line_pkg = PKG_P2P; /* not in RAWIP mode; still attached to P2P */ } else { setup_ifnet(sc->ifp); LMC_BPF_ATTACH(DLT_RAW, 0); } #elif GEN_HDLC hdlc_proto_detach(sc->hdlc_dev); hdlc_close(sc->net_dev); setup_netdev(sc->net_dev); #endif } #if NSPPP if (config->line_pkg != PKG_RAWIP) { /* Check for change to PPP protocol. */ if ((sc->config.line_prot != PROT_PPP) && (config->line_prot == PROT_PPP)) { LMC_BPF_DETACH; # if (defined(__NetBSD__) || defined(__OpenBSD__)) sc->sppp->pp_flags &= ~PP_CISCO; # elif defined(__FreeBSD__) sc->ifp->if_flags &= ~IFF_LINK2; sc->sppp->pp_flags &= ~PP_FR; # endif LMC_BPF_ATTACH(DLT_PPP, 4); sppp_ioctl(sc->ifp, SIOCSIFFLAGS, NULL); } # ifndef DLT_C_HDLC # define DLT_C_HDLC DLT_PPP # endif /* Check for change to C_HDLC protocol. */ if ((sc->config.line_prot != PROT_C_HDLC) && (config->line_prot == PROT_C_HDLC)) { LMC_BPF_DETACH; # if (defined(__NetBSD__) || defined(__OpenBSD__)) sc->sppp->pp_flags |= PP_CISCO; # elif defined(__FreeBSD__) sc->ifp->if_flags |= IFF_LINK2; sc->sppp->pp_flags &= ~PP_FR; # endif LMC_BPF_ATTACH(DLT_C_HDLC, 4); sppp_ioctl(sc->ifp, SIOCSIFFLAGS, NULL); } /* Check for change to Frame Relay protocol. */ if ((sc->config.line_prot != PROT_FRM_RLY) && (config->line_prot == PROT_FRM_RLY)) { LMC_BPF_DETACH; # if (defined(__NetBSD__) || defined(__OpenBSD__)) sc->sppp->pp_flags &= ~PP_CISCO; # elif defined(__FreeBSD__) sc->ifp->if_flags &= ~IFF_LINK2; sc->sppp->pp_flags |= PP_FR; # endif LMC_BPF_ATTACH(DLT_FRELAY, 4); sppp_ioctl(sc->ifp, SIOCSIFFLAGS, NULL); } /* Check for disabling keep-alives. */ if ((sc->config.keep_alive != 0) && (config->keep_alive == 0)) sc->sppp->pp_flags &= ~PP_KEEPALIVE; /* Check for enabling keep-alives. */ if ((sc->config.keep_alive == 0) && (config->keep_alive != 0)) sc->sppp->pp_flags |= PP_KEEPALIVE; } #endif /* NSPPP */ /* Loop back through the TULIP Ethernet chip; (no CRC). */ /* Data sheet says stop DMA before changing OPMODE register. */ /* But that's not as simple as it sounds; works anyway. */ /* Check for enabling loopback thru Tulip chip. 
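*/
/* How a reconfiguration reaches this procedure: lmcconfig(8) reads the
 * current config with LMCIOCGCFG, edits it, and writes it back with
 * LMCIOCSCFG; core_ioctl() then calls config_proto() with the NEW
 * config while sc->config still holds the OLD one, which is why every
 * test here compares the two.  A sketch of the internal path (the same
 * one SIOCSIFTIMESLOT takes in lmc_ifnet_ioctl() below):
 *
 *   struct config config = sc->config;      // old settings
 *   config.line_prot = PROT_PPP;            // requested change
 *   config.iohdr.cookie = NGM_LMC_COOKIE;   // checked by core_ioctl
 *   error = core_ioctl(sc, LMCIOCSCFG, (caddr_t)&config);
 */
/* Check for enabling loopback thru Tulip chip.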
*/ if ((sc->config.loop_back != CFG_LOOP_TULIP) && (config->loop_back == CFG_LOOP_TULIP)) { u_int32_t op_mode = READ_CSR(TLP_OP_MODE); op_mode |= TLP_OP_INT_LOOP; WRITE_CSR(TLP_OP_MODE, op_mode); config->crc_len = CFG_CRC_0; } /* Check for disabling loopback thru Tulip chip. */ if ((sc->config.loop_back == CFG_LOOP_TULIP) && (config->loop_back != CFG_LOOP_TULIP)) { u_int32_t op_mode = READ_CSR(TLP_OP_MODE); op_mode &= ~TLP_OP_LOOP_MODE; WRITE_CSR(TLP_OP_MODE, op_mode); config->crc_len = CFG_CRC_16; } } /* This is the core ioctl procedure. */ /* It handles IOCTLs from lmcconfig(8). */ /* It must not run when card watchdogs run. */ /* Called from a syscall (user context; no spinlocks). */ /* This procedure can SLEEP. */ static int core_ioctl(softc_t *sc, u_long cmd, caddr_t data) { struct iohdr *iohdr = (struct iohdr *) data; struct ioctl *ioctl = (struct ioctl *) data; struct status *status = (struct status *) data; struct config *config = (struct config *) data; int error = 0; /* All structs start with a string and a cookie. */ if (((struct iohdr *)data)->cookie != NGM_LMC_COOKIE) return EINVAL; while (TOP_TRYLOCK == 0) { sc->status.cntrs.lck_ioctl++; SLEEP(10000); /* yield? */ } switch (cmd) { case LMCIOCGSTAT: { *status = sc->status; iohdr->cookie = NGM_LMC_COOKIE; break; } case LMCIOCGCFG: { *config = sc->config; iohdr->cookie = NGM_LMC_COOKIE; break; } case LMCIOCSCFG: { if ((error = CHECK_CAP)) break; config_proto(sc, config); sc->config = *config; sc->card->config(sc); break; } case LMCIOCREAD: { if (ioctl->cmd == IOCTL_RW_PCI) { if (ioctl->address > 252) { error = EFAULT; break; } ioctl->data = READ_PCI_CFG(sc, ioctl->address); } else if (ioctl->cmd == IOCTL_RW_CSR) { if (ioctl->address > 15) { error = EFAULT; break; } ioctl->data = READ_CSR(ioctl->address*TLP_CSR_STRIDE); } else if (ioctl->cmd == IOCTL_RW_SROM) { if (ioctl->address > 63) { error = EFAULT; break; } ioctl->data = read_srom(sc, ioctl->address); } else if (ioctl->cmd == IOCTL_RW_BIOS) ioctl->data = read_bios(sc, ioctl->address); else if (ioctl->cmd == IOCTL_RW_MII) ioctl->data = read_mii(sc, ioctl->address); else if (ioctl->cmd == IOCTL_RW_FRAME) ioctl->data = read_framer(sc, ioctl->address); else error = EINVAL; break; } case LMCIOCWRITE: { if ((error = CHECK_CAP)) break; if (ioctl->cmd == IOCTL_RW_PCI) { if (ioctl->address > 252) { error = EFAULT; break; } WRITE_PCI_CFG(sc, ioctl->address, ioctl->data); } else if (ioctl->cmd == IOCTL_RW_CSR) { if (ioctl->address > 15) { error = EFAULT; break; } WRITE_CSR(ioctl->address*TLP_CSR_STRIDE, ioctl->data); } else if (ioctl->cmd == IOCTL_RW_SROM) { if (ioctl->address > 63) { error = EFAULT; break; } write_srom(sc, ioctl->address, ioctl->data); /* can sleep */ } else if (ioctl->cmd == IOCTL_RW_BIOS) { if (ioctl->address == 0) erase_bios(sc); write_bios(sc, ioctl->address, ioctl->data); /* can sleep */ } else if (ioctl->cmd == IOCTL_RW_MII) write_mii(sc, ioctl->address, ioctl->data); else if (ioctl->cmd == IOCTL_RW_FRAME) write_framer(sc, ioctl->address, ioctl->data); else if (ioctl->cmd == IOCTL_WO_SYNTH) write_synth(sc, (struct synth *)&ioctl->data); else if (ioctl->cmd == IOCTL_WO_DAC) { write_dac(sc, 0x9002); /* set Vref = 2.048 volts */ write_dac(sc, ioctl->data & 0xFFF); } else error = EINVAL; break; } case LMCIOCTL: { if ((error = CHECK_CAP)) break; if (ioctl->cmd == IOCTL_XILINX_RESET) { reset_xilinx(sc); sc->card->config(sc); } else if (ioctl->cmd == IOCTL_XILINX_ROM) { load_xilinx_from_rom(sc); /* can sleep */ sc->card->config(sc); } else if (ioctl->cmd == 
IOCTL_XILINX_FILE) { /* load_xilinx_from_file() can sleep. */ error = load_xilinx_from_file(sc, ioctl->ucode, ioctl->data); if (error != 0) load_xilinx_from_rom(sc); /* try the rom */ sc->card->config(sc); set_status(sc, (error==0)); /* XXX */ } else if (ioctl->cmd == IOCTL_RESET_CNTRS) { memset(&sc->status.cntrs, 0, sizeof(struct event_cntrs)); microtime(&sc->status.cntrs.reset_time); } else error = sc->card->ioctl(sc, ioctl); /* can sleep */ break; } default: error = EINVAL; break; } TOP_UNLOCK; return error; } /* This is the core watchdog procedure. */ /* It calculates link speed, and calls the card-specific watchdog code. */ /* Calls interrupt() in case one got lost; also kick-starts the device. */ /* ioctl syscalls and card watchdog routines must be interlocked. */ /* This procedure must not sleep. */ static void core_watchdog(softc_t *sc) { /* Read and restart the Tulip timer. */ u_int32_t tx_speed = READ_CSR(TLP_TIMER); WRITE_CSR(TLP_TIMER, 0xFFFF); /* Measure MII clock using a timer in the Tulip chip. * This timer counts transmitter bits divided by 4096. * Since this is called once a second the math is easy. * This is only correct when the link is NOT sending pkts. * On a fully-loaded link, answer will be HALF actual rate. * Clock rate during pkt is HALF clk rate between pkts. * Measuring clock rate really measures link utilization! */ sc->status.tx_speed = (0xFFFF - (tx_speed & 0xFFFF)) << 12; /* The first status reset time is when the calendar clock is set. */ if (sc->status.cntrs.reset_time.tv_sec < 1000) microtime(&sc->status.cntrs.reset_time); /* Update hardware (operational) status. */ /* Call the card-specific watchdog routines. */ if (TOP_TRYLOCK != 0) { sc->status.oper_status = sc->card->watchdog(sc); /* Increment a counter which tells user-land */ /* observers that SNMP state has been updated. */ sc->status.ticks++; TOP_UNLOCK; } else sc->status.cntrs.lck_watch++; /* In case an interrupt gets lost... */ user_interrupt(sc, 1); } #if IFNET /* Called from a syscall (user context; no spinlocks). */ static int lmc_raw_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct ifreq *ifr = (struct ifreq *) data; int error = 0; switch (cmd) { # if (defined(__FreeBSD__) && defined(DEVICE_POLLING)) /* XXX necessary? */ case SIOCSIFCAP: # endif case SIOCSIFDSTADDR: case SIOCAIFADDR: case SIOCSIFFLAGS: #if 0 case SIOCADDMULTI: case SIOCDELMULTI: break; #endif case SIOCSIFADDR: ifp->if_flags |= IFF_UP; /* a Unix tradition */ break; case SIOCSIFMTU: ifp->if_mtu = ifr->ifr_mtu; break; default: error = EINVAL; break; } return error; } /* Called from a syscall (user context; no spinlocks). */ static int lmc_ifnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { softc_t *sc = IFP2SC(ifp); # ifdef __OpenBSD__ struct ifreq *ifr = (struct ifreq *) data; # endif int error = 0; switch (cmd) { /* Catch the IOCTLs used by lmcconfig. */ case LMCIOCGSTAT: case LMCIOCGCFG: case LMCIOCSCFG: case LMCIOCREAD: case LMCIOCWRITE: case LMCIOCTL: error = core_ioctl(sc, cmd, data); break; # ifdef __OpenBSD__ /* Catch the IOCTLs used by ifconfig. 
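*/
/* Worked example of the speed estimate in core_watchdog() above: the
 * Tulip timer counts transmitted bits divided by 4096 and is reloaded
 * with 0xFFFF once a second.  An idle T1 link sending 1.544 Mb/s of
 * flags decrements it by 1544000/4096 ~= 377 counts, so the timer
 * reads back 0xFFFF - 377 = 0xFE86 and the estimate is
 * (0xFFFF - 0xFE86) << 12 = 377 * 4096 = 1544192, i.e. ~1.544 Mb/s.
 * On a busy link the same arithmetic yields about half the true rate,
 * as the comment in core_watchdog() warns.
 */
/* Catch the IOCTLs used by ifconfig.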
*/ case SIOCSIFMEDIA: if ((error = CHECK_CAP)) break; case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->ifm, cmd); break; case SIOCSIFTIMESLOT: if ((error = CHECK_CAP)) break; if (sc->status.card_type == TLP_CSID_T1E1) { struct config config = sc->config; if ((error = copyin(ifr->ifr_data, &config.time_slots, sizeof config.time_slots))) break; config.iohdr.cookie = NGM_LMC_COOKIE; error = core_ioctl(sc, LMCIOCSCFG, (caddr_t)&config); } else error = EINVAL; break; case SIOCGIFTIMESLOT: if (sc->status.card_type == TLP_CSID_T1E1) error = copyout(&sc->config.time_slots, ifr->ifr_data, sizeof sc->config.time_slots); else error = EINVAL; break; # endif /* Pass the rest to the line protocol. */ default: if (sc->config.line_pkg == PKG_RAWIP) error = lmc_raw_ioctl(ifp, cmd, data); else # if NSPPP error = sppp_ioctl(ifp, cmd, data); # elif P2P error = p2p_ioctl(ifp, cmd, data); # else error = EINVAL; # endif break; } if (DRIVER_DEBUG && (error!=0)) printf("%s: lmc_ifnet_ioctl; cmd=0x%08lx error=%d\n", NAME_UNIT, cmd, error); return error; } /* Called from a syscall (user context; no spinlocks). */ static void lmc_ifnet_start(struct ifnet *ifp) { softc_t *sc = IFP2SC(ifp); /* Start the transmitter; incoming pkts are NOT processed. */ user_interrupt(sc, 0); } /* sppp and p2p replace this with their own proc. */ /* RAWIP mode is the only time this is used. */ /* Called from a syscall (user context; no spinlocks). */ static int lmc_raw_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst, struct route *ro) { softc_t *sc = IFP2SC(ifp); int error = 0; /* Fail if the link is down. */ if (sc->status.oper_status != STATUS_UP) { m_freem(m); sc->status.cntrs.odiscards++; if (DRIVER_DEBUG) printf("%s: lmc_raw_output: tx pkt discarded: link down\n", NAME_UNIT); return ENETDOWN; } # if NETGRAPH /* Netgraph has priority over the ifnet kernel interface. */ if (sc->ng_hook != NULL) { m_freem(m); sc->status.cntrs.odiscards++; if (DRIVER_DEBUG) printf("%s: lmc_raw_output: tx pkt discarded: netgraph active\n", NAME_UNIT); return EBUSY; } # endif /* lmc_raw_output() ENQUEUEs in a syscall or softirq. */ /* txintr_setup() DEQUEUEs in a hard interrupt. */ /* Some BSD QUEUE routines are not interrupt-safe. */ { DISABLE_INTR; # if (__FreeBSD_version >= 503000) IFQ_ENQUEUE(&ifp->if_snd, m, error); # else IFQ_ENQUEUE(&ifp->if_snd, m, NULL, error); # endif ENABLE_INTR; } if (error==0) user_interrupt(sc, 0); /* start the transmitter */ else { m_freem(m); sc->status.cntrs.odiscards++; if (DRIVER_DEBUG) printf("%s: lmc_raw_output: IFQ_ENQUEUE() failed; error %d\n", NAME_UNIT, error); } return error; } /* Called from a softirq once a second. */ static void lmc_ifnet_watchdog(struct ifnet *ifp) { softc_t *sc = IFP2SC(ifp); u_int8_t old_oper_status = sc->status.oper_status; struct event_cntrs *cntrs = &sc->status.cntrs; core_watchdog(sc); /* updates oper_status */ #if NETGRAPH if (sc->ng_hook != NULL) { sc->status.line_pkg = PKG_NG; sc->status.line_prot = 0; } else #endif if (sc->config.line_pkg == PKG_RAWIP) { sc->status.line_pkg = PKG_RAWIP; sc->status.line_prot = PROT_IP_HDLC; } else { # if P2P /* Notice change in link status. */ if ((old_oper_status != sc->status.oper_status) && (sc->p2p->p2p_modem)) (*sc->p2p->p2p_modem)(sc->p2p, sc->status.oper_status==STATUS_UP); /* Notice change in line protocol. 
*/ sc->status.line_pkg = PKG_P2P; switch (sc->ifp->if_type) { case IFT_PPP: sc->status.line_prot = PROT_PPP; break; case IFT_PTPSERIAL: sc->status.line_prot = PROT_C_HDLC; break; case IFT_FRELAY: sc->status.line_prot = PROT_FRM_RLY; break; default: sc->status.line_prot = 0; break; } # elif NSPPP /* Notice change in link status. */ if ((old_oper_status != STATUS_UP) && (sc->status.oper_status == STATUS_UP)) /* link came up */ sppp_tls(sc->sppp); if ((old_oper_status == STATUS_UP) && (sc->status.oper_status != STATUS_UP)) /* link went down */ sppp_tlf(sc->sppp); /* Notice change in line protocol. */ sc->status.line_pkg = PKG_SPPP; # ifdef __FreeBSD__ if (sc->sppp->pp_flags & PP_FR) sc->status.line_prot = PROT_FRM_RLY; else if (sc->ifp->if_flags & IFF_LINK2) # elif (defined(__NetBSD__) || defined(__OpenBSD__)) if (sc->sppp->pp_flags & PP_CISCO) # endif sc->status.line_prot = PROT_C_HDLC; else sc->status.line_prot = PROT_PPP; # else /* Suppress compiler warning. */ if (old_oper_status == STATUS_UP); # endif } /* Copy statistics from sc to ifp. */ ifp->if_baudrate = sc->status.tx_speed; ifp->if_ipackets = cntrs->ipackets; ifp->if_opackets = cntrs->opackets; ifp->if_ibytes = cntrs->ibytes; ifp->if_obytes = cntrs->obytes; ifp->if_ierrors = cntrs->ierrors; ifp->if_oerrors = cntrs->oerrors; ifp->if_iqdrops = cntrs->idiscards; # if ((__FreeBSD_version >= 500000) || defined(__OpenBSD__) || defined(__NetBSD__)) if (sc->status.oper_status == STATUS_UP) ifp->if_link_state = LINK_STATE_UP; else ifp->if_link_state = LINK_STATE_DOWN; # endif /* Call this procedure again after one second. */ ifp->if_timer = 1; } # ifdef __OpenBSD__ /* Callback from ifmedia. */ static int ifmedia_change(struct ifnet *ifp) { softc_t *sc = IFP2SC(ifp); struct config config = sc->config; int media = sc->ifm.ifm_media; int error; /* ifconfig lmc0 media t1 */ if (sc->status.card_type == TLP_CSID_T3) { if ((media & IFM_TMASK) == IFM_TDM_T3) config.format = CFG_FORMAT_T3CPAR; else if ((media & IFM_TMASK) == IFM_TDM_T3_M13) config.format = CFG_FORMAT_T3M13; } else if (sc->status.card_type == TLP_CSID_T1E1) { if ((media & IFM_TMASK) == IFM_TDM_T1) config.format = CFG_FORMAT_T1ESF; else if ((media & IFM_TMASK) == IFM_TDM_T1_AMI) config.format = CFG_FORMAT_T1SF; else if ((media & IFM_TMASK) == IFM_TDM_E1) config.format = CFG_FORMAT_E1NONE; else if ((media & IFM_TMASK) == IFM_TDM_E1_G704) config.format = CFG_FORMAT_E1FASCRC; } /* ifconfig lmc0 mediaopt loopback */ if (media & IFM_LOOP) config.loop_back = CFG_LOOP_TULIP; else config.loop_back = CFG_LOOP_NONE; /* ifconfig lmc0 mediaopt crc16 */ if (media & IFM_TDM_HDLC_CRC16) config.crc_len = CFG_CRC_16; else config.crc_len = CFG_CRC_32; /* Set ConFiGuration. */ config.iohdr.cookie = NGM_LMC_COOKIE; error = core_ioctl(sc, LMCIOCSCFG, (caddr_t)&config); return error; } /* Callback from ifmedia. */ static void ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr) { softc_t *sc = IFP2SC(ifp); /* ifconfig wants to know if the hardware link is up. */ ifmr->ifm_status = IFM_AVALID; if (sc->status.oper_status == STATUS_UP) ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active = sc->ifm.ifm_cur->ifm_media; if (sc->config.loop_back != CFG_LOOP_NONE) ifmr->ifm_active |= IFM_LOOP; if (sc->config.crc_len == CFG_CRC_16) ifmr->ifm_active |= IFM_TDM_HDLC_CRC16; } # endif /* __OpenBSD__ */ static void setup_ifnet(struct ifnet *ifp) { softc_t *sc = ifp->if_softc; /* Initialize the generic network interface. */ /* Note similarity to linux's setup_netdev(). 
*/ ifp->if_flags = IFF_POINTOPOINT; ifp->if_flags |= IFF_RUNNING; ifp->if_ioctl = lmc_ifnet_ioctl; ifp->if_start = lmc_ifnet_start; /* sppp changes this */ ifp->if_output = lmc_raw_output; /* sppp & p2p change this */ ifp->if_input = lmc_raw_input; ifp->if_watchdog = lmc_ifnet_watchdog; ifp->if_timer = 1; ifp->if_mtu = MAX_DESC_LEN; /* sppp & p2p change this */ ifp->if_type = IFT_PTPSERIAL; /* p2p changes this */ # if (defined(__FreeBSD__) && defined(DEVICE_POLLING)) ifp->if_capabilities |= IFCAP_POLLING; + ifp->if_capenable |= IFCAP_POLLING_NOCOUNT; # if (__FreeBSD_version < 500000) ifp->if_capenable |= IFCAP_POLLING; # endif # endif /* Every OS does it differently! */ # if (defined(__FreeBSD__) && (__FreeBSD_version < 502000)) (const char *)ifp->if_name = device_get_name(sc->dev); ifp->if_unit = device_get_unit(sc->dev); # elif (__FreeBSD_version >= 502000) if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); # elif defined(__NetBSD__) strcpy(ifp->if_xname, sc->dev.dv_xname); # elif __OpenBSD__ bcopy(sc->dev.dv_xname, ifp->if_xname, IFNAMSIZ); # elif defined(__bsdi__) ifp->if_name = sc->dev.dv_cfdata->cf_driver->cd_name; ifp->if_unit = sc->dev.dv_unit; # endif } static int lmc_ifnet_attach(softc_t *sc) { # if (__FreeBSD_version >= 600000) sc->ifp = if_alloc(NSPPP ? IFT_PPP : IFT_OTHER); if (sc->ifp == NULL) return ENOMEM; # endif # if NSPPP # if (__FreeBSD_version >= 600000) sc->sppp = sc->ifp->if_l2com; # else sc->ifp = &sc->spppcom.pp_if; sc->sppp = &sc->spppcom; # endif # elif P2P sc->ifp = &sc->p2pcom.p2p_if; sc->p2p = &sc->p2pcom; # elif (__FreeBSD_version < 600000) sc->ifp = &sc->ifnet; # endif /* Initialize the network interface struct. */ sc->ifp->if_softc = sc; setup_ifnet(sc->ifp); /* ALTQ output queue initialization. */ IFQ_SET_MAXLEN(&sc->ifp->if_snd, SNDQ_MAXLEN); IFQ_SET_READY(&sc->ifp->if_snd); /* Attach to the ifnet kernel interface. */ if_attach(sc->ifp); # if ((defined(__NetBSD__) && __NetBSD_Version__ >= 106000000) || \ (defined(__OpenBSD__) && OpenBSD >= 200211)) if_alloc_sadl(sc->ifp); # endif /* Attach Berkeley Packet Filter. */ LMC_BPF_ATTACH(DLT_RAW, 0); # ifdef __OpenBSD__ /* Initialize ifmedia mechanism. */ ifmedia_init(&sc->ifm, IFM_OMASK | IFM_GMASK | IFM_IMASK, ifmedia_change, ifmedia_status); if (sc->status.card_type == TLP_CSID_T3) { ifmedia_add(&sc->ifm, IFM_TDM | IFM_TDM_T3, 0, NULL); ifmedia_add(&sc->ifm, IFM_TDM | IFM_TDM_T3_M13, 0, NULL); ifmedia_set(&sc->ifm, IFM_TDM | IFM_TDM_T3); } else if (sc->status.card_type == TLP_CSID_T1E1) { ifmedia_add(&sc->ifm, IFM_TDM | IFM_TDM_T1, 0, NULL); ifmedia_add(&sc->ifm, IFM_TDM | IFM_TDM_T1_AMI, 0, NULL); ifmedia_add(&sc->ifm, IFM_TDM | IFM_TDM_E1, 0, NULL); ifmedia_add(&sc->ifm, IFM_TDM | IFM_TDM_E1_G704, 0, NULL); ifmedia_set(&sc->ifm, IFM_TDM | IFM_TDM_T1); } else if ((sc->status.card_type == TLP_CSID_HSSI) || (sc->status.card_type == TLP_CSID_SSI)) { ifmedia_add(&sc->ifm, IFM_TDM | IFM_NONE, 0, NULL); ifmedia_set(&sc->ifm, IFM_TDM | IFM_NONE); } # endif /* __OpenBSD__ */ return 0; } static void lmc_ifnet_detach(softc_t *sc) { # ifdef __OpenBSD__ ifmedia_delete_instance(&sc->ifm, IFM_INST_ANY); # endif # if (defined(__FreeBSD__) && defined(DEVICE_POLLING)) if (sc->ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(sc->ifp); # endif /* Detach Berkeley Packet Filter. */ LMC_BPF_DETACH; # if ((defined(__NetBSD__) && __NetBSD_Version__ >= 106000000) || \ (defined(__OpenBSD__) && OpenBSD >= 200211)) if_free_sadl(sc->ifp); # endif /* Detach from the ifnet kernel interface. 
*/ if_detach(sc->ifp); # if (__FreeBSD_version >= 600000) if_free_type(sc->ifp, NSPPP ? IFT_PPP : IFT_OTHER); # endif } #endif /* IFNET */ #if NETGRAPH /* Netgraph changed significantly between FreeBSD-4 and -5. */ /* These are backward compatibility hacks for FreeBSD-4. */ # if (__FreeBSD_version >= 500000) /* These next two macros should be added to netgraph */ # define NG_TYPE_REF(type) atomic_add_int(&(type)->refs, 1) # define NG_TYPE_UNREF(type) \ do { \ if ((type)->refs == 1) \ ng_rmtype(type); \ else \ atomic_subtract_int(&(type)->refs, 1); \ } while (0) # else /* FreeBSD-4 */ # define NGI_GET_MSG(item, msg) /* nothing */ # define NG_HOOK_FORCE_QUEUE(hook) /* nothing */ # define NG_TYPE_REF(type) atomic_add_int(&(type)->refs, 1) # define NG_TYPE_UNREF(type) \ do { \ if ((type)->refs == 1) \ LIST_REMOVE(type, types); \ else \ atomic_subtract_int(&(type)->refs, 1); \ } while (0) # endif /* It is an error to construct new copies of this Netgraph node. */ /* All instances are constructed by ng_attach and are persistent. */ # if (__FreeBSD_version >= 500000) static int ng_constructor(node_p node) { return EINVAL; } # else /* FreeBSD-4 */ static int ng_constructor(node_p *node) { return EINVAL; } # endif /* Incoming Netgraph control message. */ # if (__FreeBSD_version >= 500000) static int ng_rcvmsg(node_p node, item_p item, hook_p lasthook) { struct ng_mesg *msg; # else /* FreeBSD-4 */ static int ng_rcvmsg(node_p node, struct ng_mesg *msg, const char *retaddr, struct ng_mesg **rptr) { # endif struct ng_mesg *resp = NULL; softc_t *sc = NG_NODE_PRIVATE(node); int error = 0; NGI_GET_MSG(item, msg); if (msg->header.typecookie == NGM_LMC_COOKIE) { switch (msg->header.cmd) { case LMCIOCGSTAT: case LMCIOCGCFG: case LMCIOCSCFG: case LMCIOCREAD: case LMCIOCWRITE: case LMCIOCTL: { /* Call the core ioctl procedure. */ error = core_ioctl(sc, msg->header.cmd, msg->data); if ((msg->header.cmd & IOC_OUT) != 0) { /* synchronous response */ NG_MKRESPONSE(resp, msg, sizeof(struct ng_mesg) + IOCPARM_LEN(msg->header.cmd), M_NOWAIT); if (resp == NULL) error = ENOMEM; else memcpy(resp->data, msg->data, IOCPARM_LEN(msg->header.cmd)); } break; } default: error = EINVAL; break; } } else if ((msg->header.typecookie == NGM_GENERIC_COOKIE) && (msg->header.cmd == NGM_TEXT_STATUS)) { /* synchronous response */ NG_MKRESPONSE(resp, msg, sizeof(struct ng_mesg) + NG_TEXTRESPONSE, M_NOWAIT); if (resp == NULL) error = ENOMEM; else { char *s = resp->data; sprintf(s, "Card type = <%s>\n" "This driver considers the link to be %s.\n" "Use lmcconfig to configure this interface.\n", sc->dev_desc, (sc->status.oper_status==STATUS_UP) ? "UP" : "DOWN"); resp->header.arglen = strlen(s) +1; } } else /* Netgraph should be able to read and write these * parameters with text-format control messages: * SSI HSSI T1E1 T3 * crc crc crc crc * loop loop loop loop * clksrc clksrc * dte dte format format * synth synth cablen cablen * cable timeslot scram * gain * pulse * lbo * Someday I'll implement this... */ error = EINVAL; /* Handle synchronous response. */ # if (__FreeBSD_version >= 500000) NG_RESPOND_MSG(error, node, item, resp); NG_FREE_MSG(msg); # else /* FreeBSD-4 */ if (rptr != NULL) *rptr = resp; else if (resp != NULL) free(resp, M_NETGRAPH); free(msg, M_NETGRAPH); # endif return error; } /* This is a persistent netgraph node. 
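*/
/* The control interface just implemented can be exercised from user
 * land through libnetgraph; the LMCIOC* commands arrive at ng_rcvmsg()
 * carrying the same structs core_ioctl() expects.  A rough user-space
 * sketch, error handling omitted:
 *
 *   #include <netgraph.h>
 *   struct status status;
 *   int cs, ds;
 *   NgMkSockNode(NULL, &cs, &ds);           // control socket
 *   status.iohdr.cookie = NGM_LMC_COOKIE;   // checked by core_ioctl
 *   NgSendMsg(cs, "lmc0:", NGM_LMC_COOKIE, LMCIOCGSTAT,
 *             &status, sizeof(status));     // request status
 *   ... read the reply with NgRecvMsg(cs, ...) ...
 *
 * The generic NGM_TEXT_STATUS message handled above returns the same
 * information as printable text.
 */
/* This is a persistent netgraph node.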
*/ static int ng_shutdown(node_p node) { # if (__FreeBSD_version >= 500000) /* unless told to really die, bounce back to life */ if ((node->nd_flags & NG_REALLY_DIE)==0) node->nd_flags &= ~NG_INVALID; /* bounce back to life */ # else /* FreeBSD-4 */ ng_cutlinks(node); node->flags &= ~NG_INVALID; /* bounce back to life */ # endif return 0; } /* ng_disconnect is the opposite of this procedure. */ static int ng_newhook(node_p node, hook_p hook, const char *name) { softc_t *sc = NG_NODE_PRIVATE(node); /* Hook name must be 'rawdata'. */ if (strncmp(name, "rawdata", 7) != 0) return EINVAL; /* Is our hook connected? */ if (sc->ng_hook != NULL) return EBUSY; /* Accept the hook. */ sc->ng_hook = hook; return 0; } /* Both ends have accepted their hooks and the links have been made. */ /* This is the last chance to reject the connection request. */ static int ng_connect(hook_p hook) { /* Probably not at splnet, force outward queueing. (huh?) */ NG_HOOK_FORCE_QUEUE(NG_HOOK_PEER(hook)); return 0; /* always accept */ } /* Receive data in mbufs from another Netgraph node. */ /* Transmit an mbuf-chain on the communication link. */ /* This procedure is very similar to lmc_raw_output(). */ /* Called from a syscall (user context; no spinlocks). */ # if (__FreeBSD_version >= 500000) static int ng_rcvdata(hook_p hook, item_p item) { softc_t *sc = NG_NODE_PRIVATE(NG_HOOK_NODE(hook)); int error = 0; struct mbuf *m; meta_p meta = NULL; NGI_GET_M(item, m); NGI_GET_META(item, meta); NG_FREE_ITEM(item); # else /* FreeBSD-4 */ static int ng_rcvdata(hook_p hook, struct mbuf *m, meta_p meta) { softc_t *sc = NG_NODE_PRIVATE(NG_HOOK_NODE(hook)); int error = 0; # endif /* This macro must not store into meta! */ NG_FREE_META(meta); /* Fail if the link is down. */ if (sc->status.oper_status != STATUS_UP) { m_freem(m); sc->status.cntrs.odiscards++; if (DRIVER_DEBUG) printf("%s: ng_rcvdata: tx pkt discarded: link down\n", NAME_UNIT); return ENETDOWN; } /* ng_rcvdata() ENQUEUEs in a syscall or softirq. */ /* txintr_setup() DEQUEUEs in a hard interrupt. */ /* Some BSD QUEUE routines are not interrupt-safe. */ { DISABLE_INTR; # if (__FreeBSD_version >= 503000) if (meta==NULL) IFQ_ENQUEUE(&sc->ng_sndq, m, error); else IFQ_ENQUEUE(&sc->ng_fastq, m, error); # else if (meta==NULL) IFQ_ENQUEUE(&sc->ng_sndq, m, NULL, error); else IFQ_ENQUEUE(&sc->ng_fastq, m, NULL, error); # endif ENABLE_INTR; } if (error==0) user_interrupt(sc, 0); /* start the transmitter */ else { m_freem(m); sc->status.cntrs.odiscards++; if (DRIVER_DEBUG) printf("%s: ng_rcvdata: IFQ_ENQUEUE() failed; error %d\n", NAME_UNIT, error); } return error; } /* ng_newhook is the opposite of this procedure, not */ /* ng_connect, as you might expect from the names. */ static int ng_disconnect(hook_p hook) { softc_t *sc = NG_NODE_PRIVATE(NG_HOOK_NODE(hook)); /* Disconnect the hook. */ sc->ng_hook = NULL; return 0; } static struct ng_type ng_type = { .version = NG_ABI_VERSION, .name = NG_LMC_NODE_TYPE, .mod_event = NULL, .constructor = ng_constructor, .rcvmsg = ng_rcvmsg, # if (__FreeBSD_version >=503000) .close = NULL, # endif .shutdown = ng_shutdown, .newhook = ng_newhook, .findhook = NULL, .connect = ng_connect, .rcvdata = ng_rcvdata, # if (defined(__FreeBSD__) && (__FreeBSD_version < 500000)) .rcvdataq = ng_rcvdata, # endif .disconnect = ng_disconnect, }; # if (IFNET == 0) /* Called from a softirq once a second. */ static void ng_watchdog(void *arg) { softc_t *sc = arg; /* Call the core watchdog procedure. */ core_watchdog(sc); /* Set line protocol and package status. 
*/ sc->status.line_pkg = PKG_NG; sc->status.line_prot = 0; /* Call this procedure again after one second. */ callout_reset(&sc->ng_callout, hz, ng_watchdog, sc); } # endif /* Attach to the Netgraph kernel interface (/sys/netgraph). * It is called once for each physical card during device attach. * This is effectively ng_constructor. */ static int ng_attach(softc_t *sc) { int error; /* If this node type is not known to Netgraph then register it. */ if (ng_type.refs == 0) /* or: if (ng_findtype(&ng_type) == NULL) */ { if ((error = ng_newtype(&ng_type))) { printf("%s: ng_newtype() failed; error %d\n", NAME_UNIT, error); return error; } } else NG_TYPE_REF(&ng_type); /* Call the superclass node constructor. */ if ((error = ng_make_node_common(&ng_type, &sc->ng_node))) { NG_TYPE_UNREF(&ng_type); printf("%s: ng_make_node_common() failed; error %d\n", NAME_UNIT, error); return error; } /* Associate a name with this netgraph node. */ if ((error = ng_name_node(sc->ng_node, NAME_UNIT))) { NG_NODE_UNREF(sc->ng_node); NG_TYPE_UNREF(&ng_type); printf("%s: ng_name_node() failed; error %d\n", NAME_UNIT, error); return error; } # if (__FreeBSD_version >= 500000) /* Initialize the send queue mutexes. */ mtx_init(&sc->ng_sndq.ifq_mtx, NAME_UNIT, "sndq", MTX_DEF); mtx_init(&sc->ng_fastq.ifq_mtx, NAME_UNIT, "fastq", MTX_DEF); # endif /* Put a backpointer to the softc in the netgraph node. */ NG_NODE_SET_PRIVATE(sc->ng_node, sc); /* ALTQ output queue initialization. */ IFQ_SET_MAXLEN(&sc->ng_fastq, SNDQ_MAXLEN); IFQ_SET_READY(&sc->ng_fastq); IFQ_SET_MAXLEN(&sc->ng_sndq, SNDQ_MAXLEN); IFQ_SET_READY(&sc->ng_sndq); /* If ifnet is present, it will call watchdog. */ /* Otherwise, arrange to call watchdog here. */ # if (IFNET == 0) /* Arrange to call ng_watchdog() once a second. */ # if (__FreeBSD_version >= 500000) callout_init(&sc->ng_callout, 0); # else /* FreeBSD-4 */ callout_init(&sc->ng_callout); # endif callout_reset(&sc->ng_callout, hz, ng_watchdog, sc); # endif return 0; } static void ng_detach(softc_t *sc) { # if (IFNET == 0) callout_stop(&sc->ng_callout); # endif # if (__FreeBSD_version >= 500000) mtx_destroy(&sc->ng_sndq.ifq_mtx); mtx_destroy(&sc->ng_fastq.ifq_mtx); ng_rmnode_self(sc->ng_node); /* free hook */ NG_NODE_UNREF(sc->ng_node); /* free node */ NG_TYPE_UNREF(&ng_type); # else /* FreeBSD-4 */ ng_unname(sc->ng_node); /* free name */ ng_cutlinks(sc->ng_node); /* free hook */ NG_NODE_UNREF(sc->ng_node); /* free node */ NG_TYPE_UNREF(&ng_type); # endif } #endif /* NETGRAPH */ /* The next few procedures initialize the card. */ /* Returns 0 on success; error code on failure. */ static int startup_card(softc_t *sc) { int num_rx_descs, error = 0; u_int32_t tlp_bus_pbl, tlp_bus_cal, tlp_op_tr; u_int32_t tlp_cfdd, tlp_cfcs; u_int32_t tlp_cflt, tlp_csid, tlp_cfit; /* Make sure the COMMAND bits are reasonable. */ tlp_cfcs = READ_PCI_CFG(sc, TLP_CFCS); tlp_cfcs &= ~TLP_CFCS_MWI_ENABLE; tlp_cfcs |= TLP_CFCS_BUS_MASTER; tlp_cfcs |= TLP_CFCS_MEM_ENABLE; tlp_cfcs |= TLP_CFCS_IO_ENABLE; tlp_cfcs |= TLP_CFCS_PAR_ERROR; tlp_cfcs |= TLP_CFCS_SYS_ERROR; WRITE_PCI_CFG(sc, TLP_CFCS, tlp_cfcs); /* Set the LATENCY TIMER to the recommended value, */ /* and make sure the CACHE LINE SIZE is reasonable. */ tlp_cfit = READ_PCI_CFG(sc, TLP_CFIT); tlp_cflt = READ_PCI_CFG(sc, TLP_CFLT); tlp_cflt &= ~TLP_CFLT_LATENCY; tlp_cflt |= (tlp_cfit & TLP_CFIT_MAX_LAT)>>16; /* "prgmbl burst length" and "cache alignment" used below. 
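*/
/* The switch below derives two Tulip busmode parameters from the PCI
 * cache line size just read: the programmable burst length
 * (tlp_bus_pbl, in 32-bit words) and the encoded cache alignment
 * (tlp_bus_cal).  In tabular form:
 *
 *   cache line    tlp_bus_cal    tlp_bus_pbl
 *    8 bytes           1             32
 *   16 bytes           2             32
 *   32 bytes           3             32
 *   other         forced back to the 8-byte case
 *
 * Both values are shifted into TLP_BUS_MODE further down in
 * startup_card().
 */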
switch(tlp_cflt & TLP_CFLT_CACHE) { case 8: /* 8 bytes per cache line */ { tlp_bus_pbl = 32; tlp_bus_cal = 1; break; } case 16: { tlp_bus_pbl = 32; tlp_bus_cal = 2; break; } case 32: { tlp_bus_pbl = 32; tlp_bus_cal = 3; break; } default: { tlp_bus_pbl = 32; tlp_bus_cal = 1; tlp_cflt &= ~TLP_CFLT_CACHE; tlp_cflt |= 8; break; } } WRITE_PCI_CFG(sc, TLP_CFLT, tlp_cflt); /* Make sure SNOOZE and SLEEP modes are disabled. */ tlp_cfdd = READ_PCI_CFG(sc, TLP_CFDD); tlp_cfdd &= ~TLP_CFDD_SLEEP; tlp_cfdd &= ~TLP_CFDD_SNOOZE; WRITE_PCI_CFG(sc, TLP_CFDD, tlp_cfdd); DELAY(11*1000); /* Tulip wakes up in 10 ms max */ /* Software Reset the Tulip chip; stops DMA and Interrupts. */ /* This does not change the PCI config regs just set above. */ WRITE_CSR(TLP_BUS_MODE, TLP_BUS_RESET); /* self-clearing */ DELAY(5); /* Tulip is dead for 50 PCI cycles after reset. */ /* Reset the Xilinx Field Programmable Gate Array. */ reset_xilinx(sc); /* side effect: turns on all four LEDs */ /* Configure card-specific stuff (framers, line interfaces, etc.). */ sc->card->config(sc); /* Initializing cards can glitch clocks and upset fifos. */ /* Reset the FIFOs between the Tulip and Xilinx chips. */ set_mii16_bits(sc, MII16_FIFO); clr_mii16_bits(sc, MII16_FIFO); /* Initialize the PCI busmode register. */ /* The PCI bus cycle type "Memory Write and Invalidate" does NOT */ /* work cleanly in any version of the 21140A, so don't enable it! */ WRITE_CSR(TLP_BUS_MODE, (tlp_bus_cal ? TLP_BUS_READ_LINE : 0) | (tlp_bus_cal ? TLP_BUS_READ_MULT : 0) | (tlp_bus_pbl<<TLP_BUS_PBL_SHIFT) | (tlp_bus_cal<<TLP_BUS_CAL_SHIFT)); /* Pick the number of RX descriptors and the TX fifo threshold. */ num_rx_descs = 16; tlp_op_tr = 0; /* 128-byte tx threshold; raised on underrun by check_intr_status() */ /* Create the DMA descriptor rings and tell the Tulip where they are. */ if ((error = create_ring(sc, &sc->txring, NUM_TX_DESCS))) return error; WRITE_CSR(TLP_TX_LIST, sc->txring.dma_addr); if ((error = create_ring(sc, &sc->rxring, num_rx_descs))) return error; WRITE_CSR(TLP_RX_LIST, sc->rxring.dma_addr); /* Initialize the operating mode register. */ WRITE_CSR(TLP_OP_MODE, TLP_OP_INIT | (tlp_op_tr<<TLP_OP_TR_SHIFT)); return 0; } /* Stop the card: reset the Tulip chip (stops DMA and Interrupts) */ /* and free the DMA descriptor rings; startup_card() in reverse. */ static void shutdown_card(void *arg) { softc_t *sc = arg; WRITE_CSR(TLP_BUS_MODE, TLP_BUS_RESET); destroy_ring(sc, &sc->txring); destroy_ring(sc, &sc->rxring); } /* Start the card and attach a kernel interface and line protocol. */ static int attach_card(softc_t *sc, const char *intrstr) { struct config config; u_int32_t tlp_cfrv; u_int16_t mii3; u_int8_t *ieee; int i, error = 0; /* Start the card. */ if ((error = startup_card(sc))) return error; /* Attach a kernel interface. */ #if NETGRAPH if ((error = ng_attach(sc))) return error; sc->flags |= FLAG_NETGRAPH; #endif #if IFNET if ((error = lmc_ifnet_attach(sc))) return error; sc->flags |= FLAG_IFNET; #endif /* Attach a line protocol stack. */ sc->config.line_pkg = PKG_RAWIP; config = sc->config; /* get current config */ config.line_pkg = 0; /* select external stack */ config.line_prot = PROT_C_HDLC; config.keep_alive = 1; config_proto(sc, &config); /* reconfigure */ sc->config = config; /* save new configuration */ /* Print interesting hardware-related things. */ mii3 = read_mii(sc, 3); tlp_cfrv = READ_PCI_CFG(sc, TLP_CFRV); printf("%s: PCI rev %d.%d, MII rev %d.%d", NAME_UNIT, (tlp_cfrv>>4) & 0xF, tlp_cfrv & 0xF, (mii3>>4) & 0xF, mii3 & 0xF); ieee = (u_int8_t *)sc->status.ieee; for (i=0; i<3; i++) sc->status.ieee[i] = read_srom(sc, 10+i); printf(", IEEE addr %02x:%02x:%02x:%02x:%02x:%02x", ieee[0], ieee[1], ieee[2], ieee[3], ieee[4], ieee[5]); sc->card->ident(sc); printf(" %s\n", intrstr); /* Print interesting software-related things. */ printf("%s: Driver rev %d.%d.%d", NAME_UNIT, DRIVER_MAJOR_VERSION, DRIVER_MINOR_VERSION, DRIVER_SUB_VERSION); printf(", Options %s%s%s%s%s%s%s%s%s\n", NETGRAPH ? "NETGRAPH " : "", GEN_HDLC ? "GEN_HDLC " : "", NSPPP ? "SPPP " : "", P2P ? "P2P " : "", ALTQ_PRESENT ? "ALTQ " : "", NBPFILTER ? "BPF " : "", DEV_POLL ?
"POLL " : "", IOREF_CSR ? "IO_CSR " : "MEM_CSR ", (BYTE_ORDER == BIG_ENDIAN) ? "BIG_END " : "LITTLE_END "); /* Make the local hardware ready. */ set_status(sc, 1); return 0; } /* Detach from the kernel in all ways. */ static void detach_card(softc_t *sc) { struct config config; /* Make the local hardware NOT ready. */ set_status(sc, 0); /* Detach external line protocol stack. */ if (sc->config.line_pkg != PKG_RAWIP) { config = sc->config; config.line_pkg = PKG_RAWIP; config_proto(sc, &config); sc->config = config; } /* Detach kernel interfaces. */ #if NETGRAPH if (sc->flags & FLAG_NETGRAPH) { IFQ_PURGE(&sc->ng_fastq); IFQ_PURGE(&sc->ng_sndq); ng_detach(sc); sc->flags &= ~FLAG_NETGRAPH; } #endif #if IFNET if (sc->flags & FLAG_IFNET) { IFQ_PURGE(&sc->ifp->if_snd); lmc_ifnet_detach(sc); sc->flags &= ~FLAG_IFNET; } #endif /* Reset the Tulip chip; stops DMA and Interrupts. */ shutdown_card(sc); } /* This is the I/O configuration interface for FreeBSD */ #ifdef __FreeBSD__ static int fbsd_probe(device_t dev) { u_int32_t cfid = pci_read_config(dev, TLP_CFID, 4); u_int32_t csid = pci_read_config(dev, TLP_CSID, 4); /* Looking for a DEC 21140A chip on any Lan Media Corp card. */ if (cfid != TLP_CFID_TULIP) return ENXIO; switch (csid) { case TLP_CSID_HSSI: case TLP_CSID_HSSIc: device_set_desc(dev, HSSI_DESC); break; case TLP_CSID_T3: device_set_desc(dev, T3_DESC); break; case TLP_CSID_SSI: device_set_desc(dev, SSI_DESC); break; case TLP_CSID_T1E1: device_set_desc(dev, T1E1_DESC); break; default: return ENXIO; } return 0; } static int fbsd_detach(device_t dev) { softc_t *sc = device_get_softc(dev); /* Stop the card and detach from the kernel. */ detach_card(sc); /* Release resources. */ if (sc->irq_cookie != NULL) { bus_teardown_intr(dev, sc->irq_res, sc->irq_cookie); sc->irq_cookie = NULL; } if (sc->irq_res != NULL) { bus_release_resource(dev, SYS_RES_IRQ, sc->irq_res_id, sc->irq_res); sc->irq_res = NULL; } if (sc->csr_res != NULL) { bus_release_resource(dev, sc->csr_res_type, sc->csr_res_id, sc->csr_res); sc->csr_res = NULL; } # if (__FreeBSD_version >= 500000) mtx_destroy(&sc->top_mtx); mtx_destroy(&sc->bottom_mtx); # endif return 0; /* no error */ } static int fbsd_shutdown(device_t dev) { shutdown_card(device_get_softc(dev)); return 0; } static int fbsd_attach(device_t dev) { softc_t *sc = device_get_softc(dev); int error; /* READ/WRITE_PCI_CFG need this. */ sc->dev = dev; /* What kind of card are we driving? */ switch (READ_PCI_CFG(sc, TLP_CSID)) { case TLP_CSID_HSSI: case TLP_CSID_HSSIc: sc->card = &hssi_card; break; case TLP_CSID_T3: sc->card = &t3_card; break; case TLP_CSID_SSI: sc->card = &ssi_card; break; case TLP_CSID_T1E1: sc->card = &t1_card; break; default: return ENXIO; } sc->dev_desc = device_get_desc(dev); /* Allocate PCI memory or IO resources to access the Tulip chip CSRs. */ # if IOREF_CSR sc->csr_res_id = TLP_CBIO; sc->csr_res_type = SYS_RES_IOPORT; # else sc->csr_res_id = TLP_CBMA; sc->csr_res_type = SYS_RES_MEMORY; # endif sc->csr_res = bus_alloc_resource(dev, sc->csr_res_type, &sc->csr_res_id, 0, ~0, 1, RF_ACTIVE); if (sc->csr_res == NULL) { printf("%s: bus_alloc_resource(csr) failed.\n", NAME_UNIT); return ENXIO; } sc->csr_tag = rman_get_bustag(sc->csr_res); sc->csr_handle = rman_get_bushandle(sc->csr_res); /* Allocate PCI interrupt resources for the card. 
*/ sc->irq_res_id = 0; sc->irq_res = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->irq_res_id, 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE); if (sc->irq_res == NULL) { printf("%s: bus_alloc_resource(irq) failed.\n", NAME_UNIT); fbsd_detach(dev); return ENXIO; } if ((error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE, NULL, bsd_interrupt, sc, &sc->irq_cookie))) { printf("%s: bus_setup_intr() failed; error %d\n", NAME_UNIT, error); fbsd_detach(dev); return error; } # if (__FreeBSD_version >= 500000) /* Initialize the top-half and bottom-half locks. */ mtx_init(&sc->top_mtx, NAME_UNIT, "top half lock", MTX_DEF); mtx_init(&sc->bottom_mtx, NAME_UNIT, "bottom half lock", MTX_DEF); # endif /* Start the card and attach a kernel interface and line protocol. */ if ((error = attach_card(sc, ""))) detach_card(sc); return error; } static device_method_t methods[] = { DEVMETHOD(device_probe, fbsd_probe), DEVMETHOD(device_attach, fbsd_attach), DEVMETHOD(device_detach, fbsd_detach), DEVMETHOD(device_shutdown, fbsd_shutdown), /* This driver does not suspend and resume. */ { 0, 0 } }; static driver_t driver = { .name = DEVICE_NAME, .methods = methods, # if (__FreeBSD_version >= 500000) .size = sizeof(softc_t), # else /* FreeBSD-4 */ .softc = sizeof(softc_t), # endif }; static devclass_t devclass; DRIVER_MODULE(lmc, pci, driver, devclass, 0, 0); MODULE_VERSION(lmc, 2); MODULE_DEPEND(lmc, pci, 1, 1, 1); # if NETGRAPH MODULE_DEPEND(lmc, netgraph, NG_ABI_VERSION, NG_ABI_VERSION, NG_ABI_VERSION); # endif # if NSPPP MODULE_DEPEND(lmc, sppp, 1, 1, 1); # endif #endif /* __FreeBSD__ */ /* This is the I/O configuration interface for NetBSD. */ #ifdef __NetBSD__ static int nbsd_match(struct device *parent, struct cfdata *match, void *aux) { struct pci_attach_args *pa = aux; u_int32_t cfid = pci_conf_read(pa->pa_pc, pa->pa_tag, TLP_CFID); u_int32_t csid = pci_conf_read(pa->pa_pc, pa->pa_tag, TLP_CSID); /* Looking for a DEC 21140A chip on any Lan Media Corp card. */ if (cfid != TLP_CFID_TULIP) return 0; switch (csid) { case TLP_CSID_HSSI: case TLP_CSID_HSSIc: case TLP_CSID_T3: case TLP_CSID_SSI: case TLP_CSID_T1E1: return 100; default: return 0; } } static int nbsd_detach(struct device *self, int flags) { softc_t *sc = (softc_t *)self; /* device is first in softc */ /* Stop the card and detach from the kernel. */ detach_card(sc); /* Release resources. */ if (sc->sdh_cookie != NULL) { shutdownhook_disestablish(sc->sdh_cookie); sc->sdh_cookie = NULL; } if (sc->irq_cookie != NULL) { pci_intr_disestablish(sc->pa_pc, sc->irq_cookie); sc->irq_cookie = NULL; } if (sc->csr_handle) { bus_space_unmap(sc->csr_tag, sc->csr_handle, TLP_CSR_SIZE); sc->csr_handle = 0; } return 0; /* no error */ } static void nbsd_attach(struct device *parent, struct device *self, void *aux) { softc_t *sc = (softc_t *)self; /* device is first in softc */ struct pci_attach_args *pa = aux; const char *intrstr; bus_addr_t csr_addr; int error; /* READ/WRITE_PCI_CFG need these. */ sc->pa_pc = pa->pa_pc; sc->pa_tag = pa->pa_tag; /* bus_dma needs this. */ sc->pa_dmat = pa->pa_dmat; /* What kind of card are we driving? 
*/ switch (READ_PCI_CFG(sc, TLP_CSID)) { case TLP_CSID_HSSI: case TLP_CSID_HSSIc: sc->dev_desc = HSSI_DESC; sc->card = &hssi_card; break; case TLP_CSID_T3: sc->dev_desc = T3_DESC; sc->card = &t3_card; break; case TLP_CSID_SSI: sc->dev_desc = SSI_DESC; sc->card = &ssi_card; break; case TLP_CSID_T1E1: sc->dev_desc = T1E1_DESC; sc->card = &t1_card; break; default: return; } printf(": %s\n", sc->dev_desc); /* Allocate PCI resources to access the Tulip chip CSRs. */ # if IOREF_CSR csr_addr = (bus_addr_t)READ_PCI_CFG(sc, TLP_CBIO) & -2; sc->csr_tag = pa->pa_iot; /* bus_space tag for IO refs */ # else csr_addr = (bus_addr_t)READ_PCI_CFG(sc, TLP_CBMA); sc->csr_tag = pa->pa_memt; /* bus_space tag for MEM refs */ # endif if ((error = bus_space_map(sc->csr_tag, csr_addr, TLP_CSR_SIZE, 0, &sc->csr_handle))) { printf("%s: bus_space_map() failed; error %d\n", NAME_UNIT, error); return; } /* Allocate PCI interrupt resources. */ if ((error = pci_intr_map(pa, &sc->intr_handle))) { printf("%s: pci_intr_map() failed; error %d\n", NAME_UNIT, error); nbsd_detach(self, 0); return; } sc->irq_cookie = pci_intr_establish(pa->pa_pc, sc->intr_handle, IPL_NET, bsd_interrupt, sc); if (sc->irq_cookie == NULL) { printf("%s: pci_intr_establish() failed\n", NAME_UNIT); nbsd_detach(self, 0); return; } intrstr = pci_intr_string(pa->pa_pc, sc->intr_handle); /* Install a shutdown hook. */ sc->sdh_cookie = shutdownhook_establish(shutdown_card, sc); if (sc->sdh_cookie == NULL) { printf("%s: shutdown_hook_establish() failed\n", NAME_UNIT); nbsd_detach(self, 0); return; } /* Initialize the top-half and bottom-half locks. */ simple_lock_init(&sc->top_lock); simple_lock_init(&sc->bottom_lock); /* Start the card and attach a kernel interface and line protocol. */ if ((error = attach_card(sc, intrstr))) detach_card(sc); } # if (__NetBSD_Version__ >= 106080000) /* 1.6H */ CFATTACH_DECL(lmc, sizeof(softc_t), nbsd_match, nbsd_attach, nbsd_detach, NULL); # else struct cfattach lmc_ca = { /*.ca_name = DEVICE_NAME, */ .ca_devsize = sizeof(softc_t), .ca_match = nbsd_match, .ca_attach = nbsd_attach, .ca_detach = nbsd_detach, .ca_activate = NULL, }; # endif # if (__NetBSD_Version__ >= 106080000) CFDRIVER_DECL(lmc, DV_IFNET, NULL); # else static struct cfdriver lmc_cd = { .cd_name = DEVICE_NAME, .cd_class = DV_IFNET, .cd_ndevs = 0, .cd_devs = NULL, }; # endif /* cfdata is declared static, unseen outside this module. */ /* It is used for LKM; config builds its own in ioconf.c. */ static struct cfdata lmc_cf = { # if (__NetBSD_Version__ >= 106080000) .cf_name = DEVICE_NAME, .cf_atname = DEVICE_NAME, # else .cf_driver = &lmc_cd, .cf_attach = &lmc_ca, # endif .cf_unit = 0, .cf_fstate = FSTATE_STAR, }; # if (__NetBSD_Version__ >= 106080000) MOD_MISC(DEVICE_NAME) # else static struct lkm_misc _module = { .lkm_name = DEVICE_NAME, .lkm_type = LM_MISC, .lkm_offset = 0, .lkm_ver = LKM_VERSION, }; # endif /* From /sys/dev/pci/pci.c (no public prototype). */ int pciprint(void *, const char *); static int lkm_nbsd_match(struct pci_attach_args *pa) { return nbsd_match(0, 0, pa); } /* LKM loader finds this by appending "_lkmentry" to filename "if_lmc". 
*/ int if_lmc_lkmentry(struct lkm_table *lkmtp, int cmd, int ver) { int i, error = 0; if (ver != LKM_VERSION) return EINVAL; switch (cmd) { case LKM_E_LOAD: { struct cfdriver* pcicd; lkmtp->private.lkm_misc = &_module; if ((pcicd = config_cfdriver_lookup("pci")) == NULL) { printf("%s: config_cfdriver_lookup(pci) failed\n", lmc_cd.cd_name); return ENOENT; } # if (__NetBSD_Version__ >= 106080000) if ((error = config_cfdriver_attach(&lmc_cd))) { printf("%s: config_cfdriver_attach() failed; error %d\n", lmc_cd.cd_name, error); return error; } if ((error = config_cfattach_attach(lmc_cd.cd_name, &lmc_ca))) { printf("%s: config_cfattach_attach() failed; error %d\n", lmc_cd.cd_name, error); config_cfdriver_detach(&lmc_cd); return error; } # endif for (i=0; i<pcicd->cd_ndevs; i++) { int dev; /* A pointer to a device is a pointer to its softc. */ struct pci_softc *sc = pcicd->cd_devs[i]; if (sc == NULL) continue; for (dev=0; dev<sc->sc_maxndevs; dev++) { struct pci_attach_args pa; pcitag_t tag = pci_make_tag(sc->sc_pc, sc->sc_bus, dev, 0); if (pci_probe_device(sc, tag, lkm_nbsd_match, &pa) != 0) config_attach(pcicd->cd_devs[i], &lmc_cf, &pa, pciprint); /* config_attach doesn't return on failure; it calls panic. */ } } break; } case LKM_E_UNLOAD: { for (i=lmc_cd.cd_ndevs-1; i>=0; i--) { struct device *dev = lmc_cd.cd_devs[i]; if (dev == NULL) continue; if ((error = config_detach(dev, 0))) { printf("%s: config_detach() failed; error %d\n", dev->dv_xname, error); return error; } } # if (__NetBSD_Version__ >= 106080000) if ((error = config_cfattach_detach(lmc_cd.cd_name, &lmc_ca))) { printf("%s: config_cfattach_detach() failed; error %d\n", lmc_cd.cd_name, error); return error; } if ((error = config_cfdriver_detach(&lmc_cd))) { printf("%s: config_cfdriver_detach() failed; error %d\n", lmc_cd.cd_name, error); return error; } # endif break; } case LKM_E_STAT: break; } return error; } #endif /* __NetBSD__ */ /* This is the I/O configuration interface for OpenBSD. */ #ifdef __OpenBSD__ static int obsd_match(struct device *parent, void *match, void *aux) { struct pci_attach_args *pa = aux; u_int32_t cfid = pci_conf_read(pa->pa_pc, pa->pa_tag, TLP_CFID); u_int32_t csid = pci_conf_read(pa->pa_pc, pa->pa_tag, TLP_CSID); /* Looking for a DEC 21140A chip on any Lan Media Corp card. */ if (cfid != TLP_CFID_TULIP) return 0; switch (csid) { case TLP_CSID_HSSI: case TLP_CSID_HSSIc: case TLP_CSID_T3: case TLP_CSID_SSI: case TLP_CSID_T1E1: return 100; /* match better than other 21140 drivers */ default: return 0; } } static int obsd_detach(struct device *self, int flags) { softc_t *sc = (softc_t *)self; /* device is first in softc */ /* Stop the card and detach from the kernel. */ detach_card(sc); /* Release resources. */ if (sc->sdh_cookie != NULL) { shutdownhook_disestablish(sc->sdh_cookie); sc->sdh_cookie = NULL; } if (sc->irq_cookie != NULL) { pci_intr_disestablish(sc->pa_pc, sc->irq_cookie); sc->irq_cookie = NULL; } if (sc->csr_handle) { bus_space_unmap(sc->csr_tag, sc->csr_handle, TLP_CSR_SIZE); sc->csr_handle = 0; } return 0; /* no error */ } static void obsd_attach(struct device *parent, struct device *self, void *aux) { softc_t *sc = (softc_t *)self; /* device is first in softc */ struct pci_attach_args *pa = aux; const char *intrstr; bus_addr_t csr_addr; int error; /* READ/WRITE_PCI_CFG need these. */ sc->pa_pc = pa->pa_pc; sc->pa_tag = pa->pa_tag; /* bus_dma needs this. */ sc->pa_dmat = pa->pa_dmat; /* What kind of card are we driving?
*/ switch (READ_PCI_CFG(sc, TLP_CSID)) { case TLP_CSID_HSSI: case TLP_CSID_HSSIc: sc->dev_desc = HSSI_DESC; sc->card = &hssi_card; break; case TLP_CSID_T3: sc->dev_desc = T3_DESC; sc->card = &t3_card; break; case TLP_CSID_SSI: sc->dev_desc = SSI_DESC; sc->card = &ssi_card; break; case TLP_CSID_T1E1: sc->dev_desc = T1E1_DESC; sc->card = &t1_card; break; default: return; } printf(": %s\n", sc->dev_desc); /* Allocate PCI resources to access the Tulip chip CSRs. */ # if IOREF_CSR csr_addr = (bus_addr_t)READ_PCI_CFG(sc, TLP_CBIO) & -2; sc->csr_tag = pa->pa_iot; /* bus_space tag for IO refs */ # else csr_addr = (bus_addr_t)READ_PCI_CFG(sc, TLP_CBMA); sc->csr_tag = pa->pa_memt; /* bus_space tag for MEM refs */ # endif if ((error = bus_space_map(sc->csr_tag, csr_addr, TLP_CSR_SIZE, 0, &sc->csr_handle))) { printf("%s: bus_space_map() failed; error %d\n", NAME_UNIT, error); return; } /* Allocate PCI interrupt resources. */ if ((error = pci_intr_map(pa, &sc->intr_handle))) { printf("%s: pci_intr_map() failed; error %d\n", NAME_UNIT, error); obsd_detach(self, 0); return; } sc->irq_cookie = pci_intr_establish(pa->pa_pc, sc->intr_handle, IPL_NET, bsd_interrupt, sc, self->dv_xname); if (sc->irq_cookie == NULL) { printf("%s: pci_intr_establish() failed\n", NAME_UNIT); obsd_detach(self, 0); return; } intrstr = pci_intr_string(pa->pa_pc, sc->intr_handle); /* Install a shutdown hook. */ sc->sdh_cookie = shutdownhook_establish(shutdown_card, sc); if (sc->sdh_cookie == NULL) { printf("%s: shutdown_hook_establish() failed\n", NAME_UNIT); obsd_detach(self, 0); return; } /* Initialize the top-half and bottom-half locks. */ simple_lock_init(&sc->top_lock); simple_lock_init(&sc->bottom_lock); /* Start the card and attach a kernel interface and line protocol. */ if ((error = attach_card(sc, intrstr))) detach_card(sc); } struct cfattach lmc_ca = { .ca_devsize = sizeof(softc_t), .ca_match = obsd_match, .ca_attach = obsd_attach, .ca_detach = obsd_detach, .ca_activate = NULL, }; struct cfdriver lmc_cd = { .cd_name = DEVICE_NAME, .cd_devs = NULL, .cd_class = DV_IFNET, .cd_indirect = 0, .cd_ndevs = 0, }; /* cfdata is declared static, unseen outside this module. */ /* It is used for LKM; config builds its own in ioconf.c. */ static struct cfdata lmc_cfdata = { .cf_attach = &lmc_ca, .cf_driver = &lmc_cd, .cf_unit = 0, .cf_fstate = FSTATE_STAR, }; static struct lkm_any _module = { .lkm_name = DEVICE_NAME, .lkm_type = LM_MISC, .lkm_offset = 0, .lkm_ver = LKM_VERSION, }; /* From /sys/dev/pci/pci.c (no public prototype). */ int pciprint(void *, const char *); extern struct cfdriver pci_cd; /* LKM loader finds this by appending "_lkmentry" to filename "if_lmc". */ int if_lmc_lkmentry(struct lkm_table *lkmtp, int cmd, int ver) { int i, error = 0; if (ver != LKM_VERSION) return EINVAL; switch (cmd) { case LKM_E_LOAD: { /* XXX This works for ONE card on pci0 of a i386 machine! XXX */ lkmtp->private.lkm_any = &_module; for (i=0; i<pci_cd.cd_ndevs; i++) { struct device *parent = pci_cd.cd_devs[i]; struct pci_attach_args pa; if (parent == NULL) continue; if ((parent->dv_unit)!=0) continue; /* only bus zero */ /* XXX For machine independence, need: pcibus_attach_args. XXX */ /* XXX See NetBSD's sys/dev/pci/pci.c/pci_probe_device. XXX */ /* XXX Why isn't there an LKM network interface module?
XXX */ pa.pa_pc = NULL; /* XXX */ pa.pa_bus = 0; /* XXX */ pa.pa_iot = I386_BUS_SPACE_IO; /* XXX */ pa.pa_memt = I386_BUS_SPACE_MEM; /* XXX */ pa.pa_dmat = &pci_bus_dma_tag; /* XXX */ for (pa.pa_device=0; pa.pa_device<32; pa.pa_device++) /* XXX */ { int intr; pa.pa_function = 0; /* DEC-21140A has function 0 only XXX */ pa.pa_tag = pci_make_tag(pa.pa_pc, pa.pa_bus, pa.pa_device, 0); pa.pa_id = pci_conf_read(pa.pa_pc, pa.pa_tag, PCI_ID_REG); if ((pa.pa_id & 0xFFFF) == 0xFFFF) continue; if ((pa.pa_id & 0xFFFF) == 0) continue; /* XXX this only works for pci0 -- no swizzelling XXX */ pa.pa_intrswiz = 0; pa.pa_intrtag = pa.pa_tag; intr = pci_conf_read(pa.pa_pc, pa.pa_tag, PCI_INTERRUPT_REG); pa.pa_intrline = PCI_INTERRUPT_LINE(intr); pa.pa_intrpin = ((PCI_INTERRUPT_PIN(intr) -1) % 4) +1; if (obsd_match(parent, &lmc_cfdata, &pa)) config_attach(parent, &lmc_cfdata, &pa, pciprint); /* config_attach doesn't return on failure; it calls panic. */ } } break; } case LKM_E_UNLOAD: { for (i=lmc_cd.cd_ndevs-1; i>=0; i--) { struct device *dev = lmc_cd.cd_devs[i]; if (dev == NULL) continue; if ((error = config_detach(dev, 0))) printf("%s: config_detach() failed; error %d\n", dev->dv_xname, error); } break; } case LKM_E_STAT: break; } return error; } #endif /* __OpenBSD__ */ /* This is the I/O configuration interface for BSD/OS. */ #ifdef __bsdi__ static int bsdi_match(pci_devaddr_t *pa) { u_int32_t cfid = pci_inl(pa, TLP_CFID); u_int32_t csid = pci_inl(pa, TLP_CSID); /* Looking for a DEC 21140A chip on any Lan Media Corp card. */ if (cfid != TLP_CFID_TULIP) return 0; switch (csid) { case TLP_CSID_HSSI: case TLP_CSID_HSSIc: case TLP_CSID_T3: case TLP_CSID_SSI: case TLP_CSID_T1E1: return 1; default: return 0; } } static int bsdi_probe(struct device *parent, struct cfdata *cf, void *aux) { struct isa_attach_args *ia = aux; pci_devaddr_t *pa = NULL; pci_devres_t res; /* This must be a PCI bus. */ if (ia->ia_bustype != BUS_PCI) return 0; /* Scan PCI bus for our boards. */ if ((pa = pci_scan(bsdi_match)) == 0) return 0; /* Scan config space for IO and MEM base registers and IRQ info. */ pci_getres(pa, &res, 1, ia); /* Crucial: pass pci_devaddr to bsdi_attach in ia_aux. */ ia->ia_aux = (void *)pa; return 1; } static void bsdi_attach(struct device *parent, struct device *self, void *aux) { softc_t *sc = (softc_t *)self; /* device is first in softc */ struct isa_attach_args *ia = aux; pci_devaddr_t *pa = ia->ia_aux; /* this is crucial! */ int error; /* READ/WRITE_PCI_CFG need this. */ sc->cfgbase = *pa; /* What kind of card are we driving? */ switch (READ_PCI_CFG(sc, TLP_CSID)) { case TLP_CSID_HSSI: case TLP_CSID_HSSIc: sc->dev_desc = HSSI_DESC; sc->card = &hssi_card; break; case TLP_CSID_T3: sc->dev_desc = T3_DESC; sc->card = &t3_card; break; case TLP_CSID_SSI: sc->dev_desc = SSI_DESC; sc->card = &ssi_card; break; case TLP_CSID_T1E1: sc->dev_desc = T1E1_DESC; sc->card = &t1_card; break; default: return; } printf(": %s\n", sc->dev_desc); /* Allocate PCI memory or IO resources to access the Tulip chip CSRs. */ sc->csr_iobase = ia->ia_iobase; sc->csr_membase = (u_int32_t *)mapphys((vm_offset_t)ia->ia_maddr, TLP_CSR_SIZE); /* Attach to the PCI bus. */ isa_establish(&sc->id, &sc->dev); /* Allocate PCI interrupt resources for the card. */ sc->ih.ih_fun = bsd_interrupt; sc->ih.ih_arg = sc; intr_establish(ia->ia_irq, &sc->ih, DV_NET); /* Install a shutdown hook. */ sc->ats.func = shutdown_card; sc->ats.arg = sc; atshutdown(&sc->ats, ATSH_ADD); /* Initialize the top-half and bottom-half locks. 
*/ simple_lock_init(&sc->top_lock); simple_lock_init(&sc->bottom_lock); /* Start the card and attach a kernel interface and line protocol. */ if ((error = attach_card(sc, ""))) detach_card(sc); } struct cfdriver lmccd = { .cd_devs = NULL, .cd_name = DEVICE_NAME, .cd_match = bsdi_probe, .cd_attach = bsdi_attach, .cd_class = DV_IFNET, .cd_devsize = sizeof(softc_t), }; #endif /* __bsdi__ */ #ifdef __linux__ /* The kernel calls this procedure when an interrupt happens. */ static irqreturn_t linux_interrupt(int irq, void *dev, struct pt_regs *regs) { struct net_device *net_dev = dev; softc_t *sc = dev_to_hdlc(net_dev)->priv; /* Cut losses early if this is not our interrupt. */ if ((READ_CSR(TLP_STATUS) & TLP_INT_TXRX) == 0) return IRQ_NONE; /* Disable card interrupts. */ WRITE_CSR(TLP_INT_ENBL, TLP_INT_DISABLE); /* Handle the card interrupt with the dev->poll method. */ if (netif_rx_schedule_prep(net_dev)) __netif_rx_schedule(net_dev); /* NAPI - add to poll list */ else printk("%s: interrupt while on poll list\n", NAME_UNIT); return IRQ_HANDLED; } /* This net_device method services interrupts in a softirq. */ /* With rxintr_cleanup(), it implements input flow control. */ static int linux_poll(struct net_device *net_dev, int *budget) { softc_t *sc = dev_to_hdlc(net_dev)->priv; int received; /* Yes, we do NAPI. */ /* Allow processing up to net_dev->quota incoming packets. */ /* This is the ONLY time core_interrupt() may process rx pkts. */ /* Otherwise (sc->quota == 0) and rxintr_cleanup() is a NOOP. */ sc->quota = net_dev->quota; /* Handle the card interrupt with kernel ints enabled. */ /* Process rx pkts (and tx pkts, too). */ /* Card interrupts are disabled. */ core_interrupt(sc, 0); /* Report number of rx packets processed. */ received = net_dev->quota - sc->quota; net_dev->quota -= received; *budget -= received; /* if quota prevented processing all rx pkts, leave rx ints disabled */ if (sc->quota == 0) /* this is off by one...but harmless */ { WRITE_CSR(TLP_INT_ENBL, TLP_INT_TX); return 1; /* more pkts to handle -- reschedule */ } sc->quota = 0; /* disable rx pkt processing by rxintr_cleanup() */ netif_rx_complete(net_dev); /* NAPI - remove from poll list */ /* Enable card interrupts. */ WRITE_CSR(TLP_INT_ENBL, TLP_INT_TXRX); return 0; } /* These next routines are similar to BSD's ifnet kernel/driver interface. */ /* This net_device method hands outgoing packets to the transmitter. */ /* With txintr_setup(), it implements output flow control. */ /* Called from a syscall (user context; no spinlocks). */ static int linux_start(struct sk_buff *skb, struct net_device *net_dev) { softc_t *sc = dev_to_hdlc(net_dev)->priv; if (sc->tx_skb == NULL) { /* Put this skb where the transmitter will see it. */ sc->tx_skb = skb; /* Start the transmitter; incoming pkts are NOT processed. */ user_interrupt(sc, 0); /* If the tx didn't take the skb then stop the queue. */ /* This can happen if another CPU is in core_interrupt(). */ if (sc->tx_skb != NULL) netif_stop_queue(net_dev); return 0; } /* This shouldn't happen; skb is NOT consumed. */ if (netif_queue_stopped(net_dev)) printk("%s: dev->start() called with queue stopped\n", NAME_UNIT); else netif_stop_queue(net_dev); return 1; } /* This net_device method restarts the transmitter if it hangs. */ /* Called from a softirq. */ static void linux_timeout(struct net_device *net_dev) { softc_t *sc = dev_to_hdlc(net_dev)->priv; /* Start the transmitter; incoming packets are NOT processed. */ user_interrupt(sc, 1); } /* This net_device method handles IOCTL syscalls. 
*/ /* Called from a syscall (user context; no spinlocks; can sleep). */ static int linux_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) { softc_t *sc = dev_to_hdlc(net_dev)->priv; int error = 0; if ((cmd >= SIOCDEVPRIVATE) && (cmd <= SIOCDEVPRIVATE+15)) { struct iohdr *iohdr = (struct iohdr *)ifr; u_int16_t direction = iohdr->direction; u_int16_t length = iohdr->length; char *user_addr = (char *)iohdr->iohdr; char *kern_addr; if (iohdr->cookie != NGM_LMC_COOKIE) return -EINVAL; /* Emulate a BSD-style IOCTL syscall. */ kern_addr = kmalloc(length, GFP_KERNEL); if (kern_addr == NULL) error = -ENOMEM; if ((error == 0) && ((direction & DIR_IOW) != 0)) error = copy_from_user(kern_addr, user_addr, length); if (error == 0) error = -core_ioctl(sc, (unsigned long)cmd, kern_addr); if ((error == 0) && ((direction & DIR_IOR) != 0)) error = copy_to_user(user_addr, kern_addr, length); kfree(kern_addr); } # if GEN_HDLC else if (cmd == SIOCWANDEV) { const size_t size = sizeof(sync_serial_settings); switch (ifr->ifr_settings.type) { case IF_GET_IFACE: /* get interface config */ { ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL; if (ifr->ifr_settings.size < size) { ifr->ifr_settings.size = size; error = -ENOBUFS; } else { if (sc->config.tx_clk_src == CFG_CLKMUX_ST) sc->hdlc_settings.clock_type = CLOCK_EXT; if (sc->config.tx_clk_src == CFG_CLKMUX_INT) sc->hdlc_settings.clock_type = CLOCK_TXINT; if (sc->config.tx_clk_src == CFG_CLKMUX_RT) sc->hdlc_settings.clock_type = CLOCK_TXFROMRX; sc->hdlc_settings.loopback = (sc->config.loop_back != CFG_LOOP_NONE) ? 1:0; sc->hdlc_settings.clock_rate = sc->status.tx_speed; error = copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &sc->hdlc_settings, size); } break; } case IF_IFACE_SYNC_SERIAL: /* set interface config */ { if (!capable(CAP_NET_ADMIN)) error = -EPERM; if (error == 0) error = copy_from_user(&sc->hdlc_settings, ifr->ifr_settings.ifs_ifsu.sync, size); /* hdlc_settings are currently ignored. */ break; } default: /* Pass the rest to the line protocol code. */ { error = hdlc_ioctl(net_dev, ifr, cmd); break; } } } # endif /* GEN_HDLC */ else /* unknown IOCTL command */ error = -EINVAL; if (DRIVER_DEBUG) printk("%s: linux_ioctl; cmd=0x%08x error=%d\n", NAME_UNIT, cmd, error); return error; } /* This net_device method returns a pointer to device statistics. */ static struct net_device_stats * linux_stats(struct net_device *net_dev) { # if GEN_HDLC return &dev_to_hdlc(net_dev)->stats; # else softc_t *sc = net_dev->priv; return &sc->net_stats; # endif } /* Called from a softirq once a second. */ static void linux_watchdog(unsigned long softc) { softc_t *sc = (softc_t *)softc; u_int8_t old_oper_status = sc->status.oper_status; struct event_cntrs *cntrs = &sc->status.cntrs; struct net_device_stats *stats = linux_stats(sc->net_dev); core_watchdog(sc); /* updates oper_status */ /* Notice change in link status. */ if ((old_oper_status != STATUS_UP) && (sc->status.oper_status == STATUS_UP)) /* link came up */ { hdlc_set_carrier(1, sc->net_dev); netif_wake_queue(sc->net_dev); } if ((old_oper_status == STATUS_UP) && (sc->status.oper_status != STATUS_UP)) /* link went down */ { hdlc_set_carrier(0, sc->net_dev); netif_stop_queue(sc->net_dev); } /* Notice change in line protocol. 
*/ if (sc->config.line_pkg == PKG_RAWIP) { sc->status.line_pkg = PKG_RAWIP; sc->status.line_prot = PROT_IP_HDLC; } # if GEN_HDLC else { sc->status.line_pkg = PKG_GEN_HDLC; switch (sc->hdlc_dev->proto.id) { case IF_PROTO_PPP: sc->status.line_prot = PROT_PPP; break; case IF_PROTO_CISCO: sc->status.line_prot = PROT_C_HDLC; break; case IF_PROTO_FR: sc->status.line_prot = PROT_FRM_RLY; break; case IF_PROTO_HDLC: sc->status.line_prot = PROT_IP_HDLC; break; case IF_PROTO_X25: sc->status.line_prot = PROT_X25; break; case IF_PROTO_HDLC_ETH: sc->status.line_prot = PROT_ETH_HDLC; break; default: sc->status.line_prot = 0; break; } } # endif /* GEN_HDLC */ /* Copy statistics from sc to net_dev for get_stats(). */ stats->rx_packets = cntrs->ipackets; stats->tx_packets = cntrs->opackets; stats->rx_bytes = cntrs->ibytes; stats->tx_bytes = cntrs->obytes; stats->rx_errors = cntrs->ierrors; stats->tx_errors = cntrs->oerrors; stats->rx_dropped = cntrs->idiscards; stats->tx_dropped = cntrs->odiscards; stats->rx_fifo_errors = cntrs->fifo_over; stats->tx_fifo_errors = cntrs->fifo_under; stats->rx_missed_errors = cntrs->missed; stats->rx_over_errors = cntrs->overruns; /* Call this procedure again after one second. */ sc->wd_timer.expires = jiffies + HZ; /* now plus one second */ add_timer(&sc->wd_timer); } /* This is the I/O configuration interface for Linux. */ /* This net_device method is called when IFF_UP goes false. */ static int linux_stop(struct net_device *net_dev) { softc_t *sc = dev_to_hdlc(net_dev)->priv; /* Stop the card and detach from the kernel. */ detach_card(sc); /* doesn't fail */ free_irq(net_dev->irq, net_dev); /* doesn't fail */ del_timer(&sc->wd_timer); /* return value ignored */ return 0; } /* This net_device method is called when IFF_UP goes true. */ static int linux_open(struct net_device *net_dev) { softc_t *sc = dev_to_hdlc(net_dev)->priv; int error; /* Allocate PCI interrupt resources for the card. */ if ((error = request_irq(net_dev->irq, &linux_interrupt, SA_SHIRQ, NAME_UNIT, net_dev))) { printk("%s: request_irq() failed; error %d\n", NAME_UNIT, error); return error; } /* Arrange to call linux_watchdog() once a second. */ init_timer(&sc->wd_timer); sc->wd_timer.expires = jiffies + HZ; /* now plus one second */ sc->wd_timer.function = &linux_watchdog; sc->wd_timer.data = (unsigned long) sc; add_timer(&sc->wd_timer); /* Start the card and attach a kernel interface and line protocol. */ if ((error = -attach_card(sc, ""))) linux_stop(net_dev); else { net_dev->weight = sc->rxring.num_descs; /* input flow control */ netif_start_queue(net_dev); /* output flow control */ } return error; } # if GEN_HDLC static int hdlc_attach(struct net_device *net_dev, unsigned short encoding, unsigned short parity) { return 0; } # endif /* This pci_driver method is called during shutdown or module-unload. */ /* This is called from user context; can sleep; no spinlocks! */ static void __exit linux_remove(struct pci_dev *pci_dev) { struct net_device *net_dev = (struct net_device *)pci_get_drvdata(pci_dev); softc_t *sc = dev_to_hdlc(net_dev)->priv; if (net_dev == NULL) return; /* Assume that linux_stop() has already been called. 
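(unregister_netdev()/unregister_hdlc_device() bring a running interface down before unregistering it, so the IRQ and the watchdog timer should already have been released by linux_stop().)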
*/ if (sc->flags & FLAG_NETDEV) # if GEN_HDLC unregister_hdlc_device(net_dev); # else unregister_netdev(net_dev); # endif # if (IOREF_CSR == 0) if (sc->csr_membase != NULL) iounmap(sc->csr_membase); # endif pci_disable_device(pci_dev); if (sc->csr_iobase != 0) pci_release_regions(pci_dev); pci_set_drvdata(pci_dev, NULL); kfree(sc); free_netdev(net_dev); } static void setup_netdev(struct net_device *net_dev) { /* Initialize the generic network device. */ /* Note similarity to BSD's lmc_ifnet_attach(). */ net_dev->flags = IFF_POINTOPOINT; net_dev->flags |= IFF_RUNNING; net_dev->open = linux_open; net_dev->stop = linux_stop; net_dev->hard_start_xmit = linux_start; net_dev->do_ioctl = linux_ioctl; net_dev->get_stats = linux_stats; net_dev->tx_timeout = linux_timeout; net_dev->poll = linux_poll; net_dev->watchdog_timeo = 1 * HZ; net_dev->tx_queue_len = SNDQ_MAXLEN; net_dev->mtu = MAX_DESC_LEN; net_dev->type = ARPHRD_RAWHDLC; /* The receiver generates frag-lists for packets >4032 bytes. */ /* The transmitter accepts scatter/gather lists and frag-lists. */ /* However Linux linearizes outgoing packets since our hardware */ /* doesn't compute soft checksums. All that work for nothing! */ /*net_dev->features |= NETIF_F_SG; */ /*net_dev->features |= NETIF_F_FRAGLIST; */ } /* This pci_driver method is called during boot or module-load. */ /* This is called from user context; can sleep; no spinlocks! */ static int __init linux_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) { u_int32_t cfid, csid; struct net_device *net_dev; softc_t *sc; int error; /* Looking for a DEC 21140A chip on any Lan Media Corp card. */ pci_read_config_dword(pci_dev, TLP_CFID, &cfid); if (cfid != TLP_CFID_TULIP) return -ENXIO; pci_read_config_dword(pci_dev, TLP_CSID, &csid); switch (csid) { case TLP_CSID_HSSI: case TLP_CSID_HSSIc: case TLP_CSID_T3: case TLP_CSID_SSI: case TLP_CSID_T1E1: break; default: return -ENXIO; } /* Declare that these cards use 32-bit single-address PCI cycles. */ if ((error = pci_set_dma_mask(pci_dev, DMA_32BIT_MASK))) { printk("%s: pci_set_dma_mask() failed; error %d\n", DEVICE_NAME, error); return error; } pci_set_consistent_dma_mask(pci_dev, DMA_32BIT_MASK); /* can't fail */ # if GEN_HDLC /* generic-hdlc line protocols */ /* device driver instance data, aka Soft Context or sc */ if ((sc = kmalloc(sizeof(softc_t), GFP_KERNEL)) == NULL) { printk("%s: kmalloc() failed\n", DEVICE_NAME); return -ENOMEM; } memset(sc, 0, sizeof(softc_t)); /* Allocate space for the HDLC network device struct. */ if ((net_dev = alloc_hdlcdev(sc)) == NULL) { printk("%s: alloc_hdlcdev() failed\n", DEVICE_NAME); kfree(sc); return -ENOMEM; } /* Initialize the network device struct. */ setup_netdev(net_dev); /* Initialize the HDLC extension to the network device. */ sc->hdlc_dev = dev_to_hdlc(net_dev); sc->hdlc_dev->attach = hdlc_attach; /* noop for this driver */ sc->hdlc_dev->xmit = linux_start; /* the REAL hard_start_xmit() */ # else /* GEN_HDLC */ /* no line protocol. */ /* Allocate space for the bare network device struct. */ net_dev = alloc_netdev(sizeof(softc_t), DEVICE_NAME"%d", setup_netdev); if (net_dev == NULL) { printk("%s: alloc_netdev() failed\n", DEVICE_NAME); return -ENOMEM; } /* device driver instance data, aka Soft Context or sc */ sc = net_dev->priv; # endif /* GEN_HDLC */ sc->net_dev = net_dev; /* NAME_UNIT macro needs this */ sc->pci_dev = pci_dev; /* READ/WRITE_PCI_CFG macros need this */ /* Cross-link pci_dev and net_dev. 
*/ pci_set_drvdata(pci_dev, net_dev); /* pci_dev->driver_data = net_dev */ SET_NETDEV_DEV(net_dev, &pci_dev->dev); /* net_dev->class_dev.dev = &pci_dev->dev */ SET_MODULE_OWNER(net_dev); /* ??? NOOP in linux-2.6.3. ??? */ /* Sets cfcs.io and cfcs.mem; sets pci_dev->irq based on cfit.int */ if ((error = pci_enable_device(pci_dev))) { printk("%s: pci_enable_device() failed; error %d\n", DEVICE_NAME, error); linux_remove(pci_dev); return error; } net_dev->irq = pci_dev->irq; /* linux_open/stop need this */ /* Allocate PCI memory and IO resources to access the Tulip chip CSRs. */ if ((error = pci_request_regions(pci_dev, DEVICE_NAME))) { printk("%s: pci_request_regions() failed; error %d\n", DEVICE_NAME, error); linux_remove(pci_dev); return error; } net_dev->base_addr = pci_resource_start(pci_dev, 0); net_dev->mem_start = pci_resource_start(pci_dev, 1); net_dev->mem_end = pci_resource_end(pci_dev, 1); sc->csr_iobase = net_dev->base_addr; # if (IOREF_CSR == 0) sc->csr_membase = ioremap_nocache(net_dev->mem_start, TLP_CSR_SIZE); if (sc->csr_membase == NULL) { printk("%s: ioremap_nocache() failed\n", DEVICE_NAME); linux_remove(pci_dev); return -EFAULT; } # endif /* Sets cfcs.master, enabling PCI DMA; checks latency timer value. */ pci_set_master(pci_dev); /* Later, attach_card() does this too. */ /* Initialize the top-half and bottom-half locks. */ /* Top_lock must be initialized before net_dev is registered. */ init_MUTEX(&sc->top_lock); spin_lock_init(&sc->bottom_lock); # if GEN_HDLC if ((error = register_hdlc_device(net_dev))) { printk("%s: register_hdlc_device() failed; error %d\n", DEVICE_NAME, error); linux_remove(pci_dev); return error; } # else if ((error = register_netdev(net_dev))) { printk("%s: register_netdev() failed; error %d\n", DEVICE_NAME, error); linux_remove(pci_dev); return error; } # endif /* The NAME_UNIT macro now works. Use DEVICE_NAME before this. */ sc->flags |= FLAG_NETDEV; /* What kind of card are we driving? */ switch (READ_PCI_CFG(sc, TLP_CSID)) { case TLP_CSID_HSSI: case TLP_CSID_HSSIc: sc->dev_desc = HSSI_DESC; sc->card = &hssi_card; break; case TLP_CSID_T3: sc->dev_desc = T3_DESC; sc->card = &t3_card; break; case TLP_CSID_SSI: sc->dev_desc = SSI_DESC; sc->card = &ssi_card; break; case TLP_CSID_T1E1: sc->dev_desc = T1E1_DESC; sc->card = &t1_card; break; default: /* shouldn't happen! */ linux_remove(pci_dev); return -ENXIO; } /* Announce the hardware on the console. */ printk("%s: <%s> io 0x%04lx/9 mem 0x%08lx/25 rom 0x%08lx/14 irq %d pci %s\n", NAME_UNIT, sc->dev_desc, pci_resource_start(pci_dev, 0), pci_resource_start(pci_dev, 1), pci_resource_start(pci_dev, 6), pci_dev->irq, pci_name(pci_dev)); return 0; } /* This pci driver knows how to drive these devices: */ static __initdata struct pci_device_id pci_device_id_tbl[] = { /* Looking for a DEC 21140A chip on any Lan Media Corp card. */ { 0x1011, 0x0009, 0x1376, PCI_ANY_ID, 0, 0, 0 }, { 0, 0, 0, 0, 0, 0, 0 } }; MODULE_DEVICE_TABLE(pci, pci_device_id_tbl); static struct pci_driver pci_driver = { .name = DEVICE_NAME, .id_table = pci_device_id_tbl, .probe = linux_probe, .remove = __devexit_p(linux_remove), /* This driver does not suspend and resume. */ }; /* This ultimately calls our pci_driver.probe() method. */ static int __init linux_modload(void) { return pci_module_init(&pci_driver); } module_init(linux_modload); /* This ultimately calls our pci_driver.remove() method. 
*/ static void __exit linux_modunload(void) { pci_unregister_driver(&pci_driver); } module_exit(linux_modunload); MODULE_LICENSE("Dual BSD/GPL"); MODULE_DESCRIPTION("Device driver for SBE/LMC Wide-Area Network cards"); MODULE_AUTHOR("David Boggs "); #endif /* __linux__ */ Index: head/sys/dev/mge/if_mge.c =================================================================== --- head/sys/dev/mge/if_mge.c (revision 193095) +++ head/sys/dev/mge/if_mge.c (revision 193096) @@ -1,1824 +1,1828 @@ /*- * Copyright (C) 2008 MARVELL INTERNATIONAL LTD. * All rights reserved. * * Developed by Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of MARVELL nor the names of contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define MV_PHY_ADDR_BASE 8 #include #include #include #include "miibus_if.h" /* PHY registers are in the address space of the first mge unit */ static struct mge_softc *sc_mge0 = NULL; static int mge_probe(device_t dev); static int mge_attach(device_t dev); static int mge_detach(device_t dev); static int mge_shutdown(device_t dev); static int mge_suspend(device_t dev); static int mge_resume(device_t dev); static int mge_miibus_readreg(device_t dev, int phy, int reg); static void mge_miibus_writereg(device_t dev, int phy, int reg, int value); static int mge_ifmedia_upd(struct ifnet *ifp); static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr); static void mge_init(void *arg); static void mge_init_locked(void *arg); static void mge_start(struct ifnet *ifp); static void mge_start_locked(struct ifnet *ifp); static void mge_watchdog(struct mge_softc *sc); static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data); static uint32_t mge_tfut_ipg(uint32_t val, int ver); static uint32_t mge_rx_ipg(uint32_t val, int ver); static void mge_ver_params(struct mge_softc *sc); static void mge_intrs_ctrl(struct mge_softc *sc, int enable); static void mge_intr_rx(void *arg); -static void mge_intr_rx_locked(struct mge_softc *sc, int count); +static int mge_intr_rx_locked(struct mge_softc *sc, int count); static void mge_intr_tx(void *arg); static void mge_intr_tx_locked(struct mge_softc *sc); static void mge_intr_misc(void *arg); static void mge_intr_sum(void *arg); static void mge_intr_err(void *arg); static void mge_stop(struct mge_softc *sc); static void mge_tick(void *msc); static uint32_t mge_set_port_serial_control(uint32_t media); static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr); static void mge_set_mac_address(struct mge_softc *sc); static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue); static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue); static int mge_allocate_dma(struct mge_softc *sc); static int mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* desc_tab, uint32_t size, bus_dma_tag_t *buffer_tag); static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp, bus_addr_t *paddr); static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error); static void mge_free_dma(struct mge_softc *sc); static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab, uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs); static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame, uint32_t status, uint16_t bufsize); static void mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw); static uint8_t mge_crc8(uint8_t *data, int size); static void mge_setup_multicast(struct mge_softc *sc); static void mge_set_rxic(struct mge_softc *sc); static void mge_set_txic(struct mge_softc *sc); static void mge_add_sysctls(struct mge_softc *sc); static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS); static device_method_t mge_methods[] = { /* Device interface */ DEVMETHOD(device_probe, mge_probe), DEVMETHOD(device_attach, mge_attach), DEVMETHOD(device_detach, 
mge_detach), DEVMETHOD(device_shutdown, mge_shutdown), DEVMETHOD(device_suspend, mge_suspend), DEVMETHOD(device_resume, mge_resume), /* MII interface */ DEVMETHOD(miibus_readreg, mge_miibus_readreg), DEVMETHOD(miibus_writereg, mge_miibus_writereg), { 0, 0 } }; static driver_t mge_driver = { "mge", mge_methods, sizeof(struct mge_softc), }; static devclass_t mge_devclass; DRIVER_MODULE(mge, mbus, mge_driver, mge_devclass, 0, 0); DRIVER_MODULE(miibus, mge, miibus_driver, miibus_devclass, 0, 0); MODULE_DEPEND(mge, ether, 1, 1, 1); MODULE_DEPEND(mge, miibus, 1, 1, 1); static struct resource_spec res_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE }, { SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE }, { SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE }, { SYS_RES_IRQ, 4, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; static struct { driver_intr_t *handler; char * description; } mge_intrs[MGE_INTR_COUNT] = { { mge_intr_rx, "GbE receive interrupt" }, { mge_intr_tx, "GbE transmit interrupt" }, { mge_intr_misc,"GbE misc interrupt" }, { mge_intr_sum, "GbE summary interrupt" }, { mge_intr_err, "GbE error interrupt" }, }; static void mge_get_mac_address(struct mge_softc *sc, uint8_t *addr) { uint32_t mac_l, mac_h; /* XXX use currently programmed MAC address; eventually this info will * be provided by the loader */ mac_l = MGE_READ(sc, MGE_MAC_ADDR_L); mac_h = MGE_READ(sc, MGE_MAC_ADDR_H); addr[0] = (mac_h & 0xff000000) >> 24; addr[1] = (mac_h & 0x00ff0000) >> 16; addr[2] = (mac_h & 0x0000ff00) >> 8; addr[3] = (mac_h & 0x000000ff); addr[4] = (mac_l & 0x0000ff00) >> 8; addr[5] = (mac_l & 0x000000ff); } static uint32_t mge_tfut_ipg(uint32_t val, int ver) { switch (ver) { case 1: return ((val & 0x3fff) << 4); case 2: default: return ((val & 0xffff) << 4); } } static uint32_t mge_rx_ipg(uint32_t val, int ver) { switch (ver) { case 1: return ((val & 0x3fff) << 8); case 2: default: return (((val & 0x8000) << 10) | ((val & 0x7fff) << 7)); } } static void mge_ver_params(struct mge_softc *sc) { uint32_t d, r; soc_id(&d, &r); if (d == MV_DEV_88F6281 || d == MV_DEV_MV78100 || d == MV_DEV_MV78100_Z0) { sc->mge_ver = 2; sc->mge_mtu = 0x4e8; sc->mge_tfut_ipg_max = 0xFFFF; sc->mge_rx_ipg_max = 0xFFFF; sc->mge_tx_arb_cfg = 0xFC0000FF; sc->mge_tx_tok_cfg = 0xFFFF7FFF; sc->mge_tx_tok_cnt = 0x3FFFFFFF; } else { sc->mge_ver = 1; sc->mge_mtu = 0x458; sc->mge_tfut_ipg_max = 0x3FFF; sc->mge_rx_ipg_max = 0x3FFF; sc->mge_tx_arb_cfg = 0x000000FF; sc->mge_tx_tok_cfg = 0x3FFFFFFF; sc->mge_tx_tok_cnt = 0x3FFFFFFF; } } static void mge_set_mac_address(struct mge_softc *sc) { char *if_mac; uint32_t mac_l, mac_h; MGE_GLOBAL_LOCK_ASSERT(sc); if_mac = (char *)IF_LLADDR(sc->ifp); mac_l = (if_mac[4] << 8) | (if_mac[5]); mac_h = (if_mac[0] << 24)| (if_mac[1] << 16) | (if_mac[2] << 8) | (if_mac[3] << 0); MGE_WRITE(sc, MGE_MAC_ADDR_L, mac_l); MGE_WRITE(sc, MGE_MAC_ADDR_H, mac_h); mge_set_ucast_address(sc, if_mac[5], MGE_RX_DEFAULT_QUEUE); } static void mge_set_ucast_address(struct mge_softc *sc, uint8_t last_byte, uint8_t queue) { uint32_t reg_idx, reg_off, reg_val, i; last_byte &= 0xf; reg_idx = last_byte / MGE_UCAST_REG_NUMBER; reg_off = (last_byte % MGE_UCAST_REG_NUMBER) * 8; reg_val = (1 | (queue << 1)) << reg_off; for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) { if ( i == reg_idx) MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val); else MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), 0); } } static void mge_set_prom_mode(struct mge_softc *sc, uint8_t queue) { uint32_t port_config; uint32_t 
reg_val, i; /* Enable or disable promiscuous mode as needed */ if (sc->ifp->if_flags & IFF_PROMISC) { port_config = MGE_READ(sc, MGE_PORT_CONFIG); port_config |= PORT_CONFIG_UPM; MGE_WRITE(sc, MGE_PORT_CONFIG, port_config); reg_val = ((1 | (queue << 1)) | (1 | (queue << 1)) << 8 | (1 | (queue << 1)) << 16 | (1 | (queue << 1)) << 24); for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) { MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), reg_val); MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), reg_val); } for (i = 0; i < MGE_UCAST_REG_NUMBER; i++) MGE_WRITE(sc, MGE_DA_FILTER_UCAST(i), reg_val); } else { port_config = MGE_READ(sc, MGE_PORT_CONFIG); port_config &= ~PORT_CONFIG_UPM; MGE_WRITE(sc, MGE_PORT_CONFIG, port_config); for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) { MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), 0); MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), 0); } mge_set_mac_address(sc); } } static void mge_get_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { u_int32_t *paddr; KASSERT(nseg == 1, ("wrong number of segments, should be 1")); paddr = arg; *paddr = segs->ds_addr; } static int mge_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp, bus_addr_t *paddr) { struct mbuf *new_mbuf; bus_dma_segment_t seg[1]; int error; int nsegs; KASSERT(mbufp != NULL, ("NULL mbuf pointer!")); new_mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (new_mbuf == NULL) return (ENOBUFS); new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size; if (*mbufp) { bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(tag, map); } error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs, BUS_DMA_NOWAIT); KASSERT(nsegs == 1, ("Too many segments returned!")); if (nsegs != 1 || error) panic("mge_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error); bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD); (*mbufp) = new_mbuf; (*paddr) = seg->ds_addr; return (0); } static int mge_alloc_desc_dma(struct mge_softc *sc, struct mge_desc_wrapper* tab, uint32_t size, bus_dma_tag_t *buffer_tag) { struct mge_desc_wrapper *dw; bus_addr_t desc_paddr; int i, error; desc_paddr = 0; for (i = size - 1; i >= 0; i--) { dw = &(tab[i]); error = bus_dmamem_alloc(sc->mge_desc_dtag, (void**)&(dw->mge_desc), BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &(dw->desc_dmap)); if (error) { if_printf(sc->ifp, "failed to allocate DMA memory\n"); dw->mge_desc = NULL; return (ENXIO); } error = bus_dmamap_load(sc->mge_desc_dtag, dw->desc_dmap, dw->mge_desc, sizeof(struct mge_desc), mge_get_dma_addr, &(dw->mge_desc_paddr), BUS_DMA_NOWAIT); if (error) { if_printf(sc->ifp, "can't load descriptor\n"); bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc, dw->desc_dmap); dw->mge_desc = NULL; return (ENXIO); } /* Chain descriptors */ dw->mge_desc->next_desc = desc_paddr; desc_paddr = dw->mge_desc_paddr; } tab[size - 1].mge_desc->next_desc = desc_paddr; /* Allocate a busdma tag for mbufs. 
*/ error = bus_dma_tag_create(NULL, /* parent */ 8, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ MCLBYTES, 1, /* maxsize, nsegments */ MCLBYTES, 0, /* maxsegsz, flags */ NULL, NULL, /* lockfunc, lockfuncarg */ buffer_tag); /* dmat */ if (error) { if_printf(sc->ifp, "failed to create busdma tag for mbufs\n"); return (ENXIO); } /* Create TX busdma maps */ for (i = 0; i < size; i++) { dw = &(tab[i]); error = bus_dmamap_create(*buffer_tag, 0, &dw->buffer_dmap); if (error) { if_printf(sc->ifp, "failed to create map for mbuf\n"); return (ENXIO); } dw->buffer = (struct mbuf*)NULL; dw->mge_desc->buffer = (bus_addr_t)NULL; } return (0); } static int mge_allocate_dma(struct mge_softc *sc) { int error; struct mge_desc_wrapper *dw; int num, i; num = MGE_TX_DESC_NUM + MGE_RX_DESC_NUM; /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */ error = bus_dma_tag_create(NULL, /* parent */ 16, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ sizeof(struct mge_desc), 1, /* maxsize, nsegments */ sizeof(struct mge_desc), 0, /* maxsegsz, flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sc->mge_desc_dtag); /* dmat */ mge_alloc_desc_dma(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, &sc->mge_tx_dtag); mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, &sc->mge_rx_dtag); for (i = 0; i < MGE_RX_DESC_NUM; i++) { dw = &(sc->mge_rx_desc[i]); mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer, &dw->mge_desc->buffer); } sc->tx_desc_start = sc->mge_tx_desc[0].mge_desc_paddr; sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr; return (0); } static void mge_free_desc(struct mge_softc *sc, struct mge_desc_wrapper* tab, uint32_t size, bus_dma_tag_t buffer_tag, uint8_t free_mbufs) { struct mge_desc_wrapper *dw; int i; for (i = 0; i < size; i++) { /* Free RX mbuf */ dw = &(tab[i]); if (dw->buffer_dmap) { if (free_mbufs) { bus_dmamap_sync(buffer_tag, dw->buffer_dmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(buffer_tag, dw->buffer_dmap); } bus_dmamap_destroy(buffer_tag, dw->buffer_dmap); if (free_mbufs) m_freem(dw->buffer); } /* Free RX descriptors */ if (dw->desc_dmap) { bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->mge_desc_dtag, dw->desc_dmap); bus_dmamem_free(sc->mge_desc_dtag, dw->mge_desc, dw->desc_dmap); } } } static void mge_free_dma(struct mge_softc *sc) { /* Free descriptors and mbufs */ mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1); mge_free_desc(sc, sc->mge_tx_desc, MGE_TX_DESC_NUM, sc->mge_tx_dtag, 0); /* Destroy mbuf dma tag */ bus_dma_tag_destroy(sc->mge_tx_dtag); bus_dma_tag_destroy(sc->mge_rx_dtag); /* Destroy descriptors tag */ bus_dma_tag_destroy(sc->mge_desc_dtag); } static void mge_reinit_rx(struct mge_softc *sc) { struct mge_desc_wrapper *dw; int i; MGE_RECEIVE_LOCK(sc); mge_free_desc(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, sc->mge_rx_dtag, 1); mge_alloc_desc_dma(sc, sc->mge_rx_desc, MGE_RX_DESC_NUM, &sc->mge_rx_dtag); for (i = 0; i < MGE_RX_DESC_NUM; i++) { dw = &(sc->mge_rx_desc[i]); mge_new_rxbuf(sc->mge_rx_dtag, dw->buffer_dmap, &dw->buffer, &dw->mge_desc->buffer); } sc->rx_desc_start = sc->mge_rx_desc[0].mge_desc_paddr; sc->rx_desc_curr = 0; MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE), sc->rx_desc_start); /* Enable RX queue */ MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE));
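/* The RX ring is now rebuilt from scratch. mge_reinit_rx() is called after an RX resource error (MGE_PORT_INT_RXERRQ0), so reception restarts at rx_desc_start with fresh mbufs. */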
MGE_RECEIVE_UNLOCK(sc); } #ifdef DEVICE_POLLING static poll_handler_t mge_poll; -static void +static int mge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct mge_softc *sc = ifp->if_softc; uint32_t int_cause, int_cause_ext; + int rx_npkts = 0; MGE_GLOBAL_LOCK(sc); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { MGE_GLOBAL_UNLOCK(sc); - return; + return (rx_npkts); } if (cmd == POLL_AND_CHECK_STATUS) { int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE); int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT); /* Check for resource error */ if (int_cause & MGE_PORT_INT_RXERRQ0) mge_reinit_rx(sc); if (int_cause || int_cause_ext) { MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause); MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext); } } mge_intr_tx_locked(sc); - mge_intr_rx_locked(sc, count); + rx_npkts = mge_intr_rx_locked(sc, count); MGE_GLOBAL_UNLOCK(sc); + return (rx_npkts); } #endif /* DEVICE_POLLING */ static int mge_attach(device_t dev) { struct mge_softc *sc; struct ifnet *ifp; uint8_t hwaddr[ETHER_ADDR_LEN]; int i, error ; sc = device_get_softc(dev); sc->dev = dev; if (device_get_unit(dev) == 0) sc_mge0 = sc; /* Set chip version-dependent parameters */ mge_ver_params(sc); /* Initialize mutexes */ mtx_init(&sc->transmit_lock, device_get_nameunit(dev), "mge TX lock", MTX_DEF); mtx_init(&sc->receive_lock, device_get_nameunit(dev), "mge RX lock", MTX_DEF); /* Allocate IO and IRQ resources */ error = bus_alloc_resources(dev, res_spec, sc->res); if (error) { device_printf(dev, "could not allocate resources\n"); mge_detach(dev); return (ENXIO); } /* Allocate DMA, buffers, buffer descriptors */ error = mge_allocate_dma(sc); if (error) { mge_detach(dev); return (ENXIO); } sc->tx_desc_curr = 0; sc->rx_desc_curr = 0; sc->tx_desc_used_idx = 0; sc->tx_desc_used_count = 0; /* Configure defaults for interrupts coalescing */ sc->rx_ic_time = 768; sc->tx_ic_time = 768; mge_add_sysctls(sc); /* Allocate network interface */ ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "if_alloc() failed\n"); mge_detach(dev); return (ENOMEM); } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_softc = sc; ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST; ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_MTU; ifp->if_capenable = ifp->if_capabilities; ifp->if_hwassist = MGE_CHECKSUM_FEATURES; #ifdef DEVICE_POLLING /* Advertise that polling is supported */ ifp->if_capabilities |= IFCAP_POLLING; #endif ifp->if_init = mge_init; ifp->if_start = mge_start; ifp->if_ioctl = mge_ioctl; ifp->if_snd.ifq_drv_maxlen = MGE_TX_DESC_NUM - 1; IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); IFQ_SET_READY(&ifp->if_snd); mge_get_mac_address(sc, hwaddr); ether_ifattach(ifp, hwaddr); callout_init(&sc->wd_callout, 0); /* Probe PHY(s) */ error = mii_phy_probe(dev, &sc->miibus, mge_ifmedia_upd, mge_ifmedia_sts); if (error) { device_printf(dev, "MII failed to find PHY\n"); if_free(ifp); sc->ifp = NULL; mge_detach(dev); return (error); } sc->mii = device_get_softc(sc->miibus); /* Attach interrupt handlers */ for (i = 0; i < 2; ++i) { error = bus_setup_intr(dev, sc->res[1 + i], INTR_TYPE_NET | INTR_MPSAFE, NULL, *mge_intrs[i].handler, sc, &sc->ih_cookie[i]); if (error) { device_printf(dev, "could not setup %s\n", mge_intrs[i].description); ether_ifdetach(sc->ifp); return (error); } } return (0); } static int mge_detach(device_t dev) { struct mge_softc *sc; int error,i; sc = device_get_softc(dev); /* Stop controller and free TX queue */ if (sc->ifp) mge_shutdown(dev); /* Wait for 
stopping ticks */ callout_drain(&sc->wd_callout); /* Stop and release all interrupts */ for (i = 0; i < 2; ++i) { error = bus_teardown_intr(dev, sc->res[1 + i], sc->ih_cookie[i]); if (error) device_printf(dev, "could not release %s\n", mge_intrs[i].description); } /* Detach network interface */ if (sc->ifp) { ether_ifdetach(sc->ifp); if_free(sc->ifp); } /* Free DMA resources */ mge_free_dma(sc); /* Free IO memory handler */ bus_release_resources(dev, res_spec, sc->res); /* Destroy mutexes */ mtx_destroy(&sc->receive_lock); mtx_destroy(&sc->transmit_lock); return (0); } static void mge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct mge_softc *sc = ifp->if_softc; struct mii_data *mii; MGE_TRANSMIT_LOCK(sc); mii = sc->mii; mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; MGE_TRANSMIT_UNLOCK(sc); } static uint32_t mge_set_port_serial_control(uint32_t media) { uint32_t port_config; port_config = PORT_SERIAL_RES_BIT9 | PORT_SERIAL_FORCE_LINK_FAIL | PORT_SERIAL_MRU(PORT_SERIAL_MRU_1552); if (IFM_TYPE(media) == IFM_ETHER) { switch(IFM_SUBTYPE(media)) { case IFM_AUTO: break; case IFM_1000_T: port_config |= (PORT_SERIAL_GMII_SPEED_1000 | PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC | PORT_SERIAL_SPEED_AUTONEG); break; case IFM_100_TX: port_config |= (PORT_SERIAL_MII_SPEED_100 | PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC | PORT_SERIAL_SPEED_AUTONEG); break; case IFM_10_T: port_config |= (PORT_SERIAL_AUTONEG | PORT_SERIAL_AUTONEG_FC | PORT_SERIAL_SPEED_AUTONEG); break; } if (media & IFM_FDX) port_config |= PORT_SERIAL_FULL_DUPLEX; } return (port_config); } static int mge_ifmedia_upd(struct ifnet *ifp) { struct mge_softc *sc = ifp->if_softc; if (ifp->if_flags & IFF_UP) { MGE_GLOBAL_LOCK(sc); sc->mge_media_status = sc->mii->mii_media.ifm_media; mii_mediachg(sc->mii); mge_init_locked(sc); MGE_GLOBAL_UNLOCK(sc); } return (0); } static void mge_init(void *arg) { struct mge_softc *sc = arg; MGE_GLOBAL_LOCK(sc); mge_init_locked(arg); MGE_GLOBAL_UNLOCK(sc); } static void mge_init_locked(void *arg) { struct mge_softc *sc = arg; struct mge_desc_wrapper *dw; volatile uint32_t reg_val; int i, count; MGE_GLOBAL_LOCK_ASSERT(sc); /* Stop interface */ mge_stop(sc); /* Disable interrupts */ mge_intrs_ctrl(sc, 0); /* Set MAC address */ mge_set_mac_address(sc); /* Setup multicast filters */ mge_setup_multicast(sc); if (sc->mge_ver == 2) { MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL1, MGE_RGMII_EN); MGE_WRITE(sc, MGE_FIXED_PRIO_CONF, MGE_FIXED_PRIO_EN(0)); } /* Initialize TX queue configuration registers */ MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(0), sc->mge_tx_tok_cnt); MGE_WRITE(sc, MGE_TX_TOKEN_CONF(0), sc->mge_tx_tok_cfg); MGE_WRITE(sc, MGE_TX_ARBITER_CONF(0), sc->mge_tx_arb_cfg); /* Clear TX queue configuration registers for unused queues */ for (i = 1; i < 7; i++) { MGE_WRITE(sc, MGE_TX_TOKEN_COUNT(i), 0); MGE_WRITE(sc, MGE_TX_TOKEN_CONF(i), 0); MGE_WRITE(sc, MGE_TX_ARBITER_CONF(i), 0); } /* Set default MTU */ MGE_WRITE(sc, sc->mge_mtu, 0); /* Port configuration */ MGE_WRITE(sc, MGE_PORT_CONFIG, PORT_CONFIG_RXCS | PORT_CONFIG_DFLT_RXQ(0) | PORT_CONFIG_ARO_RXQ(0)); MGE_WRITE(sc, MGE_PORT_EXT_CONFIG , 0x0); /* Setup port configuration */ reg_val = mge_set_port_serial_control(sc->mge_media_status); MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val); /* Setup SDMA configuration */ MGE_WRITE(sc, MGE_SDMA_CONFIG , MGE_SDMA_RX_BYTE_SWAP | MGE_SDMA_TX_BYTE_SWAP | MGE_SDMA_RX_BURST_SIZE(MGE_SDMA_BURST_16_WORD) | MGE_SDMA_TX_BURST_SIZE(MGE_SDMA_BURST_16_WORD)); 
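/* Program the TX FIFO threshold and point the DMA engines at the first TX and RX descriptors set up by mge_allocate_dma(). */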
MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, 0x0); MGE_WRITE(sc, MGE_TX_CUR_DESC_PTR, sc->tx_desc_start); MGE_WRITE(sc, MGE_RX_CUR_DESC_PTR(MGE_RX_DEFAULT_QUEUE), sc->rx_desc_start); /* Reset descriptor indexes */ sc->tx_desc_curr = 0; sc->rx_desc_curr = 0; sc->tx_desc_used_idx = 0; sc->tx_desc_used_count = 0; /* Enable RX descriptors */ for (i = 0; i < MGE_RX_DESC_NUM; i++) { dw = &sc->mge_rx_desc[i]; dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED; dw->mge_desc->buff_size = MCLBYTES; bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } /* Enable RX queue */ MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_ENABLE_RXQ(MGE_RX_DEFAULT_QUEUE)); /* Enable port */ reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL); reg_val |= PORT_SERIAL_ENABLE; MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL, reg_val); count = 0x100000; for (;;) { reg_val = MGE_READ(sc, MGE_PORT_STATUS); if (reg_val & MGE_STATUS_LINKUP) break; DELAY(100); if (--count == 0) { if_printf(sc->ifp, "Timeout on link-up\n"); break; } } /* Setup interrupts coalescing */ mge_set_rxic(sc); mge_set_txic(sc); /* Enable interrupts */ #ifdef DEVICE_POLLING /* * * ...only if polling is not turned on. Disable interrupts explicitly * if polling is enabled. */ if (sc->ifp->if_capenable & IFCAP_POLLING) mge_intrs_ctrl(sc, 0); else #endif /* DEVICE_POLLING */ mge_intrs_ctrl(sc, 1); /* Activate network interface */ sc->ifp->if_drv_flags |= IFF_DRV_RUNNING; sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->wd_timer = 0; /* Schedule watchdog timeout */ callout_reset(&sc->wd_callout, hz, mge_tick, sc); } static void mge_intr_err(void *arg) { struct mge_softc *sc = arg; struct ifnet *ifp; ifp = sc->ifp; if_printf(ifp, "%s\n", __FUNCTION__); } static void mge_intr_misc(void *arg) { struct mge_softc *sc = arg; struct ifnet *ifp; ifp = sc->ifp; if_printf(ifp, "%s\n", __FUNCTION__); } static void mge_intr_rx(void *arg) { struct mge_softc *sc = arg; uint32_t int_cause, int_cause_ext; MGE_RECEIVE_LOCK(sc); #ifdef DEVICE_POLLING if (sc->ifp->if_capenable & IFCAP_POLLING) { MGE_RECEIVE_UNLOCK(sc); return; } #endif /* Get interrupt cause */ int_cause = MGE_READ(sc, MGE_PORT_INT_CAUSE); int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT); /* Check for resource error */ if (int_cause & MGE_PORT_INT_RXERRQ0) { mge_reinit_rx(sc); MGE_WRITE(sc, MGE_PORT_INT_CAUSE, int_cause & ~MGE_PORT_INT_RXERRQ0); } int_cause &= MGE_PORT_INT_RXQ0; int_cause_ext &= MGE_PORT_INT_EXT_RXOR; if (int_cause || int_cause_ext) { MGE_WRITE(sc, MGE_PORT_INT_CAUSE, ~int_cause); MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, ~int_cause_ext); mge_intr_rx_locked(sc, -1); } MGE_RECEIVE_UNLOCK(sc); } -static void +static int mge_intr_rx_locked(struct mge_softc *sc, int count) { struct ifnet *ifp = sc->ifp; uint32_t status; uint16_t bufsize; struct mge_desc_wrapper* dw; struct mbuf *mb; + int rx_npkts = 0; MGE_RECEIVE_LOCK_ASSERT(sc); while (count != 0) { dw = &sc->mge_rx_desc[sc->rx_desc_curr]; bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap, BUS_DMASYNC_POSTREAD); /* Get status */ status = dw->mge_desc->cmd_status; bufsize = dw->mge_desc->buff_size; if ((status & MGE_DMA_OWNED) != 0) break; if (dw->mge_desc->byte_count && !(status & MGE_ERR_SUMMARY)) { bus_dmamap_sync(sc->mge_rx_dtag, dw->buffer_dmap, BUS_DMASYNC_POSTREAD); mb = m_devget(dw->buffer->m_data, dw->mge_desc->byte_count - ETHER_CRC_LEN, 0, ifp, NULL); if (mb == NULL) /* Give up if no mbufs */ break; mb->m_len -= 2; mb->m_pkthdr.len -= 2; mb->m_data += 2; mge_offload_process_frame(ifp, mb, status, bufsize);
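/* Drop the receive lock around if_input(): the stack may re-enter the driver (e.g. to transmit a reply), and holding the lock across that call could deadlock. */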
MGE_RECEIVE_UNLOCK(sc); (*ifp->if_input)(ifp, mb); MGE_RECEIVE_LOCK(sc); + rx_npkts++; } dw->mge_desc->byte_count = 0; dw->mge_desc->cmd_status = MGE_RX_ENABLE_INT | MGE_DMA_OWNED; sc->rx_desc_curr = (++sc->rx_desc_curr % MGE_RX_DESC_NUM); bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if (count > 0) count -= 1; } - return; + return (rx_npkts); } static void mge_intr_sum(void *arg) { struct mge_softc *sc = arg; struct ifnet *ifp; ifp = sc->ifp; if_printf(ifp, "%s\n", __FUNCTION__); } static void mge_intr_tx(void *arg) { struct mge_softc *sc = arg; uint32_t int_cause_ext; MGE_TRANSMIT_LOCK(sc); #ifdef DEVICE_POLLING if (sc->ifp->if_capenable & IFCAP_POLLING) { MGE_TRANSMIT_UNLOCK(sc); return; } #endif /* Ack the interrupt */ int_cause_ext = MGE_READ(sc, MGE_PORT_INT_CAUSE_EXT); MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, int_cause_ext & ~MGE_PORT_INT_EXT_TXBUF0); mge_intr_tx_locked(sc); MGE_TRANSMIT_UNLOCK(sc); } static void mge_intr_tx_locked(struct mge_softc *sc) { struct ifnet *ifp = sc->ifp; struct mge_desc_wrapper *dw; struct mge_desc *desc; uint32_t status; int send = 0; MGE_TRANSMIT_LOCK_ASSERT(sc); /* Disable watchdog */ sc->wd_timer = 0; while (sc->tx_desc_used_count) { /* Get the descriptor */ dw = &sc->mge_tx_desc[sc->tx_desc_used_idx]; desc = dw->mge_desc; bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap, BUS_DMASYNC_POSTREAD); /* Get descriptor status */ status = desc->cmd_status; if (status & MGE_DMA_OWNED) break; sc->tx_desc_used_idx = (++sc->tx_desc_used_idx) % MGE_TX_DESC_NUM; sc->tx_desc_used_count--; /* Update collision statistics */ if (status & MGE_ERR_SUMMARY) { if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_LC) ifp->if_collisions++; if ((status & MGE_ERR_MASK) == MGE_TX_ERROR_RL) ifp->if_collisions += 16; } bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap); m_freem(dw->buffer); dw->buffer = (struct mbuf*)NULL; send++; ifp->if_opackets++; } if (send) { /* Now send anything that was pending */ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; mge_start_locked(ifp); } } static int mge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct mge_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; int mask, error; uint32_t flags; error = 0; switch (command) { case SIOCSIFFLAGS: MGE_GLOBAL_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { flags = ifp->if_flags ^ sc->mge_if_flags; if (flags & IFF_PROMISC) mge_set_prom_mode(sc, MGE_RX_DEFAULT_QUEUE); if (flags & IFF_ALLMULTI) mge_setup_multicast(sc); } else mge_init_locked(sc); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) mge_stop(sc); sc->mge_if_flags = ifp->if_flags; MGE_GLOBAL_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: if (ifp->if_drv_flags & IFF_DRV_RUNNING) { MGE_GLOBAL_LOCK(sc); mge_setup_multicast(sc); MGE_GLOBAL_UNLOCK(sc); } break; case SIOCSIFCAP: mask = ifp->if_capenable ^ ifr->ifr_reqcap; if (mask & IFCAP_HWCSUM) { ifp->if_capenable &= ~IFCAP_HWCSUM; ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap; if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist = MGE_CHECKSUM_FEATURES; else ifp->if_hwassist = 0; } #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(mge_poll, ifp); if (error) return(error); MGE_GLOBAL_LOCK(sc); mge_intrs_ctrl(sc, 0); ifp->if_capenable |= IFCAP_POLLING; MGE_GLOBAL_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); MGE_GLOBAL_LOCK(sc);
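/* Polling is off again, so re-enable interrupt generation in the hardware. */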
mge_intrs_ctrl(sc, 1); ifp->if_capenable &= ~IFCAP_POLLING; MGE_GLOBAL_UNLOCK(sc); } } #endif break; case SIOCGIFMEDIA: /* fall through */ case SIOCSIFMEDIA: if (IFM_SUBTYPE(ifr->ifr_media) == IFM_1000_T && !(ifr->ifr_media & IFM_FDX)) { device_printf(sc->dev, "1000baseTX half-duplex unsupported\n"); return 0; } error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command); break; default: error = ether_ioctl(ifp, command, data); } return (error); } static int mge_miibus_readreg(device_t dev, int phy, int reg) { uint32_t retries; /* * We assume static PHY address <=> device unit mapping: * PHY Address = MV_PHY_ADDR_BASE + device unit. * This is true for most Marvell boards. * * Code below ensures proper PHY detection on each device * unit. */ if ((MV_PHY_ADDR_BASE + device_get_unit(dev)) != phy) return (0); MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff & (MGE_SMI_READ | (reg << 21) | (phy << 16))); retries = MGE_SMI_READ_RETRIES; while (--retries && !(MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_READVALID)) DELAY(MGE_SMI_READ_DELAY); if (retries == 0) device_printf(dev, "Timeout while reading from PHY\n"); return (MGE_READ(sc_mge0, MGE_REG_SMI) & 0xffff); } static void mge_miibus_writereg(device_t dev, int phy, int reg, int value) { uint32_t retries; if ((MV_PHY_ADDR_BASE + device_get_unit(dev)) != phy) return; MGE_WRITE(sc_mge0, MGE_REG_SMI, 0x1fffffff & (MGE_SMI_WRITE | (reg << 21) | (phy << 16) | (value & 0xffff))); retries = MGE_SMI_WRITE_RETRIES; while (--retries && MGE_READ(sc_mge0, MGE_REG_SMI) & MGE_SMI_BUSY) DELAY(MGE_SMI_WRITE_DELAY); if (retries == 0) device_printf(dev, "Timeout while writing to PHY\n"); } static int mge_probe(device_t dev) { device_set_desc(dev, "Marvell Gigabit Ethernet controller"); return (BUS_PROBE_DEFAULT); } static int mge_resume(device_t dev) { device_printf(dev, "%s\n", __FUNCTION__); return (0); } static int mge_shutdown(device_t dev) { struct mge_softc *sc = device_get_softc(dev); MGE_GLOBAL_LOCK(sc); #ifdef DEVICE_POLLING if (sc->ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(sc->ifp); #endif mge_stop(sc); MGE_GLOBAL_UNLOCK(sc); return (0); } static int mge_encap(struct mge_softc *sc, struct mbuf *m0) { struct mge_desc_wrapper *dw = NULL; struct ifnet *ifp; bus_dma_segment_t segs[MGE_TX_DESC_NUM]; bus_dmamap_t mapp; int error; int seg, nsegs; int desc_no; ifp = sc->ifp; /* Check for free descriptors */ if (sc->tx_desc_used_count + 1 >= MGE_TX_DESC_NUM) { /* No free descriptors */ return (-1); } /* Fetch unused map */ desc_no = sc->tx_desc_curr; dw = &sc->mge_tx_desc[desc_no]; mapp = dw->buffer_dmap; bus_dmamap_sync(sc->mge_desc_dtag, mapp, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); /* Create mapping in DMA memory */ error = bus_dmamap_load_mbuf_sg(sc->mge_tx_dtag, mapp, m0, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0 || nsegs != 1 ) { bus_dmamap_unload(sc->mge_tx_dtag, mapp); return ((error != 0) ?
error : -1); } bus_dmamap_sync(sc->mge_tx_dtag, mapp, BUS_DMASYNC_PREWRITE); /* Everything is ok, now we can send buffers */ for (seg = 0; seg < nsegs; seg++) { dw->mge_desc->byte_count = segs[seg].ds_len; dw->mge_desc->buffer = segs[seg].ds_addr; dw->buffer = m0; dw->mge_desc->cmd_status = MGE_TX_LAST | MGE_TX_FIRST | MGE_TX_ETH_CRC | MGE_TX_EN_INT | MGE_TX_PADDING | MGE_DMA_OWNED; if (seg == 0) mge_offload_setup_descriptor(sc, dw); } bus_dmamap_sync(sc->mge_desc_dtag, mapp, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); sc->tx_desc_curr = (++sc->tx_desc_curr) % MGE_TX_DESC_NUM; sc->tx_desc_used_count++; return (0); } static void mge_tick(void *msc) { struct mge_softc *sc = msc; /* Check for TX timeout */ mge_watchdog(sc); mii_tick(sc->mii); /* Check for media type change */ if(sc->mge_media_status != sc->mii->mii_media.ifm_media) mge_ifmedia_upd(sc->ifp); /* Schedule another timeout one second from now */ callout_reset(&sc->wd_callout, hz, mge_tick, sc); } static void mge_watchdog(struct mge_softc *sc) { struct ifnet *ifp; ifp = sc->ifp; MGE_GLOBAL_LOCK(sc); if (sc->wd_timer == 0 || --sc->wd_timer) { MGE_GLOBAL_UNLOCK(sc); return; } ifp->if_oerrors++; if_printf(ifp, "watchdog timeout\n"); mge_stop(sc); mge_init_locked(sc); MGE_GLOBAL_UNLOCK(sc); } static void mge_start(struct ifnet *ifp) { struct mge_softc *sc = ifp->if_softc; MGE_TRANSMIT_LOCK(sc); mge_start_locked(ifp); MGE_TRANSMIT_UNLOCK(sc); } static void mge_start_locked(struct ifnet *ifp) { struct mge_softc *sc; struct mbuf *m0, *mtmp; uint32_t reg_val, queued = 0; sc = ifp->if_softc; MGE_TRANSMIT_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; for (;;) { /* Get packet from the queue */ IF_DEQUEUE(&ifp->if_snd, m0); if (m0 == NULL) break; mtmp = m_defrag(m0, M_DONTWAIT); if (mtmp) m0 = mtmp; if (mge_encap(sc, m0)) { IF_PREPEND(&ifp->if_snd, m0); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } queued++; BPF_MTAP(ifp, m0); } if (queued) { /* Enable transmitter and watchdog timer */ reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD); MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_ENABLE_TXQ); sc->wd_timer = 5; } } static void mge_stop(struct mge_softc *sc) { struct ifnet *ifp; volatile uint32_t reg_val, status; struct mge_desc_wrapper *dw; struct mge_desc *desc; int count; ifp = sc->ifp; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; /* Stop tick engine */ callout_stop(&sc->wd_callout); /* Disable interface */ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); sc->wd_timer = 0; /* Disable interrupts */ mge_intrs_ctrl(sc, 0); /* Disable Rx and Tx */ reg_val = MGE_READ(sc, MGE_TX_QUEUE_CMD); MGE_WRITE(sc, MGE_TX_QUEUE_CMD, reg_val | MGE_DISABLE_TXQ); MGE_WRITE(sc, MGE_RX_QUEUE_CMD, MGE_DISABLE_RXQ_ALL); /* Remove pending data from TX queue */ while (sc->tx_desc_used_idx != sc->tx_desc_curr && sc->tx_desc_used_count) { /* Get the descriptor */ dw = &sc->mge_tx_desc[sc->tx_desc_used_idx]; desc = dw->mge_desc; bus_dmamap_sync(sc->mge_desc_dtag, dw->desc_dmap, BUS_DMASYNC_POSTREAD); /* Get descriptor status */ status = desc->cmd_status; if (status & MGE_DMA_OWNED) break; sc->tx_desc_used_idx = (++sc->tx_desc_used_idx) % MGE_TX_DESC_NUM; sc->tx_desc_used_count--; bus_dmamap_sync(sc->mge_tx_dtag, dw->buffer_dmap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->mge_tx_dtag, dw->buffer_dmap); m_freem(dw->buffer); dw->buffer = (struct mbuf*)NULL; } /* Wait for end of transmission */ count = 0x100000; while (count--) { reg_val = MGE_READ(sc, MGE_PORT_STATUS); if ( !(reg_val & 
MGE_STATUS_TX_IN_PROG) && (reg_val & MGE_STATUS_TX_FIFO_EMPTY)) break; DELAY(100); } if(!count) if_printf(ifp, "%s: timeout while waiting for end of transmission\n", __FUNCTION__); reg_val = MGE_READ(sc, MGE_PORT_SERIAL_CTRL); reg_val &= ~(PORT_SERIAL_ENABLE); MGE_WRITE(sc, MGE_PORT_SERIAL_CTRL ,reg_val); } static int mge_suspend(device_t dev) { device_printf(dev, "%s\n", __FUNCTION__); return (0); } static void mge_offload_process_frame(struct ifnet *ifp, struct mbuf *frame, uint32_t status, uint16_t bufsize) { int csum_flags = 0; if (ifp->if_capenable & IFCAP_RXCSUM) { if ((status & MGE_RX_L3_IS_IP) && (status & MGE_RX_IP_OK)) csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID; if ((bufsize & MGE_RX_IP_FRAGMENT) == 0 && (MGE_RX_L4_IS_TCP(status) || MGE_RX_L4_IS_UDP(status)) && (status & MGE_RX_L4_CSUM_OK)) { csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; frame->m_pkthdr.csum_data = 0xFFFF; } frame->m_pkthdr.csum_flags = csum_flags; } } static void mge_offload_setup_descriptor(struct mge_softc *sc, struct mge_desc_wrapper *dw) { struct mbuf *m0 = dw->buffer; struct ether_vlan_header *eh = mtod(m0, struct ether_vlan_header *); int csum_flags = m0->m_pkthdr.csum_flags; int cmd_status = 0; struct ip *ip; int ehlen, etype; if (csum_flags) { if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { etype = ntohs(eh->evl_proto); ehlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; csum_flags |= MGE_TX_VLAN_TAGGED; } else { etype = ntohs(eh->evl_encap_proto); ehlen = ETHER_HDR_LEN; } if (etype != ETHERTYPE_IP) { if_printf(sc->ifp, "TCP/IP Offload enabled for unsupported " "protocol!\n"); return; } ip = (struct ip *)(m0->m_data + ehlen); cmd_status |= MGE_TX_IP_HDR_SIZE(ip->ip_hl); if ((m0->m_flags & M_FRAG) == 0) cmd_status |= MGE_TX_NOT_FRAGMENT; } if (csum_flags & CSUM_IP) cmd_status |= MGE_TX_GEN_IP_CSUM; if (csum_flags & CSUM_TCP) cmd_status |= MGE_TX_GEN_L4_CSUM; if (csum_flags & CSUM_UDP) cmd_status |= MGE_TX_GEN_L4_CSUM | MGE_TX_UDP; dw->mge_desc->cmd_status |= cmd_status; } static void mge_intrs_ctrl(struct mge_softc *sc, int enable) { if (enable) { MGE_WRITE(sc, MGE_PORT_INT_MASK , MGE_PORT_INT_RXQ0 | MGE_PORT_INT_EXTEND | MGE_PORT_INT_RXERRQ0); MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT , MGE_PORT_INT_EXT_TXERR0 | MGE_PORT_INT_EXT_RXOR | MGE_PORT_INT_EXT_TXUR | MGE_PORT_INT_EXT_TXBUF0); } else { MGE_WRITE(sc, MGE_INT_CAUSE, 0x0); MGE_WRITE(sc, MGE_INT_MASK, 0x0); MGE_WRITE(sc, MGE_PORT_INT_CAUSE, 0x0); MGE_WRITE(sc, MGE_PORT_INT_CAUSE_EXT, 0x0); MGE_WRITE(sc, MGE_PORT_INT_MASK, 0x0); MGE_WRITE(sc, MGE_PORT_INT_MASK_EXT, 0x0); } } static uint8_t mge_crc8(uint8_t *data, int size) { uint8_t crc = 0; static const uint8_t ct[256] = { 0x00, 0x07, 0x0E, 0x09, 0x1C, 0x1B, 0x12, 0x15, 0x38, 0x3F, 0x36, 0x31, 0x24, 0x23, 0x2A, 0x2D, 0x70, 0x77, 0x7E, 0x79, 0x6C, 0x6B, 0x62, 0x65, 0x48, 0x4F, 0x46, 0x41, 0x54, 0x53, 0x5A, 0x5D, 0xE0, 0xE7, 0xEE, 0xE9, 0xFC, 0xFB, 0xF2, 0xF5, 0xD8, 0xDF, 0xD6, 0xD1, 0xC4, 0xC3, 0xCA, 0xCD, 0x90, 0x97, 0x9E, 0x99, 0x8C, 0x8B, 0x82, 0x85, 0xA8, 0xAF, 0xA6, 0xA1, 0xB4, 0xB3, 0xBA, 0xBD, 0xC7, 0xC0, 0xC9, 0xCE, 0xDB, 0xDC, 0xD5, 0xD2, 0xFF, 0xF8, 0xF1, 0xF6, 0xE3, 0xE4, 0xED, 0xEA, 0xB7, 0xB0, 0xB9, 0xBE, 0xAB, 0xAC, 0xA5, 0xA2, 0x8F, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9D, 0x9A, 0x27, 0x20, 0x29, 0x2E, 0x3B, 0x3C, 0x35, 0x32, 0x1F, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0D, 0x0A, 0x57, 0x50, 0x59, 0x5E, 0x4B, 0x4C, 0x45, 0x42, 0x6F, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7D, 0x7A, 0x89, 0x8E, 0x87, 0x80, 0x95, 0x92, 0x9B, 0x9C, 0xB1, 0xB6, 0xBF, 0xB8, 0xAD, 0xAA, 0xA3, 0xA4, 0xF9, 0xFE, 0xF7, 0xF0, 0xE5, 
0xE2, 0xEB, 0xEC, 0xC1, 0xC6, 0xCF, 0xC8, 0xDD, 0xDA, 0xD3, 0xD4, 0x69, 0x6E, 0x67, 0x60, 0x75, 0x72, 0x7B, 0x7C, 0x51, 0x56, 0x5F, 0x58, 0x4D, 0x4A, 0x43, 0x44, 0x19, 0x1E, 0x17, 0x10, 0x05, 0x02, 0x0B, 0x0C, 0x21, 0x26, 0x2F, 0x28, 0x3D, 0x3A, 0x33, 0x34, 0x4E, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5C, 0x5B, 0x76, 0x71, 0x78, 0x7F, 0x6A, 0x6D, 0x64, 0x63, 0x3E, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2C, 0x2B, 0x06, 0x01, 0x08, 0x0F, 0x1A, 0x1D, 0x14, 0x13, 0xAE, 0xA9, 0xA0, 0xA7, 0xB2, 0xB5, 0xBC, 0xBB, 0x96, 0x91, 0x98, 0x9F, 0x8A, 0x8D, 0x84, 0x83, 0xDE, 0xD9, 0xD0, 0xD7, 0xC2, 0xC5, 0xCC, 0xCB, 0xE6, 0xE1, 0xE8, 0xEF, 0xFA, 0xFD, 0xF4, 0xF3 }; while(size--) crc = ct[crc ^ *(data++)]; return(crc); } static void mge_setup_multicast(struct mge_softc *sc) { uint8_t special[5] = { 0x01, 0x00, 0x5E, 0x00, 0x00 }; uint8_t v = (MGE_RX_DEFAULT_QUEUE << 1) | 1; uint32_t smt[MGE_MCAST_REG_NUMBER]; uint32_t omt[MGE_MCAST_REG_NUMBER]; struct ifnet *ifp = sc->ifp; struct ifmultiaddr *ifma; uint8_t *mac; int i; if (ifp->if_flags & IFF_ALLMULTI) { for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) smt[i] = omt[i] = (v << 24) | (v << 16) | (v << 8) | v; } else { memset(smt, 0, sizeof(smt)); memset(omt, 0, sizeof(omt)); IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; mac = LLADDR((struct sockaddr_dl *)ifma->ifma_addr); if (memcmp(mac, special, sizeof(special)) == 0) { i = mac[5]; smt[i >> 2] |= v << ((i & 0x03) << 3); } else { i = mge_crc8(mac, ETHER_ADDR_LEN); omt[i >> 2] |= v << ((i & 0x03) << 3); } } IF_ADDR_UNLOCK(ifp); } for (i = 0; i < MGE_MCAST_REG_NUMBER; i++) { MGE_WRITE(sc, MGE_DA_FILTER_SPEC_MCAST(i), smt[i]); MGE_WRITE(sc, MGE_DA_FILTER_OTH_MCAST(i), omt[i]); } } static void mge_set_rxic(struct mge_softc *sc) { uint32_t reg; if (sc->rx_ic_time > sc->mge_rx_ipg_max) sc->rx_ic_time = sc->mge_rx_ipg_max; reg = MGE_READ(sc, MGE_SDMA_CONFIG); reg &= ~mge_rx_ipg(sc->mge_rx_ipg_max, sc->mge_ver); reg |= mge_rx_ipg(sc->rx_ic_time, sc->mge_ver); MGE_WRITE(sc, MGE_SDMA_CONFIG, reg); } static void mge_set_txic(struct mge_softc *sc) { uint32_t reg; if (sc->tx_ic_time > sc->mge_tfut_ipg_max) sc->tx_ic_time = sc->mge_tfut_ipg_max; reg = MGE_READ(sc, MGE_TX_FIFO_URGENT_TRSH); reg &= ~mge_tfut_ipg(sc->mge_tfut_ipg_max, sc->mge_ver); reg |= mge_tfut_ipg(sc->tx_ic_time, sc->mge_ver); MGE_WRITE(sc, MGE_TX_FIFO_URGENT_TRSH, reg); } static int mge_sysctl_ic(SYSCTL_HANDLER_ARGS) { struct mge_softc *sc = (struct mge_softc *)arg1; uint32_t time; int error; time = (arg2 == MGE_IC_RX) ? 
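/*
 * mge_setup_multicast() above drives two hash tables: addresses of
 * the 01:00:5e:00:00:xx form index the "special" table directly by
 * their last byte, everything else is hashed with mge_crc8().  Each
 * table is MGE_MCAST_REG_NUMBER 32-bit registers holding four
 * one-byte queue enables, so bucket i lives in word i >> 2 at byte
 * lane i & 3.  A sketch of that packing (hypothetical helper, same
 * math as the code above):
 *
 *	static void
 *	mcast_set_bucket(uint32_t *table, int bucket, uint8_t v)
 *	{
 *		table[bucket >> 2] |= (uint32_t)v << ((bucket & 0x03) << 3);
 *	}
 *
 * where v encodes the Rx queue number and the enable bit, as in
 * (MGE_RX_DEFAULT_QUEUE << 1) | 1 above.
 */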
sc->rx_ic_time : sc->tx_ic_time; error = sysctl_handle_int(oidp, &time, 0, req); if (error != 0) return(error); MGE_GLOBAL_LOCK(sc); if (arg2 == MGE_IC_RX) { sc->rx_ic_time = time; mge_set_rxic(sc); } else { sc->tx_ic_time = time; mge_set_txic(sc); } MGE_GLOBAL_UNLOCK(sc); return(0); } static void mge_add_sysctls(struct mge_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; struct sysctl_oid *tree; ctx = device_get_sysctl_ctx(sc->dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal", CTLFLAG_RD, 0, "MGE Interrupts coalescing"); children = SYSCTL_CHILDREN(tree); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time", CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_RX, mge_sysctl_ic, "I", "IC RX time threshold"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time", CTLTYPE_UINT | CTLFLAG_RW, sc, MGE_IC_TX, mge_sysctl_ic, "I", "IC TX time threshold"); } Index: head/sys/dev/nfe/if_nfe.c =================================================================== --- head/sys/dev/nfe/if_nfe.c (revision 193095) +++ head/sys/dev/nfe/if_nfe.c (revision 193096) @@ -1,3206 +1,3216 @@ /* $OpenBSD: if_nfe.c,v 1.54 2006/04/07 12:38:12 jsg Exp $ */ /*- * Copyright (c) 2006 Shigeaki Tagashira * Copyright (c) 2006 Damien Bergamini * Copyright (c) 2005, 2006 Jonathan Gray * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* Driver for NVIDIA nForce MCP Fast Ethernet and Gigabit Ethernet */ #include __FBSDID("$FreeBSD$"); #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(nfe, pci, 1, 1, 1); MODULE_DEPEND(nfe, ether, 1, 1, 1); MODULE_DEPEND(nfe, miibus, 1, 1, 1); /* "device miibus" required. See GENERIC if you get errors here. 
*/ #include "miibus_if.h" static int nfe_probe(device_t); static int nfe_attach(device_t); static int nfe_detach(device_t); static int nfe_suspend(device_t); static int nfe_resume(device_t); static int nfe_shutdown(device_t); static void nfe_power(struct nfe_softc *); static int nfe_miibus_readreg(device_t, int, int); static int nfe_miibus_writereg(device_t, int, int, int); static void nfe_miibus_statchg(device_t); static void nfe_link_task(void *, int); static void nfe_set_intr(struct nfe_softc *); static __inline void nfe_enable_intr(struct nfe_softc *); static __inline void nfe_disable_intr(struct nfe_softc *); static int nfe_ioctl(struct ifnet *, u_long, caddr_t); static void nfe_alloc_msix(struct nfe_softc *, int); static int nfe_intr(void *); static void nfe_int_task(void *, int); static __inline void nfe_discard_rxbuf(struct nfe_softc *, int); static __inline void nfe_discard_jrxbuf(struct nfe_softc *, int); static int nfe_newbuf(struct nfe_softc *, int); static int nfe_jnewbuf(struct nfe_softc *, int); -static int nfe_rxeof(struct nfe_softc *, int); -static int nfe_jrxeof(struct nfe_softc *, int); +static int nfe_rxeof(struct nfe_softc *, int, int *); +static int nfe_jrxeof(struct nfe_softc *, int, int *); static void nfe_txeof(struct nfe_softc *); static int nfe_encap(struct nfe_softc *, struct mbuf **); static void nfe_setmulti(struct nfe_softc *); static void nfe_tx_task(void *, int); static void nfe_start(struct ifnet *); static void nfe_watchdog(struct ifnet *); static void nfe_init(void *); static void nfe_init_locked(void *); static void nfe_stop(struct ifnet *); static int nfe_alloc_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); static void nfe_alloc_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *); static int nfe_init_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); static int nfe_init_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *); static void nfe_free_rx_ring(struct nfe_softc *, struct nfe_rx_ring *); static void nfe_free_jrx_ring(struct nfe_softc *, struct nfe_jrx_ring *); static int nfe_alloc_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); static void nfe_init_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); static void nfe_free_tx_ring(struct nfe_softc *, struct nfe_tx_ring *); static int nfe_ifmedia_upd(struct ifnet *); static void nfe_ifmedia_sts(struct ifnet *, struct ifmediareq *); static void nfe_tick(void *); static void nfe_get_macaddr(struct nfe_softc *, uint8_t *); static void nfe_set_macaddr(struct nfe_softc *, uint8_t *); static void nfe_dma_map_segs(void *, bus_dma_segment_t *, int, int); static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS); static void nfe_sysctl_node(struct nfe_softc *); static void nfe_stats_clear(struct nfe_softc *); static void nfe_stats_update(struct nfe_softc *); #ifdef NFE_DEBUG static int nfedebug = 0; #define DPRINTF(sc, ...) do { \ if (nfedebug) \ device_printf((sc)->nfe_dev, __VA_ARGS__); \ } while (0) #define DPRINTFN(sc, n, ...) do { \ if (nfedebug >= (n)) \ device_printf((sc)->nfe_dev, __VA_ARGS__); \ } while (0) #else #define DPRINTF(sc, ...) #define DPRINTFN(sc, n, ...) #endif #define NFE_LOCK(_sc) mtx_lock(&(_sc)->nfe_mtx) #define NFE_UNLOCK(_sc) mtx_unlock(&(_sc)->nfe_mtx) #define NFE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->nfe_mtx, MA_OWNED) /* Tunables. 
*/ static int msi_disable = 0; static int msix_disable = 0; static int jumbo_disable = 0; TUNABLE_INT("hw.nfe.msi_disable", &msi_disable); TUNABLE_INT("hw.nfe.msix_disable", &msix_disable); TUNABLE_INT("hw.nfe.jumbo_disable", &jumbo_disable); static device_method_t nfe_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nfe_probe), DEVMETHOD(device_attach, nfe_attach), DEVMETHOD(device_detach, nfe_detach), DEVMETHOD(device_suspend, nfe_suspend), DEVMETHOD(device_resume, nfe_resume), DEVMETHOD(device_shutdown, nfe_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, nfe_miibus_readreg), DEVMETHOD(miibus_writereg, nfe_miibus_writereg), DEVMETHOD(miibus_statchg, nfe_miibus_statchg), { NULL, NULL } }; static driver_t nfe_driver = { "nfe", nfe_methods, sizeof(struct nfe_softc) }; static devclass_t nfe_devclass; DRIVER_MODULE(nfe, pci, nfe_driver, nfe_devclass, 0, 0); DRIVER_MODULE(miibus, nfe, miibus_driver, miibus_devclass, 0, 0); static struct nfe_type nfe_devs[] = { {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE_LAN, "NVIDIA nForce MCP Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_LAN, "NVIDIA nForce2 MCP2 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN1, "NVIDIA nForce2 400 MCP4 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE2_400_LAN2, "NVIDIA nForce2 400 MCP5 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN1, "NVIDIA nForce3 MCP3 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_250_LAN, "NVIDIA nForce3 250 MCP6 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE3_LAN4, "NVIDIA nForce3 MCP7 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN1, "NVIDIA nForce4 CK804 MCP8 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE4_LAN2, "NVIDIA nForce4 CK804 MCP9 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN1, "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP10 */ {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP04_LAN2, "NVIDIA nForce MCP04 Networking Adapter"}, /* MCP11 */ {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN1, "NVIDIA nForce 430 MCP12 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_NFORCE430_LAN2, "NVIDIA nForce 430 MCP13 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN1, "NVIDIA nForce MCP55 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP55_LAN2, "NVIDIA nForce MCP55 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN1, "NVIDIA nForce MCP61 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN2, "NVIDIA nForce MCP61 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN3, "NVIDIA nForce MCP61 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP61_LAN4, "NVIDIA nForce MCP61 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN1, "NVIDIA nForce MCP65 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN2, "NVIDIA nForce MCP65 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN3, "NVIDIA nForce MCP65 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP65_LAN4, "NVIDIA nForce MCP65 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN1, "NVIDIA nForce MCP67 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN2, "NVIDIA nForce MCP67 Networking 
Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN3, "NVIDIA nForce MCP67 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP67_LAN4, "NVIDIA nForce MCP67 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN1, "NVIDIA nForce MCP73 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN2, "NVIDIA nForce MCP73 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN3, "NVIDIA nForce MCP73 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP73_LAN4, "NVIDIA nForce MCP73 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN1, "NVIDIA nForce MCP77 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN2, "NVIDIA nForce MCP77 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN3, "NVIDIA nForce MCP77 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP77_LAN4, "NVIDIA nForce MCP77 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN1, "NVIDIA nForce MCP79 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN2, "NVIDIA nForce MCP79 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN3, "NVIDIA nForce MCP79 Networking Adapter"}, {PCI_VENDOR_NVIDIA, PCI_PRODUCT_NVIDIA_MCP79_LAN4, "NVIDIA nForce MCP79 Networking Adapter"}, {0, 0, NULL} }; /* Probe for supported hardware ID's */ static int nfe_probe(device_t dev) { struct nfe_type *t; t = nfe_devs; /* Check for matching PCI DEVICE ID's */ while (t->name != NULL) { if ((pci_get_vendor(dev) == t->vid_id) && (pci_get_device(dev) == t->dev_id)) { device_set_desc(dev, t->name); return (BUS_PROBE_DEFAULT); } t++; } return (ENXIO); } static void nfe_alloc_msix(struct nfe_softc *sc, int count) { int rid; rid = PCIR_BAR(2); sc->nfe_msix_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->nfe_msix_res == NULL) { device_printf(sc->nfe_dev, "couldn't allocate MSIX table resource\n"); return; } rid = PCIR_BAR(3); sc->nfe_msix_pba_res = bus_alloc_resource_any(sc->nfe_dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->nfe_msix_pba_res == NULL) { device_printf(sc->nfe_dev, "couldn't allocate MSIX PBA resource\n"); bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2), sc->nfe_msix_res); sc->nfe_msix_res = NULL; return; } if (pci_alloc_msix(sc->nfe_dev, &count) == 0) { if (count == NFE_MSI_MESSAGES) { if (bootverbose) device_printf(sc->nfe_dev, "Using %d MSIX messages\n", count); sc->nfe_msix = 1; } else { if (bootverbose) device_printf(sc->nfe_dev, "couldn't allocate MSIX\n"); pci_release_msi(sc->nfe_dev); bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(3), sc->nfe_msix_pba_res); bus_release_resource(sc->nfe_dev, SYS_RES_MEMORY, PCIR_BAR(2), sc->nfe_msix_res); sc->nfe_msix_pba_res = NULL; sc->nfe_msix_res = NULL; } } } static int nfe_attach(device_t dev) { struct nfe_softc *sc; struct ifnet *ifp; bus_addr_t dma_addr_max; int error = 0, i, msic, reg, rid; sc = device_get_softc(dev); sc->nfe_dev = dev; mtx_init(&sc->nfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->nfe_stat_ch, &sc->nfe_mtx, 0); TASK_INIT(&sc->nfe_link_task, 0, nfe_link_task, sc); pci_enable_busmaster(dev); rid = PCIR_BAR(0); sc->nfe_res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->nfe_res[0] == NULL) { device_printf(dev, "couldn't map memory resources\n"); mtx_destroy(&sc->nfe_mtx); return (ENXIO); } if (pci_find_extcap(dev, PCIY_EXPRESS, ®) == 0) { uint16_t v, width; v = pci_read_config(dev, reg + 
0x08, 2); /* Change max. read request size to 4096. */ v &= ~(7 << 12); v |= (5 << 12); pci_write_config(dev, reg + 0x08, v, 2); v = pci_read_config(dev, reg + 0x0c, 2); /* link capability */ v = (v >> 4) & 0x0f; width = pci_read_config(dev, reg + 0x12, 2); /* negotiated link width */ width = (width >> 4) & 0x3f; if (v != width) device_printf(sc->nfe_dev, "warning, negotiated width of link(x%d) != " "max. width of link(x%d)\n", width, v); } /* Allocate interrupt */ if (msix_disable == 0 || msi_disable == 0) { if (msix_disable == 0 && (msic = pci_msix_count(dev)) == NFE_MSI_MESSAGES) nfe_alloc_msix(sc, msic); if (msi_disable == 0 && sc->nfe_msix == 0 && (msic = pci_msi_count(dev)) == NFE_MSI_MESSAGES && pci_alloc_msi(dev, &msic) == 0) { if (msic == NFE_MSI_MESSAGES) { if (bootverbose) device_printf(dev, "Using %d MSI messages\n", msic); sc->nfe_msi = 1; } else pci_release_msi(dev); } } if (sc->nfe_msix == 0 && sc->nfe_msi == 0) { rid = 0; sc->nfe_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->nfe_irq[0] == NULL) { device_printf(dev, "couldn't allocate IRQ resources\n"); error = ENXIO; goto fail; } } else { for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) { sc->nfe_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->nfe_irq[i] == NULL) { device_printf(dev, "couldn't allocate IRQ resources for " "message %d\n", rid); error = ENXIO; goto fail; } } /* Map interrupts to vector 0. */ if (sc->nfe_msix != 0) { NFE_WRITE(sc, NFE_MSIX_MAP0, 0); NFE_WRITE(sc, NFE_MSIX_MAP1, 0); } else if (sc->nfe_msi != 0) { NFE_WRITE(sc, NFE_MSI_MAP0, 0); NFE_WRITE(sc, NFE_MSI_MAP1, 0); } } /* Set IRQ status/mask register. */ sc->nfe_irq_status = NFE_IRQ_STATUS; sc->nfe_irq_mask = NFE_IRQ_MASK; sc->nfe_intrs = NFE_IRQ_WANTED; sc->nfe_nointrs = 0; if (sc->nfe_msix != 0) { sc->nfe_irq_status = NFE_MSIX_IRQ_STATUS; sc->nfe_nointrs = NFE_IRQ_WANTED; } else if (sc->nfe_msi != 0) { sc->nfe_irq_mask = NFE_MSI_IRQ_MASK; sc->nfe_intrs = NFE_MSI_VECTOR_0_ENABLED; } sc->nfe_devid = pci_get_device(dev); sc->nfe_revid = pci_get_revid(dev); sc->nfe_flags = 0; switch (sc->nfe_devid) { case PCI_PRODUCT_NVIDIA_NFORCE3_LAN2: case PCI_PRODUCT_NVIDIA_NFORCE3_LAN3: case PCI_PRODUCT_NVIDIA_NFORCE3_LAN4: case PCI_PRODUCT_NVIDIA_NFORCE3_LAN5: sc->nfe_flags |= NFE_JUMBO_SUP | NFE_HW_CSUM; break; case PCI_PRODUCT_NVIDIA_MCP51_LAN1: case PCI_PRODUCT_NVIDIA_MCP51_LAN2: sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_MIB_V1; break; case PCI_PRODUCT_NVIDIA_CK804_LAN1: case PCI_PRODUCT_NVIDIA_CK804_LAN2: case PCI_PRODUCT_NVIDIA_MCP04_LAN1: case PCI_PRODUCT_NVIDIA_MCP04_LAN2: sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_MIB_V1; break; case PCI_PRODUCT_NVIDIA_MCP55_LAN1: case PCI_PRODUCT_NVIDIA_MCP55_LAN2: sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_HW_VLAN | NFE_PWR_MGMT | NFE_TX_FLOW_CTRL | NFE_MIB_V2; break; case PCI_PRODUCT_NVIDIA_MCP61_LAN1: case PCI_PRODUCT_NVIDIA_MCP61_LAN2: case PCI_PRODUCT_NVIDIA_MCP61_LAN3: case PCI_PRODUCT_NVIDIA_MCP61_LAN4: case PCI_PRODUCT_NVIDIA_MCP67_LAN1: case PCI_PRODUCT_NVIDIA_MCP67_LAN2: case PCI_PRODUCT_NVIDIA_MCP67_LAN3: case PCI_PRODUCT_NVIDIA_MCP67_LAN4: case PCI_PRODUCT_NVIDIA_MCP73_LAN1: case PCI_PRODUCT_NVIDIA_MCP73_LAN2: case PCI_PRODUCT_NVIDIA_MCP73_LAN3: case PCI_PRODUCT_NVIDIA_MCP73_LAN4: sc->nfe_flags |= NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2; break; case PCI_PRODUCT_NVIDIA_MCP77_LAN1: case PCI_PRODUCT_NVIDIA_MCP77_LAN2: case 
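/*
 * Interrupt setup in nfe_attach() above is a three-step fallback
 * ladder: try MSI-X (nfe_alloc_msix), then MSI, then a shared INTx
 * line.  Note the in/out semantics of the allocation calls: the count
 * goes in as the number of vectors wanted and comes back as the
 * number granted, which is why the code re-checks it against
 * NFE_MSI_MESSAGES.  Minimal sketch of the MSI leg:
 *
 *	int msic = pci_msi_count(dev);		// vectors the device offers
 *
 *	if (msic == NFE_MSI_MESSAGES && pci_alloc_msi(dev, &msic) == 0) {
 *		if (msic == NFE_MSI_MESSAGES)
 *			sc->nfe_msi = 1;	// full grant, use MSI
 *		else
 *			pci_release_msi(dev);	// partial grant, fall back
 *	}
 */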
PCI_PRODUCT_NVIDIA_MCP77_LAN3: case PCI_PRODUCT_NVIDIA_MCP77_LAN4: /* XXX flow control */ sc->nfe_flags |= NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3; break; case PCI_PRODUCT_NVIDIA_MCP79_LAN1: case PCI_PRODUCT_NVIDIA_MCP79_LAN2: case PCI_PRODUCT_NVIDIA_MCP79_LAN3: case PCI_PRODUCT_NVIDIA_MCP79_LAN4: /* XXX flow control */ sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_HW_CSUM | NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_MIB_V3; break; case PCI_PRODUCT_NVIDIA_MCP65_LAN1: case PCI_PRODUCT_NVIDIA_MCP65_LAN2: case PCI_PRODUCT_NVIDIA_MCP65_LAN3: case PCI_PRODUCT_NVIDIA_MCP65_LAN4: sc->nfe_flags |= NFE_JUMBO_SUP | NFE_40BIT_ADDR | NFE_PWR_MGMT | NFE_CORRECT_MACADDR | NFE_TX_FLOW_CTRL | NFE_MIB_V2; break; } nfe_power(sc); /* Check for reversed ethernet address */ if ((NFE_READ(sc, NFE_TX_UNK) & NFE_MAC_ADDR_INORDER) != 0) sc->nfe_flags |= NFE_CORRECT_MACADDR; nfe_get_macaddr(sc, sc->eaddr); /* * Allocate the parent bus DMA tag appropriate for PCI. */ dma_addr_max = BUS_SPACE_MAXADDR_32BIT; if ((sc->nfe_flags & NFE_40BIT_ADDR) != 0) dma_addr_max = NFE_DMA_MAXADDR; error = bus_dma_tag_create( bus_get_dma_tag(sc->nfe_dev), /* parent */ 1, 0, /* alignment, boundary */ dma_addr_max, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, 0, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->nfe_parent_tag); if (error) goto fail; ifp = sc->nfe_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } TASK_INIT(&sc->nfe_tx_task, 1, nfe_tx_task, ifp); /* * Allocate Tx and Rx rings. */ if ((error = nfe_alloc_tx_ring(sc, &sc->txq)) != 0) goto fail; if ((error = nfe_alloc_rx_ring(sc, &sc->rxq)) != 0) goto fail; nfe_alloc_jrx_ring(sc, &sc->jrxq); /* Create sysctl node. */ nfe_sysctl_node(sc); ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = nfe_ioctl; ifp->if_start = nfe_start; ifp->if_hwassist = 0; ifp->if_capabilities = 0; ifp->if_watchdog = NULL; ifp->if_init = nfe_init; IFQ_SET_MAXLEN(&ifp->if_snd, NFE_TX_RING_COUNT - 1); ifp->if_snd.ifq_drv_maxlen = NFE_TX_RING_COUNT - 1; IFQ_SET_READY(&ifp->if_snd); if (sc->nfe_flags & NFE_HW_CSUM) { ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_TSO4; ifp->if_hwassist |= NFE_CSUM_FEATURES | CSUM_TSO; } ifp->if_capenable = ifp->if_capabilities; sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS; /* VLAN capability setup. */ ifp->if_capabilities |= IFCAP_VLAN_MTU; if ((sc->nfe_flags & NFE_HW_VLAN) != 0) { ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; if ((ifp->if_capabilities & IFCAP_HWCSUM) != 0) ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; } ifp->if_capenable = ifp->if_capabilities; /* * Tell the upper layer(s) we support long frames. * Must appear after the call to ether_ifattach() because * ether_ifattach() sets ifi_hdrlen to the default value. 
*/ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif /* Do MII setup */ if (mii_phy_probe(dev, &sc->nfe_miibus, nfe_ifmedia_upd, nfe_ifmedia_sts)) { device_printf(dev, "MII without any phy!\n"); error = ENXIO; goto fail; } ether_ifattach(ifp, sc->eaddr); TASK_INIT(&sc->nfe_int_task, 0, nfe_int_task, sc); sc->nfe_tq = taskqueue_create_fast("nfe_taskq", M_WAITOK, taskqueue_thread_enqueue, &sc->nfe_tq); taskqueue_start_threads(&sc->nfe_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->nfe_dev)); error = 0; if (sc->nfe_msi == 0 && sc->nfe_msix == 0) { error = bus_setup_intr(dev, sc->nfe_irq[0], INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc, &sc->nfe_intrhand[0]); } else { for (i = 0; i < NFE_MSI_MESSAGES; i++) { error = bus_setup_intr(dev, sc->nfe_irq[i], INTR_TYPE_NET | INTR_MPSAFE, nfe_intr, NULL, sc, &sc->nfe_intrhand[i]); if (error != 0) break; } } if (error) { device_printf(dev, "couldn't set up irq\n"); taskqueue_free(sc->nfe_tq); sc->nfe_tq = NULL; ether_ifdetach(ifp); goto fail; } fail: if (error) nfe_detach(dev); return (error); } static int nfe_detach(device_t dev) { struct nfe_softc *sc; struct ifnet *ifp; uint8_t eaddr[ETHER_ADDR_LEN]; int i, rid; sc = device_get_softc(dev); KASSERT(mtx_initialized(&sc->nfe_mtx), ("nfe mutex not initialized")); ifp = sc->nfe_ifp; #ifdef DEVICE_POLLING if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif if (device_is_attached(dev)) { NFE_LOCK(sc); nfe_stop(ifp); ifp->if_flags &= ~IFF_UP; NFE_UNLOCK(sc); callout_drain(&sc->nfe_stat_ch); taskqueue_drain(taskqueue_fast, &sc->nfe_tx_task); taskqueue_drain(taskqueue_swi, &sc->nfe_link_task); ether_ifdetach(ifp); } if (ifp) { /* restore ethernet address */ if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) { for (i = 0; i < ETHER_ADDR_LEN; i++) { eaddr[i] = sc->eaddr[5 - i]; } } else bcopy(sc->eaddr, eaddr, ETHER_ADDR_LEN); nfe_set_macaddr(sc, eaddr); if_free(ifp); } if (sc->nfe_miibus) device_delete_child(dev, sc->nfe_miibus); bus_generic_detach(dev); if (sc->nfe_tq != NULL) { taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task); taskqueue_free(sc->nfe_tq); sc->nfe_tq = NULL; } for (i = 0; i < NFE_MSI_MESSAGES; i++) { if (sc->nfe_intrhand[i] != NULL) { bus_teardown_intr(dev, sc->nfe_irq[i], sc->nfe_intrhand[i]); sc->nfe_intrhand[i] = NULL; } } if (sc->nfe_msi == 0 && sc->nfe_msix == 0) { if (sc->nfe_irq[0] != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nfe_irq[0]); } else { for (i = 0, rid = 1; i < NFE_MSI_MESSAGES; i++, rid++) { if (sc->nfe_irq[i] != NULL) { bus_release_resource(dev, SYS_RES_IRQ, rid, sc->nfe_irq[i]); sc->nfe_irq[i] = NULL; } } pci_release_msi(dev); } if (sc->nfe_msix_pba_res != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(3), sc->nfe_msix_pba_res); sc->nfe_msix_pba_res = NULL; } if (sc->nfe_msix_res != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(2), sc->nfe_msix_res); sc->nfe_msix_res = NULL; } if (sc->nfe_res[0] != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), sc->nfe_res[0]); sc->nfe_res[0] = NULL; } nfe_free_tx_ring(sc, &sc->txq); nfe_free_rx_ring(sc, &sc->rxq); nfe_free_jrx_ring(sc, &sc->jrxq); if (sc->nfe_parent_tag) { bus_dma_tag_destroy(sc->nfe_parent_tag); sc->nfe_parent_tag = NULL; } mtx_destroy(&sc->nfe_mtx); return (0); } static int nfe_suspend(device_t dev) { struct nfe_softc *sc; sc = device_get_softc(dev); NFE_LOCK(sc); nfe_stop(sc->nfe_ifp); sc->nfe_suspended = 1; NFE_UNLOCK(sc); return (0); } static int 
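/*
 * nfe_detach() above unwinds in the reverse order of attach, and the
 * ordering matters: stop the chip under the lock, drain callouts and
 * taskqueues so no deferred work can run afterwards, detach the
 * ifnet, then tear down interrupts, bus resources, rings, the parent
 * DMA tag, and finally the mutex.  Condensed sketch of that order
 * (resource handling abbreviated, error paths elided):
 *
 *	NFE_LOCK(sc); nfe_stop(ifp); NFE_UNLOCK(sc);
 *	callout_drain(&sc->nfe_stat_ch);
 *	taskqueue_drain(sc->nfe_tq, &sc->nfe_int_task);
 *	ether_ifdetach(ifp);
 *	bus_teardown_intr(dev, sc->nfe_irq[0], sc->nfe_intrhand[0]);
 *	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nfe_irq[0]);
 *	bus_dma_tag_destroy(sc->nfe_parent_tag);
 *	mtx_destroy(&sc->nfe_mtx);
 */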
nfe_resume(device_t dev) { struct nfe_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); NFE_LOCK(sc); ifp = sc->nfe_ifp; if (ifp->if_flags & IFF_UP) nfe_init_locked(sc); sc->nfe_suspended = 0; NFE_UNLOCK(sc); return (0); } /* Take PHY/NIC out of powerdown, from Linux */ static void nfe_power(struct nfe_softc *sc) { uint32_t pwr; if ((sc->nfe_flags & NFE_PWR_MGMT) == 0) return; NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | NFE_RXTX_BIT2); NFE_WRITE(sc, NFE_MAC_RESET, NFE_MAC_RESET_MAGIC); DELAY(100); NFE_WRITE(sc, NFE_MAC_RESET, 0); DELAY(100); NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT2); pwr = NFE_READ(sc, NFE_PWR2_CTL); pwr &= ~NFE_PWR2_WAKEUP_MASK; if (sc->nfe_revid >= 0xa3 && (sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN1 || sc->nfe_devid == PCI_PRODUCT_NVIDIA_NFORCE430_LAN2)) pwr |= NFE_PWR2_REVA3; NFE_WRITE(sc, NFE_PWR2_CTL, pwr); } static void nfe_miibus_statchg(device_t dev) { struct nfe_softc *sc; sc = device_get_softc(dev); taskqueue_enqueue(taskqueue_swi, &sc->nfe_link_task); } static void nfe_link_task(void *arg, int pending) { struct nfe_softc *sc; struct mii_data *mii; struct ifnet *ifp; uint32_t phy, seed, misc = NFE_MISC1_MAGIC, link = NFE_MEDIA_SET; uint32_t gmask, rxctl, txctl, val; sc = (struct nfe_softc *)arg; NFE_LOCK(sc); mii = device_get_softc(sc->nfe_miibus); ifp = sc->nfe_ifp; if (mii == NULL || ifp == NULL) { NFE_UNLOCK(sc); return; } if (mii->mii_media_status & IFM_ACTIVE) { if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) sc->nfe_link = 1; } else sc->nfe_link = 0; phy = NFE_READ(sc, NFE_PHY_IFACE); phy &= ~(NFE_PHY_HDX | NFE_PHY_100TX | NFE_PHY_1000T); seed = NFE_READ(sc, NFE_RNDSEED); seed &= ~NFE_SEED_MASK; if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) == 0) { phy |= NFE_PHY_HDX; /* half-duplex */ misc |= NFE_MISC1_HDX; } switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_1000_T: /* full-duplex only */ link |= NFE_MEDIA_1000T; seed |= NFE_SEED_1000T; phy |= NFE_PHY_1000T; break; case IFM_100_TX: link |= NFE_MEDIA_100TX; seed |= NFE_SEED_100TX; phy |= NFE_PHY_100TX; break; case IFM_10_T: link |= NFE_MEDIA_10T; seed |= NFE_SEED_10T; break; } if ((phy & 0x10000000) != 0) { if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) val = NFE_R1_MAGIC_1000; else val = NFE_R1_MAGIC_10_100; } else val = NFE_R1_MAGIC_DEFAULT; NFE_WRITE(sc, NFE_SETUP_R1, val); NFE_WRITE(sc, NFE_RNDSEED, seed); /* XXX: gigabit NICs only? */ NFE_WRITE(sc, NFE_PHY_IFACE, phy); NFE_WRITE(sc, NFE_MISC1, misc); NFE_WRITE(sc, NFE_LINKSPEED, link); gmask = mii->mii_media_active & IFM_GMASK; if ((gmask & IFM_FDX) != 0) { /* It seems all hardware supports Rx pause frames.
*/ val = NFE_READ(sc, NFE_RXFILTER); if ((gmask & IFM_FLAG0) != 0) val |= NFE_PFF_RX_PAUSE; else val &= ~NFE_PFF_RX_PAUSE; NFE_WRITE(sc, NFE_RXFILTER, val); if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) { val = NFE_READ(sc, NFE_MISC1); if ((gmask & IFM_FLAG1) != 0) { NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_ENABLE); val |= NFE_MISC1_TX_PAUSE; } else { val &= ~NFE_MISC1_TX_PAUSE; NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE); } NFE_WRITE(sc, NFE_MISC1, val); } } else { /* disable rx/tx pause frames */ val = NFE_READ(sc, NFE_RXFILTER); val &= ~NFE_PFF_RX_PAUSE; NFE_WRITE(sc, NFE_RXFILTER, val); if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) { NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE); val = NFE_READ(sc, NFE_MISC1); val &= ~NFE_MISC1_TX_PAUSE; NFE_WRITE(sc, NFE_MISC1, val); } } txctl = NFE_READ(sc, NFE_TX_CTL); rxctl = NFE_READ(sc, NFE_RX_CTL); if (sc->nfe_link != 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { txctl |= NFE_TX_START; rxctl |= NFE_RX_START; } else { txctl &= ~NFE_TX_START; rxctl &= ~NFE_RX_START; } NFE_WRITE(sc, NFE_TX_CTL, txctl); NFE_WRITE(sc, NFE_RX_CTL, rxctl); NFE_UNLOCK(sc); } static int nfe_miibus_readreg(device_t dev, int phy, int reg) { struct nfe_softc *sc = device_get_softc(dev); uint32_t val; int ntries; NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); DELAY(100); } NFE_WRITE(sc, NFE_PHY_CTL, (phy << NFE_PHYADD_SHIFT) | reg); for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) { DELAY(100); if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) break; } if (ntries == NFE_TIMEOUT) { DPRINTFN(sc, 2, "timeout waiting for PHY\n"); return 0; } if (NFE_READ(sc, NFE_PHY_STATUS) & NFE_PHY_ERROR) { DPRINTFN(sc, 2, "could not read PHY\n"); return 0; } val = NFE_READ(sc, NFE_PHY_DATA); if (val != 0xffffffff && val != 0) sc->mii_phyaddr = phy; DPRINTFN(sc, 2, "mii read phy %d reg 0x%x ret 0x%x\n", phy, reg, val); return (val); } static int nfe_miibus_writereg(device_t dev, int phy, int reg, int val) { struct nfe_softc *sc = device_get_softc(dev); uint32_t ctl; int ntries; NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); if (NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY) { NFE_WRITE(sc, NFE_PHY_CTL, NFE_PHY_BUSY); DELAY(100); } NFE_WRITE(sc, NFE_PHY_DATA, val); ctl = NFE_PHY_WRITE | (phy << NFE_PHYADD_SHIFT) | reg; NFE_WRITE(sc, NFE_PHY_CTL, ctl); for (ntries = 0; ntries < NFE_TIMEOUT; ntries++) { DELAY(100); if (!(NFE_READ(sc, NFE_PHY_CTL) & NFE_PHY_BUSY)) break; } #ifdef NFE_DEBUG if (nfedebug >= 2 && ntries == NFE_TIMEOUT) device_printf(sc->nfe_dev, "could not write to PHY\n"); #endif return (0); } struct nfe_dmamap_arg { bus_addr_t nfe_busaddr; }; static int nfe_alloc_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) { struct nfe_dmamap_arg ctx; struct nfe_rx_data *data; void *desc; int i, error, descsize; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc = ring->desc64; descsize = sizeof (struct nfe_desc64); } else { desc = ring->desc32; descsize = sizeof (struct nfe_desc32); } ring->cur = ring->next = 0; error = bus_dma_tag_create(sc->nfe_parent_tag, NFE_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ NFE_RX_RING_COUNT * descsize, 1, /* maxsize, nsegments */ NFE_RX_RING_COUNT * descsize, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &ring->rx_desc_tag); if (error != 0) { device_printf(sc->nfe_dev, "could not create desc DMA tag\n"); goto fail; } /* allocate 
memory to desc */ error = bus_dmamem_alloc(ring->rx_desc_tag, &desc, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->rx_desc_map); if (error != 0) { device_printf(sc->nfe_dev, "could not create desc DMA map\n"); goto fail; } if (sc->nfe_flags & NFE_40BIT_ADDR) ring->desc64 = desc; else ring->desc32 = desc; /* map desc to device visible address space */ ctx.nfe_busaddr = 0; error = bus_dmamap_load(ring->rx_desc_tag, ring->rx_desc_map, desc, NFE_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); if (error != 0) { device_printf(sc->nfe_dev, "could not load desc DMA map\n"); goto fail; } ring->physaddr = ctx.nfe_busaddr; error = bus_dma_tag_create(sc->nfe_parent_tag, 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, 1, /* maxsize, nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &ring->rx_data_tag); if (error != 0) { device_printf(sc->nfe_dev, "could not create Rx DMA tag\n"); goto fail; } error = bus_dmamap_create(ring->rx_data_tag, 0, &ring->rx_spare_map); if (error != 0) { device_printf(sc->nfe_dev, "could not create Rx DMA spare map\n"); goto fail; } /* * Pre-allocate Rx buffers and populate Rx ring. */ for (i = 0; i < NFE_RX_RING_COUNT; i++) { data = &sc->rxq.data[i]; data->rx_data_map = NULL; data->m = NULL; error = bus_dmamap_create(ring->rx_data_tag, 0, &data->rx_data_map); if (error != 0) { device_printf(sc->nfe_dev, "could not create Rx DMA map\n"); goto fail; } } fail: return (error); } static void nfe_alloc_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) { struct nfe_dmamap_arg ctx; struct nfe_rx_data *data; void *desc; int i, error, descsize; if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0) return; if (jumbo_disable != 0) { device_printf(sc->nfe_dev, "disabling jumbo frame support\n"); sc->nfe_jumbo_disable = 1; return; } if (sc->nfe_flags & NFE_40BIT_ADDR) { desc = ring->jdesc64; descsize = sizeof (struct nfe_desc64); } else { desc = ring->jdesc32; descsize = sizeof (struct nfe_desc32); } ring->jcur = ring->jnext = 0; /* Create DMA tag for jumbo Rx ring. */ error = bus_dma_tag_create(sc->nfe_parent_tag, NFE_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsize */ 1, /* nsegments */ NFE_JUMBO_RX_RING_COUNT * descsize, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &ring->jrx_desc_tag); if (error != 0) { device_printf(sc->nfe_dev, "could not create jumbo ring DMA tag\n"); goto fail; } /* Create DMA tag for jumbo Rx buffers. */ error = bus_dma_tag_create(sc->nfe_parent_tag, 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MJUM9BYTES, /* maxsize */ 1, /* nsegments */ MJUM9BYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &ring->jrx_data_tag); if (error != 0) { device_printf(sc->nfe_dev, "could not create jumbo Rx buffer DMA tag\n"); goto fail; } /* Allocate DMA'able memory and load the DMA map for jumbo Rx ring. 
*/ error = bus_dmamem_alloc(ring->jrx_desc_tag, &desc, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->jrx_desc_map); if (error != 0) { device_printf(sc->nfe_dev, "could not allocate DMA'able memory for jumbo Rx ring\n"); goto fail; } if (sc->nfe_flags & NFE_40BIT_ADDR) ring->jdesc64 = desc; else ring->jdesc32 = desc; ctx.nfe_busaddr = 0; error = bus_dmamap_load(ring->jrx_desc_tag, ring->jrx_desc_map, desc, NFE_JUMBO_RX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); if (error != 0) { device_printf(sc->nfe_dev, "could not load DMA'able memory for jumbo Rx ring\n"); goto fail; } ring->jphysaddr = ctx.nfe_busaddr; /* Create DMA maps for jumbo Rx buffers. */ error = bus_dmamap_create(ring->jrx_data_tag, 0, &ring->jrx_spare_map); if (error != 0) { device_printf(sc->nfe_dev, "could not create jumbo Rx DMA spare map\n"); goto fail; } for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { data = &sc->jrxq.jdata[i]; data->rx_data_map = NULL; data->m = NULL; error = bus_dmamap_create(ring->jrx_data_tag, 0, &data->rx_data_map); if (error != 0) { device_printf(sc->nfe_dev, "could not create jumbo Rx DMA map\n"); goto fail; } } return; fail: /* * Running without jumbo frame support is ok for most cases * so don't fail on creating dma tag/map for jumbo frame. */ nfe_free_jrx_ring(sc, ring); device_printf(sc->nfe_dev, "disabling jumbo frame support due to " "resource shortage\n"); sc->nfe_jumbo_disable = 1; } static int nfe_init_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) { void *desc; size_t descsize; int i; ring->cur = ring->next = 0; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc = ring->desc64; descsize = sizeof (struct nfe_desc64); } else { desc = ring->desc32; descsize = sizeof (struct nfe_desc32); } bzero(desc, descsize * NFE_RX_RING_COUNT); for (i = 0; i < NFE_RX_RING_COUNT; i++) { if (nfe_newbuf(sc, i) != 0) return (ENOBUFS); } bus_dmamap_sync(ring->rx_desc_tag, ring->rx_desc_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } static int nfe_init_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) { void *desc; size_t descsize; int i; ring->jcur = ring->jnext = 0; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc = ring->jdesc64; descsize = sizeof (struct nfe_desc64); } else { desc = ring->jdesc32; descsize = sizeof (struct nfe_desc32); } bzero(desc, descsize * NFE_JUMBO_RX_RING_COUNT); for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { if (nfe_jnewbuf(sc, i) != 0) return (ENOBUFS); } bus_dmamap_sync(ring->jrx_desc_tag, ring->jrx_desc_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } static void nfe_free_rx_ring(struct nfe_softc *sc, struct nfe_rx_ring *ring) { struct nfe_rx_data *data; void *desc; int i, descsize; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc = ring->desc64; descsize = sizeof (struct nfe_desc64); } else { desc = ring->desc32; descsize = sizeof (struct nfe_desc32); } for (i = 0; i < NFE_RX_RING_COUNT; i++) { data = &ring->data[i]; if (data->rx_data_map != NULL) { bus_dmamap_destroy(ring->rx_data_tag, data->rx_data_map); data->rx_data_map = NULL; } if (data->m != NULL) { m_freem(data->m); data->m = NULL; } } if (ring->rx_data_tag != NULL) { if (ring->rx_spare_map != NULL) { bus_dmamap_destroy(ring->rx_data_tag, ring->rx_spare_map); ring->rx_spare_map = NULL; } bus_dma_tag_destroy(ring->rx_data_tag); ring->rx_data_tag = NULL; } if (desc != NULL) { bus_dmamap_unload(ring->rx_desc_tag, ring->rx_desc_map); bus_dmamem_free(ring->rx_desc_tag, desc, ring->rx_desc_map); ring->desc64 = NULL; ring->desc32 = NULL; ring->rx_desc_map = NULL; } if 
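/*
 * Both ring-init routines above follow the same three-step recipe:
 * wipe the descriptor array, give every slot a fresh mbuf, then sync
 * the descriptor map so the device sees the initialized state.
 * Generic sketch (names abstracted from the two variants):
 *
 *	bzero(desc, descsize * count);		// 1: clear stale state
 *	for (i = 0; i < count; i++)
 *		if (newbuf(sc, i) != 0)		// 2: populate each slot
 *			return (ENOBUFS);
 *	bus_dmamap_sync(tag, map,		// 3: push to the device
 *	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 */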
(ring->rx_desc_tag != NULL) { bus_dma_tag_destroy(ring->rx_desc_tag); ring->rx_desc_tag = NULL; } } static void nfe_free_jrx_ring(struct nfe_softc *sc, struct nfe_jrx_ring *ring) { struct nfe_rx_data *data; void *desc; int i, descsize; if ((sc->nfe_flags & NFE_JUMBO_SUP) == 0) return; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc = ring->jdesc64; descsize = sizeof (struct nfe_desc64); } else { desc = ring->jdesc32; descsize = sizeof (struct nfe_desc32); } for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { data = &ring->jdata[i]; if (data->rx_data_map != NULL) { bus_dmamap_destroy(ring->jrx_data_tag, data->rx_data_map); data->rx_data_map = NULL; } if (data->m != NULL) { m_freem(data->m); data->m = NULL; } } if (ring->jrx_data_tag != NULL) { if (ring->jrx_spare_map != NULL) { bus_dmamap_destroy(ring->jrx_data_tag, ring->jrx_spare_map); ring->jrx_spare_map = NULL; } bus_dma_tag_destroy(ring->jrx_data_tag); ring->jrx_data_tag = NULL; } if (desc != NULL) { bus_dmamap_unload(ring->jrx_desc_tag, ring->jrx_desc_map); bus_dmamem_free(ring->jrx_desc_tag, desc, ring->jrx_desc_map); ring->jdesc64 = NULL; ring->jdesc32 = NULL; ring->jrx_desc_map = NULL; } if (ring->jrx_desc_tag != NULL) { bus_dma_tag_destroy(ring->jrx_desc_tag); ring->jrx_desc_tag = NULL; } } static int nfe_alloc_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) { struct nfe_dmamap_arg ctx; int i, error; void *desc; int descsize; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc = ring->desc64; descsize = sizeof (struct nfe_desc64); } else { desc = ring->desc32; descsize = sizeof (struct nfe_desc32); } ring->queued = 0; ring->cur = ring->next = 0; error = bus_dma_tag_create(sc->nfe_parent_tag, NFE_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ NFE_TX_RING_COUNT * descsize, 1, /* maxsize, nsegments */ NFE_TX_RING_COUNT * descsize, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &ring->tx_desc_tag); if (error != 0) { device_printf(sc->nfe_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->tx_desc_tag, &desc, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &ring->tx_desc_map); if (error != 0) { device_printf(sc->nfe_dev, "could not create desc DMA map\n"); goto fail; } if (sc->nfe_flags & NFE_40BIT_ADDR) ring->desc64 = desc; else ring->desc32 = desc; ctx.nfe_busaddr = 0; error = bus_dmamap_load(ring->tx_desc_tag, ring->tx_desc_map, desc, NFE_TX_RING_COUNT * descsize, nfe_dma_map_segs, &ctx, 0); if (error != 0) { device_printf(sc->nfe_dev, "could not load desc DMA map\n"); goto fail; } ring->physaddr = ctx.nfe_busaddr; error = bus_dma_tag_create(sc->nfe_parent_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, NFE_TSO_MAXSIZE, NFE_MAX_SCATTER, NFE_TSO_MAXSGSIZE, 0, NULL, NULL, &ring->tx_data_tag); if (error != 0) { device_printf(sc->nfe_dev, "could not create Tx DMA tag\n"); goto fail; } for (i = 0; i < NFE_TX_RING_COUNT; i++) { error = bus_dmamap_create(ring->tx_data_tag, 0, &ring->data[i].tx_data_map); if (error != 0) { device_printf(sc->nfe_dev, "could not create Tx DMA map\n"); goto fail; } } fail: return (error); } static void nfe_init_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) { void *desc; size_t descsize; sc->nfe_force_tx = 0; ring->queued = 0; ring->cur = ring->next = 0; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc = ring->desc64; descsize = sizeof (struct nfe_desc64); } else { desc = ring->desc32; descsize = sizeof (struct nfe_desc32); } bzero(desc, descsize * 
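/*
 * The free routines above respect bus_dma's teardown ordering for a
 * loaded descriptor ring: sync, unload the map, free the memory, and
 * destroy the tag last.  Per-buffer maps must likewise be destroyed
 * before their tag.  Sketch of the unwind for one ring:
 *
 *	bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTWRITE);
 *	bus_dmamap_unload(tag, map);		// drop the h/w mapping
 *	bus_dmamem_free(tag, desc, map);	// release the memory
 *	bus_dma_tag_destroy(tag);		// the tag goes last
 */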
NFE_TX_RING_COUNT); bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } static void nfe_free_tx_ring(struct nfe_softc *sc, struct nfe_tx_ring *ring) { struct nfe_tx_data *data; void *desc; int i, descsize; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc = ring->desc64; descsize = sizeof (struct nfe_desc64); } else { desc = ring->desc32; descsize = sizeof (struct nfe_desc32); } for (i = 0; i < NFE_TX_RING_COUNT; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->tx_data_tag, data->tx_data_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->tx_data_tag, data->tx_data_map); m_freem(data->m); data->m = NULL; } if (data->tx_data_map != NULL) { bus_dmamap_destroy(ring->tx_data_tag, data->tx_data_map); data->tx_data_map = NULL; } } if (ring->tx_data_tag != NULL) { bus_dma_tag_destroy(ring->tx_data_tag); ring->tx_data_tag = NULL; } if (desc != NULL) { bus_dmamap_sync(ring->tx_desc_tag, ring->tx_desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->tx_desc_tag, ring->tx_desc_map); bus_dmamem_free(ring->tx_desc_tag, desc, ring->tx_desc_map); ring->desc64 = NULL; ring->desc32 = NULL; ring->tx_desc_map = NULL; bus_dma_tag_destroy(ring->tx_desc_tag); ring->tx_desc_tag = NULL; } } #ifdef DEVICE_POLLING static poll_handler_t nfe_poll; -static void +static int nfe_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct nfe_softc *sc = ifp->if_softc; uint32_t r; + int rx_npkts = 0; NFE_LOCK(sc); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { NFE_UNLOCK(sc); - return; + return (rx_npkts); } if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) - nfe_jrxeof(sc, count); + nfe_jrxeof(sc, count, &rx_npkts); else - nfe_rxeof(sc, count); + nfe_rxeof(sc, count, &rx_npkts); nfe_txeof(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task); if (cmd == POLL_AND_CHECK_STATUS) { if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) { NFE_UNLOCK(sc); - return; + return (rx_npkts); } NFE_WRITE(sc, sc->nfe_irq_status, r); if (r & NFE_IRQ_LINK) { NFE_READ(sc, NFE_PHY_STATUS); NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); DPRINTF(sc, "link state changed\n"); } } NFE_UNLOCK(sc); + return (rx_npkts); } #endif /* DEVICE_POLLING */ static void nfe_set_intr(struct nfe_softc *sc) { if (sc->nfe_msi != 0) NFE_WRITE(sc, NFE_IRQ_MASK, NFE_IRQ_WANTED); } /* In MSIX, a write to mask registers behaves as XOR. */ static __inline void nfe_enable_intr(struct nfe_softc *sc) { if (sc->nfe_msix != 0) { /* XXX Should have a better way to enable interrupts! */ if (NFE_READ(sc, sc->nfe_irq_mask) == 0) NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs); } else NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_intrs); } static __inline void nfe_disable_intr(struct nfe_softc *sc) { if (sc->nfe_msix != 0) { /* XXX Should have a better way to disable interrupts!
*/ if (NFE_READ(sc, sc->nfe_irq_mask) != 0) NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs); } else NFE_WRITE(sc, sc->nfe_irq_mask, sc->nfe_nointrs); } static int nfe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct nfe_softc *sc; struct ifreq *ifr; struct mii_data *mii; int error, init, mask; sc = ifp->if_softc; ifr = (struct ifreq *) data; error = 0; init = 0; switch (cmd) { case SIOCSIFMTU: if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > NFE_JUMBO_MTU) error = EINVAL; else if (ifp->if_mtu != ifr->ifr_mtu) { if ((((sc->nfe_flags & NFE_JUMBO_SUP) == 0) || (sc->nfe_jumbo_disable != 0)) && ifr->ifr_mtu > ETHERMTU) error = EINVAL; else { NFE_LOCK(sc); ifp->if_mtu = ifr->ifr_mtu; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) nfe_init_locked(sc); NFE_UNLOCK(sc); } } break; case SIOCSIFFLAGS: NFE_LOCK(sc); if (ifp->if_flags & IFF_UP) { /* * If only the PROMISC or ALLMULTI flag changes, then * don't do a full re-init of the chip, just update * the Rx filter. */ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && ((ifp->if_flags ^ sc->nfe_if_flags) & (IFF_ALLMULTI | IFF_PROMISC)) != 0) nfe_setmulti(sc); else nfe_init_locked(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) nfe_stop(ifp); } sc->nfe_if_flags = ifp->if_flags; NFE_UNLOCK(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { NFE_LOCK(sc); nfe_setmulti(sc); NFE_UNLOCK(sc); error = 0; } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: mii = device_get_softc(sc->nfe_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); break; case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ ifp->if_capenable; #ifdef DEVICE_POLLING if ((mask & IFCAP_POLLING) != 0) { if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { error = ether_poll_register(nfe_poll, ifp); if (error) break; NFE_LOCK(sc); nfe_disable_intr(sc); ifp->if_capenable |= IFCAP_POLLING; NFE_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); /* Enable interrupt even in error case */ NFE_LOCK(sc); nfe_enable_intr(sc); ifp->if_capenable &= ~IFCAP_POLLING; NFE_UNLOCK(sc); } } #endif /* DEVICE_POLLING */ if ((sc->nfe_flags & NFE_HW_CSUM) != 0 && (mask & IFCAP_HWCSUM) != 0) { ifp->if_capenable ^= IFCAP_HWCSUM; if ((IFCAP_TXCSUM & ifp->if_capenable) != 0 && (IFCAP_TXCSUM & ifp->if_capabilities) != 0) ifp->if_hwassist |= NFE_CSUM_FEATURES; else ifp->if_hwassist &= ~NFE_CSUM_FEATURES; init++; } if ((sc->nfe_flags & NFE_HW_VLAN) != 0 && (mask & IFCAP_VLAN_HWTAGGING) != 0) { ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; init++; } /* * XXX * It seems that VLAN stripping requires Rx checksum offload. * Unfortunately FreeBSD has no way to disable only Rx side * VLAN stripping. So when we know Rx checksum offload is * disabled turn entire hardware VLAN assist off. 
*/ if ((sc->nfe_flags & (NFE_HW_CSUM | NFE_HW_VLAN)) == (NFE_HW_CSUM | NFE_HW_VLAN)) { if ((ifp->if_capenable & IFCAP_RXCSUM) == 0) ifp->if_capenable &= ~IFCAP_VLAN_HWTAGGING; } if ((sc->nfe_flags & NFE_HW_CSUM) != 0 && (mask & IFCAP_TSO4) != 0) { ifp->if_capenable ^= IFCAP_TSO4; if ((IFCAP_TSO4 & ifp->if_capenable) != 0 && (IFCAP_TSO4 & ifp->if_capabilities) != 0) ifp->if_hwassist |= CSUM_TSO; else ifp->if_hwassist &= ~CSUM_TSO; } if (init > 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; nfe_init(sc); } if ((sc->nfe_flags & NFE_HW_VLAN) != 0) VLAN_CAPABILITIES(ifp); break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } static int nfe_intr(void *arg) { struct nfe_softc *sc; uint32_t status; sc = (struct nfe_softc *)arg; status = NFE_READ(sc, sc->nfe_irq_status); if (status == 0 || status == 0xffffffff) return (FILTER_STRAY); nfe_disable_intr(sc); taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task); return (FILTER_HANDLED); } static void nfe_int_task(void *arg, int pending) { struct nfe_softc *sc = arg; struct ifnet *ifp = sc->nfe_ifp; uint32_t r; int domore; NFE_LOCK(sc); if ((r = NFE_READ(sc, sc->nfe_irq_status)) == 0) { nfe_enable_intr(sc); NFE_UNLOCK(sc); return; /* not for us */ } NFE_WRITE(sc, sc->nfe_irq_status, r); DPRINTFN(sc, 5, "nfe_intr: interrupt register %x\n", r); #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { NFE_UNLOCK(sc); return; } #endif if (r & NFE_IRQ_LINK) { NFE_READ(sc, NFE_PHY_STATUS); NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); DPRINTF(sc, "link state changed\n"); } if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { NFE_UNLOCK(sc); nfe_enable_intr(sc); return; } domore = 0; /* check Rx ring */ if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) - domore = nfe_jrxeof(sc, sc->nfe_process_limit); + domore = nfe_jrxeof(sc, sc->nfe_process_limit, NULL); else - domore = nfe_rxeof(sc, sc->nfe_process_limit); + domore = nfe_rxeof(sc, sc->nfe_process_limit, NULL); /* check Tx ring */ nfe_txeof(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task); NFE_UNLOCK(sc); if (domore || (NFE_READ(sc, sc->nfe_irq_status) != 0)) { taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_int_task); return; } /* Reenable interrupts. */ nfe_enable_intr(sc); } static __inline void nfe_discard_rxbuf(struct nfe_softc *sc, int idx) { struct nfe_desc32 *desc32; struct nfe_desc64 *desc64; struct nfe_rx_data *data; struct mbuf *m; data = &sc->rxq.data[idx]; m = data->m; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->rxq.desc64[idx]; /* VLAN packet may have overwritten it. */ desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr)); desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr)); desc64->length = htole16(m->m_len); desc64->flags = htole16(NFE_RX_READY); } else { desc32 = &sc->rxq.desc32[idx]; desc32->length = htole16(m->m_len); desc32->flags = htole16(NFE_RX_READY); } } static __inline void nfe_discard_jrxbuf(struct nfe_softc *sc, int idx) { struct nfe_desc32 *desc32; struct nfe_desc64 *desc64; struct nfe_rx_data *data; struct mbuf *m; data = &sc->jrxq.jdata[idx]; m = data->m; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->jrxq.jdesc64[idx]; /* VLAN packet may have overwritten it. 
*/ desc64->physaddr[0] = htole32(NFE_ADDR_HI(data->paddr)); desc64->physaddr[1] = htole32(NFE_ADDR_LO(data->paddr)); desc64->length = htole16(m->m_len); desc64->flags = htole16(NFE_RX_READY); } else { desc32 = &sc->jrxq.jdesc32[idx]; desc32->length = htole16(m->m_len); desc32->flags = htole16(NFE_RX_READY); } } static int nfe_newbuf(struct nfe_softc *sc, int idx) { struct nfe_rx_data *data; struct nfe_desc32 *desc32; struct nfe_desc64 *desc64; struct mbuf *m; bus_dma_segment_t segs[1]; bus_dmamap_t map; int nsegs; m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MCLBYTES; m_adj(m, ETHER_ALIGN); if (bus_dmamap_load_mbuf_sg(sc->rxq.rx_data_tag, sc->rxq.rx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) { m_freem(m); return (ENOBUFS); } KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); data = &sc->rxq.data[idx]; if (data->m != NULL) { bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rxq.rx_data_tag, data->rx_data_map); } map = data->rx_data_map; data->rx_data_map = sc->rxq.rx_spare_map; sc->rxq.rx_spare_map = map; bus_dmamap_sync(sc->rxq.rx_data_tag, data->rx_data_map, BUS_DMASYNC_PREREAD); data->paddr = segs[0].ds_addr; data->m = m; /* update mapping address in h/w descriptor */ if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->rxq.desc64[idx]; desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr)); desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr)); desc64->length = htole16(segs[0].ds_len); desc64->flags = htole16(NFE_RX_READY); } else { desc32 = &sc->rxq.desc32[idx]; desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr)); desc32->length = htole16(segs[0].ds_len); desc32->flags = htole16(NFE_RX_READY); } return (0); } static int nfe_jnewbuf(struct nfe_softc *sc, int idx) { struct nfe_rx_data *data; struct nfe_desc32 *desc32; struct nfe_desc64 *desc64; struct mbuf *m; bus_dma_segment_t segs[1]; bus_dmamap_t map; int nsegs; m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUM9BYTES); if (m == NULL) return (ENOBUFS); if ((m->m_flags & M_EXT) == 0) { m_freem(m); return (ENOBUFS); } m->m_pkthdr.len = m->m_len = MJUM9BYTES; m_adj(m, ETHER_ALIGN); if (bus_dmamap_load_mbuf_sg(sc->jrxq.jrx_data_tag, sc->jrxq.jrx_spare_map, m, segs, &nsegs, BUS_DMA_NOWAIT) != 0) { m_freem(m); return (ENOBUFS); } KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); data = &sc->jrxq.jdata[idx]; if (data->m != NULL) { bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->jrxq.jrx_data_tag, data->rx_data_map); } map = data->rx_data_map; data->rx_data_map = sc->jrxq.jrx_spare_map; sc->jrxq.jrx_spare_map = map; bus_dmamap_sync(sc->jrxq.jrx_data_tag, data->rx_data_map, BUS_DMASYNC_PREREAD); data->paddr = segs[0].ds_addr; data->m = m; /* update mapping address in h/w descriptor */ if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->jrxq.jdesc64[idx]; desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[0].ds_addr)); desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[0].ds_addr)); desc64->length = htole16(segs[0].ds_len); desc64->flags = htole16(NFE_RX_READY); } else { desc32 = &sc->jrxq.jdesc32[idx]; desc32->physaddr = htole32(NFE_ADDR_LO(segs[0].ds_addr)); desc32->length = htole16(segs[0].ds_len); desc32->flags = htole16(NFE_RX_READY); } return (0); } static int -nfe_rxeof(struct nfe_softc *sc, int count) +nfe_rxeof(struct nfe_softc *sc, int count, int *rx_npktsp) { struct ifnet *ifp = sc->nfe_ifp; struct nfe_desc32 *desc32; 
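/*
 * nfe_newbuf() and nfe_jnewbuf() above rely on the spare-map trick:
 * the replacement mbuf is loaded into the ring's spare map first, so
 * if the load fails the old buffer is still mapped and the slot can
 * simply be requeued with nfe_discard_rxbuf()/nfe_discard_jrxbuf().
 * Only on success are the two maps swapped.  Sketch of the core
 * (abbreviated names):
 *
 *	if (bus_dmamap_load_mbuf_sg(tag, spare, m, segs, &nsegs,
 *	    BUS_DMA_NOWAIT) != 0) {
 *		m_freem(m);		// old mbuf and map untouched
 *		return (ENOBUFS);
 *	}
 *	tmp = data->rx_data_map;	// success: swap in the spare
 *	data->rx_data_map = spare;
 *	spare = tmp;
 */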
struct nfe_desc64 *desc64; struct nfe_rx_data *data; struct mbuf *m; uint16_t flags; - int len, prog; + int len, prog, rx_npkts; uint32_t vtag = 0; + rx_npkts = 0; NFE_LOCK_ASSERT(sc); bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, BUS_DMASYNC_POSTREAD); for (prog = 0;;NFE_INC(sc->rxq.cur, NFE_RX_RING_COUNT), vtag = 0) { if (count <= 0) break; count--; data = &sc->rxq.data[sc->rxq.cur]; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->rxq.desc64[sc->rxq.cur]; vtag = le32toh(desc64->physaddr[1]); flags = le16toh(desc64->flags); len = le16toh(desc64->length) & NFE_RX_LEN_MASK; } else { desc32 = &sc->rxq.desc32[sc->rxq.cur]; flags = le16toh(desc32->flags); len = le16toh(desc32->length) & NFE_RX_LEN_MASK; } if (flags & NFE_RX_READY) break; prog++; if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { if (!(flags & NFE_RX_VALID_V1)) { ifp->if_ierrors++; nfe_discard_rxbuf(sc, sc->rxq.cur); continue; } if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) { flags &= ~NFE_RX_ERROR; len--; /* fix buffer length */ } } else { if (!(flags & NFE_RX_VALID_V2)) { ifp->if_ierrors++; nfe_discard_rxbuf(sc, sc->rxq.cur); continue; } if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) { flags &= ~NFE_RX_ERROR; len--; /* fix buffer length */ } } if (flags & NFE_RX_ERROR) { ifp->if_ierrors++; nfe_discard_rxbuf(sc, sc->rxq.cur); continue; } m = data->m; if (nfe_newbuf(sc, sc->rxq.cur) != 0) { ifp->if_iqdrops++; nfe_discard_rxbuf(sc, sc->rxq.cur); continue; } if ((vtag & NFE_RX_VTAG) != 0 && (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { m->m_pkthdr.ether_vtag = vtag & 0xffff; m->m_flags |= M_VLANTAG; } m->m_pkthdr.len = m->m_len = len; m->m_pkthdr.rcvif = ifp; if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { if ((flags & NFE_RX_IP_CSUMOK) != 0) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; m->m_pkthdr.csum_flags |= CSUM_IP_VALID; if ((flags & NFE_RX_TCP_CSUMOK) != 0 || (flags & NFE_RX_UDP_CSUMOK) != 0) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } } ifp->if_ipackets++; NFE_UNLOCK(sc); (*ifp->if_input)(ifp, m); NFE_LOCK(sc); + rx_npkts++; } if (prog > 0) bus_dmamap_sync(sc->rxq.rx_desc_tag, sc->rxq.rx_desc_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + if (rx_npktsp != NULL) + *rx_npktsp = rx_npkts; return (count > 0 ? 
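/*
 * Returning 0 means the ring was drained before the budget ran
 * out; EAGAIN tells nfe_int_task() that work may remain, so it
 * requeues itself rather than re-enabling interrupts.
 */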
0 : EAGAIN); } static int -nfe_jrxeof(struct nfe_softc *sc, int count) +nfe_jrxeof(struct nfe_softc *sc, int count, int *rx_npktsp) { struct ifnet *ifp = sc->nfe_ifp; struct nfe_desc32 *desc32; struct nfe_desc64 *desc64; struct nfe_rx_data *data; struct mbuf *m; uint16_t flags; - int len, prog; + int len, prog, rx_npkts; uint32_t vtag = 0; + rx_npkts = 0; NFE_LOCK_ASSERT(sc); bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map, BUS_DMASYNC_POSTREAD); for (prog = 0;;NFE_INC(sc->jrxq.jcur, NFE_JUMBO_RX_RING_COUNT), vtag = 0) { if (count <= 0) break; count--; data = &sc->jrxq.jdata[sc->jrxq.jcur]; if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->jrxq.jdesc64[sc->jrxq.jcur]; vtag = le32toh(desc64->physaddr[1]); flags = le16toh(desc64->flags); len = le16toh(desc64->length) & NFE_RX_LEN_MASK; } else { desc32 = &sc->jrxq.jdesc32[sc->jrxq.jcur]; flags = le16toh(desc32->flags); len = le16toh(desc32->length) & NFE_RX_LEN_MASK; } if (flags & NFE_RX_READY) break; prog++; if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { if (!(flags & NFE_RX_VALID_V1)) { ifp->if_ierrors++; nfe_discard_jrxbuf(sc, sc->jrxq.jcur); continue; } if ((flags & NFE_RX_FIXME_V1) == NFE_RX_FIXME_V1) { flags &= ~NFE_RX_ERROR; len--; /* fix buffer length */ } } else { if (!(flags & NFE_RX_VALID_V2)) { ifp->if_ierrors++; nfe_discard_jrxbuf(sc, sc->jrxq.jcur); continue; } if ((flags & NFE_RX_FIXME_V2) == NFE_RX_FIXME_V2) { flags &= ~NFE_RX_ERROR; len--; /* fix buffer length */ } } if (flags & NFE_RX_ERROR) { ifp->if_ierrors++; nfe_discard_jrxbuf(sc, sc->jrxq.jcur); continue; } m = data->m; if (nfe_jnewbuf(sc, sc->jrxq.jcur) != 0) { ifp->if_iqdrops++; nfe_discard_jrxbuf(sc, sc->jrxq.jcur); continue; } if ((vtag & NFE_RX_VTAG) != 0 && (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { m->m_pkthdr.ether_vtag = vtag & 0xffff; m->m_flags |= M_VLANTAG; } m->m_pkthdr.len = m->m_len = len; m->m_pkthdr.rcvif = ifp; if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { if ((flags & NFE_RX_IP_CSUMOK) != 0) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; m->m_pkthdr.csum_flags |= CSUM_IP_VALID; if ((flags & NFE_RX_TCP_CSUMOK) != 0 || (flags & NFE_RX_UDP_CSUMOK) != 0) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } } ifp->if_ipackets++; NFE_UNLOCK(sc); (*ifp->if_input)(ifp, m); NFE_LOCK(sc); + rx_npkts++; } if (prog > 0) bus_dmamap_sync(sc->jrxq.jrx_desc_tag, sc->jrxq.jrx_desc_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + if (rx_npktsp != NULL) + *rx_npktsp = rx_npkts; return (count > 0 ? 
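/*
 * Same return convention as nfe_rxeof() above.  This routine is
 * the jumbo (MJUM9BYTES) mirror of nfe_rxeof() and is selected by
 * nfe_int_task() whenever sc->nfe_framesize exceeds
 * MCLBYTES - ETHER_HDR_LEN.
 */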
0 : EAGAIN); } static void nfe_txeof(struct nfe_softc *sc) { struct ifnet *ifp = sc->nfe_ifp; struct nfe_desc32 *desc32; struct nfe_desc64 *desc64; struct nfe_tx_data *data = NULL; uint16_t flags; int cons, prog; NFE_LOCK_ASSERT(sc); bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, BUS_DMASYNC_POSTREAD); prog = 0; for (cons = sc->txq.next; cons != sc->txq.cur; NFE_INC(cons, NFE_TX_RING_COUNT)) { if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->txq.desc64[cons]; flags = le16toh(desc64->flags); } else { desc32 = &sc->txq.desc32[cons]; flags = le16toh(desc32->flags); } if (flags & NFE_TX_VALID) break; prog++; sc->txq.queued--; data = &sc->txq.data[cons]; if ((sc->nfe_flags & (NFE_JUMBO_SUP | NFE_40BIT_ADDR)) == 0) { if ((flags & NFE_TX_LASTFRAG_V1) == 0) continue; if ((flags & NFE_TX_ERROR_V1) != 0) { device_printf(sc->nfe_dev, "tx v1 error 0x%4b\n", flags, NFE_V1_TXERR); ifp->if_oerrors++; } else ifp->if_opackets++; } else { if ((flags & NFE_TX_LASTFRAG_V2) == 0) continue; if ((flags & NFE_TX_ERROR_V2) != 0) { device_printf(sc->nfe_dev, "tx v2 error 0x%4b\n", flags, NFE_V2_TXERR); ifp->if_oerrors++; } else ifp->if_opackets++; } /* last fragment of the mbuf chain transmitted */ KASSERT(data->m != NULL, ("%s: freeing NULL mbuf!", __func__)); bus_dmamap_sync(sc->txq.tx_data_tag, data->tx_data_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->txq.tx_data_tag, data->tx_data_map); m_freem(data->m); data->m = NULL; } if (prog > 0) { sc->nfe_force_tx = 0; sc->txq.next = cons; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (sc->txq.queued == 0) sc->nfe_watchdog_timer = 0; } } static int nfe_encap(struct nfe_softc *sc, struct mbuf **m_head) { struct nfe_desc32 *desc32 = NULL; struct nfe_desc64 *desc64 = NULL; bus_dmamap_t map; bus_dma_segment_t segs[NFE_MAX_SCATTER]; int error, i, nsegs, prod, si; uint32_t tso_segsz; uint16_t cflags, flags; struct mbuf *m; prod = si = sc->txq.cur; map = sc->txq.data[prod].tx_data_map; error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs, &nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { m = m_collapse(*m_head, M_DONTWAIT, NFE_MAX_SCATTER); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m; error = bus_dmamap_load_mbuf_sg(sc->txq.tx_data_tag, map, *m_head, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } } else if (error != 0) return (error); if (nsegs == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } if (sc->txq.queued + nsegs >= NFE_TX_RING_COUNT - 2) { bus_dmamap_unload(sc->txq.tx_data_tag, map); return (ENOBUFS); } m = *m_head; cflags = flags = 0; tso_segsz = 0; if ((m->m_pkthdr.csum_flags & NFE_CSUM_FEATURES) != 0) { if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) cflags |= NFE_TX_IP_CSUM; if ((m->m_pkthdr.csum_flags & CSUM_TCP) != 0) cflags |= NFE_TX_TCP_UDP_CSUM; if ((m->m_pkthdr.csum_flags & CSUM_UDP) != 0) cflags |= NFE_TX_TCP_UDP_CSUM; } if ((m->m_pkthdr.csum_flags & CSUM_TSO) != 0) { tso_segsz = (uint32_t)m->m_pkthdr.tso_segsz << NFE_TX_TSO_SHIFT; cflags &= ~(NFE_TX_IP_CSUM | NFE_TX_TCP_UDP_CSUM); cflags |= NFE_TX_TSO; } for (i = 0; i < nsegs; i++) { if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64 = &sc->txq.desc64[prod]; desc64->physaddr[0] = htole32(NFE_ADDR_HI(segs[i].ds_addr)); desc64->physaddr[1] = htole32(NFE_ADDR_LO(segs[i].ds_addr)); desc64->vtag = 0; desc64->length = htole16(segs[i].ds_len - 1); desc64->flags = htole16(flags); } else { desc32 = &sc->txq.desc32[prod]; desc32->physaddr = htole32(NFE_ADDR_LO(segs[i].ds_addr)); desc32->length 
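/*
 * NB: as with the 64-bit descriptor above, the length field is
 * programmed as the segment size minus one.
 */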
= htole16(segs[i].ds_len - 1); desc32->flags = htole16(flags); } /* * Setting of the valid bit in the first descriptor is * deferred until the whole chain is fully setup. */ flags |= NFE_TX_VALID; sc->txq.queued++; NFE_INC(prod, NFE_TX_RING_COUNT); } /* * the whole mbuf chain has been DMA mapped, fix last/first descriptor. * csum flags, vtag and TSO belong to the first fragment only. */ if (sc->nfe_flags & NFE_40BIT_ADDR) { desc64->flags |= htole16(NFE_TX_LASTFRAG_V2); desc64 = &sc->txq.desc64[si]; if ((m->m_flags & M_VLANTAG) != 0) desc64->vtag = htole32(NFE_TX_VTAG | m->m_pkthdr.ether_vtag); if (tso_segsz != 0) { /* * XXX * The following indicates the descriptor element * is a 32bit quantity. */ desc64->length |= htole16((uint16_t)tso_segsz); desc64->flags |= htole16(tso_segsz >> 16); } /* * finally, set the valid/checksum/TSO bit in the first * descriptor. */ desc64->flags |= htole16(NFE_TX_VALID | cflags); } else { if (sc->nfe_flags & NFE_JUMBO_SUP) desc32->flags |= htole16(NFE_TX_LASTFRAG_V2); else desc32->flags |= htole16(NFE_TX_LASTFRAG_V1); desc32 = &sc->txq.desc32[si]; if (tso_segsz != 0) { /* * XXX * The following indicates the descriptor element * is a 32bit quantity. */ desc32->length |= htole16((uint16_t)tso_segsz); desc32->flags |= htole16(tso_segsz >> 16); } /* * finally, set the valid/checksum/TSO bit in the first * descriptor. */ desc32->flags |= htole16(NFE_TX_VALID | cflags); } sc->txq.cur = prod; prod = (prod + NFE_TX_RING_COUNT - 1) % NFE_TX_RING_COUNT; sc->txq.data[si].tx_data_map = sc->txq.data[prod].tx_data_map; sc->txq.data[prod].tx_data_map = map; sc->txq.data[prod].m = m; bus_dmamap_sync(sc->txq.tx_data_tag, map, BUS_DMASYNC_PREWRITE); return (0); } static void nfe_setmulti(struct nfe_softc *sc) { struct ifnet *ifp = sc->nfe_ifp; struct ifmultiaddr *ifma; int i; uint32_t filter; uint8_t addr[ETHER_ADDR_LEN], mask[ETHER_ADDR_LEN]; uint8_t etherbroadcastaddr[ETHER_ADDR_LEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; NFE_LOCK_ASSERT(sc); if ((ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) != 0) { bzero(addr, ETHER_ADDR_LEN); bzero(mask, ETHER_ADDR_LEN); goto done; } bcopy(etherbroadcastaddr, addr, ETHER_ADDR_LEN); bcopy(etherbroadcastaddr, mask, ETHER_ADDR_LEN); IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { u_char *addrp; if (ifma->ifma_addr->sa_family != AF_LINK) continue; addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); for (i = 0; i < ETHER_ADDR_LEN; i++) { u_int8_t mcaddr = addrp[i]; addr[i] &= mcaddr; mask[i] &= ~mcaddr; } } IF_ADDR_UNLOCK(ifp); for (i = 0; i < ETHER_ADDR_LEN; i++) { mask[i] |= addr[i]; } done: addr[0] |= 0x01; /* make sure multicast bit is set */ NFE_WRITE(sc, NFE_MULTIADDR_HI, addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]); NFE_WRITE(sc, NFE_MULTIADDR_LO, addr[5] << 8 | addr[4]); NFE_WRITE(sc, NFE_MULTIMASK_HI, mask[3] << 24 | mask[2] << 16 | mask[1] << 8 | mask[0]); NFE_WRITE(sc, NFE_MULTIMASK_LO, mask[5] << 8 | mask[4]); filter = NFE_READ(sc, NFE_RXFILTER); filter &= NFE_PFF_RX_PAUSE; filter |= NFE_RXFILTER_MAGIC; filter |= (ifp->if_flags & IFF_PROMISC) ? 
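/*
 * The values computed above are not a hash: the loop ANDs every
 * multicast address into addr[] and every complement into mask[],
 * so MULTIADDR/MULTIMASK end up describing the bits common to all
 * subscribed groups.  Per byte, as the loop does:
 *
 *	addr[i] &= mcaddr;	bits set in every address survive
 *	mask[i] &= ~mcaddr;	bits clear in every address survive
 *
 * with mask[i] |= addr[i] applied afterwards so the mask covers
 * the match bits as well.
 */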
NFE_PFF_PROMISC : NFE_PFF_U2M; NFE_WRITE(sc, NFE_RXFILTER, filter); } static void nfe_tx_task(void *arg, int pending) { struct ifnet *ifp; ifp = (struct ifnet *)arg; nfe_start(ifp); } static void nfe_start(struct ifnet *ifp) { struct nfe_softc *sc = ifp->if_softc; struct mbuf *m0; int enq; NFE_LOCK(sc); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || sc->nfe_link == 0) { NFE_UNLOCK(sc); return; } for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd);) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m0); if (m0 == NULL) break; if (nfe_encap(sc, &m0) != 0) { if (m0 == NULL) break; IFQ_DRV_PREPEND(&ifp->if_snd, m0); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } enq++; ETHER_BPF_MTAP(ifp, m0); } if (enq > 0) { bus_dmamap_sync(sc->txq.tx_desc_tag, sc->txq.tx_desc_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* kick Tx */ NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); /* * Set a timeout in case the chip goes out to lunch. */ sc->nfe_watchdog_timer = 5; } NFE_UNLOCK(sc); } static void nfe_watchdog(struct ifnet *ifp) { struct nfe_softc *sc = ifp->if_softc; if (sc->nfe_watchdog_timer == 0 || --sc->nfe_watchdog_timer) return; /* Check if we've lost Tx completion interrupt. */ nfe_txeof(sc); if (sc->txq.queued == 0) { if_printf(ifp, "watchdog timeout (missed Tx interrupts) " "-- recovering\n"); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) taskqueue_enqueue_fast(sc->nfe_tq, &sc->nfe_tx_task); return; } /* Check if we've lost start Tx command. */ sc->nfe_force_tx++; if (sc->nfe_force_tx <= 3) { /* * If this is the case for watchdog timeout, the following * code should go to nfe_txeof(). */ NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_KICKTX | sc->rxtxctl); return; } sc->nfe_force_tx = 0; if_printf(ifp, "watchdog timeout\n"); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ifp->if_oerrors++; nfe_init_locked(sc); } static void nfe_init(void *xsc) { struct nfe_softc *sc = xsc; NFE_LOCK(sc); nfe_init_locked(sc); NFE_UNLOCK(sc); } static void nfe_init_locked(void *xsc) { struct nfe_softc *sc = xsc; struct ifnet *ifp = sc->nfe_ifp; struct mii_data *mii; uint32_t val; int error; NFE_LOCK_ASSERT(sc); mii = device_get_softc(sc->nfe_miibus); if (ifp->if_drv_flags & IFF_DRV_RUNNING) return; nfe_stop(ifp); sc->nfe_framesize = ifp->if_mtu + NFE_RX_HEADERS; nfe_init_tx_ring(sc, &sc->txq); if (sc->nfe_framesize > (MCLBYTES - ETHER_HDR_LEN)) error = nfe_init_jrx_ring(sc, &sc->jrxq); else error = nfe_init_rx_ring(sc, &sc->rxq); if (error != 0) { device_printf(sc->nfe_dev, "initialization failed: no memory for rx buffers\n"); nfe_stop(ifp); return; } val = 0; if ((sc->nfe_flags & NFE_CORRECT_MACADDR) != 0) val |= NFE_MAC_ADDR_INORDER; NFE_WRITE(sc, NFE_TX_UNK, val); NFE_WRITE(sc, NFE_STATUS, 0); if ((sc->nfe_flags & NFE_TX_FLOW_CTRL) != 0) NFE_WRITE(sc, NFE_TX_PAUSE_FRAME, NFE_TX_PAUSE_FRAME_DISABLE); sc->rxtxctl = NFE_RXTX_BIT2; if (sc->nfe_flags & NFE_40BIT_ADDR) sc->rxtxctl |= NFE_RXTX_V3MAGIC; else if (sc->nfe_flags & NFE_JUMBO_SUP) sc->rxtxctl |= NFE_RXTX_V2MAGIC; if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) sc->rxtxctl |= NFE_RXTX_RXCSUM; if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) sc->rxtxctl |= NFE_RXTX_VTAG_INSERT | NFE_RXTX_VTAG_STRIP; NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_RESET | sc->rxtxctl); DELAY(10); NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) NFE_WRITE(sc, NFE_VTAG_CTL, NFE_VTAG_ENABLE); else NFE_WRITE(sc, NFE_VTAG_CTL, 0); NFE_WRITE(sc, NFE_SETUP_R6, 0); /* set MAC address */ nfe_set_macaddr(sc, IF_LLADDR(ifp)); /* tell MAC where rings are 
in memory */ if (sc->nfe_framesize > MCLBYTES - ETHER_HDR_LEN) { NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, NFE_ADDR_HI(sc->jrxq.jphysaddr)); NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, NFE_ADDR_LO(sc->jrxq.jphysaddr)); } else { NFE_WRITE(sc, NFE_RX_RING_ADDR_HI, NFE_ADDR_HI(sc->rxq.physaddr)); NFE_WRITE(sc, NFE_RX_RING_ADDR_LO, NFE_ADDR_LO(sc->rxq.physaddr)); } NFE_WRITE(sc, NFE_TX_RING_ADDR_HI, NFE_ADDR_HI(sc->txq.physaddr)); NFE_WRITE(sc, NFE_TX_RING_ADDR_LO, NFE_ADDR_LO(sc->txq.physaddr)); NFE_WRITE(sc, NFE_RING_SIZE, (NFE_RX_RING_COUNT - 1) << 16 | (NFE_TX_RING_COUNT - 1)); NFE_WRITE(sc, NFE_RXBUFSZ, sc->nfe_framesize); /* force MAC to wakeup */ val = NFE_READ(sc, NFE_PWR_STATE); if ((val & NFE_PWR_WAKEUP) == 0) NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_WAKEUP); DELAY(10); val = NFE_READ(sc, NFE_PWR_STATE); NFE_WRITE(sc, NFE_PWR_STATE, val | NFE_PWR_VALID); #if 1 /* configure interrupts coalescing/mitigation */ NFE_WRITE(sc, NFE_IMTIMER, NFE_IM_DEFAULT); #else /* no interrupt mitigation: one interrupt per packet */ NFE_WRITE(sc, NFE_IMTIMER, 970); #endif NFE_WRITE(sc, NFE_SETUP_R1, NFE_R1_MAGIC_10_100); NFE_WRITE(sc, NFE_SETUP_R2, NFE_R2_MAGIC); NFE_WRITE(sc, NFE_SETUP_R6, NFE_R6_MAGIC); /* update MAC knowledge of PHY; generates a NFE_IRQ_LINK interrupt */ NFE_WRITE(sc, NFE_STATUS, sc->mii_phyaddr << 24 | NFE_STATUS_MAGIC); NFE_WRITE(sc, NFE_SETUP_R4, NFE_R4_MAGIC); NFE_WRITE(sc, NFE_WOL_CTL, NFE_WOL_MAGIC); sc->rxtxctl &= ~NFE_RXTX_BIT2; NFE_WRITE(sc, NFE_RXTX_CTL, sc->rxtxctl); DELAY(10); NFE_WRITE(sc, NFE_RXTX_CTL, NFE_RXTX_BIT1 | sc->rxtxctl); /* set Rx filter */ nfe_setmulti(sc); /* enable Rx */ NFE_WRITE(sc, NFE_RX_CTL, NFE_RX_START); /* enable Tx */ NFE_WRITE(sc, NFE_TX_CTL, NFE_TX_START); NFE_WRITE(sc, NFE_PHY_STATUS, 0xf); /* Clear hardware stats. */ nfe_stats_clear(sc); #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) nfe_disable_intr(sc); else #endif nfe_set_intr(sc); nfe_enable_intr(sc); /* enable interrupts */ ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->nfe_link = 0; mii_mediachg(mii); callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc); } static void nfe_stop(struct ifnet *ifp) { struct nfe_softc *sc = ifp->if_softc; struct nfe_rx_ring *rx_ring; struct nfe_jrx_ring *jrx_ring; struct nfe_tx_ring *tx_ring; struct nfe_rx_data *rdata; struct nfe_tx_data *tdata; int i; NFE_LOCK_ASSERT(sc); sc->nfe_watchdog_timer = 0; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); callout_stop(&sc->nfe_stat_ch); /* abort Tx */ NFE_WRITE(sc, NFE_TX_CTL, 0); /* disable Rx */ NFE_WRITE(sc, NFE_RX_CTL, 0); /* disable interrupts */ nfe_disable_intr(sc); sc->nfe_link = 0; /* free Rx and Tx mbufs still in the queues. 
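 * XXX The Tx cleanup loop below iterates to NFE_RX_RING_COUNT;
 * NFE_TX_RING_COUNT looks like the intended bound.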
*/ rx_ring = &sc->rxq; for (i = 0; i < NFE_RX_RING_COUNT; i++) { rdata = &rx_ring->data[i]; if (rdata->m != NULL) { bus_dmamap_sync(rx_ring->rx_data_tag, rdata->rx_data_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rx_ring->rx_data_tag, rdata->rx_data_map); m_freem(rdata->m); rdata->m = NULL; } } if ((sc->nfe_flags & NFE_JUMBO_SUP) != 0) { jrx_ring = &sc->jrxq; for (i = 0; i < NFE_JUMBO_RX_RING_COUNT; i++) { rdata = &jrx_ring->jdata[i]; if (rdata->m != NULL) { bus_dmamap_sync(jrx_ring->jrx_data_tag, rdata->rx_data_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(jrx_ring->jrx_data_tag, rdata->rx_data_map); m_freem(rdata->m); rdata->m = NULL; } } } tx_ring = &sc->txq; for (i = 0; i < NFE_RX_RING_COUNT; i++) { tdata = &tx_ring->data[i]; if (tdata->m != NULL) { bus_dmamap_sync(tx_ring->tx_data_tag, tdata->tx_data_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(tx_ring->tx_data_tag, tdata->tx_data_map); m_freem(tdata->m); tdata->m = NULL; } } /* Update hardware stats. */ nfe_stats_update(sc); } static int nfe_ifmedia_upd(struct ifnet *ifp) { struct nfe_softc *sc = ifp->if_softc; struct mii_data *mii; NFE_LOCK(sc); mii = device_get_softc(sc->nfe_miibus); mii_mediachg(mii); NFE_UNLOCK(sc); return (0); } static void nfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct nfe_softc *sc; struct mii_data *mii; sc = ifp->if_softc; NFE_LOCK(sc); mii = device_get_softc(sc->nfe_miibus); mii_pollstat(mii); NFE_UNLOCK(sc); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } void nfe_tick(void *xsc) { struct nfe_softc *sc; struct mii_data *mii; struct ifnet *ifp; sc = (struct nfe_softc *)xsc; NFE_LOCK_ASSERT(sc); ifp = sc->nfe_ifp; mii = device_get_softc(sc->nfe_miibus); mii_tick(mii); nfe_stats_update(sc); nfe_watchdog(ifp); callout_reset(&sc->nfe_stat_ch, hz, nfe_tick, sc); } static int nfe_shutdown(device_t dev) { struct nfe_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); NFE_LOCK(sc); ifp = sc->nfe_ifp; nfe_stop(ifp); /* nfe_reset(sc); */ NFE_UNLOCK(sc); return (0); } static void nfe_get_macaddr(struct nfe_softc *sc, uint8_t *addr) { uint32_t val; if ((sc->nfe_flags & NFE_CORRECT_MACADDR) == 0) { val = NFE_READ(sc, NFE_MACADDR_LO); addr[0] = (val >> 8) & 0xff; addr[1] = (val & 0xff); val = NFE_READ(sc, NFE_MACADDR_HI); addr[2] = (val >> 24) & 0xff; addr[3] = (val >> 16) & 0xff; addr[4] = (val >> 8) & 0xff; addr[5] = (val & 0xff); } else { val = NFE_READ(sc, NFE_MACADDR_LO); addr[5] = (val >> 8) & 0xff; addr[4] = (val & 0xff); val = NFE_READ(sc, NFE_MACADDR_HI); addr[3] = (val >> 24) & 0xff; addr[2] = (val >> 16) & 0xff; addr[1] = (val >> 8) & 0xff; addr[0] = (val & 0xff); } } static void nfe_set_macaddr(struct nfe_softc *sc, uint8_t *addr) { NFE_WRITE(sc, NFE_MACADDR_LO, addr[5] << 8 | addr[4]); NFE_WRITE(sc, NFE_MACADDR_HI, addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]); } /* * Map a single buffer address. 
*/ static void nfe_dma_map_segs(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct nfe_dmamap_arg *ctx; if (error != 0) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); ctx = (struct nfe_dmamap_arg *)arg; ctx->nfe_busaddr = segs[0].ds_addr; } static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) { int error, value; if (!arg1) return (EINVAL); value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if (error || !req->newptr) return (error); if (value < low || value > high) return (EINVAL); *(int *)arg1 = value; return (0); } static int sysctl_hw_nfe_proc_limit(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, NFE_PROC_MIN, NFE_PROC_MAX)); } #define NFE_SYSCTL_STAT_ADD32(c, h, n, p, d) \ SYSCTL_ADD_UINT(c, h, OID_AUTO, n, CTLFLAG_RD, p, 0, d) #define NFE_SYSCTL_STAT_ADD64(c, h, n, p, d) \ SYSCTL_ADD_QUAD(c, h, OID_AUTO, n, CTLFLAG_RD, p, d) static void nfe_sysctl_node(struct nfe_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *child, *parent; struct sysctl_oid *tree; struct nfe_hw_stats *stats; int error; stats = &sc->nfe_stats; ctx = device_get_sysctl_ctx(sc->nfe_dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->nfe_dev)); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "process_limit", CTLTYPE_INT | CTLFLAG_RW, &sc->nfe_process_limit, 0, sysctl_hw_nfe_proc_limit, "I", "max number of Rx events to process"); sc->nfe_process_limit = NFE_PROC_DEFAULT; error = resource_int_value(device_get_name(sc->nfe_dev), device_get_unit(sc->nfe_dev), "process_limit", &sc->nfe_process_limit); if (error == 0) { if (sc->nfe_process_limit < NFE_PROC_MIN || sc->nfe_process_limit > NFE_PROC_MAX) { device_printf(sc->nfe_dev, "process_limit value out of range; " "using default: %d\n", NFE_PROC_DEFAULT); sc->nfe_process_limit = NFE_PROC_DEFAULT; } } if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0) return; tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, NULL, "NFE statistics"); parent = SYSCTL_CHILDREN(tree); /* Rx statistics. */ tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "rx", CTLFLAG_RD, NULL, "Rx MAC statistics"); child = SYSCTL_CHILDREN(tree); NFE_SYSCTL_STAT_ADD32(ctx, child, "frame_errors", &stats->rx_frame_errors, "Framing Errors"); NFE_SYSCTL_STAT_ADD32(ctx, child, "extra_bytes", &stats->rx_extra_bytes, "Extra Bytes"); NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols", &stats->rx_late_cols, "Late Collisions"); NFE_SYSCTL_STAT_ADD32(ctx, child, "runts", &stats->rx_runts, "Runts"); NFE_SYSCTL_STAT_ADD32(ctx, child, "jumbos", &stats->rx_jumbos, "Jumbos"); NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_overuns", &stats->rx_fifo_overuns, "FIFO Overruns"); NFE_SYSCTL_STAT_ADD32(ctx, child, "crc_errors", &stats->rx_crc_errors, "CRC Errors"); NFE_SYSCTL_STAT_ADD32(ctx, child, "fae", &stats->rx_fae, "Frame Alignment Errors"); NFE_SYSCTL_STAT_ADD32(ctx, child, "len_errors", &stats->rx_len_errors, "Length Errors"); NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast", &stats->rx_unicast, "Unicast Frames"); NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast", &stats->rx_multicast, "Multicast Frames"); NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast", &stats->rx_broadcast, "Broadcast Frames"); if ((sc->nfe_flags & NFE_MIB_V2) != 0) { NFE_SYSCTL_STAT_ADD64(ctx, child, "octets", &stats->rx_octets, "Octets"); NFE_SYSCTL_STAT_ADD32(ctx, child, "pause", &stats->rx_pause, "Pause frames"); NFE_SYSCTL_STAT_ADD32(ctx, child, "drops", &stats->rx_drops, "Drop frames"); } /* Tx statistics. 
*/ tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "tx", CTLFLAG_RD, NULL, "Tx MAC statistics"); child = SYSCTL_CHILDREN(tree); NFE_SYSCTL_STAT_ADD64(ctx, child, "octets", &stats->tx_octets, "Octets"); NFE_SYSCTL_STAT_ADD32(ctx, child, "zero_rexmits", &stats->tx_zero_rexmits, "Zero Retransmits"); NFE_SYSCTL_STAT_ADD32(ctx, child, "one_rexmits", &stats->tx_one_rexmits, "One Retransmits"); NFE_SYSCTL_STAT_ADD32(ctx, child, "multi_rexmits", &stats->tx_multi_rexmits, "Multiple Retransmits"); NFE_SYSCTL_STAT_ADD32(ctx, child, "late_cols", &stats->tx_late_cols, "Late Collisions"); NFE_SYSCTL_STAT_ADD32(ctx, child, "fifo_underuns", &stats->tx_fifo_underuns, "FIFO Underruns"); NFE_SYSCTL_STAT_ADD32(ctx, child, "carrier_losts", &stats->tx_carrier_losts, "Carrier Losts"); NFE_SYSCTL_STAT_ADD32(ctx, child, "excess_deferrals", &stats->tx_excess_deferals, "Excess Deferrals"); NFE_SYSCTL_STAT_ADD32(ctx, child, "retry_errors", &stats->tx_retry_errors, "Retry Errors"); if ((sc->nfe_flags & NFE_MIB_V2) != 0) { NFE_SYSCTL_STAT_ADD32(ctx, child, "deferrals", &stats->tx_deferals, "Deferrals"); NFE_SYSCTL_STAT_ADD32(ctx, child, "frames", &stats->tx_frames, "Frames"); NFE_SYSCTL_STAT_ADD32(ctx, child, "pause", &stats->tx_pause, "Pause Frames"); } if ((sc->nfe_flags & NFE_MIB_V3) != 0) { NFE_SYSCTL_STAT_ADD32(ctx, child, "unicast", &stats->tx_deferals, "Unicast Frames"); NFE_SYSCTL_STAT_ADD32(ctx, child, "multicast", &stats->tx_frames, "Multicast Frames"); NFE_SYSCTL_STAT_ADD32(ctx, child, "broadcast", &stats->tx_pause, "Broadcast Frames"); } } #undef NFE_SYSCTL_STAT_ADD32 #undef NFE_SYSCTL_STAT_ADD64 static void nfe_stats_clear(struct nfe_softc *sc) { int i, mib_cnt; if ((sc->nfe_flags & NFE_MIB_V1) != 0) mib_cnt = NFE_NUM_MIB_STATV1; else if ((sc->nfe_flags & (NFE_MIB_V2 | NFE_MIB_V3)) != 0) mib_cnt = NFE_NUM_MIB_STATV2; else return; for (i = 0; i < mib_cnt; i += sizeof(uint32_t)) NFE_READ(sc, NFE_TX_OCTET + i); if ((sc->nfe_flags & NFE_MIB_V3) != 0) { NFE_READ(sc, NFE_TX_UNICAST); NFE_READ(sc, NFE_TX_MULTICAST); NFE_READ(sc, NFE_TX_BROADCAST); } } static void nfe_stats_update(struct nfe_softc *sc) { struct nfe_hw_stats *stats; NFE_LOCK_ASSERT(sc); if ((sc->nfe_flags & (NFE_MIB_V1 | NFE_MIB_V2 | NFE_MIB_V3)) == 0) return; stats = &sc->nfe_stats; stats->tx_octets += NFE_READ(sc, NFE_TX_OCTET); stats->tx_zero_rexmits += NFE_READ(sc, NFE_TX_ZERO_REXMIT); stats->tx_one_rexmits += NFE_READ(sc, NFE_TX_ONE_REXMIT); stats->tx_multi_rexmits += NFE_READ(sc, NFE_TX_MULTI_REXMIT); stats->tx_late_cols += NFE_READ(sc, NFE_TX_LATE_COL); stats->tx_fifo_underuns += NFE_READ(sc, NFE_TX_FIFO_UNDERUN); stats->tx_carrier_losts += NFE_READ(sc, NFE_TX_CARRIER_LOST); stats->tx_excess_deferals += NFE_READ(sc, NFE_TX_EXCESS_DEFERRAL); stats->tx_retry_errors += NFE_READ(sc, NFE_TX_RETRY_ERROR); stats->rx_frame_errors += NFE_READ(sc, NFE_RX_FRAME_ERROR); stats->rx_extra_bytes += NFE_READ(sc, NFE_RX_EXTRA_BYTES); stats->rx_late_cols += NFE_READ(sc, NFE_RX_LATE_COL); stats->rx_runts += NFE_READ(sc, NFE_RX_RUNT); stats->rx_jumbos += NFE_READ(sc, NFE_RX_JUMBO); stats->rx_fifo_overuns += NFE_READ(sc, NFE_RX_FIFO_OVERUN); stats->rx_crc_errors += NFE_READ(sc, NFE_RX_CRC_ERROR); stats->rx_fae += NFE_READ(sc, NFE_RX_FAE); stats->rx_len_errors += NFE_READ(sc, NFE_RX_LEN_ERROR); stats->rx_unicast += NFE_READ(sc, NFE_RX_UNICAST); stats->rx_multicast += NFE_READ(sc, NFE_RX_MULTICAST); stats->rx_broadcast += NFE_READ(sc, NFE_RX_BROADCAST); if ((sc->nfe_flags & NFE_MIB_V2) != 0) { stats->tx_deferals += NFE_READ(sc, NFE_TX_DEFERAL); stats->tx_frames += 
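/*
 * The MIB registers appear to clear on read -- nfe_stats_clear()
 * just reads each one and discards the result -- so every pass
 * here accumulates the per-interval deltas into the soft copies.
 * XXX the V3 block below adds NFE_TX_BROADCAST into rx_broadcast,
 * and the V3 sysctls above bind the unicast/multicast/broadcast
 * nodes to tx_deferals/tx_frames/tx_pause; the dedicated
 * tx_unicast, tx_multicast and tx_broadcast fields look like the
 * intended targets.
 */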
NFE_READ(sc, NFE_TX_FRAME); stats->rx_octets += NFE_READ(sc, NFE_RX_OCTET); stats->tx_pause += NFE_READ(sc, NFE_TX_PAUSE); stats->rx_pause += NFE_READ(sc, NFE_RX_PAUSE); stats->rx_drops += NFE_READ(sc, NFE_RX_DROP); } if ((sc->nfe_flags & NFE_MIB_V3) != 0) { stats->tx_unicast += NFE_READ(sc, NFE_TX_UNICAST); stats->tx_multicast += NFE_READ(sc, NFE_TX_MULTICAST); stats->rx_broadcast += NFE_READ(sc, NFE_TX_BROADCAST); } } Index: head/sys/dev/re/if_re.c =================================================================== --- head/sys/dev/re/if_re.c (revision 193095) +++ head/sys/dev/re/if_re.c (revision 193096) @@ -1,3133 +1,3140 @@ /*- * Copyright (c) 1997, 1998-2003 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * RealTek 8139C+/8169/8169S/8110S/8168/8111/8101E PCI NIC driver * * Written by Bill Paul * Senior Networking Software Engineer * Wind River Systems */ /* * This driver is designed to support RealTek's next generation of * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently * seven devices in this family: the RTL8139C+, the RTL8169, the RTL8169S, * RTL8110S, the RTL8168, the RTL8111 and the RTL8101E. * * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible * with the older 8139 family, however it also supports a special * C+ mode of operation that provides several new performance enhancing * features. These include: * * o Descriptor based DMA mechanism. Each descriptor represents * a single packet fragment. Data buffers may be aligned on * any byte boundary. * * o 64-bit DMA * * o TCP/IP checksum offload for both RX and TX * * o High and normal priority transmit DMA rings * * o VLAN tag insertion and extraction * * o TCP large send (segmentation offload) * * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+ * programming API is fairly straightforward. 
The RX filtering, EEPROM * access and PHY access is the same as it is on the older 8139 series * chips. * * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the * same programming API and feature set as the 8139C+ with the following * differences and additions: * * o 1000Mbps mode * * o Jumbo frames * * o GMII and TBI ports/registers for interfacing with copper * or fiber PHYs * * o RX and TX DMA rings can have up to 1024 descriptors * (the 8139C+ allows a maximum of 64) * * o Slight differences in register layout from the 8139C+ * * The TX start and timer interrupt registers are at different locations * on the 8169 than they are on the 8139C+. Also, the status word in the * RX descriptor has a slightly different bit layout. The 8169 does not * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska' * copper gigE PHY. * * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs * (the 'S' stands for 'single-chip'). These devices have the same * programming API as the older 8169, but also have some vendor-specific * registers for the on-board PHY. The 8110S is a LAN-on-motherboard * part designed to be pin-compatible with the RealTek 8100 10/100 chip. * * This driver takes advantage of the RX and TX checksum offload and * VLAN tag insertion/extraction features. It also implements TX * interrupt moderation using the timer interrupt registers, which * significantly reduces TX interrupt load. There is also support * for jumbo frames, however the 8169/8169S/8110S can not transmit * jumbo frames larger than 7440, so the max MTU possible with this * driver is 7422 bytes. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(re, pci, 1, 1, 1); MODULE_DEPEND(re, ether, 1, 1, 1); MODULE_DEPEND(re, miibus, 1, 1, 1); /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* Tunables. */ static int msi_disable = 0; TUNABLE_INT("hw.re.msi_disable", &msi_disable); static int prefer_iomap = 0; TUNABLE_INT("hw.re.prefer_iomap", &prefer_iomap); #define RE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) /* * Various supported device vendors/types and their names. 
*/ static struct rl_type re_devs[] = { { DLINK_VENDORID, DLINK_DEVICEID_528T, 0, "D-Link DGE-528(T) Gigabit Ethernet Adapter" }, { RT_VENDORID, RT_DEVICEID_8139, 0, "RealTek 8139C+ 10/100BaseTX" }, { RT_VENDORID, RT_DEVICEID_8101E, 0, "RealTek 8101E/8102E/8102EL PCIe 10/100baseTX" }, { RT_VENDORID, RT_DEVICEID_8168, 0, "RealTek 8168/8168B/8168C/8168CP/8168D/8111B/8111C/8111CP PCIe " "Gigabit Ethernet" }, { RT_VENDORID, RT_DEVICEID_8169, 0, "RealTek 8169/8169S/8169SB(L)/8110S/8110SB(L) Gigabit Ethernet" }, { RT_VENDORID, RT_DEVICEID_8169SC, 0, "RealTek 8169SC/8110SC Single-chip Gigabit Ethernet" }, { COREGA_VENDORID, COREGA_DEVICEID_CGLAPCIGT, 0, "Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" }, { LINKSYS_VENDORID, LINKSYS_DEVICEID_EG1032, 0, "Linksys EG1032 (RTL8169S) Gigabit Ethernet" }, { USR_VENDORID, USR_DEVICEID_997902, 0, "US Robotics 997902 (RTL8169S) Gigabit Ethernet" } }; static struct rl_hwrev re_hwrevs[] = { { RL_HWREV_8139, RL_8139, "" }, { RL_HWREV_8139A, RL_8139, "A" }, { RL_HWREV_8139AG, RL_8139, "A-G" }, { RL_HWREV_8139B, RL_8139, "B" }, { RL_HWREV_8130, RL_8139, "8130" }, { RL_HWREV_8139C, RL_8139, "C" }, { RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C" }, { RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+"}, { RL_HWREV_8168_SPIN1, RL_8169, "8168"}, { RL_HWREV_8169, RL_8169, "8169"}, { RL_HWREV_8169S, RL_8169, "8169S"}, { RL_HWREV_8110S, RL_8169, "8110S"}, { RL_HWREV_8169_8110SB, RL_8169, "8169SB/8110SB"}, { RL_HWREV_8169_8110SC, RL_8169, "8169SC/8110SC"}, { RL_HWREV_8169_8110SBL, RL_8169, "8169SBL/8110SBL"}, { RL_HWREV_8169_8110SCE, RL_8169, "8169SC/8110SC"}, { RL_HWREV_8100, RL_8139, "8100"}, { RL_HWREV_8101, RL_8139, "8101"}, { RL_HWREV_8100E, RL_8169, "8100E"}, { RL_HWREV_8101E, RL_8169, "8101E"}, { RL_HWREV_8102E, RL_8169, "8102E"}, { RL_HWREV_8102EL, RL_8169, "8102EL"}, { RL_HWREV_8168_SPIN2, RL_8169, "8168"}, { RL_HWREV_8168_SPIN3, RL_8169, "8168"}, { RL_HWREV_8168C, RL_8169, "8168C/8111C"}, { RL_HWREV_8168C_SPIN2, RL_8169, "8168C/8111C"}, { RL_HWREV_8168CP, RL_8169, "8168CP/8111CP"}, { RL_HWREV_8168D, RL_8169, "8168D"}, { 0, 0, NULL } }; static int re_probe (device_t); static int re_attach (device_t); static int re_detach (device_t); static int re_encap (struct rl_softc *, struct mbuf **); static void re_dma_map_addr (void *, bus_dma_segment_t *, int, int); static int re_allocmem (device_t, struct rl_softc *); static __inline void re_discard_rxbuf (struct rl_softc *, int); static int re_newbuf (struct rl_softc *, int); static int re_rx_list_init (struct rl_softc *); static int re_tx_list_init (struct rl_softc *); #ifdef RE_FIXUP_RX static __inline void re_fixup_rx (struct mbuf *); #endif -static int re_rxeof (struct rl_softc *); +static int re_rxeof (struct rl_softc *, int *); static void re_txeof (struct rl_softc *); #ifdef DEVICE_POLLING -static void re_poll (struct ifnet *, enum poll_cmd, int); -static void re_poll_locked (struct ifnet *, enum poll_cmd, int); +static int re_poll (struct ifnet *, enum poll_cmd, int); +static int re_poll_locked (struct ifnet *, enum poll_cmd, int); #endif static int re_intr (void *); static void re_tick (void *); static void re_tx_task (void *, int); static void re_int_task (void *, int); static void re_start (struct ifnet *); static int re_ioctl (struct ifnet *, u_long, caddr_t); static void re_init (void *); static void re_init_locked (struct rl_softc *); static void re_stop (struct rl_softc *); static void re_watchdog (struct rl_softc *); static int re_suspend (device_t); static int re_resume (device_t); static int re_shutdown (device_t); 
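/*
 * NB: re_rxeof() and the DEVICE_POLLING hooks above now return a
 * packet count, matching the nfe(4) change earlier in this patch.
 * A sketch of the new contract, assuming the polling framework
 * consumes the handler's return value:
 *
 *	static int
 *	re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
 *	{
 *		int rx_npkts = 0;
 *		...
 *		return (rx_npkts);	(frames processed this call)
 *	}
 */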
static int re_ifmedia_upd (struct ifnet *); static void re_ifmedia_sts (struct ifnet *, struct ifmediareq *); static void re_eeprom_putbyte (struct rl_softc *, int); static void re_eeprom_getword (struct rl_softc *, int, u_int16_t *); static void re_read_eeprom (struct rl_softc *, caddr_t, int, int); static int re_gmii_readreg (device_t, int, int); static int re_gmii_writereg (device_t, int, int, int); static int re_miibus_readreg (device_t, int, int); static int re_miibus_writereg (device_t, int, int, int); static void re_miibus_statchg (device_t); static void re_set_rxmode (struct rl_softc *); static void re_reset (struct rl_softc *); static void re_setwol (struct rl_softc *); static void re_clrwol (struct rl_softc *); #ifdef RE_DIAG static int re_diag (struct rl_softc *); #endif static device_method_t re_methods[] = { /* Device interface */ DEVMETHOD(device_probe, re_probe), DEVMETHOD(device_attach, re_attach), DEVMETHOD(device_detach, re_detach), DEVMETHOD(device_suspend, re_suspend), DEVMETHOD(device_resume, re_resume), DEVMETHOD(device_shutdown, re_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, re_miibus_readreg), DEVMETHOD(miibus_writereg, re_miibus_writereg), DEVMETHOD(miibus_statchg, re_miibus_statchg), { 0, 0 } }; static driver_t re_driver = { "re", re_methods, sizeof(struct rl_softc) }; static devclass_t re_devclass; DRIVER_MODULE(re, pci, re_driver, re_devclass, 0, 0); DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0); #define EE_SET(x) \ CSR_WRITE_1(sc, RL_EECMD, \ CSR_READ_1(sc, RL_EECMD) | x) #define EE_CLR(x) \ CSR_WRITE_1(sc, RL_EECMD, \ CSR_READ_1(sc, RL_EECMD) & ~x) /* * Send a read command and address to the EEPROM, check for ACK. */ static void re_eeprom_putbyte(struct rl_softc *sc, int addr) { int d, i; d = addr | (RL_9346_READ << sc->rl_eewidth); /* * Feed in each bit and strobe the clock. */ for (i = 1 << (sc->rl_eewidth + 3); i; i >>= 1) { if (d & i) { EE_SET(RL_EE_DATAIN); } else { EE_CLR(RL_EE_DATAIN); } DELAY(100); EE_SET(RL_EE_CLK); DELAY(150); EE_CLR(RL_EE_CLK); DELAY(100); } } /* * Read a word of data stored in the EEPROM at address 'addr.' */ static void re_eeprom_getword(struct rl_softc *sc, int addr, u_int16_t *dest) { int i; u_int16_t word = 0; /* * Send address of word we want to read. */ re_eeprom_putbyte(sc, addr); /* * Start reading bits from EEPROM. */ for (i = 0x8000; i; i >>= 1) { EE_SET(RL_EE_CLK); DELAY(100); if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT) word |= i; EE_CLR(RL_EE_CLK); DELAY(100); } *dest = word; } /* * Read a sequence of words from the EEPROM. 
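 * Each access bit-bangs the 93C46/93C56 serial protocol through
 * EE_SET()/EE_CLR(): re_eeprom_putbyte() clocks in the read opcode
 * plus address, then re_eeprom_getword() clocks out 16 data bits,
 * MSB first.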
*/ static void re_read_eeprom(struct rl_softc *sc, caddr_t dest, int off, int cnt) { int i; u_int16_t word = 0, *ptr; CSR_SETBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); DELAY(100); for (i = 0; i < cnt; i++) { CSR_SETBIT_1(sc, RL_EECMD, RL_EE_SEL); re_eeprom_getword(sc, off + i, &word); CSR_CLRBIT_1(sc, RL_EECMD, RL_EE_SEL); ptr = (u_int16_t *)(dest + (i * 2)); *ptr = word; } CSR_CLRBIT_1(sc, RL_EECMD, RL_EEMODE_PROGRAM); } static int re_gmii_readreg(device_t dev, int phy, int reg) { struct rl_softc *sc; u_int32_t rval; int i; if (phy != 1) return (0); sc = device_get_softc(dev); /* Let the rgephy driver read the GMEDIASTAT register */ if (reg == RL_GMEDIASTAT) { rval = CSR_READ_1(sc, RL_GMEDIASTAT); return (rval); } CSR_WRITE_4(sc, RL_PHYAR, reg << 16); DELAY(1000); for (i = 0; i < RL_PHY_TIMEOUT; i++) { rval = CSR_READ_4(sc, RL_PHYAR); if (rval & RL_PHYAR_BUSY) break; DELAY(100); } if (i == RL_PHY_TIMEOUT) { device_printf(sc->rl_dev, "PHY read failed\n"); return (0); } return (rval & RL_PHYAR_PHYDATA); } static int re_gmii_writereg(device_t dev, int phy, int reg, int data) { struct rl_softc *sc; u_int32_t rval; int i; sc = device_get_softc(dev); CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) | (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY); DELAY(1000); for (i = 0; i < RL_PHY_TIMEOUT; i++) { rval = CSR_READ_4(sc, RL_PHYAR); if (!(rval & RL_PHYAR_BUSY)) break; DELAY(100); } if (i == RL_PHY_TIMEOUT) { device_printf(sc->rl_dev, "PHY write failed\n"); return (0); } return (0); } static int re_miibus_readreg(device_t dev, int phy, int reg) { struct rl_softc *sc; u_int16_t rval = 0; u_int16_t re8139_reg = 0; sc = device_get_softc(dev); if (sc->rl_type == RL_8169) { rval = re_gmii_readreg(dev, phy, reg); return (rval); } /* Pretend the internal PHY is only at address 0 */ if (phy) { return (0); } switch (reg) { case MII_BMCR: re8139_reg = RL_BMCR; break; case MII_BMSR: re8139_reg = RL_BMSR; break; case MII_ANAR: re8139_reg = RL_ANAR; break; case MII_ANER: re8139_reg = RL_ANER; break; case MII_ANLPAR: re8139_reg = RL_LPAR; break; case MII_PHYIDR1: case MII_PHYIDR2: return (0); /* * Allow the rlphy driver to read the media status * register. If we have a link partner which does not * support NWAY, this is the register which will tell * us the results of parallel detection. */ case RL_MEDIASTAT: rval = CSR_READ_1(sc, RL_MEDIASTAT); return (rval); default: device_printf(sc->rl_dev, "bad phy register\n"); return (0); } rval = CSR_READ_2(sc, re8139_reg); if (sc->rl_type == RL_8139CPLUS && re8139_reg == RL_BMCR) { /* 8139C+ has different bit layout. */ rval &= ~(BMCR_LOOP | BMCR_ISO); } return (rval); } static int re_miibus_writereg(device_t dev, int phy, int reg, int data) { struct rl_softc *sc; u_int16_t re8139_reg = 0; int rval = 0; sc = device_get_softc(dev); if (sc->rl_type == RL_8169) { rval = re_gmii_writereg(dev, phy, reg, data); return (rval); } /* Pretend the internal PHY is only at address 0 */ if (phy) return (0); switch (reg) { case MII_BMCR: re8139_reg = RL_BMCR; if (sc->rl_type == RL_8139CPLUS) { /* 8139C+ has different bit layout. 
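 * (BMCR_LOOP and BMCR_ISO are masked off below, mirroring the
 * read side above.)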
*/ data &= ~(BMCR_LOOP | BMCR_ISO); } break; case MII_BMSR: re8139_reg = RL_BMSR; break; case MII_ANAR: re8139_reg = RL_ANAR; break; case MII_ANER: re8139_reg = RL_ANER; break; case MII_ANLPAR: re8139_reg = RL_LPAR; break; case MII_PHYIDR1: case MII_PHYIDR2: return (0); break; default: device_printf(sc->rl_dev, "bad phy register\n"); return (0); } CSR_WRITE_2(sc, re8139_reg, data); return (0); } static void re_miibus_statchg(device_t dev) { struct rl_softc *sc; struct ifnet *ifp; struct mii_data *mii; sc = device_get_softc(dev); mii = device_get_softc(sc->rl_miibus); ifp = sc->rl_ifp; if (mii == NULL || ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; sc->rl_flags &= ~RL_FLAG_LINK; if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: sc->rl_flags |= RL_FLAG_LINK; break; case IFM_1000_T: if ((sc->rl_flags & RL_FLAG_FASTETHER) != 0) break; sc->rl_flags |= RL_FLAG_LINK; break; default: break; } } /* * RealTek controllers does not provide any interface to * Tx/Rx MACs for resolved speed, duplex and flow-control * parameters. */ } /* * Set the RX configuration and 64-bit multicast hash filter. */ static void re_set_rxmode(struct rl_softc *sc) { struct ifnet *ifp; struct ifmultiaddr *ifma; uint32_t hashes[2] = { 0, 0 }; uint32_t h, rxfilt; RL_LOCK_ASSERT(sc); ifp = sc->rl_ifp; rxfilt = RL_RXCFG_CONFIG | RL_RXCFG_RX_INDIV | RL_RXCFG_RX_BROAD; if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) { if (ifp->if_flags & IFF_PROMISC) rxfilt |= RL_RXCFG_RX_ALLPHYS; /* * Unlike other hardwares, we have to explicitly set * RL_RXCFG_RX_MULTI to receive multicast frames in * promiscuous mode. */ rxfilt |= RL_RXCFG_RX_MULTI; hashes[0] = hashes[1] = 0xffffffff; goto done; } IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); } IF_ADDR_UNLOCK(ifp); if (hashes[0] != 0 || hashes[1] != 0) { /* * For some unfathomable reason, RealTek decided to * reverse the order of the multicast hash registers * in the PCI Express parts. This means we have to * write the hash pattern in reverse order for those * devices. */ if ((sc->rl_flags & RL_FLAG_PCIE) != 0) { h = bswap32(hashes[0]); hashes[0] = bswap32(hashes[1]); hashes[1] = h; } rxfilt |= RL_RXCFG_RX_MULTI; } done: CSR_WRITE_4(sc, RL_MAR0, hashes[0]); CSR_WRITE_4(sc, RL_MAR4, hashes[1]); CSR_WRITE_4(sc, RL_RXCFG, rxfilt); } static void re_reset(struct rl_softc *sc) { int i; RL_LOCK_ASSERT(sc); CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET); for (i = 0; i < RL_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET)) break; } if (i == RL_TIMEOUT) device_printf(sc->rl_dev, "reset never completed!\n"); if ((sc->rl_flags & RL_FLAG_MACRESET) != 0) CSR_WRITE_1(sc, 0x82, 1); if (sc->rl_hwrev == RL_HWREV_8169S) re_gmii_writereg(sc->rl_dev, 1, 0x0b, 0); } #ifdef RE_DIAG /* * The following routine is designed to test for a defect on some * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64# * lines connected to the bus, however for a 32-bit only card, they * should be pulled high. The result of this defect is that the * NIC will not work right if you plug it into a 64-bit slot: DMA * operations will be done with 64-bit transfers, which will fail * because the 64-bit data lines aren't connected. 
* * There's no way to work around this (short of talking a soldering * iron to the board), however we can detect it. The method we use * here is to put the NIC into digital loopback mode, set the receiver * to promiscuous mode, and then try to send a frame. We then compare * the frame data we sent to what was received. If the data matches, * then the NIC is working correctly, otherwise we know the user has * a defective NIC which has been mistakenly plugged into a 64-bit PCI * slot. In the latter case, there's no way the NIC can work correctly, * so we print out a message on the console and abort the device attach. */ static int re_diag(struct rl_softc *sc) { struct ifnet *ifp = sc->rl_ifp; struct mbuf *m0; struct ether_header *eh; struct rl_desc *cur_rx; u_int16_t status; u_int32_t rxstat; int total_len, i, error = 0, phyaddr; u_int8_t dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' }; u_int8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' }; /* Allocate a single mbuf */ MGETHDR(m0, M_DONTWAIT, MT_DATA); if (m0 == NULL) return (ENOBUFS); RL_LOCK(sc); /* * Initialize the NIC in test mode. This sets the chip up * so that it can send and receive frames, but performs the * following special functions: * - Puts receiver in promiscuous mode * - Enables digital loopback mode * - Leaves interrupts turned off */ ifp->if_flags |= IFF_PROMISC; sc->rl_testmode = 1; re_init_locked(sc); sc->rl_flags |= RL_FLAG_LINK; if (sc->rl_type == RL_8169) phyaddr = 1; else phyaddr = 0; re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_RESET); for (i = 0; i < RL_TIMEOUT; i++) { status = re_miibus_readreg(sc->rl_dev, phyaddr, MII_BMCR); if (!(status & BMCR_RESET)) break; } re_miibus_writereg(sc->rl_dev, phyaddr, MII_BMCR, BMCR_LOOP); CSR_WRITE_2(sc, RL_ISR, RL_INTRS); DELAY(100000); /* Put some data in the mbuf */ eh = mtod(m0, struct ether_header *); bcopy ((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN); bcopy ((char *)&src, eh->ether_shost, ETHER_ADDR_LEN); eh->ether_type = htons(ETHERTYPE_IP); m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN; /* * Queue the packet, start transmission. * Note: IF_HANDOFF() ultimately calls re_start() for us. */ CSR_WRITE_2(sc, RL_ISR, 0xFFFF); RL_UNLOCK(sc); /* XXX: re_diag must not be called when in ALTQ mode */ IF_HANDOFF(&ifp->if_snd, m0, ifp); RL_LOCK(sc); m0 = NULL; /* Wait for it to propagate through the chip */ DELAY(100000); for (i = 0; i < RL_TIMEOUT; i++) { status = CSR_READ_2(sc, RL_ISR); CSR_WRITE_2(sc, RL_ISR, status); if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) == (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) break; DELAY(10); } if (i == RL_TIMEOUT) { device_printf(sc->rl_dev, "diagnostic failed, failed to receive packet in" " loopback mode\n"); error = EIO; goto done; } /* * The packet should have been dumped into the first * entry in the RX DMA ring. Grab it from there. */ bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_POSTREAD); bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, sc->rl_ldata.rl_rx_desc[0].rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, sc->rl_ldata.rl_rx_desc[0].rx_dmamap); m0 = sc->rl_ldata.rl_rx_desc[0].rx_m; sc->rl_ldata.rl_rx_desc[0].rx_m = NULL; eh = mtod(m0, struct ether_header *); cur_rx = &sc->rl_ldata.rl_rx_list[0]; total_len = RL_RXBYTES(cur_rx); rxstat = le32toh(cur_rx->rl_cmdstat); if (total_len != ETHER_MIN_LEN) { device_printf(sc->rl_dev, "diagnostic failed, received short packet\n"); error = EIO; goto done; } /* Test that the received packet data matches what we sent. 
*/ if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) || bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) || ntohs(eh->ether_type) != ETHERTYPE_IP) { device_printf(sc->rl_dev, "WARNING, DMA FAILURE!\n"); device_printf(sc->rl_dev, "expected TX data: %6D/%6D/0x%x\n", dst, ":", src, ":", ETHERTYPE_IP); device_printf(sc->rl_dev, "received RX data: %6D/%6D/0x%x\n", eh->ether_dhost, ":", eh->ether_shost, ":", ntohs(eh->ether_type)); device_printf(sc->rl_dev, "You may have a defective 32-bit " "NIC plugged into a 64-bit PCI slot.\n"); device_printf(sc->rl_dev, "Please re-install the NIC in a " "32-bit slot for proper operation.\n"); device_printf(sc->rl_dev, "Read the re(4) man page for more " "details.\n"); error = EIO; } done: /* Turn interface off, release resources */ sc->rl_testmode = 0; sc->rl_flags &= ~RL_FLAG_LINK; ifp->if_flags &= ~IFF_PROMISC; re_stop(sc); if (m0 != NULL) m_freem(m0); RL_UNLOCK(sc); return (error); } #endif /* * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int re_probe(device_t dev) { struct rl_type *t; uint16_t devid, vendor; uint16_t revid, sdevid; int i; vendor = pci_get_vendor(dev); devid = pci_get_device(dev); revid = pci_get_revid(dev); sdevid = pci_get_subdevice(dev); if (vendor == LINKSYS_VENDORID && devid == LINKSYS_DEVICEID_EG1032) { if (sdevid != LINKSYS_SUBDEVICE_EG1032_REV3) { /* * Only attach to rev. 3 of the Linksys EG1032 adapter. * Rev. 2 is supported by sk(4). */ return (ENXIO); } } if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) { if (revid != 0x20) { /* 8139, let rl(4) take care of this device. */ return (ENXIO); } } t = re_devs; for (i = 0; i < sizeof(re_devs) / sizeof(re_devs[0]); i++, t++) { if (vendor == t->rl_vid && devid == t->rl_did) { device_set_desc(dev, t->rl_name); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } /* * Map a single buffer address. */ static void re_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *addr; if (error) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); addr = arg; *addr = segs->ds_addr; } static int re_allocmem(device_t dev, struct rl_softc *sc) { bus_size_t rx_list_size, tx_list_size; int error; int i; rx_list_size = sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc); tx_list_size = sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc); /* * Allocate the parent bus DMA tag appropriate for PCI. * In order to use DAC, RL_CPLUSCMD_PCI_DAC bit of RL_CPLUS_CMD * register should be set. However some RealTek chips are known * to be buggy on DAC handling, therefore disable DAC by limiting * DMA address space to 32bit. PCIe variants of RealTek chips * may not have the limitation but I took safer path. */ error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->rl_parent_tag); if (error) { device_printf(dev, "could not allocate parent DMA tag\n"); return (error); } /* * Allocate map for TX mbufs. */ error = bus_dma_tag_create(sc->rl_parent_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * RL_NTXSEGS, RL_NTXSEGS, 4096, 0, NULL, NULL, &sc->rl_ldata.rl_tx_mtag); if (error) { device_printf(dev, "could not allocate TX DMA tag\n"); return (error); } /* * Allocate map for RX mbufs. 
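 * (Note the sizeof(uint64_t) alignment and the single MCLBYTES
 * segment in the tag created below: RX buffers are loaded one
 * cluster per descriptor.)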
*/ error = bus_dma_tag_create(sc->rl_parent_tag, sizeof(uint64_t), 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &sc->rl_ldata.rl_rx_mtag); if (error) { device_printf(dev, "could not allocate RX DMA tag\n"); return (error); } /* * Allocate map for TX descriptor list. */ error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, tx_list_size, 1, tx_list_size, 0, NULL, NULL, &sc->rl_ldata.rl_tx_list_tag); if (error) { device_printf(dev, "could not allocate TX DMA ring tag\n"); return (error); } /* Allocate DMA'able memory for the TX ring */ error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag, (void **)&sc->rl_ldata.rl_tx_list, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->rl_ldata.rl_tx_list_map); if (error) { device_printf(dev, "could not allocate TX DMA ring\n"); return (error); } /* Load the map for the TX ring. */ sc->rl_ldata.rl_tx_list_addr = 0; error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list, tx_list_size, re_dma_map_addr, &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT); if (error != 0 || sc->rl_ldata.rl_tx_list_addr == 0) { device_printf(dev, "could not load TX DMA ring\n"); return (ENOMEM); } /* Create DMA maps for TX buffers */ for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { error = bus_dmamap_create(sc->rl_ldata.rl_tx_mtag, 0, &sc->rl_ldata.rl_tx_desc[i].tx_dmamap); if (error) { device_printf(dev, "could not create DMA map for TX\n"); return (error); } } /* * Allocate map for RX descriptor list. */ error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, rx_list_size, 1, rx_list_size, 0, NULL, NULL, &sc->rl_ldata.rl_rx_list_tag); if (error) { device_printf(dev, "could not create RX DMA ring tag\n"); return (error); } /* Allocate DMA'able memory for the RX ring */ error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag, (void **)&sc->rl_ldata.rl_rx_list, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->rl_ldata.rl_rx_list_map); if (error) { device_printf(dev, "could not allocate RX DMA ring\n"); return (error); } /* Load the map for the RX ring. */ sc->rl_ldata.rl_rx_list_addr = 0; error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list, rx_list_size, re_dma_map_addr, &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT); if (error != 0 || sc->rl_ldata.rl_rx_list_addr == 0) { device_printf(dev, "could not load RX DMA ring\n"); return (ENOMEM); } /* Create DMA maps for RX buffers */ error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0, &sc->rl_ldata.rl_rx_sparemap); if (error) { device_printf(dev, "could not create spare DMA map for RX\n"); return (error); } for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { error = bus_dmamap_create(sc->rl_ldata.rl_rx_mtag, 0, &sc->rl_ldata.rl_rx_desc[i].rx_dmamap); if (error) { device_printf(dev, "could not create DMA map for RX\n"); return (error); } } return (0); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. 
*/ static int re_attach(device_t dev) { u_char eaddr[ETHER_ADDR_LEN]; u_int16_t as[ETHER_ADDR_LEN / 2]; struct rl_softc *sc; struct ifnet *ifp; struct rl_hwrev *hw_rev; int hwrev; u_int16_t devid, re_did = 0; int error = 0, rid, i; int msic, reg; uint8_t cfg; sc = device_get_softc(dev); sc->rl_dev = dev; mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0); /* * Map control/status registers. */ pci_enable_busmaster(dev); devid = pci_get_device(dev); /* * Prefer memory space register mapping over IO space. * Because RTL8169SC does not seem to work when memory mapping * is used always activate io mapping. */ if (devid == RT_DEVICEID_8169SC) prefer_iomap = 1; if (prefer_iomap == 0) { sc->rl_res_id = PCIR_BAR(1); sc->rl_res_type = SYS_RES_MEMORY; /* RTL8168/8101E seems to use different BARs. */ if (devid == RT_DEVICEID_8168 || devid == RT_DEVICEID_8101E) sc->rl_res_id = PCIR_BAR(2); } else { sc->rl_res_id = PCIR_BAR(0); sc->rl_res_type = SYS_RES_IOPORT; } sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type, &sc->rl_res_id, RF_ACTIVE); if (sc->rl_res == NULL && prefer_iomap == 0) { sc->rl_res_id = PCIR_BAR(0); sc->rl_res_type = SYS_RES_IOPORT; sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type, &sc->rl_res_id, RF_ACTIVE); } if (sc->rl_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); error = ENXIO; goto fail; } sc->rl_btag = rman_get_bustag(sc->rl_res); sc->rl_bhandle = rman_get_bushandle(sc->rl_res); msic = 0; if (pci_find_extcap(dev, PCIY_EXPRESS, ®) == 0) { sc->rl_flags |= RL_FLAG_PCIE; msic = pci_msi_count(dev); if (bootverbose) device_printf(dev, "MSI count : %d\n", msic); } if (msic > 0 && msi_disable == 0) { msic = 1; if (pci_alloc_msi(dev, &msic) == 0) { if (msic == RL_MSI_MESSAGES) { device_printf(dev, "Using %d MSI messages\n", msic); sc->rl_flags |= RL_FLAG_MSI; /* Explicitly set MSI enable bit. */ CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); cfg = CSR_READ_1(sc, RL_CFG2); cfg |= RL_CFG2_MSI; CSR_WRITE_1(sc, RL_CFG2, cfg); CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); } else pci_release_msi(dev); } } /* Allocate interrupt */ if ((sc->rl_flags & RL_FLAG_MSI) == 0) { rid = 0; sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->rl_irq[0] == NULL) { device_printf(dev, "couldn't allocate IRQ resources\n"); error = ENXIO; goto fail; } } else { for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) { sc->rl_irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->rl_irq[i] == NULL) { device_printf(dev, "couldn't llocate IRQ resources for " "message %d\n", rid); error = ENXIO; goto fail; } } } if ((sc->rl_flags & RL_FLAG_MSI) == 0) { CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); cfg = CSR_READ_1(sc, RL_CFG2); if ((cfg & RL_CFG2_MSI) != 0) { device_printf(dev, "turning off MSI enable bit.\n"); cfg &= ~RL_CFG2_MSI; CSR_WRITE_1(sc, RL_CFG2, cfg); } CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); } /* Reset the adapter. */ RL_LOCK(sc); re_reset(sc); RL_UNLOCK(sc); hw_rev = re_hwrevs; hwrev = CSR_READ_4(sc, RL_TXCFG); switch (hwrev & 0x70000000) { case 0x00000000: case 0x10000000: device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0xfc800000); hwrev &= (RL_TXCFG_HWREV | 0x80000000); break; default: device_printf(dev, "Chip rev. 0x%08x\n", hwrev & 0x7c800000); hwrev &= RL_TXCFG_HWREV; break; } device_printf(dev, "MAC rev. 
0x%08x\n", hwrev & 0x00700000); while (hw_rev->rl_desc != NULL) { if (hw_rev->rl_rev == hwrev) { sc->rl_type = hw_rev->rl_type; sc->rl_hwrev = hw_rev->rl_rev; break; } hw_rev++; } if (hw_rev->rl_desc == NULL) { device_printf(dev, "Unknown H/W revision: 0x%08x\n", hwrev); error = ENXIO; goto fail; } switch (hw_rev->rl_rev) { case RL_HWREV_8139CPLUS: sc->rl_flags |= RL_FLAG_NOJUMBO | RL_FLAG_FASTETHER | RL_FLAG_AUTOPAD; break; case RL_HWREV_8100E: case RL_HWREV_8101E: sc->rl_flags |= RL_FLAG_NOJUMBO | RL_FLAG_PHYWAKE | RL_FLAG_FASTETHER; break; case RL_HWREV_8102E: case RL_HWREV_8102EL: sc->rl_flags |= RL_FLAG_NOJUMBO | RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_FASTETHER | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD; break; case RL_HWREV_8168_SPIN1: case RL_HWREV_8168_SPIN2: sc->rl_flags |= RL_FLAG_WOLRXENB; /* FALLTHROUGH */ case RL_HWREV_8168_SPIN3: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_MACSTAT; break; case RL_HWREV_8168C_SPIN2: sc->rl_flags |= RL_FLAG_MACSLEEP; /* FALLTHROUGH */ case RL_HWREV_8168C: if ((hwrev & 0x00700000) == 0x00200000) sc->rl_flags |= RL_FLAG_MACSLEEP; /* FALLTHROUGH */ case RL_HWREV_8168CP: case RL_HWREV_8168D: sc->rl_flags |= RL_FLAG_PHYWAKE | RL_FLAG_PAR | RL_FLAG_DESCV2 | RL_FLAG_MACSTAT | RL_FLAG_CMDSTOP | RL_FLAG_AUTOPAD; /* * These controllers support jumbo frame but it seems * that enabling it requires touching additional magic * registers. Depending on MAC revisions some * controllers need to disable checksum offload. So * disable jumbo frame until I have better idea what * it really requires to make it support. * RTL8168C/CP : supports up to 6KB jumbo frame. * RTL8111C/CP : supports up to 9KB jumbo frame. */ sc->rl_flags |= RL_FLAG_NOJUMBO; break; case RL_HWREV_8169_8110SB: case RL_HWREV_8169_8110SBL: case RL_HWREV_8169_8110SC: case RL_HWREV_8169_8110SCE: sc->rl_flags |= RL_FLAG_PHYWAKE; /* FALLTHROUGH */ case RL_HWREV_8169: case RL_HWREV_8169S: case RL_HWREV_8110S: sc->rl_flags |= RL_FLAG_MACRESET; break; default: break; } /* Enable PME. */ CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); cfg = CSR_READ_1(sc, RL_CFG1); cfg |= RL_CFG1_PME; CSR_WRITE_1(sc, RL_CFG1, cfg); cfg = CSR_READ_1(sc, RL_CFG5); cfg &= RL_CFG5_PME_STS; CSR_WRITE_1(sc, RL_CFG5, cfg); CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); if ((sc->rl_flags & RL_FLAG_PAR) != 0) { /* * XXX Should have a better way to extract station * address from EEPROM. */ for (i = 0; i < ETHER_ADDR_LEN; i++) eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i); } else { sc->rl_eewidth = RL_9356_ADDR_LEN; re_read_eeprom(sc, (caddr_t)&re_did, 0, 1); if (re_did != 0x8129) sc->rl_eewidth = RL_9346_ADDR_LEN; /* * Get station address from the EEPROM. */ re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3); for (i = 0; i < ETHER_ADDR_LEN / 2; i++) as[i] = le16toh(as[i]); bcopy(as, eaddr, sizeof(eaddr)); } if (sc->rl_type == RL_8169) { /* Set RX length mask and number of descriptors. */ sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN; sc->rl_txstart = RL_GTXSTART; sc->rl_ldata.rl_tx_desc_cnt = RL_8169_TX_DESC_CNT; sc->rl_ldata.rl_rx_desc_cnt = RL_8169_RX_DESC_CNT; } else { /* Set RX length mask and number of descriptors. */ sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN; sc->rl_txstart = RL_TXSTART; sc->rl_ldata.rl_tx_desc_cnt = RL_8139_TX_DESC_CNT; sc->rl_ldata.rl_rx_desc_cnt = RL_8139_RX_DESC_CNT; } error = re_allocmem(dev, sc); if (error) goto fail; ifp = sc->rl_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } /* Take controller out of deep sleep mode. 
*/ if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80) CSR_WRITE_1(sc, RL_GPIO, CSR_READ_1(sc, RL_GPIO) | 0x01); else CSR_WRITE_1(sc, RL_GPIO, CSR_READ_1(sc, RL_GPIO) & ~0x01); } /* Take PHY out of power down mode. */ if ((sc->rl_flags & RL_FLAG_PHYWAKE) != 0) { re_gmii_writereg(dev, 1, 0x1f, 0); re_gmii_writereg(dev, 1, 0x0e, 0); } /* Do MII setup */ if (mii_phy_probe(dev, &sc->rl_miibus, re_ifmedia_upd, re_ifmedia_sts)) { device_printf(dev, "MII without any phy!\n"); error = ENXIO; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = re_ioctl; ifp->if_start = re_start; ifp->if_hwassist = RE_CSUM_FEATURES; ifp->if_capabilities = IFCAP_HWCSUM; ifp->if_capenable = ifp->if_capabilities; ifp->if_init = re_init; IFQ_SET_MAXLEN(&ifp->if_snd, RL_IFQ_MAXLEN); ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN; IFQ_SET_READY(&ifp->if_snd); TASK_INIT(&sc->rl_txtask, 1, re_tx_task, ifp); TASK_INIT(&sc->rl_inttask, 0, re_int_task, sc); /* * XXX * Still have no idea how to make TSO work on 8168C, 8168CP, * 8111C and 8111CP. */ if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) { ifp->if_hwassist |= CSUM_TSO; ifp->if_capabilities |= IFCAP_TSO4; } /* * Call MI attach routine. */ ether_ifattach(ifp, eaddr); /* VLAN capability setup */ ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; if (ifp->if_capabilities & IFCAP_HWCSUM) ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; /* Enable WOL if PM is supported. */ if (pci_find_extcap(sc->rl_dev, PCIY_PMG, &reg) == 0) ifp->if_capabilities |= IFCAP_WOL; ifp->if_capenable = ifp->if_capabilities; /* * Don't enable TSO by default. Under certain * circumstances the controller has been seen to * generate corrupted TSO-sized packets. */ ifp->if_hwassist &= ~CSUM_TSO; ifp->if_capenable &= ~IFCAP_TSO4; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif /* * Tell the upper layer(s) we support long frames. * Must appear after the call to ether_ifattach() because * ether_ifattach() sets ifi_hdrlen to the default value. */ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); #ifdef RE_DIAG /* * Perform hardware diagnostic on the original RTL8169. * Some 32-bit cards were incorrectly wired and would * malfunction if plugged into a 64-bit slot. */ if (hwrev == RL_HWREV_8169) { error = re_diag(sc); if (error) { device_printf(dev, "attach aborted due to hardware diag failure\n"); ether_ifdetach(ifp); goto fail; } } #endif /* Hook interrupt last to avoid having to lock softc */ if ((sc->rl_flags & RL_FLAG_MSI) == 0) error = bus_setup_intr(dev, sc->rl_irq[0], INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc, &sc->rl_intrhand[0]); else { for (i = 0; i < RL_MSI_MESSAGES; i++) { error = bus_setup_intr(dev, sc->rl_irq[i], INTR_TYPE_NET | INTR_MPSAFE, re_intr, NULL, sc, &sc->rl_intrhand[i]); if (error != 0) break; } } if (error) { device_printf(dev, "couldn't set up irq\n"); ether_ifdetach(ifp); } fail: if (error) re_detach(dev); return (error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated.
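* The convention below is that the bus_dma tag pointer doubles as the "was allocated" flag, e.g. (sketch): * * if (sc->rl_ldata.rl_tx_list_tag) { bus_dmamap_unload(...); bus_dmamem_free(...); bus_dma_tag_destroy(...); }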
*/ static int re_detach(device_t dev) { struct rl_softc *sc; struct ifnet *ifp; int i, rid; sc = device_get_softc(dev); ifp = sc->rl_ifp; KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized")); /* These should only be active if attach succeeded */ if (device_is_attached(dev)) { #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif RL_LOCK(sc); #if 0 sc->suspended = 1; #endif re_stop(sc); RL_UNLOCK(sc); callout_drain(&sc->rl_stat_callout); taskqueue_drain(taskqueue_fast, &sc->rl_inttask); taskqueue_drain(taskqueue_fast, &sc->rl_txtask); /* * Force off the IFF_UP flag here, in case someone * still had a BPF descriptor attached to this * interface. If they do, ether_ifdetach() will cause * the BPF code to try and clear the promisc mode * flag, which will bubble down to re_ioctl(), * which will try to call re_init() again. This will * turn the NIC back on and restart the MII ticker, * which will panic the system when the kernel tries * to invoke the re_tick() function that isn't there * anymore. */ ifp->if_flags &= ~IFF_UP; ether_ifdetach(ifp); } if (sc->rl_miibus) device_delete_child(dev, sc->rl_miibus); bus_generic_detach(dev); /* * The rest is resource deallocation, so we should already be * stopped here. */ for (i = 0; i < RL_MSI_MESSAGES; i++) { if (sc->rl_intrhand[i] != NULL) { bus_teardown_intr(dev, sc->rl_irq[i], sc->rl_intrhand[i]); sc->rl_intrhand[i] = NULL; } } if (ifp != NULL) if_free(ifp); if ((sc->rl_flags & RL_FLAG_MSI) == 0) { if (sc->rl_irq[0] != NULL) { bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq[0]); sc->rl_irq[0] = NULL; } } else { for (i = 0, rid = 1; i < RL_MSI_MESSAGES; i++, rid++) { if (sc->rl_irq[i] != NULL) { bus_release_resource(dev, SYS_RES_IRQ, rid, sc->rl_irq[i]); sc->rl_irq[i] = NULL; } } pci_release_msi(dev); } if (sc->rl_res) bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id, sc->rl_res); /* Unload and free the RX DMA ring memory and map */ if (sc->rl_ldata.rl_rx_list_tag) { bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map); bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list, sc->rl_ldata.rl_rx_list_map); bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag); } /* Unload and free the TX DMA ring memory and map */ if (sc->rl_ldata.rl_tx_list_tag) { bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map); bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list, sc->rl_ldata.rl_tx_list_map); bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag); } /* Destroy all the RX and TX buffer maps */ if (sc->rl_ldata.rl_tx_mtag) { for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) bus_dmamap_destroy(sc->rl_ldata.rl_tx_mtag, sc->rl_ldata.rl_tx_desc[i].tx_dmamap); bus_dma_tag_destroy(sc->rl_ldata.rl_tx_mtag); } if (sc->rl_ldata.rl_rx_mtag) { for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag, sc->rl_ldata.rl_rx_desc[i].rx_dmamap); if (sc->rl_ldata.rl_rx_sparemap) bus_dmamap_destroy(sc->rl_ldata.rl_rx_mtag, sc->rl_ldata.rl_rx_sparemap); bus_dma_tag_destroy(sc->rl_ldata.rl_rx_mtag); } /* Unload and free the stats buffer and map */ if (sc->rl_ldata.rl_stag) { bus_dmamap_unload(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_smap); bus_dmamem_free(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_stats, sc->rl_ldata.rl_smap); bus_dma_tag_destroy(sc->rl_ldata.rl_stag); } if (sc->rl_parent_tag) bus_dma_tag_destroy(sc->rl_parent_tag); mtx_destroy(&sc->rl_mtx); return (0); } static __inline void re_discard_rxbuf(struct
rl_softc *sc, int idx) { struct rl_desc *desc; struct rl_rxdesc *rxd; uint32_t cmdstat; rxd = &sc->rl_ldata.rl_rx_desc[idx]; desc = &sc->rl_ldata.rl_rx_list[idx]; desc->rl_vlanctl = 0; cmdstat = rxd->rx_size; if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) cmdstat |= RL_RDESC_CMD_EOR; desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); } static int re_newbuf(struct rl_softc *sc, int idx) { struct mbuf *m; struct rl_rxdesc *rxd; bus_dma_segment_t segs[1]; bus_dmamap_t map; struct rl_desc *desc; uint32_t cmdstat; int error, nsegs; m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MCLBYTES; #ifdef RE_FIXUP_RX /* * This is part of an evil trick to deal with non-x86 platforms. * The RealTek chip requires RX buffers to be aligned on 64-bit * boundaries, but that will hose non-x86 machines. To get around * this, we leave some empty space at the start of each buffer * and for non-x86 hosts, we copy the buffer back six bytes * to achieve word alignment. This is slightly more efficient * than allocating a new buffer, copying the contents, and * discarding the old buffer. */ m_adj(m, RE_ETHER_ALIGN); #endif error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_rx_mtag, sc->rl_ldata.rl_rx_sparemap, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { m_freem(m); return (ENOBUFS); } KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); rxd = &sc->rl_ldata.rl_rx_desc[idx]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap); } rxd->rx_m = m; map = rxd->rx_dmamap; rxd->rx_dmamap = sc->rl_ldata.rl_rx_sparemap; rxd->rx_size = segs[0].ds_len; sc->rl_ldata.rl_rx_sparemap = map; bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap, BUS_DMASYNC_PREREAD); desc = &sc->rl_ldata.rl_rx_list[idx]; desc->rl_vlanctl = 0; desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[0].ds_addr)); desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[0].ds_addr)); cmdstat = segs[0].ds_len; if (idx == sc->rl_ldata.rl_rx_desc_cnt - 1) cmdstat |= RL_RDESC_CMD_EOR; desc->rl_cmdstat = htole32(cmdstat | RL_RDESC_CMD_OWN); return (0); } #ifdef RE_FIXUP_RX static __inline void re_fixup_rx(struct mbuf *m) { int i; uint16_t *src, *dst; src = mtod(m, uint16_t *); dst = src - (RE_ETHER_ALIGN - ETHER_ALIGN) / sizeof *src; for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) *dst++ = *src++; m->m_data -= RE_ETHER_ALIGN - ETHER_ALIGN; } #endif static int re_tx_list_init(struct rl_softc *sc) { struct rl_desc *desc; int i; RL_LOCK_ASSERT(sc); bzero(sc->rl_ldata.rl_tx_list, sc->rl_ldata.rl_tx_desc_cnt * sizeof(struct rl_desc)); for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) sc->rl_ldata.rl_tx_desc[i].tx_m = NULL; /* Set EOR.
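* The chip treats the descriptor list as a ring: descriptors are consumed in order until the one marked RL_TDESC_CMD_EOR is reached, after which the hardware wraps back to entry 0.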
*/ desc = &sc->rl_ldata.rl_tx_list[sc->rl_ldata.rl_tx_desc_cnt - 1]; desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOR); bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); sc->rl_ldata.rl_tx_prodidx = 0; sc->rl_ldata.rl_tx_considx = 0; sc->rl_ldata.rl_tx_free = sc->rl_ldata.rl_tx_desc_cnt; return (0); } static int re_rx_list_init(struct rl_softc *sc) { int error, i; bzero(sc->rl_ldata.rl_rx_list, sc->rl_ldata.rl_rx_desc_cnt * sizeof(struct rl_desc)); for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { sc->rl_ldata.rl_rx_desc[i].rx_m = NULL; if ((error = re_newbuf(sc, i)) != 0) return (error); } /* Flush the RX descriptors */ bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); sc->rl_ldata.rl_rx_prodidx = 0; sc->rl_head = sc->rl_tail = NULL; return (0); } /* * RX handler for C+ and 8169. For the gigE chips, we support * the reception of jumbo frames that have been fragmented * across multiple 2K mbuf cluster buffers. */ static int -re_rxeof(struct rl_softc *sc) +re_rxeof(struct rl_softc *sc, int *rx_npktsp) { struct mbuf *m; struct ifnet *ifp; int i, total_len; struct rl_desc *cur_rx; u_int32_t rxstat, rxvlan; - int maxpkt = 16; + int maxpkt = 16, rx_npkts = 0; RL_LOCK_ASSERT(sc); ifp = sc->rl_ifp; /* Invalidate the descriptor memory */ bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (i = sc->rl_ldata.rl_rx_prodidx; maxpkt > 0; i = RL_RX_DESC_NXT(sc, i)) { cur_rx = &sc->rl_ldata.rl_rx_list[i]; rxstat = le32toh(cur_rx->rl_cmdstat); if ((rxstat & RL_RDESC_STAT_OWN) != 0) break; total_len = rxstat & sc->rl_rxlenmask; rxvlan = le32toh(cur_rx->rl_vlanctl); m = sc->rl_ldata.rl_rx_desc[i].rx_m; if (!(rxstat & RL_RDESC_STAT_EOF)) { if (re_newbuf(sc, i) != 0) { /* * If this is part of a multi-fragment packet, * discard all the pieces. */ if (sc->rl_head != NULL) { m_freem(sc->rl_head); sc->rl_head = sc->rl_tail = NULL; } re_discard_rxbuf(sc, i); continue; } m->m_len = RE_RX_DESC_BUFLEN; if (sc->rl_head == NULL) sc->rl_head = sc->rl_tail = m; else { m->m_flags &= ~M_PKTHDR; sc->rl_tail->m_next = m; sc->rl_tail = m; } continue; } /* * NOTE: for the 8139C+, the frame length field * is always 12 bits in size, but for the gigE chips, * it is 13 bits (since the max RX frame length is 16K). * Unfortunately, all 32 bits in the status word * were already used, so to make room for the extra * length bit, RealTek took out the 'frame alignment * error' bit and shifted the other status bits * over one slot. The OWN, EOR, FS and LS bits are * still in the same places. We have already extracted * the frame length and checked the OWN bit, so rather * than using an alternate bit mapping, we shift the * status bits one space to the right so we can evaluate * them using the 8169 status as though it was in the * same format as that of the 8139C+. */ if (sc->rl_type == RL_8169) rxstat >>= 1; /* * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be * set, but if CRC is clear, it will still be a valid frame. */ if (rxstat & RL_RDESC_STAT_RXERRSUM && !(total_len > 8191 && (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT)) { ifp->if_ierrors++; /* * If this is part of a multi-fragment packet, * discard all the pieces. */ if (sc->rl_head != NULL) { m_freem(sc->rl_head); sc->rl_head = sc->rl_tail = NULL; } re_discard_rxbuf(sc, i); continue; } /* * If allocating a replacement mbuf fails, * reload the current one. 
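* That is, re_discard_rxbuf() hands the old cluster back to the chip with the OWN bit set, so the incoming frame is dropped rather than leaving a hole in the ring; if_iqdrops is bumped below.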
*/ if (re_newbuf(sc, i) != 0) { ifp->if_iqdrops++; if (sc->rl_head != NULL) { m_freem(sc->rl_head); sc->rl_head = sc->rl_tail = NULL; } re_discard_rxbuf(sc, i); continue; } if (sc->rl_head != NULL) { m->m_len = total_len % RE_RX_DESC_BUFLEN; if (m->m_len == 0) m->m_len = RE_RX_DESC_BUFLEN; /* * Special case: if there are 4 bytes or less * in this buffer, the mbuf can be discarded: * the last 4 bytes are the CRC, which we don't * care about anyway. */ if (m->m_len <= ETHER_CRC_LEN) { sc->rl_tail->m_len -= (ETHER_CRC_LEN - m->m_len); m_freem(m); } else { m->m_len -= ETHER_CRC_LEN; m->m_flags &= ~M_PKTHDR; sc->rl_tail->m_next = m; } m = sc->rl_head; sc->rl_head = sc->rl_tail = NULL; m->m_pkthdr.len = total_len - ETHER_CRC_LEN; } else m->m_pkthdr.len = m->m_len = (total_len - ETHER_CRC_LEN); #ifdef RE_FIXUP_RX re_fixup_rx(m); #endif ifp->if_ipackets++; m->m_pkthdr.rcvif = ifp; /* Do RX checksumming if enabled */ if (ifp->if_capenable & IFCAP_RXCSUM) { if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) { /* Check IP header checksum */ if (rxstat & RL_RDESC_STAT_PROTOID) m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if (!(rxstat & RL_RDESC_STAT_IPSUMBAD)) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; /* Check TCP/UDP checksum */ if ((RL_TCPPKT(rxstat) && !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || (RL_UDPPKT(rxstat) && !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID|CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } else { /* * RTL8168C/RTL8168CP/RTL8111C/RTL8111CP */ if ((rxstat & RL_RDESC_STAT_PROTOID) && (rxvlan & RL_RDESC_IPV4)) m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if (!(rxstat & RL_RDESC_STAT_IPSUMBAD) && (rxvlan & RL_RDESC_IPV4)) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; if (((rxstat & RL_RDESC_STAT_TCP) && !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || ((rxstat & RL_RDESC_STAT_UDP) && !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID|CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } } maxpkt--; if (rxvlan & RL_RDESC_VLANCTL_TAG) { m->m_pkthdr.ether_vtag = bswap16((rxvlan & RL_RDESC_VLANCTL_DATA)); m->m_flags |= M_VLANTAG; } RL_UNLOCK(sc); (*ifp->if_input)(ifp, m); RL_LOCK(sc); + rx_npkts++; } /* Flush the RX DMA ring */ bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); sc->rl_ldata.rl_rx_prodidx = i; + if (rx_npktsp != NULL) + *rx_npktsp = rx_npkts; if (maxpkt) return(EAGAIN); return(0); } static void re_txeof(struct rl_softc *sc) { struct ifnet *ifp; struct rl_txdesc *txd; u_int32_t txstat; int cons; cons = sc->rl_ldata.rl_tx_considx; if (cons == sc->rl_ldata.rl_tx_prodidx) return; ifp = sc->rl_ifp; /* Invalidate the TX descriptor list */ bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (; cons != sc->rl_ldata.rl_tx_prodidx; cons = RL_TX_DESC_NXT(sc, cons)) { txstat = le32toh(sc->rl_ldata.rl_tx_list[cons].rl_cmdstat); if (txstat & RL_TDESC_STAT_OWN) break; /* * We only stash mbufs in the last descriptor * in a fragment chain, which also happens to * be the only place where the TX status bits * are valid.
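* A frame that re_encap() loaded into three descriptors therefore looks like (sketch): * * desc[si] : OWN|SOF tx_m == NULL * desc[si+1] : OWN tx_m == NULL * desc[ei] : OWN|EOF|status tx_m == mbuf chain * * and the mbuf and its dmamap are reclaimed only when the EOF descriptor comes back from the chip.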
*/ if (txstat & RL_TDESC_CMD_EOF) { txd = &sc->rl_ldata.rl_tx_desc[cons]; bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap); KASSERT(txd->tx_m != NULL, ("%s: freeing NULL mbufs!", __func__)); m_freem(txd->tx_m); txd->tx_m = NULL; if (txstat & (RL_TDESC_STAT_EXCESSCOL| RL_TDESC_STAT_COLCNT)) ifp->if_collisions++; if (txstat & RL_TDESC_STAT_TXERRSUM) ifp->if_oerrors++; else ifp->if_opackets++; } sc->rl_ldata.rl_tx_free++; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } sc->rl_ldata.rl_tx_considx = cons; /* No changes made to the TX ring, so no flush needed */ if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) { #ifdef RE_TX_MODERATION /* * If not all descriptors have been reaped yet, reload * the timer so that we will eventually get another * interrupt that will cause us to re-enter this routine. * This is done in case the transmitter has gone idle. */ CSR_WRITE_4(sc, RL_TIMERCNT, 1); #endif } else sc->rl_watchdog_timer = 0; } static void re_tick(void *xsc) { struct rl_softc *sc; struct mii_data *mii; sc = xsc; RL_LOCK_ASSERT(sc); mii = device_get_softc(sc->rl_miibus); mii_tick(mii); if ((sc->rl_flags & RL_FLAG_LINK) == 0) re_miibus_statchg(sc->rl_dev); /* * Reclaim transmitted frames here. Technically it is not * necessary to do this here, but it ensures periodic reclamation * regardless of the Tx completion interrupt, which seems to be * lost on PCIe based controllers under certain situations. */ re_txeof(sc); re_watchdog(sc); callout_reset(&sc->rl_stat_callout, hz, re_tick, sc); } #ifdef DEVICE_POLLING -static void +static int re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct rl_softc *sc = ifp->if_softc; + int rx_npkts = 0; RL_LOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) - re_poll_locked(ifp, cmd, count); + rx_npkts = re_poll_locked(ifp, cmd, count); RL_UNLOCK(sc); + return (rx_npkts); } -static void +static int re_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct rl_softc *sc = ifp->if_softc; + int rx_npkts; RL_LOCK_ASSERT(sc); sc->rxcycles = count; - re_rxeof(sc); + re_rxeof(sc, &rx_npkts); re_txeof(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_txtask); if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ u_int16_t status; status = CSR_READ_2(sc, RL_ISR); if (status == 0xffff) - return; + return (rx_npkts); if (status) CSR_WRITE_2(sc, RL_ISR, status); if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) && (sc->rl_flags & RL_FLAG_PCIE)) CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); /* * XXX check behaviour on receiver stalls.
*/ if (status & RL_ISR_SYSTEM_ERR) re_init_locked(sc); } + return (rx_npkts); } #endif /* DEVICE_POLLING */ static int re_intr(void *arg) { struct rl_softc *sc; uint16_t status; sc = arg; status = CSR_READ_2(sc, RL_ISR); if (status == 0xFFFF || (status & RL_INTRS_CPLUS) == 0) return (FILTER_STRAY); CSR_WRITE_2(sc, RL_IMR, 0); taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask); return (FILTER_HANDLED); } static void re_int_task(void *arg, int npending) { struct rl_softc *sc; struct ifnet *ifp; u_int16_t status; int rval = 0; sc = arg; ifp = sc->rl_ifp; RL_LOCK(sc); status = CSR_READ_2(sc, RL_ISR); CSR_WRITE_2(sc, RL_ISR, status); if (sc->suspended || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { RL_UNLOCK(sc); return; } #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { RL_UNLOCK(sc); return; } #endif if (status & (RL_ISR_RX_OK|RL_ISR_RX_ERR|RL_ISR_FIFO_OFLOW)) - rval = re_rxeof(sc); + rval = re_rxeof(sc, NULL); /* * Some chips will ignore a second TX request issued * while an existing transmission is in progress. If * the transmitter goes idle but there are still * packets waiting to be sent, we need to restart the * channel here to flush them out. This only seems to * be required with the PCIe devices. */ if ((status & (RL_ISR_TX_OK | RL_ISR_TX_DESC_UNAVAIL)) && (sc->rl_flags & RL_FLAG_PCIE)) CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); if (status & ( #ifdef RE_TX_MODERATION RL_ISR_TIMEOUT_EXPIRED| #else RL_ISR_TX_OK| #endif RL_ISR_TX_ERR|RL_ISR_TX_DESC_UNAVAIL)) re_txeof(sc); if (status & RL_ISR_SYSTEM_ERR) re_init_locked(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_txtask); RL_UNLOCK(sc); if ((CSR_READ_2(sc, RL_ISR) & RL_INTRS_CPLUS) || rval) { taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_inttask); return; } CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); } static int re_encap(struct rl_softc *sc, struct mbuf **m_head) { struct rl_txdesc *txd, *txd_last; bus_dma_segment_t segs[RL_NTXSEGS]; bus_dmamap_t map; struct mbuf *m_new; struct rl_desc *desc; int nsegs, prod; int i, error, ei, si; int padlen; uint32_t cmdstat, csum_flags, vlanctl; RL_LOCK_ASSERT(sc); M_ASSERTPKTHDR((*m_head)); /* * With some of the RealTek chips, using the checksum offload * support in conjunction with the autopadding feature results * in the transmission of corrupt frames. For example, if we * need to send a really small IP fragment that's less than 60 * bytes in size, and IP header checksumming is enabled, the * resulting ethernet frame that appears on the wire will * have garbled payload. To work around this, if TX IP checksum * offload is enabled, we always manually pad short frames out * to the minimum ethernet frame size. */ if ((sc->rl_flags & RL_FLAG_AUTOPAD) == 0 && (*m_head)->m_pkthdr.len < RL_IP4CSUMTX_PADLEN && ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) != 0) { padlen = RL_MIN_FRAMELEN - (*m_head)->m_pkthdr.len; if (M_WRITABLE(*m_head) == 0) { /* Get a writable copy. */ m_new = m_dup(*m_head, M_DONTWAIT); m_freem(*m_head); if (m_new == NULL) { *m_head = NULL; return (ENOBUFS); } *m_head = m_new; } if ((*m_head)->m_next != NULL || M_TRAILINGSPACE(*m_head) < padlen) { m_new = m_defrag(*m_head, M_DONTWAIT); if (m_new == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } } else m_new = *m_head; /* * Manually pad short frames, and zero the pad space * to avoid leaking data. 
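* For example, a frame of len bytes gets padlen = RL_MIN_FRAMELEN - len zero bytes appended (assuming RL_MIN_FRAMELEN is the usual 60-byte Ethernet minimum), so stale mbuf contents never reach the wire.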
*/ bzero(mtod(m_new, char *) + m_new->m_pkthdr.len, padlen); m_new->m_pkthdr.len += padlen; m_new->m_len = m_new->m_pkthdr.len; *m_head = m_new; } prod = sc->rl_ldata.rl_tx_prodidx; txd = &sc->rl_ldata.rl_tx_desc[prod]; error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { m_new = m_collapse(*m_head, M_DONTWAIT, RL_NTXSEGS); if (m_new == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m_new; error = bus_dmamap_load_mbuf_sg(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { m_freem(*m_head); *m_head = NULL; return (error); } } else if (error != 0) return (error); if (nsegs == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } /* Check for number of available descriptors. */ if (sc->rl_ldata.rl_tx_free - nsegs <= 1) { bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap); return (ENOBUFS); } bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, BUS_DMASYNC_PREWRITE); /* * Set up checksum offload. Note: checksum offload bits must * appear in all descriptors of a multi-descriptor transmit * attempt. This is according to testing done with an 8169 * chip. This is a requirement. */ vlanctl = 0; csum_flags = 0; if (((*m_head)->m_pkthdr.csum_flags & CSUM_TSO) != 0) csum_flags = RL_TDESC_CMD_LGSEND | ((uint32_t)(*m_head)->m_pkthdr.tso_segsz << RL_TDESC_CMD_MSSVAL_SHIFT); else { /* * Unconditionally enable IP checksum if TCP or UDP * checksum is required. Otherwise, the TCP/UDP checksum * bits have no effect. */ if (((*m_head)->m_pkthdr.csum_flags & RE_CSUM_FEATURES) != 0) { if ((sc->rl_flags & RL_FLAG_DESCV2) == 0) { csum_flags |= RL_TDESC_CMD_IPCSUM; if (((*m_head)->m_pkthdr.csum_flags & CSUM_TCP) != 0) csum_flags |= RL_TDESC_CMD_TCPCSUM; if (((*m_head)->m_pkthdr.csum_flags & CSUM_UDP) != 0) csum_flags |= RL_TDESC_CMD_UDPCSUM; } else { vlanctl |= RL_TDESC_CMD_IPCSUMV2; if (((*m_head)->m_pkthdr.csum_flags & CSUM_TCP) != 0) vlanctl |= RL_TDESC_CMD_TCPCSUMV2; if (((*m_head)->m_pkthdr.csum_flags & CSUM_UDP) != 0) vlanctl |= RL_TDESC_CMD_UDPCSUMV2; } } } /* * Set up hardware VLAN tagging. Note: vlan tag info must * appear in all descriptors of a multi-descriptor * transmission attempt. */ if ((*m_head)->m_flags & M_VLANTAG) vlanctl |= bswap16((*m_head)->m_pkthdr.ether_vtag) | RL_TDESC_VLANCTL_TAG; si = prod; for (i = 0; i < nsegs; i++, prod = RL_TX_DESC_NXT(sc, prod)) { desc = &sc->rl_ldata.rl_tx_list[prod]; desc->rl_vlanctl = htole32(vlanctl); desc->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr)); desc->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr)); cmdstat = segs[i].ds_len; if (i != 0) cmdstat |= RL_TDESC_CMD_OWN; if (prod == sc->rl_ldata.rl_tx_desc_cnt - 1) cmdstat |= RL_TDESC_CMD_EOR; desc->rl_cmdstat = htole32(cmdstat | csum_flags); sc->rl_ldata.rl_tx_free--; } /* Update producer index. */ sc->rl_ldata.rl_tx_prodidx = prod; /* Set EOF on the last descriptor. */ ei = RL_TX_DESC_PRV(sc, prod); desc = &sc->rl_ldata.rl_tx_list[ei]; desc->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF); desc = &sc->rl_ldata.rl_tx_list[si]; /* Set SOF and transfer ownership of packet to the chip. */ desc->rl_cmdstat |= htole32(RL_TDESC_CMD_OWN | RL_TDESC_CMD_SOF); /* * Ensure that the map for this transmission * is placed at the array index of the last descriptor * in this chain. (Swap last and first dmamaps.)
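* re_txeof() unloads the dmamap found at the EOF index, so the map that was actually loaded (at index si) must migrate to index ei; the idle map that lived at ei takes its place at si.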
*/ txd_last = &sc->rl_ldata.rl_tx_desc[ei]; map = txd->tx_dmamap; txd->tx_dmamap = txd_last->tx_dmamap; txd_last->tx_dmamap = map; txd_last->tx_m = *m_head; return (0); } static void re_tx_task(void *arg, int npending) { struct ifnet *ifp; ifp = arg; re_start(ifp); } /* * Main transmit routine for C+ and gigE NICs. */ static void re_start(struct ifnet *ifp) { struct rl_softc *sc; struct mbuf *m_head; int queued; sc = ifp->if_softc; RL_LOCK(sc); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0) { RL_UNLOCK(sc); return; } for (queued = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->rl_ldata.rl_tx_free > 1;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; if (re_encap(sc, &m_head) != 0) { if (m_head == NULL) break; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } /* * If there's a BPF listener, bounce a copy of this frame * to him. */ ETHER_BPF_MTAP(ifp, m_head); queued++; } if (queued == 0) { #ifdef RE_TX_MODERATION if (sc->rl_ldata.rl_tx_free != sc->rl_ldata.rl_tx_desc_cnt) CSR_WRITE_4(sc, RL_TIMERCNT, 1); #endif RL_UNLOCK(sc); return; } /* Flush the TX descriptors */ bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); CSR_WRITE_1(sc, sc->rl_txstart, RL_TXSTART_START); #ifdef RE_TX_MODERATION /* * Use the countdown timer for interrupt moderation. * 'TX done' interrupts are disabled. Instead, we reset the * countdown timer, which will begin counting until it hits * the value in the TIMERINT register, and then trigger an * interrupt. Each time we write to the TIMERCNT register, * the timer count is reset to 0. */ CSR_WRITE_4(sc, RL_TIMERCNT, 1); #endif /* * Set a timeout in case the chip goes out to lunch. */ sc->rl_watchdog_timer = 5; RL_UNLOCK(sc); } static void re_init(void *xsc) { struct rl_softc *sc = xsc; RL_LOCK(sc); re_init_locked(sc); RL_UNLOCK(sc); } static void re_init_locked(struct rl_softc *sc) { struct ifnet *ifp = sc->rl_ifp; struct mii_data *mii; uint32_t reg; uint16_t cfg; union { uint32_t align_dummy; u_char eaddr[ETHER_ADDR_LEN]; } eaddr; RL_LOCK_ASSERT(sc); mii = device_get_softc(sc->rl_miibus); /* * Cancel pending I/O and free all RX/TX buffers. */ re_stop(sc); /* Put controller into known state. */ re_reset(sc); /* * Enable C+ RX and TX mode, as well as VLAN stripping and * RX checksum offload. We must configure the C+ register * before all others. */ cfg = RL_CPLUSCMD_PCI_MRW; if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) cfg |= RL_CPLUSCMD_RXCSUM_ENB; if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) cfg |= RL_CPLUSCMD_VLANSTRIP; if ((sc->rl_flags & RL_FLAG_MACSTAT) != 0) { cfg |= RL_CPLUSCMD_MACSTAT_DIS; /* XXX magic. */ cfg |= 0x0001; } else cfg |= RL_CPLUSCMD_RXENB | RL_CPLUSCMD_TXENB; CSR_WRITE_2(sc, RL_CPLUS_CMD, cfg); if (sc->rl_hwrev == RL_HWREV_8169_8110SC || sc->rl_hwrev == RL_HWREV_8169_8110SCE) { reg = 0x000fff00; if ((CSR_READ_1(sc, RL_CFG2) & RL_CFG2_PCI66MHZ) != 0) reg |= 0x000000ff; if (sc->rl_hwrev == RL_HWREV_8169_8110SCE) reg |= 0x00f00000; CSR_WRITE_4(sc, 0x7c, reg); /* Disable interrupt mitigation. */ CSR_WRITE_2(sc, 0xe2, 0); } /* * Disable TSO if interface MTU size is greater than MSS * allowed in controller. */ if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) { ifp->if_capenable &= ~IFCAP_TSO4; ifp->if_hwassist &= ~CSUM_TSO; } /* * Init our MAC address. 
Even though the chipset * documentation doesn't mention it, we need to enter "Config * register write enable" mode to modify the ID registers. */ /* Copy MAC address on stack to align. */ bcopy(IF_LLADDR(ifp), eaddr.eaddr, ETHER_ADDR_LEN); CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); CSR_WRITE_4(sc, RL_IDR0, htole32(*(u_int32_t *)(&eaddr.eaddr[0]))); CSR_WRITE_4(sc, RL_IDR4, htole32(*(u_int32_t *)(&eaddr.eaddr[4]))); CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); /* * For C+ mode, initialize the RX descriptors and mbufs. */ re_rx_list_init(sc); re_tx_list_init(sc); /* * Load the addresses of the RX and TX lists into the chip. */ CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI, RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr)); CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO, RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr)); CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI, RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr)); CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO, RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr)); /* * Enable transmit and receive. */ CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); /* * Set the initial TX configuration. */ if (sc->rl_testmode) { if (sc->rl_type == RL_8169) CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG|RL_LOOPTEST_ON); else CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS); } else CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16); /* * Set the initial RX configuration. */ re_set_rxmode(sc); #ifdef DEVICE_POLLING /* * Disable interrupts if we are polling. */ if (ifp->if_capenable & IFCAP_POLLING) CSR_WRITE_2(sc, RL_IMR, 0); else /* otherwise ... */ #endif /* * Enable interrupts. */ if (sc->rl_testmode) CSR_WRITE_2(sc, RL_IMR, 0); else CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); CSR_WRITE_2(sc, RL_ISR, RL_INTRS_CPLUS); /* Set initial TX threshold */ sc->rl_txthresh = RL_TX_THRESH_INIT; /* Start RX/TX process. */ CSR_WRITE_4(sc, RL_MISSEDPKT, 0); #ifdef notdef /* Enable receiver and transmitter. */ CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); #endif #ifdef RE_TX_MODERATION /* * Initialize the timer interrupt register so that * a timer interrupt will be generated once the timer * reaches a certain number of ticks. The timer is * reloaded on each transmit. This gives us TX interrupt * moderation, which dramatically improves TX frame rate. */ if (sc->rl_type == RL_8169) CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800); else CSR_WRITE_4(sc, RL_TIMERINT, 0x400); #endif /* * For 8169 gigE NICs, set the max allowed RX packet * size so we can receive jumbo frames. */ if (sc->rl_type == RL_8169) CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383); if (sc->rl_testmode) return; mii_mediachg(mii); CSR_WRITE_1(sc, RL_CFG1, CSR_READ_1(sc, RL_CFG1) | RL_CFG1_DRVLOAD); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->rl_flags &= ~RL_FLAG_LINK; sc->rl_watchdog_timer = 0; callout_reset(&sc->rl_stat_callout, hz, re_tick, sc); } /* * Set media options. */ static int re_ifmedia_upd(struct ifnet *ifp) { struct rl_softc *sc; struct mii_data *mii; int error; sc = ifp->if_softc; mii = device_get_softc(sc->rl_miibus); RL_LOCK(sc); error = mii_mediachg(mii); RL_UNLOCK(sc); return (error); } /* * Report current media status. 
*/ static void re_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct rl_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->rl_miibus); RL_LOCK(sc); mii_pollstat(mii); RL_UNLOCK(sc); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static int re_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct rl_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int error = 0; switch (command) { case SIOCSIFMTU: if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > RL_JUMBO_MTU) { error = EINVAL; break; } if ((sc->rl_flags & RL_FLAG_NOJUMBO) != 0 && ifr->ifr_mtu > RL_MAX_FRAMELEN) { error = EINVAL; break; } RL_LOCK(sc); if (ifp->if_mtu != ifr->ifr_mtu) ifp->if_mtu = ifr->ifr_mtu; if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) { ifp->if_capenable &= ~IFCAP_TSO4; ifp->if_hwassist &= ~CSUM_TSO; } RL_UNLOCK(sc); break; case SIOCSIFFLAGS: RL_LOCK(sc); if ((ifp->if_flags & IFF_UP) != 0) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { if (((ifp->if_flags ^ sc->rl_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) re_set_rxmode(sc); } else re_init_locked(sc); } else { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) re_stop(sc); } sc->rl_if_flags = ifp->if_flags; RL_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: RL_LOCK(sc); re_set_rxmode(sc); RL_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->rl_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: { int mask, reinit; mask = ifr->ifr_reqcap ^ ifp->if_capenable; reinit = 0; #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(re_poll, ifp); if (error) return(error); RL_LOCK(sc); /* Disable interrupts */ CSR_WRITE_2(sc, RL_IMR, 0x0000); ifp->if_capenable |= IFCAP_POLLING; RL_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); /* Enable interrupts. 
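* Polling is being switched off, so restore the normal interrupt mask; re_poll(), which now reports the number of frames it received, will no longer be invoked for this interface.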
*/ RL_LOCK(sc); CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); ifp->if_capenable &= ~IFCAP_POLLING; RL_UNLOCK(sc); } } #endif /* DEVICE_POLLING */ if (mask & IFCAP_HWCSUM) { ifp->if_capenable ^= IFCAP_HWCSUM; if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist |= RE_CSUM_FEATURES; else ifp->if_hwassist &= ~RE_CSUM_FEATURES; reinit = 1; } if (mask & IFCAP_VLAN_HWTAGGING) { ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; reinit = 1; } if (mask & IFCAP_TSO4) { ifp->if_capenable ^= IFCAP_TSO4; if ((IFCAP_TSO4 & ifp->if_capenable) && (IFCAP_TSO4 & ifp->if_capabilities)) ifp->if_hwassist |= CSUM_TSO; else ifp->if_hwassist &= ~CSUM_TSO; if (ifp->if_mtu > RL_TSO_MTU && (ifp->if_capenable & IFCAP_TSO4) != 0) { ifp->if_capenable &= ~IFCAP_TSO4; ifp->if_hwassist &= ~CSUM_TSO; } } if ((mask & IFCAP_WOL) != 0 && (ifp->if_capabilities & IFCAP_WOL) != 0) { if ((mask & IFCAP_WOL_UCAST) != 0) ifp->if_capenable ^= IFCAP_WOL_UCAST; if ((mask & IFCAP_WOL_MCAST) != 0) ifp->if_capenable ^= IFCAP_WOL_MCAST; if ((mask & IFCAP_WOL_MAGIC) != 0) ifp->if_capenable ^= IFCAP_WOL_MAGIC; } if (reinit && ifp->if_drv_flags & IFF_DRV_RUNNING) re_init(sc); VLAN_CAPABILITIES(ifp); } break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void re_watchdog(struct rl_softc *sc) { struct ifnet *ifp; RL_LOCK_ASSERT(sc); if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer != 0) return; ifp = sc->rl_ifp; re_txeof(sc); if (sc->rl_ldata.rl_tx_free == sc->rl_ldata.rl_tx_desc_cnt) { if_printf(ifp, "watchdog timeout (missed Tx interrupts) " "-- recovering\n"); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_txtask); return; } if_printf(ifp, "watchdog timeout\n"); ifp->if_oerrors++; - re_rxeof(sc); + re_rxeof(sc, NULL); re_init_locked(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) taskqueue_enqueue_fast(taskqueue_fast, &sc->rl_txtask); } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void re_stop(struct rl_softc *sc) { int i; struct ifnet *ifp; struct rl_txdesc *txd; struct rl_rxdesc *rxd; RL_LOCK_ASSERT(sc); ifp = sc->rl_ifp; sc->rl_watchdog_timer = 0; callout_stop(&sc->rl_stat_callout); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); if ((sc->rl_flags & RL_FLAG_CMDSTOP) != 0) CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_STOPREQ | RL_CMD_TX_ENB | RL_CMD_RX_ENB); else CSR_WRITE_1(sc, RL_COMMAND, 0x00); DELAY(1000); CSR_WRITE_2(sc, RL_IMR, 0x0000); CSR_WRITE_2(sc, RL_ISR, 0xFFFF); if (sc->rl_head != NULL) { m_freem(sc->rl_head); sc->rl_head = sc->rl_tail = NULL; } /* Free the TX list buffers. */ for (i = 0; i < sc->rl_ldata.rl_tx_desc_cnt; i++) { txd = &sc->rl_ldata.rl_tx_desc[i]; if (txd->tx_m != NULL) { bus_dmamap_sync(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->rl_ldata.rl_tx_mtag, txd->tx_dmamap); m_freem(txd->tx_m); txd->tx_m = NULL; } } /* Free the RX list buffers. */ for (i = 0; i < sc->rl_ldata.rl_rx_desc_cnt; i++) { rxd = &sc->rl_ldata.rl_rx_desc[i]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rl_ldata.rl_rx_mtag, rxd->rx_dmamap); m_freem(rxd->rx_m); rxd->rx_m = NULL; } } } /* * Device suspend routine. Stop the interface and save some PCI * settings in case the BIOS doesn't restore them properly on * resume.
*/ static int re_suspend(device_t dev) { struct rl_softc *sc; sc = device_get_softc(dev); RL_LOCK(sc); re_stop(sc); re_setwol(sc); sc->suspended = 1; RL_UNLOCK(sc); return (0); } /* * Device resume routine. Restore some PCI settings in case the BIOS * doesn't, re-enable busmastering, and restart the interface if * appropriate. */ static int re_resume(device_t dev) { struct rl_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); RL_LOCK(sc); ifp = sc->rl_ifp; /* Take controller out of sleep mode. */ if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80) CSR_WRITE_1(sc, RL_GPIO, CSR_READ_1(sc, RL_GPIO) | 0x01); } /* reinitialize interface if necessary */ if (ifp->if_flags & IFF_UP) re_init_locked(sc); /* * Clear WOL matching such that normal Rx filtering * wouldn't interfere with WOL patterns. */ re_clrwol(sc); sc->suspended = 0; RL_UNLOCK(sc); return (0); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static int re_shutdown(device_t dev) { struct rl_softc *sc; sc = device_get_softc(dev); RL_LOCK(sc); re_stop(sc); /* * Mark interface as down since otherwise we will panic if * interrupt comes in later on, which can happen in some * cases. */ sc->rl_ifp->if_flags &= ~IFF_UP; re_setwol(sc); RL_UNLOCK(sc); return (0); } static void re_setwol(struct rl_softc *sc) { struct ifnet *ifp; int pmc; uint16_t pmstat; uint8_t v; RL_LOCK_ASSERT(sc); if (pci_find_extcap(sc->rl_dev, PCIY_PMG, &pmc) != 0) return; ifp = sc->rl_ifp; /* Put controller into sleep mode. */ if ((sc->rl_flags & RL_FLAG_MACSLEEP) != 0) { if ((CSR_READ_1(sc, RL_MACDBG) & 0x80) == 0x80) CSR_WRITE_1(sc, RL_GPIO, CSR_READ_1(sc, RL_GPIO) & ~0x01); } if ((ifp->if_capenable & IFCAP_WOL) != 0 && (sc->rl_flags & RL_FLAG_WOLRXENB) != 0) CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RX_ENB); /* Enable config register write. */ CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); /* Enable PME. */ v = CSR_READ_1(sc, RL_CFG1); v &= ~RL_CFG1_PME; if ((ifp->if_capenable & IFCAP_WOL) != 0) v |= RL_CFG1_PME; CSR_WRITE_1(sc, RL_CFG1, v); v = CSR_READ_1(sc, RL_CFG3); v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC); if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) v |= RL_CFG3_WOL_MAGIC; CSR_WRITE_1(sc, RL_CFG3, v); /* Config register write done. */ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); v = CSR_READ_1(sc, RL_CFG5); v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST); v &= ~RL_CFG5_WOL_LANWAKE; if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) v |= RL_CFG5_WOL_UCAST; if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST; if ((ifp->if_capenable & IFCAP_WOL) != 0) v |= RL_CFG5_WOL_LANWAKE; CSR_WRITE_1(sc, RL_CFG5, v); /* * It seems that hardware resets its link speed to 100Mbps in * power down mode so switching to 100Mbps in driver is not * needed. */ /* Request PME if WOL is requested. */ pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2); pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); if ((ifp->if_capenable & IFCAP_WOL) != 0) pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); } static void re_clrwol(struct rl_softc *sc) { int pmc; uint8_t v; RL_LOCK_ASSERT(sc); if (pci_find_extcap(sc->rl_dev, PCIY_PMG, &pmc) != 0) return; /* Enable config register write. */ CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); v = CSR_READ_1(sc, RL_CFG3); v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC); CSR_WRITE_1(sc, RL_CFG3, v); /* Config register write done. 
*/ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); v = CSR_READ_1(sc, RL_CFG5); v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST); v &= ~RL_CFG5_WOL_LANWAKE; CSR_WRITE_1(sc, RL_CFG5, v); } Index: head/sys/dev/sf/if_sf.c =================================================================== --- head/sys/dev/sf/if_sf.c (revision 193095) +++ head/sys/dev/sf/if_sf.c (revision 193096) @@ -1,2716 +1,2722 @@ /*- * Copyright (c) 1997, 1998, 1999 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Adaptec AIC-6915 "Starfire" PCI fast ethernet driver for FreeBSD. * Programming manual is available from: * http://download.adaptec.com/pdfs/user_guides/aic6915_pg.pdf. * * Written by Bill Paul <wpaul@ctr.columbia.edu> * Department of Electrical Engineering * Columbia University, New York City */ /* * The Adaptec AIC-6915 "Starfire" is a 64-bit 10/100 PCI ethernet * controller designed with flexibility and reduced CPU load in mind. * The Starfire offers high and low priority buffer queues, a * producer/consumer index mechanism and several different buffer * queue and completion queue descriptor types. Any one of a number * of different driver designs can be used, depending on system and * OS requirements. This driver makes use of type2 transmit frame * descriptors to take full advantage of fragmented packet buffers * and two RX buffer queues prioritized on size (one queue for small * frames that will fit into a single mbuf, another with full size * mbuf clusters for everything else). The producer/consumer indexes * and completion queues are also used. * * One downside to the Starfire has to do with alignment: buffer * queues must be aligned on 256-byte boundaries, and receive buffers * must be aligned on longword boundaries. The receive buffer alignment * causes problems on strict alignment architectures, where the * packet payload should be longword aligned. There is no simple way * around this.
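* (The workaround used in this driver is sf_fixup_rx(), compiled in on strict-alignment machines; presumably it shifts the received payload back so the IP header lands on a longword boundary, at the cost of a short copy per frame, much like re_fixup_rx() in re(4).)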
* * For receive filtering, the Starfire offers 16 perfect filter slots * and a 512-bit hash table. * * The Starfire has no internal transceiver, relying instead on an * external MII-based transceiver. Accessing registers on external * PHYs is done through a special register map rather than with the * usual bitbang MDIO method. * * Accessing the registers on the Starfire is a little tricky. The * Starfire has a 512K internal register space. When programmed for * PCI memory mapped mode, the entire register space can be accessed * directly. However in I/O space mode, only 256 bytes are directly * mapped into PCI I/O space. The other registers can be accessed * indirectly using the SF_INDIRECTIO_ADDR and SF_INDIRECTIO_DATA * registers inside the 256-byte I/O window. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" MODULE_DEPEND(sf, pci, 1, 1, 1); MODULE_DEPEND(sf, ether, 1, 1, 1); MODULE_DEPEND(sf, miibus, 1, 1, 1); #undef SF_GFP_DEBUG #define SF_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) /* Define this to activate partial TCP/UDP checksum offload. */ #undef SF_PARTIAL_CSUM_SUPPORT static struct sf_type sf_devs[] = { { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", AD_SUBSYSID_62011_REV0, "Adaptec ANA-62011 (rev 0) 10/100BaseTX" }, { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", AD_SUBSYSID_62011_REV1, "Adaptec ANA-62011 (rev 1) 10/100BaseTX" }, { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", AD_SUBSYSID_62022, "Adaptec ANA-62022 10/100BaseTX" }, { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", AD_SUBSYSID_62044_REV0, "Adaptec ANA-62044 (rev 0) 10/100BaseTX" }, { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", AD_SUBSYSID_62044_REV1, "Adaptec ANA-62044 (rev 1) 10/100BaseTX" }, { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", AD_SUBSYSID_62020, "Adaptec ANA-62020 10/100BaseFX" }, { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX", AD_SUBSYSID_69011, "Adaptec ANA-69011 10/100BaseTX" }, }; static int sf_probe(device_t); static int sf_attach(device_t); static int sf_detach(device_t); static int sf_shutdown(device_t); static int sf_suspend(device_t); static int sf_resume(device_t); static void sf_intr(void *); static void sf_tick(void *); static void sf_stats_update(struct sf_softc *); #ifndef __NO_STRICT_ALIGNMENT static __inline void sf_fixup_rx(struct mbuf *); #endif -static void sf_rxeof(struct sf_softc *); +static int sf_rxeof(struct sf_softc *); static void sf_txeof(struct sf_softc *); static int sf_encap(struct sf_softc *, struct mbuf **); static void sf_start(struct ifnet *); static void sf_start_locked(struct ifnet *); static int sf_ioctl(struct ifnet *, u_long, caddr_t); static void sf_download_fw(struct sf_softc *); static void sf_init(void *); static void sf_init_locked(struct sf_softc *); static void sf_stop(struct sf_softc *); static void sf_watchdog(struct sf_softc *); static int sf_ifmedia_upd(struct ifnet *); static void sf_ifmedia_sts(struct ifnet *, struct ifmediareq *); static void sf_reset(struct sf_softc *); static int sf_dma_alloc(struct sf_softc *); static void
sf_dma_free(struct sf_softc *); static int sf_init_rx_ring(struct sf_softc *); static void sf_init_tx_ring(struct sf_softc *); static int sf_newbuf(struct sf_softc *, int); static void sf_rxfilter(struct sf_softc *); static int sf_setperf(struct sf_softc *, int, uint8_t *); static int sf_sethash(struct sf_softc *, caddr_t, int); #ifdef notdef static int sf_setvlan(struct sf_softc *, int, uint32_t); #endif static uint8_t sf_read_eeprom(struct sf_softc *, int); static int sf_miibus_readreg(device_t, int, int); static int sf_miibus_writereg(device_t, int, int, int); static void sf_miibus_statchg(device_t); static void sf_link_task(void *, int); #ifdef DEVICE_POLLING -static void sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); +static int sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); #endif static uint32_t csr_read_4(struct sf_softc *, int); static void csr_write_4(struct sf_softc *, int, uint32_t); static void sf_txthresh_adjust(struct sf_softc *); static int sf_sysctl_stats(SYSCTL_HANDLER_ARGS); static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); static int sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS); static device_method_t sf_methods[] = { /* Device interface */ DEVMETHOD(device_probe, sf_probe), DEVMETHOD(device_attach, sf_attach), DEVMETHOD(device_detach, sf_detach), DEVMETHOD(device_shutdown, sf_shutdown), DEVMETHOD(device_suspend, sf_suspend), DEVMETHOD(device_resume, sf_resume), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, sf_miibus_readreg), DEVMETHOD(miibus_writereg, sf_miibus_writereg), DEVMETHOD(miibus_statchg, sf_miibus_statchg), { NULL, NULL } }; static driver_t sf_driver = { "sf", sf_methods, sizeof(struct sf_softc), }; static devclass_t sf_devclass; DRIVER_MODULE(sf, pci, sf_driver, sf_devclass, 0, 0); DRIVER_MODULE(miibus, sf, miibus_driver, miibus_devclass, 0, 0); #define SF_SETBIT(sc, reg, x) \ csr_write_4(sc, reg, csr_read_4(sc, reg) | (x)) #define SF_CLRBIT(sc, reg, x) \ csr_write_4(sc, reg, csr_read_4(sc, reg) & ~(x)) static uint32_t csr_read_4(struct sf_softc *sc, int reg) { uint32_t val; if (sc->sf_restype == SYS_RES_MEMORY) val = CSR_READ_4(sc, (reg + SF_RMAP_INTREG_BASE)); else { CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE); val = CSR_READ_4(sc, SF_INDIRECTIO_DATA); } return (val); } static uint8_t sf_read_eeprom(struct sf_softc *sc, int reg) { uint8_t val; val = (csr_read_4(sc, SF_EEADDR_BASE + (reg & 0xFFFFFFFC)) >> (8 * (reg & 3))) & 0xFF; return (val); } static void csr_write_4(struct sf_softc *sc, int reg, uint32_t val) { if (sc->sf_restype == SYS_RES_MEMORY) CSR_WRITE_4(sc, (reg + SF_RMAP_INTREG_BASE), val); else { CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE); CSR_WRITE_4(sc, SF_INDIRECTIO_DATA, val); } } /* * Copy the address 'mac' into the perfect RX filter entry at * offset 'idx.' The perfect filter only has 16 entries so do * some sanity tests. 
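* Valid indices therefore run from 0 to SF_RXFILT_PERFECT_CNT - 1; each entry occupies SF_RXFILT_PERFECT_SKIP bytes of filter RAM and is written 16 bits at a time, low-order octets first.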
*/ static int sf_setperf(struct sf_softc *sc, int idx, uint8_t *mac) { if (idx < 0 || idx >= SF_RXFILT_PERFECT_CNT) return (EINVAL); if (mac == NULL) return (EINVAL); csr_write_4(sc, SF_RXFILT_PERFECT_BASE + (idx * SF_RXFILT_PERFECT_SKIP) + 0, mac[5] | (mac[4] << 8)); csr_write_4(sc, SF_RXFILT_PERFECT_BASE + (idx * SF_RXFILT_PERFECT_SKIP) + 4, mac[3] | (mac[2] << 8)); csr_write_4(sc, SF_RXFILT_PERFECT_BASE + (idx * SF_RXFILT_PERFECT_SKIP) + 8, mac[1] | (mac[0] << 8)); return (0); } /* * Set the bit in the 512-bit hash table that corresponds to the * specified mac address 'mac.' If 'prio' is nonzero, update the * priority hash table instead of the filter hash table. */ static int sf_sethash(struct sf_softc *sc, caddr_t mac, int prio) { uint32_t h; if (mac == NULL) return (EINVAL); h = ether_crc32_be(mac, ETHER_ADDR_LEN) >> 23; if (prio) { SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_PRIOOFF + (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF))); } else { SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_ADDROFF + (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF))); } return (0); } #ifdef notdef /* * Set a VLAN tag in the receive filter. */ static int sf_setvlan(struct sf_softc *sc, int idx, uint32_t vlan) { if (idx < 0 || idx >= SF_RXFILT_HASH_CNT) return (EINVAL); csr_write_4(sc, SF_RXFILT_HASH_BASE + (idx * SF_RXFILT_HASH_SKIP) + SF_RXFILT_HASH_VLANOFF, vlan); return (0); } #endif static int sf_miibus_readreg(device_t dev, int phy, int reg) { struct sf_softc *sc; int i; uint32_t val = 0; sc = device_get_softc(dev); for (i = 0; i < SF_TIMEOUT; i++) { val = csr_read_4(sc, SF_PHY_REG(phy, reg)); if ((val & SF_MII_DATAVALID) != 0) break; } if (i == SF_TIMEOUT) return (0); val &= SF_MII_DATAPORT; if (val == 0xffff) return (0); return (val); } static int sf_miibus_writereg(device_t dev, int phy, int reg, int val) { struct sf_softc *sc; int i; int busy; sc = device_get_softc(dev); csr_write_4(sc, SF_PHY_REG(phy, reg), val); for (i = 0; i < SF_TIMEOUT; i++) { busy = csr_read_4(sc, SF_PHY_REG(phy, reg)); if ((busy & SF_MII_BUSY) == 0) break; } return (0); } static void sf_miibus_statchg(device_t dev) { struct sf_softc *sc; sc = device_get_softc(dev); taskqueue_enqueue(taskqueue_swi, &sc->sf_link_task); } static void sf_link_task(void *arg, int pending) { struct sf_softc *sc; struct mii_data *mii; struct ifnet *ifp; uint32_t val; sc = (struct sf_softc *)arg; SF_LOCK(sc); mii = device_get_softc(sc->sf_miibus); ifp = sc->sf_ifp; if (mii == NULL || ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { SF_UNLOCK(sc); return; } if (mii->mii_media_status & IFM_ACTIVE) { if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) sc->sf_link = 1; } else sc->sf_link = 0; val = csr_read_4(sc, SF_MACCFG_1); val &= ~SF_MACCFG1_FULLDUPLEX; val &= ~(SF_MACCFG1_RX_FLOWENB | SF_MACCFG1_TX_FLOWENB); if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) { val |= SF_MACCFG1_FULLDUPLEX; csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_FDX); #ifdef notyet /* Configure flow-control bits. */ if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) val |= SF_MACCFG1_RX_FLOWENB; if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) val |= SF_MACCFG1_TX_FLOWENB; #endif } else csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_HDX); /* Make sure to reset the MAC so the changes take effect.
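* The reset is pulsed: the new value is written with SOFTRESET asserted, held for roughly a millisecond, then written again with SOFTRESET clear.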
*/ csr_write_4(sc, SF_MACCFG_1, val | SF_MACCFG1_SOFTRESET); DELAY(1000); csr_write_4(sc, SF_MACCFG_1, val); val = csr_read_4(sc, SF_TIMER_CTL); if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) val |= SF_TIMER_TIMES_TEN; else val &= ~SF_TIMER_TIMES_TEN; csr_write_4(sc, SF_TIMER_CTL, val); SF_UNLOCK(sc); } static void sf_rxfilter(struct sf_softc *sc) { struct ifnet *ifp; int i; struct ifmultiaddr *ifma; uint8_t dummy[ETHER_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 }; uint32_t rxfilt; ifp = sc->sf_ifp; /* First zot all the existing filters. */ for (i = 1; i < SF_RXFILT_PERFECT_CNT; i++) sf_setperf(sc, i, dummy); for (i = SF_RXFILT_HASH_BASE; i < (SF_RXFILT_HASH_MAX + 1); i += sizeof(uint32_t)) csr_write_4(sc, i, 0); rxfilt = csr_read_4(sc, SF_RXFILT); rxfilt &= ~(SF_RXFILT_PROMISC | SF_RXFILT_ALLMULTI | SF_RXFILT_BROAD); if ((ifp->if_flags & IFF_BROADCAST) != 0) rxfilt |= SF_RXFILT_BROAD; if ((ifp->if_flags & IFF_ALLMULTI) != 0 || (ifp->if_flags & IFF_PROMISC) != 0) { if ((ifp->if_flags & IFF_PROMISC) != 0) rxfilt |= SF_RXFILT_PROMISC; if ((ifp->if_flags & IFF_ALLMULTI) != 0) rxfilt |= SF_RXFILT_ALLMULTI; goto done; } /* Now program new ones. */ i = 1; IF_ADDR_LOCK(ifp); TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; /* * Program the first 15 multicast groups * into the perfect filter. For all others, * use the hash table. */ if (i < SF_RXFILT_PERFECT_CNT) { sf_setperf(sc, i, LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); i++; continue; } sf_sethash(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 0); } IF_ADDR_UNLOCK(ifp); done: csr_write_4(sc, SF_RXFILT, rxfilt); } /* * Set media options. */ static int sf_ifmedia_upd(struct ifnet *ifp) { struct sf_softc *sc; struct mii_data *mii; int error; sc = ifp->if_softc; SF_LOCK(sc); mii = device_get_softc(sc->sf_miibus); if (mii->mii_instance) { struct mii_softc *miisc; LIST_FOREACH(miisc, &mii->mii_phys, mii_list) mii_phy_reset(miisc); } error = mii_mediachg(mii); SF_UNLOCK(sc); return (error); } /* * Report current media status. */ static void sf_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct sf_softc *sc; struct mii_data *mii; sc = ifp->if_softc; SF_LOCK(sc); mii = device_get_softc(sc->sf_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; SF_UNLOCK(sc); } static int sf_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct sf_softc *sc; struct ifreq *ifr; struct mii_data *mii; int error, mask; sc = ifp->if_softc; ifr = (struct ifreq *)data; error = 0; switch (command) { case SIOCSIFFLAGS: SF_LOCK(sc); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { if ((ifp->if_flags ^ sc->sf_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) sf_rxfilter(sc); } else { if (sc->sf_detach == 0) sf_init_locked(sc); } } else { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) sf_stop(sc); } sc->sf_if_flags = ifp->if_flags; SF_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: SF_LOCK(sc); sf_rxfilter(sc); SF_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->sf_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ ifp->if_capenable; #ifdef DEVICE_POLLING if ((mask & IFCAP_POLLING) != 0) { if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { error = ether_poll_register(sf_poll, ifp); if (error != 0) break; SF_LOCK(sc); /* Disable interrupts. 
*/ csr_write_4(sc, SF_IMR, 0); ifp->if_capenable |= IFCAP_POLLING; SF_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); /* Enable interrupts. */ SF_LOCK(sc); csr_write_4(sc, SF_IMR, SF_INTRS); ifp->if_capenable &= ~IFCAP_POLLING; SF_UNLOCK(sc); } } #endif /* DEVICE_POLLING */ if ((mask & IFCAP_TXCSUM) != 0) { if ((IFCAP_TXCSUM & ifp->if_capabilities) != 0) { SF_LOCK(sc); ifp->if_capenable ^= IFCAP_TXCSUM; if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) { ifp->if_hwassist |= SF_CSUM_FEATURES; SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB); } else { ifp->if_hwassist &= ~SF_CSUM_FEATURES; SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB); } SF_UNLOCK(sc); } } if ((mask & IFCAP_RXCSUM) != 0) { if ((IFCAP_RXCSUM & ifp->if_capabilities) != 0) { SF_LOCK(sc); ifp->if_capenable ^= IFCAP_RXCSUM; if ((IFCAP_RXCSUM & ifp->if_capenable) != 0) SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB); else SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB); SF_UNLOCK(sc); } } break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void sf_reset(struct sf_softc *sc) { int i; csr_write_4(sc, SF_GEN_ETH_CTL, 0); SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); DELAY(1000); SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_RESET); for (i = 0; i < SF_TIMEOUT; i++) { DELAY(10); if (!(csr_read_4(sc, SF_PCI_DEVCFG) & SF_PCIDEVCFG_RESET)) break; } if (i == SF_TIMEOUT) device_printf(sc->sf_dev, "reset never completed!\n"); /* Wait a little while for the chip to get its brains in order. */ DELAY(1000); } /* * Probe for an Adaptec AIC-6915 chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. * We also check the subsystem ID so that we can identify exactly which * NIC has been found, if possible. */ static int sf_probe(device_t dev) { struct sf_type *t; uint16_t vid; uint16_t did; uint16_t sdid; int i; vid = pci_get_vendor(dev); did = pci_get_device(dev); sdid = pci_get_subdevice(dev); t = sf_devs; for (i = 0; i < sizeof(sf_devs) / sizeof(sf_devs[0]); i++, t++) { if (vid == t->sf_vid && did == t->sf_did) { if (sdid == t->sf_sdid) { device_set_desc(dev, t->sf_sname); return (BUS_PROBE_DEFAULT); } } } if (vid == AD_VENDORID && did == AD_DEVICEID_STARFIRE) { /* unkown subdevice */ device_set_desc(dev, sf_devs[0].sf_name); return (BUS_PROBE_DEFAULT); } return (ENXIO); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int sf_attach(device_t dev) { int i; struct sf_softc *sc; struct ifnet *ifp; uint32_t reg; int rid, error = 0; uint8_t eaddr[ETHER_ADDR_LEN]; sc = device_get_softc(dev); sc->sf_dev = dev; mtx_init(&sc->sf_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->sf_co, &sc->sf_mtx, 0); TASK_INIT(&sc->sf_link_task, 0, sf_link_task, sc); /* * Map control/status registers. */ pci_enable_busmaster(dev); /* * Prefer memory space register mapping over I/O space as the * hardware requires lots of register access to get various * producer/consumer index during Tx/Rx operation. However this * requires large memory space(512K) to map the entire register * space. 
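 * In the indirect (I/O space) mode every register access instead goes
 * through a two-register window, doubling the bus cycles per access;
 * a sketch of what csr_read_4() above does in that mode:
 *
 *	CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE);
 *	val = CSR_READ_4(sc, SF_INDIRECTIO_DATA);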
*/ sc->sf_rid = PCIR_BAR(0); sc->sf_restype = SYS_RES_MEMORY; sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype, &sc->sf_rid, RF_ACTIVE); if (sc->sf_res == NULL) { reg = pci_read_config(dev, PCIR_BAR(0), 4); if ((reg & PCIM_BAR_MEM_64) == PCIM_BAR_MEM_64) sc->sf_rid = PCIR_BAR(2); else sc->sf_rid = PCIR_BAR(1); sc->sf_restype = SYS_RES_IOPORT; sc->sf_res = bus_alloc_resource_any(dev, sc->sf_restype, &sc->sf_rid, RF_ACTIVE); if (sc->sf_res == NULL) { device_printf(dev, "couldn't allocate resources\n"); mtx_destroy(&sc->sf_mtx); return (ENXIO); } } if (bootverbose) device_printf(dev, "using %s space register mapping\n", sc->sf_restype == SYS_RES_MEMORY ? "memory" : "I/O"); reg = pci_read_config(dev, PCIR_CACHELNSZ, 1); if (reg == 0) { /* * If cache line size is 0, MWI is not used at all, so set * reasonable default. AIC-6915 supports 0, 4, 8, 16, 32 * and 64. */ reg = 16; device_printf(dev, "setting PCI cache line size to %u\n", reg); pci_write_config(dev, PCIR_CACHELNSZ, reg, 1); } else { if (bootverbose) device_printf(dev, "PCI cache line size : %u\n", reg); } /* Enable MWI. */ reg = pci_read_config(dev, PCIR_COMMAND, 2); reg |= PCIM_CMD_MWRICEN; pci_write_config(dev, PCIR_COMMAND, reg, 2); /* Allocate interrupt. */ rid = 0; sc->sf_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->sf_irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, sf_sysctl_stats, "I", "Statistics"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "int_mod", CTLTYPE_INT | CTLFLAG_RW, &sc->sf_int_mod, 0, sysctl_hw_sf_int_mod, "I", "sf interrupt moderation"); /* Pull in device tunables. */ sc->sf_int_mod = SF_IM_DEFAULT; error = resource_int_value(device_get_name(dev), device_get_unit(dev), "int_mod", &sc->sf_int_mod); if (error == 0) { if (sc->sf_int_mod < SF_IM_MIN || sc->sf_int_mod > SF_IM_MAX) { device_printf(dev, "int_mod value out of range; " "using default: %d\n", SF_IM_DEFAULT); sc->sf_int_mod = SF_IM_DEFAULT; } } /* Reset the adapter. */ sf_reset(sc); /* * Get station address from the EEPROM. */ for (i = 0; i < ETHER_ADDR_LEN; i++) eaddr[i] = sf_read_eeprom(sc, SF_EE_NODEADDR + ETHER_ADDR_LEN - i); /* Allocate DMA resources. */ if (sf_dma_alloc(sc) != 0) { error = ENOSPC; goto fail; } sc->sf_txthresh = SF_MIN_TX_THRESHOLD; ifp = sc->sf_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not allocate ifnet structure\n"); error = ENOSPC; goto fail; } /* Do MII setup. */ if (mii_phy_probe(dev, &sc->sf_miibus, sf_ifmedia_upd, sf_ifmedia_sts)) { device_printf(dev, "MII without any phy!\n"); error = ENXIO; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = sf_ioctl; ifp->if_start = sf_start; ifp->if_init = sf_init; IFQ_SET_MAXLEN(&ifp->if_snd, SF_TX_DLIST_CNT - 1); ifp->if_snd.ifq_drv_maxlen = SF_TX_DLIST_CNT - 1; IFQ_SET_READY(&ifp->if_snd); /* * With the help of firmware, AIC-6915 supports * Tx/Rx TCP/UDP checksum offload. */ ifp->if_hwassist = SF_CSUM_FEATURES; ifp->if_capabilities = IFCAP_HWCSUM; /* * Call MI attach routine. */ ether_ifattach(ifp, eaddr); /* VLAN capability setup. 
*/ ifp->if_capabilities |= IFCAP_VLAN_MTU; ifp->if_capenable = ifp->if_capabilities; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif /* * Tell the upper layer(s) we support long frames. * Must appear after the call to ether_ifattach() because * ether_ifattach() sets ifi_hdrlen to the default value. */ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); /* Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->sf_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, sf_intr, sc, &sc->sf_intrhand); if (error) { device_printf(dev, "couldn't set up irq\n"); ether_ifdetach(ifp); goto fail; } fail: if (error) sf_detach(dev); return (error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. */ static int sf_detach(device_t dev) { struct sf_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->sf_ifp; #ifdef DEVICE_POLLING if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif /* These should only be active if attach succeeded */ if (device_is_attached(dev)) { SF_LOCK(sc); sc->sf_detach = 1; sf_stop(sc); SF_UNLOCK(sc); callout_drain(&sc->sf_co); taskqueue_drain(taskqueue_swi, &sc->sf_link_task); if (ifp != NULL) ether_ifdetach(ifp); } if (sc->sf_miibus) { device_delete_child(dev, sc->sf_miibus); sc->sf_miibus = NULL; } bus_generic_detach(dev); if (sc->sf_intrhand != NULL) bus_teardown_intr(dev, sc->sf_irq, sc->sf_intrhand); if (sc->sf_irq != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_irq); if (sc->sf_res != NULL) bus_release_resource(dev, sc->sf_restype, sc->sf_rid, sc->sf_res); sf_dma_free(sc); if (ifp != NULL) if_free(ifp); mtx_destroy(&sc->sf_mtx); return (0); } struct sf_dmamap_arg { bus_addr_t sf_busaddr; }; static void sf_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct sf_dmamap_arg *ctx; if (error != 0) return; ctx = arg; ctx->sf_busaddr = segs[0].ds_addr; } static int sf_dma_alloc(struct sf_softc *sc) { struct sf_dmamap_arg ctx; struct sf_txdesc *txd; struct sf_rxdesc *rxd; bus_addr_t lowaddr; bus_addr_t rx_ring_end, rx_cring_end; bus_addr_t tx_ring_end, tx_cring_end; int error, i; lowaddr = BUS_SPACE_MAXADDR; again: /* Create parent DMA tag. */ error = bus_dma_tag_create( bus_get_dma_tag(sc->sf_dev), /* parent */ 1, 0, /* alignment, boundary */ lowaddr, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 0, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sf_cdata.sf_parent_tag); if (error != 0) { device_printf(sc->sf_dev, "failed to create parent DMA tag\n"); goto fail; } /* Create tag for Tx ring. */ error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ SF_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ SF_TX_DLIST_SIZE, /* maxsize */ 1, /* nsegments */ SF_TX_DLIST_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sf_cdata.sf_tx_ring_tag); if (error != 0) { device_printf(sc->sf_dev, "failed to create Tx ring DMA tag\n"); goto fail; } /* Create tag for Tx completion ring. 
*/ error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ SF_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ SF_TX_CLIST_SIZE, /* maxsize */ 1, /* nsegments */ SF_TX_CLIST_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sf_cdata.sf_tx_cring_tag); if (error != 0) { device_printf(sc->sf_dev, "failed to create Tx completion ring DMA tag\n"); goto fail; } /* Create tag for Rx ring. */ error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ SF_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ SF_RX_DLIST_SIZE, /* maxsize */ 1, /* nsegments */ SF_RX_DLIST_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sf_cdata.sf_rx_ring_tag); if (error != 0) { device_printf(sc->sf_dev, "failed to create Rx ring DMA tag\n"); goto fail; } /* Create tag for Rx completion ring. */ error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ SF_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ SF_RX_CLIST_SIZE, /* maxsize */ 1, /* nsegments */ SF_RX_CLIST_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sf_cdata.sf_rx_cring_tag); if (error != 0) { device_printf(sc->sf_dev, "failed to create Rx completion ring DMA tag\n"); goto fail; } /* Create tag for Tx buffers. */ error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES * SF_MAXTXSEGS, /* maxsize */ SF_MAXTXSEGS, /* nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sf_cdata.sf_tx_tag); if (error != 0) { device_printf(sc->sf_dev, "failed to create Tx DMA tag\n"); goto fail; } /* Create tag for Rx buffers. */ error = bus_dma_tag_create(sc->sf_cdata.sf_parent_tag,/* parent */ SF_RX_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, /* maxsize */ 1, /* nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sf_cdata.sf_rx_tag); if (error != 0) { device_printf(sc->sf_dev, "failed to create Rx DMA tag\n"); goto fail; } /* Allocate DMA'able memory and load the DMA map for Tx ring. */ error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_ring_tag, (void **)&sc->sf_rdata.sf_tx_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_ring_map); if (error != 0) { device_printf(sc->sf_dev, "failed to allocate DMA'able memory for Tx ring\n"); goto fail; } ctx.sf_busaddr = 0; error = bus_dmamap_load(sc->sf_cdata.sf_tx_ring_tag, sc->sf_cdata.sf_tx_ring_map, sc->sf_rdata.sf_tx_ring, SF_TX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0); if (error != 0 || ctx.sf_busaddr == 0) { device_printf(sc->sf_dev, "failed to load DMA'able memory for Tx ring\n"); goto fail; } sc->sf_rdata.sf_tx_ring_paddr = ctx.sf_busaddr; /* * Allocate DMA'able memory and load the DMA map for Tx completion ring. 
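 * The same three-step idiom is repeated for all four rings (a sketch;
 * sf_dmamap_cb() stores the loaded bus address into ctx):
 *
 *	bus_dmamem_alloc(tag, &ring, BUS_DMA_WAITOK | ..., &map);
 *	bus_dmamap_load(tag, map, ring, size, sf_dmamap_cb, &ctx, 0);
 *	ring_paddr = ctx.sf_busaddr;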
*/
	error = bus_dmamem_alloc(sc->sf_cdata.sf_tx_cring_tag,
	    (void **)&sc->sf_rdata.sf_tx_cring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_tx_cring_map);
	if (error != 0) {
		device_printf(sc->sf_dev,
		    "failed to allocate DMA'able memory for "
		    "Tx completion ring\n");
		goto fail;
	}

	ctx.sf_busaddr = 0;
	error = bus_dmamap_load(sc->sf_cdata.sf_tx_cring_tag,
	    sc->sf_cdata.sf_tx_cring_map, sc->sf_rdata.sf_tx_cring,
	    SF_TX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.sf_busaddr == 0) {
		device_printf(sc->sf_dev,
		    "failed to load DMA'able memory for Tx completion ring\n");
		goto fail;
	}
	sc->sf_rdata.sf_tx_cring_paddr = ctx.sf_busaddr;

	/* Allocate DMA'able memory and load the DMA map for Rx ring. */
	error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_ring_tag,
	    (void **)&sc->sf_rdata.sf_rx_ring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_ring_map);
	if (error != 0) {
		device_printf(sc->sf_dev,
		    "failed to allocate DMA'able memory for Rx ring\n");
		goto fail;
	}

	ctx.sf_busaddr = 0;
	error = bus_dmamap_load(sc->sf_cdata.sf_rx_ring_tag,
	    sc->sf_cdata.sf_rx_ring_map, sc->sf_rdata.sf_rx_ring,
	    SF_RX_DLIST_SIZE, sf_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.sf_busaddr == 0) {
		device_printf(sc->sf_dev,
		    "failed to load DMA'able memory for Rx ring\n");
		goto fail;
	}
	sc->sf_rdata.sf_rx_ring_paddr = ctx.sf_busaddr;

	/*
	 * Allocate DMA'able memory and load the DMA map for Rx completion
	 * ring.
	 */
	error = bus_dmamem_alloc(sc->sf_cdata.sf_rx_cring_tag,
	    (void **)&sc->sf_rdata.sf_rx_cring, BUS_DMA_WAITOK |
	    BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sf_cdata.sf_rx_cring_map);
	if (error != 0) {
		device_printf(sc->sf_dev,
		    "failed to allocate DMA'able memory for "
		    "Rx completion ring\n");
		goto fail;
	}

	ctx.sf_busaddr = 0;
	error = bus_dmamap_load(sc->sf_cdata.sf_rx_cring_tag,
	    sc->sf_cdata.sf_rx_cring_map, sc->sf_rdata.sf_rx_cring,
	    SF_RX_CLIST_SIZE, sf_dmamap_cb, &ctx, 0);
	if (error != 0 || ctx.sf_busaddr == 0) {
		device_printf(sc->sf_dev,
		    "failed to load DMA'able memory for Rx completion ring\n");
		goto fail;
	}
	sc->sf_rdata.sf_rx_cring_paddr = ctx.sf_busaddr;

	/*
	 * Tx descriptor ring and Tx completion ring should be addressed in
	 * the same 4GB space. The same rule applies to Rx ring and Rx
	 * completion ring. Unfortunately there is no way to specify this
	 * boundary restriction with bus_dma(9). So just try to allocate
	 * without the restriction and check that the restriction was
	 * satisfied; for example, a ring that starts just below a 4GB
	 * boundary has its end in the next 4GB window, so SF_ADDR_HI of
	 * its start and end differ. If the check fails, fall back to
	 * 32-bit DMA addressing mode, which always guarantees the
	 * restriction.
	 */
	tx_ring_end = sc->sf_rdata.sf_tx_ring_paddr + SF_TX_DLIST_SIZE;
	tx_cring_end = sc->sf_rdata.sf_tx_cring_paddr + SF_TX_CLIST_SIZE;
	rx_ring_end = sc->sf_rdata.sf_rx_ring_paddr + SF_RX_DLIST_SIZE;
	rx_cring_end = sc->sf_rdata.sf_rx_cring_paddr + SF_RX_CLIST_SIZE;
	if ((SF_ADDR_HI(sc->sf_rdata.sf_tx_ring_paddr) !=
	    SF_ADDR_HI(tx_cring_end)) ||
	    (SF_ADDR_HI(sc->sf_rdata.sf_tx_cring_paddr) !=
	    SF_ADDR_HI(tx_ring_end)) ||
	    (SF_ADDR_HI(sc->sf_rdata.sf_rx_ring_paddr) !=
	    SF_ADDR_HI(rx_cring_end)) ||
	    (SF_ADDR_HI(sc->sf_rdata.sf_rx_cring_paddr) !=
	    SF_ADDR_HI(rx_ring_end))) {
		device_printf(sc->sf_dev,
		    "switching to 32bit DMA mode\n");
		sf_dma_free(sc);
		/* Limit DMA address space to 32bit and try again. */
		lowaddr = BUS_SPACE_MAXADDR_32BIT;
		goto again;
	}

	/* Create DMA maps for Tx buffers.
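 * One map per descriptor lets sf_txeof() unload and free each mbuf
 * chain independently. The Rx side below also creates sf_rx_sparemap
 * so that sf_newbuf() can swap a fresh map in and recycle the old one
 * without ever stranding a loaded mapping:
 *
 *	map = rxd->rx_dmamap;
 *	rxd->rx_dmamap = sc->sf_cdata.sf_rx_sparemap;
 *	sc->sf_cdata.sf_rx_sparemap = map;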
*/ for (i = 0; i < SF_TX_DLIST_CNT; i++) { txd = &sc->sf_cdata.sf_txdesc[i]; txd->tx_m = NULL; txd->ndesc = 0; txd->tx_dmamap = NULL; error = bus_dmamap_create(sc->sf_cdata.sf_tx_tag, 0, &txd->tx_dmamap); if (error != 0) { device_printf(sc->sf_dev, "failed to create Tx dmamap\n"); goto fail; } } /* Create DMA maps for Rx buffers. */ if ((error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0, &sc->sf_cdata.sf_rx_sparemap)) != 0) { device_printf(sc->sf_dev, "failed to create spare Rx dmamap\n"); goto fail; } for (i = 0; i < SF_RX_DLIST_CNT; i++) { rxd = &sc->sf_cdata.sf_rxdesc[i]; rxd->rx_m = NULL; rxd->rx_dmamap = NULL; error = bus_dmamap_create(sc->sf_cdata.sf_rx_tag, 0, &rxd->rx_dmamap); if (error != 0) { device_printf(sc->sf_dev, "failed to create Rx dmamap\n"); goto fail; } } fail: return (error); } static void sf_dma_free(struct sf_softc *sc) { struct sf_txdesc *txd; struct sf_rxdesc *rxd; int i; /* Tx ring. */ if (sc->sf_cdata.sf_tx_ring_tag) { if (sc->sf_cdata.sf_tx_ring_map) bus_dmamap_unload(sc->sf_cdata.sf_tx_ring_tag, sc->sf_cdata.sf_tx_ring_map); if (sc->sf_cdata.sf_tx_ring_map && sc->sf_rdata.sf_tx_ring) bus_dmamem_free(sc->sf_cdata.sf_tx_ring_tag, sc->sf_rdata.sf_tx_ring, sc->sf_cdata.sf_tx_ring_map); sc->sf_rdata.sf_tx_ring = NULL; sc->sf_cdata.sf_tx_ring_map = NULL; bus_dma_tag_destroy(sc->sf_cdata.sf_tx_ring_tag); sc->sf_cdata.sf_tx_ring_tag = NULL; } /* Tx completion ring. */ if (sc->sf_cdata.sf_tx_cring_tag) { if (sc->sf_cdata.sf_tx_cring_map) bus_dmamap_unload(sc->sf_cdata.sf_tx_cring_tag, sc->sf_cdata.sf_tx_cring_map); if (sc->sf_cdata.sf_tx_cring_map && sc->sf_rdata.sf_tx_cring) bus_dmamem_free(sc->sf_cdata.sf_tx_cring_tag, sc->sf_rdata.sf_tx_cring, sc->sf_cdata.sf_tx_cring_map); sc->sf_rdata.sf_tx_cring = NULL; sc->sf_cdata.sf_tx_cring_map = NULL; bus_dma_tag_destroy(sc->sf_cdata.sf_tx_cring_tag); sc->sf_cdata.sf_tx_cring_tag = NULL; } /* Rx ring. */ if (sc->sf_cdata.sf_rx_ring_tag) { if (sc->sf_cdata.sf_rx_ring_map) bus_dmamap_unload(sc->sf_cdata.sf_rx_ring_tag, sc->sf_cdata.sf_rx_ring_map); if (sc->sf_cdata.sf_rx_ring_map && sc->sf_rdata.sf_rx_ring) bus_dmamem_free(sc->sf_cdata.sf_rx_ring_tag, sc->sf_rdata.sf_rx_ring, sc->sf_cdata.sf_rx_ring_map); sc->sf_rdata.sf_rx_ring = NULL; sc->sf_cdata.sf_rx_ring_map = NULL; bus_dma_tag_destroy(sc->sf_cdata.sf_rx_ring_tag); sc->sf_cdata.sf_rx_ring_tag = NULL; } /* Rx completion ring. */ if (sc->sf_cdata.sf_rx_cring_tag) { if (sc->sf_cdata.sf_rx_cring_map) bus_dmamap_unload(sc->sf_cdata.sf_rx_cring_tag, sc->sf_cdata.sf_rx_cring_map); if (sc->sf_cdata.sf_rx_cring_map && sc->sf_rdata.sf_rx_cring) bus_dmamem_free(sc->sf_cdata.sf_rx_cring_tag, sc->sf_rdata.sf_rx_cring, sc->sf_cdata.sf_rx_cring_map); sc->sf_rdata.sf_rx_cring = NULL; sc->sf_cdata.sf_rx_cring_map = NULL; bus_dma_tag_destroy(sc->sf_cdata.sf_rx_cring_tag); sc->sf_cdata.sf_rx_cring_tag = NULL; } /* Tx buffers. */ if (sc->sf_cdata.sf_tx_tag) { for (i = 0; i < SF_TX_DLIST_CNT; i++) { txd = &sc->sf_cdata.sf_txdesc[i]; if (txd->tx_dmamap) { bus_dmamap_destroy(sc->sf_cdata.sf_tx_tag, txd->tx_dmamap); txd->tx_dmamap = NULL; } } bus_dma_tag_destroy(sc->sf_cdata.sf_tx_tag); sc->sf_cdata.sf_tx_tag = NULL; } /* Rx buffers. 
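 * Teardown mirrors sf_dma_alloc() in reverse: destroy each per-buffer
 * map (plus the spare), then the tag itself; the rings were already
 * unloaded, freed, and their tags destroyed above.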
*/
	if (sc->sf_cdata.sf_rx_tag) {
		for (i = 0; i < SF_RX_DLIST_CNT; i++) {
			rxd = &sc->sf_cdata.sf_rxdesc[i];
			if (rxd->rx_dmamap) {
				bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag,
				    rxd->rx_dmamap);
				rxd->rx_dmamap = NULL;
			}
		}
		if (sc->sf_cdata.sf_rx_sparemap) {
			bus_dmamap_destroy(sc->sf_cdata.sf_rx_tag,
			    sc->sf_cdata.sf_rx_sparemap);
			sc->sf_cdata.sf_rx_sparemap = NULL;
		}
		bus_dma_tag_destroy(sc->sf_cdata.sf_rx_tag);
		sc->sf_cdata.sf_rx_tag = NULL;
	}
	if (sc->sf_cdata.sf_parent_tag) {
		bus_dma_tag_destroy(sc->sf_cdata.sf_parent_tag);
		sc->sf_cdata.sf_parent_tag = NULL;
	}
}

static int
sf_init_rx_ring(struct sf_softc *sc)
{
	struct sf_ring_data *rd;
	int i;

	sc->sf_cdata.sf_rxc_cons = 0;

	rd = &sc->sf_rdata;
	bzero(rd->sf_rx_ring, SF_RX_DLIST_SIZE);
	bzero(rd->sf_rx_cring, SF_RX_CLIST_SIZE);

	for (i = 0; i < SF_RX_DLIST_CNT; i++) {
		if (sf_newbuf(sc, i) != 0)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag,
	    sc->sf_cdata.sf_rx_cring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag,
	    sc->sf_cdata.sf_rx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

static void
sf_init_tx_ring(struct sf_softc *sc)
{
	struct sf_ring_data *rd;
	int i;

	sc->sf_cdata.sf_tx_prod = 0;
	sc->sf_cdata.sf_tx_cnt = 0;
	sc->sf_cdata.sf_txc_cons = 0;

	rd = &sc->sf_rdata;
	bzero(rd->sf_tx_ring, SF_TX_DLIST_SIZE);
	bzero(rd->sf_tx_cring, SF_TX_CLIST_SIZE);
	for (i = 0; i < SF_TX_DLIST_CNT; i++) {
		rd->sf_tx_ring[i].sf_tx_ctrl = htole32(SF_TX_DESC_ID);
		sc->sf_cdata.sf_txdesc[i].tx_m = NULL;
		sc->sf_cdata.sf_txdesc[i].ndesc = 0;
	}
	rd->sf_tx_ring[i - 1].sf_tx_ctrl |= htole32(SF_TX_DESC_END);

	bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag,
	    sc->sf_cdata.sf_tx_ring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag,
	    sc->sf_cdata.sf_tx_cring_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
sf_newbuf(struct sf_softc *sc, int idx)
{
	struct sf_rx_rdesc *desc;
	struct sf_rxdesc *rxd;
	struct mbuf *m;
	bus_dma_segment_t segs[1];
	bus_dmamap_t map;
	int nsegs;

	m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (ENOBUFS);
	m->m_len = m->m_pkthdr.len = MCLBYTES;
	m_adj(m, sizeof(uint32_t));

	if (bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_rx_tag,
	    sc->sf_cdata.sf_rx_sparemap, m, segs, &nsegs, 0) != 0) {
		m_freem(m);
		return (ENOBUFS);
	}
	KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));

	rxd = &sc->sf_cdata.sf_rxdesc[idx];
	if (rxd->rx_m != NULL) {
		bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap);
	}
	map = rxd->rx_dmamap;
	rxd->rx_dmamap = sc->sf_cdata.sf_rx_sparemap;
	sc->sf_cdata.sf_rx_sparemap = map;
	bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap,
	    BUS_DMASYNC_PREREAD);
	rxd->rx_m = m;
	desc = &sc->sf_rdata.sf_rx_ring[idx];
	desc->sf_addr = htole64(segs[0].ds_addr);

	return (0);
}

#ifndef __NO_STRICT_ALIGNMENT
static __inline void
sf_fixup_rx(struct mbuf *m)
{
	int i;
	uint16_t *src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;
}
#endif

/*
 * The starfire is programmed to use 'normal' mode for packet reception,
 * which means we use the consumer/producer model for both the buffer
 * descriptor queue and the completion descriptor queue.
The only problem
 * with this is that it involves a lot of register accesses: we have to
 * read the RX completion consumer and producer indexes and the RX buffer
 * producer index, plus the RX completion consumer and RX buffer producer
 * indexes have to be updated. It would have been easier if Adaptec had
 * put each index in a separate register, especially given that the damn
 * NIC has a 512K register space.
 *
 * In spite of all the lovely features that Adaptec crammed into the 6915,
 * it is marred by one truly stupid design flaw, which is that receive
 * buffer addresses must be aligned on a longword boundary. This forces
 * the packet payload to be unaligned, which is suboptimal on the x86 and
 * completely unusable on the Alpha. Our only recourse is to copy received
 * packets into properly aligned buffers before handing them off.
 */
-static void
+static int
sf_rxeof(struct sf_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct sf_rxdesc *rxd;
	struct sf_rx_rcdesc *cur_cmp;
-	int cons, eidx, prog;
+	int cons, eidx, prog, rx_npkts;
	uint32_t status, status2;

	SF_LOCK_ASSERT(sc);

	ifp = sc->sf_ifp;
+	rx_npkts = 0;

	bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag,
	    sc->sf_cdata.sf_rx_ring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag,
	    sc->sf_cdata.sf_rx_cring_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/*
	 * To reduce register access, directly read Receive completion
	 * queue entry.
	 */
	eidx = 0;
	prog = 0;
	for (cons = sc->sf_cdata.sf_rxc_cons; ;
	    SF_INC(cons, SF_RX_CLIST_CNT)) {
		cur_cmp = &sc->sf_rdata.sf_rx_cring[cons];
		status = le32toh(cur_cmp->sf_rx_status1);
		if (status == 0)
			break;
#ifdef DEVICE_POLLING
		if ((ifp->if_capenable & IFCAP_POLLING) != 0) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		prog++;
		eidx = (status & SF_RX_CMPDESC_EIDX) >> 16;
		rxd = &sc->sf_cdata.sf_rxdesc[eidx];
		m = rxd->rx_m;

		/*
		 * Note, if_ipackets and if_ierrors counters
		 * are handled in sf_stats_update().
		 */
		if ((status & SF_RXSTAT1_OK) == 0) {
			cur_cmp->sf_rx_status1 = 0;
			continue;
		}

		if (sf_newbuf(sc, eidx) != 0) {
			ifp->if_iqdrops++;
			cur_cmp->sf_rx_status1 = 0;
			continue;
		}

		/* AIC-6915 supports TCP/UDP checksum offload. */
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			status2 = le32toh(cur_cmp->sf_rx_status2);
			/*
			 * Sometimes the AIC-6915 generates an interrupt to
			 * warn of an RxGFP stall with the bad checksum bit
			 * set in the status word. I'm not sure what
			 * condition triggers it, but received packets'
			 * checksums were correct even though the AIC-6915
			 * did not agree. This may be an indication of a
			 * firmware bug. To work around the issue, do not
			 * rely on the bad checksum bit in the status word
			 * and let the upper layer verify the integrity of
			 * the received frame.
			 * Another nice feature of the AIC-6915 is hardware
			 * assistance for checksum calculation: it provides
			 * a partial checksum value for each received frame.
			 * The partial checksum value can be used to
			 * accelerate checksum computation for fragmented
			 * TCP/UDP packets, and the network stack already
			 * takes advantage of the partial checksum value in
			 * the IP reassembly stage. But I'm not sure about
			 * the correctness of the partial hardware checksum
			 * assistance, as frequent RxGFP stalls are seen on
			 * non-fragmented frames. Due to the complexity of
			 * the checksum computation code in the firmware
			 * it's possible there is another bug in RxGFP, so
			 * ignore checksum assistance for fragmented frames.
			 * This can be changed in the future.
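			 * For illustration, the effect of the
			 * non-fragmented case below on the stack: setting
			 * CSUM_DATA_VALID | CSUM_PSEUDO_HDR with
			 * m->m_pkthdr.csum_data = 0xffff marks the TCP/UDP
			 * checksum as already verified, so the upper
			 * layers skip recomputing it.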
*/ if ((status2 & SF_RXSTAT2_FRAG) == 0) { if ((status2 & (SF_RXSTAT2_TCP | SF_RXSTAT2_UDP)) != 0) { if ((status2 & SF_RXSTAT2_CSUM_OK)) { m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } } #ifdef SF_PARTIAL_CSUM_SUPPORT else if ((status2 & SF_RXSTAT2_FRAG) != 0) { if ((status2 & (SF_RXSTAT2_TCP | SF_RXSTAT2_UDP)) != 0) { if ((status2 & SF_RXSTAT2_PCSUM_OK)) { m->m_pkthdr.csum_flags = CSUM_DATA_VALID; m->m_pkthdr.csum_data = (status & SF_RX_CMPDESC_CSUM2); } } } #endif } m->m_pkthdr.len = m->m_len = status & SF_RX_CMPDESC_LEN; #ifndef __NO_STRICT_ALIGNMENT sf_fixup_rx(m); #endif m->m_pkthdr.rcvif = ifp; SF_UNLOCK(sc); (*ifp->if_input)(ifp, m); SF_LOCK(sc); + rx_npkts++; /* Clear completion status. */ cur_cmp->sf_rx_status1 = 0; } if (prog > 0) { sc->sf_cdata.sf_rxc_cons = cons; bus_dmamap_sync(sc->sf_cdata.sf_rx_ring_tag, sc->sf_cdata.sf_rx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->sf_cdata.sf_rx_cring_tag, sc->sf_cdata.sf_rx_cring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Update Rx completion Q1 consumer index. */ csr_write_4(sc, SF_CQ_CONSIDX, (csr_read_4(sc, SF_CQ_CONSIDX) & ~SF_CQ_CONSIDX_RXQ1) | (cons & SF_CQ_CONSIDX_RXQ1)); /* Update Rx descriptor Q1 ptr. */ csr_write_4(sc, SF_RXDQ_PTR_Q1, (csr_read_4(sc, SF_RXDQ_PTR_Q1) & ~SF_RXDQ_PRODIDX) | (eidx & SF_RXDQ_PRODIDX)); } + return (rx_npkts); } /* * Read the transmit status from the completion queue and release * mbufs. Note that the buffer descriptor index in the completion * descriptor is an offset from the start of the transmit buffer * descriptor list in bytes. This is important because the manual * gives the impression that it should match the producer/consumer * index, which is the offset in 8 byte blocks. */ static void sf_txeof(struct sf_softc *sc) { struct sf_txdesc *txd; struct sf_tx_rcdesc *cur_cmp; struct ifnet *ifp; uint32_t status; int cons, idx, prod; SF_LOCK_ASSERT(sc); ifp = sc->sf_ifp; bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag, sc->sf_cdata.sf_tx_cring_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); cons = sc->sf_cdata.sf_txc_cons; prod = (csr_read_4(sc, SF_CQ_PRODIDX) & SF_TXDQ_PRODIDX_HIPRIO) >> 16; if (prod == cons) return; for (; cons != prod; SF_INC(cons, SF_TX_CLIST_CNT)) { cur_cmp = &sc->sf_rdata.sf_tx_cring[cons]; status = le32toh(cur_cmp->sf_tx_status1); if (status == 0) break; switch (status & SF_TX_CMPDESC_TYPE) { case SF_TXCMPTYPE_TX: /* Tx complete entry. */ break; case SF_TXCMPTYPE_DMA: /* DMA complete entry. */ idx = status & SF_TX_CMPDESC_IDX; idx = idx / sizeof(struct sf_tx_rdesc); /* * We don't need to check Tx status here. * SF_ISR_TX_LOFIFO intr would handle this. * Note, if_opackets, if_collisions and if_oerrors * counters are handled in sf_stats_update(). */ txd = &sc->sf_cdata.sf_txdesc[idx]; if (txd->tx_m != NULL) { bus_dmamap_sync(sc->sf_cdata.sf_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, txd->tx_dmamap); m_freem(txd->tx_m); txd->tx_m = NULL; } sc->sf_cdata.sf_tx_cnt -= txd->ndesc; KASSERT(sc->sf_cdata.sf_tx_cnt >= 0, ("%s: Active Tx desc counter was garbled\n", __func__)); txd->ndesc = 0; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; break; default: /* It should not happen. 
*/ device_printf(sc->sf_dev, "unknown Tx completion type : 0x%08x : %d : %d\n", status, cons, prod); break; } cur_cmp->sf_tx_status1 = 0; } sc->sf_cdata.sf_txc_cons = cons; bus_dmamap_sync(sc->sf_cdata.sf_tx_cring_tag, sc->sf_cdata.sf_tx_cring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if (sc->sf_cdata.sf_tx_cnt == 0) sc->sf_watchdog_timer = 0; /* Update Tx completion consumer index. */ csr_write_4(sc, SF_CQ_CONSIDX, (csr_read_4(sc, SF_CQ_CONSIDX) & 0xffff) | ((cons << 16) & 0xffff0000)); } static void sf_txthresh_adjust(struct sf_softc *sc) { uint32_t txfctl; device_printf(sc->sf_dev, "Tx underrun -- "); if (sc->sf_txthresh < SF_MAX_TX_THRESHOLD) { txfctl = csr_read_4(sc, SF_TX_FRAMCTL); /* Increase Tx threshold 256 bytes. */ sc->sf_txthresh += 16; if (sc->sf_txthresh > SF_MAX_TX_THRESHOLD) sc->sf_txthresh = SF_MAX_TX_THRESHOLD; txfctl &= ~SF_TXFRMCTL_TXTHRESH; txfctl |= sc->sf_txthresh; printf("increasing Tx threshold to %d bytes\n", sc->sf_txthresh * SF_TX_THRESHOLD_UNIT); csr_write_4(sc, SF_TX_FRAMCTL, txfctl); } else printf("\n"); } #ifdef DEVICE_POLLING -static void +static int sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct sf_softc *sc; uint32_t status; + int rx_npkts; sc = ifp->if_softc; + rx_npkts = 0; SF_LOCK(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { SF_UNLOCK(sc); - return; + return (rx_npkts); } sc->rxcycles = count; - sf_rxeof(sc); + rx_npkts = sf_rxeof(sc); sf_txeof(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) sf_start_locked(ifp); if (cmd == POLL_AND_CHECK_STATUS) { /* Reading the ISR register clears all interrrupts. */ status = csr_read_4(sc, SF_ISR); if ((status & SF_ISR_ABNORMALINTR) != 0) { if ((status & SF_ISR_STATSOFLOW) != 0) sf_stats_update(sc); else if ((status & SF_ISR_TX_LOFIFO) != 0) sf_txthresh_adjust(sc); else if ((status & SF_ISR_DMAERR) != 0) { device_printf(sc->sf_dev, "DMA error, resetting\n"); sf_init_locked(sc); } else if ((status & SF_ISR_NO_TX_CSUM) != 0) { sc->sf_statistics.sf_tx_gfp_stall++; #ifdef SF_GFP_DEBUG device_printf(sc->sf_dev, "TxGFP is not responding!\n"); #endif } else if ((status & SF_ISR_RXGFP_NORESP) != 0) { sc->sf_statistics.sf_rx_gfp_stall++; #ifdef SF_GFP_DEBUG device_printf(sc->sf_dev, "RxGFP is not responding!\n"); #endif } } } SF_UNLOCK(sc); + return (rx_npkts); } #endif /* DEVICE_POLLING */ static void sf_intr(void *arg) { struct sf_softc *sc; struct ifnet *ifp; uint32_t status; sc = (struct sf_softc *)arg; SF_LOCK(sc); if (sc->sf_suspended != 0) goto done_locked; /* Reading the ISR register clears all interrrupts. */ status = csr_read_4(sc, SF_ISR); if (status == 0 || status == 0xffffffff || (status & SF_ISR_PCIINT_ASSERTED) == 0) goto done_locked; ifp = sc->sf_ifp; #ifdef DEVICE_POLLING if ((ifp->if_capenable & IFCAP_POLLING) != 0) goto done_locked; #endif if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) goto done_locked; /* Disable interrupts. 
*/ csr_write_4(sc, SF_IMR, 0x00000000); for (; (status & SF_INTRS) != 0;) { if ((status & SF_ISR_RXDQ1_DMADONE) != 0) sf_rxeof(sc); if ((status & (SF_ISR_TX_TXDONE | SF_ISR_TX_DMADONE | SF_ISR_TX_QUEUEDONE)) != 0) sf_txeof(sc); if ((status & SF_ISR_ABNORMALINTR) != 0) { if ((status & SF_ISR_STATSOFLOW) != 0) sf_stats_update(sc); else if ((status & SF_ISR_TX_LOFIFO) != 0) sf_txthresh_adjust(sc); else if ((status & SF_ISR_DMAERR) != 0) { device_printf(sc->sf_dev, "DMA error, resetting\n"); sf_init_locked(sc); break; } else if ((status & SF_ISR_NO_TX_CSUM) != 0) { sc->sf_statistics.sf_tx_gfp_stall++; #ifdef SF_GFP_DEBUG device_printf(sc->sf_dev, "TxGFP is not responding!\n"); #endif } else if ((status & SF_ISR_RXGFP_NORESP) != 0) { sc->sf_statistics.sf_rx_gfp_stall++; #ifdef SF_GFP_DEBUG device_printf(sc->sf_dev, "RxGFP is not responding!\n"); #endif } } /* Reading the ISR register clears all interrrupts. */ status = csr_read_4(sc, SF_ISR); } /* Re-enable interrupts. */ csr_write_4(sc, SF_IMR, SF_INTRS); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) sf_start_locked(ifp); done_locked: SF_UNLOCK(sc); } static void sf_download_fw(struct sf_softc *sc) { uint32_t gfpinst; int i, ndx; uint8_t *p; /* * A FP instruction is composed of 48bits so we have to * write it with two parts. */ p = txfwdata; ndx = 0; for (i = 0; i < sizeof(txfwdata) / SF_GFP_INST_BYTES; i++) { gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5]; csr_write_4(sc, SF_TXGFP_MEM_BASE + ndx * 4, gfpinst); gfpinst = p[0] << 8 | p[1]; csr_write_4(sc, SF_TXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst); p += SF_GFP_INST_BYTES; ndx += 2; } if (bootverbose) device_printf(sc->sf_dev, "%d Tx instructions downloaded\n", i); p = rxfwdata; ndx = 0; for (i = 0; i < sizeof(rxfwdata) / SF_GFP_INST_BYTES; i++) { gfpinst = p[2] << 24 | p[3] << 16 | p[4] << 8 | p[5]; csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx * 4), gfpinst); gfpinst = p[0] << 8 | p[1]; csr_write_4(sc, SF_RXGFP_MEM_BASE + (ndx + 1) * 4, gfpinst); p += SF_GFP_INST_BYTES; ndx += 2; } if (bootverbose) device_printf(sc->sf_dev, "%d Rx instructions downloaded\n", i); } static void sf_init(void *xsc) { struct sf_softc *sc; sc = (struct sf_softc *)xsc; SF_LOCK(sc); sf_init_locked(sc); SF_UNLOCK(sc); } static void sf_init_locked(struct sf_softc *sc) { struct ifnet *ifp; struct mii_data *mii; uint8_t eaddr[ETHER_ADDR_LEN]; bus_addr_t addr; int i; SF_LOCK_ASSERT(sc); ifp = sc->sf_ifp; mii = device_get_softc(sc->sf_miibus); sf_stop(sc); /* Reset the hardware to a known state. */ sf_reset(sc); /* Init all the receive filter registers */ for (i = SF_RXFILT_PERFECT_BASE; i < (SF_RXFILT_HASH_MAX + 1); i += sizeof(uint32_t)) csr_write_4(sc, i, 0); /* Empty stats counter registers. */ for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t)) csr_write_4(sc, i, 0); /* Init our MAC address. */ bcopy(IF_LLADDR(sc->sf_ifp), eaddr, sizeof(eaddr)); csr_write_4(sc, SF_PAR0, eaddr[2] << 24 | eaddr[3] << 16 | eaddr[4] << 8 | eaddr[5]); csr_write_4(sc, SF_PAR1, eaddr[0] << 8 | eaddr[1]); sf_setperf(sc, 0, eaddr); if (sf_init_rx_ring(sc) == ENOBUFS) { device_printf(sc->sf_dev, "initialization failed: no memory for rx buffers\n"); return; } sf_init_tx_ring(sc); /* * 16 perfect address filtering. * Hash only multicast destination address, Accept matching * frames regardless of VLAN ID. */ csr_write_4(sc, SF_RXFILT, SF_PERFMODE_NORMAL | SF_HASHMODE_ANYVLAN); /* * Set Rx filter. */ sf_rxfilter(sc); /* Init the completion queue indexes. 
*/ csr_write_4(sc, SF_CQ_CONSIDX, 0); csr_write_4(sc, SF_CQ_PRODIDX, 0); /* Init the RX completion queue. */ addr = sc->sf_rdata.sf_rx_cring_paddr; csr_write_4(sc, SF_CQ_ADDR_HI, SF_ADDR_HI(addr)); csr_write_4(sc, SF_RXCQ_CTL_1, SF_ADDR_LO(addr) & SF_RXCQ_ADDR); if (SF_ADDR_HI(addr) != 0) SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQ_USE_64BIT); /* Set RX completion queue type 2. */ SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQTYPE_2); csr_write_4(sc, SF_RXCQ_CTL_2, 0); /* * Init RX DMA control. * default RxHighPriority Threshold, * default RxBurstSize, 128bytes. */ SF_SETBIT(sc, SF_RXDMA_CTL, SF_RXDMA_REPORTBADPKTS | (SF_RXDMA_HIGHPRIO_THRESH << 8) | SF_RXDMA_BURST); /* Init the RX buffer descriptor queue. */ addr = sc->sf_rdata.sf_rx_ring_paddr; csr_write_4(sc, SF_RXDQ_ADDR_HI, SF_ADDR_HI(addr)); csr_write_4(sc, SF_RXDQ_ADDR_Q1, SF_ADDR_LO(addr)); /* Set RX queue buffer length. */ csr_write_4(sc, SF_RXDQ_CTL_1, ((MCLBYTES - sizeof(uint32_t)) << 16) | SF_RXDQCTL_64BITBADDR | SF_RXDQCTL_VARIABLE); if (SF_ADDR_HI(addr) != 0) SF_SETBIT(sc, SF_RXDQ_CTL_1, SF_RXDQCTL_64BITDADDR); csr_write_4(sc, SF_RXDQ_PTR_Q1, SF_RX_DLIST_CNT - 1); csr_write_4(sc, SF_RXDQ_CTL_2, 0); /* Init the TX completion queue */ addr = sc->sf_rdata.sf_tx_cring_paddr; csr_write_4(sc, SF_TXCQ_CTL, SF_ADDR_LO(addr) & SF_TXCQ_ADDR); if (SF_ADDR_HI(addr) != 0) SF_SETBIT(sc, SF_TXCQ_CTL, SF_TXCQ_USE_64BIT); /* Init the TX buffer descriptor queue. */ addr = sc->sf_rdata.sf_tx_ring_paddr; csr_write_4(sc, SF_TXDQ_ADDR_HI, SF_ADDR_HI(addr)); csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0); csr_write_4(sc, SF_TXDQ_ADDR_LOPRIO, SF_ADDR_LO(addr)); csr_write_4(sc, SF_TX_FRAMCTL, SF_TXFRMCTL_CPLAFTERTX | sc->sf_txthresh); csr_write_4(sc, SF_TXDQ_CTL, SF_TXDMA_HIPRIO_THRESH << 24 | SF_TXSKIPLEN_0BYTES << 16 | SF_TXDDMA_BURST << 8 | SF_TXBUFDESC_TYPE2 | SF_TXMINSPACE_UNLIMIT); if (SF_ADDR_HI(addr) != 0) SF_SETBIT(sc, SF_TXDQ_CTL, SF_TXDQCTL_64BITADDR); /* Set VLAN Type register. */ csr_write_4(sc, SF_VLANTYPE, ETHERTYPE_VLAN); /* Set TxPause Timer. */ csr_write_4(sc, SF_TXPAUSETIMER, 0xffff); /* Enable autopadding of short TX frames. */ SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_AUTOPAD); SF_SETBIT(sc, SF_MACCFG_2, SF_MACCFG2_AUTOVLANPAD); /* Make sure to reset MAC to take changes effect. */ SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); DELAY(1000); SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); /* Enable PCI bus master. */ SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_PCIMEN); /* Load StarFire firmware. */ sf_download_fw(sc); /* Intialize interrupt moderation. */ csr_write_4(sc, SF_TIMER_CTL, SF_TIMER_IMASK_MODE | SF_TIMER_TIMES_TEN | (sc->sf_int_mod & SF_TIMER_IMASK_INTERVAL)); #ifdef DEVICE_POLLING /* Disable interrupts if we are polling. */ if ((ifp->if_capenable & IFCAP_POLLING) != 0) csr_write_4(sc, SF_IMR, 0x00000000); else #endif /* Enable interrupts. */ csr_write_4(sc, SF_IMR, SF_INTRS); SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_INTR_ENB); /* Enable the RX and TX engines. 
*/ csr_write_4(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RX_ENB | SF_ETHCTL_RXDMA_ENB | SF_ETHCTL_TX_ENB | SF_ETHCTL_TXDMA_ENB); if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB); else SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TXGFP_ENB); if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB); else SF_CLRBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RXGFP_ENB); sc->sf_link = 0; mii_mediachg(mii); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&sc->sf_co, hz, sf_tick, sc); } static int sf_encap(struct sf_softc *sc, struct mbuf **m_head) { struct sf_txdesc *txd; struct sf_tx_rdesc *desc; struct mbuf *m; bus_dmamap_t map; bus_dma_segment_t txsegs[SF_MAXTXSEGS]; int error, i, nsegs, prod, si; int avail, nskip; SF_LOCK_ASSERT(sc); m = *m_head; prod = sc->sf_cdata.sf_tx_prod; txd = &sc->sf_cdata.sf_txdesc[prod]; map = txd->tx_dmamap; error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag, map, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { m = m_collapse(*m_head, M_DONTWAIT, SF_MAXTXSEGS); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m; error = bus_dmamap_load_mbuf_sg(sc->sf_cdata.sf_tx_tag, map, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { m_freem(*m_head); *m_head = NULL; return (error); } } else if (error != 0) return (error); if (nsegs == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } /* Check number of available descriptors. */ avail = (SF_TX_DLIST_CNT - 1) - sc->sf_cdata.sf_tx_cnt; if (avail < nsegs) { bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map); return (ENOBUFS); } nskip = 0; if (prod + nsegs >= SF_TX_DLIST_CNT) { nskip = SF_TX_DLIST_CNT - prod - 1; if (avail < nsegs + nskip) { bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, map); return (ENOBUFS); } } bus_dmamap_sync(sc->sf_cdata.sf_tx_tag, map, BUS_DMASYNC_PREWRITE); si = prod; for (i = 0; i < nsegs; i++) { desc = &sc->sf_rdata.sf_tx_ring[prod]; desc->sf_tx_ctrl = htole32(SF_TX_DESC_ID | (txsegs[i].ds_len & SF_TX_DESC_FRAGLEN)); desc->sf_tx_reserved = 0; desc->sf_addr = htole64(txsegs[i].ds_addr); if (i == 0 && prod + nsegs >= SF_TX_DLIST_CNT) { /* Queue wraps! */ desc->sf_tx_ctrl |= htole32(SF_TX_DESC_END); prod = 0; } else SF_INC(prod, SF_TX_DLIST_CNT); } /* Update producer index. */ sc->sf_cdata.sf_tx_prod = prod; sc->sf_cdata.sf_tx_cnt += nsegs + nskip; desc = &sc->sf_rdata.sf_tx_ring[si]; /* Check TDP/UDP checksum offload request. */ if ((m->m_pkthdr.csum_flags & SF_CSUM_FEATURES) != 0) desc->sf_tx_ctrl |= htole32(SF_TX_DESC_CALTCP); desc->sf_tx_ctrl |= htole32(SF_TX_DESC_CRCEN | SF_TX_DESC_INTR | (nsegs << 16)); txd->tx_dmamap = map; txd->tx_m = m; txd->ndesc = nsegs + nskip; return (0); } static void sf_start(struct ifnet *ifp) { struct sf_softc *sc; sc = ifp->if_softc; SF_LOCK(sc); sf_start_locked(ifp); SF_UNLOCK(sc); } static void sf_start_locked(struct ifnet *ifp) { struct sf_softc *sc; struct mbuf *m_head; int enq; sc = ifp->if_softc; SF_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || sc->sf_link == 0) return; /* * Since we don't know when descriptor wrap occurrs in advance * limit available number of active Tx descriptor counter to be * higher than maximum number of DMA segments allowed in driver. 
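 * A worked example with illustrative values: if SF_TX_DLIST_CNT were
 * 256 and SF_MAXTXSEGS 16, the loop below stops queueing once 240
 * descriptors are active, so sf_encap() always has room for a
 * maximally fragmented frame plus any skip descriptors at the wrap.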
*/ for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->sf_cdata.sf_tx_cnt < SF_TX_DLIST_CNT - SF_MAXTXSEGS; ) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* * Pack the data into the transmit ring. If we * don't have room, set the OACTIVE flag and wait * for the NIC to drain the ring. */ if (sf_encap(sc, &m_head)) { if (m_head == NULL) break; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } enq++; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ ETHER_BPF_MTAP(ifp, m_head); } if (enq > 0) { bus_dmamap_sync(sc->sf_cdata.sf_tx_ring_tag, sc->sf_cdata.sf_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Kick transmit. */ csr_write_4(sc, SF_TXDQ_PRODIDX, sc->sf_cdata.sf_tx_prod * (sizeof(struct sf_tx_rdesc) / 8)); /* Set a timeout in case the chip goes out to lunch. */ sc->sf_watchdog_timer = 5; } } static void sf_stop(struct sf_softc *sc) { struct sf_txdesc *txd; struct sf_rxdesc *rxd; struct ifnet *ifp; int i; SF_LOCK_ASSERT(sc); ifp = sc->sf_ifp; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); sc->sf_link = 0; callout_stop(&sc->sf_co); sc->sf_watchdog_timer = 0; /* Reading the ISR register clears all interrrupts. */ csr_read_4(sc, SF_ISR); /* Disable further interrupts. */ csr_write_4(sc, SF_IMR, 0); /* Disable Tx/Rx egine. */ csr_write_4(sc, SF_GEN_ETH_CTL, 0); csr_write_4(sc, SF_CQ_CONSIDX, 0); csr_write_4(sc, SF_CQ_PRODIDX, 0); csr_write_4(sc, SF_RXDQ_ADDR_Q1, 0); csr_write_4(sc, SF_RXDQ_CTL_1, 0); csr_write_4(sc, SF_RXDQ_PTR_Q1, 0); csr_write_4(sc, SF_TXCQ_CTL, 0); csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0); csr_write_4(sc, SF_TXDQ_CTL, 0); /* * Free RX and TX mbufs still in the queues. */ for (i = 0; i < SF_RX_DLIST_CNT; i++) { rxd = &sc->sf_cdata.sf_rxdesc[i]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->sf_cdata.sf_rx_tag, rxd->rx_dmamap); m_freem(rxd->rx_m); rxd->rx_m = NULL; } } for (i = 0; i < SF_TX_DLIST_CNT; i++) { txd = &sc->sf_cdata.sf_txdesc[i]; if (txd->tx_m != NULL) { bus_dmamap_sync(sc->sf_cdata.sf_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sf_cdata.sf_tx_tag, txd->tx_dmamap); m_freem(txd->tx_m); txd->tx_m = NULL; txd->ndesc = 0; } } } static void sf_tick(void *xsc) { struct sf_softc *sc; struct mii_data *mii; sc = xsc; SF_LOCK_ASSERT(sc); mii = device_get_softc(sc->sf_miibus); mii_tick(mii); sf_stats_update(sc); sf_watchdog(sc); callout_reset(&sc->sf_co, hz, sf_tick, sc); } /* * Note: it is important that this function not be interrupted. We * use a two-stage register access scheme: if we are interrupted in * between setting the indirect address register and reading from the * indirect data register, the contents of the address register could * be changed out from under us. 
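 * A sketch of the hazard in the indirect (I/O space) mode used by
 * csr_read_4():
 *
 *	CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg);
 *	    ...preempted here; other code sets a different address...
 *	val = CSR_READ_4(sc, SF_INDIRECTIO_DATA);   <- wrong register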
*/ static void sf_stats_update(struct sf_softc *sc) { struct ifnet *ifp; struct sf_stats now, *stats, *nstats; int i; SF_LOCK_ASSERT(sc); ifp = sc->sf_ifp; stats = &now; stats->sf_tx_frames = csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAMES); stats->sf_tx_single_colls = csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_SINGLE_COL); stats->sf_tx_multi_colls = csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI_COL); stats->sf_tx_crcerrs = csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CRC_ERRS); stats->sf_tx_bytes = csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BYTES); stats->sf_tx_deferred = csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_DEFERRED); stats->sf_tx_late_colls = csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_LATE_COL); stats->sf_tx_pause_frames = csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_PAUSE); stats->sf_tx_control_frames = csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_CTL_FRAME); stats->sf_tx_excess_colls = csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_COL); stats->sf_tx_excess_defer = csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_EXCESS_DEF); stats->sf_tx_mcast_frames = csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_MULTI); stats->sf_tx_bcast_frames = csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_BCAST); stats->sf_tx_frames_lost = csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_FRAME_LOST); stats->sf_rx_frames = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAMES); stats->sf_rx_crcerrs = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CRC_ERRS); stats->sf_rx_alignerrs = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_ALIGN_ERRS); stats->sf_rx_bytes = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_BYTES); stats->sf_rx_pause_frames = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_PAUSE); stats->sf_rx_control_frames = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_CTL_FRAME); stats->sf_rx_unsup_control_frames = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_UNSUP_FRAME); stats->sf_rx_giants = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_GIANTS); stats->sf_rx_runts = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_RUNTS); stats->sf_rx_jabbererrs = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_JABBER); stats->sf_rx_fragments = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAGMENTS); stats->sf_rx_pkts_64 = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_64); stats->sf_rx_pkts_65_127 = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_65_127); stats->sf_rx_pkts_128_255 = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_128_255); stats->sf_rx_pkts_256_511 = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_256_511); stats->sf_rx_pkts_512_1023 = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_512_1023); stats->sf_rx_pkts_1024_1518 = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_1024_1518); stats->sf_rx_frames_lost = csr_read_4(sc, SF_STATS_BASE + SF_STATS_RX_FRAME_LOST); /* Lower 16bits are valid. */ stats->sf_tx_underruns = (csr_read_4(sc, SF_STATS_BASE + SF_STATS_TX_UNDERRUN) & 0xffff); /* Empty stats counter registers. 
*/ for (i = SF_STATS_BASE; i < (SF_STATS_END + 1); i += sizeof(uint32_t)) csr_write_4(sc, i, 0); ifp->if_opackets += (u_long)stats->sf_tx_frames; ifp->if_collisions += (u_long)stats->sf_tx_single_colls + (u_long)stats->sf_tx_multi_colls; ifp->if_oerrors += (u_long)stats->sf_tx_excess_colls + (u_long)stats->sf_tx_excess_defer + (u_long)stats->sf_tx_frames_lost; ifp->if_ipackets += (u_long)stats->sf_rx_frames; ifp->if_ierrors += (u_long)stats->sf_rx_crcerrs + (u_long)stats->sf_rx_alignerrs + (u_long)stats->sf_rx_giants + (u_long)stats->sf_rx_runts + (u_long)stats->sf_rx_jabbererrs + (u_long)stats->sf_rx_frames_lost; nstats = &sc->sf_statistics; nstats->sf_tx_frames += stats->sf_tx_frames; nstats->sf_tx_single_colls += stats->sf_tx_single_colls; nstats->sf_tx_multi_colls += stats->sf_tx_multi_colls; nstats->sf_tx_crcerrs += stats->sf_tx_crcerrs; nstats->sf_tx_bytes += stats->sf_tx_bytes; nstats->sf_tx_deferred += stats->sf_tx_deferred; nstats->sf_tx_late_colls += stats->sf_tx_late_colls; nstats->sf_tx_pause_frames += stats->sf_tx_pause_frames; nstats->sf_tx_control_frames += stats->sf_tx_control_frames; nstats->sf_tx_excess_colls += stats->sf_tx_excess_colls; nstats->sf_tx_excess_defer += stats->sf_tx_excess_defer; nstats->sf_tx_mcast_frames += stats->sf_tx_mcast_frames; nstats->sf_tx_bcast_frames += stats->sf_tx_bcast_frames; nstats->sf_tx_frames_lost += stats->sf_tx_frames_lost; nstats->sf_rx_frames += stats->sf_rx_frames; nstats->sf_rx_crcerrs += stats->sf_rx_crcerrs; nstats->sf_rx_alignerrs += stats->sf_rx_alignerrs; nstats->sf_rx_bytes += stats->sf_rx_bytes; nstats->sf_rx_pause_frames += stats->sf_rx_pause_frames; nstats->sf_rx_control_frames += stats->sf_rx_control_frames; nstats->sf_rx_unsup_control_frames += stats->sf_rx_unsup_control_frames; nstats->sf_rx_giants += stats->sf_rx_giants; nstats->sf_rx_runts += stats->sf_rx_runts; nstats->sf_rx_jabbererrs += stats->sf_rx_jabbererrs; nstats->sf_rx_fragments += stats->sf_rx_fragments; nstats->sf_rx_pkts_64 += stats->sf_rx_pkts_64; nstats->sf_rx_pkts_65_127 += stats->sf_rx_pkts_65_127; nstats->sf_rx_pkts_128_255 += stats->sf_rx_pkts_128_255; nstats->sf_rx_pkts_256_511 += stats->sf_rx_pkts_256_511; nstats->sf_rx_pkts_512_1023 += stats->sf_rx_pkts_512_1023; nstats->sf_rx_pkts_1024_1518 += stats->sf_rx_pkts_1024_1518; nstats->sf_rx_frames_lost += stats->sf_rx_frames_lost; nstats->sf_tx_underruns += stats->sf_tx_underruns; } static void sf_watchdog(struct sf_softc *sc) { struct ifnet *ifp; SF_LOCK_ASSERT(sc); if (sc->sf_watchdog_timer == 0 || --sc->sf_watchdog_timer) return; ifp = sc->sf_ifp; ifp->if_oerrors++; if (sc->sf_link == 0) { if (bootverbose) if_printf(sc->sf_ifp, "watchdog timeout " "(missed link)\n"); } else if_printf(ifp, "watchdog timeout, %d Tx descs are active\n", sc->sf_cdata.sf_tx_cnt); sf_init_locked(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) sf_start_locked(ifp); } static int sf_shutdown(device_t dev) { struct sf_softc *sc; sc = device_get_softc(dev); SF_LOCK(sc); sf_stop(sc); SF_UNLOCK(sc); return (0); } static int sf_suspend(device_t dev) { struct sf_softc *sc; sc = device_get_softc(dev); SF_LOCK(sc); sf_stop(sc); sc->sf_suspended = 1; bus_generic_suspend(dev); SF_UNLOCK(sc); return (0); } static int sf_resume(device_t dev) { struct sf_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); SF_LOCK(sc); bus_generic_resume(dev); ifp = sc->sf_ifp; if ((ifp->if_flags & IFF_UP) != 0) sf_init_locked(sc); sc->sf_suspended = 0; SF_UNLOCK(sc); return (0); } static int sf_sysctl_stats(SYSCTL_HANDLER_ARGS) { struct sf_softc *sc; 
	struct sf_stats *stats;
	int error;
	int result;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	if (error != 0 || req->newptr == NULL)
		return (error);

	if (result != 1)
		return (error);

	sc = (struct sf_softc *)arg1;
	stats = &sc->sf_statistics;
	printf("%s statistics:\n", device_get_nameunit(sc->sf_dev));
	printf("Transmit good frames : %ju\n",
	    (uintmax_t)stats->sf_tx_frames);
	printf("Transmit good octets : %ju\n",
	    (uintmax_t)stats->sf_tx_bytes);
	printf("Transmit single collisions : %u\n",
	    stats->sf_tx_single_colls);
	printf("Transmit multiple collisions : %u\n",
	    stats->sf_tx_multi_colls);
	printf("Transmit late collisions : %u\n",
	    stats->sf_tx_late_colls);
	printf("Transmit abort due to excessive collisions : %u\n",
	    stats->sf_tx_excess_colls);
	printf("Transmit CRC errors : %u\n",
	    stats->sf_tx_crcerrs);
	printf("Transmit deferrals : %u\n",
	    stats->sf_tx_deferred);
	printf("Transmit abort due to excessive deferrals : %u\n",
	    stats->sf_tx_excess_defer);
	printf("Transmit pause control frames : %u\n",
	    stats->sf_tx_pause_frames);
	printf("Transmit control frames : %u\n",
	    stats->sf_tx_control_frames);
	printf("Transmit good multicast frames : %u\n",
	    stats->sf_tx_mcast_frames);
	printf("Transmit good broadcast frames : %u\n",
	    stats->sf_tx_bcast_frames);
	printf("Transmit frames lost due to internal transmit errors : %u\n",
	    stats->sf_tx_frames_lost);
	printf("Transmit FIFO underflows : %u\n",
	    stats->sf_tx_underruns);
	printf("Transmit GFP stalls : %u\n",
	    stats->sf_tx_gfp_stall);
	printf("Receive good frames : %ju\n",
	    (uintmax_t)stats->sf_rx_frames);
	printf("Receive good octets : %ju\n",
	    (uintmax_t)stats->sf_rx_bytes);
	printf("Receive CRC errors : %u\n",
	    stats->sf_rx_crcerrs);
	printf("Receive alignment errors : %u\n",
	    stats->sf_rx_alignerrs);
	printf("Receive pause frames : %u\n",
	    stats->sf_rx_pause_frames);
	printf("Receive control frames : %u\n",
	    stats->sf_rx_control_frames);
	printf("Receive control frames with unsupported opcode : %u\n",
	    stats->sf_rx_unsup_control_frames);
	printf("Receive frames too long : %u\n",
	    stats->sf_rx_giants);
	printf("Receive frames too short : %u\n",
	    stats->sf_rx_runts);
	printf("Receive frames jabber errors : %u\n",
	    stats->sf_rx_jabbererrs);
	printf("Receive frames fragments : %u\n",
	    stats->sf_rx_fragments);
	printf("Receive packets 64 bytes : %ju\n",
	    (uintmax_t)stats->sf_rx_pkts_64);
	printf("Receive packets 65 to 127 bytes : %ju\n",
	    (uintmax_t)stats->sf_rx_pkts_65_127);
	printf("Receive packets 128 to 255 bytes : %ju\n",
	    (uintmax_t)stats->sf_rx_pkts_128_255);
	printf("Receive packets 256 to 511 bytes : %ju\n",
	    (uintmax_t)stats->sf_rx_pkts_256_511);
	printf("Receive packets 512 to 1023 bytes : %ju\n",
	    (uintmax_t)stats->sf_rx_pkts_512_1023);
	printf("Receive packets 1024 to 1518 bytes : %ju\n",
	    (uintmax_t)stats->sf_rx_pkts_1024_1518);
	printf("Receive frames lost due to internal receive errors : %u\n",
	    stats->sf_rx_frames_lost);
	printf("Receive GFP stalls : %u\n",
	    stats->sf_rx_gfp_stall);

	return (error);
}

static int
sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high)
{
	int error, value;

	if (!arg1)
		return (EINVAL);
	value = *(int *)arg1;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error || !req->newptr)
		return (error);
	if (value < low || value > high)
		return (EINVAL);
	*(int *)arg1 = value;
	return (0);
}

static int
sysctl_hw_sf_int_mod(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_int_range(oidp, arg1, arg2, req, SF_IM_MIN,
	    SF_IM_MAX));
}
Index: head/sys/dev/sis/if_sis.c
===================================================================
--- head/sys/dev/sis/if_sis.c (revision 193095)
+++
Index: head/sys/dev/sis/if_sis.c
===================================================================
--- head/sys/dev/sis/if_sis.c	(revision 193095)
+++ head/sys/dev/sis/if_sis.c	(revision 193096)
@@ -1,2302 +1,2306 @@
/*-
 * Copyright (c) 2005 Poul-Henning Kamp
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include
__FBSDID("$FreeBSD$");

/*
 * SiS 900/SiS 7016 fast ethernet PCI NIC driver. Datasheets are
 * available from http://www.sis.com.tw.
 *
 * This driver also supports the NatSemi DP83815. Datasheets are
 * available from http://www.national.com.
 *
 * Written by Bill Paul
 * Electrical Engineering Department
 * Columbia University, New York City
 */
/*
 * The SiS 900 is a fairly simple chip. It uses bus master DMA with
 * simple TX and RX descriptors of 3 longwords in size. The receiver
 * has a single perfect filter entry for the station address and a
 * 128-bit multicast hash table. The SiS 900 has a built-in MII-based
 * transceiver while the 7016 requires an external transceiver chip.
 * Both chips offer the standard bit-bang MII interface as well as
 * an enhanced PHY interface which simplifies accessing MII registers.
 *
 * The only downside to this chipset is that RX descriptors must be
 * longword aligned.
 */
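/*
 * For orientation, this is roughly what one of those 3-longword
 * descriptors amounts to (a sketch only -- the authoritative
 * definition is struct sis_desc in if_sisreg.h, which also carries
 * software-side bookkeeping fields):
 *
 *	u_int32_t sis_next;	-- physical address of the next descriptor
 *	u_int32_t sis_cmdsts;	-- OWN/MORE, status bits and buffer length
 *	u_int32_t sis_ptr;	-- physical address of the data buffer
 */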
#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#define SIS_USEIOSPACE

#include

MODULE_DEPEND(sis, pci, 1, 1, 1);
MODULE_DEPEND(sis, ether, 1, 1, 1);
MODULE_DEPEND(sis, miibus, 1, 1, 1);

/* "device miibus" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#define	SIS_LOCK(_sc)		mtx_lock(&(_sc)->sis_mtx)
#define	SIS_UNLOCK(_sc)		mtx_unlock(&(_sc)->sis_mtx)
#define	SIS_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->sis_mtx, MA_OWNED)

/*
 * register space access macros
 */
#define	CSR_WRITE_4(sc, reg, val)	bus_write_4(sc->sis_res[0], reg, val)
#define	CSR_READ_4(sc, reg)		bus_read_4(sc->sis_res[0], reg)
#define	CSR_READ_2(sc, reg)		bus_read_2(sc->sis_res[0], reg)

/*
 * Various supported device vendors/types and their names.
 */
static struct sis_type sis_devs[] = {
	{ SIS_VENDORID, SIS_DEVICEID_900, "SiS 900 10/100BaseTX" },
	{ SIS_VENDORID, SIS_DEVICEID_7016, "SiS 7016 10/100BaseTX" },
	{ NS_VENDORID, NS_DEVICEID_DP83815, "NatSemi DP8381[56] 10/100BaseTX" },
	{ 0, 0, NULL }
};

static int sis_detach(device_t);
static void sis_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int sis_ifmedia_upd(struct ifnet *);
static void sis_init(void *);
static void sis_initl(struct sis_softc *);
static void sis_intr(void *);
static int sis_ioctl(struct ifnet *, u_long, caddr_t);
static int sis_newbuf(struct sis_softc *, struct sis_desc *, struct mbuf *);
static void sis_start(struct ifnet *);
static void sis_startl(struct ifnet *);
static void sis_stop(struct sis_softc *);
static void sis_watchdog(struct sis_softc *);

static struct resource_spec sis_res_spec[] = {
#ifdef SIS_USEIOSPACE
	{ SYS_RES_IOPORT,	SIS_PCI_LOIO,	RF_ACTIVE},
#else
	{ SYS_RES_MEMORY,	SIS_PCI_LOMEM,	RF_ACTIVE},
#endif
	{ SYS_RES_IRQ,		0,		RF_ACTIVE | RF_SHAREABLE},
	{ -1, 0 }
};

#define SIS_SETBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | (x))

#define SIS_CLRBIT(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~(x))

#define SIO_SET(x)					\
	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) | x)

#define SIO_CLR(x)					\
	CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) & ~x)

static void
sis_dma_map_desc_next(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct sis_desc	*r;

	r = arg;
	r->sis_next = segs->ds_addr;
}

static void
sis_dma_map_desc_ptr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct sis_desc	*r;

	r = arg;
	r->sis_ptr = segs->ds_addr;
}

static void
sis_dma_map_ring(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	u_int32_t *p;

	p = arg;
	*p = segs->ds_addr;
}

/*
 * Routine to reverse the bits in a word. Stolen almost
 * verbatim from /usr/games/fortune.
 */
static uint16_t
sis_reverse(uint16_t n)
{
	n = ((n >> 1) & 0x5555) | ((n << 1) & 0xaaaa);
	n = ((n >> 2) & 0x3333) | ((n << 2) & 0xcccc);
	n = ((n >> 4) & 0x0f0f) | ((n << 4) & 0xf0f0);
	n = ((n >> 8) & 0x00ff) | ((n << 8) & 0xff00);

	return(n);
}

static void
sis_delay(struct sis_softc *sc)
{
	int idx;

	for (idx = (300 / 33) + 1; idx > 0; idx--)
		CSR_READ_4(sc, SIS_CSR);
}

static void
sis_eeprom_idle(struct sis_softc *sc)
{
	int i;

	SIO_SET(SIS_EECTL_CSEL);
	sis_delay(sc);
	SIO_SET(SIS_EECTL_CLK);
	sis_delay(sc);

	for (i = 0; i < 25; i++) {
		SIO_CLR(SIS_EECTL_CLK);
		sis_delay(sc);
		SIO_SET(SIS_EECTL_CLK);
		sis_delay(sc);
	}

	SIO_CLR(SIS_EECTL_CLK);
	sis_delay(sc);
	SIO_CLR(SIS_EECTL_CSEL);
	sis_delay(sc);
	CSR_WRITE_4(sc, SIS_EECTL, 0x00000000);
}

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
static void
sis_eeprom_putbyte(struct sis_softc *sc, int addr)
{
	int d, i;

	d = addr | SIS_EECMD_READ;

	/*
	 * Feed in each bit and strobe the clock.
*/ for (i = 0x400; i; i >>= 1) { if (d & i) { SIO_SET(SIS_EECTL_DIN); } else { SIO_CLR(SIS_EECTL_DIN); } sis_delay(sc); SIO_SET(SIS_EECTL_CLK); sis_delay(sc); SIO_CLR(SIS_EECTL_CLK); sis_delay(sc); } } /* * Read a word of data stored in the EEPROM at address 'addr.' */ static void sis_eeprom_getword(struct sis_softc *sc, int addr, uint16_t *dest) { int i; u_int16_t word = 0; /* Force EEPROM to idle state. */ sis_eeprom_idle(sc); /* Enter EEPROM access mode. */ sis_delay(sc); SIO_CLR(SIS_EECTL_CLK); sis_delay(sc); SIO_SET(SIS_EECTL_CSEL); sis_delay(sc); /* * Send address of word we want to read. */ sis_eeprom_putbyte(sc, addr); /* * Start reading bits from EEPROM. */ for (i = 0x8000; i; i >>= 1) { SIO_SET(SIS_EECTL_CLK); sis_delay(sc); if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECTL_DOUT) word |= i; sis_delay(sc); SIO_CLR(SIS_EECTL_CLK); sis_delay(sc); } /* Turn off EEPROM access mode. */ sis_eeprom_idle(sc); *dest = word; } /* * Read a sequence of words from the EEPROM. */ static void sis_read_eeprom(struct sis_softc *sc, caddr_t dest, int off, int cnt, int swap) { int i; u_int16_t word = 0, *ptr; for (i = 0; i < cnt; i++) { sis_eeprom_getword(sc, off + i, &word); ptr = (u_int16_t *)(dest + (i * 2)); if (swap) *ptr = ntohs(word); else *ptr = word; } } #if defined(__i386__) || defined(__amd64__) static device_t sis_find_bridge(device_t dev) { devclass_t pci_devclass; device_t *pci_devices; int pci_count = 0; device_t *pci_children; int pci_childcount = 0; device_t *busp, *childp; device_t child = NULL; int i, j; if ((pci_devclass = devclass_find("pci")) == NULL) return(NULL); devclass_get_devices(pci_devclass, &pci_devices, &pci_count); for (i = 0, busp = pci_devices; i < pci_count; i++, busp++) { if (device_get_children(*busp, &pci_children, &pci_childcount)) continue; for (j = 0, childp = pci_children; j < pci_childcount; j++, childp++) { if (pci_get_vendor(*childp) == SIS_VENDORID && pci_get_device(*childp) == 0x0008) { child = *childp; free(pci_children, M_TEMP); goto done; } } free(pci_children, M_TEMP); } done: free(pci_devices, M_TEMP); return(child); } static void sis_read_cmos(struct sis_softc *sc, device_t dev, caddr_t dest, int off, int cnt) { device_t bridge; u_int8_t reg; int i; bus_space_tag_t btag; bridge = sis_find_bridge(dev); if (bridge == NULL) return; reg = pci_read_config(bridge, 0x48, 1); pci_write_config(bridge, 0x48, reg|0x40, 1); /* XXX */ #if defined(__i386__) btag = I386_BUS_SPACE_IO; #elif defined(__amd64__) btag = AMD64_BUS_SPACE_IO; #endif for (i = 0; i < cnt; i++) { bus_space_write_1(btag, 0x0, 0x70, i + off); *(dest + i) = bus_space_read_1(btag, 0x0, 0x71); } pci_write_config(bridge, 0x48, reg & ~0x40, 1); return; } static void sis_read_mac(struct sis_softc *sc, device_t dev, caddr_t dest) { u_int32_t filtsave, csrsave; filtsave = CSR_READ_4(sc, SIS_RXFILT_CTL); csrsave = CSR_READ_4(sc, SIS_CSR); CSR_WRITE_4(sc, SIS_CSR, SIS_CSR_RELOAD | filtsave); CSR_WRITE_4(sc, SIS_CSR, 0); CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave & ~SIS_RXFILTCTL_ENABLE); CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0); ((u_int16_t *)dest)[0] = CSR_READ_2(sc, SIS_RXFILT_DATA); CSR_WRITE_4(sc, SIS_RXFILT_CTL,SIS_FILTADDR_PAR1); ((u_int16_t *)dest)[1] = CSR_READ_2(sc, SIS_RXFILT_DATA); CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2); ((u_int16_t *)dest)[2] = CSR_READ_2(sc, SIS_RXFILT_DATA); CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave); CSR_WRITE_4(sc, SIS_CSR, csrsave); return; } #endif /* * Sync the PHYs by setting data bit and strobing the clock 32 times. 
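 * Thirty-two clock cycles with the data line held high form the
 * preamble that IEEE 802.3 management frames require; it lets a PHY
 * that has lost frame boundaries resynchronize before we start a
 * real transaction.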
 */
static void
sis_mii_sync(struct sis_softc *sc)
{
	int i;

	SIO_SET(SIS_MII_DIR|SIS_MII_DATA);

	for (i = 0; i < 32; i++) {
		SIO_SET(SIS_MII_CLK);
		DELAY(1);
		SIO_CLR(SIS_MII_CLK);
		DELAY(1);
	}
}

/*
 * Clock a series of bits through the MII.
 */
static void
sis_mii_send(struct sis_softc *sc, uint32_t bits, int cnt)
{
	int i;

	SIO_CLR(SIS_MII_CLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			SIO_SET(SIS_MII_DATA);
		} else {
			SIO_CLR(SIS_MII_DATA);
		}
		DELAY(1);
		SIO_CLR(SIS_MII_CLK);
		DELAY(1);
		SIO_SET(SIS_MII_CLK);
	}
}

/*
 * Read a PHY register through the MII.
 */
static int
sis_mii_readreg(struct sis_softc *sc, struct sis_mii_frame *frame)
{
	int i, ack;

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = SIS_MII_STARTDELIM;
	frame->mii_opcode = SIS_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Turn on data xmit.
	 */
	SIO_SET(SIS_MII_DIR);

	sis_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	sis_mii_send(sc, frame->mii_stdelim, 2);
	sis_mii_send(sc, frame->mii_opcode, 2);
	sis_mii_send(sc, frame->mii_phyaddr, 5);
	sis_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	SIO_CLR((SIS_MII_CLK|SIS_MII_DATA));
	DELAY(1);
	SIO_SET(SIS_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(SIS_MII_DIR);

	/* Check for ack */
	SIO_CLR(SIS_MII_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA;
	SIO_SET(SIS_MII_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			SIO_CLR(SIS_MII_CLK);
			DELAY(1);
			SIO_SET(SIS_MII_CLK);
			DELAY(1);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(SIS_MII_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(SIS_MII_CLK);
		DELAY(1);
	}

fail:

	SIO_CLR(SIS_MII_CLK);
	DELAY(1);
	SIO_SET(SIS_MII_CLK);
	DELAY(1);

	if (ack)
		return(1);
	return(0);
}

/*
 * Write to a PHY register through the MII.
 */
static int
sis_mii_writereg(struct sis_softc *sc, struct sis_mii_frame *frame)
{

	/*
	 * Set up frame for TX.
	 */
	frame->mii_stdelim = SIS_MII_STARTDELIM;
	frame->mii_opcode = SIS_MII_WRITEOP;
	frame->mii_turnaround = SIS_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	SIO_SET(SIS_MII_DIR);

	sis_mii_sync(sc);

	sis_mii_send(sc, frame->mii_stdelim, 2);
	sis_mii_send(sc, frame->mii_opcode, 2);
	sis_mii_send(sc, frame->mii_phyaddr, 5);
	sis_mii_send(sc, frame->mii_regaddr, 5);
	sis_mii_send(sc, frame->mii_turnaround, 2);
	sis_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	SIO_SET(SIS_MII_CLK);
	DELAY(1);
	SIO_CLR(SIS_MII_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	SIO_CLR(SIS_MII_DIR);

	return(0);
}

static int
sis_miibus_readreg(device_t dev, int phy, int reg)
{
	struct sis_softc *sc;
	struct sis_mii_frame frame;

	sc = device_get_softc(dev);

	if (sc->sis_type == SIS_TYPE_83815) {
		if (phy != 0)
			return(0);
		/*
		 * The NatSemi chip can take a while after
		 * a reset to come ready, during which the BMSR
		 * returns a value of 0. This is *never* supposed
		 * to happen: some of the BMSR bits are meant to
		 * be hardwired in the on position, and this can
		 * confuse the miibus code a bit during the probe
		 * and attach phase. So we make an effort to check
		 * for this condition and wait for it to clear.
		 */
		if (!CSR_READ_4(sc, NS_BMSR))
			DELAY(1000);
		return CSR_READ_4(sc, NS_BMCR + (reg * 4));
	}

	/*
	 * Chipsets < SIS_635 seem not to be able to read/write
	 * through mdio. Use the enhanced PHY access register
	 * again for them.
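	 * The enhanced interface packs the whole transaction into
	 * SIS_PHYCTL: write data in bits 31-16, the PHY address at
	 * bit 11, the register number at bit 6 and the opcode in the
	 * low bits; software then sets SIS_PHYCTL_ACCESS and polls
	 * for the chip to clear it.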
*/ if (sc->sis_type == SIS_TYPE_900 && sc->sis_rev < SIS_REV_635) { int i, val = 0; if (phy != 0) return(0); CSR_WRITE_4(sc, SIS_PHYCTL, (phy << 11) | (reg << 6) | SIS_PHYOP_READ); SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS); for (i = 0; i < SIS_TIMEOUT; i++) { if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS)) break; } if (i == SIS_TIMEOUT) { device_printf(sc->sis_dev, "PHY failed to come ready\n"); return(0); } val = (CSR_READ_4(sc, SIS_PHYCTL) >> 16) & 0xFFFF; if (val == 0xFFFF) return(0); return(val); } else { bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; sis_mii_readreg(sc, &frame); return(frame.mii_data); } } static int sis_miibus_writereg(device_t dev, int phy, int reg, int data) { struct sis_softc *sc; struct sis_mii_frame frame; sc = device_get_softc(dev); if (sc->sis_type == SIS_TYPE_83815) { if (phy != 0) return(0); CSR_WRITE_4(sc, NS_BMCR + (reg * 4), data); return(0); } /* * Chipsets < SIS_635 seem not to be able to read/write * through mdio. Use the enhanced PHY access register * again for them. */ if (sc->sis_type == SIS_TYPE_900 && sc->sis_rev < SIS_REV_635) { int i; if (phy != 0) return(0); CSR_WRITE_4(sc, SIS_PHYCTL, (data << 16) | (phy << 11) | (reg << 6) | SIS_PHYOP_WRITE); SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS); for (i = 0; i < SIS_TIMEOUT; i++) { if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS)) break; } if (i == SIS_TIMEOUT) device_printf(sc->sis_dev, "PHY failed to come ready\n"); } else { bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; frame.mii_data = data; sis_mii_writereg(sc, &frame); } return(0); } static void sis_miibus_statchg(device_t dev) { struct sis_softc *sc; sc = device_get_softc(dev); SIS_LOCK_ASSERT(sc); sis_initl(sc); } static uint32_t sis_mchash(struct sis_softc *sc, const uint8_t *addr) { uint32_t crc; /* Compute CRC for the address value. */ crc = ether_crc32_be(addr, ETHER_ADDR_LEN); /* * return the filter bit position * * The NatSemi chip has a 512-bit filter, which is * different than the SiS, so we special-case it. */ if (sc->sis_type == SIS_TYPE_83815) return (crc >> 23); else if (sc->sis_rev >= SIS_REV_635 || sc->sis_rev == SIS_REV_900B) return (crc >> 24); else return (crc >> 25); } static void sis_setmulti_ns(struct sis_softc *sc) { struct ifnet *ifp; struct ifmultiaddr *ifma; u_int32_t h = 0, i, filtsave; int bit, index; ifp = sc->sis_ifp; if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { SIS_CLRBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH); SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI); return; } /* * We have to explicitly enable the multicast hash table * on the NatSemi chip if we want to use it, which we do. 
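	 * sis_mchash() above keeps 9 bits of the CRC for these parts
	 * (512 filter bits) versus 8 or 7 bits (256/128 bits) on the
	 * SiS chips, which is why the two setmulti routines are kept
	 * separate.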
	 */
	SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH);
	SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI);

	filtsave = CSR_READ_4(sc, SIS_RXFILT_CTL);

	/* first, zot all the existing hash bits */
	for (i = 0; i < 32; i++) {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + (i*2));
		CSR_WRITE_4(sc, SIS_RXFILT_DATA, 0);
	}

	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = sis_mchash(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		index = h >> 3;
		bit = h & 0x1F;
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + index);
		if (bit > 0xF)
			bit -= 0x10;
		SIS_SETBIT(sc, SIS_RXFILT_DATA, (1 << bit));
	}
	IF_ADDR_UNLOCK(ifp);

	CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave);

	return;
}

static void
sis_setmulti_sis(struct sis_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	u_int32_t h, i, n, ctl;
	u_int16_t hashes[16];

	ifp = sc->sis_ifp;

	/* hash table size */
	if (sc->sis_rev >= SIS_REV_635 || sc->sis_rev == SIS_REV_900B)
		n = 16;
	else
		n = 8;

	ctl = CSR_READ_4(sc, SIS_RXFILT_CTL) & SIS_RXFILTCTL_ENABLE;

	if (ifp->if_flags & IFF_BROADCAST)
		ctl |= SIS_RXFILTCTL_BROAD;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		ctl |= SIS_RXFILTCTL_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			ctl |= SIS_RXFILTCTL_BROAD|SIS_RXFILTCTL_ALLPHYS;
		for (i = 0; i < n; i++)
			hashes[i] = ~0;
	} else {
		for (i = 0; i < n; i++)
			hashes[i] = 0;
		i = 0;
		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			h = sis_mchash(sc,
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
			hashes[h >> 4] |= 1 << (h & 0xf);
			i++;
		}
		IF_ADDR_UNLOCK(ifp);
		if (i > n) {
			ctl |= SIS_RXFILTCTL_ALLMULTI;
			for (i = 0; i < n; i++)
				hashes[i] = ~0;
		}
	}

	for (i = 0; i < n; i++) {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, (4 + i) << 16);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA, hashes[i]);
	}

	CSR_WRITE_4(sc, SIS_RXFILT_CTL, ctl);
}

static void
sis_reset(struct sis_softc *sc)
{
	int i;

	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RESET);

	for (i = 0; i < SIS_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, SIS_CSR) & SIS_CSR_RESET))
			break;
	}

	if (i == SIS_TIMEOUT)
		device_printf(sc->sis_dev, "reset never completed\n");

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/*
	 * If this is a NatSemi chip, make sure to clear
	 * PME mode.
	 */
	if (sc->sis_type == SIS_TYPE_83815) {
		CSR_WRITE_4(sc, NS_CLKRUN, NS_CLKRUN_PMESTS);
		CSR_WRITE_4(sc, NS_CLKRUN, 0);
	}

	return;
}

/*
 * Probe for an SiS chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
sis_probe(device_t dev)
{
	struct sis_type *t;

	t = sis_devs;

	while(t->sis_name != NULL) {
		if ((pci_get_vendor(dev) == t->sis_vid) &&
		    (pci_get_device(dev) == t->sis_did)) {
			device_set_desc(dev, t->sis_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return(ENXIO);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
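 * The interrupt handler is hooked up last, once everything else is
 * initialized; any failure funnels through sis_detach(), which must
 * therefore cope with a partially constructed softc.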
*/ static int sis_attach(device_t dev) { u_char eaddr[ETHER_ADDR_LEN]; struct sis_softc *sc; struct ifnet *ifp; int error = 0, waittime = 0; waittime = 0; sc = device_get_softc(dev); sc->sis_dev = dev; mtx_init(&sc->sis_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->sis_stat_ch, &sc->sis_mtx, 0); if (pci_get_device(dev) == SIS_DEVICEID_900) sc->sis_type = SIS_TYPE_900; if (pci_get_device(dev) == SIS_DEVICEID_7016) sc->sis_type = SIS_TYPE_7016; if (pci_get_vendor(dev) == NS_VENDORID) sc->sis_type = SIS_TYPE_83815; sc->sis_rev = pci_read_config(dev, PCIR_REVID, 1); /* * Map control/status registers. */ pci_enable_busmaster(dev); error = bus_alloc_resources(dev, sis_res_spec, sc->sis_res); if (error) { device_printf(dev, "couldn't allocate resources\n"); goto fail; } /* Reset the adapter. */ sis_reset(sc); if (sc->sis_type == SIS_TYPE_900 && (sc->sis_rev == SIS_REV_635 || sc->sis_rev == SIS_REV_900B)) { SIO_SET(SIS_CFG_RND_CNT); SIO_SET(SIS_CFG_PERR_DETECT); } /* * Get station address from the EEPROM. */ switch (pci_get_vendor(dev)) { case NS_VENDORID: sc->sis_srr = CSR_READ_4(sc, NS_SRR); /* We can't update the device description, so spew */ if (sc->sis_srr == NS_SRR_15C) device_printf(dev, "Silicon Revision: DP83815C\n"); else if (sc->sis_srr == NS_SRR_15D) device_printf(dev, "Silicon Revision: DP83815D\n"); else if (sc->sis_srr == NS_SRR_16A) device_printf(dev, "Silicon Revision: DP83816A\n"); else device_printf(dev, "Silicon Revision %x\n", sc->sis_srr); /* * Reading the MAC address out of the EEPROM on * the NatSemi chip takes a bit more work than * you'd expect. The address spans 4 16-bit words, * with the first word containing only a single bit. * You have to shift everything over one bit to * get it aligned properly. Also, the bits are * stored backwards (the LSB is really the MSB, * and so on) so you have to reverse them in order * to get the MAC address into the form we want. * Why? Who the hell knows. */ { u_int16_t tmp[4]; sis_read_eeprom(sc, (caddr_t)&tmp, NS_EE_NODEADDR, 4, 0); /* Shift everything over one bit. */ tmp[3] = tmp[3] >> 1; tmp[3] |= tmp[2] << 15; tmp[2] = tmp[2] >> 1; tmp[2] |= tmp[1] << 15; tmp[1] = tmp[1] >> 1; tmp[1] |= tmp[0] << 15; /* Now reverse all the bits. */ tmp[3] = sis_reverse(tmp[3]); tmp[2] = sis_reverse(tmp[2]); tmp[1] = sis_reverse(tmp[1]); bcopy((char *)&tmp[1], eaddr, ETHER_ADDR_LEN); } break; case SIS_VENDORID: default: #if defined(__i386__) || defined(__amd64__) /* * If this is a SiS 630E chipset with an embedded * SiS 900 controller, we have to read the MAC address * from the APC CMOS RAM. Our method for doing this * is very ugly since we have to reach out and grab * ahold of hardware for which we cannot properly * allocate resources. This code is only compiled on * the i386 architecture since the SiS 630E chipset * is for x86 motherboards only. Note that there are * a lot of magic numbers in this hack. These are * taken from SiS's Linux driver. I'd like to replace * them with proper symbolic definitions, but that * requires some datasheets that I don't have access * to at the moment. */ if (sc->sis_rev == SIS_REV_630S || sc->sis_rev == SIS_REV_630E || sc->sis_rev == SIS_REV_630EA1) sis_read_cmos(sc, dev, (caddr_t)&eaddr, 0x9, 6); else if (sc->sis_rev == SIS_REV_635 || sc->sis_rev == SIS_REV_630ET) sis_read_mac(sc, dev, (caddr_t)&eaddr); else if (sc->sis_rev == SIS_REV_96x) { /* Allow to read EEPROM from LAN. 
It is shared
			 * between a 1394 controller and the NIC and each
			 * time we access it, we need to set SIS_EECMD_REQ.
			 */
			SIO_SET(SIS_EECMD_REQ);
			for (waittime = 0; waittime < SIS_TIMEOUT;
			    waittime++) {
				/* Force EEPROM to idle state. */
				sis_eeprom_idle(sc);
				if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECMD_GNT) {
					sis_read_eeprom(sc, (caddr_t)&eaddr,
					    SIS_EE_NODEADDR, 3, 0);
					break;
				}
				DELAY(1);
			}
			/*
			 * Set SIS_EECTL_CLK to high, so another master
			 * can operate on the i2c bus.
			 */
			SIO_SET(SIS_EECTL_CLK);
			/* Refuse EEPROM access by LAN */
			SIO_SET(SIS_EECMD_DONE);
		} else
#endif
			sis_read_eeprom(sc, (caddr_t)&eaddr,
			    SIS_EE_NODEADDR, 3, 0);
		break;
	}

	/*
	 * Allocate the parent bus DMA tag appropriate for PCI.
	 */
#define SIS_NSEG_NEW 32
	error = bus_dma_tag_create(NULL,	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    MAXBSIZE, SIS_NSEG_NEW,	/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    BUS_DMA_ALLOCNOW,		/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &sc->sis_parent_tag);
	if (error)
		goto fail;

	/*
	 * Now allocate a tag for the DMA descriptor lists and a chunk
	 * of DMA-able memory based on the tag. Also obtain the physical
	 * addresses of the RX and TX ring, which we'll need later.
	 * All of our lists are allocated as a contiguous block
	 * of memory.
	 */
	error = bus_dma_tag_create(sc->sis_parent_tag,	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    SIS_RX_LIST_SZ, 1,		/* maxsize,nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    busdma_lock_mutex,		/* lockfunc */
	    &Giant,			/* lockarg */
	    &sc->sis_rx_tag);
	if (error)
		goto fail;

	error = bus_dmamem_alloc(sc->sis_rx_tag,
	    (void **)&sc->sis_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->sis_rx_dmamap);

	if (error) {
		device_printf(dev, "no memory for rx list buffers!\n");
		bus_dma_tag_destroy(sc->sis_rx_tag);
		sc->sis_rx_tag = NULL;
		goto fail;
	}

	error = bus_dmamap_load(sc->sis_rx_tag,
	    sc->sis_rx_dmamap, &(sc->sis_rx_list[0]),
	    sizeof(struct sis_desc), sis_dma_map_ring,
	    &sc->sis_rx_paddr, 0);

	if (error) {
		device_printf(dev, "cannot get address of the rx ring!\n");
		bus_dmamem_free(sc->sis_rx_tag,
		    sc->sis_rx_list, sc->sis_rx_dmamap);
		bus_dma_tag_destroy(sc->sis_rx_tag);
		sc->sis_rx_tag = NULL;
		goto fail;
	}

	error = bus_dma_tag_create(sc->sis_parent_tag,	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    SIS_TX_LIST_SZ, 1,		/* maxsize,nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    busdma_lock_mutex,		/* lockfunc */
	    &Giant,			/* lockarg */
	    &sc->sis_tx_tag);
	if (error)
		goto fail;

	error = bus_dmamem_alloc(sc->sis_tx_tag,
	    (void **)&sc->sis_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    &sc->sis_tx_dmamap);

	if (error) {
		device_printf(dev, "no memory for tx list buffers!\n");
		bus_dma_tag_destroy(sc->sis_tx_tag);
		sc->sis_tx_tag = NULL;
		goto fail;
	}

	error = bus_dmamap_load(sc->sis_tx_tag,
	    sc->sis_tx_dmamap, &(sc->sis_tx_list[0]),
	    sizeof(struct sis_desc), sis_dma_map_ring,
	    &sc->sis_tx_paddr, 0);

	if (error) {
		device_printf(dev, "cannot get address of the tx ring!\n");
		bus_dmamem_free(sc->sis_tx_tag,
		    sc->sis_tx_list, sc->sis_tx_dmamap);
		bus_dma_tag_destroy(sc->sis_tx_tag);
		sc->sis_tx_tag = NULL;
		goto fail;
	}

	error = bus_dma_tag_create(sc->sis_parent_tag,	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
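	    /*
	     * (This tag, sc->sis_tag, is the per-buffer one: each RX
	     * cluster and TX fragment is later loaded through it as a
	     * single segment of at most MCLBYTES.)
	     */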
NULL, NULL, /* filter, filterarg */ MCLBYTES, 1, /* maxsize,nsegments */ BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 0, /* flags */ busdma_lock_mutex, /* lockfunc */ &Giant, /* lockarg */ &sc->sis_tag); if (error) goto fail; /* * Obtain the physical addresses of the RX and TX * rings which we'll need later in the init routine. */ ifp = sc->sis_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = sis_ioctl; ifp->if_start = sis_start; ifp->if_init = sis_init; IFQ_SET_MAXLEN(&ifp->if_snd, SIS_TX_LIST_CNT - 1); ifp->if_snd.ifq_drv_maxlen = SIS_TX_LIST_CNT - 1; IFQ_SET_READY(&ifp->if_snd); /* * Do MII setup. */ if (mii_phy_probe(dev, &sc->sis_miibus, sis_ifmedia_upd, sis_ifmedia_sts)) { device_printf(dev, "MII without any PHY!\n"); error = ENXIO; goto fail; } /* * Call MI attach routine. */ ether_ifattach(ifp, eaddr); /* * Tell the upper layer(s) we support long frames. */ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_VLAN_MTU; ifp->if_capenable = ifp->if_capabilities; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif /* Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->sis_res[1], INTR_TYPE_NET | INTR_MPSAFE, NULL, sis_intr, sc, &sc->sis_intrhand); if (error) { device_printf(dev, "couldn't set up irq\n"); ether_ifdetach(ifp); goto fail; } fail: if (error) sis_detach(dev); return(error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. */ static int sis_detach(device_t dev) { struct sis_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); KASSERT(mtx_initialized(&sc->sis_mtx), ("sis mutex not initialized")); ifp = sc->sis_ifp; #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif /* These should only be active if attach succeeded. */ if (device_is_attached(dev)) { SIS_LOCK(sc); sis_reset(sc); sis_stop(sc); SIS_UNLOCK(sc); callout_drain(&sc->sis_stat_ch); ether_ifdetach(ifp); } if (sc->sis_miibus) device_delete_child(dev, sc->sis_miibus); bus_generic_detach(dev); if (sc->sis_intrhand) bus_teardown_intr(dev, sc->sis_res[1], sc->sis_intrhand); bus_release_resources(dev, sis_res_spec, sc->sis_res); if (ifp) if_free(ifp); if (sc->sis_rx_tag) { bus_dmamap_unload(sc->sis_rx_tag, sc->sis_rx_dmamap); bus_dmamem_free(sc->sis_rx_tag, sc->sis_rx_list, sc->sis_rx_dmamap); bus_dma_tag_destroy(sc->sis_rx_tag); } if (sc->sis_tx_tag) { bus_dmamap_unload(sc->sis_tx_tag, sc->sis_tx_dmamap); bus_dmamem_free(sc->sis_tx_tag, sc->sis_tx_list, sc->sis_tx_dmamap); bus_dma_tag_destroy(sc->sis_tx_tag); } if (sc->sis_parent_tag) bus_dma_tag_destroy(sc->sis_parent_tag); if (sc->sis_tag) bus_dma_tag_destroy(sc->sis_tag); mtx_destroy(&sc->sis_mtx); return(0); } /* * Initialize the TX and RX descriptors and allocate mbufs for them. Note that * we arrange the descriptors in a closed ring, so that the last descriptor * points back to the first. 
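 * Each descriptor's link is run through bus_dmamap_load() as well, so
 * the sis_dma_map_desc_next() callback can record the neighbour's
 * physical address in sis_next for the chip to follow.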
*/ static int sis_ring_init(struct sis_softc *sc) { int i, error; struct sis_desc *dp; dp = &sc->sis_tx_list[0]; for (i = 0; i < SIS_TX_LIST_CNT; i++, dp++) { if (i == (SIS_TX_LIST_CNT - 1)) dp->sis_nextdesc = &sc->sis_tx_list[0]; else dp->sis_nextdesc = dp + 1; bus_dmamap_load(sc->sis_tx_tag, sc->sis_tx_dmamap, dp->sis_nextdesc, sizeof(struct sis_desc), sis_dma_map_desc_next, dp, 0); dp->sis_mbuf = NULL; dp->sis_ptr = 0; dp->sis_ctl = 0; } sc->sis_tx_prod = sc->sis_tx_cons = sc->sis_tx_cnt = 0; bus_dmamap_sync(sc->sis_tx_tag, sc->sis_tx_dmamap, BUS_DMASYNC_PREWRITE); dp = &sc->sis_rx_list[0]; for (i = 0; i < SIS_RX_LIST_CNT; i++, dp++) { error = sis_newbuf(sc, dp, NULL); if (error) return(error); if (i == (SIS_RX_LIST_CNT - 1)) dp->sis_nextdesc = &sc->sis_rx_list[0]; else dp->sis_nextdesc = dp + 1; bus_dmamap_load(sc->sis_rx_tag, sc->sis_rx_dmamap, dp->sis_nextdesc, sizeof(struct sis_desc), sis_dma_map_desc_next, dp, 0); } bus_dmamap_sync(sc->sis_rx_tag, sc->sis_rx_dmamap, BUS_DMASYNC_PREWRITE); sc->sis_rx_pdsc = &sc->sis_rx_list[0]; return(0); } /* * Initialize an RX descriptor and attach an MBUF cluster. */ static int sis_newbuf(struct sis_softc *sc, struct sis_desc *c, struct mbuf *m) { if (c == NULL) return(EINVAL); if (m == NULL) { m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return(ENOBUFS); } else m->m_data = m->m_ext.ext_buf; c->sis_mbuf = m; c->sis_ctl = SIS_RXLEN; bus_dmamap_create(sc->sis_tag, 0, &c->sis_map); bus_dmamap_load(sc->sis_tag, c->sis_map, mtod(m, void *), MCLBYTES, sis_dma_map_desc_ptr, c, 0); bus_dmamap_sync(sc->sis_tag, c->sis_map, BUS_DMASYNC_PREREAD); return(0); } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. */ -static void +static int sis_rxeof(struct sis_softc *sc) { struct mbuf *m, *m0; struct ifnet *ifp; struct sis_desc *cur_rx; - int total_len = 0; + int total_len = 0, rx_npkts = 0; u_int32_t rxstat; SIS_LOCK_ASSERT(sc); ifp = sc->sis_ifp; for(cur_rx = sc->sis_rx_pdsc; SIS_OWNDESC(cur_rx); cur_rx = cur_rx->sis_nextdesc) { #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { if (sc->rxcycles <= 0) break; sc->rxcycles--; } #endif rxstat = cur_rx->sis_rxstat; bus_dmamap_sync(sc->sis_tag, cur_rx->sis_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sis_tag, cur_rx->sis_map); bus_dmamap_destroy(sc->sis_tag, cur_rx->sis_map); m = cur_rx->sis_mbuf; cur_rx->sis_mbuf = NULL; total_len = SIS_RXBYTES(cur_rx); /* * If an error occurs, update stats, clear the * status word and leave the mbuf cluster in place: * it should simply get re-used next time this descriptor * comes up in the ring. */ if ((ifp->if_capenable & IFCAP_VLAN_MTU) != 0 && total_len <= (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN - ETHER_CRC_LEN)) rxstat &= ~SIS_RXSTAT_GIANT; if (SIS_RXSTAT_ERROR(rxstat) != 0) { ifp->if_ierrors++; if (rxstat & SIS_RXSTAT_COLL) ifp->if_collisions++; sis_newbuf(sc, cur_rx, m); continue; } /* No errors; receive the packet. */ #ifdef __NO_STRICT_ALIGNMENT /* * On architectures without alignment problems we try to * allocate a new buffer for the receive ring, and pass up * the one where the packet is already, saving the expensive * copy done in m_devget(). * If we are on an architecture with alignment problems, or * if the allocation fails, then use m_devget and leave the * existing buffer in the receive ring. 
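 * (The copy exists because on strict-alignment CPUs the 14-byte
 * Ethernet header would leave the IP header mis-aligned; m_devget()
 * plus the ETHER_ALIGN offset restores 32-bit alignment at the cost
 * of a copy.)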
*/ if (sis_newbuf(sc, cur_rx, NULL) == 0) m->m_pkthdr.len = m->m_len = total_len; else #endif { m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, ifp, NULL); sis_newbuf(sc, cur_rx, m); if (m0 == NULL) { ifp->if_ierrors++; continue; } m = m0; } ifp->if_ipackets++; m->m_pkthdr.rcvif = ifp; SIS_UNLOCK(sc); (*ifp->if_input)(ifp, m); SIS_LOCK(sc); + rx_npkts++; } sc->sis_rx_pdsc = cur_rx; + return (rx_npkts); } static void sis_rxeoc(struct sis_softc *sc) { SIS_LOCK_ASSERT(sc); sis_rxeof(sc); sis_initl(sc); } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. */ static void sis_txeof(struct sis_softc *sc) { struct ifnet *ifp; u_int32_t idx; SIS_LOCK_ASSERT(sc); ifp = sc->sis_ifp; /* * Go through our tx list and free mbufs for those * frames that have been transmitted. */ for (idx = sc->sis_tx_cons; sc->sis_tx_cnt > 0; sc->sis_tx_cnt--, SIS_INC(idx, SIS_TX_LIST_CNT) ) { struct sis_desc *cur_tx = &sc->sis_tx_list[idx]; if (SIS_OWNDESC(cur_tx)) break; if (cur_tx->sis_ctl & SIS_CMDSTS_MORE) continue; if (!(cur_tx->sis_ctl & SIS_CMDSTS_PKT_OK)) { ifp->if_oerrors++; if (cur_tx->sis_txstat & SIS_TXSTAT_EXCESSCOLLS) ifp->if_collisions++; if (cur_tx->sis_txstat & SIS_TXSTAT_OUTOFWINCOLL) ifp->if_collisions++; } ifp->if_collisions += (cur_tx->sis_txstat & SIS_TXSTAT_COLLCNT) >> 16; ifp->if_opackets++; if (cur_tx->sis_mbuf != NULL) { m_freem(cur_tx->sis_mbuf); cur_tx->sis_mbuf = NULL; bus_dmamap_unload(sc->sis_tag, cur_tx->sis_map); bus_dmamap_destroy(sc->sis_tag, cur_tx->sis_map); } } if (idx != sc->sis_tx_cons) { /* we freed up some buffers */ sc->sis_tx_cons = idx; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } sc->sis_watchdog_timer = (sc->sis_tx_cnt == 0) ? 0 : 5; return; } static void sis_tick(void *xsc) { struct sis_softc *sc; struct mii_data *mii; struct ifnet *ifp; sc = xsc; SIS_LOCK_ASSERT(sc); sc->in_tick = 1; ifp = sc->sis_ifp; mii = device_get_softc(sc->sis_miibus); mii_tick(mii); sis_watchdog(sc); if (!sc->sis_link && mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { sc->sis_link++; if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) sis_startl(ifp); } callout_reset(&sc->sis_stat_ch, hz, sis_tick, sc); sc->in_tick = 0; } #ifdef DEVICE_POLLING static poll_handler_t sis_poll; -static void +static int sis_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct sis_softc *sc = ifp->if_softc; + int rx_npkts = 0; SIS_LOCK(sc); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { SIS_UNLOCK(sc); - return; + return (rx_npkts); } /* * On the sis, reading the status register also clears it. * So before returning to intr mode we must make sure that all * possible pending sources of interrupts have been served. * In practice this means run to completion the *eof routines, * and then call the interrupt routine */ sc->rxcycles = count; - sis_rxeof(sc); + rx_npkts = sis_rxeof(sc); sis_txeof(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) sis_startl(ifp); if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) { u_int32_t status; /* Reading the ISR register clears all interrupts. 
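 * The handler's return value is new in this revision: rx_npkts counts
 * the frames sis_rxeof() delivered, and the DEVICE_POLLING core can
 * use that figure when sizing subsequent polling bursts.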
*/ status = CSR_READ_4(sc, SIS_ISR); if (status & (SIS_ISR_RX_ERR|SIS_ISR_RX_OFLOW)) sis_rxeoc(sc); if (status & (SIS_ISR_RX_IDLE)) SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE); if (status & SIS_ISR_SYSERR) { sis_reset(sc); sis_initl(sc); } } SIS_UNLOCK(sc); + return (rx_npkts); } #endif /* DEVICE_POLLING */ static void sis_intr(void *arg) { struct sis_softc *sc; struct ifnet *ifp; u_int32_t status; sc = arg; ifp = sc->sis_ifp; if (sc->sis_stopped) /* Most likely shared interrupt */ return; SIS_LOCK(sc); #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { SIS_UNLOCK(sc); return; } #endif /* Disable interrupts. */ CSR_WRITE_4(sc, SIS_IER, 0); for (;;) { SIS_LOCK_ASSERT(sc); /* Reading the ISR register clears all interrupts. */ status = CSR_READ_4(sc, SIS_ISR); if ((status & SIS_INTRS) == 0) break; if (status & (SIS_ISR_TX_DESC_OK | SIS_ISR_TX_ERR | SIS_ISR_TX_OK | SIS_ISR_TX_IDLE) ) sis_txeof(sc); if (status & (SIS_ISR_RX_DESC_OK | SIS_ISR_RX_OK | SIS_ISR_RX_ERR | SIS_ISR_RX_IDLE)) sis_rxeof(sc); if (status & SIS_ISR_RX_OFLOW) sis_rxeoc(sc); if (status & (SIS_ISR_RX_IDLE)) SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE); if (status & SIS_ISR_SYSERR) { sis_reset(sc); sis_initl(sc); } } /* Re-enable interrupts. */ CSR_WRITE_4(sc, SIS_IER, 1); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) sis_startl(ifp); SIS_UNLOCK(sc); } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. */ static int sis_encap(struct sis_softc *sc, struct mbuf **m_head, uint32_t *txidx) { struct sis_desc *f = NULL; struct mbuf *m; int frag, cur, cnt = 0, chainlen = 0; /* * If there's no way we can send any packets, return now. */ if (SIS_TX_LIST_CNT - sc->sis_tx_cnt < 2) return (ENOBUFS); /* * Count the number of frags in this chain to see if * we need to m_defrag. Since the descriptor list is shared * by all packets, we'll m_defrag long chains so that they * do not use up the entire list, even if they would fit. */ for (m = *m_head; m != NULL; m = m->m_next) chainlen++; if ((chainlen > SIS_TX_LIST_CNT / 4) || ((SIS_TX_LIST_CNT - (chainlen + sc->sis_tx_cnt)) < 2)) { m = m_defrag(*m_head, M_DONTWAIT); if (m == NULL) return (ENOBUFS); *m_head = m; } /* * Start packing the mbufs in this chain into * the fragment pointers. Stop when we run out * of fragments or hit the end of the mbuf chain. */ cur = frag = *txidx; for (m = *m_head; m != NULL; m = m->m_next) { if (m->m_len != 0) { if ((SIS_TX_LIST_CNT - (sc->sis_tx_cnt + cnt)) < 2) return(ENOBUFS); f = &sc->sis_tx_list[frag]; f->sis_ctl = SIS_CMDSTS_MORE | m->m_len; bus_dmamap_create(sc->sis_tag, 0, &f->sis_map); bus_dmamap_load(sc->sis_tag, f->sis_map, mtod(m, void *), m->m_len, sis_dma_map_desc_ptr, f, 0); bus_dmamap_sync(sc->sis_tag, f->sis_map, BUS_DMASYNC_PREREAD); if (cnt != 0) f->sis_ctl |= SIS_CMDSTS_OWN; cur = frag; SIS_INC(frag, SIS_TX_LIST_CNT); cnt++; } } if (m != NULL) return(ENOBUFS); sc->sis_tx_list[cur].sis_mbuf = *m_head; sc->sis_tx_list[cur].sis_ctl &= ~SIS_CMDSTS_MORE; sc->sis_tx_list[*txidx].sis_ctl |= SIS_CMDSTS_OWN; sc->sis_tx_cnt += cnt; *txidx = frag; return(0); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit lists. We also save a * copy of the pointers since the transmit list fragment pointers are * physical addresses. 
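 * sis_encap() marks every fragment except the head with SIS_CMDSTS_OWN
 * as it builds the chain and hands the head's OWN bit over only once
 * the whole chain is complete, so the chip can never start
 * transmitting a half-built packet.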
 */
static void
sis_start(struct ifnet *ifp)
{
	struct sis_softc	*sc;

	sc = ifp->if_softc;
	SIS_LOCK(sc);
	sis_startl(ifp);
	SIS_UNLOCK(sc);
}

static void
sis_startl(struct ifnet *ifp)
{
	struct sis_softc	*sc;
	struct mbuf		*m_head = NULL;
	u_int32_t		idx, queued = 0;

	sc = ifp->if_softc;

	SIS_LOCK_ASSERT(sc);

	if (!sc->sis_link)
		return;

	idx = sc->sis_tx_prod;

	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;

	while(sc->sis_tx_list[idx].sis_mbuf == NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (sis_encap(sc, &m_head, &idx)) {
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		queued++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	if (queued) {
		/* Transmit */
		sc->sis_tx_prod = idx;
		SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		sc->sis_watchdog_timer = 5;
	}
}

static void
sis_init(void *xsc)
{
	struct sis_softc *sc = xsc;

	SIS_LOCK(sc);
	sis_initl(sc);
	SIS_UNLOCK(sc);
}

static void
sis_initl(struct sis_softc *sc)
{
	struct ifnet *ifp = sc->sis_ifp;
	struct mii_data *mii;

	SIS_LOCK_ASSERT(sc);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	sis_stop(sc);
	sc->sis_stopped = 0;

#ifdef notyet
	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr >= NS_SRR_16A) {
		/*
		 * Configure 400usec of interrupt holdoff.  This is based
		 * on empirical tests on a Soekris 4801.
		 */
		CSR_WRITE_4(sc, NS_IHR, 0x100 | 4);
	}
#endif

	mii = device_get_softc(sc->sis_miibus);

	/* Set MAC address */
	if (sc->sis_type == SIS_TYPE_83815) {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR0);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[0]);
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR1);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[1]);
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR2);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[2]);
	} else {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[0]);
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[1]);
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)IF_LLADDR(sc->sis_ifp))[2]);
	}

	/* Init circular TX/RX lists. */
	if (sis_ring_init(sc) != 0) {
		device_printf(sc->sis_dev,
		    "initialization failed: no memory for rx buffers\n");
		sis_stop(sc);
		return;
	}

	/*
	 * Short Cable Receive Errors (MP21.E)
	 * also: Page 78 of the DP83815 data sheet (September 2002 version)
	 * recommends the following register settings "for optimum
	 * performance." for rev 15C.  Set this also for 15D parts as
	 * they require it in practice.
	 */
	if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr <= NS_SRR_15D) {
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
		CSR_WRITE_4(sc, NS_PHY_CR, 0x189C);
		/* set val for c2 */
		CSR_WRITE_4(sc, NS_PHY_TDATA, 0x0000);
		/* load/kill c2 */
		CSR_WRITE_4(sc, NS_PHY_DSPCFG, 0x5040);
		/* raise SD off, from 4 to c */
		CSR_WRITE_4(sc, NS_PHY_SDCFG, 0x008C);
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0);
	}

	/*
	 * For the NatSemi chip, we have to explicitly enable the
	 * reception of ARP frames, as well as turn on the 'perfect
	 * match' filter where we store the station address, otherwise
	 * we won't receive unicasts meant for this host.
*/ if (sc->sis_type == SIS_TYPE_83815) { SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_ARP); SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_PERFECT); } /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) { SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS); } else { SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS); } /* * Set the capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) { SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD); } else { SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD); } /* * Load the multicast filter. */ if (sc->sis_type == SIS_TYPE_83815) sis_setmulti_ns(sc); else sis_setmulti_sis(sc); /* Turn the receive filter on */ SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ENABLE); /* * Load the address of the RX and TX lists. */ CSR_WRITE_4(sc, SIS_RX_LISTPTR, sc->sis_rx_paddr); CSR_WRITE_4(sc, SIS_TX_LISTPTR, sc->sis_tx_paddr); /* SIS_CFG_EDB_MASTER_EN indicates the EDB bus is used instead of * the PCI bus. When this bit is set, the Max DMA Burst Size * for TX/RX DMA should be no larger than 16 double words. */ if (CSR_READ_4(sc, SIS_CFG) & SIS_CFG_EDB_MASTER_EN) { CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG64); } else { CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG256); } /* Accept Long Packets for VLAN support */ SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_JABBER); /* Set TX configuration */ if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T) { CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_10); } else { CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100); } /* Set full/half duplex mode. */ if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { SIS_SETBIT(sc, SIS_TX_CFG, (SIS_TXCFG_IGN_HBEAT|SIS_TXCFG_IGN_CARR)); SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS); } else { SIS_CLRBIT(sc, SIS_TX_CFG, (SIS_TXCFG_IGN_HBEAT|SIS_TXCFG_IGN_CARR)); SIS_CLRBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS); } if (sc->sis_type == SIS_TYPE_83816) { /* * MPII03.D: Half Duplex Excessive Collisions. * Also page 49 in 83816 manual */ SIS_SETBIT(sc, SIS_TX_CFG, SIS_TXCFG_MPII03D); } if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr < NS_SRR_16A && IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) { uint32_t reg; /* * Short Cable Receive Errors (MP21.E) */ CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001); reg = CSR_READ_4(sc, NS_PHY_DSPCFG) & 0xfff; CSR_WRITE_4(sc, NS_PHY_DSPCFG, reg | 0x1000); DELAY(100000); reg = CSR_READ_4(sc, NS_PHY_TDATA) & 0xff; if ((reg & 0x0080) == 0 || (reg > 0xd8 && reg <= 0xff)) { device_printf(sc->sis_dev, "Applying short cable fix (reg=%x)\n", reg); CSR_WRITE_4(sc, NS_PHY_TDATA, 0x00e8); SIS_SETBIT(sc, NS_PHY_DSPCFG, 0x20); } CSR_WRITE_4(sc, NS_PHY_PAGE, 0); } /* * Enable interrupts. */ CSR_WRITE_4(sc, SIS_IMR, SIS_INTRS); #ifdef DEVICE_POLLING /* * ... only enable interrupts if we are not polling, make sure * they are off otherwise. */ if (ifp->if_capenable & IFCAP_POLLING) CSR_WRITE_4(sc, SIS_IER, 0); else #endif CSR_WRITE_4(sc, SIS_IER, 1); /* Enable receiver and transmitter. */ SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE|SIS_CSR_RX_DISABLE); SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE); #ifdef notdef mii_mediachg(mii); #endif ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (!sc->in_tick) callout_reset(&sc->sis_stat_ch, hz, sis_tick, sc); } /* * Set media options. 
*/ static int sis_ifmedia_upd(struct ifnet *ifp) { struct sis_softc *sc; struct mii_data *mii; sc = ifp->if_softc; SIS_LOCK(sc); mii = device_get_softc(sc->sis_miibus); sc->sis_link = 0; if (mii->mii_instance) { struct mii_softc *miisc; LIST_FOREACH(miisc, &mii->mii_phys, mii_list) mii_phy_reset(miisc); } mii_mediachg(mii); SIS_UNLOCK(sc); return(0); } /* * Report current media status. */ static void sis_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct sis_softc *sc; struct mii_data *mii; sc = ifp->if_softc; SIS_LOCK(sc); mii = device_get_softc(sc->sis_miibus); mii_pollstat(mii); SIS_UNLOCK(sc); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static int sis_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct sis_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int error = 0; switch(command) { case SIOCSIFFLAGS: SIS_LOCK(sc); if (ifp->if_flags & IFF_UP) { sis_initl(sc); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) { sis_stop(sc); } SIS_UNLOCK(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: SIS_LOCK(sc); if (sc->sis_type == SIS_TYPE_83815) sis_setmulti_ns(sc); else sis_setmulti_sis(sc); SIS_UNLOCK(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->sis_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: /* ok, disable interrupts */ #ifdef DEVICE_POLLING if (ifr->ifr_reqcap & IFCAP_POLLING && !(ifp->if_capenable & IFCAP_POLLING)) { error = ether_poll_register(sis_poll, ifp); if (error) return(error); SIS_LOCK(sc); /* Disable interrupts */ CSR_WRITE_4(sc, SIS_IER, 0); ifp->if_capenable |= IFCAP_POLLING; SIS_UNLOCK(sc); return (error); } if (!(ifr->ifr_reqcap & IFCAP_POLLING) && ifp->if_capenable & IFCAP_POLLING) { error = ether_poll_deregister(ifp); /* Enable interrupts. */ SIS_LOCK(sc); CSR_WRITE_4(sc, SIS_IER, 1); ifp->if_capenable &= ~IFCAP_POLLING; SIS_UNLOCK(sc); return (error); } #endif /* DEVICE_POLLING */ break; default: error = ether_ioctl(ifp, command, data); break; } return(error); } static void sis_watchdog(struct sis_softc *sc) { SIS_LOCK_ASSERT(sc); if (sc->sis_stopped) { SIS_UNLOCK(sc); return; } if (sc->sis_watchdog_timer == 0 || --sc->sis_watchdog_timer >0) return; device_printf(sc->sis_dev, "watchdog timeout\n"); sc->sis_ifp->if_oerrors++; sis_stop(sc); sis_reset(sc); sis_initl(sc); if (!IFQ_DRV_IS_EMPTY(&sc->sis_ifp->if_snd)) sis_startl(sc->sis_ifp); } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void sis_stop(struct sis_softc *sc) { int i; struct ifnet *ifp; struct sis_desc *dp; if (sc->sis_stopped) return; SIS_LOCK_ASSERT(sc); ifp = sc->sis_ifp; sc->sis_watchdog_timer = 0; callout_stop(&sc->sis_stat_ch); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); CSR_WRITE_4(sc, SIS_IER, 0); CSR_WRITE_4(sc, SIS_IMR, 0); CSR_READ_4(sc, SIS_ISR); /* clear any interrupts already pending */ SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE|SIS_CSR_RX_DISABLE); DELAY(1000); CSR_WRITE_4(sc, SIS_TX_LISTPTR, 0); CSR_WRITE_4(sc, SIS_RX_LISTPTR, 0); sc->sis_link = 0; /* * Free data in the RX lists. */ dp = &sc->sis_rx_list[0]; for (i = 0; i < SIS_RX_LIST_CNT; i++, dp++) { if (dp->sis_mbuf == NULL) continue; bus_dmamap_unload(sc->sis_tag, dp->sis_map); bus_dmamap_destroy(sc->sis_tag, dp->sis_map); m_freem(dp->sis_mbuf); dp->sis_mbuf = NULL; } bzero(sc->sis_rx_list, SIS_RX_LIST_SZ); /* * Free the TX list buffers. 
 */
	dp = &sc->sis_tx_list[0];
	for (i = 0; i < SIS_TX_LIST_CNT; i++, dp++) {
		if (dp->sis_mbuf == NULL)
			continue;
		bus_dmamap_unload(sc->sis_tag, dp->sis_map);
		bus_dmamap_destroy(sc->sis_tag, dp->sis_map);
		m_freem(dp->sis_mbuf);
		dp->sis_mbuf = NULL;
	}

	bzero(sc->sis_tx_list, SIS_TX_LIST_SZ);

	sc->sis_stopped = 1;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
sis_shutdown(device_t dev)
{
	struct sis_softc	*sc;

	sc = device_get_softc(dev);
	SIS_LOCK(sc);
	sis_reset(sc);
	sis_stop(sc);
	SIS_UNLOCK(sc);
	return (0);
}

static device_method_t sis_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sis_probe),
	DEVMETHOD(device_attach,	sis_attach),
	DEVMETHOD(device_detach,	sis_detach),
	DEVMETHOD(device_shutdown,	sis_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	sis_miibus_readreg),
	DEVMETHOD(miibus_writereg,	sis_miibus_writereg),
	DEVMETHOD(miibus_statchg,	sis_miibus_statchg),

	{ 0, 0 }
};

static driver_t sis_driver = {
	"sis",
	sis_methods,
	sizeof(struct sis_softc)
};

static devclass_t sis_devclass;

DRIVER_MODULE(sis, pci, sis_driver, sis_devclass, 0, 0);
DRIVER_MODULE(miibus, sis, miibus_driver, miibus_devclass, 0, 0);
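This is the pattern the commit applies across every driver in the diff: poll handlers change from void to int and report how many frames they processed. In sketch form, for a hypothetical xxx(4) driver (the locking macros, softc layout and helper names are assumptions; poll_handler_t, enum poll_cmd and POLL_AND_CHECK_STATUS come from the real DEVICE_POLLING interface):

#ifdef DEVICE_POLLING
static int
xxx_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct xxx_softc *sc = ifp->if_softc;
	int rx_npkts = 0;

	XXX_LOCK(sc);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		XXX_UNLOCK(sc);
		return (rx_npkts);	/* not running: nothing was done */
	}

	/* The RX path returns the number of frames it passed up. */
	rx_npkts = xxx_rxeof(sc, count);
	xxx_txeof(sc);

	/* Error and link events are only checked on the slow poll. */
	if (cmd == POLL_AND_CHECK_STATUS)
		xxx_check_status(sc);

	XXX_UNLOCK(sc);
	return (rx_npkts);
}
#endif /* DEVICE_POLLING */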
Index: head/sys/dev/smc/if_smc.c
===================================================================
--- head/sys/dev/smc/if_smc.c	(revision 193095)
+++ head/sys/dev/smc/if_smc.c	(revision 193096)
@@ -1,1336 +1,1338 @@
/*-
 * Copyright (c) 2008 Benno Rice.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include
__FBSDID("$FreeBSD$");

/*
 * Driver for SMSC LAN91C111, may work for older variants.
 */

#ifdef HAVE_KERNEL_OPTION_HEADERS
#include "opt_device_polling.h"
#endif

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#ifdef INET
#include
#include
#include
#include
#endif

#include
#include
#include
#include
#include
#include

#define	SMC_LOCK(sc)		mtx_lock(&(sc)->smc_mtx)
#define	SMC_UNLOCK(sc)		mtx_unlock(&(sc)->smc_mtx)
#define	SMC_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->smc_mtx, MA_OWNED)

#define	SMC_INTR_PRIORITY	0
#define	SMC_RX_PRIORITY		5
#define	SMC_TX_PRIORITY		10

devclass_t smc_devclass;

static const char *smc_chip_ids[16] = {
	NULL, NULL, NULL,
	/* 3 */ "SMSC LAN91C90 or LAN91C92",
	/* 4 */ "SMSC LAN91C94",
	/* 5 */ "SMSC LAN91C95",
	/* 6 */ "SMSC LAN91C96",
	/* 7 */ "SMSC LAN91C100",
	/* 8 */ "SMSC LAN91C100FD",
	/* 9 */ "SMSC LAN91C110FD or LAN91C111FD",
	NULL, NULL, NULL,
	NULL, NULL, NULL
};

static void	smc_init(void *);
static void	smc_start(struct ifnet *);
static void	smc_stop(struct smc_softc *);
static int	smc_ioctl(struct ifnet *, u_long, caddr_t);

static void	smc_init_locked(struct smc_softc *);
static void	smc_start_locked(struct ifnet *);
static void	smc_reset(struct smc_softc *);
static int	smc_mii_ifmedia_upd(struct ifnet *);
static void	smc_mii_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static void	smc_mii_tick(void *);
static void	smc_mii_mediachg(struct smc_softc *);
static int	smc_mii_mediaioctl(struct smc_softc *, struct ifreq *, u_long);

static void	smc_task_intr(void *, int);
static void	smc_task_rx(void *, int);
static void	smc_task_tx(void *, int);

static driver_filter_t	smc_intr;
static timeout_t	smc_watchdog;
#ifdef DEVICE_POLLING
static poll_handler_t	smc_poll;
#endif

static __inline void
smc_select_bank(struct smc_softc *sc, uint16_t bank)
{

	bus_write_2(sc->smc_reg, BSR, bank & BSR_BANK_MASK);
}

/* Never call this when not in bank 2. */
static __inline void
smc_mmu_wait(struct smc_softc *sc)
{

	KASSERT((bus_read_2(sc->smc_reg, BSR) &
	    BSR_BANK_MASK) == 2, ("%s: smc_mmu_wait called when not in bank 2",
	    device_get_nameunit(sc->smc_dev)));
	while (bus_read_2(sc->smc_reg, MMUCR) & MMUCR_BUSY)
		;
}

static __inline uint8_t
smc_read_1(struct smc_softc *sc, bus_addr_t offset)
{

	return (bus_read_1(sc->smc_reg, offset));
}

static __inline void
smc_write_1(struct smc_softc *sc, bus_addr_t offset, uint8_t val)
{

	bus_write_1(sc->smc_reg, offset, val);
}

static __inline uint16_t
smc_read_2(struct smc_softc *sc, bus_addr_t offset)
{

	return (bus_read_2(sc->smc_reg, offset));
}

static __inline void
smc_write_2(struct smc_softc *sc, bus_addr_t offset, uint16_t val)
{

	bus_write_2(sc->smc_reg, offset, val);
}

static __inline void
smc_read_multi_2(struct smc_softc *sc, bus_addr_t offset, uint16_t *datap,
    bus_size_t count)
{

	bus_read_multi_2(sc->smc_reg, offset, datap, count);
}

static __inline void
smc_write_multi_2(struct smc_softc *sc, bus_addr_t offset, uint16_t *datap,
    bus_size_t count)
{

	bus_write_multi_2(sc->smc_reg, offset, datap, count);
}

int
smc_probe(device_t dev)
{
	int	rid, type, error;
	uint16_t	val;
	struct smc_softc	*sc;
	struct resource		*reg;

	sc = device_get_softc(dev);
	rid = 0;
	type = SYS_RES_IOPORT;
	error = 0;

	if (sc->smc_usemem)
		type = SYS_RES_MEMORY;

	reg = bus_alloc_resource(dev, type, &rid, 0, ~0, 16, RF_ACTIVE);
	if (reg == NULL) {
		if (bootverbose)
			device_printf(dev,
			    "could not allocate I/O resource for probe\n");
		return (ENXIO);
	}

	/* Check for the identification value in the BSR.
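	 * The 91C1xx multiplexes its registers through four banks
	 * selected via the BSR, whose high byte reads back a fixed
	 * identify pattern (BSR_IDENTIFY); that is what is checked
	 * here, and re-checked below after forcing a bank switch.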
	 */
	val = bus_read_2(reg, BSR);
	if ((val & BSR_IDENTIFY_MASK) != BSR_IDENTIFY) {
		if (bootverbose)
			device_printf(dev,
			    "identification value not in BSR\n");
		error = ENXIO;
		goto done;
	}

	/*
	 * Try switching banks and make sure we still get the identification
	 * value.
	 */
	bus_write_2(reg, BSR, 0);
	val = bus_read_2(reg, BSR);
	if ((val & BSR_IDENTIFY_MASK) != BSR_IDENTIFY) {
		if (bootverbose)
			device_printf(dev,
			    "identification value not in BSR after write\n");
		error = ENXIO;
		goto done;
	}

#if 0
	/* Check the BAR. */
	bus_write_2(reg, BSR, 1);
	val = bus_read_2(reg, BAR);
	val = BAR_ADDRESS(val);
	if (rman_get_start(reg) != val) {
		if (bootverbose)
			device_printf(dev, "BAR address %x does not match "
			    "I/O resource address %lx\n", val,
			    rman_get_start(reg));
		error = ENXIO;
		goto done;
	}
#endif

	/* Compare REV against known chip revisions. */
	bus_write_2(reg, BSR, 3);
	val = bus_read_2(reg, REV);
	val = (val & REV_CHIP_MASK) >> REV_CHIP_SHIFT;
	if (smc_chip_ids[val] == NULL) {
		if (bootverbose)
			device_printf(dev, "Unknown chip revision: %d\n", val);
		error = ENXIO;
		goto done;
	}

	device_set_desc(dev, smc_chip_ids[val]);

done:
	bus_release_resource(dev, type, rid, reg);
	return (error);
}
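/*
 * smc_attach() below pushes all real interrupt work out to a taskqueue
 * drained by a private thread.  The skeleton of that arrangement looks
 * like this (every "example_*" name is an illustrative assumption;
 * TASK_INIT(), taskqueue_create_fast(), taskqueue_thread_enqueue and
 * taskqueue_start_threads() are the real taskqueue(9) interfaces):
 */
#ifdef notnow	/* sketch only, not part of the driver */
static struct taskqueue *example_tq;
static struct task example_task;

static void
example_task_fn(void *arg, int pending)
{
	/* Heavy lifting happens here, in thread context. */
}

static void
example_setup(device_t dev)
{

	TASK_INIT(&example_task, 0, example_task_fn, dev);
	example_tq = taskqueue_create_fast("example_taskq", M_NOWAIT,
	    taskqueue_thread_enqueue, &example_tq);
	taskqueue_start_threads(&example_tq, 1, PI_NET, "%s taskq",
	    device_get_nameunit(dev));
	/*
	 * A fast interrupt filter can then defer to the thread with
	 * taskqueue_enqueue_fast(example_tq, &example_task); the
	 * relative priorities given to TASK_INIT() order the tasks
	 * when more than one is pending.
	 */
}
#endif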
taskqueue_thread_enqueue, &sc->smc_tq); taskqueue_start_threads(&sc->smc_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->smc_dev)); /* Mask all interrupts. */ sc->smc_mask = 0; smc_write_1(sc, MSK, 0); /* Wire up interrupt */ error = bus_setup_intr(dev, sc->smc_irq, INTR_TYPE_NET|INTR_MPSAFE, smc_intr, NULL, sc, &sc->smc_ih); if (error != 0) goto done; done: if (error != 0) smc_detach(dev); return (error); } int smc_detach(device_t dev) { int type; struct smc_softc *sc; sc = device_get_softc(dev); SMC_LOCK(sc); smc_stop(sc); SMC_UNLOCK(sc); if (sc->smc_ifp != NULL) { ether_ifdetach(sc->smc_ifp); } callout_drain(&sc->smc_watchdog); callout_drain(&sc->smc_mii_tick_ch); #ifdef DEVICE_POLLING if (sc->smc_ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(sc->smc_ifp); #endif if (sc->smc_ih != NULL) bus_teardown_intr(sc->smc_dev, sc->smc_irq, sc->smc_ih); if (sc->smc_tq != NULL) { taskqueue_drain(sc->smc_tq, &sc->smc_intr); taskqueue_drain(sc->smc_tq, &sc->smc_rx); taskqueue_drain(sc->smc_tq, &sc->smc_tx); taskqueue_free(sc->smc_tq); sc->smc_tq = NULL; } if (sc->smc_ifp != NULL) { if_free(sc->smc_ifp); } if (sc->smc_miibus != NULL) { device_delete_child(sc->smc_dev, sc->smc_miibus); bus_generic_detach(sc->smc_dev); } if (sc->smc_reg != NULL) { type = SYS_RES_IOPORT; if (sc->smc_usemem) type = SYS_RES_MEMORY; bus_release_resource(sc->smc_dev, type, sc->smc_reg_rid, sc->smc_reg); } if (sc->smc_irq != NULL) bus_release_resource(sc->smc_dev, SYS_RES_IRQ, sc->smc_irq_rid, sc->smc_irq); if (mtx_initialized(&sc->smc_mtx)) mtx_destroy(&sc->smc_mtx); return (0); } static void smc_start(struct ifnet *ifp) { struct smc_softc *sc; sc = ifp->if_softc; SMC_LOCK(sc); smc_start_locked(ifp); SMC_UNLOCK(sc); } static void smc_start_locked(struct ifnet *ifp) { struct smc_softc *sc; struct mbuf *m; u_int len, npages, spin_count; sc = ifp->if_softc; SMC_ASSERT_LOCKED(sc); if (ifp->if_drv_flags & IFF_DRV_OACTIVE) return; if (IFQ_IS_EMPTY(&ifp->if_snd)) return; /* * Grab the next packet. If it's too big, drop it. */ IFQ_DRV_DEQUEUE(&ifp->if_snd, m); len = m_length(m, NULL); len += (len & 1); if (len > ETHER_MAX_LEN - ETHER_CRC_LEN) { if_printf(ifp, "large packet discarded\n"); ++ifp->if_oerrors; m_freem(m); return; /* XXX readcheck? */ } /* * Flag that we're busy. */ ifp->if_drv_flags |= IFF_DRV_OACTIVE; sc->smc_pending = m; /* * Work out how many 256 byte "pages" we need. We have to include the * control data for the packet in this calculation. */ npages = (len + PKT_CTRL_DATA_LEN) >> 8; if (npages == 0) npages = 1; /* * Request memory. */ smc_select_bank(sc, 2); smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_TX_ALLOC | npages); /* * Spin briefly to see if the allocation succeeds. */ spin_count = TX_ALLOC_WAIT_TIME; do { if (smc_read_1(sc, IST) & ALLOC_INT) { smc_write_1(sc, ACK, ALLOC_INT); break; } } while (--spin_count); /* * If the allocation is taking too long, unmask the alloc interrupt * and wait. */ if (spin_count == 0) { sc->smc_mask |= ALLOC_INT; if ((ifp->if_capenable & IFCAP_POLLING) == 0) smc_write_1(sc, MSK, sc->smc_mask); return; } taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx); } static void smc_task_tx(void *context, int pending) { struct ifnet *ifp; struct smc_softc *sc; struct mbuf *m, *m0; u_int packet, len; uint8_t *data; (void)pending; ifp = (struct ifnet *)context; sc = ifp->if_softc; SMC_LOCK(sc); if (sc->smc_pending == NULL) { SMC_UNLOCK(sc); goto next_packet; } m = m0 = sc->smc_pending; sc->smc_pending = NULL; smc_select_bank(sc, 2); /* * Check the allocation result.
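* (ARR is the MMU's allocation result register: it carries the number of the packet buffer we were granted, or the ARR_FAILED flag if the chip could not find enough free 256-byte pages for the request.)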
*/ packet = smc_read_1(sc, ARR); /* * If the allocation failed, requeue the packet and retry. */ if (packet & ARR_FAILED) { IFQ_DRV_PREPEND(&ifp->if_snd, m); ++ifp->if_oerrors; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; smc_start_locked(ifp); SMC_UNLOCK(sc); return; } /* * Tell the device to write to our packet number. */ smc_write_1(sc, PNR, packet); smc_write_2(sc, PTR, 0 | PTR_AUTO_INCR); /* * Tell the device how long the packet is (including control data). */ len = m_length(m, 0); len += PKT_CTRL_DATA_LEN; smc_write_2(sc, DATA0, 0); smc_write_2(sc, DATA0, len); /* * Push the data out to the device. */ data = NULL; for (; m != NULL; m = m->m_next) { data = mtod(m, uint8_t *); smc_write_multi_2(sc, DATA0, (uint16_t *)data, m->m_len / 2); } /* * Push out the control byte and the odd byte if needed. */ if ((len & 1) != 0 && data != NULL) smc_write_2(sc, DATA0, (CTRL_ODD << 8) | data[m->m_len - 1]); else smc_write_2(sc, DATA0, 0); /* * Unmask the TX empty interrupt. */ sc->smc_mask |= TX_EMPTY_INT; if ((ifp->if_capenable & IFCAP_POLLING) == 0) smc_write_1(sc, MSK, sc->smc_mask); /* * Enqueue the packet. */ smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_ENQUEUE); callout_reset(&sc->smc_watchdog, hz * 2, smc_watchdog, sc); /* * Finish up. */ ifp->if_opackets++; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; SMC_UNLOCK(sc); BPF_MTAP(ifp, m0); m_freem(m0); next_packet: /* * See if there's anything else to do. */ smc_start(ifp); } static void smc_task_rx(void *context, int pending) { u_int packet, status, len; uint8_t *data; struct ifnet *ifp; struct smc_softc *sc; struct mbuf *m, *mhead, *mtail; (void)pending; ifp = (struct ifnet *)context; sc = ifp->if_softc; mhead = mtail = NULL; SMC_LOCK(sc); packet = smc_read_1(sc, FIFO_RX); while ((packet & FIFO_EMPTY) == 0) { /* * Grab an mbuf and attach a cluster. */ MGETHDR(m, M_DONTWAIT, MT_DATA); if (m == NULL) { break; } MCLGET(m, M_DONTWAIT); if ((m->m_flags & M_EXT) == 0) { m_freem(m); break; } /* * Point to the start of the packet. */ smc_select_bank(sc, 2); smc_write_1(sc, PNR, packet); smc_write_2(sc, PTR, 0 | PTR_READ | PTR_RCV | PTR_AUTO_INCR); /* * Grab status and packet length. */ status = smc_read_2(sc, DATA0); len = smc_read_2(sc, DATA0) & RX_LEN_MASK; len -= 6; if (status & RX_ODDFRM) len += 1; /* * Check for errors. */ if (status & (RX_TOOSHORT | RX_TOOLNG | RX_BADCRC | RX_ALGNERR)) { smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE); ifp->if_ierrors++; m_freem(m); break; } /* * Set the mbuf up the way we want it. */ m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = len + 2; /* XXX: Is this right? */ m_adj(m, ETHER_ALIGN); /* * Pull the packet out of the device. Make sure we're in the * right bank first as things may have changed while we were * allocating our mbuf. */ smc_select_bank(sc, 2); smc_write_1(sc, PNR, packet); smc_write_2(sc, PTR, 4 | PTR_READ | PTR_RCV | PTR_AUTO_INCR); data = mtod(m, uint8_t *); smc_read_multi_2(sc, DATA0, (uint16_t *)data, len >> 1); if (len & 1) { data += len & ~1; *data = smc_read_1(sc, DATA0); } /* * Tell the device we're done.
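* (MMUCR_CMD_RELEASE pops this packet off the receive FIFO and returns its pages to the MMU's free pool; skipping it would slowly leak the chip's small on-board packet memory.)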
*/ smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE); if (m == NULL) { break; } if (mhead == NULL) { mhead = mtail = m; m->m_next = NULL; } else { mtail->m_next = m; mtail = m; } packet = smc_read_1(sc, FIFO_RX); } sc->smc_mask |= RCV_INT; if ((ifp->if_capenable & IFCAP_POLLING) == 0) smc_write_1(sc, MSK, sc->smc_mask); SMC_UNLOCK(sc); while (mhead != NULL) { m = mhead; mhead = mhead->m_next; m->m_next = NULL; ifp->if_ipackets++; (*ifp->if_input)(ifp, m); } } #ifdef DEVICE_POLLING static void smc_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct smc_softc *sc; sc = ifp->if_softc; SMC_LOCK(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { SMC_UNLOCK(sc); return; } SMC_UNLOCK(sc); if (cmd == POLL_AND_CHECK_STATUS) taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_intr); } #endif static int smc_intr(void *context) { struct smc_softc *sc; sc = (struct smc_softc *)context; taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_intr); return (FILTER_HANDLED); } static void smc_task_intr(void *context, int pending) { struct smc_softc *sc; struct ifnet *ifp; u_int status, packet, counter, tcr; (void)pending; ifp = (struct ifnet *)context; sc = ifp->if_softc; SMC_LOCK(sc); smc_select_bank(sc, 2); /* * Get the current mask, and then block all interrupts while we're * working. */ if ((ifp->if_capenable & IFCAP_POLLING) == 0) smc_write_1(sc, MSK, 0); /* * Find out what interrupts are flagged. */ status = smc_read_1(sc, IST) & sc->smc_mask; /* * Transmit error */ if (status & TX_INT) { /* * Kill off the packet if there is one and re-enable transmit. */ packet = smc_read_1(sc, FIFO_TX); if ((packet & FIFO_EMPTY) == 0) { smc_write_1(sc, PNR, packet); smc_write_2(sc, PTR, 0 | PTR_READ | PTR_AUTO_INCR); tcr = smc_read_2(sc, DATA0); if ((tcr & EPHSR_TX_SUC) == 0) device_printf(sc->smc_dev, "bad packet\n"); smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_RELEASE_PKT); smc_select_bank(sc, 0); tcr = smc_read_2(sc, TCR); tcr |= TCR_TXENA | TCR_PAD_EN; smc_write_2(sc, TCR, tcr); smc_select_bank(sc, 2); taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx); } /* * Ack the interrupt. */ smc_write_1(sc, ACK, TX_INT); } /* * Receive */ if (status & RCV_INT) { smc_write_1(sc, ACK, RCV_INT); sc->smc_mask &= ~RCV_INT; taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_rx); } /* * Allocation */ if (status & ALLOC_INT) { smc_write_1(sc, ACK, ALLOC_INT); sc->smc_mask &= ~ALLOC_INT; taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx); } /* * Receive overrun */ if (status & RX_OVRN_INT) { smc_write_1(sc, ACK, RX_OVRN_INT); ifp->if_ierrors++; } /* * Transmit empty */ if (status & TX_EMPTY_INT) { smc_write_1(sc, ACK, TX_EMPTY_INT); sc->smc_mask &= ~TX_EMPTY_INT; callout_stop(&sc->smc_watchdog); /* * Update collision stats. */ smc_select_bank(sc, 0); counter = smc_read_2(sc, ECR); smc_select_bank(sc, 2); ifp->if_collisions += (counter & ECR_SNGLCOL_MASK) >> ECR_SNGLCOL_SHIFT; ifp->if_collisions += (counter & ECR_MULCOL_MASK) >> ECR_MULCOL_SHIFT; /* * See if there are any packets to transmit. */ taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_tx); } /* * Update the interrupt mask. */ if ((ifp->if_capenable & IFCAP_POLLING) == 0) smc_write_1(sc, MSK, sc->smc_mask); SMC_UNLOCK(sc); } static u_int smc_mii_readbits(struct smc_softc *sc, int nbits) { u_int mgmt, mask, val; SMC_ASSERT_LOCKED(sc); KASSERT((smc_read_2(sc, BSR) & BSR_BANK_MASK) == 3, ("%s: smc_mii_readbits called with bank %d (!= 3)", device_get_nameunit(sc->smc_dev), smc_read_2(sc, BSR) & BSR_BANK_MASK)); /* * Set up the MGMT (aka MII) register. 
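* (MGMT exposes the MDIO pins directly: MGMT_MDO drives data out, MGMT_MDI samples data in, MGMT_MDOE enables the output driver, and MGMT_MCLK is the clock we strobe by hand. For a read we leave the output driver off and sample MDI around each clock pulse.)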
*/ mgmt = smc_read_2(sc, MGMT) & ~(MGMT_MCLK | MGMT_MDOE | MGMT_MDO); smc_write_2(sc, MGMT, mgmt); /* * Read the bits in. */ for (mask = 1 << (nbits - 1), val = 0; mask; mask >>= 1) { if (smc_read_2(sc, MGMT) & MGMT_MDI) val |= mask; smc_write_2(sc, MGMT, mgmt); DELAY(1); smc_write_2(sc, MGMT, mgmt | MGMT_MCLK); DELAY(1); } return (val); } static void smc_mii_writebits(struct smc_softc *sc, u_int val, int nbits) { u_int mgmt, mask; SMC_ASSERT_LOCKED(sc); KASSERT((smc_read_2(sc, BSR) & BSR_BANK_MASK) == 3, ("%s: smc_mii_writebits called with bank %d (!= 3)", device_get_nameunit(sc->smc_dev), smc_read_2(sc, BSR) & BSR_BANK_MASK)); /* * Set up the MGMT (aka MII) register). */ mgmt = smc_read_2(sc, MGMT) & ~(MGMT_MCLK | MGMT_MDOE | MGMT_MDO); mgmt |= MGMT_MDOE; /* * Push the bits out. */ for (mask = 1 << (nbits - 1); mask; mask >>= 1) { if (val & mask) mgmt |= MGMT_MDO; else mgmt &= ~MGMT_MDO; smc_write_2(sc, MGMT, mgmt); DELAY(1); smc_write_2(sc, MGMT, mgmt | MGMT_MCLK); DELAY(1); } } int smc_miibus_readreg(device_t dev, int phy, int reg) { struct smc_softc *sc; int val; sc = device_get_softc(dev); SMC_LOCK(sc); smc_select_bank(sc, 3); /* * Send out the idle pattern. */ smc_mii_writebits(sc, 0xffffffff, 32); /* * Start code + read opcode + phy address + phy register */ smc_mii_writebits(sc, 6 << 10 | phy << 5 | reg, 14); /* * Turnaround + data */ val = smc_mii_readbits(sc, 18); /* * Reset the MDIO interface. */ smc_write_2(sc, MGMT, smc_read_2(sc, MGMT) & ~(MGMT_MCLK | MGMT_MDOE | MGMT_MDO)); SMC_UNLOCK(sc); return (val); } void smc_miibus_writereg(device_t dev, int phy, int reg, int data) { struct smc_softc *sc; sc = device_get_softc(dev); SMC_LOCK(sc); smc_select_bank(sc, 3); /* * Send idle pattern. */ smc_mii_writebits(sc, 0xffffffff, 32); /* * Start code + write opcode + phy address + phy register + turnaround * + data. */ smc_mii_writebits(sc, 5 << 28 | phy << 23 | reg << 18 | 2 << 16 | data, 32); /* * Reset MDIO interface. 
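* (The 32-bit frame written above decodes as: start code 01 plus write opcode 01 -- together the 5 << 28 -- then the 5-bit PHY address, the 5-bit register address, the 10 turnaround pattern (2 << 16), and finally 16 bits of data; the read path builds its 14-bit header the same way with 6 << 10.)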
*/ smc_write_2(sc, MGMT, smc_read_2(sc, MGMT) & ~(MGMT_MCLK | MGMT_MDOE | MGMT_MDO)); SMC_UNLOCK(sc); } void smc_miibus_statchg(device_t dev) { struct smc_softc *sc; struct mii_data *mii; uint16_t tcr; sc = device_get_softc(dev); mii = device_get_softc(sc->smc_miibus); SMC_LOCK(sc); smc_select_bank(sc, 0); tcr = smc_read_2(sc, TCR); if ((IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0) tcr |= TCR_SWFDUP; else tcr &= ~TCR_SWFDUP; smc_write_2(sc, TCR, tcr); SMC_UNLOCK(sc); } static int smc_mii_ifmedia_upd(struct ifnet *ifp) { struct smc_softc *sc; struct mii_data *mii; sc = ifp->if_softc; if (sc->smc_miibus == NULL) return (ENXIO); mii = device_get_softc(sc->smc_miibus); return (mii_mediachg(mii)); } static void smc_mii_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct smc_softc *sc; struct mii_data *mii; sc = ifp->if_softc; if (sc->smc_miibus == NULL) return; mii = device_get_softc(sc->smc_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static void smc_mii_tick(void *context) { struct smc_softc *sc; sc = (struct smc_softc *)context; if (sc->smc_miibus == NULL) return; SMC_UNLOCK(sc); mii_tick(device_get_softc(sc->smc_miibus)); callout_reset(&sc->smc_mii_tick_ch, hz, smc_mii_tick, sc); } static void smc_mii_mediachg(struct smc_softc *sc) { if (sc->smc_miibus == NULL) return; mii_mediachg(device_get_softc(sc->smc_miibus)); } static int smc_mii_mediaioctl(struct smc_softc *sc, struct ifreq *ifr, u_long command) { struct mii_data *mii; if (sc->smc_miibus == NULL) return (EINVAL); mii = device_get_softc(sc->smc_miibus); return (ifmedia_ioctl(sc->smc_ifp, ifr, &mii->mii_media, command)); } static void smc_reset(struct smc_softc *sc) { u_int ctr; SMC_ASSERT_LOCKED(sc); smc_select_bank(sc, 2); /* * Mask all interrupts. */ smc_write_1(sc, MSK, 0); /* * Tell the device to reset. */ smc_select_bank(sc, 0); smc_write_2(sc, RCR, RCR_SOFT_RST); /* * Set up the configuration register. */ smc_select_bank(sc, 1); smc_write_2(sc, CR, CR_EPH_POWER_EN); DELAY(1); /* * Turn off transmit and receive. */ smc_select_bank(sc, 0); smc_write_2(sc, TCR, 0); smc_write_2(sc, RCR, 0); /* * Set up the control register. */ smc_select_bank(sc, 1); ctr = smc_read_2(sc, CTR); ctr |= CTR_LE_ENABLE | CTR_AUTO_RELEASE; smc_write_2(sc, CTR, ctr); /* * Reset the MMU. */ smc_select_bank(sc, 2); smc_mmu_wait(sc); smc_write_2(sc, MMUCR, MMUCR_CMD_MMU_RESET); } static void smc_enable(struct smc_softc *sc) { struct ifnet *ifp; SMC_ASSERT_LOCKED(sc); ifp = sc->smc_ifp; /* * Set up the receive/PHY control register. */ smc_select_bank(sc, 0); smc_write_2(sc, RPCR, RPCR_ANEG | (RPCR_LED_LINK_ANY << RPCR_LSA_SHIFT) | (RPCR_LED_ACT_ANY << RPCR_LSB_SHIFT)); /* * Set up the transmit and receive control registers. */ smc_write_2(sc, TCR, TCR_TXENA | TCR_PAD_EN); smc_write_2(sc, RCR, RCR_RXEN | RCR_STRIP_CRC); /* * Set up the interrupt mask. */ smc_select_bank(sc, 2); sc->smc_mask = EPH_INT | RX_OVRN_INT | RCV_INT | TX_INT; if ((ifp->if_capenable & IFCAP_POLLING) != 0) smc_write_1(sc, MSK, sc->smc_mask); } static void smc_stop(struct smc_softc *sc) { SMC_ASSERT_LOCKED(sc); /* * Turn off callouts. */ callout_stop(&sc->smc_watchdog); callout_stop(&sc->smc_mii_tick_ch); /* * Mask all interrupts. */ smc_select_bank(sc, 2); sc->smc_mask = 0; smc_write_1(sc, MSK, 0); #ifdef DEVICE_POLLING ether_poll_deregister(sc->smc_ifp); sc->smc_ifp->if_capenable &= ~IFCAP_POLLING; + sc->smc_ifp->if_capenable &= ~IFCAP_POLLING_NOCOUNT; #endif /* * Disable transmit and receive. 
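* (Clearing TCR and RCR in bank 0 halts the MAC's transmit and receive engines. Note the line added just above: with this revision's polling API the handlers report how many packets they received, and since smc(4)'s taskqueue-based handler cannot count, the driver now clears IFCAP_POLLING_NOCOUNT here alongside IFCAP_POLLING.)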
*/ smc_select_bank(sc, 0); smc_write_2(sc, TCR, 0); smc_write_2(sc, RCR, 0); sc->smc_ifp->if_drv_flags &= ~IFF_DRV_RUNNING; } static void smc_watchdog(void *arg) { struct smc_softc *sc; sc = (struct smc_softc *)arg; device_printf(sc->smc_dev, "watchdog timeout\n"); taskqueue_enqueue_fast(sc->smc_tq, &sc->smc_intr); } static void smc_init(void *context) { struct smc_softc *sc; sc = (struct smc_softc *)context; SMC_LOCK(sc); smc_init_locked(sc); SMC_UNLOCK(sc); } static void smc_init_locked(struct smc_softc *sc) { struct ifnet *ifp; ifp = sc->smc_ifp; SMC_ASSERT_LOCKED(sc); smc_reset(sc); smc_enable(sc); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; smc_start_locked(ifp); if (sc->smc_mii_tick != NULL) callout_reset(&sc->smc_mii_tick_ch, hz, sc->smc_mii_tick, sc); #ifdef DEVICE_POLLING SMC_UNLOCK(sc); ether_poll_register(smc_poll, ifp); SMC_LOCK(sc); ifp->if_capenable |= IFCAP_POLLING; + ifp->if_capenable |= IFCAP_POLLING_NOCOUNT; #endif } static int smc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct smc_softc *sc; int error; sc = ifp->if_softc; error = 0; switch (cmd) { case SIOCSIFFLAGS: if ((ifp->if_flags & IFF_UP) == 0 && (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { SMC_LOCK(sc); smc_stop(sc); SMC_UNLOCK(sc); } else { smc_init(sc); if (sc->smc_mii_mediachg != NULL) sc->smc_mii_mediachg(sc); } break; case SIOCADDMULTI: case SIOCDELMULTI: /* XXX SMC_LOCK(sc); smc_setmcast(sc); SMC_UNLOCK(sc); */ error = EINVAL; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: if (sc->smc_mii_mediaioctl == NULL) { error = EINVAL; break; } sc->smc_mii_mediaioctl(sc, (struct ifreq *)data, cmd); break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } Index: head/sys/dev/ste/if_ste.c =================================================================== --- head/sys/dev/ste/if_ste.c (revision 193095) +++ head/sys/dev/ste/if_ste.c (revision 193096) @@ -1,1752 +1,1757 @@ /*- * Copyright (c) 1997, 1998, 1999 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include #include #include #include #include /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" #define STE_USEIOSPACE #include MODULE_DEPEND(ste, pci, 1, 1, 1); MODULE_DEPEND(ste, ether, 1, 1, 1); MODULE_DEPEND(ste, miibus, 1, 1, 1); /* * Various supported device vendors/types and their names. */ static struct ste_type ste_devs[] = { { ST_VENDORID, ST_DEVICEID_ST201_1, "Sundance ST201 10/100BaseTX" }, { ST_VENDORID, ST_DEVICEID_ST201_2, "Sundance ST201 10/100BaseTX" }, { DL_VENDORID, DL_DEVICEID_DL10050, "D-Link DL10050 10/100BaseTX" }, { 0, 0, NULL } }; static int ste_probe(device_t); static int ste_attach(device_t); static int ste_detach(device_t); static void ste_init(void *); static void ste_init_locked(struct ste_softc *); static void ste_intr(void *); static void ste_rxeoc(struct ste_softc *); -static void ste_rxeof(struct ste_softc *); +static int ste_rxeof(struct ste_softc *); static void ste_txeoc(struct ste_softc *); static void ste_txeof(struct ste_softc *); static void ste_stats_update(void *); static void ste_stop(struct ste_softc *); static void ste_reset(struct ste_softc *); static int ste_ioctl(struct ifnet *, u_long, caddr_t); static int ste_encap(struct ste_softc *, struct ste_chain *, struct mbuf *); static void ste_start(struct ifnet *); static void ste_start_locked(struct ifnet *); static void ste_watchdog(struct ifnet *); static int ste_shutdown(device_t); static int ste_newbuf(struct ste_softc *, struct ste_chain_onefrag *, struct mbuf *); static int ste_ifmedia_upd(struct ifnet *); static void ste_ifmedia_upd_locked(struct ifnet *); static void ste_ifmedia_sts(struct ifnet *, struct ifmediareq *); static void ste_mii_sync(struct ste_softc *); static void ste_mii_send(struct ste_softc *, u_int32_t, int); static int ste_mii_readreg(struct ste_softc *, struct ste_mii_frame *); static int ste_mii_writereg(struct ste_softc *, struct ste_mii_frame *); static int ste_miibus_readreg(device_t, int, int); static int ste_miibus_writereg(device_t, int, int, int); static void ste_miibus_statchg(device_t); static int ste_eeprom_wait(struct ste_softc *); static int ste_read_eeprom(struct ste_softc *, caddr_t, int, int, int); static void ste_wait(struct ste_softc *); static void ste_setmulti(struct ste_softc *); static int ste_init_rx_list(struct ste_softc *); static void ste_init_tx_list(struct ste_softc *); #ifdef STE_USEIOSPACE #define STE_RES SYS_RES_IOPORT #define STE_RID STE_PCI_LOIO #else #define STE_RES SYS_RES_MEMORY #define STE_RID STE_PCI_LOMEM #endif static device_method_t ste_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ste_probe), DEVMETHOD(device_attach, 
ste_attach), DEVMETHOD(device_detach, ste_detach), DEVMETHOD(device_shutdown, ste_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, ste_miibus_readreg), DEVMETHOD(miibus_writereg, ste_miibus_writereg), DEVMETHOD(miibus_statchg, ste_miibus_statchg), { 0, 0 } }; static driver_t ste_driver = { "ste", ste_methods, sizeof(struct ste_softc) }; static devclass_t ste_devclass; DRIVER_MODULE(ste, pci, ste_driver, ste_devclass, 0, 0); DRIVER_MODULE(miibus, ste, miibus_driver, miibus_devclass, 0, 0); SYSCTL_NODE(_hw, OID_AUTO, ste, CTLFLAG_RD, 0, "if_ste parameters"); static int ste_rxsyncs; SYSCTL_INT(_hw_ste, OID_AUTO, rxsyncs, CTLFLAG_RW, &ste_rxsyncs, 0, ""); #define STE_SETBIT4(sc, reg, x) \ CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x)) #define STE_CLRBIT4(sc, reg, x) \ CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x)) #define STE_SETBIT2(sc, reg, x) \ CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | (x)) #define STE_CLRBIT2(sc, reg, x) \ CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~(x)) #define STE_SETBIT1(sc, reg, x) \ CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | (x)) #define STE_CLRBIT1(sc, reg, x) \ CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~(x)) #define MII_SET(x) STE_SETBIT1(sc, STE_PHYCTL, x) #define MII_CLR(x) STE_CLRBIT1(sc, STE_PHYCTL, x) /* * Sync the PHYs by setting data bit and strobing the clock 32 times. */ static void ste_mii_sync(sc) struct ste_softc *sc; { register int i; MII_SET(STE_PHYCTL_MDIR|STE_PHYCTL_MDATA); for (i = 0; i < 32; i++) { MII_SET(STE_PHYCTL_MCLK); DELAY(1); MII_CLR(STE_PHYCTL_MCLK); DELAY(1); } return; } /* * Clock a series of bits through the MII. */ static void ste_mii_send(sc, bits, cnt) struct ste_softc *sc; u_int32_t bits; int cnt; { int i; MII_CLR(STE_PHYCTL_MCLK); for (i = (0x1 << (cnt - 1)); i; i >>= 1) { if (bits & i) { MII_SET(STE_PHYCTL_MDATA); } else { MII_CLR(STE_PHYCTL_MDATA); } DELAY(1); MII_CLR(STE_PHYCTL_MCLK); DELAY(1); MII_SET(STE_PHYCTL_MCLK); } } /* * Read an PHY register through the MII. */ static int ste_mii_readreg(sc, frame) struct ste_softc *sc; struct ste_mii_frame *frame; { int i, ack; /* * Set up frame for RX. */ frame->mii_stdelim = STE_MII_STARTDELIM; frame->mii_opcode = STE_MII_READOP; frame->mii_turnaround = 0; frame->mii_data = 0; CSR_WRITE_2(sc, STE_PHYCTL, 0); /* * Turn on data xmit. */ MII_SET(STE_PHYCTL_MDIR); ste_mii_sync(sc); /* * Send command/address info. */ ste_mii_send(sc, frame->mii_stdelim, 2); ste_mii_send(sc, frame->mii_opcode, 2); ste_mii_send(sc, frame->mii_phyaddr, 5); ste_mii_send(sc, frame->mii_regaddr, 5); /* Turn off xmit. */ MII_CLR(STE_PHYCTL_MDIR); /* Idle bit */ MII_CLR((STE_PHYCTL_MCLK|STE_PHYCTL_MDATA)); DELAY(1); MII_SET(STE_PHYCTL_MCLK); DELAY(1); /* Check for ack */ MII_CLR(STE_PHYCTL_MCLK); DELAY(1); ack = CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA; MII_SET(STE_PHYCTL_MCLK); DELAY(1); /* * Now try reading data bits. If the ack failed, we still * need to clock through 16 cycles to keep the PHY(s) in sync. 
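* (A responding PHY acknowledges by driving the data line low during the turnaround; if we sampled it high nobody answered, but we clock out the 16 data cycles anyway so any attached PHYs stay in frame sync.)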
*/ if (ack) { for(i = 0; i < 16; i++) { MII_CLR(STE_PHYCTL_MCLK); DELAY(1); MII_SET(STE_PHYCTL_MCLK); DELAY(1); } goto fail; } for (i = 0x8000; i; i >>= 1) { MII_CLR(STE_PHYCTL_MCLK); DELAY(1); if (!ack) { if (CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA) frame->mii_data |= i; DELAY(1); } MII_SET(STE_PHYCTL_MCLK); DELAY(1); } fail: MII_CLR(STE_PHYCTL_MCLK); DELAY(1); MII_SET(STE_PHYCTL_MCLK); DELAY(1); if (ack) return(1); return(0); } /* * Write to a PHY register through the MII. */ static int ste_mii_writereg(sc, frame) struct ste_softc *sc; struct ste_mii_frame *frame; { /* * Set up frame for TX. */ frame->mii_stdelim = STE_MII_STARTDELIM; frame->mii_opcode = STE_MII_WRITEOP; frame->mii_turnaround = STE_MII_TURNAROUND; /* * Turn on data output. */ MII_SET(STE_PHYCTL_MDIR); ste_mii_sync(sc); ste_mii_send(sc, frame->mii_stdelim, 2); ste_mii_send(sc, frame->mii_opcode, 2); ste_mii_send(sc, frame->mii_phyaddr, 5); ste_mii_send(sc, frame->mii_regaddr, 5); ste_mii_send(sc, frame->mii_turnaround, 2); ste_mii_send(sc, frame->mii_data, 16); /* Idle bit. */ MII_SET(STE_PHYCTL_MCLK); DELAY(1); MII_CLR(STE_PHYCTL_MCLK); DELAY(1); /* * Turn off xmit. */ MII_CLR(STE_PHYCTL_MDIR); return(0); } static int ste_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct ste_softc *sc; struct ste_mii_frame frame; sc = device_get_softc(dev); if ( sc->ste_one_phy && phy != 0 ) return (0); bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; ste_mii_readreg(sc, &frame); return(frame.mii_data); } static int ste_miibus_writereg(dev, phy, reg, data) device_t dev; int phy, reg, data; { struct ste_softc *sc; struct ste_mii_frame frame; sc = device_get_softc(dev); bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; frame.mii_data = data; ste_mii_writereg(sc, &frame); return(0); } static void ste_miibus_statchg(dev) device_t dev; { struct ste_softc *sc; struct mii_data *mii; sc = device_get_softc(dev); mii = device_get_softc(sc->ste_miibus); if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { STE_SETBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX); } else { STE_CLRBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX); } return; } static int ste_ifmedia_upd(ifp) struct ifnet *ifp; { struct ste_softc *sc; sc = ifp->if_softc; STE_LOCK(sc); ste_ifmedia_upd_locked(ifp); STE_UNLOCK(sc); return(0); } static void ste_ifmedia_upd_locked(ifp) struct ifnet *ifp; { struct ste_softc *sc; struct mii_data *mii; sc = ifp->if_softc; STE_LOCK_ASSERT(sc); mii = device_get_softc(sc->ste_miibus); sc->ste_link = 0; if (mii->mii_instance) { struct mii_softc *miisc; LIST_FOREACH(miisc, &mii->mii_phys, mii_list) mii_phy_reset(miisc); } mii_mediachg(mii); } static void ste_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct ste_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->ste_miibus); STE_LOCK(sc); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; STE_UNLOCK(sc); return; } static void ste_wait(sc) struct ste_softc *sc; { register int i; for (i = 0; i < STE_TIMEOUT; i++) { if (!(CSR_READ_4(sc, STE_DMACTL) & STE_DMACTL_DMA_HALTINPROG)) break; } if (i == STE_TIMEOUT) device_printf(sc->ste_dev, "command never completed!\n"); return; } /* * The EEPROM is slow: give it time to come ready after issuing * it a command. 
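* (Concretely, the routine below sleeps 1ms up front and then polls the busy bit at 1ms intervals up to 100 times, so a part gets roughly 100ms to come ready before we declare it dead.)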
*/ static int ste_eeprom_wait(sc) struct ste_softc *sc; { int i; DELAY(1000); for (i = 0; i < 100; i++) { if (CSR_READ_2(sc, STE_EEPROM_CTL) & STE_EECTL_BUSY) DELAY(1000); else break; } if (i == 100) { device_printf(sc->ste_dev, "eeprom failed to come ready\n"); return(1); } return(0); } /* * Read a sequence of words from the EEPROM. Note that ethernet address * data is stored in the EEPROM in network byte order. */ static int ste_read_eeprom(sc, dest, off, cnt, swap) struct ste_softc *sc; caddr_t dest; int off; int cnt; int swap; { int err = 0, i; u_int16_t word = 0, *ptr; if (ste_eeprom_wait(sc)) return(1); for (i = 0; i < cnt; i++) { CSR_WRITE_2(sc, STE_EEPROM_CTL, STE_EEOPCODE_READ | (off + i)); err = ste_eeprom_wait(sc); if (err) break; word = CSR_READ_2(sc, STE_EEPROM_DATA); ptr = (u_int16_t *)(dest + (i * 2)); if (swap) *ptr = ntohs(word); else *ptr = word; } return(err ? 1 : 0); } static void ste_setmulti(sc) struct ste_softc *sc; { struct ifnet *ifp; int h = 0; u_int32_t hashes[2] = { 0, 0 }; struct ifmultiaddr *ifma; ifp = sc->ste_ifp; if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_ALLMULTI); STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_MULTIHASH); return; } /* first, zot all the existing hash bits */ CSR_WRITE_2(sc, STE_MAR0, 0); CSR_WRITE_2(sc, STE_MAR1, 0); CSR_WRITE_2(sc, STE_MAR2, 0); CSR_WRITE_2(sc, STE_MAR3, 0); /* now program new ones */ IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) & 0x3F; if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); } IF_ADDR_UNLOCK(ifp); CSR_WRITE_2(sc, STE_MAR0, hashes[0] & 0xFFFF); CSR_WRITE_2(sc, STE_MAR1, (hashes[0] >> 16) & 0xFFFF); CSR_WRITE_2(sc, STE_MAR2, hashes[1] & 0xFFFF); CSR_WRITE_2(sc, STE_MAR3, (hashes[1] >> 16) & 0xFFFF); STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_ALLMULTI); STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_MULTIHASH); return; } #ifdef DEVICE_POLLING static poll_handler_t ste_poll, ste_poll_locked; -static void +static int ste_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct ste_softc *sc = ifp->if_softc; + int rx_npkts = 0; STE_LOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) - ste_poll_locked(ifp, cmd, count); + rx_npkts = ste_poll_locked(ifp, cmd, count); STE_UNLOCK(sc); + return (rx_npkts); } -static void +static int ste_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct ste_softc *sc = ifp->if_softc; + int rx_npkts; STE_LOCK_ASSERT(sc); sc->rxcycles = count; if (cmd == POLL_AND_CHECK_STATUS) ste_rxeoc(sc); - ste_rxeof(sc); + rx_npkts = ste_rxeof(sc); ste_txeof(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) ste_start_locked(ifp); if (cmd == POLL_AND_CHECK_STATUS) { u_int16_t status; status = CSR_READ_2(sc, STE_ISR_ACK); if (status & STE_ISR_TX_DONE) ste_txeoc(sc); if (status & STE_ISR_STATS_OFLOW) { callout_stop(&sc->ste_stat_callout); ste_stats_update(sc); } if (status & STE_ISR_LINKEVENT) mii_pollstat(device_get_softc(sc->ste_miibus)); if (status & STE_ISR_HOSTERR) { ste_reset(sc); ste_init_locked(sc); } } + return (rx_npkts); } #endif /* DEVICE_POLLING */ static void ste_intr(xsc) void *xsc; { struct ste_softc *sc; struct ifnet *ifp; u_int16_t status; sc = xsc; STE_LOCK(sc); ifp = sc->ste_ifp; #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { STE_UNLOCK(sc); return; } #endif /* See if this is really our interrupt. 
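(The IRQ line may be shared, so test the interrupt latch first; reading STE_ISR is non-destructive, while the STE_ISR_ACK reads in the service loop below are what acknowledge and clear the pending bits.)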
*/ if (!(CSR_READ_2(sc, STE_ISR) & STE_ISR_INTLATCH)) { STE_UNLOCK(sc); return; } for (;;) { status = CSR_READ_2(sc, STE_ISR_ACK); if (!(status & STE_INTRS)) break; if (status & STE_ISR_RX_DMADONE) { ste_rxeoc(sc); ste_rxeof(sc); } if (status & STE_ISR_TX_DMADONE) ste_txeof(sc); if (status & STE_ISR_TX_DONE) ste_txeoc(sc); if (status & STE_ISR_STATS_OFLOW) { callout_stop(&sc->ste_stat_callout); ste_stats_update(sc); } if (status & STE_ISR_LINKEVENT) mii_pollstat(device_get_softc(sc->ste_miibus)); if (status & STE_ISR_HOSTERR) { ste_reset(sc); ste_init_locked(sc); } } /* Re-enable interrupts */ CSR_WRITE_2(sc, STE_IMR, STE_INTRS); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) ste_start_locked(ifp); STE_UNLOCK(sc); return; } static void ste_rxeoc(struct ste_softc *sc) { struct ste_chain_onefrag *cur_rx; STE_LOCK_ASSERT(sc); if (sc->ste_cdata.ste_rx_head->ste_ptr->ste_status == 0) { cur_rx = sc->ste_cdata.ste_rx_head; do { cur_rx = cur_rx->ste_next; /* If the ring is empty, just return. */ if (cur_rx == sc->ste_cdata.ste_rx_head) return; } while (cur_rx->ste_ptr->ste_status == 0); if (sc->ste_cdata.ste_rx_head->ste_ptr->ste_status == 0) { /* We've fallen behind the chip: catch it. */ sc->ste_cdata.ste_rx_head = cur_rx; ++ste_rxsyncs; } } } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. */ -static void +static int ste_rxeof(sc) struct ste_softc *sc; { struct mbuf *m; struct ifnet *ifp; struct ste_chain_onefrag *cur_rx; - int total_len = 0, count=0; + int total_len = 0, count=0, rx_npkts = 0; u_int32_t rxstat; STE_LOCK_ASSERT(sc); ifp = sc->ste_ifp; while((rxstat = sc->ste_cdata.ste_rx_head->ste_ptr->ste_status) & STE_RXSTAT_DMADONE) { #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { if (sc->rxcycles <= 0) break; sc->rxcycles--; } #endif if ((STE_RX_LIST_CNT - count) < 3) { break; } cur_rx = sc->ste_cdata.ste_rx_head; sc->ste_cdata.ste_rx_head = cur_rx->ste_next; /* * If an error occurs, update stats, clear the * status word and leave the mbuf cluster in place: * it should simply get re-used next time this descriptor * comes up in the ring. */ if (rxstat & STE_RXSTAT_FRAME_ERR) { ifp->if_ierrors++; cur_rx->ste_ptr->ste_status = 0; continue; } /* * If the error bit was not set, the upload complete * bit should be set which means we have a valid packet. * If not, something truly strange has happened. */ if (!(rxstat & STE_RXSTAT_DMADONE)) { device_printf(sc->ste_dev, "bad receive status -- packet dropped\n"); ifp->if_ierrors++; cur_rx->ste_ptr->ste_status = 0; continue; } /* No errors; receive the packet. */ m = cur_rx->ste_mbuf; total_len = cur_rx->ste_ptr->ste_status & STE_RXSTAT_FRAMELEN; /* * Try to conjure up a new mbuf cluster. If that * fails, it means we have an out of memory condition and * should leave the buffer in place and continue. This will * result in a lost packet, but there's little else we * can do in this situation.
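* (Recycling the old cluster keeps every descriptor pointing at valid memory, which matters because the chip will DMA into whatever address the ring entry holds.)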
*/ if (ste_newbuf(sc, cur_rx, NULL) == ENOBUFS) { ifp->if_ierrors++; cur_rx->ste_ptr->ste_status = 0; continue; } m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = total_len; ifp->if_ipackets++; STE_UNLOCK(sc); (*ifp->if_input)(ifp, m); STE_LOCK(sc); cur_rx->ste_ptr->ste_status = 0; count++; + rx_npkts++; } - return; + return (rx_npkts); } static void ste_txeoc(sc) struct ste_softc *sc; { u_int8_t txstat; struct ifnet *ifp; ifp = sc->ste_ifp; while ((txstat = CSR_READ_1(sc, STE_TX_STATUS)) & STE_TXSTATUS_TXDONE) { if (txstat & STE_TXSTATUS_UNDERRUN || txstat & STE_TXSTATUS_EXCESSCOLLS || txstat & STE_TXSTATUS_RECLAIMERR) { ifp->if_oerrors++; device_printf(sc->ste_dev, "transmission error: %x\n", txstat); ste_reset(sc); ste_init_locked(sc); if (txstat & STE_TXSTATUS_UNDERRUN && sc->ste_tx_thresh < STE_PACKET_SIZE) { sc->ste_tx_thresh += STE_MIN_FRAMELEN; device_printf(sc->ste_dev, "tx underrun, increasing tx" " start threshold to %d bytes\n", sc->ste_tx_thresh); } CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh); CSR_WRITE_2(sc, STE_TX_RECLAIM_THRESH, (STE_PACKET_SIZE >> 4)); } ste_init_locked(sc); CSR_WRITE_2(sc, STE_TX_STATUS, txstat); } return; } static void ste_txeof(sc) struct ste_softc *sc; { struct ste_chain *cur_tx; struct ifnet *ifp; int idx; ifp = sc->ste_ifp; idx = sc->ste_cdata.ste_tx_cons; while(idx != sc->ste_cdata.ste_tx_prod) { cur_tx = &sc->ste_cdata.ste_tx_chain[idx]; if (!(cur_tx->ste_ptr->ste_ctl & STE_TXCTL_DMADONE)) break; m_freem(cur_tx->ste_mbuf); cur_tx->ste_mbuf = NULL; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_opackets++; STE_INC(idx, STE_TX_LIST_CNT); } sc->ste_cdata.ste_tx_cons = idx; if (idx == sc->ste_cdata.ste_tx_prod) ifp->if_timer = 0; } static void ste_stats_update(xsc) void *xsc; { struct ste_softc *sc; struct ifnet *ifp; struct mii_data *mii; sc = xsc; STE_LOCK_ASSERT(sc); ifp = sc->ste_ifp; mii = device_get_softc(sc->ste_miibus); ifp->if_collisions += CSR_READ_1(sc, STE_LATE_COLLS) + CSR_READ_1(sc, STE_MULTI_COLLS) + CSR_READ_1(sc, STE_SINGLE_COLLS); if (!sc->ste_link) { mii_pollstat(mii); if (mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { sc->ste_link++; /* * we don't get a call-back on re-init so do it * otherwise we get stuck in the wrong link state */ ste_miibus_statchg(sc->ste_dev); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) ste_start_locked(ifp); } } callout_reset(&sc->ste_stat_callout, hz, ste_stats_update, sc); return; } /* * Probe for a Sundance ST201 chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int ste_probe(dev) device_t dev; { struct ste_type *t; t = ste_devs; while(t->ste_name != NULL) { if ((pci_get_vendor(dev) == t->ste_vid) && (pci_get_device(dev) == t->ste_did)) { device_set_desc(dev, t->ste_name); return (BUS_PROBE_DEFAULT); } t++; } return(ENXIO); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int ste_attach(dev) device_t dev; { struct ste_softc *sc; struct ifnet *ifp; int error = 0, rid; u_char eaddr[6]; sc = device_get_softc(dev); sc->ste_dev = dev; /* * Only use one PHY since this chip reports multiple * Note on the DFE-550 the PHY is at 1 on the DFE-580 * it is at 0 & 1. It is rev 0x12. */ if (pci_get_vendor(dev) == DL_VENDORID && pci_get_device(dev) == DL_DEVICEID_DL10050 && pci_get_revid(dev) == 0x12 ) sc->ste_one_phy = 1; mtx_init(&sc->ste_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); /* * Map control/status registers. 
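* (STE_RES and STE_RID, chosen by STE_USEIOSPACE above, select between the chip's I/O-port and memory-mapped BARs at compile time.)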
*/ pci_enable_busmaster(dev); rid = STE_RID; sc->ste_res = bus_alloc_resource_any(dev, STE_RES, &rid, RF_ACTIVE); if (sc->ste_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); error = ENXIO; goto fail; } sc->ste_btag = rman_get_bustag(sc->ste_res); sc->ste_bhandle = rman_get_bushandle(sc->ste_res); /* Allocate interrupt */ rid = 0; sc->ste_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->ste_irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } callout_init_mtx(&sc->ste_stat_callout, &sc->ste_mtx, 0); /* Reset the adapter. */ ste_reset(sc); /* * Get station address from the EEPROM. */ if (ste_read_eeprom(sc, eaddr, STE_EEADDR_NODE0, 3, 0)) { device_printf(dev, "failed to read station address\n"); error = ENXIO;; goto fail; } /* Allocate the descriptor queues. */ sc->ste_ldata = contigmalloc(sizeof(struct ste_list_data), M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc->ste_ldata == NULL) { device_printf(dev, "no memory for list buffers!\n"); error = ENXIO; goto fail; } bzero(sc->ste_ldata, sizeof(struct ste_list_data)); ifp = sc->ste_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } /* Do MII setup. */ if (mii_phy_probe(dev, &sc->ste_miibus, ste_ifmedia_upd, ste_ifmedia_sts)) { device_printf(dev, "MII without any phy!\n"); error = ENXIO; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = ste_ioctl; ifp->if_start = ste_start; ifp->if_watchdog = ste_watchdog; ifp->if_init = ste_init; IFQ_SET_MAXLEN(&ifp->if_snd, STE_TX_LIST_CNT - 1); ifp->if_snd.ifq_drv_maxlen = STE_TX_LIST_CNT - 1; IFQ_SET_READY(&ifp->if_snd); sc->ste_tx_thresh = STE_TXSTART_THRESH; /* * Call MI attach routine. */ ether_ifattach(ifp, eaddr); /* * Tell the upper layer(s) we support long frames. */ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_VLAN_MTU; ifp->if_capenable = ifp->if_capabilities; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif /* Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->ste_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, ste_intr, sc, &sc->ste_intrhand); if (error) { device_printf(dev, "couldn't set up irq\n"); ether_ifdetach(ifp); goto fail; } fail: if (error) ste_detach(dev); return(error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. 
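 */

/*
 * [Editor's illustrative sketch -- not part of this revision's text.
 * The poll-handler changes scattered through this diff all follow one
 * pattern: poll_handler_t now returns the number of frames received so
 * the polling core can adapt its per-tick budget, while drivers whose
 * handlers cannot count (smc(4) above) set IFCAP_POLLING_NOCOUNT
 * instead.  The "xxx_"/"XXX_" names below are placeholders, not a real
 * driver.]
 */
#ifdef DEVICE_POLLING_EXAMPLE
static int
xxx_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct xxx_softc	*sc = ifp->if_softc;
	int			rx_npkts = 0;

	XXX_LOCK(sc);
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		/* rxeof-style routines now return the packet count. */
		rx_npkts = xxx_rxeof(sc);
	XXX_UNLOCK(sc);
	return (rx_npkts);
}
#endif

/*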
*/ static int ste_detach(dev) device_t dev; { struct ste_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); KASSERT(mtx_initialized(&sc->ste_mtx), ("ste mutex not initialized")); ifp = sc->ste_ifp; #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif /* These should only be active if attach succeeded */ if (device_is_attached(dev)) { STE_LOCK(sc); ste_stop(sc); STE_UNLOCK(sc); callout_drain(&sc->ste_stat_callout); ether_ifdetach(ifp); } if (sc->ste_miibus) device_delete_child(dev, sc->ste_miibus); bus_generic_detach(dev); if (sc->ste_intrhand) bus_teardown_intr(dev, sc->ste_irq, sc->ste_intrhand); if (sc->ste_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ste_irq); if (sc->ste_res) bus_release_resource(dev, STE_RES, STE_RID, sc->ste_res); if (ifp) if_free(ifp); if (sc->ste_ldata) { contigfree(sc->ste_ldata, sizeof(struct ste_list_data), M_DEVBUF); } mtx_destroy(&sc->ste_mtx); return(0); } static int ste_newbuf(sc, c, m) struct ste_softc *sc; struct ste_chain_onefrag *c; struct mbuf *m; { struct mbuf *m_new = NULL; if (m == NULL) { MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) return(ENOBUFS); MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { m_freem(m_new); return(ENOBUFS); } m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; } else { m_new = m; m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; m_new->m_data = m_new->m_ext.ext_buf; } m_adj(m_new, ETHER_ALIGN); c->ste_mbuf = m_new; c->ste_ptr->ste_status = 0; c->ste_ptr->ste_frag.ste_addr = vtophys(mtod(m_new, caddr_t)); c->ste_ptr->ste_frag.ste_len = (1536 + ETHER_VLAN_ENCAP_LEN) | STE_FRAG_LAST; return(0); } static int ste_init_rx_list(sc) struct ste_softc *sc; { struct ste_chain_data *cd; struct ste_list_data *ld; int i; cd = &sc->ste_cdata; ld = sc->ste_ldata; for (i = 0; i < STE_RX_LIST_CNT; i++) { cd->ste_rx_chain[i].ste_ptr = &ld->ste_rx_list[i]; if (ste_newbuf(sc, &cd->ste_rx_chain[i], NULL) == ENOBUFS) return(ENOBUFS); if (i == (STE_RX_LIST_CNT - 1)) { cd->ste_rx_chain[i].ste_next = &cd->ste_rx_chain[0]; ld->ste_rx_list[i].ste_next = vtophys(&ld->ste_rx_list[0]); } else { cd->ste_rx_chain[i].ste_next = &cd->ste_rx_chain[i + 1]; ld->ste_rx_list[i].ste_next = vtophys(&ld->ste_rx_list[i + 1]); } ld->ste_rx_list[i].ste_status = 0; } cd->ste_rx_head = &cd->ste_rx_chain[0]; return(0); } static void ste_init_tx_list(sc) struct ste_softc *sc; { struct ste_chain_data *cd; struct ste_list_data *ld; int i; cd = &sc->ste_cdata; ld = sc->ste_ldata; for (i = 0; i < STE_TX_LIST_CNT; i++) { cd->ste_tx_chain[i].ste_ptr = &ld->ste_tx_list[i]; cd->ste_tx_chain[i].ste_ptr->ste_next = 0; cd->ste_tx_chain[i].ste_ptr->ste_ctl = 0; cd->ste_tx_chain[i].ste_phys = vtophys(&ld->ste_tx_list[i]); if (i == (STE_TX_LIST_CNT - 1)) cd->ste_tx_chain[i].ste_next = &cd->ste_tx_chain[0]; else cd->ste_tx_chain[i].ste_next = &cd->ste_tx_chain[i + 1]; } cd->ste_tx_prod = 0; cd->ste_tx_cons = 0; return; } static void ste_init(xsc) void *xsc; { struct ste_softc *sc; sc = xsc; STE_LOCK(sc); ste_init_locked(sc); STE_UNLOCK(sc); } static void ste_init_locked(sc) struct ste_softc *sc; { int i; struct ifnet *ifp; STE_LOCK_ASSERT(sc); ifp = sc->ste_ifp; ste_stop(sc); /* Init our MAC address */ for (i = 0; i < ETHER_ADDR_LEN; i += 2) { CSR_WRITE_2(sc, STE_PAR0 + i, ((IF_LLADDR(sc->ste_ifp)[i] & 0xff) | IF_LLADDR(sc->ste_ifp)[i + 1] << 8)); } /* Init RX list */ if (ste_init_rx_list(sc) == ENOBUFS) { device_printf(sc->ste_dev, "initialization failed: no memory for RX buffers\n"); ste_stop(sc); return; } /* Set RX 
polling interval */ CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 64); /* Init TX descriptors */ ste_init_tx_list(sc); /* Set the TX freethresh value */ CSR_WRITE_1(sc, STE_TX_DMABURST_THRESH, STE_PACKET_SIZE >> 8); /* Set the TX start threshold for best performance. */ CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh); /* Set the TX reclaim threshold. */ CSR_WRITE_1(sc, STE_TX_RECLAIM_THRESH, (STE_PACKET_SIZE >> 4)); /* Set up the RX filter. */ CSR_WRITE_1(sc, STE_RX_MODE, STE_RXMODE_UNICAST); /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) { STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC); } else { STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC); } /* Set capture broadcast bit to accept broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) { STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_BROADCAST); } else { STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_BROADCAST); } ste_setmulti(sc); /* Load the address of the RX list. */ STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL); ste_wait(sc); CSR_WRITE_4(sc, STE_RX_DMALIST_PTR, vtophys(&sc->ste_ldata->ste_rx_list[0])); STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL); STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL); /* Set TX polling interval (defer until we TX first packet */ CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0); /* Load address of the TX list */ STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL); ste_wait(sc); CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0); STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL); STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL); ste_wait(sc); sc->ste_tx_prev = NULL; /* Enable receiver and transmitter */ CSR_WRITE_2(sc, STE_MACCTL0, 0); CSR_WRITE_2(sc, STE_MACCTL1, 0); STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_ENABLE); STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_ENABLE); /* Enable stats counters. */ STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_ENABLE); CSR_WRITE_2(sc, STE_ISR, 0xFFFF); #ifdef DEVICE_POLLING /* Disable interrupts if we are polling. */ if (ifp->if_capenable & IFCAP_POLLING) CSR_WRITE_2(sc, STE_IMR, 0); else #endif /* Enable interrupts. */ CSR_WRITE_2(sc, STE_IMR, STE_INTRS); /* Accept VLAN length packets */ CSR_WRITE_2(sc, STE_MAX_FRAMELEN, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN); ste_ifmedia_upd_locked(ifp); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&sc->ste_stat_callout, hz, ste_stats_update, sc); return; } static void ste_stop(sc) struct ste_softc *sc; { int i; struct ifnet *ifp; STE_LOCK_ASSERT(sc); ifp = sc->ste_ifp; callout_stop(&sc->ste_stat_callout); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE); CSR_WRITE_2(sc, STE_IMR, 0); STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_DISABLE); STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_DISABLE); STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_DISABLE); STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL); STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL); ste_wait(sc); /* * Try really hard to stop the RX engine or under heavy RX * data chip will write into de-allocated memory. 
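* (Hence the full ste_reset() below before any mbufs are freed; merely stalling the DMA engines is not always enough to stop a busy receiver.)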
*/ ste_reset(sc); sc->ste_link = 0; for (i = 0; i < STE_RX_LIST_CNT; i++) { if (sc->ste_cdata.ste_rx_chain[i].ste_mbuf != NULL) { m_freem(sc->ste_cdata.ste_rx_chain[i].ste_mbuf); sc->ste_cdata.ste_rx_chain[i].ste_mbuf = NULL; } } for (i = 0; i < STE_TX_LIST_CNT; i++) { if (sc->ste_cdata.ste_tx_chain[i].ste_mbuf != NULL) { m_freem(sc->ste_cdata.ste_tx_chain[i].ste_mbuf); sc->ste_cdata.ste_tx_chain[i].ste_mbuf = NULL; } } bzero(sc->ste_ldata, sizeof(struct ste_list_data)); return; } static void ste_reset(sc) struct ste_softc *sc; { int i; STE_SETBIT4(sc, STE_ASICCTL, STE_ASICCTL_GLOBAL_RESET|STE_ASICCTL_RX_RESET| STE_ASICCTL_TX_RESET|STE_ASICCTL_DMA_RESET| STE_ASICCTL_FIFO_RESET|STE_ASICCTL_NETWORK_RESET| STE_ASICCTL_AUTOINIT_RESET|STE_ASICCTL_HOST_RESET| STE_ASICCTL_EXTRESET_RESET); DELAY(100000); for (i = 0; i < STE_TIMEOUT; i++) { if (!(CSR_READ_4(sc, STE_ASICCTL) & STE_ASICCTL_RESET_BUSY)) break; } if (i == STE_TIMEOUT) device_printf(sc->ste_dev, "global reset never completed\n"); return; } static int ste_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct ste_softc *sc; struct ifreq *ifr; struct mii_data *mii; int error = 0; sc = ifp->if_softc; ifr = (struct ifreq *)data; switch(command) { case SIOCSIFFLAGS: STE_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->ste_if_flags & IFF_PROMISC)) { STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->ste_if_flags & IFF_PROMISC) { STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC); } if (ifp->if_drv_flags & IFF_DRV_RUNNING && (ifp->if_flags ^ sc->ste_if_flags) & IFF_ALLMULTI) ste_setmulti(sc); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { sc->ste_tx_thresh = STE_TXSTART_THRESH; ste_init_locked(sc); } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) ste_stop(sc); } sc->ste_if_flags = ifp->if_flags; STE_UNLOCK(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: STE_LOCK(sc); ste_setmulti(sc); STE_UNLOCK(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->ste_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: #ifdef DEVICE_POLLING if (ifr->ifr_reqcap & IFCAP_POLLING && !(ifp->if_capenable & IFCAP_POLLING)) { error = ether_poll_register(ste_poll, ifp); if (error) return(error); STE_LOCK(sc); /* Disable interrupts */ CSR_WRITE_2(sc, STE_IMR, 0); ifp->if_capenable |= IFCAP_POLLING; STE_UNLOCK(sc); return (error); } if (!(ifr->ifr_reqcap & IFCAP_POLLING) && ifp->if_capenable & IFCAP_POLLING) { error = ether_poll_deregister(ifp); /* Enable interrupts. */ STE_LOCK(sc); CSR_WRITE_2(sc, STE_IMR, STE_INTRS); ifp->if_capenable &= ~IFCAP_POLLING; STE_UNLOCK(sc); return (error); } #endif /* DEVICE_POLLING */ break; default: error = ether_ioctl(ifp, command, data); break; } return(error); } static int ste_encap(sc, c, m_head) struct ste_softc *sc; struct ste_chain *c; struct mbuf *m_head; { int frag = 0; struct ste_frag *f = NULL; struct mbuf *m; struct ste_desc *d; d = c->ste_ptr; d->ste_ctl = 0; encap_retry: for (m = m_head, frag = 0; m != NULL; m = m->m_next) { if (m->m_len != 0) { if (frag == STE_MAXFRAGS) break; f = &d->ste_frags[frag]; f->ste_addr = vtophys(mtod(m, vm_offset_t)); f->ste_len = m->m_len; frag++; } } if (m != NULL) { struct mbuf *mn; /* * We ran out of segments. We have to recopy this * mbuf chain first. Bail out if we can't get the * new buffers. 
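* (m_defrag() copies the chain into as few clusters as possible, so after one pass the fragment count should fit in STE_MAXFRAGS; if even that allocation fails we drop the packet.)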
*/ mn = m_defrag(m_head, M_DONTWAIT); if (mn == NULL) { m_freem(m_head); return ENOMEM; } m_head = mn; goto encap_retry; } c->ste_mbuf = m_head; d->ste_frags[frag - 1].ste_len |= STE_FRAG_LAST; d->ste_ctl = 1; return(0); } static void ste_start(ifp) struct ifnet *ifp; { struct ste_softc *sc; sc = ifp->if_softc; STE_LOCK(sc); ste_start_locked(ifp); STE_UNLOCK(sc); } static void ste_start_locked(ifp) struct ifnet *ifp; { struct ste_softc *sc; struct mbuf *m_head = NULL; struct ste_chain *cur_tx; int idx; sc = ifp->if_softc; STE_LOCK_ASSERT(sc); if (!sc->ste_link) return; if (ifp->if_drv_flags & IFF_DRV_OACTIVE) return; idx = sc->ste_cdata.ste_tx_prod; while(sc->ste_cdata.ste_tx_chain[idx].ste_mbuf == NULL) { /* * We cannot re-use the last (free) descriptor; * the chip may not have read its ste_next yet. */ if (STE_NEXT(idx, STE_TX_LIST_CNT) == sc->ste_cdata.ste_tx_cons) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; cur_tx = &sc->ste_cdata.ste_tx_chain[idx]; if (ste_encap(sc, cur_tx, m_head) != 0) break; cur_tx->ste_ptr->ste_next = 0; if (sc->ste_tx_prev == NULL) { cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1; /* Load address of the TX list */ STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL); ste_wait(sc); CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, vtophys(&sc->ste_ldata->ste_tx_list[0])); /* Set TX polling interval to start TX engine */ CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 64); STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL); ste_wait(sc); }else{ cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1; sc->ste_tx_prev->ste_ptr->ste_next = cur_tx->ste_phys; } sc->ste_tx_prev = cur_tx; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, cur_tx->ste_mbuf); STE_INC(idx, STE_TX_LIST_CNT); ifp->if_timer = 5; } sc->ste_cdata.ste_tx_prod = idx; return; } static void ste_watchdog(ifp) struct ifnet *ifp; { struct ste_softc *sc; sc = ifp->if_softc; STE_LOCK(sc); ifp->if_oerrors++; if_printf(ifp, "watchdog timeout\n"); ste_txeoc(sc); ste_txeof(sc); ste_rxeoc(sc); ste_rxeof(sc); ste_reset(sc); ste_init_locked(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) ste_start_locked(ifp); STE_UNLOCK(sc); return; } static int ste_shutdown(dev) device_t dev; { struct ste_softc *sc; sc = device_get_softc(dev); STE_LOCK(sc); ste_stop(sc); STE_UNLOCK(sc); return (0); } Index: head/sys/dev/stge/if_stge.c =================================================================== --- head/sys/dev/stge/if_stge.c (revision 193095) +++ head/sys/dev/stge/if_stge.c (revision 193096) @@ -1,2737 +1,2743 @@ /* $NetBSD: if_stge.c,v 1.32 2005/12/11 12:22:49 christos Exp $ */ /*- * Copyright (c) 2001 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Jason R. Thorpe. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the NetBSD * Foundation, Inc. 
and its contributors. * 4. Neither the name of The NetBSD Foundation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Device driver for the Sundance Tech. TC9021 10/100/1000 * Ethernet controller. */ #include __FBSDID("$FreeBSD$"); #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define STGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) MODULE_DEPEND(stge, pci, 1, 1, 1); MODULE_DEPEND(stge, ether, 1, 1, 1); MODULE_DEPEND(stge, miibus, 1, 1, 1); /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* * Devices supported by this driver. */ static struct stge_product { uint16_t stge_vendorid; uint16_t stge_deviceid; const char *stge_name; } stge_products[] = { { VENDOR_SUNDANCETI, DEVICEID_SUNDANCETI_ST1023, "Sundance ST-1023 Gigabit Ethernet" }, { VENDOR_SUNDANCETI, DEVICEID_SUNDANCETI_ST2021, "Sundance ST-2021 Gigabit Ethernet" }, { VENDOR_TAMARACK, DEVICEID_TAMARACK_TC9021, "Tamarack TC9021 Gigabit Ethernet" }, { VENDOR_TAMARACK, DEVICEID_TAMARACK_TC9021_ALT, "Tamarack TC9021 Gigabit Ethernet" }, /* * The Sundance sample boards use the Sundance vendor ID, * but the Tamarack product ID. 
*/ { VENDOR_SUNDANCETI, DEVICEID_TAMARACK_TC9021, "Sundance TC9021 Gigabit Ethernet" }, { VENDOR_SUNDANCETI, DEVICEID_TAMARACK_TC9021_ALT, "Sundance TC9021 Gigabit Ethernet" }, { VENDOR_DLINK, DEVICEID_DLINK_DL4000, "D-Link DL-4000 Gigabit Ethernet" }, { VENDOR_ANTARES, DEVICEID_ANTARES_TC9021, "Antares Gigabit Ethernet" } }; static int stge_probe(device_t); static int stge_attach(device_t); static int stge_detach(device_t); static int stge_shutdown(device_t); static int stge_suspend(device_t); static int stge_resume(device_t); static int stge_encap(struct stge_softc *, struct mbuf **); static void stge_start(struct ifnet *); static void stge_start_locked(struct ifnet *); static void stge_watchdog(struct stge_softc *); static int stge_ioctl(struct ifnet *, u_long, caddr_t); static void stge_init(void *); static void stge_init_locked(struct stge_softc *); static void stge_vlan_setup(struct stge_softc *); static void stge_stop(struct stge_softc *); static void stge_start_tx(struct stge_softc *); static void stge_start_rx(struct stge_softc *); static void stge_stop_tx(struct stge_softc *); static void stge_stop_rx(struct stge_softc *); static void stge_reset(struct stge_softc *, uint32_t); static int stge_eeprom_wait(struct stge_softc *); static void stge_read_eeprom(struct stge_softc *, int, uint16_t *); static void stge_tick(void *); static void stge_stats_update(struct stge_softc *); static void stge_set_filter(struct stge_softc *); static void stge_set_multi(struct stge_softc *); static void stge_link_task(void *, int); static void stge_intr(void *); static __inline int stge_tx_error(struct stge_softc *); static void stge_txeof(struct stge_softc *); -static void stge_rxeof(struct stge_softc *); +static int stge_rxeof(struct stge_softc *); static __inline void stge_discard_rxbuf(struct stge_softc *, int); static int stge_newbuf(struct stge_softc *, int); #ifndef __NO_STRICT_ALIGNMENT static __inline struct mbuf *stge_fixup_rx(struct stge_softc *, struct mbuf *); #endif static void stge_mii_sync(struct stge_softc *); static void stge_mii_send(struct stge_softc *, uint32_t, int); static int stge_mii_readreg(struct stge_softc *, struct stge_mii_frame *); static int stge_mii_writereg(struct stge_softc *, struct stge_mii_frame *); static int stge_miibus_readreg(device_t, int, int); static int stge_miibus_writereg(device_t, int, int, int); static void stge_miibus_statchg(device_t); static int stge_mediachange(struct ifnet *); static void stge_mediastatus(struct ifnet *, struct ifmediareq *); static void stge_dmamap_cb(void *, bus_dma_segment_t *, int, int); static int stge_dma_alloc(struct stge_softc *); static void stge_dma_free(struct stge_softc *); static void stge_dma_wait(struct stge_softc *); static void stge_init_tx_ring(struct stge_softc *); static int stge_init_rx_ring(struct stge_softc *); #ifdef DEVICE_POLLING -static void stge_poll(struct ifnet *, enum poll_cmd, int); +static int stge_poll(struct ifnet *, enum poll_cmd, int); #endif static void stge_setwol(struct stge_softc *); static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int, int); static int sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS); static int sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS); static device_method_t stge_methods[] = { /* Device interface */ DEVMETHOD(device_probe, stge_probe), DEVMETHOD(device_attach, stge_attach), DEVMETHOD(device_detach, stge_detach), DEVMETHOD(device_shutdown, stge_shutdown), DEVMETHOD(device_suspend, stge_suspend), DEVMETHOD(device_resume, stge_resume), /* MII interface */ 
DEVMETHOD(miibus_readreg, stge_miibus_readreg), DEVMETHOD(miibus_writereg, stge_miibus_writereg), DEVMETHOD(miibus_statchg, stge_miibus_statchg), { 0, 0 } }; static driver_t stge_driver = { "stge", stge_methods, sizeof(struct stge_softc) }; static devclass_t stge_devclass; DRIVER_MODULE(stge, pci, stge_driver, stge_devclass, 0, 0); DRIVER_MODULE(miibus, stge, miibus_driver, miibus_devclass, 0, 0); static struct resource_spec stge_res_spec_io[] = { { SYS_RES_IOPORT, PCIR_BAR(0), RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0, 0 } }; static struct resource_spec stge_res_spec_mem[] = { { SYS_RES_MEMORY, PCIR_BAR(1), RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0, 0 } }; #define MII_SET(x) \ CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) | (x)) #define MII_CLR(x) \ CSR_WRITE_1(sc, STGE_PhyCtrl, CSR_READ_1(sc, STGE_PhyCtrl) & ~(x)) /* * Sync the PHYs by setting data bit and strobing the clock 32 times. */ static void stge_mii_sync(struct stge_softc *sc) { int i; MII_SET(PC_MgmtDir | PC_MgmtData); for (i = 0; i < 32; i++) { MII_SET(PC_MgmtClk); DELAY(1); MII_CLR(PC_MgmtClk); DELAY(1); } } /* * Clock a series of bits through the MII. */ static void stge_mii_send(struct stge_softc *sc, uint32_t bits, int cnt) { int i; MII_CLR(PC_MgmtClk); for (i = (0x1 << (cnt - 1)); i; i >>= 1) { if (bits & i) MII_SET(PC_MgmtData); else MII_CLR(PC_MgmtData); DELAY(1); MII_CLR(PC_MgmtClk); DELAY(1); MII_SET(PC_MgmtClk); } } /* * Read an PHY register through the MII. */ static int stge_mii_readreg(struct stge_softc *sc, struct stge_mii_frame *frame) { int i, ack; /* * Set up frame for RX. */ frame->mii_stdelim = STGE_MII_STARTDELIM; frame->mii_opcode = STGE_MII_READOP; frame->mii_turnaround = 0; frame->mii_data = 0; CSR_WRITE_1(sc, STGE_PhyCtrl, 0 | sc->sc_PhyCtrl); /* * Turn on data xmit. */ MII_SET(PC_MgmtDir); stge_mii_sync(sc); /* * Send command/address info. */ stge_mii_send(sc, frame->mii_stdelim, 2); stge_mii_send(sc, frame->mii_opcode, 2); stge_mii_send(sc, frame->mii_phyaddr, 5); stge_mii_send(sc, frame->mii_regaddr, 5); /* Turn off xmit. */ MII_CLR(PC_MgmtDir); /* Idle bit */ MII_CLR((PC_MgmtClk | PC_MgmtData)); DELAY(1); MII_SET(PC_MgmtClk); DELAY(1); /* Check for ack */ MII_CLR(PC_MgmtClk); DELAY(1); ack = CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData; MII_SET(PC_MgmtClk); DELAY(1); /* * Now try reading data bits. If the ack failed, we still * need to clock through 16 cycles to keep the PHY(s) in sync. */ if (ack) { for(i = 0; i < 16; i++) { MII_CLR(PC_MgmtClk); DELAY(1); MII_SET(PC_MgmtClk); DELAY(1); } goto fail; } for (i = 0x8000; i; i >>= 1) { MII_CLR(PC_MgmtClk); DELAY(1); if (!ack) { if (CSR_READ_1(sc, STGE_PhyCtrl) & PC_MgmtData) frame->mii_data |= i; DELAY(1); } MII_SET(PC_MgmtClk); DELAY(1); } fail: MII_CLR(PC_MgmtClk); DELAY(1); MII_SET(PC_MgmtClk); DELAY(1); if (ack) return(1); return(0); } /* * Write to a PHY register through the MII. */ static int stge_mii_writereg(struct stge_softc *sc, struct stge_mii_frame *frame) { /* * Set up frame for TX. */ frame->mii_stdelim = STGE_MII_STARTDELIM; frame->mii_opcode = STGE_MII_WRITEOP; frame->mii_turnaround = STGE_MII_TURNAROUND; /* * Turn on data output. */ MII_SET(PC_MgmtDir); stge_mii_sync(sc); stge_mii_send(sc, frame->mii_stdelim, 2); stge_mii_send(sc, frame->mii_opcode, 2); stge_mii_send(sc, frame->mii_phyaddr, 5); stge_mii_send(sc, frame->mii_regaddr, 5); stge_mii_send(sc, frame->mii_turnaround, 2); stge_mii_send(sc, frame->mii_data, 16); /* Idle bit. 
*/ MII_SET(PC_MgmtClk); DELAY(1); MII_CLR(PC_MgmtClk); DELAY(1); /* * Turn off xmit. */ MII_CLR(PC_MgmtDir); return(0); } /* * sc_miibus_readreg: [mii interface function] * * Read a PHY register on the MII of the TC9021. */ static int stge_miibus_readreg(device_t dev, int phy, int reg) { struct stge_softc *sc; struct stge_mii_frame frame; int error; sc = device_get_softc(dev); if (reg == STGE_PhyCtrl) { /* XXX allow ip1000phy read STGE_PhyCtrl register. */ STGE_MII_LOCK(sc); error = CSR_READ_1(sc, STGE_PhyCtrl); STGE_MII_UNLOCK(sc); return (error); } bzero(&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; STGE_MII_LOCK(sc); error = stge_mii_readreg(sc, &frame); STGE_MII_UNLOCK(sc); if (error != 0) { /* Don't show errors for PHY probe request */ if (reg != 1) device_printf(sc->sc_dev, "phy read fail\n"); return (0); } return (frame.mii_data); } /* * stge_miibus_writereg: [mii interface function] * * Write a PHY register on the MII of the TC9021. */ static int stge_miibus_writereg(device_t dev, int phy, int reg, int val) { struct stge_softc *sc; struct stge_mii_frame frame; int error; sc = device_get_softc(dev); bzero(&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; frame.mii_data = val; STGE_MII_LOCK(sc); error = stge_mii_writereg(sc, &frame); STGE_MII_UNLOCK(sc); if (error != 0) device_printf(sc->sc_dev, "phy write fail\n"); return (0); } /* * stge_miibus_statchg: [mii interface function] * * Callback from MII layer when media changes. */ static void stge_miibus_statchg(device_t dev) { struct stge_softc *sc; sc = device_get_softc(dev); taskqueue_enqueue(taskqueue_swi, &sc->sc_link_task); } /* * stge_mediastatus: [ifmedia interface function] * * Get the current interface media status. */ static void stge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) { struct stge_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->sc_miibus); mii_pollstat(mii); ifmr->ifm_status = mii->mii_media_status; ifmr->ifm_active = mii->mii_media_active; } /* * stge_mediachange: [ifmedia interface function] * * Set hardware to newly-selected media. */ static int stge_mediachange(struct ifnet *ifp) { struct stge_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->sc_miibus); mii_mediachg(mii); return (0); } static int stge_eeprom_wait(struct stge_softc *sc) { int i; for (i = 0; i < STGE_TIMEOUT; i++) { DELAY(1000); if ((CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy) == 0) return (0); } return (1); } /* * stge_read_eeprom: * * Read data from the serial EEPROM. 
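stge_eeprom_wait() just above is the stock bounded busy-wait: poll a busy bit with a fixed DELAY() per iteration and give up after a fixed number of tries, so a wedged EEPROM delays attach by a bounded amount instead of hanging the kernel. A minimal sketch of the pattern; my_softc, MY_TIMEOUT, and read_busy_bit() are hypothetical stand-ins for the softc and for a status read such as CSR_READ_2(sc, STGE_EepromCtrl) & EC_EepromBusy:

	/*
	 * Bounded busy-wait sketch.  read_busy_bit() is a hypothetical
	 * stand-in for a device status read.
	 */
	static int
	my_wait_not_busy(struct my_softc *sc)
	{
		int i;

		for (i = 0; i < MY_TIMEOUT; i++) {
			DELAY(1000);			/* 1 ms per poll */
			if (read_busy_bit(sc) == 0)
				return (0);		/* device ready */
		}
		return (1);				/* timed out */
	}
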
*/ static void stge_read_eeprom(struct stge_softc *sc, int offset, uint16_t *data) { if (stge_eeprom_wait(sc)) device_printf(sc->sc_dev, "EEPROM failed to come ready\n"); CSR_WRITE_2(sc, STGE_EepromCtrl, EC_EepromAddress(offset) | EC_EepromOpcode(EC_OP_RR)); if (stge_eeprom_wait(sc)) device_printf(sc->sc_dev, "EEPROM read timed out\n"); *data = CSR_READ_2(sc, STGE_EepromData); } static int stge_probe(device_t dev) { struct stge_product *sp; int i; uint16_t vendor, devid; vendor = pci_get_vendor(dev); devid = pci_get_device(dev); sp = stge_products; for (i = 0; i < sizeof(stge_products)/sizeof(stge_products[0]); i++, sp++) { if (vendor == sp->stge_vendorid && devid == sp->stge_deviceid) { device_set_desc(dev, sp->stge_name); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static int stge_attach(device_t dev) { struct stge_softc *sc; struct ifnet *ifp; uint8_t enaddr[ETHER_ADDR_LEN]; int error, i; uint16_t cmd; uint32_t val; error = 0; sc = device_get_softc(dev); sc->sc_dev = dev; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); mtx_init(&sc->sc_mii_mtx, "stge_mii_mutex", NULL, MTX_DEF); callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0); TASK_INIT(&sc->sc_link_task, 0, stge_link_task, sc); /* * Map the device. */ pci_enable_busmaster(dev); cmd = pci_read_config(dev, PCIR_COMMAND, 2); val = pci_read_config(dev, PCIR_BAR(1), 4); if ((val & 0x01) != 0) sc->sc_spec = stge_res_spec_mem; else { val = pci_read_config(dev, PCIR_BAR(0), 4); if ((val & 0x01) == 0) { device_printf(sc->sc_dev, "couldn't locate IO BAR\n"); error = ENXIO; goto fail; } sc->sc_spec = stge_res_spec_io; } error = bus_alloc_resources(dev, sc->sc_spec, sc->sc_res); if (error != 0) { device_printf(dev, "couldn't allocate %s resources\n", sc->sc_spec == stge_res_spec_mem ? "memory" : "I/O"); goto fail; } sc->sc_rev = pci_get_revid(dev); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rxint_nframe", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_nframe, 0, sysctl_hw_stge_rxint_nframe, "I", "stge rx interrupt nframe"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rxint_dmawait", CTLTYPE_INT|CTLFLAG_RW, &sc->sc_rxint_dmawait, 0, sysctl_hw_stge_rxint_dmawait, "I", "stge rx interrupt dmawait"); /* Pull in device tunables. */ sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT; error = resource_int_value(device_get_name(dev), device_get_unit(dev), "rxint_nframe", &sc->sc_rxint_nframe); if (error == 0) { if (sc->sc_rxint_nframe < STGE_RXINT_NFRAME_MIN || sc->sc_rxint_nframe > STGE_RXINT_NFRAME_MAX) { device_printf(dev, "rxint_nframe value out of range; " "using default: %d\n", STGE_RXINT_NFRAME_DEFAULT); sc->sc_rxint_nframe = STGE_RXINT_NFRAME_DEFAULT; } } sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT; error = resource_int_value(device_get_name(dev), device_get_unit(dev), "rxint_dmawait", &sc->sc_rxint_dmawait); if (error == 0) { if (sc->sc_rxint_dmawait < STGE_RXINT_DMAWAIT_MIN || sc->sc_rxint_dmawait > STGE_RXINT_DMAWAIT_MAX) { device_printf(dev, "rxint_dmawait value out of range; " "using default: %d\n", STGE_RXINT_DMAWAIT_DEFAULT); sc->sc_rxint_dmawait = STGE_RXINT_DMAWAIT_DEFAULT; } } if ((error = stge_dma_alloc(sc) != 0)) goto fail; /* * Determine if we're copper or fiber. It affects how we * reset the card. */ if (CSR_READ_4(sc, STGE_AsicCtrl) & AC_PhyMedia) sc->sc_usefiber = 1; else sc->sc_usefiber = 0; /* Load LED configuration from EEPROM. 
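stge_probe() above is the usual table-driven PCI probe: walk the product table, compare each entry's vendor/device ID pair against the candidate device, and claim the device with the entry's description on a match. A condensed sketch of the idiom; the my_* names are hypothetical stand-ins for stge_products[] and its fields:

	/* Hypothetical product table entry, mirroring struct stge_product. */
	struct my_product {
		uint16_t	vendorid;
		uint16_t	deviceid;
		const char	*name;
	};

	static int
	my_probe(device_t dev, const struct my_product *mp, int nentries)
	{
		int i;

		for (i = 0; i < nentries; i++, mp++) {
			if (pci_get_vendor(dev) == mp->vendorid &&
			    pci_get_device(dev) == mp->deviceid) {
				device_set_desc(dev, mp->name);
				return (BUS_PROBE_DEFAULT);	/* claim it */
			}
		}
		return (ENXIO);			/* not one of ours */
	}
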
*/ stge_read_eeprom(sc, STGE_EEPROM_LEDMode, &sc->sc_led); /* * Reset the chip to a known state. */ STGE_LOCK(sc); stge_reset(sc, STGE_RESET_FULL); STGE_UNLOCK(sc); /* * Reading the station address from the EEPROM doesn't seem * to work, at least on my sample boards. Instead, since * the reset sequence does AutoInit, read it from the station * address registers. For the Sundance ST-1023 it can only be * read from the EEPROM. */ if (pci_get_device(dev) != DEVICEID_SUNDANCETI_ST1023) { uint16_t v; v = CSR_READ_2(sc, STGE_StationAddress0); enaddr[0] = v & 0xff; enaddr[1] = v >> 8; v = CSR_READ_2(sc, STGE_StationAddress1); enaddr[2] = v & 0xff; enaddr[3] = v >> 8; v = CSR_READ_2(sc, STGE_StationAddress2); enaddr[4] = v & 0xff; enaddr[5] = v >> 8; sc->sc_stge1023 = 0; } else { uint16_t myaddr[ETHER_ADDR_LEN / 2]; for (i = 0; i < ETHER_ADDR_LEN / 2; i++) { stge_read_eeprom(sc, STGE_EEPROM_StationAddress0 + i, &myaddr[i]); myaddr[i] = le16toh(myaddr[i]); } bcopy(myaddr, enaddr, sizeof(enaddr)); sc->sc_stge1023 = 1; } ifp = sc->sc_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(sc->sc_dev, "failed to if_alloc()\n"); error = ENXIO; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = stge_ioctl; ifp->if_start = stge_start; ifp->if_timer = 0; ifp->if_watchdog = NULL; ifp->if_init = stge_init; ifp->if_mtu = ETHERMTU; ifp->if_snd.ifq_drv_maxlen = STGE_TX_RING_CNT - 1; IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); IFQ_SET_READY(&ifp->if_snd); /* Revision B3 and earlier chips have a checksum bug. */ if (sc->sc_rev >= 0x0c) { ifp->if_hwassist = STGE_CSUM_FEATURES; ifp->if_capabilities = IFCAP_HWCSUM; } else { ifp->if_hwassist = 0; ifp->if_capabilities = 0; } ifp->if_capabilities |= IFCAP_WOL_MAGIC; ifp->if_capenable = ifp->if_capabilities; /* * Read some important bits from the PhyCtrl register. */ sc->sc_PhyCtrl = CSR_READ_1(sc, STGE_PhyCtrl) & (PC_PhyDuplexPolarity | PC_PhyLnkPolarity); /* Set up MII bus. */ if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, stge_mediachange, stge_mediastatus)) != 0) { device_printf(sc->sc_dev, "no PHY found!\n"); goto fail; } ether_ifattach(ifp, enaddr); /* VLAN capability setup */ ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; if (sc->sc_rev >= 0x0c) ifp->if_capabilities |= IFCAP_VLAN_HWCSUM; ifp->if_capenable = ifp->if_capabilities; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif /* * Tell the upper layer(s) we support long frames. * Must appear after the call to ether_ifattach() because * ether_ifattach() sets ifi_hdrlen to the default value. */ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); /* * The manual recommends disabling early transmit, so we * do. It's disabled anyway if using IP checksumming, * since the entire packet must be in the FIFO in order * for the chip to perform the checksum. */ sc->sc_txthresh = 0x0fff; /* * Disable MWI if the PCI layer tells us to.
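An aside on the station-address unpacking above: each 16-bit station address register holds two bytes of the MAC address in little-endian order, so the low byte comes from v & 0xff and the high byte from v >> 8. The same unpacking as a loop; rd16() is a hypothetical accessor standing in for CSR_READ_2() on STGE_StationAddress0..2:

	/*
	 * Unpack three little-endian 16-bit registers into a 6-byte
	 * Ethernet address.  rd16() is hypothetical.
	 */
	static void
	my_get_eaddr(struct my_softc *sc, uint8_t eaddr[6])
	{
		uint16_t v;
		int i;

		for (i = 0; i < 3; i++) {
			v = rd16(sc, i);
			eaddr[2 * i + 0] = v & 0xff;	/* low byte first */
			eaddr[2 * i + 1] = v >> 8;	/* then high byte */
		}
	}
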
*/ sc->sc_DMACtrl = 0; if ((cmd & PCIM_CMD_MWRICEN) == 0) sc->sc_DMACtrl |= DMAC_MWIDisable; /* * Hookup IRQ */ error = bus_setup_intr(dev, sc->sc_res[1], INTR_TYPE_NET | INTR_MPSAFE, NULL, stge_intr, sc, &sc->sc_ih); if (error != 0) { ether_ifdetach(ifp); device_printf(sc->sc_dev, "couldn't set up IRQ\n"); sc->sc_ifp = NULL; goto fail; } fail: if (error != 0) stge_detach(dev); return (error); } static int stge_detach(device_t dev) { struct stge_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->sc_ifp; #ifdef DEVICE_POLLING if (ifp && ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif if (device_is_attached(dev)) { STGE_LOCK(sc); /* XXX */ sc->sc_detach = 1; stge_stop(sc); STGE_UNLOCK(sc); callout_drain(&sc->sc_tick_ch); taskqueue_drain(taskqueue_swi, &sc->sc_link_task); ether_ifdetach(ifp); } if (sc->sc_miibus != NULL) { device_delete_child(dev, sc->sc_miibus); sc->sc_miibus = NULL; } bus_generic_detach(dev); stge_dma_free(sc); if (ifp != NULL) { if_free(ifp); sc->sc_ifp = NULL; } if (sc->sc_ih) { bus_teardown_intr(dev, sc->sc_res[1], sc->sc_ih); sc->sc_ih = NULL; } bus_release_resources(dev, sc->sc_spec, sc->sc_res); mtx_destroy(&sc->sc_mii_mtx); mtx_destroy(&sc->sc_mtx); return (0); } struct stge_dmamap_arg { bus_addr_t stge_busaddr; }; static void stge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct stge_dmamap_arg *ctx; if (error != 0) return; ctx = (struct stge_dmamap_arg *)arg; ctx->stge_busaddr = segs[0].ds_addr; } static int stge_dma_alloc(struct stge_softc *sc) { struct stge_dmamap_arg ctx; struct stge_txdesc *txd; struct stge_rxdesc *rxd; int error, i; /* create parent tag. */ error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),/* parent */ 1, 0, /* algnmnt, boundary */ STGE_DMA_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 0, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sc_cdata.stge_parent_tag); if (error != 0) { device_printf(sc->sc_dev, "failed to create parent DMA tag\n"); goto fail; } /* create tag for Tx ring. */ error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */ STGE_RING_ALIGN, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ STGE_TX_RING_SZ, /* maxsize */ 1, /* nsegments */ STGE_TX_RING_SZ, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sc_cdata.stge_tx_ring_tag); if (error != 0) { device_printf(sc->sc_dev, "failed to allocate Tx ring DMA tag\n"); goto fail; } /* create tag for Rx ring. */ error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */ STGE_RING_ALIGN, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ STGE_RX_RING_SZ, /* maxsize */ 1, /* nsegments */ STGE_RX_RING_SZ, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sc_cdata.stge_rx_ring_tag); if (error != 0) { device_printf(sc->sc_dev, "failed to allocate Rx ring DMA tag\n"); goto fail; } /* create tag for Tx buffers. 
*/ error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */ 1, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES * STGE_MAXTXSEGS, /* maxsize */ STGE_MAXTXSEGS, /* nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sc_cdata.stge_tx_tag); if (error != 0) { device_printf(sc->sc_dev, "failed to allocate Tx DMA tag\n"); goto fail; } /* create tag for Rx buffers. */ error = bus_dma_tag_create(sc->sc_cdata.stge_parent_tag,/* parent */ 1, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, /* maxsize */ 1, /* nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sc_cdata.stge_rx_tag); if (error != 0) { device_printf(sc->sc_dev, "failed to allocate Rx DMA tag\n"); goto fail; } /* allocate DMA'able memory and load the DMA map for Tx ring. */ error = bus_dmamem_alloc(sc->sc_cdata.stge_tx_ring_tag, (void **)&sc->sc_rdata.stge_tx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->sc_cdata.stge_tx_ring_map); if (error != 0) { device_printf(sc->sc_dev, "failed to allocate DMA'able memory for Tx ring\n"); goto fail; } ctx.stge_busaddr = 0; error = bus_dmamap_load(sc->sc_cdata.stge_tx_ring_tag, sc->sc_cdata.stge_tx_ring_map, sc->sc_rdata.stge_tx_ring, STGE_TX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT); if (error != 0 || ctx.stge_busaddr == 0) { device_printf(sc->sc_dev, "failed to load DMA'able memory for Tx ring\n"); goto fail; } sc->sc_rdata.stge_tx_ring_paddr = ctx.stge_busaddr; /* allocate DMA'able memory and load the DMA map for Rx ring. */ error = bus_dmamem_alloc(sc->sc_cdata.stge_rx_ring_tag, (void **)&sc->sc_rdata.stge_rx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->sc_cdata.stge_rx_ring_map); if (error != 0) { device_printf(sc->sc_dev, "failed to allocate DMA'able memory for Rx ring\n"); goto fail; } ctx.stge_busaddr = 0; error = bus_dmamap_load(sc->sc_cdata.stge_rx_ring_tag, sc->sc_cdata.stge_rx_ring_map, sc->sc_rdata.stge_rx_ring, STGE_RX_RING_SZ, stge_dmamap_cb, &ctx, BUS_DMA_NOWAIT); if (error != 0 || ctx.stge_busaddr == 0) { device_printf(sc->sc_dev, "failed to load DMA'able memory for Rx ring\n"); goto fail; } sc->sc_rdata.stge_rx_ring_paddr = ctx.stge_busaddr; /* create DMA maps for Tx buffers. */ for (i = 0; i < STGE_TX_RING_CNT; i++) { txd = &sc->sc_cdata.stge_txdesc[i]; txd->tx_m = NULL; txd->tx_dmamap = 0; error = bus_dmamap_create(sc->sc_cdata.stge_tx_tag, 0, &txd->tx_dmamap); if (error != 0) { device_printf(sc->sc_dev, "failed to create Tx dmamap\n"); goto fail; } } /* create DMA maps for Rx buffers. 
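The ring setup above repeats one busdma idiom per ring: create a tag expressing the alignment and size constraints, allocate DMA-safe memory against it, then load the map and capture the bus address in the load callback (bus_dmamap_load() only reports addresses through the callback, since a load may complete asynchronously). A condensed sketch of the sequence for a single-segment ring; MY_RING_ALIGN and MY_RING_SZ are hypothetical:

	/* Capture the bus address handed to the busdma load callback. */
	static void
	my_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
	{
		if (error == 0)
			*(bus_addr_t *)arg = segs[0].ds_addr;
	}

	/* Tag -> memory -> load; returns 0 or a busdma error. */
	static int
	my_ring_alloc(bus_dma_tag_t parent, bus_dma_tag_t *tag,
	    bus_dmamap_t *map, void **ring, bus_addr_t *paddr)
	{
		int error;

		error = bus_dma_tag_create(parent, MY_RING_ALIGN, 0,
		    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
		    MY_RING_SZ, 1, MY_RING_SZ, 0, NULL, NULL, tag);
		if (error != 0)
			return (error);
		error = bus_dmamem_alloc(*tag, ring,
		    BUS_DMA_NOWAIT | BUS_DMA_ZERO, map);
		if (error != 0)
			return (error);
		return (bus_dmamap_load(*tag, *map, *ring, MY_RING_SZ,
		    my_dmamap_cb, paddr, BUS_DMA_NOWAIT));
	}
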
*/ if ((error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0, &sc->sc_cdata.stge_rx_sparemap)) != 0) { device_printf(sc->sc_dev, "failed to create spare Rx dmamap\n"); goto fail; } for (i = 0; i < STGE_RX_RING_CNT; i++) { rxd = &sc->sc_cdata.stge_rxdesc[i]; rxd->rx_m = NULL; rxd->rx_dmamap = 0; error = bus_dmamap_create(sc->sc_cdata.stge_rx_tag, 0, &rxd->rx_dmamap); if (error != 0) { device_printf(sc->sc_dev, "failed to create Rx dmamap\n"); goto fail; } } fail: return (error); } static void stge_dma_free(struct stge_softc *sc) { struct stge_txdesc *txd; struct stge_rxdesc *rxd; int i; /* Tx ring */ if (sc->sc_cdata.stge_tx_ring_tag) { if (sc->sc_cdata.stge_tx_ring_map) bus_dmamap_unload(sc->sc_cdata.stge_tx_ring_tag, sc->sc_cdata.stge_tx_ring_map); if (sc->sc_cdata.stge_tx_ring_map && sc->sc_rdata.stge_tx_ring) bus_dmamem_free(sc->sc_cdata.stge_tx_ring_tag, sc->sc_rdata.stge_tx_ring, sc->sc_cdata.stge_tx_ring_map); sc->sc_rdata.stge_tx_ring = NULL; sc->sc_cdata.stge_tx_ring_map = 0; bus_dma_tag_destroy(sc->sc_cdata.stge_tx_ring_tag); sc->sc_cdata.stge_tx_ring_tag = NULL; } /* Rx ring */ if (sc->sc_cdata.stge_rx_ring_tag) { if (sc->sc_cdata.stge_rx_ring_map) bus_dmamap_unload(sc->sc_cdata.stge_rx_ring_tag, sc->sc_cdata.stge_rx_ring_map); if (sc->sc_cdata.stge_rx_ring_map && sc->sc_rdata.stge_rx_ring) bus_dmamem_free(sc->sc_cdata.stge_rx_ring_tag, sc->sc_rdata.stge_rx_ring, sc->sc_cdata.stge_rx_ring_map); sc->sc_rdata.stge_rx_ring = NULL; sc->sc_cdata.stge_rx_ring_map = 0; bus_dma_tag_destroy(sc->sc_cdata.stge_rx_ring_tag); sc->sc_cdata.stge_rx_ring_tag = NULL; } /* Tx buffers */ if (sc->sc_cdata.stge_tx_tag) { for (i = 0; i < STGE_TX_RING_CNT; i++) { txd = &sc->sc_cdata.stge_txdesc[i]; if (txd->tx_dmamap) { bus_dmamap_destroy(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap); txd->tx_dmamap = 0; } } bus_dma_tag_destroy(sc->sc_cdata.stge_tx_tag); sc->sc_cdata.stge_tx_tag = NULL; } /* Rx buffers */ if (sc->sc_cdata.stge_rx_tag) { for (i = 0; i < STGE_RX_RING_CNT; i++) { rxd = &sc->sc_cdata.stge_rxdesc[i]; if (rxd->rx_dmamap) { bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap); rxd->rx_dmamap = 0; } } if (sc->sc_cdata.stge_rx_sparemap) { bus_dmamap_destroy(sc->sc_cdata.stge_rx_tag, sc->sc_cdata.stge_rx_sparemap); sc->sc_cdata.stge_rx_sparemap = 0; } bus_dma_tag_destroy(sc->sc_cdata.stge_rx_tag); sc->sc_cdata.stge_rx_tag = NULL; } if (sc->sc_cdata.stge_parent_tag) { bus_dma_tag_destroy(sc->sc_cdata.stge_parent_tag); sc->sc_cdata.stge_parent_tag = NULL; } } /* * stge_shutdown: * * Make sure the interface is stopped at reboot time. */ static int stge_shutdown(device_t dev) { return (stge_suspend(dev)); } static void stge_setwol(struct stge_softc *sc) { struct ifnet *ifp; uint8_t v; STGE_LOCK_ASSERT(sc); ifp = sc->sc_ifp; v = CSR_READ_1(sc, STGE_WakeEvent); /* Disable all WOL bits. */ v &= ~(WE_WakePktEnable | WE_MagicPktEnable | WE_LinkEventEnable | WE_WakeOnLanEnable); if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) v |= WE_MagicPktEnable | WE_WakeOnLanEnable; CSR_WRITE_1(sc, STGE_WakeEvent, v); /* Reset Tx and prevent transmission. */ CSR_WRITE_4(sc, STGE_AsicCtrl, CSR_READ_4(sc, STGE_AsicCtrl) | AC_TxReset); /* * TC9021 automatically reset link speed to 100Mbps when it's put * into sleep so there is no need to try to resetting link speed. 
*/ } static int stge_suspend(device_t dev) { struct stge_softc *sc; sc = device_get_softc(dev); STGE_LOCK(sc); stge_stop(sc); sc->sc_suspended = 1; stge_setwol(sc); STGE_UNLOCK(sc); return (0); } static int stge_resume(device_t dev) { struct stge_softc *sc; struct ifnet *ifp; uint8_t v; sc = device_get_softc(dev); STGE_LOCK(sc); /* * Clear WOL bits, so special frames wouldn't interfere * normal Rx operation anymore. */ v = CSR_READ_1(sc, STGE_WakeEvent); v &= ~(WE_WakePktEnable | WE_MagicPktEnable | WE_LinkEventEnable | WE_WakeOnLanEnable); CSR_WRITE_1(sc, STGE_WakeEvent, v); ifp = sc->sc_ifp; if (ifp->if_flags & IFF_UP) stge_init_locked(sc); sc->sc_suspended = 0; STGE_UNLOCK(sc); return (0); } static void stge_dma_wait(struct stge_softc *sc) { int i; for (i = 0; i < STGE_TIMEOUT; i++) { DELAY(2); if ((CSR_READ_4(sc, STGE_DMACtrl) & DMAC_TxDMAInProg) == 0) break; } if (i == STGE_TIMEOUT) device_printf(sc->sc_dev, "DMA wait timed out\n"); } static int stge_encap(struct stge_softc *sc, struct mbuf **m_head) { struct stge_txdesc *txd; struct stge_tfd *tfd; struct mbuf *m; bus_dma_segment_t txsegs[STGE_MAXTXSEGS]; int error, i, nsegs, si; uint64_t csum_flags, tfc; STGE_LOCK_ASSERT(sc); if ((txd = STAILQ_FIRST(&sc->sc_cdata.stge_txfreeq)) == NULL) return (ENOBUFS); error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); if (error == EFBIG) { m = m_collapse(*m_head, M_DONTWAIT, STGE_MAXTXSEGS); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOMEM); } *m_head = m; error = bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap, *m_head, txsegs, &nsegs, 0); if (error != 0) { m_freem(*m_head); *m_head = NULL; return (error); } } else if (error != 0) return (error); if (nsegs == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } m = *m_head; csum_flags = 0; if ((m->m_pkthdr.csum_flags & STGE_CSUM_FEATURES) != 0) { if (m->m_pkthdr.csum_flags & CSUM_IP) csum_flags |= TFD_IPChecksumEnable; if (m->m_pkthdr.csum_flags & CSUM_TCP) csum_flags |= TFD_TCPChecksumEnable; else if (m->m_pkthdr.csum_flags & CSUM_UDP) csum_flags |= TFD_UDPChecksumEnable; } si = sc->sc_cdata.stge_tx_prod; tfd = &sc->sc_rdata.stge_tx_ring[si]; for (i = 0; i < nsegs; i++) tfd->tfd_frags[i].frag_word0 = htole64(FRAG_ADDR(txsegs[i].ds_addr) | FRAG_LEN(txsegs[i].ds_len)); sc->sc_cdata.stge_tx_cnt++; tfc = TFD_FrameId(si) | TFD_WordAlign(TFD_WordAlign_disable) | TFD_FragCount(nsegs) | csum_flags; if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT) tfc |= TFD_TxDMAIndicate; /* Update producer index. */ sc->sc_cdata.stge_tx_prod = (si + 1) % STGE_TX_RING_CNT; /* Check if we have a VLAN tag to insert. */ if (m->m_flags & M_VLANTAG) tfc |= (TFD_VLANTagInsert | TFD_VID(m->m_pkthdr.ether_vtag)); tfd->tfd_control = htole64(tfc); /* Update Tx Queue. */ STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txfreeq, tx_q); STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txbusyq, txd, tx_q); txd->tx_m = m; /* Sync descriptors. */ bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag, sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } /* * stge_start: [ifnet interface function] * * Start packet transmission on the interface. 
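stge_encap() above shows the standard recovery when a frame carries more scatter/gather segments than the Tx tag allows: bus_dmamap_load_mbuf_sg() fails with EFBIG, the chain is compacted with m_collapse() (the ste driver uses m_defrag() for the same job), and the load is retried once. A stripped-down sketch of that retry; the tag, map, and MY_MAXTXSEGS limit are hypothetical stand-ins, and unlike stge_encap() this version frees the chain on any final error:

	/* Load an mbuf chain for DMA, collapsing it once on EFBIG. */
	static int
	my_load_tx_mbuf(bus_dma_tag_t tag, bus_dmamap_t map,
	    struct mbuf **m_head, bus_dma_segment_t *segs, int *nsegs)
	{
		struct mbuf *m;
		int error;

		error = bus_dmamap_load_mbuf_sg(tag, map, *m_head, segs,
		    nsegs, 0);
		if (error == EFBIG) {
			/* Too many segments; compact to <= MY_MAXTXSEGS. */
			m = m_collapse(*m_head, M_DONTWAIT, MY_MAXTXSEGS);
			if (m == NULL) {
				m_freem(*m_head);
				*m_head = NULL;
				return (ENOMEM);
			}
			*m_head = m;
			error = bus_dmamap_load_mbuf_sg(tag, map, *m_head,
			    segs, nsegs, 0);
		}
		if (error != 0) {
			m_freem(*m_head);
			*m_head = NULL;
		}
		return (error);
	}
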
*/ static void stge_start(struct ifnet *ifp) { struct stge_softc *sc; sc = ifp->if_softc; STGE_LOCK(sc); stge_start_locked(ifp); STGE_UNLOCK(sc); } static void stge_start_locked(struct ifnet *ifp) { struct stge_softc *sc; struct mbuf *m_head; int enq; sc = ifp->if_softc; STGE_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || sc->sc_link == 0) return; for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd); ) { if (sc->sc_cdata.stge_tx_cnt >= STGE_TX_HIWAT) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* * Pack the data into the transmit ring. If we * don't have room, set the OACTIVE flag and wait * for the NIC to drain the ring. */ if (stge_encap(sc, &m_head)) { if (m_head == NULL) break; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } enq++; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ ETHER_BPF_MTAP(ifp, m_head); } if (enq > 0) { /* Transmit */ CSR_WRITE_4(sc, STGE_DMACtrl, DMAC_TxDMAPollNow); /* Set a timeout in case the chip goes out to lunch. */ sc->sc_watchdog_timer = 5; } } /* * stge_watchdog: * * Watchdog timer handler. */ static void stge_watchdog(struct stge_softc *sc) { struct ifnet *ifp; STGE_LOCK_ASSERT(sc); if (sc->sc_watchdog_timer == 0 || --sc->sc_watchdog_timer) return; ifp = sc->sc_ifp; if_printf(sc->sc_ifp, "device timeout\n"); ifp->if_oerrors++; stge_init_locked(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) stge_start_locked(ifp); } /* * stge_ioctl: [ifnet interface function] * * Handle control requests from the operator. */ static int stge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct stge_softc *sc; struct ifreq *ifr; struct mii_data *mii; int error, mask; sc = ifp->if_softc; ifr = (struct ifreq *)data; error = 0; switch (cmd) { case SIOCSIFMTU: if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > STGE_JUMBO_MTU) error = EINVAL; else if (ifp->if_mtu != ifr->ifr_mtu) { ifp->if_mtu = ifr->ifr_mtu; STGE_LOCK(sc); stge_init_locked(sc); STGE_UNLOCK(sc); } break; case SIOCSIFFLAGS: STGE_LOCK(sc); if ((ifp->if_flags & IFF_UP) != 0) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { if (((ifp->if_flags ^ sc->sc_if_flags) & IFF_PROMISC) != 0) stge_set_filter(sc); } else { if (sc->sc_detach == 0) stge_init_locked(sc); } } else { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) stge_stop(sc); } sc->sc_if_flags = ifp->if_flags; STGE_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: STGE_LOCK(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) stge_set_multi(sc); STGE_UNLOCK(sc); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: mii = device_get_softc(sc->sc_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); break; case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ ifp->if_capenable; #ifdef DEVICE_POLLING if ((mask & IFCAP_POLLING) != 0) { if ((ifr->ifr_reqcap & IFCAP_POLLING) != 0) { error = ether_poll_register(stge_poll, ifp); if (error != 0) break; STGE_LOCK(sc); CSR_WRITE_2(sc, STGE_IntEnable, 0); ifp->if_capenable |= IFCAP_POLLING; STGE_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); if (error != 0) break; STGE_LOCK(sc); CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable); ifp->if_capenable &= ~IFCAP_POLLING; STGE_UNLOCK(sc); } } #endif if ((mask & IFCAP_HWCSUM) != 0) { ifp->if_capenable ^= IFCAP_HWCSUM; if ((IFCAP_HWCSUM & ifp->if_capenable) != 0 && (IFCAP_HWCSUM & ifp->if_capabilities) != 0) ifp->if_hwassist = STGE_CSUM_FEATURES; else ifp->if_hwassist = 0; } if ((mask & IFCAP_WOL) != 0 
&& (ifp->if_capabilities & IFCAP_WOL) != 0) { if ((mask & IFCAP_WOL_MAGIC) != 0) ifp->if_capenable ^= IFCAP_WOL_MAGIC; } if ((mask & IFCAP_VLAN_HWTAGGING) != 0) { ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { STGE_LOCK(sc); stge_vlan_setup(sc); STGE_UNLOCK(sc); } } VLAN_CAPABILITIES(ifp); break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } static void stge_link_task(void *arg, int pending) { struct stge_softc *sc; struct mii_data *mii; uint32_t v, ac; int i; sc = (struct stge_softc *)arg; STGE_LOCK(sc); mii = device_get_softc(sc->sc_miibus); if (mii->mii_media_status & IFM_ACTIVE) { if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) sc->sc_link = 1; } else sc->sc_link = 0; sc->sc_MACCtrl = 0; if (((mii->mii_media_active & IFM_GMASK) & IFM_FDX) != 0) sc->sc_MACCtrl |= MC_DuplexSelect; if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG0) != 0) sc->sc_MACCtrl |= MC_RxFlowControlEnable; if (((mii->mii_media_active & IFM_GMASK) & IFM_FLAG1) != 0) sc->sc_MACCtrl |= MC_TxFlowControlEnable; /* * Update STGE_MACCtrl register depending on link status. * (duplex, flow control etc) */ v = ac = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; v &= ~(MC_DuplexSelect|MC_RxFlowControlEnable|MC_TxFlowControlEnable); v |= sc->sc_MACCtrl; CSR_WRITE_4(sc, STGE_MACCtrl, v); if (((ac ^ sc->sc_MACCtrl) & MC_DuplexSelect) != 0) { /* Duplex setting changed, reset Tx/Rx functions. */ ac = CSR_READ_4(sc, STGE_AsicCtrl); ac |= AC_TxReset | AC_RxReset; CSR_WRITE_4(sc, STGE_AsicCtrl, ac); for (i = 0; i < STGE_TIMEOUT; i++) { DELAY(100); if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0) break; } if (i == STGE_TIMEOUT) device_printf(sc->sc_dev, "reset failed to complete\n"); } STGE_UNLOCK(sc); } static __inline int stge_tx_error(struct stge_softc *sc) { uint32_t txstat; int error; for (error = 0;;) { txstat = CSR_READ_4(sc, STGE_TxStatus); if ((txstat & TS_TxComplete) == 0) break; /* Tx underrun */ if ((txstat & TS_TxUnderrun) != 0) { /* * XXX * There should be a more better way to recover * from Tx underrun instead of a full reset. */ if (sc->sc_nerr++ < STGE_MAXERR) device_printf(sc->sc_dev, "Tx underrun, " "resetting...\n"); if (sc->sc_nerr == STGE_MAXERR) device_printf(sc->sc_dev, "too many errors; " "not reporting any more\n"); error = -1; break; } /* Maximum/Late collisions, Re-enable Tx MAC. */ if ((txstat & (TS_MaxCollisions|TS_LateCollision)) != 0) CSR_WRITE_4(sc, STGE_MACCtrl, (CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK) | MC_TxEnable); } return (error); } /* * stge_intr: * * Interrupt service routine. */ static void stge_intr(void *arg) { struct stge_softc *sc; struct ifnet *ifp; int reinit; uint16_t status; sc = (struct stge_softc *)arg; ifp = sc->sc_ifp; STGE_LOCK(sc); #ifdef DEVICE_POLLING if ((ifp->if_capenable & IFCAP_POLLING) != 0) goto done_locked; #endif status = CSR_READ_2(sc, STGE_IntStatus); if (sc->sc_suspended || (status & IS_InterruptStatus) == 0) goto done_locked; /* Disable interrupts. */ for (reinit = 0;;) { status = CSR_READ_2(sc, STGE_IntStatusAck); status &= sc->sc_IntEnable; if (status == 0) break; /* Host interface errors. */ if ((status & IS_HostError) != 0) { device_printf(sc->sc_dev, "Host interface error, resetting...\n"); reinit = 1; goto force_init; } /* Receive interrupts. */ if ((status & IS_RxDMAComplete) != 0) { stge_rxeof(sc); if ((status & IS_RFDListEnd) != 0) CSR_WRITE_4(sc, STGE_DMACtrl, DMAC_RxDMAPollNow); } /* Transmit interrupts. 
*/ if ((status & (IS_TxDMAComplete | IS_TxComplete)) != 0) stge_txeof(sc); /* Transmission errors. */ if ((status & IS_TxComplete) != 0) { if ((reinit = stge_tx_error(sc)) != 0) break; } } force_init: if (reinit != 0) stge_init_locked(sc); /* Re-enable interrupts. */ CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable); /* Try to get more packets going. */ if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) stge_start_locked(ifp); done_locked: STGE_UNLOCK(sc); } /* * stge_txeof: * * Helper; handle transmit interrupts. */ static void stge_txeof(struct stge_softc *sc) { struct ifnet *ifp; struct stge_txdesc *txd; uint64_t control; int cons; STGE_LOCK_ASSERT(sc); ifp = sc->sc_ifp; txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq); if (txd == NULL) return; bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag, sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_POSTREAD); /* * Go through our Tx list and free mbufs for those * frames which have been transmitted. */ for (cons = sc->sc_cdata.stge_tx_cons;; cons = (cons + 1) % STGE_TX_RING_CNT) { if (sc->sc_cdata.stge_tx_cnt <= 0) break; control = le64toh(sc->sc_rdata.stge_tx_ring[cons].tfd_control); if ((control & TFD_TFDDone) == 0) break; sc->sc_cdata.stge_tx_cnt--; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap); /* The output counter is updated via the statistics registers. */ m_freem(txd->tx_m); txd->tx_m = NULL; STAILQ_REMOVE_HEAD(&sc->sc_cdata.stge_txbusyq, tx_q); STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q); txd = STAILQ_FIRST(&sc->sc_cdata.stge_txbusyq); } sc->sc_cdata.stge_tx_cons = cons; if (sc->sc_cdata.stge_tx_cnt == 0) sc->sc_watchdog_timer = 0; bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag, sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } static __inline void stge_discard_rxbuf(struct stge_softc *sc, int idx) { struct stge_rfd *rfd; rfd = &sc->sc_rdata.stge_rx_ring[idx]; rfd->rfd_status = 0; } #ifndef __NO_STRICT_ALIGNMENT /* * It seems that the TC9021's DMA engine has alignment restrictions in * DMA scatter operations. The first DMA segment has no address * alignment restrictions, but the rest should be aligned on a 4(?) byte * boundary. Otherwise it would corrupt random memory. Since we don't * know which one is used for the first segment in advance, we simply * don't align at all. * To avoid copying over an entire frame to align, we allocate a new * mbuf and copy the ethernet header to the new mbuf. The new mbuf is * prepended to the existing mbuf chain. */ static __inline struct mbuf * stge_fixup_rx(struct stge_softc *sc, struct mbuf *m) { struct mbuf *n; n = NULL; if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) { bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len); m->m_data += ETHER_HDR_LEN; n = m; } else { MGETHDR(n, M_DONTWAIT, MT_DATA); if (n != NULL) { bcopy(m->m_data, n->m_data, ETHER_HDR_LEN); m->m_data += ETHER_HDR_LEN; m->m_len -= ETHER_HDR_LEN; n->m_len = ETHER_HDR_LEN; M_MOVE_PKTHDR(n, m); n->m_next = m; } else m_freem(m); } return (n); } #endif /* * stge_rxeof: * * Helper; handle receive interrupts.
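stge_txeof() above and stge_rxeof() below share one shape: walk the ring from the consumer index and stop at the first descriptor whose done bit (TFD_TFDDone or RFD_RFDDone) the hardware has not yet set. A generic sketch of that loop; my_ring, desc_done(), and reclaim_one() are hypothetical:

	/* Hypothetical ring state, as in stge_cdata. */
	struct my_ring {
		int	cons;	/* consumer index */
		int	cnt;	/* descriptors outstanding */
		int	size;	/* ring size */
	};

	static int
	my_harvest(struct my_ring *r)
	{
		int cons, n;

		n = 0;
		for (cons = r->cons; r->cnt > 0;
		    cons = (cons + 1) % r->size) {
			if (!desc_done(r, cons))	/* hw still owns it */
				break;
			reclaim_one(r, cons);		/* unmap + free mbuf */
			r->cnt--;
			n++;
		}
		r->cons = cons;
		return (n);			/* number reclaimed */
	}
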
*/ -static void +static int stge_rxeof(struct stge_softc *sc) { struct ifnet *ifp; struct stge_rxdesc *rxd; struct mbuf *mp, *m; uint64_t status64; uint32_t status; - int cons, prog; + int cons, prog, rx_npkts; STGE_LOCK_ASSERT(sc); + rx_npkts = 0; ifp = sc->sc_ifp; bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag, sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_POSTREAD); prog = 0; for (cons = sc->sc_cdata.stge_rx_cons; prog < STGE_RX_RING_CNT; prog++, cons = (cons + 1) % STGE_RX_RING_CNT) { status64 = le64toh(sc->sc_rdata.stge_rx_ring[cons].rfd_status); status = RFD_RxStatus(status64); if ((status & RFD_RFDDone) == 0) break; #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { if (sc->sc_cdata.stge_rxcycles <= 0) break; sc->sc_cdata.stge_rxcycles--; } #endif prog++; rxd = &sc->sc_cdata.stge_rxdesc[cons]; mp = rxd->rx_m; /* * If the packet had an error, drop it. Note we count * the error later in the periodic stats update. */ if ((status & RFD_FrameEnd) != 0 && (status & (RFD_RxFIFOOverrun | RFD_RxRuntFrame | RFD_RxAlignmentError | RFD_RxFCSError | RFD_RxLengthError)) != 0) { stge_discard_rxbuf(sc, cons); if (sc->sc_cdata.stge_rxhead != NULL) { m_freem(sc->sc_cdata.stge_rxhead); STGE_RXCHAIN_RESET(sc); } continue; } /* * Add a new receive buffer to the ring. */ if (stge_newbuf(sc, cons) != 0) { ifp->if_iqdrops++; stge_discard_rxbuf(sc, cons); if (sc->sc_cdata.stge_rxhead != NULL) { m_freem(sc->sc_cdata.stge_rxhead); STGE_RXCHAIN_RESET(sc); } continue; } if ((status & RFD_FrameEnd) != 0) mp->m_len = RFD_RxDMAFrameLen(status) - sc->sc_cdata.stge_rxlen; sc->sc_cdata.stge_rxlen += mp->m_len; /* Chain mbufs. */ if (sc->sc_cdata.stge_rxhead == NULL) { sc->sc_cdata.stge_rxhead = mp; sc->sc_cdata.stge_rxtail = mp; } else { mp->m_flags &= ~M_PKTHDR; sc->sc_cdata.stge_rxtail->m_next = mp; sc->sc_cdata.stge_rxtail = mp; } if ((status & RFD_FrameEnd) != 0) { m = sc->sc_cdata.stge_rxhead; m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = sc->sc_cdata.stge_rxlen; if (m->m_pkthdr.len > sc->sc_if_framesize) { m_freem(m); STGE_RXCHAIN_RESET(sc); continue; } /* * Set the incoming checksum information for * the packet. */ if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { if ((status & RFD_IPDetected) != 0) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if ((status & RFD_IPError) == 0) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; } if (((status & RFD_TCPDetected) != 0 && (status & RFD_TCPError) == 0) || ((status & RFD_UDPDetected) != 0 && (status & RFD_UDPError) == 0)) { m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); m->m_pkthdr.csum_data = 0xffff; } } #ifndef __NO_STRICT_ALIGNMENT if (sc->sc_if_framesize > (MCLBYTES - ETHER_ALIGN)) { if ((m = stge_fixup_rx(sc, m)) == NULL) { STGE_RXCHAIN_RESET(sc); continue; } } #endif /* Check for VLAN tagged packets. */ if ((status & RFD_VLANDetected) != 0 && (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) { m->m_pkthdr.ether_vtag = RFD_TCI(status64); m->m_flags |= M_VLANTAG; } STGE_UNLOCK(sc); /* Pass it on. */ (*ifp->if_input)(ifp, m); STGE_LOCK(sc); + rx_npkts++; STGE_RXCHAIN_RESET(sc); } } if (prog > 0) { /* Update the consumer index. 
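Within the loop above, one frame can span several receive descriptors, so completed buffers are appended to the stge_rxhead/stge_rxtail chain and the assembled packet is handed to if_input() (and rx_npkts bumped) only when RFD_FrameEnd is seen; the new return value therefore counts whole packets, not descriptors. A sketch of that head/tail chaining with a hypothetical chain struct:

	/* Hypothetical reassembly state, as in stge_rxhead/stge_rxtail. */
	struct my_rxchain {
		struct mbuf *head;
		struct mbuf *tail;
	};

	/* Append one completed rx buffer; return the frame on frame-end. */
	static struct mbuf *
	my_rx_chain(struct my_rxchain *c, struct mbuf *mp, int frame_end)
	{
		struct mbuf *m;

		if (c->head == NULL) {
			c->head = c->tail = mp;
		} else {
			mp->m_flags &= ~M_PKTHDR;	/* head keeps the pkthdr */
			c->tail->m_next = mp;
			c->tail = mp;
		}
		if (!frame_end)
			return (NULL);		/* more fragments to come */
		m = c->head;
		c->head = c->tail = NULL;	/* reset for the next frame */
		return (m);
	}
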
*/ sc->sc_cdata.stge_rx_cons = cons; bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag, sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } + return (rx_npkts); } #ifdef DEVICE_POLLING -static void +static int stge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct stge_softc *sc; uint16_t status; + int rx_npkts; + rx_npkts = 0; sc = ifp->if_softc; STGE_LOCK(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { STGE_UNLOCK(sc); - return; + return (rx_npkts); } sc->sc_cdata.stge_rxcycles = count; - stge_rxeof(sc); + rx_npkts = stge_rxeof(sc); stge_txeof(sc); if (cmd == POLL_AND_CHECK_STATUS) { status = CSR_READ_2(sc, STGE_IntStatus); status &= sc->sc_IntEnable; if (status != 0) { if ((status & IS_HostError) != 0) { device_printf(sc->sc_dev, "Host interface error, resetting...\n"); stge_init_locked(sc); } if ((status & IS_TxComplete) != 0) { if (stge_tx_error(sc) != 0) stge_init_locked(sc); } } } if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) stge_start_locked(ifp); STGE_UNLOCK(sc); + return (rx_npkts); } #endif /* DEVICE_POLLING */ /* * stge_tick: * * One second timer, used to tick the MII. */ static void stge_tick(void *arg) { struct stge_softc *sc; struct mii_data *mii; sc = (struct stge_softc *)arg; STGE_LOCK_ASSERT(sc); mii = device_get_softc(sc->sc_miibus); mii_tick(mii); /* Update statistics counters. */ stge_stats_update(sc); /* * Relcaim any pending Tx descriptors to release mbufs in a * timely manner as we don't generate Tx completion interrupts * for every frame. This limits the delay to a maximum of one * second. */ if (sc->sc_cdata.stge_tx_cnt != 0) stge_txeof(sc); stge_watchdog(sc); callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc); } /* * stge_stats_update: * * Read the TC9021 statistics counters. */ static void stge_stats_update(struct stge_softc *sc) { struct ifnet *ifp; STGE_LOCK_ASSERT(sc); ifp = sc->sc_ifp; CSR_READ_4(sc,STGE_OctetRcvOk); ifp->if_ipackets += CSR_READ_4(sc, STGE_FramesRcvdOk); ifp->if_ierrors += CSR_READ_2(sc, STGE_FramesLostRxErrors); CSR_READ_4(sc, STGE_OctetXmtdOk); ifp->if_opackets += CSR_READ_4(sc, STGE_FramesXmtdOk); ifp->if_collisions += CSR_READ_4(sc, STGE_LateCollisions) + CSR_READ_4(sc, STGE_MultiColFrames) + CSR_READ_4(sc, STGE_SingleColFrames); ifp->if_oerrors += CSR_READ_2(sc, STGE_FramesAbortXSColls) + CSR_READ_2(sc, STGE_FramesWEXDeferal); } /* * stge_reset: * * Perform a soft reset on the TC9021. */ static void stge_reset(struct stge_softc *sc, uint32_t how) { uint32_t ac; uint8_t v; int i, dv; STGE_LOCK_ASSERT(sc); dv = 5000; ac = CSR_READ_4(sc, STGE_AsicCtrl); switch (how) { case STGE_RESET_TX: ac |= AC_TxReset | AC_FIFO; dv = 100; break; case STGE_RESET_RX: ac |= AC_RxReset | AC_FIFO; dv = 100; break; case STGE_RESET_FULL: default: /* * Only assert RstOut if we're fiber. We need GMII clocks * to be present in order for the reset to complete on fiber * cards. */ ac |= AC_GlobalReset | AC_RxReset | AC_TxReset | AC_DMA | AC_FIFO | AC_Network | AC_Host | AC_AutoInit | (sc->sc_usefiber ? AC_RstOut : 0); break; } CSR_WRITE_4(sc, STGE_AsicCtrl, ac); /* Account for reset problem at 10Mbps. */ DELAY(dv); for (i = 0; i < STGE_TIMEOUT; i++) { if ((CSR_READ_4(sc, STGE_AsicCtrl) & AC_ResetBusy) == 0) break; DELAY(dv); } if (i == STGE_TIMEOUT) device_printf(sc->sc_dev, "reset failed to complete\n"); /* Set LED, from Linux IPG driver. 
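The stge_poll() change above is the heart of this commit: a DEVICE_POLLING handler now returns how many packets its Rx pass delivered instead of void, giving the polling core a measure of useful work per call. Every converted handler follows the same shape, sketched below with hypothetical my_* names (the POLL_AND_CHECK_STATUS error handling is omitted):

	#ifdef DEVICE_POLLING
	static int
	my_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
	{
		struct my_softc *sc = ifp->if_softc;
		int rx_npkts = 0;

		MY_LOCK(sc);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			sc->rxcycles = count;	/* Rx budget for this call */
			rx_npkts = my_rxeof(sc); /* frames passed up */
			my_txeof(sc);		/* reap Tx completions too */
			/* POLL_AND_CHECK_STATUS handling omitted. */
		}
		MY_UNLOCK(sc);
		return (rx_npkts);
	}
	#endif
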
*/ ac = CSR_READ_4(sc, STGE_AsicCtrl); ac &= ~(AC_LEDMode | AC_LEDSpeed | AC_LEDModeBit1); if ((sc->sc_led & 0x01) != 0) ac |= AC_LEDMode; if ((sc->sc_led & 0x03) != 0) ac |= AC_LEDModeBit1; if ((sc->sc_led & 0x08) != 0) ac |= AC_LEDSpeed; CSR_WRITE_4(sc, STGE_AsicCtrl, ac); /* Set PHY, from Linux IPG driver */ v = CSR_READ_1(sc, STGE_PhySet); v &= ~(PS_MemLenb9b | PS_MemLen | PS_NonCompdet); v |= ((sc->sc_led & 0x70) >> 4); CSR_WRITE_1(sc, STGE_PhySet, v); } /* * stge_init: [ ifnet interface function ] * * Initialize the interface. */ static void stge_init(void *xsc) { struct stge_softc *sc; sc = (struct stge_softc *)xsc; STGE_LOCK(sc); stge_init_locked(sc); STGE_UNLOCK(sc); } static void stge_init_locked(struct stge_softc *sc) { struct ifnet *ifp; struct mii_data *mii; uint16_t eaddr[3]; uint32_t v; int error; STGE_LOCK_ASSERT(sc); ifp = sc->sc_ifp; mii = device_get_softc(sc->sc_miibus); /* * Cancel any pending I/O. */ stge_stop(sc); /* * Reset the chip to a known state. */ stge_reset(sc, STGE_RESET_FULL); /* Init descriptors. */ error = stge_init_rx_ring(sc); if (error != 0) { device_printf(sc->sc_dev, "initialization failed: no memory for rx buffers\n"); stge_stop(sc); goto out; } stge_init_tx_ring(sc); /* Set the station address. */ bcopy(IF_LLADDR(ifp), eaddr, ETHER_ADDR_LEN); CSR_WRITE_2(sc, STGE_StationAddress0, htole16(eaddr[0])); CSR_WRITE_2(sc, STGE_StationAddress1, htole16(eaddr[1])); CSR_WRITE_2(sc, STGE_StationAddress2, htole16(eaddr[2])); /* * Set the statistics masks. Disable all the RMON stats, * and disable selected stats in the non-RMON stats registers. */ CSR_WRITE_4(sc, STGE_RMONStatisticsMask, 0xffffffff); CSR_WRITE_4(sc, STGE_StatisticsMask, (1U << 1) | (1U << 2) | (1U << 3) | (1U << 4) | (1U << 5) | (1U << 6) | (1U << 7) | (1U << 8) | (1U << 9) | (1U << 10) | (1U << 13) | (1U << 14) | (1U << 15) | (1U << 19) | (1U << 20) | (1U << 21)); /* Set up the receive filter. */ stge_set_filter(sc); /* Program multicast filter. */ stge_set_multi(sc); /* * Give the transmit and receive ring to the chip. */ CSR_WRITE_4(sc, STGE_TFDListPtrHi, STGE_ADDR_HI(STGE_TX_RING_ADDR(sc, 0))); CSR_WRITE_4(sc, STGE_TFDListPtrLo, STGE_ADDR_LO(STGE_TX_RING_ADDR(sc, 0))); CSR_WRITE_4(sc, STGE_RFDListPtrHi, STGE_ADDR_HI(STGE_RX_RING_ADDR(sc, 0))); CSR_WRITE_4(sc, STGE_RFDListPtrLo, STGE_ADDR_LO(STGE_RX_RING_ADDR(sc, 0))); /* * Initialize the Tx auto-poll period. It's OK to make this number * large (255 is the max, but we use 127) -- we explicitly kick the * transmit engine when there's actually a packet. */ CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127); /* ..and the Rx auto-poll period. */ CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1); /* Initialize the Tx start threshold. */ CSR_WRITE_2(sc, STGE_TxStartThresh, sc->sc_txthresh); /* Rx DMA thresholds, from Linux */ CSR_WRITE_1(sc, STGE_RxDMABurstThresh, 0x30); CSR_WRITE_1(sc, STGE_RxDMAUrgentThresh, 0x30); /* Rx early threhold, from Linux */ CSR_WRITE_2(sc, STGE_RxEarlyThresh, 0x7ff); /* Tx DMA thresholds, from Linux */ CSR_WRITE_1(sc, STGE_TxDMABurstThresh, 0x30); CSR_WRITE_1(sc, STGE_TxDMAUrgentThresh, 0x04); /* * Initialize the Rx DMA interrupt control register. We * request an interrupt after every incoming packet, but * defer it for sc_rxint_dmawait us. When the number of * interrupts pending reaches STGE_RXINT_NFRAME, we stop * deferring the interrupt, and signal it immediately. 
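Concretely, the two coalescing knobs above trade latency for interrupt rate: with sc_rxint_nframe = 8 and sc_rxint_dmawait = 30, a back-to-back burst costs one interrupt per eight frames, while a lone frame is still signalled within about 30 us. A sketch of packing the two fields into the register value; the 16-bit split below is hypothetical, and the driver's RDIC_RxFrameCount()/RDIC_RxDMAWaitTime() macros define the real layout:

	/*
	 * Pack frame-count and wait-time coalescing fields into one
	 * register value (hypothetical field positions).
	 */
	static uint32_t
	my_rx_coalesce(uint32_t nframe, uint32_t wait_ticks)
	{
		return ((nframe & 0xff) << 16 | (wait_ticks & 0xffff));
	}
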
*/ CSR_WRITE_4(sc, STGE_RxDMAIntCtrl, RDIC_RxFrameCount(sc->sc_rxint_nframe) | RDIC_RxDMAWaitTime(STGE_RXINT_USECS2TICK(sc->sc_rxint_dmawait))); /* * Initialize the interrupt mask. */ sc->sc_IntEnable = IS_HostError | IS_TxComplete | IS_TxDMAComplete | IS_RxDMAComplete | IS_RFDListEnd; #ifdef DEVICE_POLLING /* Disable interrupts if we are polling. */ if ((ifp->if_capenable & IFCAP_POLLING) != 0) CSR_WRITE_2(sc, STGE_IntEnable, 0); else #endif CSR_WRITE_2(sc, STGE_IntEnable, sc->sc_IntEnable); /* * Configure the DMA engine. * XXX Should auto-tune TxBurstLimit. */ CSR_WRITE_4(sc, STGE_DMACtrl, sc->sc_DMACtrl | DMAC_TxBurstLimit(3)); /* * Send a PAUSE frame when we reach 29,696 bytes in the Rx * FIFO, and send an un-PAUSE frame when we reach 3056 bytes * in the Rx FIFO. */ CSR_WRITE_2(sc, STGE_FlowOnTresh, 29696 / 16); CSR_WRITE_2(sc, STGE_FlowOffThresh, 3056 / 16); /* * Set the maximum frame size. */ sc->sc_if_framesize = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; CSR_WRITE_2(sc, STGE_MaxFrameSize, sc->sc_if_framesize); /* * Initialize MacCtrl -- do it before setting the media, * as setting the media will actually program the register. * * Note: We have to poke the IFS value before poking * anything else. */ /* Tx/Rx MAC should be disabled before programming IFS.*/ CSR_WRITE_4(sc, STGE_MACCtrl, MC_IFSSelect(MC_IFS96bit)); stge_vlan_setup(sc); if (sc->sc_rev >= 6) { /* >= B.2 */ /* Multi-frag frame bug work-around. */ CSR_WRITE_2(sc, STGE_DebugCtrl, CSR_READ_2(sc, STGE_DebugCtrl) | 0x0200); /* Tx Poll Now bug work-around. */ CSR_WRITE_2(sc, STGE_DebugCtrl, CSR_READ_2(sc, STGE_DebugCtrl) | 0x0010); /* Tx Poll Now bug work-around. */ CSR_WRITE_2(sc, STGE_DebugCtrl, CSR_READ_2(sc, STGE_DebugCtrl) | 0x0020); } v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; v |= MC_StatisticsEnable | MC_TxEnable | MC_RxEnable; CSR_WRITE_4(sc, STGE_MACCtrl, v); /* * It seems that transmitting frames without checking the state of * Rx/Tx MAC wedge the hardware. */ stge_start_tx(sc); stge_start_rx(sc); sc->sc_link = 0; /* * Set the current media. */ mii_mediachg(mii); /* * Start the one second MII clock. */ callout_reset(&sc->sc_tick_ch, hz, stge_tick, sc); /* * ...all done! */ ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; out: if (error != 0) device_printf(sc->sc_dev, "interface not running\n"); } static void stge_vlan_setup(struct stge_softc *sc) { struct ifnet *ifp; uint32_t v; ifp = sc->sc_ifp; /* * The NIC always copy a VLAN tag regardless of STGE_MACCtrl * MC_AutoVLANuntagging bit. * MC_AutoVLANtagging bit selects which VLAN source to use * between STGE_VLANTag and TFC. However TFC TFD_VLANTagInsert * bit has priority over MC_AutoVLANtagging bit. So we always * use TFC instead of STGE_VLANTag register. */ v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) v |= MC_AutoVLANuntagging; else v &= ~MC_AutoVLANuntagging; CSR_WRITE_4(sc, STGE_MACCtrl, v); } /* * Stop transmission on the interface. */ static void stge_stop(struct stge_softc *sc) { struct ifnet *ifp; struct stge_txdesc *txd; struct stge_rxdesc *rxd; uint32_t v; int i; STGE_LOCK_ASSERT(sc); /* * Stop the one second clock. */ callout_stop(&sc->sc_tick_ch); sc->sc_watchdog_timer = 0; /* * Disable interrupts. */ CSR_WRITE_2(sc, STGE_IntEnable, 0); /* * Stop receiver, transmitter, and stats update. */ stge_stop_rx(sc); stge_stop_tx(sc); v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; v |= MC_StatisticsDisable; CSR_WRITE_4(sc, STGE_MACCtrl, v); /* * Stop the transmit and receive DMA. 
*/ stge_dma_wait(sc); CSR_WRITE_4(sc, STGE_TFDListPtrHi, 0); CSR_WRITE_4(sc, STGE_TFDListPtrLo, 0); CSR_WRITE_4(sc, STGE_RFDListPtrHi, 0); CSR_WRITE_4(sc, STGE_RFDListPtrLo, 0); /* * Free RX and TX mbufs still in the queues. */ for (i = 0; i < STGE_RX_RING_CNT; i++) { rxd = &sc->sc_cdata.stge_rxdesc[i]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap); m_freem(rxd->rx_m); rxd->rx_m = NULL; } } for (i = 0; i < STGE_TX_RING_CNT; i++) { txd = &sc->sc_cdata.stge_txdesc[i]; if (txd->tx_m != NULL) { bus_dmamap_sync(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_cdata.stge_tx_tag, txd->tx_dmamap); m_freem(txd->tx_m); txd->tx_m = NULL; } } /* * Mark the interface down and cancel the watchdog timer. */ ifp = sc->sc_ifp; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); sc->sc_link = 0; } static void stge_start_tx(struct stge_softc *sc) { uint32_t v; int i; v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; if ((v & MC_TxEnabled) != 0) return; v |= MC_TxEnable; CSR_WRITE_4(sc, STGE_MACCtrl, v); CSR_WRITE_1(sc, STGE_TxDMAPollPeriod, 127); for (i = STGE_TIMEOUT; i > 0; i--) { DELAY(10); v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; if ((v & MC_TxEnabled) != 0) break; } if (i == 0) device_printf(sc->sc_dev, "Starting Tx MAC timed out\n"); } static void stge_start_rx(struct stge_softc *sc) { uint32_t v; int i; v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; if ((v & MC_RxEnabled) != 0) return; v |= MC_RxEnable; CSR_WRITE_4(sc, STGE_MACCtrl, v); CSR_WRITE_1(sc, STGE_RxDMAPollPeriod, 1); for (i = STGE_TIMEOUT; i > 0; i--) { DELAY(10); v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; if ((v & MC_RxEnabled) != 0) break; } if (i == 0) device_printf(sc->sc_dev, "Starting Rx MAC timed out\n"); } static void stge_stop_tx(struct stge_softc *sc) { uint32_t v; int i; v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; if ((v & MC_TxEnabled) == 0) return; v |= MC_TxDisable; CSR_WRITE_4(sc, STGE_MACCtrl, v); for (i = STGE_TIMEOUT; i > 0; i--) { DELAY(10); v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; if ((v & MC_TxEnabled) == 0) break; } if (i == 0) device_printf(sc->sc_dev, "Stopping Tx MAC timed out\n"); } static void stge_stop_rx(struct stge_softc *sc) { uint32_t v; int i; v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; if ((v & MC_RxEnabled) == 0) return; v |= MC_RxDisable; CSR_WRITE_4(sc, STGE_MACCtrl, v); for (i = STGE_TIMEOUT; i > 0; i--) { DELAY(10); v = CSR_READ_4(sc, STGE_MACCtrl) & MC_MASK; if ((v & MC_RxEnabled) == 0) break; } if (i == 0) device_printf(sc->sc_dev, "Stopping Rx MAC timed out\n"); } static void stge_init_tx_ring(struct stge_softc *sc) { struct stge_ring_data *rd; struct stge_txdesc *txd; bus_addr_t addr; int i; STAILQ_INIT(&sc->sc_cdata.stge_txfreeq); STAILQ_INIT(&sc->sc_cdata.stge_txbusyq); sc->sc_cdata.stge_tx_prod = 0; sc->sc_cdata.stge_tx_cons = 0; sc->sc_cdata.stge_tx_cnt = 0; rd = &sc->sc_rdata; bzero(rd->stge_tx_ring, STGE_TX_RING_SZ); for (i = 0; i < STGE_TX_RING_CNT; i++) { if (i == (STGE_TX_RING_CNT - 1)) addr = STGE_TX_RING_ADDR(sc, 0); else addr = STGE_TX_RING_ADDR(sc, i + 1); rd->stge_tx_ring[i].tfd_next = htole64(addr); rd->stge_tx_ring[i].tfd_control = htole64(TFD_TFDDone); txd = &sc->sc_cdata.stge_txdesc[i]; STAILQ_INSERT_TAIL(&sc->sc_cdata.stge_txfreeq, txd, tx_q); } bus_dmamap_sync(sc->sc_cdata.stge_tx_ring_tag, sc->sc_cdata.stge_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } static int stge_init_rx_ring(struct stge_softc 
*sc) { struct stge_ring_data *rd; bus_addr_t addr; int i; sc->sc_cdata.stge_rx_cons = 0; STGE_RXCHAIN_RESET(sc); rd = &sc->sc_rdata; bzero(rd->stge_rx_ring, STGE_RX_RING_SZ); for (i = 0; i < STGE_RX_RING_CNT; i++) { if (stge_newbuf(sc, i) != 0) return (ENOBUFS); if (i == (STGE_RX_RING_CNT - 1)) addr = STGE_RX_RING_ADDR(sc, 0); else addr = STGE_RX_RING_ADDR(sc, i + 1); rd->stge_rx_ring[i].rfd_next = htole64(addr); rd->stge_rx_ring[i].rfd_status = 0; } bus_dmamap_sync(sc->sc_cdata.stge_rx_ring_tag, sc->sc_cdata.stge_rx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } /* * stge_newbuf: * * Add a receive buffer to the indicated descriptor. */ static int stge_newbuf(struct stge_softc *sc, int idx) { struct stge_rxdesc *rxd; struct stge_rfd *rfd; struct mbuf *m; bus_dma_segment_t segs[1]; bus_dmamap_t map; int nsegs; m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MCLBYTES; /* * The hardware requires 4bytes aligned DMA address when JUMBO * frame is used. */ if (sc->sc_if_framesize <= (MCLBYTES - ETHER_ALIGN)) m_adj(m, ETHER_ALIGN); if (bus_dmamap_load_mbuf_sg(sc->sc_cdata.stge_rx_tag, sc->sc_cdata.stge_rx_sparemap, m, segs, &nsegs, 0) != 0) { m_freem(m); return (ENOBUFS); } KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); rxd = &sc->sc_cdata.stge_rxdesc[idx]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap); } map = rxd->rx_dmamap; rxd->rx_dmamap = sc->sc_cdata.stge_rx_sparemap; sc->sc_cdata.stge_rx_sparemap = map; bus_dmamap_sync(sc->sc_cdata.stge_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_PREREAD); rxd->rx_m = m; rfd = &sc->sc_rdata.stge_rx_ring[idx]; rfd->rfd_frag.frag_word0 = htole64(FRAG_ADDR(segs[0].ds_addr) | FRAG_LEN(segs[0].ds_len)); rfd->rfd_status = 0; return (0); } /* * stge_set_filter: * * Set up the receive filter. */ static void stge_set_filter(struct stge_softc *sc) { struct ifnet *ifp; uint16_t mode; STGE_LOCK_ASSERT(sc); ifp = sc->sc_ifp; mode = CSR_READ_2(sc, STGE_ReceiveMode); mode |= RM_ReceiveUnicast; if ((ifp->if_flags & IFF_BROADCAST) != 0) mode |= RM_ReceiveBroadcast; else mode &= ~RM_ReceiveBroadcast; if ((ifp->if_flags & IFF_PROMISC) != 0) mode |= RM_ReceiveAllFrames; else mode &= ~RM_ReceiveAllFrames; CSR_WRITE_2(sc, STGE_ReceiveMode, mode); } static void stge_set_multi(struct stge_softc *sc) { struct ifnet *ifp; struct ifmultiaddr *ifma; uint32_t crc; uint32_t mchash[2]; uint16_t mode; int count; STGE_LOCK_ASSERT(sc); ifp = sc->sc_ifp; mode = CSR_READ_2(sc, STGE_ReceiveMode); if ((ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { if ((ifp->if_flags & IFF_PROMISC) != 0) mode |= RM_ReceiveAllFrames; else if ((ifp->if_flags & IFF_ALLMULTI) != 0) mode |= RM_ReceiveMulticast; CSR_WRITE_2(sc, STGE_ReceiveMode, mode); return; } /* clear existing filters. */ CSR_WRITE_4(sc, STGE_HashTable0, 0); CSR_WRITE_4(sc, STGE_HashTable1, 0); /* * Set up the multicast address filter by passing all multicast * addresses through a CRC generator, and then using the low-order * 6 bits as an index into the 64 bit multicast hash table. The * high order bits select the register, while the rest of the bits * select the bit within the register. 
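The hash described above is easy to compute by hand: CRC-32 (big-endian) over the six address bytes, keep the low six bits as the bin number, then bit 5 selects one of the two 32-bit hash registers and bits 0-4 select the bit within it. The same computation stge_set_multi() performs below, isolated into a helper:

	/* 64-bin multicast hash, as stge_set_multi() computes it. */
	static void
	my_hash_mcaddr(const uint8_t addr[ETHER_ADDR_LEN], uint32_t mchash[2])
	{
		uint32_t crc;

		crc = ether_crc32_be(addr, ETHER_ADDR_LEN);
		crc &= 0x3f;			/* 6 bits: bins 0..63 */
		mchash[crc >> 5] |= 1 << (crc & 0x1f);
		/* e.g. crc = 42 (0x2a): word 1, bit 10 */
	}
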
*/ bzero(mchash, sizeof(mchash)); count = 0; IF_ADDR_LOCK(sc->sc_ifp); TAILQ_FOREACH(ifma, &sc->sc_ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; crc = ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN); /* Just want the 6 least significant bits. */ crc &= 0x3f; /* Set the corresponding bit in the hash table. */ mchash[crc >> 5] |= 1 << (crc & 0x1f); count++; } IF_ADDR_UNLOCK(ifp); mode &= ~(RM_ReceiveMulticast | RM_ReceiveAllFrames); if (count > 0) mode |= RM_ReceiveMulticastHash; else mode &= ~RM_ReceiveMulticastHash; CSR_WRITE_4(sc, STGE_HashTable0, mchash[0]); CSR_WRITE_4(sc, STGE_HashTable1, mchash[1]); CSR_WRITE_2(sc, STGE_ReceiveMode, mode); } static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) { int error, value; if (!arg1) return (EINVAL); value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if (error || !req->newptr) return (error); if (value < low || value > high) return (EINVAL); *(int *)arg1 = value; return (0); } static int sysctl_hw_stge_rxint_nframe(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, STGE_RXINT_NFRAME_MIN, STGE_RXINT_NFRAME_MAX)); } static int sysctl_hw_stge_rxint_dmawait(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, STGE_RXINT_DMAWAIT_MIN, STGE_RXINT_DMAWAIT_MAX)); } Index: head/sys/dev/tsec/if_tsec.c =================================================================== --- head/sys/dev/tsec/if_tsec.c (revision 193095) +++ head/sys/dev/tsec/if_tsec.c (revision 193096) @@ -1,1922 +1,1930 @@ /*- * Copyright (C) 2007-2008 Semihalf, Rafal Jaworowski * Copyright (C) 2006-2007 Semihalf, Piotr Kruszynski * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Freescale integrated Three-Speed Ethernet Controller (TSEC) driver. 
*/ #include __FBSDID("$FreeBSD$"); #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag, bus_dmamap_t *dmap, bus_size_t dsize, void **vaddr, void *raddr, const char *dname); static void tsec_dma_ctl(struct tsec_softc *sc, int state); static int tsec_encap(struct tsec_softc *sc, struct mbuf *m_head, int fcb_inserted); static void tsec_free_dma(struct tsec_softc *sc); static void tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap, void *vaddr); static int tsec_ifmedia_upd(struct ifnet *ifp); static void tsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr); static int tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp, uint32_t *paddr); static void tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error); static void tsec_intrs_ctl(struct tsec_softc *sc, int state); static void tsec_init(void *xsc); static void tsec_init_locked(struct tsec_softc *sc); static int tsec_ioctl(struct ifnet *ifp, u_long command, caddr_t data); static void tsec_reset_mac(struct tsec_softc *sc); static void tsec_setfilter(struct tsec_softc *sc); static void tsec_set_mac_address(struct tsec_softc *sc); static void tsec_start(struct ifnet *ifp); static void tsec_start_locked(struct ifnet *ifp); static void tsec_stop(struct tsec_softc *sc); static void tsec_tick(void *arg); static void tsec_watchdog(struct tsec_softc *sc); static void tsec_add_sysctls(struct tsec_softc *sc); static int tsec_sysctl_ic_time(SYSCTL_HANDLER_ARGS); static int tsec_sysctl_ic_count(SYSCTL_HANDLER_ARGS); static void tsec_set_rxic(struct tsec_softc *sc); static void tsec_set_txic(struct tsec_softc *sc); -static void tsec_receive_intr_locked(struct tsec_softc *sc, int count); +static int tsec_receive_intr_locked(struct tsec_softc *sc, int count); static void tsec_transmit_intr_locked(struct tsec_softc *sc); static void tsec_error_intr_locked(struct tsec_softc *sc, int count); static void tsec_offload_setup(struct tsec_softc *sc); static void tsec_offload_process_frame(struct tsec_softc *sc, struct mbuf *m); static void tsec_setup_multicast(struct tsec_softc *sc); static int tsec_set_mtu(struct tsec_softc *sc, unsigned int mtu); struct tsec_softc *tsec0_sc = NULL; /* XXX ugly hack! */ devclass_t tsec_devclass; DRIVER_MODULE(miibus, tsec, miibus_driver, miibus_devclass, 0, 0); MODULE_DEPEND(tsec, ether, 1, 1, 1); MODULE_DEPEND(tsec, miibus, 1, 1, 1); int tsec_attach(struct tsec_softc *sc) { uint8_t hwaddr[ETHER_ADDR_LEN]; struct ifnet *ifp; bus_dmamap_t *map_ptr; bus_dmamap_t **map_pptr; int error = 0; int i; /* Reset all TSEC counters */ TSEC_TX_RX_COUNTERS_INIT(sc); /* Stop DMA engine if enabled by firmware */ tsec_dma_ctl(sc, 0); /* Reset MAC */ tsec_reset_mac(sc); /* Disable interrupts for now */ tsec_intrs_ctl(sc, 0); /* Configure defaults for interrupts coalescing */ sc->rx_ic_time = 768; sc->rx_ic_count = 16; sc->tx_ic_time = 768; sc->tx_ic_count = 16; tsec_set_rxic(sc); tsec_set_txic(sc); tsec_add_sysctls(sc); /* Allocate a busdma tag and DMA safe memory for TX descriptors. 
*/ error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_tx_dtag, &sc->tsec_tx_dmap, sizeof(*sc->tsec_tx_vaddr) * TSEC_TX_NUM_DESC, (void **)&sc->tsec_tx_vaddr, &sc->tsec_tx_raddr, "TX"); if (error) { tsec_detach(sc); return (ENXIO); } /* Allocate a busdma tag and DMA safe memory for RX descriptors. */ error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_rx_dtag, &sc->tsec_rx_dmap, sizeof(*sc->tsec_rx_vaddr) * TSEC_RX_NUM_DESC, (void **)&sc->tsec_rx_vaddr, &sc->tsec_rx_raddr, "RX"); if (error) { tsec_detach(sc); return (ENXIO); } /* Allocate a busdma tag for TX mbufs. */ error = bus_dma_tag_create(NULL, /* parent */ TSEC_TXBUFFER_ALIGNMENT, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ MCLBYTES * (TSEC_TX_NUM_DESC - 1), /* maxsize */ TSEC_TX_NUM_DESC - 1, /* nsegments */ MCLBYTES, 0, /* maxsegsz, flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sc->tsec_tx_mtag); /* dmat */ if (error) { device_printf(sc->dev, "failed to allocate busdma tag " "(tx mbufs)\n"); tsec_detach(sc); return (ENXIO); } /* Allocate a busdma tag for RX mbufs. */ error = bus_dma_tag_create(NULL, /* parent */ TSEC_RXBUFFER_ALIGNMENT, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ MCLBYTES, /* maxsize */ 1, /* nsegments */ MCLBYTES, 0, /* maxsegsz, flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sc->tsec_rx_mtag); /* dmat */ if (error) { device_printf(sc->dev, "failed to allocate busdma tag " "(rx mbufs)\n"); tsec_detach(sc); return (ENXIO); } /* Create TX busdma maps */ map_ptr = sc->tx_map_data; map_pptr = sc->tx_map_unused_data; for (i = 0; i < TSEC_TX_NUM_DESC; i++) { map_pptr[i] = &map_ptr[i]; error = bus_dmamap_create(sc->tsec_tx_mtag, 0, map_pptr[i]); if (error) { device_printf(sc->dev, "failed to init TX ring\n"); tsec_detach(sc); return (ENXIO); } } /* Create RX busdma maps and zero mbuf handlers */ for (i = 0; i < TSEC_RX_NUM_DESC; i++) { error = bus_dmamap_create(sc->tsec_rx_mtag, 0, &sc->rx_data[i].map); if (error) { device_printf(sc->dev, "failed to init RX ring\n"); tsec_detach(sc); return (ENXIO); } sc->rx_data[i].mbuf = NULL; } /* Create mbufs for RX buffers */ for (i = 0; i < TSEC_RX_NUM_DESC; i++) { error = tsec_new_rxbuf(sc->tsec_rx_mtag, sc->rx_data[i].map, &sc->rx_data[i].mbuf, &sc->rx_data[i].paddr); if (error) { device_printf(sc->dev, "can't load rx DMA map %d, " "error = %d\n", i, error); tsec_detach(sc); return (error); } } /* Create network interface for upper layers */ ifp = sc->tsec_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(sc->dev, "if_alloc() failed\n"); tsec_detach(sc); return (ENOMEM); } ifp->if_softc = sc; if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST; ifp->if_init = tsec_init; ifp->if_start = tsec_start; ifp->if_ioctl = tsec_ioctl; IFQ_SET_MAXLEN(&ifp->if_snd, TSEC_TX_NUM_DESC - 1); ifp->if_snd.ifq_drv_maxlen = TSEC_TX_NUM_DESC - 1; IFQ_SET_READY(&ifp->if_snd); ifp->if_capabilities = IFCAP_VLAN_MTU; if (sc->is_etsec) ifp->if_capabilities |= IFCAP_HWCSUM; ifp->if_capenable = ifp->if_capabilities; #ifdef DEVICE_POLLING /* Advertise that polling is supported */ ifp->if_capabilities |= IFCAP_POLLING; #endif /* Probe PHY(s) */ error = mii_phy_probe(sc->dev, &sc->tsec_miibus, tsec_ifmedia_upd, tsec_ifmedia_sts); if (error) { device_printf(sc->dev, "MII failed to find PHY!\n"); if_free(ifp); 
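(Aside on the map bookkeeping above: tsec_attach() pre-creates one busdma map per TX descriptor and parks pointers to them in tx_map_unused_data, from which tsec_encap() later draws with TSEC_ALLOC_TX_MAP() and to which tsec_stop() returns them with TSEC_FREE_TX_MAP(). Those macros are defined elsewhere; the following userspace sketch only guesses at the general shape of such an array-backed free pool, with every name hypothetical and plain ints standing in for bus_dmamap_t handles.)

#include <assert.h>
#include <stdio.h>

#define POOL_CNT 4			/* stands in for TSEC_TX_NUM_DESC */

struct map_pool {
	int *slot[POOL_CNT];		/* unused handles, stack-style */
	int top;			/* number of free entries */
};

static int *
pool_get(struct map_pool *p)
{
	assert(p->top > 0);
	return (p->slot[--p->top]);	/* pop a free handle */
}

static void
pool_put(struct map_pool *p, int *m)
{
	assert(p->top < POOL_CNT);
	p->slot[p->top++] = m;		/* push it back */
}

int
main(void)
{
	static int maps[POOL_CNT];	/* stand-ins for bus_dmamap_t */
	struct map_pool p = { .top = 0 };
	int i, *m;

	for (i = 0; i < POOL_CNT; i++)	/* mirrors the attach-time loop */
		pool_put(&p, &maps[i]);
	m = pool_get(&p);		/* like TSEC_ALLOC_TX_MAP() */
	pool_put(&p, m);		/* like TSEC_FREE_TX_MAP() */
	printf("%d maps free\n", p.top);
	return (0);
}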
sc->tsec_ifp = NULL; tsec_detach(sc); return (error); } sc->tsec_mii = device_get_softc(sc->tsec_miibus); /* Set MAC address */ tsec_get_hwaddr(sc, hwaddr); ether_ifattach(ifp, hwaddr); return (0); } int tsec_detach(struct tsec_softc *sc) { #ifdef DEVICE_POLLING if (sc->tsec_ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(sc->tsec_ifp); #endif /* Stop TSEC controller and free TX queue */ if (sc->sc_rres && sc->tsec_ifp) tsec_shutdown(sc->dev); /* Detach network interface */ if (sc->tsec_ifp) { ether_ifdetach(sc->tsec_ifp); if_free(sc->tsec_ifp); sc->tsec_ifp = NULL; } /* Free DMA resources */ tsec_free_dma(sc); return (0); } void tsec_shutdown(device_t dev) { struct tsec_softc *sc; sc = device_get_softc(dev); TSEC_GLOBAL_LOCK(sc); tsec_stop(sc); TSEC_GLOBAL_UNLOCK(sc); } int tsec_suspend(device_t dev) { /* TODO not implemented! */ return (0); } int tsec_resume(device_t dev) { /* TODO not implemented! */ return (0); } static void tsec_init(void *xsc) { struct tsec_softc *sc = xsc; TSEC_GLOBAL_LOCK(sc); tsec_init_locked(sc); TSEC_GLOBAL_UNLOCK(sc); } static void tsec_init_locked(struct tsec_softc *sc) { struct tsec_desc *tx_desc = sc->tsec_tx_vaddr; struct tsec_desc *rx_desc = sc->tsec_rx_vaddr; struct ifnet *ifp = sc->tsec_ifp; uint32_t timeout, val, i; TSEC_GLOBAL_LOCK_ASSERT(sc); tsec_stop(sc); /* * These steps are according to the MPC8555E PowerQUICCIII RM: * 14.7 Initialization/Application Information */ /* Step 1: soft reset MAC */ tsec_reset_mac(sc); /* Step 2: Initialize MACCFG2 */ TSEC_WRITE(sc, TSEC_REG_MACCFG2, TSEC_MACCFG2_FULLDUPLEX | /* Full Duplex = 1 */ TSEC_MACCFG2_PADCRC | /* PAD/CRC append */ TSEC_MACCFG2_GMII | /* I/F Mode bit */ TSEC_MACCFG2_PRECNT /* Preamble count = 7 */ ); /* Step 3: Initialize ECNTRL * While the documentation states that R100M is ignored if RPM is * not set, it does seem to be needed to get the orange boxes to * work (which have a Marvell 88E1111 PHY). Go figure. */ /* * XXX kludge - use circumstancial evidence to program ECNTRL * correctly. Ideally we need some board information to guide * us here. */ i = TSEC_READ(sc, TSEC_REG_ID2); val = (i & 0xffff) ? (TSEC_ECNTRL_TBIM | TSEC_ECNTRL_SGMIIM) /* Sumatra */ : TSEC_ECNTRL_R100M; /* Orange + CDS */ TSEC_WRITE(sc, TSEC_REG_ECNTRL, TSEC_ECNTRL_STEN | val); /* Step 4: Initialize MAC station address */ tsec_set_mac_address(sc); /* * Step 5: Assign a Physical address to the TBI so as to not conflict * with the external PHY physical address */ TSEC_WRITE(sc, TSEC_REG_TBIPA, 5); /* Step 6: Reset the management interface */ TSEC_WRITE(tsec0_sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_RESETMGMT); /* Step 7: Setup the MII Mgmt clock speed */ TSEC_WRITE(tsec0_sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_CLKDIV28); /* Step 8: Read MII Mgmt indicator register and check for Busy = 0 */ timeout = TSEC_READ_RETRY; while (--timeout && (TSEC_READ(tsec0_sc, TSEC_REG_MIIMIND) & TSEC_MIIMIND_BUSY)) DELAY(TSEC_READ_DELAY); if (timeout == 0) { if_printf(ifp, "tsec_init_locked(): Mgmt busy timeout\n"); return; } /* Step 9: Setup the MII Mgmt */ mii_mediachg(sc->tsec_mii); /* Step 10: Clear IEVENT register */ TSEC_WRITE(sc, TSEC_REG_IEVENT, 0xffffffff); /* Step 11: Enable interrupts */ #ifdef DEVICE_POLLING /* * ...only if polling is not turned on. Disable interrupts explicitly * if polling is enabled. 
*/ if (ifp->if_capenable & IFCAP_POLLING ) tsec_intrs_ctl(sc, 0); else #endif /* DEVICE_POLLING */ tsec_intrs_ctl(sc, 1); /* Step 12: Initialize IADDRn */ TSEC_WRITE(sc, TSEC_REG_IADDR0, 0); TSEC_WRITE(sc, TSEC_REG_IADDR1, 0); TSEC_WRITE(sc, TSEC_REG_IADDR2, 0); TSEC_WRITE(sc, TSEC_REG_IADDR3, 0); TSEC_WRITE(sc, TSEC_REG_IADDR4, 0); TSEC_WRITE(sc, TSEC_REG_IADDR5, 0); TSEC_WRITE(sc, TSEC_REG_IADDR6, 0); TSEC_WRITE(sc, TSEC_REG_IADDR7, 0); /* Step 13: Initialize GADDRn */ TSEC_WRITE(sc, TSEC_REG_GADDR0, 0); TSEC_WRITE(sc, TSEC_REG_GADDR1, 0); TSEC_WRITE(sc, TSEC_REG_GADDR2, 0); TSEC_WRITE(sc, TSEC_REG_GADDR3, 0); TSEC_WRITE(sc, TSEC_REG_GADDR4, 0); TSEC_WRITE(sc, TSEC_REG_GADDR5, 0); TSEC_WRITE(sc, TSEC_REG_GADDR6, 0); TSEC_WRITE(sc, TSEC_REG_GADDR7, 0); /* Step 14: Initialize RCTRL */ TSEC_WRITE(sc, TSEC_REG_RCTRL, 0); /* Step 15: Initialize DMACTRL */ tsec_dma_ctl(sc, 1); /* Step 16: Initialize FIFO_PAUSE_CTRL */ TSEC_WRITE(sc, TSEC_REG_FIFO_PAUSE_CTRL, TSEC_FIFO_PAUSE_CTRL_EN); /* * Step 17: Initialize transmit/receive descriptor rings. * Initialize TBASE and RBASE. */ TSEC_WRITE(sc, TSEC_REG_TBASE, sc->tsec_tx_raddr); TSEC_WRITE(sc, TSEC_REG_RBASE, sc->tsec_rx_raddr); for (i = 0; i < TSEC_TX_NUM_DESC; i++) { tx_desc[i].bufptr = 0; tx_desc[i].length = 0; tx_desc[i].flags = ((i == TSEC_TX_NUM_DESC - 1) ? TSEC_TXBD_W : 0); } bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); for (i = 0; i < TSEC_RX_NUM_DESC; i++) { rx_desc[i].bufptr = sc->rx_data[i].paddr; rx_desc[i].length = 0; rx_desc[i].flags = TSEC_RXBD_E | TSEC_RXBD_I | ((i == TSEC_RX_NUM_DESC - 1) ? TSEC_RXBD_W : 0); } bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Step 18: Initialize the maximum receive buffer length */ TSEC_WRITE(sc, TSEC_REG_MRBLR, MCLBYTES); /* Step 19: Configure ethernet frame sizes */ TSEC_WRITE(sc, TSEC_REG_MINFLR, TSEC_MIN_FRAME_SIZE); tsec_set_mtu(sc, ifp->if_mtu); /* Step 20: Enable Rx and RxBD sdata snooping */ TSEC_WRITE(sc, TSEC_REG_ATTR, TSEC_ATTR_RDSEN | TSEC_ATTR_RBDSEN); TSEC_WRITE(sc, TSEC_REG_ATTRELI, 0); /* Step 21: Reset collision counters in hardware */ TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0); TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0); TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0); TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0); TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0); /* Step 22: Mask all CAM interrupts */ TSEC_WRITE(sc, TSEC_REG_MON_CAM1, 0xffffffff); TSEC_WRITE(sc, TSEC_REG_MON_CAM2, 0xffffffff); /* Step 23: Enable Rx and Tx */ val = TSEC_READ(sc, TSEC_REG_MACCFG1); val |= (TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN); TSEC_WRITE(sc, TSEC_REG_MACCFG1, val); /* Step 24: Reset TSEC counters for Tx and Rx rings */ TSEC_TX_RX_COUNTERS_INIT(sc); /* Step 25: Setup TCP/IP Off-Load engine */ if (sc->is_etsec) tsec_offload_setup(sc); /* Step 26: Setup multicast filters */ tsec_setup_multicast(sc); /* Step 27: Activate network interface */ ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->tsec_if_flags = ifp->if_flags; sc->tsec_watchdog = 0; /* Schedule watchdog timeout */ callout_reset(&sc->tsec_callout, hz, tsec_tick, sc); } static void tsec_set_mac_address(struct tsec_softc *sc) { uint32_t macbuf[2] = { 0, 0 }; char *macbufp, *curmac; int i; TSEC_GLOBAL_LOCK_ASSERT(sc); KASSERT((ETHER_ADDR_LEN <= sizeof(macbuf)), ("tsec_set_mac_address: (%d <= %d", ETHER_ADDR_LEN, sizeof(macbuf))); macbufp = (char *)macbuf; curmac = (char *)IF_LLADDR(sc->tsec_ifp); /* Correct order of MAC address bytes */ for (i = 1; i <= 
ETHER_ADDR_LEN; i++) macbufp[ETHER_ADDR_LEN-i] = curmac[i-1]; /* Initialize MAC station address MACSTNADDR2 and MACSTNADDR1 */ TSEC_WRITE(sc, TSEC_REG_MACSTNADDR2, macbuf[1]); TSEC_WRITE(sc, TSEC_REG_MACSTNADDR1, macbuf[0]); } /* * DMA control function, if argument state is: * 0 - DMA engine will be disabled * 1 - DMA engine will be enabled */ static void tsec_dma_ctl(struct tsec_softc *sc, int state) { device_t dev; uint32_t dma_flags, timeout; dev = sc->dev; dma_flags = TSEC_READ(sc, TSEC_REG_DMACTRL); switch (state) { case 0: /* Temporarily clear stop graceful stop bits. */ tsec_dma_ctl(sc, 1000); /* Set it again */ dma_flags |= (TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS); break; case 1000: case 1: /* Set write with response (WWR), wait (WOP) and snoop bits */ dma_flags |= (TSEC_DMACTRL_TDSEN | TSEC_DMACTRL_TBDSEN | DMACTRL_WWR | DMACTRL_WOP); /* Clear graceful stop bits */ dma_flags &= ~(TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS); break; default: device_printf(dev, "tsec_dma_ctl(): unknown state value: %d\n", state); } TSEC_WRITE(sc, TSEC_REG_DMACTRL, dma_flags); switch (state) { case 0: /* Wait for DMA stop */ timeout = TSEC_READ_RETRY; while (--timeout && (!(TSEC_READ(sc, TSEC_REG_IEVENT) & (TSEC_IEVENT_GRSC | TSEC_IEVENT_GTSC)))) DELAY(TSEC_READ_DELAY); if (timeout == 0) device_printf(dev, "tsec_dma_ctl(): timeout!\n"); break; case 1: /* Restart transmission function */ TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT); } } /* * Interrupts control function, if argument state is: * 0 - all TSEC interrupts will be masked * 1 - all TSEC interrupts will be unmasked */ static void tsec_intrs_ctl(struct tsec_softc *sc, int state) { device_t dev; dev = sc->dev; switch (state) { case 0: TSEC_WRITE(sc, TSEC_REG_IMASK, 0); break; case 1: TSEC_WRITE(sc, TSEC_REG_IMASK, TSEC_IMASK_BREN | TSEC_IMASK_RXCEN | TSEC_IMASK_BSYEN | TSEC_IMASK_EBERREN | TSEC_IMASK_BTEN | TSEC_IMASK_TXEEN | TSEC_IMASK_TXBEN | TSEC_IMASK_TXFEN | TSEC_IMASK_XFUNEN | TSEC_IMASK_RXFEN); break; default: device_printf(dev, "tsec_intrs_ctl(): unknown state value: %d\n", state); } } static void tsec_reset_mac(struct tsec_softc *sc) { uint32_t maccfg1_flags; /* Set soft reset bit */ maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1); maccfg1_flags |= TSEC_MACCFG1_SOFT_RESET; TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags); /* Clear soft reset bit */ maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1); maccfg1_flags &= ~TSEC_MACCFG1_SOFT_RESET; TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags); } static void tsec_watchdog(struct tsec_softc *sc) { struct ifnet *ifp; TSEC_GLOBAL_LOCK_ASSERT(sc); if (sc->tsec_watchdog == 0 || --sc->tsec_watchdog > 0) return; ifp = sc->tsec_ifp; ifp->if_oerrors++; if_printf(ifp, "watchdog timeout\n"); tsec_stop(sc); tsec_init_locked(sc); } static void tsec_start(struct ifnet *ifp) { struct tsec_softc *sc = ifp->if_softc; TSEC_TRANSMIT_LOCK(sc); tsec_start_locked(ifp); TSEC_TRANSMIT_UNLOCK(sc); } static void tsec_start_locked(struct ifnet *ifp) { struct tsec_softc *sc; struct mbuf *m0, *mtmp; struct tsec_tx_fcb *tx_fcb; unsigned int queued = 0; int csum_flags, fcb_inserted = 0; sc = ifp->if_softc; TSEC_TRANSMIT_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; if (sc->tsec_link == 0) return; bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (;;) { /* Get packet from the queue */ IF_DEQUEUE(&ifp->if_snd, m0); if (m0 == NULL) break; /* Insert TCP/IP Off-load frame control block */ csum_flags = m0->m_pkthdr.csum_flags; 
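(The branch that follows prepends a TSEC frame control block when checksum offload is requested: M_PREPEND() grows the mbuf at the front, and the FCB's l3/l4 offsets, 14 for the Ethernet header and 20 for a plain IP header, tell the eTSEC where the protocol headers start. A minimal userspace sketch of that grow-at-front pattern under reserved headroom; the fcb struct below is illustrative only, not the driver's struct tsec_tx_fcb.)

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fcb {			/* illustrative, not struct tsec_tx_fcb */
	uint8_t flags;
	uint8_t l3_offset;
	uint8_t l4_offset;
	uint8_t pad;
};

int
main(void)
{
	uint8_t buf[64];
	uint8_t *data = buf + sizeof(struct fcb);	/* leave headroom */
	struct fcb hdr = { .flags = 0x1, .l3_offset = 14, .l4_offset = 20 };
	size_t len = 32;

	memset(data, 0xee, len);	/* pretend frame payload */
	data -= sizeof(hdr);		/* "M_PREPEND": extend the front */
	memcpy(data, &hdr, sizeof(hdr));
	len += sizeof(hdr);
	printf("frame now %zu bytes, fcb flags 0x%x\n", len, data[0]);
	return (0);
}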
if (csum_flags) { M_PREPEND(m0, sizeof(struct tsec_tx_fcb), M_DONTWAIT); if (m0 == NULL) break; tx_fcb = mtod(m0, struct tsec_tx_fcb *); tx_fcb->flags = 0; tx_fcb->l3_offset = ETHER_HDR_LEN; tx_fcb->l4_offset = sizeof(struct ip); if (csum_flags & CSUM_IP) tx_fcb->flags |= TSEC_TX_FCB_IP4 | TSEC_TX_FCB_CSUM_IP; if (csum_flags & CSUM_TCP) tx_fcb->flags |= TSEC_TX_FCB_TCP | TSEC_TX_FCB_CSUM_TCP_UDP; if (csum_flags & CSUM_UDP) tx_fcb->flags |= TSEC_TX_FCB_UDP | TSEC_TX_FCB_CSUM_TCP_UDP; fcb_inserted = 1; } mtmp = m_defrag(m0, M_DONTWAIT); if (mtmp) m0 = mtmp; if (tsec_encap(sc, m0, fcb_inserted)) { IF_PREPEND(&ifp->if_snd, m0); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } queued++; BPF_MTAP(ifp, m0); } bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if (queued) { /* Enable transmitter and watchdog timer */ TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT); sc->tsec_watchdog = 5; } } static int tsec_encap(struct tsec_softc *sc, struct mbuf *m0, int fcb_inserted) { struct tsec_desc *tx_desc = NULL; struct ifnet *ifp; bus_dma_segment_t segs[TSEC_TX_NUM_DESC]; bus_dmamap_t *mapp; int csum_flag = 0, error, seg, nsegs; TSEC_TRANSMIT_LOCK_ASSERT(sc); ifp = sc->tsec_ifp; if (TSEC_FREE_TX_DESC(sc) == 0) { /* No free descriptors */ return (-1); } /* Fetch unused map */ mapp = TSEC_ALLOC_TX_MAP(sc); /* Create mapping in DMA memory */ error = bus_dmamap_load_mbuf_sg(sc->tsec_tx_mtag, *mapp, m0, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0 || nsegs > TSEC_FREE_TX_DESC(sc) || nsegs <= 0) { bus_dmamap_unload(sc->tsec_tx_mtag, *mapp); TSEC_FREE_TX_MAP(sc, mapp); return ((error != 0) ? error : -1); } bus_dmamap_sync(sc->tsec_tx_mtag, *mapp, BUS_DMASYNC_PREWRITE); if ((ifp->if_flags & IFF_DEBUG) && (nsegs > 1)) if_printf(ifp, "TX buffer has %d segments\n", nsegs); if (fcb_inserted) csum_flag = TSEC_TXBD_TOE; /* Everything is ok, now we can send buffers */ for (seg = 0; seg < nsegs; seg++) { tx_desc = TSEC_GET_CUR_TX_DESC(sc); tx_desc->length = segs[seg].ds_len; tx_desc->bufptr = segs[seg].ds_addr; /* * Set flags: * - wrap * - checksum * - ready to send * - transmit the CRC sequence after the last data byte * - interrupt after the last buffer */ tx_desc->flags = (tx_desc->flags & TSEC_TXBD_W) | ((seg == 0) ? csum_flag : 0) | TSEC_TXBD_R | TSEC_TXBD_TC | ((seg == nsegs - 1) ? 
TSEC_TXBD_L | TSEC_TXBD_I : 0); } /* Save mbuf and DMA mapping for release at later stage */ TSEC_PUT_TX_MBUF(sc, m0); TSEC_PUT_TX_MAP(sc, mapp); return (0); } static void tsec_setfilter(struct tsec_softc *sc) { struct ifnet *ifp; uint32_t flags; ifp = sc->tsec_ifp; flags = TSEC_READ(sc, TSEC_REG_RCTRL); /* Promiscuous mode */ if (ifp->if_flags & IFF_PROMISC) flags |= TSEC_RCTRL_PROM; else flags &= ~TSEC_RCTRL_PROM; TSEC_WRITE(sc, TSEC_REG_RCTRL, flags); } #ifdef DEVICE_POLLING static poll_handler_t tsec_poll; -static void +static int tsec_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { uint32_t ie; struct tsec_softc *sc = ifp->if_softc; + int rx_npkts; + rx_npkts = 0; + TSEC_GLOBAL_LOCK(sc); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { TSEC_GLOBAL_UNLOCK(sc); - return; + return (rx_npkts); } if (cmd == POLL_AND_CHECK_STATUS) { tsec_error_intr_locked(sc, count); /* Clear all events reported */ ie = TSEC_READ(sc, TSEC_REG_IEVENT); TSEC_WRITE(sc, TSEC_REG_IEVENT, ie); } tsec_transmit_intr_locked(sc); TSEC_GLOBAL_TO_RECEIVE_LOCK(sc); - tsec_receive_intr_locked(sc, count); + rx_npkts = tsec_receive_intr_locked(sc, count); TSEC_RECEIVE_UNLOCK(sc); + + return (rx_npkts); } #endif /* DEVICE_POLLING */ static int tsec_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct tsec_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; device_t dev; int mask, error = 0; dev = sc->dev; switch (command) { case SIOCSIFMTU: TSEC_GLOBAL_LOCK(sc); if (tsec_set_mtu(sc, ifr->ifr_mtu)) ifp->if_mtu = ifr->ifr_mtu; else error = EINVAL; TSEC_GLOBAL_UNLOCK(sc); break; case SIOCSIFFLAGS: TSEC_GLOBAL_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { if ((sc->tsec_if_flags ^ ifp->if_flags) & IFF_PROMISC) tsec_setfilter(sc); if ((sc->tsec_if_flags ^ ifp->if_flags) & IFF_ALLMULTI) tsec_setup_multicast(sc); } else tsec_init_locked(sc); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) tsec_stop(sc); sc->tsec_if_flags = ifp->if_flags; TSEC_GLOBAL_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: if (ifp->if_drv_flags & IFF_DRV_RUNNING) { TSEC_GLOBAL_LOCK(sc); tsec_setup_multicast(sc); TSEC_GLOBAL_UNLOCK(sc); } case SIOCGIFMEDIA: case SIOCSIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->tsec_mii->mii_media, command); break; case SIOCSIFCAP: mask = ifp->if_capenable ^ ifr->ifr_reqcap; if ((mask & IFCAP_HWCSUM) && sc->is_etsec) { TSEC_GLOBAL_LOCK(sc); ifp->if_capenable &= ~IFCAP_HWCSUM; ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap; tsec_offload_setup(sc); TSEC_GLOBAL_UNLOCK(sc); } #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(tsec_poll, ifp); if (error) return (error); TSEC_GLOBAL_LOCK(sc); /* Disable interrupts */ tsec_intrs_ctl(sc, 0); ifp->if_capenable |= IFCAP_POLLING; TSEC_GLOBAL_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); TSEC_GLOBAL_LOCK(sc); /* Enable interrupts */ tsec_intrs_ctl(sc, 1); ifp->if_capenable &= ~IFCAP_POLLING; TSEC_GLOBAL_UNLOCK(sc); } } #endif break; default: error = ether_ioctl(ifp, command, data); } /* Flush buffers if not empty */ if (ifp->if_flags & IFF_UP) tsec_start(ifp); return (error); } static int tsec_ifmedia_upd(struct ifnet *ifp) { struct tsec_softc *sc = ifp->if_softc; struct mii_data *mii; TSEC_TRANSMIT_LOCK(sc); mii = sc->tsec_mii; mii_mediachg(mii); TSEC_TRANSMIT_UNLOCK(sc); return (0); } static void tsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct tsec_softc *sc = ifp->if_softc; struct mii_data *mii; 
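(The hunks above carry the point of this revision: the DEVICE_POLLING handler tsec_poll(), and the tsec_receive_intr_locked() routine it calls, now return the number of frames received, rx_npkts, instead of void. A minimal sketch of that contract; the names here are hypothetical, and whether the polling core uses the count to budget further polls is an assumption, not something this diff shows.)

#include <stdio.h>

/*
 * Shape of the new contract: consume at most `budget` frames and
 * report how many were actually received, rather than returning void.
 */
static int
fake_poll(int budget)
{
	int rx_npkts = 0;

	while (budget-- > 0 && rx_npkts < 3)	/* pretend 3 frames pending */
		rx_npkts++;
	return (rx_npkts);
}

int
main(void)
{
	int got = fake_poll(8);

	/* A caller can now tell an idle device (0) from a busy one. */
	printf("received %d packets this poll\n", got);
	return (0);
}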
TSEC_TRANSMIT_LOCK(sc); mii = sc->tsec_mii; mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; TSEC_TRANSMIT_UNLOCK(sc); } static int tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp, uint32_t *paddr) { struct mbuf *new_mbuf; bus_dma_segment_t seg[1]; int error, nsegs; KASSERT(mbufp != NULL, ("NULL mbuf pointer!")); new_mbuf = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MCLBYTES); if (new_mbuf == NULL) return (ENOBUFS); new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size; if (*mbufp) { bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(tag, map); } error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs, BUS_DMA_NOWAIT); KASSERT(nsegs == 1, ("Too many segments returned!")); if (nsegs != 1 || error) panic("tsec_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error); #if 0 if (error) { printf("tsec: bus_dmamap_load_mbuf_sg() returned: %d!\n", error); m_freem(new_mbuf); return (ENOBUFS); } #endif #if 0 KASSERT(((seg->ds_addr) & (TSEC_RXBUFFER_ALIGNMENT-1)) == 0, ("Wrong alignment of RX buffer!")); #endif bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD); (*mbufp) = new_mbuf; (*paddr) = seg->ds_addr; return (0); } static void tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { u_int32_t *paddr; KASSERT(nseg == 1, ("wrong number of segments, should be 1")); paddr = arg; *paddr = segs->ds_addr; } static int tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag, bus_dmamap_t *dmap, bus_size_t dsize, void **vaddr, void *raddr, const char *dname) { int error; /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */ error = bus_dma_tag_create(NULL, /* parent */ PAGE_SIZE, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ dsize, 1, /* maxsize, nsegments */ dsize, 0, /* maxsegsz, flags */ NULL, NULL, /* lockfunc, lockfuncarg */ dtag); /* dmat */ if (error) { device_printf(dev, "failed to allocate busdma %s tag\n", dname); (*vaddr) = NULL; return (ENXIO); } error = bus_dmamem_alloc(*dtag, vaddr, BUS_DMA_NOWAIT | BUS_DMA_ZERO, dmap); if (error) { device_printf(dev, "failed to allocate %s DMA safe memory\n", dname); bus_dma_tag_destroy(*dtag); (*vaddr) = NULL; return (ENXIO); } error = bus_dmamap_load(*dtag, *dmap, *vaddr, dsize, tsec_map_dma_addr, raddr, BUS_DMA_NOWAIT); if (error) { device_printf(dev, "cannot get address of the %s " "descriptors\n", dname); bus_dmamem_free(*dtag, *vaddr, *dmap); bus_dma_tag_destroy(*dtag); (*vaddr) = NULL; return (ENXIO); } return (0); } static void tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap, void *vaddr) { if (vaddr == NULL) return; /* Unmap descriptors from DMA memory */ bus_dmamap_sync(dtag, dmap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(dtag, dmap); /* Free descriptors memory */ bus_dmamem_free(dtag, vaddr, dmap); /* Destroy descriptors tag */ bus_dma_tag_destroy(dtag); } static void tsec_free_dma(struct tsec_softc *sc) { int i; /* Free TX maps */ for (i = 0; i < TSEC_TX_NUM_DESC; i++) if (sc->tx_map_data[i] != NULL) bus_dmamap_destroy(sc->tsec_tx_mtag, sc->tx_map_data[i]); /* Destroy tag for TX mbufs */ bus_dma_tag_destroy(sc->tsec_tx_mtag); /* Free RX mbufs and maps */ for (i = 0; i < TSEC_RX_NUM_DESC; i++) { if (sc->rx_data[i].mbuf) { /* Unload buffer from DMA */ bus_dmamap_sync(sc->tsec_rx_mtag, sc->rx_data[i].map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->tsec_rx_mtag, sc->rx_data[i].map); 
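(tsec_map_dma_addr() above is the classic busdma load callback: bus_dmamap_load() invokes it with the segment list plus the caller's opaque arg, and the callback returns the physical address by writing through that pointer, which is how tsec_alloc_dma_desc() gets raddr filled in. A userspace sketch of the same out-parameter-via-void* pattern; loader() and all names are hypothetical stand-ins for the busdma machinery.)

#include <stdint.h>
#include <stdio.h>

struct seg {			/* stand-in for bus_dma_segment_t */
	uint32_t ds_addr;
};

/* Shaped like tsec_map_dma_addr(): copy the address out via arg. */
static void
map_addr_cb(void *arg, struct seg *segs, int nseg)
{
	uint32_t *paddr = arg;

	if (nseg != 1)		/* the real code KASSERTs nseg == 1 */
		return;
	*paddr = segs->ds_addr;
}

/* Stand-in for bus_dmamap_load(): invokes the callback with one seg. */
static void
loader(void (*cb)(void *, struct seg *, int), void *arg)
{
	struct seg s = { .ds_addr = 0x1000 };

	cb(arg, &s, 1);
}

int
main(void)
{
	uint32_t raddr = 0;

	loader(map_addr_cb, &raddr);	/* like loading the desc ring */
	printf("ring at physical 0x%x\n", raddr);
	return (0);
}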
/* Free buffer */ m_freem(sc->rx_data[i].mbuf); } /* Destroy map for this buffer */ if (sc->rx_data[i].map != NULL) bus_dmamap_destroy(sc->tsec_rx_mtag, sc->rx_data[i].map); } /* Destroy tag for RX mbufs */ bus_dma_tag_destroy(sc->tsec_rx_mtag); /* Unload TX/RX descriptors */ tsec_free_dma_desc(sc->tsec_tx_dtag, sc->tsec_tx_dmap, sc->tsec_tx_vaddr); tsec_free_dma_desc(sc->tsec_rx_dtag, sc->tsec_rx_dmap, sc->tsec_rx_vaddr); } static void tsec_stop(struct tsec_softc *sc) { struct ifnet *ifp; struct mbuf *m0; bus_dmamap_t *mapp; uint32_t tmpval; TSEC_GLOBAL_LOCK_ASSERT(sc); ifp = sc->tsec_ifp; /* Disable interface and watchdog timer */ callout_stop(&sc->tsec_callout); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); sc->tsec_watchdog = 0; /* Disable all interrupts and stop DMA */ tsec_intrs_ctl(sc, 0); tsec_dma_ctl(sc, 0); /* Remove pending data from TX queue */ while (!TSEC_EMPTYQ_TX_MBUF(sc)) { m0 = TSEC_GET_TX_MBUF(sc); mapp = TSEC_GET_TX_MAP(sc); bus_dmamap_sync(sc->tsec_tx_mtag, *mapp, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->tsec_tx_mtag, *mapp); TSEC_FREE_TX_MAP(sc, mapp); m_freem(m0); } /* Disable RX and TX */ tmpval = TSEC_READ(sc, TSEC_REG_MACCFG1); tmpval &= ~(TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN); TSEC_WRITE(sc, TSEC_REG_MACCFG1, tmpval); DELAY(10); } static void tsec_tick(void *arg) { struct tsec_softc *sc = arg; struct ifnet *ifp; int link; TSEC_GLOBAL_LOCK(sc); tsec_watchdog(sc); ifp = sc->tsec_ifp; link = sc->tsec_link; mii_tick(sc->tsec_mii); if (link == 0 && sc->tsec_link == 1 && (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))) tsec_start_locked(ifp); /* Schedule another timeout one second from now. */ callout_reset(&sc->tsec_callout, hz, tsec_tick, sc); TSEC_GLOBAL_UNLOCK(sc); } /* * This is the core RX routine. It replenishes mbufs in the descriptor and * sends data which have been dma'ed into host memory to upper layer. * * Loops at most count times if count is > 0, or until done if count < 0. */ -static void +static int tsec_receive_intr_locked(struct tsec_softc *sc, int count) { struct tsec_desc *rx_desc; struct ifnet *ifp; struct rx_data_type *rx_data; struct mbuf *m; device_t dev; uint32_t i; - int c; + int c, rx_npkts; uint16_t flags; TSEC_RECEIVE_LOCK_ASSERT(sc); ifp = sc->tsec_ifp; rx_data = sc->rx_data; dev = sc->dev; + rx_npkts = 0; bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (c = 0; ; c++) { if (count >= 0 && count-- == 0) break; rx_desc = TSEC_GET_CUR_RX_DESC(sc); flags = rx_desc->flags; /* Check if there is anything to receive */ if ((flags & TSEC_RXBD_E) || (c >= TSEC_RX_NUM_DESC)) { /* * Avoid generating another interrupt */ if (flags & TSEC_RXBD_E) TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXB | TSEC_IEVENT_RXF); /* * We didn't consume current descriptor and have to * return it to the queue */ TSEC_BACK_CUR_RX_DESC(sc); break; } if (flags & (TSEC_RXBD_LG | TSEC_RXBD_SH | TSEC_RXBD_NO | TSEC_RXBD_CR | TSEC_RXBD_OV | TSEC_RXBD_TR)) { rx_desc->length = 0; rx_desc->flags = (rx_desc->flags & ~TSEC_RXBD_ZEROONINIT) | TSEC_RXBD_E | TSEC_RXBD_I; if (sc->frame != NULL) { m_free(sc->frame); sc->frame = NULL; } continue; } /* Ok... 
process frame */ i = TSEC_GET_CUR_RX_DESC_CNT(sc); m = rx_data[i].mbuf; m->m_len = rx_desc->length; if (sc->frame != NULL) { if ((flags & TSEC_RXBD_L) != 0) m->m_len -= m_length(sc->frame, NULL); m->m_flags &= ~M_PKTHDR; m_cat(sc->frame, m); } else { sc->frame = m; } m = NULL; if ((flags & TSEC_RXBD_L) != 0) { m = sc->frame; sc->frame = NULL; } if (tsec_new_rxbuf(sc->tsec_rx_mtag, rx_data[i].map, &rx_data[i].mbuf, &rx_data[i].paddr)) { ifp->if_ierrors++; /* * We ran out of mbufs; didn't consume current * descriptor and have to return it to the queue. */ TSEC_BACK_CUR_RX_DESC(sc); break; } /* Attach new buffer to descriptor and clear flags */ rx_desc->bufptr = rx_data[i].paddr; rx_desc->length = 0; rx_desc->flags = (rx_desc->flags & ~TSEC_RXBD_ZEROONINIT) | TSEC_RXBD_E | TSEC_RXBD_I; if (m != NULL) { m->m_pkthdr.rcvif = ifp; m_fixhdr(m); m_adj(m, -ETHER_CRC_LEN); if (sc->is_etsec) tsec_offload_process_frame(sc, m); TSEC_RECEIVE_UNLOCK(sc); (*ifp->if_input)(ifp, m); TSEC_RECEIVE_LOCK(sc); + rx_npkts++; } } bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* * Make sure TSEC receiver is not halted. * * Various conditions can stop the TSEC receiver, but not all are * signaled and handled by error interrupt, so make sure the receiver * is running. Writing to TSEC_REG_RSTAT restarts the receiver when * halted, and is harmless if already running. */ TSEC_WRITE(sc, TSEC_REG_RSTAT, TSEC_RSTAT_QHLT); + return (rx_npkts); } void tsec_receive_intr(void *arg) { struct tsec_softc *sc = arg; TSEC_RECEIVE_LOCK(sc); #ifdef DEVICE_POLLING if (sc->tsec_ifp->if_capenable & IFCAP_POLLING) { TSEC_RECEIVE_UNLOCK(sc); return; } #endif /* Confirm the interrupt was received by driver */ TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXB | TSEC_IEVENT_RXF); tsec_receive_intr_locked(sc, -1); TSEC_RECEIVE_UNLOCK(sc); } static void tsec_transmit_intr_locked(struct tsec_softc *sc) { struct tsec_desc *tx_desc; struct ifnet *ifp; struct mbuf *m0; bus_dmamap_t *mapp; int send = 0; TSEC_TRANSMIT_LOCK_ASSERT(sc); ifp = sc->tsec_ifp; /* Update collision statistics */ ifp->if_collisions += TSEC_READ(sc, TSEC_REG_MON_TNCL); /* Reset collision counters in hardware */ TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0); TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0); TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0); TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0); TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0); bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); while (TSEC_CUR_DIFF_DIRTY_TX_DESC(sc)) { tx_desc = TSEC_GET_DIRTY_TX_DESC(sc); if (tx_desc->flags & TSEC_TXBD_R) { TSEC_BACK_DIRTY_TX_DESC(sc); break; } if ((tx_desc->flags & TSEC_TXBD_L) == 0) continue; /* * This is the last buf in this packet, so unmap and free it. 
*/ m0 = TSEC_GET_TX_MBUF(sc); mapp = TSEC_GET_TX_MAP(sc); bus_dmamap_sync(sc->tsec_tx_mtag, *mapp, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->tsec_tx_mtag, *mapp); TSEC_FREE_TX_MAP(sc, mapp); m_freem(m0); ifp->if_opackets++; send = 1; } bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); if (send) { /* Now send anything that was pending */ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; tsec_start_locked(ifp); /* Stop wathdog if all sent */ if (TSEC_EMPTYQ_TX_MBUF(sc)) sc->tsec_watchdog = 0; } } void tsec_transmit_intr(void *arg) { struct tsec_softc *sc = arg; TSEC_TRANSMIT_LOCK(sc); #ifdef DEVICE_POLLING if (sc->tsec_ifp->if_capenable & IFCAP_POLLING) { TSEC_TRANSMIT_UNLOCK(sc); return; } #endif /* Confirm the interrupt was received by driver */ TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_TXB | TSEC_IEVENT_TXF); tsec_transmit_intr_locked(sc); TSEC_TRANSMIT_UNLOCK(sc); } static void tsec_error_intr_locked(struct tsec_softc *sc, int count) { struct ifnet *ifp; uint32_t eflags; TSEC_GLOBAL_LOCK_ASSERT(sc); ifp = sc->tsec_ifp; eflags = TSEC_READ(sc, TSEC_REG_IEVENT); /* Clear events bits in hardware */ TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXC | TSEC_IEVENT_BSY | TSEC_IEVENT_EBERR | TSEC_IEVENT_MSRO | TSEC_IEVENT_BABT | TSEC_IEVENT_TXC | TSEC_IEVENT_TXE | TSEC_IEVENT_LC | TSEC_IEVENT_CRL | TSEC_IEVENT_XFUN); /* Check transmitter errors */ if (eflags & TSEC_IEVENT_TXE) { ifp->if_oerrors++; if (eflags & TSEC_IEVENT_LC) ifp->if_collisions++; TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT); } /* Check receiver errors */ if (eflags & TSEC_IEVENT_BSY) { ifp->if_ierrors++; ifp->if_iqdrops++; /* Get data from RX buffers */ tsec_receive_intr_locked(sc, count); } if (ifp->if_flags & IFF_DEBUG) if_printf(ifp, "tsec_error_intr(): event flags: 0x%x\n", eflags); if (eflags & TSEC_IEVENT_EBERR) { if_printf(ifp, "System bus error occurred during" "DMA transaction (flags: 0x%x)\n", eflags); tsec_init_locked(sc); } if (eflags & TSEC_IEVENT_BABT) ifp->if_oerrors++; if (eflags & TSEC_IEVENT_BABR) ifp->if_ierrors++; } void tsec_error_intr(void *arg) { struct tsec_softc *sc = arg; TSEC_GLOBAL_LOCK(sc); tsec_error_intr_locked(sc, -1); TSEC_GLOBAL_UNLOCK(sc); } int tsec_miibus_readreg(device_t dev, int phy, int reg) { struct tsec_softc *sc; uint32_t timeout; sc = device_get_softc(dev); if (device_get_unit(dev) != phy) return (0); sc = tsec0_sc; TSEC_WRITE(sc, TSEC_REG_MIIMADD, (phy << 8) | reg); TSEC_WRITE(sc, TSEC_REG_MIIMCOM, 0); TSEC_WRITE(sc, TSEC_REG_MIIMCOM, TSEC_MIIMCOM_READCYCLE); timeout = TSEC_READ_RETRY; while (--timeout && TSEC_READ(sc, TSEC_REG_MIIMIND) & (TSEC_MIIMIND_NOTVALID | TSEC_MIIMIND_BUSY)) DELAY(TSEC_READ_DELAY); if (timeout == 0) device_printf(dev, "Timeout while reading from PHY!\n"); return (TSEC_READ(sc, TSEC_REG_MIIMSTAT)); } void tsec_miibus_writereg(device_t dev, int phy, int reg, int value) { struct tsec_softc *sc; uint32_t timeout; sc = device_get_softc(dev); if (device_get_unit(dev) != phy) device_printf(dev, "Trying to write to an alien PHY(%d)\n", phy); sc = tsec0_sc; TSEC_WRITE(sc, TSEC_REG_MIIMADD, (phy << 8) | reg); TSEC_WRITE(sc, TSEC_REG_MIIMCON, value); timeout = TSEC_READ_RETRY; while (--timeout && (TSEC_READ(sc, TSEC_REG_MIIMIND) & TSEC_MIIMIND_BUSY)) DELAY(TSEC_READ_DELAY); if (timeout == 0) device_printf(dev, "Timeout while writing to PHY!\n"); } void tsec_miibus_statchg(device_t dev) { struct tsec_softc *sc; struct mii_data *mii; uint32_t ecntrl, id, tmp; int link; sc = device_get_softc(dev); mii = sc->tsec_mii; link = 
((mii->mii_media_status & IFM_ACTIVE) ? 1 : 0); tmp = TSEC_READ(sc, TSEC_REG_MACCFG2) & ~TSEC_MACCFG2_IF; if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) tmp |= TSEC_MACCFG2_FULLDUPLEX; else tmp &= ~TSEC_MACCFG2_FULLDUPLEX; switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_1000_T: case IFM_1000_SX: tmp |= TSEC_MACCFG2_GMII; sc->tsec_link = link; break; case IFM_100_TX: case IFM_10_T: tmp |= TSEC_MACCFG2_MII; sc->tsec_link = link; break; case IFM_NONE: if (link) device_printf(dev, "No speed selected but link " "active!\n"); sc->tsec_link = 0; return; default: sc->tsec_link = 0; device_printf(dev, "Unknown speed (%d), link %s!\n", IFM_SUBTYPE(mii->mii_media_active), ((link) ? "up" : "down")); return; } TSEC_WRITE(sc, TSEC_REG_MACCFG2, tmp); /* XXX kludge - use circumstantial evidence for reduced mode. */ id = TSEC_READ(sc, TSEC_REG_ID2); if (id & 0xffff) { ecntrl = TSEC_READ(sc, TSEC_REG_ECNTRL) & ~TSEC_ECNTRL_R100M; ecntrl |= (tmp & TSEC_MACCFG2_MII) ? TSEC_ECNTRL_R100M : 0; TSEC_WRITE(sc, TSEC_REG_ECNTRL, ecntrl); } } static void tsec_add_sysctls(struct tsec_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; struct sysctl_oid *tree; ctx = device_get_sysctl_ctx(sc->dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal", CTLFLAG_RD, 0, "TSEC Interrupts coalescing"); children = SYSCTL_CHILDREN(tree); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time", CTLTYPE_UINT | CTLFLAG_RW, sc, TSEC_IC_RX, tsec_sysctl_ic_time, "I", "IC RX time threshold (0-65535)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_count", CTLTYPE_UINT | CTLFLAG_RW, sc, TSEC_IC_RX, tsec_sysctl_ic_count, "I", "IC RX frame count threshold (0-255)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time", CTLTYPE_UINT | CTLFLAG_RW, sc, TSEC_IC_TX, tsec_sysctl_ic_time, "I", "IC TX time threshold (0-65535)"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_count", CTLTYPE_UINT | CTLFLAG_RW, sc, TSEC_IC_TX, tsec_sysctl_ic_count, "I", "IC TX frame count threshold (0-255)"); } /* * With Interrupt Coalescing (IC) active, a transmit/receive frame * interrupt is raised either upon: * * - threshold-defined period of time elapsed, or * - threshold-defined number of frames is received/transmitted, * whichever occurs first. * * The following sysctls regulate IC behaviour (for TX/RX separately): * * dev.tsec..int_coal.rx_time * dev.tsec..int_coal.rx_count * dev.tsec..int_coal.tx_time * dev.tsec..int_coal.tx_count * * Values: * * - 0 for either time or count disables IC on the given TX/RX path * * - count: 1-255 (expresses frame count number; note that value of 1 is * effectively IC off) * * - time: 1-65535 (value corresponds to a real time period and is * expressed in units equivalent to 64 TSEC interface clocks, i.e. one timer * threshold unit is 26.5 us, 2.56 us, or 512 ns, corresponding to 10 Mbps, * 100 Mbps, or 1Gbps, respectively. For detailed discussion consult the * TSEC reference manual. */ static int tsec_sysctl_ic_time(SYSCTL_HANDLER_ARGS) { int error; uint32_t time; struct tsec_softc *sc = (struct tsec_softc *)arg1; time = (arg2 == TSEC_IC_RX) ? 
sc->rx_ic_time : sc->tx_ic_time; error = sysctl_handle_int(oidp, &time, 0, req); if (error != 0) return (error); if (time > 65535) return (EINVAL); TSEC_IC_LOCK(sc); if (arg2 == TSEC_IC_RX) { sc->rx_ic_time = time; tsec_set_rxic(sc); } else { sc->tx_ic_time = time; tsec_set_txic(sc); } TSEC_IC_UNLOCK(sc); return (0); } static int tsec_sysctl_ic_count(SYSCTL_HANDLER_ARGS) { int error; uint32_t count; struct tsec_softc *sc = (struct tsec_softc *)arg1; count = (arg2 == TSEC_IC_RX) ? sc->rx_ic_count : sc->tx_ic_count; error = sysctl_handle_int(oidp, &count, 0, req); if (error != 0) return (error); if (count > 255) return (EINVAL); TSEC_IC_LOCK(sc); if (arg2 == TSEC_IC_RX) { sc->rx_ic_count = count; tsec_set_rxic(sc); } else { sc->tx_ic_count = count; tsec_set_txic(sc); } TSEC_IC_UNLOCK(sc); return (0); } static void tsec_set_rxic(struct tsec_softc *sc) { uint32_t rxic_val; if (sc->rx_ic_count == 0 || sc->rx_ic_time == 0) /* Disable RX IC */ rxic_val = 0; else { rxic_val = 0x80000000; rxic_val |= (sc->rx_ic_count << 21); rxic_val |= sc->rx_ic_time; } TSEC_WRITE(sc, TSEC_REG_RXIC, rxic_val); } static void tsec_set_txic(struct tsec_softc *sc) { uint32_t txic_val; if (sc->tx_ic_count == 0 || sc->tx_ic_time == 0) /* Disable TX IC */ txic_val = 0; else { txic_val = 0x80000000; txic_val |= (sc->tx_ic_count << 21); txic_val |= sc->tx_ic_time; } TSEC_WRITE(sc, TSEC_REG_TXIC, txic_val); } static void tsec_offload_setup(struct tsec_softc *sc) { struct ifnet *ifp = sc->tsec_ifp; uint32_t reg; TSEC_GLOBAL_LOCK_ASSERT(sc); reg = TSEC_READ(sc, TSEC_REG_TCTRL); reg |= TSEC_TCTRL_IPCSEN | TSEC_TCTRL_TUCSEN; if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist = TSEC_CHECKSUM_FEATURES; else ifp->if_hwassist = 0; TSEC_WRITE(sc, TSEC_REG_TCTRL, reg); reg = TSEC_READ(sc, TSEC_REG_RCTRL); reg &= ~(TSEC_RCTRL_IPCSEN | TSEC_RCTRL_TUCSEN | TSEC_RCTRL_PRSDEP); reg |= TSEC_RCTRL_PRSDEP_PARSE_L2 | TSEC_RCTRL_VLEX; if (ifp->if_capenable & IFCAP_RXCSUM) reg |= TSEC_RCTRL_IPCSEN | TSEC_RCTRL_TUCSEN | TSEC_RCTRL_PRSDEP_PARSE_L234; TSEC_WRITE(sc, TSEC_REG_RCTRL, reg); } static void tsec_offload_process_frame(struct tsec_softc *sc, struct mbuf *m) { struct tsec_rx_fcb rx_fcb; int csum_flags = 0; int protocol, flags; TSEC_RECEIVE_LOCK_ASSERT(sc); m_copydata(m, 0, sizeof(struct tsec_rx_fcb), (caddr_t)(&rx_fcb)); flags = rx_fcb.flags; protocol = rx_fcb.protocol; if (TSEC_RX_FCB_IP_CSUM_CHECKED(flags)) { csum_flags |= CSUM_IP_CHECKED; if ((flags & TSEC_RX_FCB_IP_CSUM_ERROR) == 0) csum_flags |= CSUM_IP_VALID; } if ((protocol == IPPROTO_TCP || protocol == IPPROTO_UDP) && TSEC_RX_FCB_TCP_UDP_CSUM_CHECKED(flags) && (flags & TSEC_RX_FCB_TCP_UDP_CSUM_ERROR) == 0) { csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xFFFF; } m->m_pkthdr.csum_flags = csum_flags; if (flags & TSEC_RX_FCB_VLAN) { m->m_pkthdr.ether_vtag = rx_fcb.vlan; m->m_flags |= M_VLANTAG; } m_adj(m, sizeof(struct tsec_rx_fcb)); } static void tsec_setup_multicast(struct tsec_softc *sc) { uint32_t hashtable[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; struct ifnet *ifp = sc->tsec_ifp; struct ifmultiaddr *ifma; uint32_t h; int i; TSEC_GLOBAL_LOCK_ASSERT(sc); if (ifp->if_flags & IFF_ALLMULTI) { for (i = 0; i < 8; i++) TSEC_WRITE(sc, TSEC_REG_GADDR(i), 0xFFFFFFFF); return; } IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = (ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) >> 24) & 0xFF; hashtable[(h >> 5)] |= 1 << (0x1F - (h & 0x1F)); } 
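(tsec_set_rxic()/tsec_set_txic() above pack the interrupt-coalescing setting into one register: bit 31 enables IC, the frame-count threshold is shifted to bit 21, and the time threshold occupies the low bits; either threshold being 0 disables IC outright. The 255 and 65535 ceilings match the sysctl validation above, and the masks below merely restate them. A small sketch of that packing, evaluated with the driver's attach-time defaults of count 16 and time 768.)

#include <stdint.h>
#include <stdio.h>

/*
 * Mirror of the packing in tsec_set_rxic(): return 0 to disable
 * coalescing, otherwise set the enable bit and merge both thresholds.
 */
static uint32_t
ic_pack(uint32_t count, uint32_t time)
{
	uint32_t v;

	if (count == 0 || time == 0)
		return (0);		/* IC disabled */
	v = 0x80000000;			/* enable bit */
	v |= (count & 0xff) << 21;	/* frame-count threshold (1-255) */
	v |= time & 0xffff;		/* time threshold (1-65535) */
	return (v);
}

int
main(void)
{
	/* The driver's defaults: count 16, time 768 -> 0x82000300. */
	printf("RXIC = 0x%08x\n", ic_pack(16, 768));
	return (0);
}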
IF_ADDR_UNLOCK(ifp); for (i = 0; i < 8; i++) TSEC_WRITE(sc, TSEC_REG_GADDR(i), hashtable[i]); } static int tsec_set_mtu(struct tsec_softc *sc, unsigned int mtu) { mtu += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN; TSEC_GLOBAL_LOCK_ASSERT(sc); if (mtu >= TSEC_MIN_FRAME_SIZE && mtu <= TSEC_MAX_FRAME_SIZE) { TSEC_WRITE(sc, TSEC_REG_MAXFRM, mtu); return (mtu); } return (0); } Index: head/sys/dev/vge/if_vge.c =================================================================== --- head/sys/dev/vge/if_vge.c (revision 193095) +++ head/sys/dev/vge/if_vge.c (revision 193096) @@ -1,2387 +1,2389 @@ /*- * Copyright (c) 2004 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver. * * Written by Bill Paul * Senior Networking Software Engineer * Wind River Systems */ /* * The VIA Networking VT6122 is a 32bit, 33/66Mhz PCI device that * combines a tri-speed ethernet MAC and PHY, with the following * features: * * o Jumbo frame support up to 16K * o Transmit and receive flow control * o IPv4 checksum offload * o VLAN tag insertion and stripping * o TCP large send * o 64-bit multicast hash table filter * o 64 entry CAM filter * o 16K RX FIFO and 48K TX FIFO memory * o Interrupt moderation * * The VT6122 supports up to four transmit DMA queues. The descriptors * in the transmit ring can address up to 7 data fragments; frames which * span more than 7 data buffers must be coalesced, but in general the * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments * long. The receive descriptors address only a single buffer. * * There are two peculiar design issues with the VT6122. One is that * receive data buffers must be aligned on a 32-bit boundary. 
This is * not a problem where the VT6122 is used as a LOM device in x86-based * systems, but on architectures that generate unaligned access traps, we * have to do some copying. * * The other issue has to do with the way 64-bit addresses are handled. * The DMA descriptors only allow you to specify 48 bits of addressing * information. The remaining 16 bits are specified using one of the * I/O registers. If you only have a 32-bit system, then this isn't * an issue, but if you have a 64-bit system and more than 4GB of * memory, you must have to make sure your network data buffers reside * in the same 48-bit 'segment.' * * Special thanks to Ryan Fu at VIA Networking for providing documentation * and sample NICs for testing. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(vge, pci, 1, 1, 1); MODULE_DEPEND(vge, ether, 1, 1, 1); MODULE_DEPEND(vge, miibus, 1, 1, 1); /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" #include #include #define VGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) /* * Various supported device vendors/types and their names. */ static struct vge_type vge_devs[] = { { VIA_VENDORID, VIA_DEVICEID_61XX, "VIA Networking Gigabit Ethernet" }, { 0, 0, NULL } }; static int vge_probe (device_t); static int vge_attach (device_t); static int vge_detach (device_t); static int vge_encap (struct vge_softc *, struct mbuf *, int); static void vge_dma_map_addr (void *, bus_dma_segment_t *, int, int); static void vge_dma_map_rx_desc (void *, bus_dma_segment_t *, int, bus_size_t, int); static void vge_dma_map_tx_desc (void *, bus_dma_segment_t *, int, bus_size_t, int); static int vge_allocmem (device_t, struct vge_softc *); static int vge_newbuf (struct vge_softc *, int, struct mbuf *); static int vge_rx_list_init (struct vge_softc *); static int vge_tx_list_init (struct vge_softc *); #ifdef VGE_FIXUP_RX static __inline void vge_fixup_rx (struct mbuf *); #endif -static void vge_rxeof (struct vge_softc *); +static int vge_rxeof (struct vge_softc *); static void vge_txeof (struct vge_softc *); static void vge_intr (void *); static void vge_tick (void *); static void vge_tx_task (void *, int); static void vge_start (struct ifnet *); static int vge_ioctl (struct ifnet *, u_long, caddr_t); static void vge_init (void *); static void vge_stop (struct vge_softc *); static void vge_watchdog (struct ifnet *); static int vge_suspend (device_t); static int vge_resume (device_t); static int vge_shutdown (device_t); static int vge_ifmedia_upd (struct ifnet *); static void vge_ifmedia_sts (struct ifnet *, struct ifmediareq *); #ifdef VGE_EEPROM static void vge_eeprom_getword (struct vge_softc *, int, u_int16_t *); #endif static void vge_read_eeprom (struct vge_softc *, caddr_t, int, int, int); static void vge_miipoll_start (struct vge_softc *); static void vge_miipoll_stop (struct vge_softc *); static int vge_miibus_readreg (device_t, int, int); static int vge_miibus_writereg (device_t, int, int, int); static void vge_miibus_statchg (device_t); static void vge_cam_clear (struct vge_softc *); static int vge_cam_set (struct vge_softc *, uint8_t *); static void vge_setmulti (struct vge_softc *); static void vge_reset (struct vge_softc *); #define VGE_PCI_LOIO 0x10 #define VGE_PCI_LOMEM 0x14 static 
device_method_t vge_methods[] = { /* Device interface */ DEVMETHOD(device_probe, vge_probe), DEVMETHOD(device_attach, vge_attach), DEVMETHOD(device_detach, vge_detach), DEVMETHOD(device_suspend, vge_suspend), DEVMETHOD(device_resume, vge_resume), DEVMETHOD(device_shutdown, vge_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, vge_miibus_readreg), DEVMETHOD(miibus_writereg, vge_miibus_writereg), DEVMETHOD(miibus_statchg, vge_miibus_statchg), { 0, 0 } }; static driver_t vge_driver = { "vge", vge_methods, sizeof(struct vge_softc) }; static devclass_t vge_devclass; DRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0); DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0); #ifdef VGE_EEPROM /* * Read a word of data stored in the EEPROM at address 'addr.' */ static void vge_eeprom_getword(sc, addr, dest) struct vge_softc *sc; int addr; u_int16_t *dest; { register int i; u_int16_t word = 0; /* * Enter EEPROM embedded programming mode. In order to * access the EEPROM at all, we first have to set the * EELOAD bit in the CHIPCFG2 register. */ CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); /* Select the address of the word we want to read */ CSR_WRITE_1(sc, VGE_EEADDR, addr); /* Issue read command */ CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD); /* Wait for the done bit to be set. */ for (i = 0; i < VGE_TIMEOUT; i++) { if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE) break; } if (i == VGE_TIMEOUT) { device_printf(sc->vge_dev, "EEPROM read timed out\n"); *dest = 0; return; } /* Read the result */ word = CSR_READ_2(sc, VGE_EERDDAT); /* Turn off EEPROM access mode. */ CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); *dest = word; return; } #endif /* * Read a sequence of words from the EEPROM. */ static void vge_read_eeprom(sc, dest, off, cnt, swap) struct vge_softc *sc; caddr_t dest; int off; int cnt; int swap; { int i; #ifdef VGE_EEPROM u_int16_t word = 0, *ptr; for (i = 0; i < cnt; i++) { vge_eeprom_getword(sc, off + i, &word); ptr = (u_int16_t *)(dest + (i * 2)); if (swap) *ptr = ntohs(word); else *ptr = word; } #else for (i = 0; i < ETHER_ADDR_LEN; i++) dest[i] = CSR_READ_1(sc, VGE_PAR0 + i); #endif } static void vge_miipoll_stop(sc) struct vge_softc *sc; { int i; CSR_WRITE_1(sc, VGE_MIICMD, 0); for (i = 0; i < VGE_TIMEOUT; i++) { DELAY(1); if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) break; } if (i == VGE_TIMEOUT) device_printf(sc->vge_dev, "failed to idle MII autopoll\n"); return; } static void vge_miipoll_start(sc) struct vge_softc *sc; { int i; /* First, make sure we're idle. */ CSR_WRITE_1(sc, VGE_MIICMD, 0); CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL); for (i = 0; i < VGE_TIMEOUT; i++) { DELAY(1); if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) break; } if (i == VGE_TIMEOUT) { device_printf(sc->vge_dev, "failed to idle MII autopoll\n"); return; } /* Now enable auto poll mode. */ CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO); /* And make sure it started. 
*/ for (i = 0; i < VGE_TIMEOUT; i++) { DELAY(1); if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0) break; } if (i == VGE_TIMEOUT) device_printf(sc->vge_dev, "failed to start MII autopoll\n"); return; } static int vge_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct vge_softc *sc; int i; u_int16_t rval = 0; sc = device_get_softc(dev); if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F)) return(0); VGE_LOCK(sc); vge_miipoll_stop(sc); /* Specify the register we want to read. */ CSR_WRITE_1(sc, VGE_MIIADDR, reg); /* Issue read command. */ CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD); /* Wait for the read command bit to self-clear. */ for (i = 0; i < VGE_TIMEOUT; i++) { DELAY(1); if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0) break; } if (i == VGE_TIMEOUT) device_printf(sc->vge_dev, "MII read timed out\n"); else rval = CSR_READ_2(sc, VGE_MIIDATA); vge_miipoll_start(sc); VGE_UNLOCK(sc); return (rval); } static int vge_miibus_writereg(dev, phy, reg, data) device_t dev; int phy, reg, data; { struct vge_softc *sc; int i, rval = 0; sc = device_get_softc(dev); if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F)) return(0); VGE_LOCK(sc); vge_miipoll_stop(sc); /* Specify the register we want to write. */ CSR_WRITE_1(sc, VGE_MIIADDR, reg); /* Specify the data we want to write. */ CSR_WRITE_2(sc, VGE_MIIDATA, data); /* Issue write command. */ CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD); /* Wait for the write command bit to self-clear. */ for (i = 0; i < VGE_TIMEOUT; i++) { DELAY(1); if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0) break; } if (i == VGE_TIMEOUT) { device_printf(sc->vge_dev, "MII write timed out\n"); rval = EIO; } vge_miipoll_start(sc); VGE_UNLOCK(sc); return (rval); } static void vge_cam_clear(sc) struct vge_softc *sc; { int i; /* * Turn off all the mask bits. This tells the chip * that none of the entries in the CAM filter are valid. * desired entries will be enabled as we fill the filter in. */ CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK); CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE); for (i = 0; i < 8; i++) CSR_WRITE_1(sc, VGE_CAM0 + i, 0); /* Clear the VLAN filter too. */ CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0); for (i = 0; i < 8; i++) CSR_WRITE_1(sc, VGE_CAM0 + i, 0); CSR_WRITE_1(sc, VGE_CAMADDR, 0); CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); sc->vge_camidx = 0; return; } static int vge_cam_set(sc, addr) struct vge_softc *sc; uint8_t *addr; { int i, error = 0; if (sc->vge_camidx == VGE_CAM_MAXADDRS) return(ENOSPC); /* Select the CAM data page. */ CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA); /* Set the filter entry we want to update and enable writing. */ CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx); /* Write the address to the CAM registers */ for (i = 0; i < ETHER_ADDR_LEN; i++) CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]); /* Issue a write command. */ CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE); /* Wake for it to clear. */ for (i = 0; i < VGE_TIMEOUT; i++) { DELAY(1); if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0) break; } if (i == VGE_TIMEOUT) { device_printf(sc->vge_dev, "setting CAM filter failed\n"); error = EIO; goto fail; } /* Select the CAM mask page. */ CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK); /* Set the mask bit that enables this filter. 
*/ CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8), 1<<(sc->vge_camidx & 7)); sc->vge_camidx++; fail: /* Turn off access to CAM. */ CSR_WRITE_1(sc, VGE_CAMADDR, 0); CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); return (error); } /* * Program the multicast filter. We use the 64-entry CAM filter * for perfect filtering. If there are more than 64 multicast addresses, * we use the hash filter instead. */ static void vge_setmulti(sc) struct vge_softc *sc; { struct ifnet *ifp; int error = 0/*, h = 0*/; struct ifmultiaddr *ifma; u_int32_t h, hashes[2] = { 0, 0 }; ifp = sc->vge_ifp; /* First, zot all the multicast entries. */ vge_cam_clear(sc); CSR_WRITE_4(sc, VGE_MAR0, 0); CSR_WRITE_4(sc, VGE_MAR1, 0); /* * If the user wants allmulti or promisc mode, enable reception * of all multicast frames. */ if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF); CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF); return; } /* Now program new ones */ IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; error = vge_cam_set(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); if (error) break; } /* If there were too many addresses, use the hash filter. */ if (error) { vge_cam_clear(sc); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); } CSR_WRITE_4(sc, VGE_MAR0, hashes[0]); CSR_WRITE_4(sc, VGE_MAR1, hashes[1]); } IF_ADDR_UNLOCK(ifp); return; } static void vge_reset(sc) struct vge_softc *sc; { register int i; CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET); for (i = 0; i < VGE_TIMEOUT; i++) { DELAY(5); if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0) break; } if (i == VGE_TIMEOUT) { device_printf(sc->vge_dev, "soft reset timed out\n"); CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE); DELAY(2000); } DELAY(5000); CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD); for (i = 0; i < VGE_TIMEOUT; i++) { DELAY(5); if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0) break; } if (i == VGE_TIMEOUT) { device_printf(sc->vge_dev, "EEPROM reload timed out\n"); return; } CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI); return; } /* * Probe for a VIA gigabit chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int vge_probe(dev) device_t dev; { struct vge_type *t; t = vge_devs; while (t->vge_name != NULL) { if ((pci_get_vendor(dev) == t->vge_vid) && (pci_get_device(dev) == t->vge_did)) { device_set_desc(dev, t->vge_name); return (BUS_PROBE_DEFAULT); } t++; } return (ENXIO); } static void vge_dma_map_rx_desc(arg, segs, nseg, mapsize, error) void *arg; bus_dma_segment_t *segs; int nseg; bus_size_t mapsize; int error; { struct vge_dmaload_arg *ctx; struct vge_rx_desc *d = NULL; if (error) return; ctx = arg; /* Signal error to caller if there are too many segments */ if (nseg > ctx->vge_maxsegs) { ctx->vge_maxsegs = 0; return; } /* * Map the segment array into descriptors. */ d = &ctx->sc->vge_ldata.vge_rx_list[ctx->vge_idx]; /* If this descriptor is still owned by the chip, bail.
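* The OWN bit marks a descriptor that has been handed to the hardware; overwriting it here would clobber an in-flight DMA.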
*/ if (le32toh(d->vge_sts) & VGE_RDSTS_OWN) { device_printf(ctx->sc->vge_dev, "tried to map busy descriptor\n"); ctx->vge_maxsegs = 0; return; } d->vge_buflen = htole16(VGE_BUFLEN(segs[0].ds_len) | VGE_RXDESC_I); d->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr)); d->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF); d->vge_sts = 0; d->vge_ctl = 0; ctx->vge_maxsegs = 1; return; } static void vge_dma_map_tx_desc(arg, segs, nseg, mapsize, error) void *arg; bus_dma_segment_t *segs; int nseg; bus_size_t mapsize; int error; { struct vge_dmaload_arg *ctx; struct vge_tx_desc *d = NULL; struct vge_tx_frag *f; int i = 0; if (error) return; ctx = arg; /* Signal error to caller if there are too many segments */ if (nseg > ctx->vge_maxsegs) { ctx->vge_maxsegs = 0; return; } /* Map the segment array into descriptors. */ d = &ctx->sc->vge_ldata.vge_tx_list[ctx->vge_idx]; /* If this descriptor is still owned by the chip, bail. */ if (le32toh(d->vge_sts) & VGE_TDSTS_OWN) { ctx->vge_maxsegs = 0; return; } for (i = 0; i < nseg; i++) { f = &d->vge_frag[i]; f->vge_buflen = htole16(VGE_BUFLEN(segs[i].ds_len)); f->vge_addrlo = htole32(VGE_ADDR_LO(segs[i].ds_addr)); f->vge_addrhi = htole16(VGE_ADDR_HI(segs[i].ds_addr) & 0xFFFF); } /* Argh. This chip does not autopad short frames */ if (ctx->vge_m0->m_pkthdr.len < VGE_MIN_FRAMELEN) { f = &d->vge_frag[i]; f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN - ctx->vge_m0->m_pkthdr.len)); f->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr)); f->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF); ctx->vge_m0->m_pkthdr.len = VGE_MIN_FRAMELEN; i++; } /* * When telling the chip how many segments there are, we * must use nsegs + 1 instead of just nsegs. Darned if I * know why. */ i++; d->vge_sts = ctx->vge_m0->m_pkthdr.len << 16; d->vge_ctl = ctx->vge_flags|(i << 28)|VGE_TD_LS_NORM; if (ctx->vge_m0->m_pkthdr.len > ETHERMTU + ETHER_HDR_LEN) d->vge_ctl |= VGE_TDCTL_JUMBO; ctx->vge_maxsegs = nseg; return; } /* * Map a single buffer address. */ static void vge_dma_map_addr(arg, segs, nseg, error) void *arg; bus_dma_segment_t *segs; int nseg; int error; { bus_addr_t *addr; if (error) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); addr = arg; *addr = segs->ds_addr; return; } static int vge_allocmem(dev, sc) device_t dev; struct vge_softc *sc; { int error; int nseg; int i; /* * Allocate map for RX mbufs. */ nseg = 32; error = bus_dma_tag_create(sc->vge_parent_tag, ETHER_ALIGN, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * nseg, nseg, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->vge_ldata.vge_mtag); if (error) { device_printf(dev, "could not allocate dma tag\n"); return (ENOMEM); } /* * Allocate map for TX descriptor list. */ error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, VGE_TX_LIST_SZ, 1, VGE_TX_LIST_SZ, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->vge_ldata.vge_tx_list_tag); if (error) { device_printf(dev, "could not allocate dma tag\n"); return (ENOMEM); } /* Allocate DMA'able memory for the TX ring */ error = bus_dmamem_alloc(sc->vge_ldata.vge_tx_list_tag, (void **)&sc->vge_ldata.vge_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->vge_ldata.vge_tx_list_map); if (error) return (ENOMEM); /* Load the map for the TX ring.
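* The vge_dma_map_addr() callback records the ring's single segment address in vge_tx_list_addr; vge_init() later programs it into the chip.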
*/ error = bus_dmamap_load(sc->vge_ldata.vge_tx_list_tag, sc->vge_ldata.vge_tx_list_map, sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ, vge_dma_map_addr, &sc->vge_ldata.vge_tx_list_addr, BUS_DMA_NOWAIT); if (error) return (ENOMEM); /* Create DMA maps for TX buffers */ for (i = 0; i < VGE_TX_DESC_CNT; i++) { error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0, &sc->vge_ldata.vge_tx_dmamap[i]); if (error) { device_printf(dev, "can't create DMA map for TX\n"); return (ENOMEM); } } /* * Allocate map for RX descriptor list. */ error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, VGE_RX_LIST_SZ, 1, VGE_RX_LIST_SZ, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->vge_ldata.vge_rx_list_tag); if (error) { device_printf(dev, "could not allocate dma tag\n"); return (ENOMEM); } /* Allocate DMA'able memory for the RX ring */ error = bus_dmamem_alloc(sc->vge_ldata.vge_rx_list_tag, (void **)&sc->vge_ldata.vge_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->vge_ldata.vge_rx_list_map); if (error) return (ENOMEM); /* Load the map for the RX ring. */ error = bus_dmamap_load(sc->vge_ldata.vge_rx_list_tag, sc->vge_ldata.vge_rx_list_map, sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ, vge_dma_map_addr, &sc->vge_ldata.vge_rx_list_addr, BUS_DMA_NOWAIT); if (error) return (ENOMEM); /* Create DMA maps for RX buffers */ for (i = 0; i < VGE_RX_DESC_CNT; i++) { error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0, &sc->vge_ldata.vge_rx_dmamap[i]); if (error) { device_printf(dev, "can't create DMA map for RX\n"); return (ENOMEM); } } return (0); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int vge_attach(dev) device_t dev; { u_char eaddr[ETHER_ADDR_LEN]; struct vge_softc *sc; struct ifnet *ifp; int unit, error = 0, rid; sc = device_get_softc(dev); unit = device_get_unit(dev); sc->vge_dev = dev; mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); /* * Map control/status registers. */ pci_enable_busmaster(dev); rid = VGE_PCI_LOMEM; sc->vge_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1, RF_ACTIVE); if (sc->vge_res == NULL) { printf ("vge%d: couldn't map ports/memory\n", unit); error = ENXIO; goto fail; } sc->vge_btag = rman_get_bustag(sc->vge_res); sc->vge_bhandle = rman_get_bushandle(sc->vge_res); /* Allocate interrupt */ rid = 0; sc->vge_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE); if (sc->vge_irq == NULL) { printf("vge%d: couldn't map interrupt\n", unit); error = ENXIO; goto fail; } /* Reset the adapter. */ vge_reset(sc); /* * Get station address from the EEPROM. */ vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0); sc->vge_unit = unit; /* * Allocate the parent bus DMA tag appropriate for PCI.
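* All ring and mbuf tags in vge_allocmem() are created as children of this tag, so the 32-bit PCI addressing limit is inherited everywhere.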
*/ #define VGE_NSEG_NEW 32 error = bus_dma_tag_create(NULL, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MAXBSIZE, VGE_NSEG_NEW, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->vge_parent_tag); if (error) goto fail; error = vge_allocmem(dev, sc); if (error) goto fail; ifp = sc->vge_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { printf("vge%d: can not if_alloc()\n", sc->vge_unit); error = ENOSPC; goto fail; } /* Do MII setup */ if (mii_phy_probe(dev, &sc->vge_miibus, vge_ifmedia_upd, vge_ifmedia_sts)) { printf("vge%d: MII without any phy!\n", sc->vge_unit); error = ENXIO; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = vge_ioctl; ifp->if_capabilities = IFCAP_VLAN_MTU; ifp->if_start = vge_start; ifp->if_hwassist = VGE_CSUM_FEATURES; ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING; ifp->if_capenable = ifp->if_capabilities; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif ifp->if_watchdog = vge_watchdog; ifp->if_init = vge_init; IFQ_SET_MAXLEN(&ifp->if_snd, VGE_IFQ_MAXLEN); ifp->if_snd.ifq_drv_maxlen = VGE_IFQ_MAXLEN; IFQ_SET_READY(&ifp->if_snd); TASK_INIT(&sc->vge_txtask, 0, vge_tx_task, ifp); /* * Call MI attach routine. */ ether_ifattach(ifp, eaddr); /* Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE, NULL, vge_intr, sc, &sc->vge_intrhand); if (error) { printf("vge%d: couldn't set up irq\n", unit); ether_ifdetach(ifp); goto fail; } fail: if (error) vge_detach(dev); return (error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. */ static int vge_detach(dev) device_t dev; { struct vge_softc *sc; struct ifnet *ifp; int i; sc = device_get_softc(dev); KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized")); ifp = sc->vge_ifp; #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif /* These should only be active if attach succeeded */ if (device_is_attached(dev)) { vge_stop(sc); /* * Force off the IFF_UP flag here, in case someone * still had a BPF descriptor attached to this * interface. If they do, ether_ifdetach() will cause * the BPF code to try and clear the promisc mode * flag, which will bubble down to vge_ioctl(), * which will try to call vge_init() again. This will * turn the NIC back on and restart the MII ticker, * which will panic the system when the kernel tries * to invoke the vge_tick() function that isn't there * anymore.
*/ ifp->if_flags &= ~IFF_UP; ether_ifdetach(ifp); } if (sc->vge_miibus) device_delete_child(dev, sc->vge_miibus); bus_generic_detach(dev); if (sc->vge_intrhand) bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand); if (sc->vge_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vge_irq); if (sc->vge_res) bus_release_resource(dev, SYS_RES_MEMORY, VGE_PCI_LOMEM, sc->vge_res); if (ifp) if_free(ifp); /* Unload and free the RX DMA ring memory and map */ if (sc->vge_ldata.vge_rx_list_tag) { bus_dmamap_unload(sc->vge_ldata.vge_rx_list_tag, sc->vge_ldata.vge_rx_list_map); bus_dmamem_free(sc->vge_ldata.vge_rx_list_tag, sc->vge_ldata.vge_rx_list, sc->vge_ldata.vge_rx_list_map); bus_dma_tag_destroy(sc->vge_ldata.vge_rx_list_tag); } /* Unload and free the TX DMA ring memory and map */ if (sc->vge_ldata.vge_tx_list_tag) { bus_dmamap_unload(sc->vge_ldata.vge_tx_list_tag, sc->vge_ldata.vge_tx_list_map); bus_dmamem_free(sc->vge_ldata.vge_tx_list_tag, sc->vge_ldata.vge_tx_list, sc->vge_ldata.vge_tx_list_map); bus_dma_tag_destroy(sc->vge_ldata.vge_tx_list_tag); } /* Destroy all the RX and TX buffer maps */ if (sc->vge_ldata.vge_mtag) { for (i = 0; i < VGE_TX_DESC_CNT; i++) bus_dmamap_destroy(sc->vge_ldata.vge_mtag, sc->vge_ldata.vge_tx_dmamap[i]); for (i = 0; i < VGE_RX_DESC_CNT; i++) bus_dmamap_destroy(sc->vge_ldata.vge_mtag, sc->vge_ldata.vge_rx_dmamap[i]); bus_dma_tag_destroy(sc->vge_ldata.vge_mtag); } if (sc->vge_parent_tag) bus_dma_tag_destroy(sc->vge_parent_tag); mtx_destroy(&sc->vge_mtx); return (0); } static int vge_newbuf(sc, idx, m) struct vge_softc *sc; int idx; struct mbuf *m; { struct vge_dmaload_arg arg; struct mbuf *n = NULL; int i, error; if (m == NULL) { n = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (n == NULL) return (ENOBUFS); m = n; } else m->m_data = m->m_ext.ext_buf; #ifdef VGE_FIXUP_RX /* * This is part of an evil trick to deal with non-x86 platforms. * The VIA chip requires RX buffers to be aligned on 32-bit * boundaries, but that will hose non-x86 machines. To get around * this, we leave some empty space at the start of each buffer * and for non-x86 hosts, we copy the buffer back two bytes * to achieve word alignment. This is slightly more efficient * than allocating a new buffer, copying the contents, and * discarding the old buffer. */ m->m_len = m->m_pkthdr.len = MCLBYTES - VGE_ETHER_ALIGN; m_adj(m, VGE_ETHER_ALIGN); #else m->m_len = m->m_pkthdr.len = MCLBYTES; #endif arg.sc = sc; arg.vge_idx = idx; arg.vge_maxsegs = 1; arg.vge_flags = 0; error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, sc->vge_ldata.vge_rx_dmamap[idx], m, vge_dma_map_rx_desc, &arg, BUS_DMA_NOWAIT); if (error || arg.vge_maxsegs != 1) { if (n != NULL) m_freem(n); return (ENOMEM); } /* * Note: the manual fails to document the fact that for * proper operation, the driver needs to replenish the RX * DMA ring 4 descriptors at a time (rather than one at a * time, like most chips). We can allocate the new buffers * but we should not set the OWN bits until we're ready * to hand back 4 of them in one shot.
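* Hence the vge_rx_consumed counter below: buffers are staged with OWN clear, and every VGE_RXCHUNK buffers the whole chunk's OWN bits are set at once.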
*/ #define VGE_RXCHUNK 4 sc->vge_rx_consumed++; if (sc->vge_rx_consumed == VGE_RXCHUNK) { for (i = idx; i != idx - sc->vge_rx_consumed; i--) sc->vge_ldata.vge_rx_list[i].vge_sts |= htole32(VGE_RDSTS_OWN); sc->vge_rx_consumed = 0; } sc->vge_ldata.vge_rx_mbuf[idx] = m; bus_dmamap_sync(sc->vge_ldata.vge_mtag, sc->vge_ldata.vge_rx_dmamap[idx], BUS_DMASYNC_PREREAD); return (0); } static int vge_tx_list_init(sc) struct vge_softc *sc; { bzero ((char *)sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ); bzero ((char *)&sc->vge_ldata.vge_tx_mbuf, (VGE_TX_DESC_CNT * sizeof(struct mbuf *))); bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag, sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_PREWRITE); sc->vge_ldata.vge_tx_prodidx = 0; sc->vge_ldata.vge_tx_considx = 0; sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT; return (0); } static int vge_rx_list_init(sc) struct vge_softc *sc; { int i; bzero ((char *)sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ); bzero ((char *)&sc->vge_ldata.vge_rx_mbuf, (VGE_RX_DESC_CNT * sizeof(struct mbuf *))); sc->vge_rx_consumed = 0; for (i = 0; i < VGE_RX_DESC_CNT; i++) { if (vge_newbuf(sc, i, NULL) == ENOBUFS) return (ENOBUFS); } /* Flush the RX descriptors */ bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag, sc->vge_ldata.vge_rx_list_map, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); sc->vge_ldata.vge_rx_prodidx = 0; sc->vge_rx_consumed = 0; sc->vge_head = sc->vge_tail = NULL; return (0); } #ifdef VGE_FIXUP_RX static __inline void vge_fixup_rx(m) struct mbuf *m; { int i; uint16_t *src, *dst; src = mtod(m, uint16_t *); dst = src - 1; for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) *dst++ = *src++; m->m_data -= ETHER_ALIGN; return; } #endif /* * RX handler. We support the reception of jumbo frames that have * been fragmented across multiple 2K mbuf cluster buffers. */ -static void +static int vge_rxeof(sc) struct vge_softc *sc; { struct mbuf *m; struct ifnet *ifp; int i, total_len; int lim = 0; struct vge_rx_desc *cur_rx; u_int32_t rxstat, rxctl; VGE_LOCK_ASSERT(sc); ifp = sc->vge_ifp; i = sc->vge_ldata.vge_rx_prodidx; /* Invalidate the descriptor memory */ bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag, sc->vge_ldata.vge_rx_list_map, BUS_DMASYNC_POSTREAD); while (!VGE_OWN(&sc->vge_ldata.vge_rx_list[i])) { #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { if (sc->rxcycles <= 0) break; sc->rxcycles--; } #endif cur_rx = &sc->vge_ldata.vge_rx_list[i]; m = sc->vge_ldata.vge_rx_mbuf[i]; total_len = VGE_RXBYTES(cur_rx); rxstat = le32toh(cur_rx->vge_sts); rxctl = le32toh(cur_rx->vge_ctl); /* Invalidate the RX mbuf and unload its map */ bus_dmamap_sync(sc->vge_ldata.vge_mtag, sc->vge_ldata.vge_rx_dmamap[i], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->vge_ldata.vge_mtag, sc->vge_ldata.vge_rx_dmamap[i]); /* * If the 'start of frame' bit is set, this indicates * either the first fragment in a multi-fragment receive, * or an intermediate fragment. Either way, we want to * accumulate the buffers. */ if (rxstat & VGE_RXPKT_SOF) { m->m_len = MCLBYTES - VGE_ETHER_ALIGN; if (sc->vge_head == NULL) sc->vge_head = sc->vge_tail = m; else { m->m_flags &= ~M_PKTHDR; sc->vge_tail->m_next = m; sc->vge_tail = m; } vge_newbuf(sc, i, NULL); VGE_RX_DESC_INC(i); continue; } /* * Bad/error frames will have the RXOK bit cleared. * However, there's one error case we want to allow: * if a VLAN tagged frame arrives and the chip can't * match it against the CAM filter, it considers this * a 'VLAN CAM filter miss' and clears the 'RXOK' bit. 
* We don't want to drop the frame though: our VLAN * filtering is done in software. */ if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM) && !(rxstat & VGE_RDSTS_CSUMERR)) { ifp->if_ierrors++; /* * If this is part of a multi-fragment packet, * discard all the pieces. */ if (sc->vge_head != NULL) { m_freem(sc->vge_head); sc->vge_head = sc->vge_tail = NULL; } vge_newbuf(sc, i, m); VGE_RX_DESC_INC(i); continue; } /* * If allocating a replacement mbuf fails, * reload the current one. */ if (vge_newbuf(sc, i, NULL)) { ifp->if_ierrors++; if (sc->vge_head != NULL) { m_freem(sc->vge_head); sc->vge_head = sc->vge_tail = NULL; } vge_newbuf(sc, i, m); VGE_RX_DESC_INC(i); continue; } VGE_RX_DESC_INC(i); if (sc->vge_head != NULL) { m->m_len = total_len % (MCLBYTES - VGE_ETHER_ALIGN); /* * Special case: if there are 4 bytes or less * in this buffer, the mbuf can be discarded: * the last 4 bytes are the CRC, which we don't * care about anyway. */ if (m->m_len <= ETHER_CRC_LEN) { sc->vge_tail->m_len -= (ETHER_CRC_LEN - m->m_len); m_freem(m); } else { m->m_len -= ETHER_CRC_LEN; m->m_flags &= ~M_PKTHDR; sc->vge_tail->m_next = m; } m = sc->vge_head; sc->vge_head = sc->vge_tail = NULL; m->m_pkthdr.len = total_len - ETHER_CRC_LEN; } else m->m_pkthdr.len = m->m_len = (total_len - ETHER_CRC_LEN); #ifdef VGE_FIXUP_RX vge_fixup_rx(m); #endif ifp->if_ipackets++; m->m_pkthdr.rcvif = ifp; /* Do RX checksumming if enabled */ if (ifp->if_capenable & IFCAP_RXCSUM) { /* Check IP header checksum */ if (rxctl & VGE_RDCTL_IPPKT) m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if (rxctl & VGE_RDCTL_IPCSUMOK) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; /* Check TCP/UDP checksum */ if (rxctl & (VGE_RDCTL_TCPPKT|VGE_RDCTL_UDPPKT) && rxctl & VGE_RDCTL_PROTOCSUMOK) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID|CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } if (rxstat & VGE_RDSTS_VTAG) { /* * The 32-bit rxctl register is stored in little-endian. * However, the 16-bit vlan tag is stored in big-endian, * so we have to byte swap it.
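* Masking rxctl with VGE_RDCTL_VLANID isolates the tag field before the swap.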
*/ m->m_pkthdr.ether_vtag = bswap16(rxctl & VGE_RDCTL_VLANID); m->m_flags |= M_VLANTAG; } VGE_UNLOCK(sc); (*ifp->if_input)(ifp, m); VGE_LOCK(sc); lim++; if (lim == VGE_RX_DESC_CNT) break; } /* Flush the RX DMA ring */ bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag, sc->vge_ldata.vge_rx_list_map, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); sc->vge_ldata.vge_rx_prodidx = i; CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim); - return; + return (lim); } static void vge_txeof(sc) struct vge_softc *sc; { struct ifnet *ifp; u_int32_t txstat; int idx; ifp = sc->vge_ifp; idx = sc->vge_ldata.vge_tx_considx; /* Invalidate the TX descriptor list */ bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag, sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_POSTREAD); while (idx != sc->vge_ldata.vge_tx_prodidx) { txstat = le32toh(sc->vge_ldata.vge_tx_list[idx].vge_sts); if (txstat & VGE_TDSTS_OWN) break; m_freem(sc->vge_ldata.vge_tx_mbuf[idx]); sc->vge_ldata.vge_tx_mbuf[idx] = NULL; bus_dmamap_unload(sc->vge_ldata.vge_mtag, sc->vge_ldata.vge_tx_dmamap[idx]); if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL)) ifp->if_collisions++; if (txstat & VGE_TDSTS_TXERR) ifp->if_oerrors++; else ifp->if_opackets++; sc->vge_ldata.vge_tx_free++; VGE_TX_DESC_INC(idx); } /* No changes made to the TX ring, so no flush needed */ if (idx != sc->vge_ldata.vge_tx_considx) { sc->vge_ldata.vge_tx_considx = idx; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_timer = 0; } /* * If not all descriptors have been reaped yet, * reload the timer so that we will eventually get another * interrupt that will cause us to re-enter this routine. * This is done in case the transmitter has gone idle. */ if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT) { CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE); } return; } static void vge_tick(xsc) void *xsc; { struct vge_softc *sc; struct ifnet *ifp; struct mii_data *mii; sc = xsc; ifp = sc->vge_ifp; VGE_LOCK(sc); mii = device_get_softc(sc->vge_miibus); mii_tick(mii); if (sc->vge_link) { if (!(mii->mii_media_status & IFM_ACTIVE)) { sc->vge_link = 0; if_link_state_change(sc->vge_ifp, LINK_STATE_DOWN); } } else { if (mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { sc->vge_link = 1; if_link_state_change(sc->vge_ifp, LINK_STATE_UP); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) taskqueue_enqueue(taskqueue_swi, &sc->vge_txtask); } } VGE_UNLOCK(sc); return; } #ifdef DEVICE_POLLING -static void +static int vge_poll (struct ifnet *ifp, enum poll_cmd cmd, int count) { struct vge_softc *sc = ifp->if_softc; + int rx_npkts = 0; VGE_LOCK(sc); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) goto done; sc->rxcycles = count; - vge_rxeof(sc); + rx_npkts = vge_rxeof(sc); vge_txeof(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) taskqueue_enqueue(taskqueue_swi, &sc->vge_txtask); if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ u_int32_t status; status = CSR_READ_4(sc, VGE_ISR); if (status == 0xFFFFFFFF) goto done; if (status) CSR_WRITE_4(sc, VGE_ISR, status); /* * XXX check behaviour on receiver stalls.
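* For now a DMA stall in either direction is handled by reinitializing the chip via vge_init().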
*/ if (status & VGE_ISR_TXDMA_STALL || status & VGE_ISR_RXDMA_STALL) vge_init(sc); if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) { vge_rxeof(sc); ifp->if_ierrors++; CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); } } done: VGE_UNLOCK(sc); + return (rx_npkts); } #endif /* DEVICE_POLLING */ static void vge_intr(arg) void *arg; { struct vge_softc *sc; struct ifnet *ifp; u_int32_t status; sc = arg; if (sc->suspended) { return; } VGE_LOCK(sc); ifp = sc->vge_ifp; if (!(ifp->if_flags & IFF_UP)) { VGE_UNLOCK(sc); return; } #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { VGE_UNLOCK(sc); return; } #endif /* Disable interrupts */ CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); for (;;) { status = CSR_READ_4(sc, VGE_ISR); /* If the card has gone away the read returns 0xffffffff. */ if (status == 0xFFFFFFFF) break; if (status) CSR_WRITE_4(sc, VGE_ISR, status); if ((status & VGE_INTRS) == 0) break; if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO)) vge_rxeof(sc); if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) { vge_rxeof(sc); CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); } if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0)) vge_txeof(sc); if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) vge_init(sc); if (status & VGE_ISR_LINKSTS) vge_tick(sc); } /* Re-enable interrupts */ CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); VGE_UNLOCK(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) taskqueue_enqueue(taskqueue_swi, &sc->vge_txtask); return; } static int vge_encap(sc, m_head, idx) struct vge_softc *sc; struct mbuf *m_head; int idx; { struct mbuf *m_new = NULL; struct vge_dmaload_arg arg; bus_dmamap_t map; int error; if (sc->vge_ldata.vge_tx_free <= 2) return (EFBIG); arg.vge_flags = 0; if (m_head->m_pkthdr.csum_flags & CSUM_IP) arg.vge_flags |= VGE_TDCTL_IPCSUM; if (m_head->m_pkthdr.csum_flags & CSUM_TCP) arg.vge_flags |= VGE_TDCTL_TCPCSUM; if (m_head->m_pkthdr.csum_flags & CSUM_UDP) arg.vge_flags |= VGE_TDCTL_UDPCSUM; arg.sc = sc; arg.vge_idx = idx; arg.vge_m0 = m_head; arg.vge_maxsegs = VGE_TX_FRAGS; map = sc->vge_ldata.vge_tx_dmamap[idx]; error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map, m_head, vge_dma_map_tx_desc, &arg, BUS_DMA_NOWAIT); if (error && error != EFBIG) { printf("vge%d: can't map mbuf (error %d)\n", sc->vge_unit, error); return (ENOBUFS); } /* Too many segments to map, coalesce into a single mbuf */ if (error || arg.vge_maxsegs == 0) { m_new = m_defrag(m_head, M_DONTWAIT); if (m_new == NULL) return (1); else m_head = m_new; arg.sc = sc; arg.vge_m0 = m_head; arg.vge_idx = idx; arg.vge_maxsegs = 1; error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map, m_head, vge_dma_map_tx_desc, &arg, BUS_DMA_NOWAIT); if (error) { printf("vge%d: can't map mbuf (error %d)\n", sc->vge_unit, error); return (EFBIG); } } sc->vge_ldata.vge_tx_mbuf[idx] = m_head; sc->vge_ldata.vge_tx_free--; /* * Set up hardware VLAN tagging. */ if (m_head->m_flags & M_VLANTAG) sc->vge_ldata.vge_tx_list[idx].vge_ctl |= htole32(m_head->m_pkthdr.ether_vtag | VGE_TDCTL_VTAG); sc->vge_ldata.vge_tx_list[idx].vge_sts |= htole32(VGE_TDSTS_OWN); return (0); } static void vge_tx_task(arg, npending) void *arg; int npending; { struct ifnet *ifp; ifp = arg; vge_start(ifp); return; } /* * Main transmit routine.
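* Dequeues mbufs from the send queue, encapsulates them into TX descriptors chained together with VGE_TXDESC_Q, and then wakes the transmitter.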
*/ static void vge_start(ifp) struct ifnet *ifp; { struct vge_softc *sc; struct mbuf *m_head = NULL; int idx, pidx = 0; sc = ifp->if_softc; VGE_LOCK(sc); if (!sc->vge_link || ifp->if_drv_flags & IFF_DRV_OACTIVE) { VGE_UNLOCK(sc); return; } if (IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { VGE_UNLOCK(sc); return; } idx = sc->vge_ldata.vge_tx_prodidx; pidx = idx - 1; if (pidx < 0) pidx = VGE_TX_DESC_CNT - 1; while (sc->vge_ldata.vge_tx_mbuf[idx] == NULL) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; if (vge_encap(sc, m_head, idx)) { IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |= htole16(VGE_TXDESC_Q); pidx = idx; VGE_TX_DESC_INC(idx); /* * If there's a BPF listener, bounce a copy of this frame * to him. */ ETHER_BPF_MTAP(ifp, m_head); } if (idx == sc->vge_ldata.vge_tx_prodidx) { VGE_UNLOCK(sc); return; } /* Flush the TX descriptors */ bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag, sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); /* Issue a transmit command. */ CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0); sc->vge_ldata.vge_tx_prodidx = idx; /* * Use the countdown timer for interrupt moderation. * 'TX done' interrupts are disabled. Instead, we reset the * countdown timer, which will begin counting until it hits * the value in the SSTIMER register, and then trigger an * interrupt. Each time we set the TIMER0_ENABLE bit, the * timer count is reloaded. Only when the transmitter * is idle will the timer hit 0 and an interrupt fire. */ CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE); VGE_UNLOCK(sc); /* * Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; return; } static void vge_init(xsc) void *xsc; { struct vge_softc *sc = xsc; struct ifnet *ifp = sc->vge_ifp; struct mii_data *mii; int i; VGE_LOCK(sc); mii = device_get_softc(sc->vge_miibus); /* * Cancel pending I/O and free all RX/TX buffers. */ vge_stop(sc); vge_reset(sc); /* * Initialize the RX and TX descriptors and mbufs. */ vge_rx_list_init(sc); vge_tx_list_init(sc); /* Set our station address */ for (i = 0; i < ETHER_ADDR_LEN; i++) CSR_WRITE_1(sc, VGE_PAR0 + i, IF_LLADDR(sc->vge_ifp)[i]); /* * Set receive FIFO threshold. Also allow transmission and * reception of VLAN tagged frames. */ CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT); CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2); /* Set DMA burst length */ CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN); CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128); CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK); /* Set collision backoff algorithm */ CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM| VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT); CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET); /* Disable LPSEL field in priority resolution */ CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS); /* * Load the addresses of the DMA queues into the chip. * Note that we only use one transmit queue.
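* Only queue 0's base address (VGE_TXDESC_ADDR_LO0) is programmed, and only its run/wakeup bits are ever set.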
*/ CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0, VGE_ADDR_LO(sc->vge_ldata.vge_tx_list_addr)); CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1); CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, VGE_ADDR_LO(sc->vge_ldata.vge_rx_list_addr)); CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1); CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT); /* Enable and wake up the RX descriptor queue */ CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); /* Enable the TX descriptor queue */ CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0); /* Set up the receive filter -- allow large frames for VLANs. */ CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT); /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) { CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC); } /* Set capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) { CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST); } /* Set multicast bit to capture multicast frames. */ if (ifp->if_flags & IFF_MULTICAST) { CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST); } /* Init the cam filter. */ vge_cam_clear(sc); /* Init the multicast filter. */ vge_setmulti(sc); /* Enable flow control */ CSR_WRITE_1(sc, VGE_CRS2, 0x8B); /* Enable jumbo frame reception (if desired) */ /* Start the MAC. */ CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP); CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL); CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START); /* * Configure one-shot timer for microsecond * resolution and load it for 500 usecs. */ CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES); CSR_WRITE_2(sc, VGE_SSTIMER, 400); /* * Configure interrupt moderation for receive. Enable * the holdoff counter and load it, and set the RX * suppression count to the number of descriptors we * want to allow before triggering an interrupt. * The holdoff timer is in units of 20 usecs. */ #ifdef notyet CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE); /* Select the interrupt holdoff timer page. */ CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF); CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */ /* Enable use of the holdoff timer. */ CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF); CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD); /* Select the RX suppression threshold page. */ CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR); CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */ /* Restore the page select bits. */ CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); #endif #ifdef DEVICE_POLLING /* * Disable interrupts if we are polling. */ if (ifp->if_capenable & IFCAP_POLLING) { CSR_WRITE_4(sc, VGE_IMR, 0); CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); } else /* otherwise ... */ #endif { /* * Enable interrupts. */ CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS); CSR_WRITE_4(sc, VGE_ISR, 0); CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); } mii_mediachg(mii); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->vge_if_flags = 0; sc->vge_link = 0; VGE_UNLOCK(sc); return; } /* * Set media options. */ static int vge_ifmedia_upd(ifp) struct ifnet *ifp; { struct vge_softc *sc; struct mii_data *mii; sc = ifp->if_softc; VGE_LOCK(sc); mii = device_get_softc(sc->vge_miibus); mii_mediachg(mii); VGE_UNLOCK(sc); return (0); } /* * Report current media status.
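* The state is refreshed from the PHY via mii_pollstat().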
*/ static void vge_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct vge_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->vge_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; return; } static void vge_miibus_statchg(dev) device_t dev; { struct vge_softc *sc; struct mii_data *mii; struct ifmedia_entry *ife; sc = device_get_softc(dev); mii = device_get_softc(sc->vge_miibus); ife = mii->mii_media.ifm_cur; /* * If the user manually selects a media mode, we need to turn * on the forced MAC mode bit in the DIAGCTL register. If the * user happens to choose a full duplex mode, we also need to * set the 'force full duplex' bit. This applies only to * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC * mode is disabled, and in 1000baseT mode, full duplex is * always implied, so we turn on the forced mode bit but leave * the FDX bit cleared. */ switch (IFM_SUBTYPE(ife->ifm_media)) { case IFM_AUTO: CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); break; case IFM_1000_T: CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); break; case IFM_100_TX: case IFM_10_T: CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) { CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); } else { CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); } break; default: device_printf(dev, "unknown media type: %x\n", IFM_SUBTYPE(ife->ifm_media)); break; } return; } static int vge_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct vge_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int error = 0; switch (command) { case SIOCSIFMTU: if (ifr->ifr_mtu > VGE_JUMBO_MTU) error = EINVAL; else ifp->if_mtu = ifr->ifr_mtu; break; case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->vge_if_flags & IFF_PROMISC)) { CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC); vge_setmulti(sc); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->vge_if_flags & IFF_PROMISC) { CSR_CLRBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC); vge_setmulti(sc); } else vge_init(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) vge_stop(sc); } sc->vge_if_flags = ifp->if_flags; break; case SIOCADDMULTI: case SIOCDELMULTI: vge_setmulti(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->vge_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: { int mask = ifr->ifr_reqcap ^ ifp->if_capenable; #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(vge_poll, ifp); if (error) return(error); VGE_LOCK(sc); /* Disable interrupts */ CSR_WRITE_4(sc, VGE_IMR, 0); CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); ifp->if_capenable |= IFCAP_POLLING; VGE_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); /* Enable interrupts.
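* (undo the masking that was done when polling was enabled).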
*/ VGE_LOCK(sc); CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS); CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); ifp->if_capenable &= ~IFCAP_POLLING; VGE_UNLOCK(sc); } } #endif /* DEVICE_POLLING */ if ((mask & IFCAP_TXCSUM) != 0 && (ifp->if_capabilities & IFCAP_TXCSUM) != 0) { ifp->if_capenable ^= IFCAP_TXCSUM; if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) ifp->if_hwassist |= VGE_CSUM_FEATURES; else ifp->if_hwassist &= ~VGE_CSUM_FEATURES; } if ((mask & IFCAP_RXCSUM) != 0 && (ifp->if_capabilities & IFCAP_RXCSUM) != 0) ifp->if_capenable ^= IFCAP_RXCSUM; } break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void vge_watchdog(ifp) struct ifnet *ifp; { struct vge_softc *sc; sc = ifp->if_softc; VGE_LOCK(sc); printf("vge%d: watchdog timeout\n", sc->vge_unit); ifp->if_oerrors++; vge_txeof(sc); vge_rxeof(sc); vge_init(sc); VGE_UNLOCK(sc); return; } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void vge_stop(sc) struct vge_softc *sc; { register int i; struct ifnet *ifp; VGE_LOCK(sc); ifp = sc->vge_ifp; ifp->if_timer = 0; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP); CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF); CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF); CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0); if (sc->vge_head != NULL) { m_freem(sc->vge_head); sc->vge_head = sc->vge_tail = NULL; } /* Free the TX list buffers. */ for (i = 0; i < VGE_TX_DESC_CNT; i++) { if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) { bus_dmamap_unload(sc->vge_ldata.vge_mtag, sc->vge_ldata.vge_tx_dmamap[i]); m_freem(sc->vge_ldata.vge_tx_mbuf[i]); sc->vge_ldata.vge_tx_mbuf[i] = NULL; } } /* Free the RX list buffers. */ for (i = 0; i < VGE_RX_DESC_CNT; i++) { if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) { bus_dmamap_unload(sc->vge_ldata.vge_mtag, sc->vge_ldata.vge_rx_dmamap[i]); m_freem(sc->vge_ldata.vge_rx_mbuf[i]); sc->vge_ldata.vge_rx_mbuf[i] = NULL; } } VGE_UNLOCK(sc); return; } /* * Device suspend routine. Stop the interface and save some PCI * settings in case the BIOS doesn't restore them properly on * resume. */ static int vge_suspend(dev) device_t dev; { struct vge_softc *sc; sc = device_get_softc(dev); vge_stop(sc); sc->suspended = 1; return (0); } /* * Device resume routine. Restore some PCI settings in case the BIOS * doesn't, re-enable busmastering, and restart the interface if * appropriate. */ static int vge_resume(dev) device_t dev; { struct vge_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->vge_ifp; /* reenable busmastering */ pci_enable_busmaster(dev); pci_enable_io(dev, SYS_RES_MEMORY); /* reinitialize interface if necessary */ if (ifp->if_flags & IFF_UP) vge_init(sc); sc->suspended = 0; return (0); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static int vge_shutdown(dev) device_t dev; { struct vge_softc *sc; sc = device_get_softc(dev); vge_stop(sc); return (0); } Index: head/sys/dev/vr/if_vr.c =================================================================== --- head/sys/dev/vr/if_vr.c (revision 193095) +++ head/sys/dev/vr/if_vr.c (revision 193096) @@ -1,2614 +1,2622 @@ /*- * Copyright (c) 1997, 1998 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * VIA Rhine fast ethernet PCI NIC driver * * Supports various network adapters based on the VIA Rhine * and Rhine II PCI controllers, including the D-Link DFE530TX. * Datasheets are available at http://www.via.com.tw. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The VIA Rhine controllers are similar in some respects to the * DEC tulip chips, except less complicated. The controller * uses an MII bus and an external physical layer interface. The * receiver has a one entry perfect filter and a 64-bit hash table * multicast filter. Transmit and receive descriptors are similar * to the tulip. * * Some Rhine chips have a serious flaw in their transmit DMA mechanism: * transmit buffers must be longword aligned. Unfortunately, * FreeBSD doesn't guarantee that mbufs will be filled in starting * at longword boundaries, so we have to do a buffer copy before * transmission. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" MODULE_DEPEND(vr, pci, 1, 1, 1); MODULE_DEPEND(vr, ether, 1, 1, 1); MODULE_DEPEND(vr, miibus, 1, 1, 1); /* Define to show Rx/Tx error status. */ #undef VR_SHOW_ERRORS #define VR_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) /* * Various supported device vendors/types, their names & quirks.
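* VR_Q_NEEDALIGN marks chips that require longword-aligned Tx buffers, VR_Q_CSUM marks hardware checksum offload support, and VR_Q_CAM marks chips with a multicast CAM filter.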
*/ #define VR_Q_NEEDALIGN (1<<0) #define VR_Q_CSUM (1<<1) #define VR_Q_CAM (1<<2) static struct vr_type { u_int16_t vr_vid; u_int16_t vr_did; int vr_quirks; char *vr_name; } vr_devs[] = { { VIA_VENDORID, VIA_DEVICEID_RHINE, VR_Q_NEEDALIGN, "VIA VT3043 Rhine I 10/100BaseTX" }, { VIA_VENDORID, VIA_DEVICEID_RHINE_II, VR_Q_NEEDALIGN, "VIA VT86C100A Rhine II 10/100BaseTX" }, { VIA_VENDORID, VIA_DEVICEID_RHINE_II_2, 0, "VIA VT6102 Rhine II 10/100BaseTX" }, { VIA_VENDORID, VIA_DEVICEID_RHINE_III, 0, "VIA VT6105 Rhine III 10/100BaseTX" }, { VIA_VENDORID, VIA_DEVICEID_RHINE_III_M, VR_Q_CSUM, "VIA VT6105M Rhine III 10/100BaseTX" }, { DELTA_VENDORID, DELTA_DEVICEID_RHINE_II, VR_Q_NEEDALIGN, "Delta Electronics Rhine II 10/100BaseTX" }, { ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II, VR_Q_NEEDALIGN, "Addtron Technology Rhine II 10/100BaseTX" }, { 0, 0, 0, NULL } }; static int vr_probe(device_t); static int vr_attach(device_t); static int vr_detach(device_t); static int vr_shutdown(device_t); static int vr_suspend(device_t); static int vr_resume(device_t); static void vr_dmamap_cb(void *, bus_dma_segment_t *, int, int); static int vr_dma_alloc(struct vr_softc *); static void vr_dma_free(struct vr_softc *); static __inline void vr_discard_rxbuf(struct vr_rxdesc *); static int vr_newbuf(struct vr_softc *, int); #ifndef __NO_STRICT_ALIGNMENT static __inline void vr_fixup_rx(struct mbuf *); #endif -static void vr_rxeof(struct vr_softc *); +static int vr_rxeof(struct vr_softc *); static void vr_txeof(struct vr_softc *); static void vr_tick(void *); static int vr_error(struct vr_softc *, uint16_t); static void vr_tx_underrun(struct vr_softc *); static void vr_intr(void *); static void vr_start(struct ifnet *); static void vr_start_locked(struct ifnet *); static int vr_encap(struct vr_softc *, struct mbuf **); static int vr_ioctl(struct ifnet *, u_long, caddr_t); static void vr_init(void *); static void vr_init_locked(struct vr_softc *); static void vr_tx_start(struct vr_softc *); static void vr_rx_start(struct vr_softc *); static int vr_tx_stop(struct vr_softc *); static int vr_rx_stop(struct vr_softc *); static void vr_stop(struct vr_softc *); static void vr_watchdog(struct vr_softc *); static int vr_ifmedia_upd(struct ifnet *); static void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int vr_miibus_readreg(device_t, int, int); static int vr_miibus_writereg(device_t, int, int, int); static void vr_miibus_statchg(device_t); static void vr_link_task(void *, int); static void vr_cam_mask(struct vr_softc *, uint32_t, int); static int vr_cam_data(struct vr_softc *, int, int, uint8_t *); static void vr_set_filter(struct vr_softc *); static void vr_reset(const struct vr_softc *); static int vr_tx_ring_init(struct vr_softc *); static int vr_rx_ring_init(struct vr_softc *); static void vr_setwol(struct vr_softc *); static void vr_clrwol(struct vr_softc *); static int vr_sysctl_stats(SYSCTL_HANDLER_ARGS); static struct vr_tx_threshold_table { int tx_cfg; int bcr_cfg; int value; } vr_tx_threshold_tables[] = { { VR_TXTHRESH_64BYTES, VR_BCR1_TXTHRESH64BYTES, 64 }, { VR_TXTHRESH_128BYTES, VR_BCR1_TXTHRESH128BYTES, 128 }, { VR_TXTHRESH_256BYTES, VR_BCR1_TXTHRESH256BYTES, 256 }, { VR_TXTHRESH_512BYTES, VR_BCR1_TXTHRESH512BYTES, 512 }, { VR_TXTHRESH_1024BYTES, VR_BCR1_TXTHRESH1024BYTES, 1024 }, { VR_TXTHRESH_STORENFWD, VR_BCR1_TXTHRESHSTORENFWD, 2048 } }; static device_method_t vr_methods[] = { /* Device interface */ DEVMETHOD(device_probe, vr_probe), DEVMETHOD(device_attach, vr_attach), DEVMETHOD(device_detach, 
vr_detach), DEVMETHOD(device_shutdown, vr_shutdown), DEVMETHOD(device_suspend, vr_suspend), DEVMETHOD(device_resume, vr_resume), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, vr_miibus_readreg), DEVMETHOD(miibus_writereg, vr_miibus_writereg), DEVMETHOD(miibus_statchg, vr_miibus_statchg), DEVMETHOD(miibus_linkchg, vr_miibus_statchg), { NULL, NULL } }; static driver_t vr_driver = { "vr", vr_methods, sizeof(struct vr_softc) }; static devclass_t vr_devclass; DRIVER_MODULE(vr, pci, vr_driver, vr_devclass, 0, 0); DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0); static int vr_miibus_readreg(device_t dev, int phy, int reg) { struct vr_softc *sc; int i; sc = device_get_softc(dev); if (sc->vr_phyaddr != phy) return (0); /* Set the register address. */ CSR_WRITE_1(sc, VR_MIIADDR, reg); VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB); for (i = 0; i < VR_MII_TIMEOUT; i++) { DELAY(1); if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0) break; } if (i == VR_MII_TIMEOUT) device_printf(sc->vr_dev, "phy read timeout %d:%d\n", phy, reg); return (CSR_READ_2(sc, VR_MIIDATA)); } static int vr_miibus_writereg(device_t dev, int phy, int reg, int data) { struct vr_softc *sc; int i; sc = device_get_softc(dev); if (sc->vr_phyaddr != phy) return (0); /* Set the register address and data to write. */ CSR_WRITE_1(sc, VR_MIIADDR, reg); CSR_WRITE_2(sc, VR_MIIDATA, data); VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB); for (i = 0; i < VR_MII_TIMEOUT; i++) { DELAY(1); if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0) break; } if (i == VR_MII_TIMEOUT) device_printf(sc->vr_dev, "phy write timeout %d:%d\n", phy, reg); return (0); } static void vr_miibus_statchg(device_t dev) { struct vr_softc *sc; sc = device_get_softc(dev); taskqueue_enqueue(taskqueue_swi, &sc->vr_link_task); } /* * In order to fiddle with the * 'full-duplex' and '100Mbps' bits in the netconfig register, we * first have to put the transmit and/or receive logic in the idle state. */ static void vr_link_task(void *arg, int pending) { struct vr_softc *sc; struct mii_data *mii; struct ifnet *ifp; int lfdx, mfdx; uint8_t cr0, cr1, fc; sc = (struct vr_softc *)arg; VR_LOCK(sc); mii = device_get_softc(sc->vr_miibus); ifp = sc->vr_ifp; if (mii == NULL || ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { VR_UNLOCK(sc); return; } if (mii->mii_media_status & IFM_ACTIVE) { if (IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) sc->vr_link = 1; } else sc->vr_link = 0; if (sc->vr_link != 0) { cr0 = CSR_READ_1(sc, VR_CR0); cr1 = CSR_READ_1(sc, VR_CR1); mfdx = (cr1 & VR_CR1_FULLDUPLEX) != 0; lfdx = (IFM_OPTIONS(mii->mii_media_active) & IFM_FDX) != 0; if (mfdx != lfdx) { if ((cr0 & (VR_CR0_TX_ON | VR_CR0_RX_ON)) != 0) { if (vr_tx_stop(sc) != 0 || vr_rx_stop(sc) != 0) { device_printf(sc->vr_dev, "%s: Tx/Rx shutdown error -- " "resetting\n", __func__); sc->vr_flags |= VR_F_RESTART; VR_UNLOCK(sc); return; } } if (lfdx) cr1 |= VR_CR1_FULLDUPLEX; else cr1 &= ~VR_CR1_FULLDUPLEX; CSR_WRITE_1(sc, VR_CR1, cr1); } fc = 0; #ifdef notyet /* Configure flow-control. 
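* VT6105 and newer have separate Tx/Rx pause controls in FLOWCR1; VT6102 (Rhine II) only supports Rx pause via MISC_CR0.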
*/ if (sc->vr_revid >= REV_ID_VT6105_A0) { fc = CSR_READ_1(sc, VR_FLOWCR1); fc &= ~(VR_FLOWCR1_TXPAUSE | VR_FLOWCR1_RXPAUSE); if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) fc |= VR_FLOWCR1_RXPAUSE; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) fc |= VR_FLOWCR1_TXPAUSE; CSR_WRITE_1(sc, VR_FLOWCR1, fc); } else if (sc->vr_revid >= REV_ID_VT6102_A) { /* No Tx pause capability available for Rhine II. */ fc = CSR_READ_1(sc, VR_MISC_CR0); fc &= ~VR_MISCCR0_RXPAUSE; if ((IFM_OPTIONS(mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) fc |= VR_MISCCR0_RXPAUSE; CSR_WRITE_1(sc, VR_MISC_CR0, fc); } #endif vr_rx_start(sc); vr_tx_start(sc); } else { if (vr_tx_stop(sc) != 0 || vr_rx_stop(sc) != 0) { device_printf(sc->vr_dev, "%s: Tx/Rx shutdown error -- resetting\n", __func__); sc->vr_flags |= VR_F_RESTART; VR_UNLOCK(sc); return; } } VR_UNLOCK(sc); } static void vr_cam_mask(struct vr_softc *sc, uint32_t mask, int type) { if (type == VR_MCAST_CAM) CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST); else CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN); CSR_WRITE_4(sc, VR_CAMMASK, mask); CSR_WRITE_1(sc, VR_CAMCTL, 0); } static int vr_cam_data(struct vr_softc *sc, int type, int idx, uint8_t *mac) { int i; if (type == VR_MCAST_CAM) { if (idx < 0 || idx >= VR_CAM_MCAST_CNT || mac == NULL) return (EINVAL); CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_MCAST); } else CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_VLAN); /* Set CAM entry address. */ CSR_WRITE_1(sc, VR_CAMADDR, idx); /* Set CAM entry data. */ if (type == VR_MCAST_CAM) { for (i = 0; i < ETHER_ADDR_LEN; i++) CSR_WRITE_1(sc, VR_MCAM0 + i, mac[i]); } else { CSR_WRITE_1(sc, VR_VCAM0, mac[0]); CSR_WRITE_1(sc, VR_VCAM1, mac[1]); } DELAY(10); /* Write CAM and wait for self-clear of VR_CAMCTL_WRITE bit. */ CSR_WRITE_1(sc, VR_CAMCTL, VR_CAMCTL_ENA | VR_CAMCTL_WRITE); for (i = 0; i < VR_TIMEOUT; i++) { DELAY(1); if ((CSR_READ_1(sc, VR_CAMCTL) & VR_CAMCTL_WRITE) == 0) break; } if (i == VR_TIMEOUT) device_printf(sc->vr_dev, "%s: setting CAM filter timeout!\n", __func__); CSR_WRITE_1(sc, VR_CAMCTL, 0); return (i == VR_TIMEOUT ? ETIMEDOUT : 0); } /* * Program the 64-bit multicast hash filter. */ static void vr_set_filter(struct vr_softc *sc) { struct ifnet *ifp; int h; uint32_t hashes[2] = { 0, 0 }; struct ifmultiaddr *ifma; uint8_t rxfilt; int error, mcnt; uint32_t cam_mask; VR_LOCK_ASSERT(sc); ifp = sc->vr_ifp; rxfilt = CSR_READ_1(sc, VR_RXCFG); rxfilt &= ~(VR_RXCFG_RX_PROMISC | VR_RXCFG_RX_BROAD | VR_RXCFG_RX_MULTI); if (ifp->if_flags & IFF_BROADCAST) rxfilt |= VR_RXCFG_RX_BROAD; if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { rxfilt |= VR_RXCFG_RX_MULTI; if (ifp->if_flags & IFF_PROMISC) rxfilt |= VR_RXCFG_RX_PROMISC; CSR_WRITE_1(sc, VR_RXCFG, rxfilt); CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF); CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF); return; } /* Now program new ones. */ error = 0; mcnt = 0; IF_ADDR_LOCK(ifp); if ((sc->vr_quirks & VR_Q_CAM) != 0) { /* * For hardware that has CAM capability, use * the 32-entry multicast perfect filter.
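* Each successfully programmed entry sets its bit in cam_mask; on any failure the code falls back to the hash filter below.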
*/ cam_mask = 0; TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; error = vr_cam_data(sc, VR_MCAST_CAM, mcnt, LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); if (error != 0) { cam_mask = 0; break; } cam_mask |= 1 << mcnt; mcnt++; } vr_cam_mask(sc, VR_MCAST_CAM, cam_mask); } if ((sc->vr_quirks & VR_Q_CAM) == 0 || error != 0) { /* * If there are too many multicast addresses or * setting multicast CAM filter failed, use hash * table based filtering. */ mcnt = 0; TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); mcnt++; } } IF_ADDR_UNLOCK(ifp); if (mcnt > 0) rxfilt |= VR_RXCFG_RX_MULTI; CSR_WRITE_4(sc, VR_MAR0, hashes[0]); CSR_WRITE_4(sc, VR_MAR1, hashes[1]); CSR_WRITE_1(sc, VR_RXCFG, rxfilt); } static void vr_reset(const struct vr_softc *sc) { int i; /*VR_LOCK_ASSERT(sc);*/ /* XXX: Called during attach w/o lock. */ CSR_WRITE_1(sc, VR_CR1, VR_CR1_RESET); if (sc->vr_revid < REV_ID_VT6102_A) { /* VT86C100A needs more delay after reset. */ DELAY(100); } for (i = 0; i < VR_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_1(sc, VR_CR1) & VR_CR1_RESET)) break; } if (i == VR_TIMEOUT) { if (sc->vr_revid < REV_ID_VT6102_A) device_printf(sc->vr_dev, "reset never completed!\n"); else { /* Use newer force reset command. */ device_printf(sc->vr_dev, "Using force reset command.\n"); VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST); /* * Wait a little while for the chip to get its brains * in order. */ DELAY(2000); } } } /* * Probe for a VIA Rhine chip. Check the PCI vendor and device * IDs against our list and return a match or NULL */ static struct vr_type * vr_match(device_t dev) { struct vr_type *t = vr_devs; for (t = vr_devs; t->vr_name != NULL; t++) if ((pci_get_vendor(dev) == t->vr_vid) && (pci_get_device(dev) == t->vr_did)) return (t); return (NULL); } /* * Probe for a VIA Rhine chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int vr_probe(device_t dev) { struct vr_type *t; t = vr_match(dev); if (t != NULL) { device_set_desc(dev, t->vr_name); return (BUS_PROBE_DEFAULT); } return (ENXIO); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int vr_attach(device_t dev) { struct vr_softc *sc; struct ifnet *ifp; struct vr_type *t; uint8_t eaddr[ETHER_ADDR_LEN]; int error, rid; int i, pmc; sc = device_get_softc(dev); sc->vr_dev = dev; t = vr_match(dev); KASSERT(t != NULL, ("Lost if_vr device match")); sc->vr_quirks = t->vr_quirks; device_printf(dev, "Quirks: 0x%x\n", sc->vr_quirks); mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->vr_stat_callout, &sc->vr_mtx, 0); TASK_INIT(&sc->vr_link_task, 0, vr_link_task, sc); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW, sc, 0, vr_sysctl_stats, "I", "Statistics"); error = 0; /* * Map control/status registers. 
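* The Rhine's registers are reached through the I/O port BAR, PCIR_BAR(0).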
*/ pci_enable_busmaster(dev); sc->vr_revid = pci_get_revid(dev); device_printf(dev, "Revision: 0x%x\n", sc->vr_revid); sc->vr_res_id = PCIR_BAR(0); sc->vr_res_type = SYS_RES_IOPORT; sc->vr_res = bus_alloc_resource_any(dev, sc->vr_res_type, &sc->vr_res_id, RF_ACTIVE); if (sc->vr_res == NULL) { device_printf(dev, "couldn't map ports\n"); error = ENXIO; goto fail; } /* Allocate interrupt. */ rid = 0; sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->vr_irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } /* Allocate ifnet structure. */ ifp = sc->vr_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "couldn't allocate ifnet structure\n"); error = ENOSPC; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = vr_ioctl; ifp->if_start = vr_start; ifp->if_init = vr_init; IFQ_SET_MAXLEN(&ifp->if_snd, VR_TX_RING_CNT - 1); ifp->if_snd.ifq_maxlen = VR_TX_RING_CNT - 1; IFQ_SET_READY(&ifp->if_snd); /* Configure Tx FIFO threshold. */ sc->vr_txthresh = VR_TXTHRESH_MIN; if (sc->vr_revid < REV_ID_VT6105_A0) { /* * Use store and forward mode for Rhine I/II. * Otherwise they produce a lot of Tx underruns and * it would take a while to get working FIFO threshold * value. */ sc->vr_txthresh = VR_TXTHRESH_MAX; } if ((sc->vr_quirks & VR_Q_CSUM) != 0) { ifp->if_hwassist = VR_CSUM_FEATURES; ifp->if_capabilities |= IFCAP_HWCSUM; /* * To update checksum field the hardware may need to * store entire frames into FIFO before transmitting. */ sc->vr_txthresh = VR_TXTHRESH_MAX; } if (sc->vr_revid >= REV_ID_VT6102_A && pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) ifp->if_capabilities |= IFCAP_WOL_UCAST | IFCAP_WOL_MAGIC; /* Rhine supports oversized VLAN frame. */ ifp->if_capabilities |= IFCAP_VLAN_MTU; ifp->if_capenable = ifp->if_capabilities; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif /* * Windows may put the chip in suspend mode when it * shuts down. Be sure to kick it in the head to wake it * up again. */ if (pci_find_extcap(dev, PCIY_PMG, &pmc) == 0) VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1)); /* * Get station address. The way the Rhine chips work, * you're not allowed to directly access the EEPROM once * they've been programmed a special way. Consequently, * we need to read the node address from the PAR0 and PAR1 * registers. * Reloading EEPROM also overwrites VR_CFGA, VR_CFGB, * VR_CFGC and VR_CFGD such that memory mapped IO configured * by driver is reset to default state. */ VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD); for (i = VR_TIMEOUT; i > 0; i--) { DELAY(1); if ((CSR_READ_1(sc, VR_EECSR) & VR_EECSR_LOAD) == 0) break; } if (i == 0) device_printf(dev, "Reloading EEPROM timeout!\n"); for (i = 0; i < ETHER_ADDR_LEN; i++) eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i); /* Reset the adapter. */ vr_reset(sc); /* Ack intr & disable further interrupts. */ CSR_WRITE_2(sc, VR_ISR, 0xFFFF); CSR_WRITE_2(sc, VR_IMR, 0); if (sc->vr_revid >= REV_ID_VT6102_A) CSR_WRITE_2(sc, VR_MII_IMR, 0); if (sc->vr_revid < REV_ID_VT6102_A) { pci_write_config(dev, VR_PCI_MODE2, pci_read_config(dev, VR_PCI_MODE2, 1) | VR_MODE2_MODE10T, 1); } else { /* Report error instead of retrying forever. */ pci_write_config(dev, VR_PCI_MODE2, pci_read_config(dev, VR_PCI_MODE2, 1) | VR_MODE2_PCEROPT, 1); /* Detect MII coding error. 
*/ pci_write_config(dev, VR_PCI_MODE3, pci_read_config(dev, VR_PCI_MODE3, 1) | VR_MODE3_MIION, 1); if (sc->vr_revid >= REV_ID_VT6105_LOM && sc->vr_revid < REV_ID_VT6105M_A0) pci_write_config(dev, VR_PCI_MODE2, pci_read_config(dev, VR_PCI_MODE2, 1) | VR_MODE2_MODE10T, 1); /* Enable Memory-Read-Multiple. */ if (sc->vr_revid >= REV_ID_VT6107_A1 && sc->vr_revid < REV_ID_VT6105M_A0) pci_write_config(dev, VR_PCI_MODE2, pci_read_config(dev, VR_PCI_MODE2, 1) | VR_MODE2_MRDPL, 1); } /* Disable MII AUTOPOLL. */ VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL); if (vr_dma_alloc(sc) != 0) { error = ENXIO; goto fail; } /* Save PHY address. */ if (sc->vr_revid >= REV_ID_VT6105_A0) sc->vr_phyaddr = 1; else sc->vr_phyaddr = CSR_READ_1(sc, VR_PHYADDR) & VR_PHYADDR_MASK; /* Do MII setup. */ if (mii_phy_probe(dev, &sc->vr_miibus, vr_ifmedia_upd, vr_ifmedia_sts)) { device_printf(dev, "MII without any phy!\n"); error = ENXIO; goto fail; } /* Call MI attach routine. */ ether_ifattach(ifp, eaddr); /* * Tell the upper layer(s) we support long frames. * Must appear after the call to ether_ifattach() because * ether_ifattach() sets ifi_hdrlen to the default value. */ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); /* Hook interrupt last to avoid having to lock softc. */ error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, vr_intr, sc, &sc->vr_intrhand); if (error) { device_printf(dev, "couldn't set up irq\n"); ether_ifdetach(ifp); goto fail; } fail: if (error) vr_detach(dev); return (error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. */ static int vr_detach(device_t dev) { struct vr_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->vr_ifp; KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized")); #ifdef DEVICE_POLLING if (ifp != NULL && ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif /* These should only be active if attach succeeded. */ if (device_is_attached(dev)) { VR_LOCK(sc); sc->vr_detach = 1; vr_stop(sc); VR_UNLOCK(sc); callout_drain(&sc->vr_stat_callout); taskqueue_drain(taskqueue_swi, &sc->vr_link_task); ether_ifdetach(ifp); } if (sc->vr_miibus) device_delete_child(dev, sc->vr_miibus); bus_generic_detach(dev); if (sc->vr_intrhand) bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand); if (sc->vr_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq); if (sc->vr_res) bus_release_resource(dev, sc->vr_res_type, sc->vr_res_id, sc->vr_res); if (ifp) if_free(ifp); vr_dma_free(sc); mtx_destroy(&sc->vr_mtx); return (0); } struct vr_dmamap_arg { bus_addr_t vr_busaddr; }; static void vr_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct vr_dmamap_arg *ctx; if (error != 0) return; ctx = arg; ctx->vr_busaddr = segs[0].ds_addr; } static int vr_dma_alloc(struct vr_softc *sc) { struct vr_dmamap_arg ctx; struct vr_txdesc *txd; struct vr_rxdesc *rxd; bus_size_t tx_alignment; int error, i; /* Create parent DMA tag. 
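 *
 * Every tag created below descends from this parent, so constraints set
 * here -- notably the BUS_SPACE_MAXADDR_32BIT lowaddr bound -- apply to
 * all ring and buffer allocations; the child tags then tighten alignment
 * (VR_RING_ALIGN, VR_RX_ALIGN) and sizing for their particular use. For
 * instance, the Tx buffer tag permits up to VR_MAXFRAGS segments of at
 * most MCLBYTES each, which is exactly the shape of the segment array
 * that vr_encap() later passes to bus_dmamap_load_mbuf_sg().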
*/ error = bus_dma_tag_create( bus_get_dma_tag(sc->vr_dev), /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 0, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->vr_cdata.vr_parent_tag); if (error != 0) { device_printf(sc->vr_dev, "failed to create parent DMA tag\n"); goto fail; } /* Create tag for Tx ring. */ error = bus_dma_tag_create( sc->vr_cdata.vr_parent_tag, /* parent */ VR_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ VR_TX_RING_SIZE, /* maxsize */ 1, /* nsegments */ VR_TX_RING_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->vr_cdata.vr_tx_ring_tag); if (error != 0) { device_printf(sc->vr_dev, "failed to create Tx ring DMA tag\n"); goto fail; } /* Create tag for Rx ring. */ error = bus_dma_tag_create( sc->vr_cdata.vr_parent_tag, /* parent */ VR_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ VR_RX_RING_SIZE, /* maxsize */ 1, /* nsegments */ VR_RX_RING_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->vr_cdata.vr_rx_ring_tag); if (error != 0) { device_printf(sc->vr_dev, "failed to create Rx ring DMA tag\n"); goto fail; } if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0) tx_alignment = sizeof(uint32_t); else tx_alignment = 1; /* Create tag for Tx buffers. */ error = bus_dma_tag_create( sc->vr_cdata.vr_parent_tag, /* parent */ tx_alignment, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES * VR_MAXFRAGS, /* maxsize */ VR_MAXFRAGS, /* nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->vr_cdata.vr_tx_tag); if (error != 0) { device_printf(sc->vr_dev, "failed to create Tx DMA tag\n"); goto fail; } /* Create tag for Rx buffers. */ error = bus_dma_tag_create( sc->vr_cdata.vr_parent_tag, /* parent */ VR_RX_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, /* maxsize */ 1, /* nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->vr_cdata.vr_rx_tag); if (error != 0) { device_printf(sc->vr_dev, "failed to create Rx DMA tag\n"); goto fail; } /* Allocate DMA'able memory and load the DMA map for Tx ring. */ error = bus_dmamem_alloc(sc->vr_cdata.vr_tx_ring_tag, (void **)&sc->vr_rdata.vr_tx_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_tx_ring_map); if (error != 0) { device_printf(sc->vr_dev, "failed to allocate DMA'able memory for Tx ring\n"); goto fail; } ctx.vr_busaddr = 0; error = bus_dmamap_load(sc->vr_cdata.vr_tx_ring_tag, sc->vr_cdata.vr_tx_ring_map, sc->vr_rdata.vr_tx_ring, VR_TX_RING_SIZE, vr_dmamap_cb, &ctx, 0); if (error != 0 || ctx.vr_busaddr == 0) { device_printf(sc->vr_dev, "failed to load DMA'able memory for Tx ring\n"); goto fail; } sc->vr_rdata.vr_tx_ring_paddr = ctx.vr_busaddr; /* Allocate DMA'able memory and load the DMA map for Rx ring. 
*/ error = bus_dmamem_alloc(sc->vr_cdata.vr_rx_ring_tag, (void **)&sc->vr_rdata.vr_rx_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->vr_cdata.vr_rx_ring_map); if (error != 0) { device_printf(sc->vr_dev, "failed to allocate DMA'able memory for Rx ring\n"); goto fail; } ctx.vr_busaddr = 0; error = bus_dmamap_load(sc->vr_cdata.vr_rx_ring_tag, sc->vr_cdata.vr_rx_ring_map, sc->vr_rdata.vr_rx_ring, VR_RX_RING_SIZE, vr_dmamap_cb, &ctx, 0); if (error != 0 || ctx.vr_busaddr == 0) { device_printf(sc->vr_dev, "failed to load DMA'able memory for Rx ring\n"); goto fail; } sc->vr_rdata.vr_rx_ring_paddr = ctx.vr_busaddr; /* Create DMA maps for Tx buffers. */ for (i = 0; i < VR_TX_RING_CNT; i++) { txd = &sc->vr_cdata.vr_txdesc[i]; txd->tx_m = NULL; txd->tx_dmamap = NULL; error = bus_dmamap_create(sc->vr_cdata.vr_tx_tag, 0, &txd->tx_dmamap); if (error != 0) { device_printf(sc->vr_dev, "failed to create Tx dmamap\n"); goto fail; } } /* Create DMA maps for Rx buffers. */ if ((error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0, &sc->vr_cdata.vr_rx_sparemap)) != 0) { device_printf(sc->vr_dev, "failed to create spare Rx dmamap\n"); goto fail; } for (i = 0; i < VR_RX_RING_CNT; i++) { rxd = &sc->vr_cdata.vr_rxdesc[i]; rxd->rx_m = NULL; rxd->rx_dmamap = NULL; error = bus_dmamap_create(sc->vr_cdata.vr_rx_tag, 0, &rxd->rx_dmamap); if (error != 0) { device_printf(sc->vr_dev, "failed to create Rx dmamap\n"); goto fail; } } fail: return (error); } static void vr_dma_free(struct vr_softc *sc) { struct vr_txdesc *txd; struct vr_rxdesc *rxd; int i; /* Tx ring. */ if (sc->vr_cdata.vr_tx_ring_tag) { if (sc->vr_cdata.vr_tx_ring_map) bus_dmamap_unload(sc->vr_cdata.vr_tx_ring_tag, sc->vr_cdata.vr_tx_ring_map); if (sc->vr_cdata.vr_tx_ring_map && sc->vr_rdata.vr_tx_ring) bus_dmamem_free(sc->vr_cdata.vr_tx_ring_tag, sc->vr_rdata.vr_tx_ring, sc->vr_cdata.vr_tx_ring_map); sc->vr_rdata.vr_tx_ring = NULL; sc->vr_cdata.vr_tx_ring_map = NULL; bus_dma_tag_destroy(sc->vr_cdata.vr_tx_ring_tag); sc->vr_cdata.vr_tx_ring_tag = NULL; } /* Rx ring. */ if (sc->vr_cdata.vr_rx_ring_tag) { if (sc->vr_cdata.vr_rx_ring_map) bus_dmamap_unload(sc->vr_cdata.vr_rx_ring_tag, sc->vr_cdata.vr_rx_ring_map); if (sc->vr_cdata.vr_rx_ring_map && sc->vr_rdata.vr_rx_ring) bus_dmamem_free(sc->vr_cdata.vr_rx_ring_tag, sc->vr_rdata.vr_rx_ring, sc->vr_cdata.vr_rx_ring_map); sc->vr_rdata.vr_rx_ring = NULL; sc->vr_cdata.vr_rx_ring_map = NULL; bus_dma_tag_destroy(sc->vr_cdata.vr_rx_ring_tag); sc->vr_cdata.vr_rx_ring_tag = NULL; } /* Tx buffers. */ if (sc->vr_cdata.vr_tx_tag) { for (i = 0; i < VR_TX_RING_CNT; i++) { txd = &sc->vr_cdata.vr_txdesc[i]; if (txd->tx_dmamap) { bus_dmamap_destroy(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap); txd->tx_dmamap = NULL; } } bus_dma_tag_destroy(sc->vr_cdata.vr_tx_tag); sc->vr_cdata.vr_tx_tag = NULL; } /* Rx buffers. */ if (sc->vr_cdata.vr_rx_tag) { for (i = 0; i < VR_RX_RING_CNT; i++) { rxd = &sc->vr_cdata.vr_rxdesc[i]; if (rxd->rx_dmamap) { bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap); rxd->rx_dmamap = NULL; } } if (sc->vr_cdata.vr_rx_sparemap) { bus_dmamap_destroy(sc->vr_cdata.vr_rx_tag, sc->vr_cdata.vr_rx_sparemap); sc->vr_cdata.vr_rx_sparemap = 0; } bus_dma_tag_destroy(sc->vr_cdata.vr_rx_tag); sc->vr_cdata.vr_rx_tag = NULL; } if (sc->vr_cdata.vr_parent_tag) { bus_dma_tag_destroy(sc->vr_cdata.vr_parent_tag); sc->vr_cdata.vr_parent_tag = NULL; } } /* * Initialize the transmit descriptors. 
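 *
 * A note on the ring layout built below: each descriptor's vr_nextphys
 * holds the bus address of the next slot, with the final slot pointing
 * back at slot 0, so the chip can walk the ring without CPU involvement.
 * VR_TX_RING_ADDR(sc, i) presumably reduces to something like
 *
 *	sc->vr_rdata.vr_tx_ring_paddr + i * sizeof(struct vr_desc)
 *
 * (the macro lives in the register header, not in this file), which is
 * why loading the ring memory once is enough to know every slot's link
 * address.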
*/ static int vr_tx_ring_init(struct vr_softc *sc) { struct vr_ring_data *rd; struct vr_txdesc *txd; bus_addr_t addr; int i; sc->vr_cdata.vr_tx_prod = 0; sc->vr_cdata.vr_tx_cons = 0; sc->vr_cdata.vr_tx_cnt = 0; sc->vr_cdata.vr_tx_pkts = 0; rd = &sc->vr_rdata; bzero(rd->vr_tx_ring, VR_TX_RING_SIZE); for (i = 0; i < VR_TX_RING_CNT; i++) { if (i == VR_TX_RING_CNT - 1) addr = VR_TX_RING_ADDR(sc, 0); else addr = VR_TX_RING_ADDR(sc, i + 1); rd->vr_tx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr)); txd = &sc->vr_cdata.vr_txdesc[i]; txd->tx_m = NULL; } bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, sc->vr_cdata.vr_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } /* * Initialize the RX descriptors and allocate mbufs for them. Note that * we arrange the descriptors in a closed ring, so that the last descriptor * points back to the first. */ static int vr_rx_ring_init(struct vr_softc *sc) { struct vr_ring_data *rd; struct vr_rxdesc *rxd; bus_addr_t addr; int i; sc->vr_cdata.vr_rx_cons = 0; rd = &sc->vr_rdata; bzero(rd->vr_rx_ring, VR_RX_RING_SIZE); for (i = 0; i < VR_RX_RING_CNT; i++) { rxd = &sc->vr_cdata.vr_rxdesc[i]; rxd->rx_m = NULL; rxd->desc = &rd->vr_rx_ring[i]; if (i == VR_RX_RING_CNT - 1) addr = VR_RX_RING_ADDR(sc, 0); else addr = VR_RX_RING_ADDR(sc, i + 1); rd->vr_rx_ring[i].vr_nextphys = htole32(VR_ADDR_LO(addr)); if (vr_newbuf(sc, i) != 0) return (ENOBUFS); } bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag, sc->vr_cdata.vr_rx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } static __inline void vr_discard_rxbuf(struct vr_rxdesc *rxd) { struct vr_desc *desc; desc = rxd->desc; desc->vr_ctl = htole32(VR_RXCTL | (MCLBYTES - sizeof(uint64_t))); desc->vr_status = htole32(VR_RXSTAT_OWN); } /* * Initialize an RX descriptor and attach an MBUF cluster. * Note: the length fields are only 11 bits wide, which means the * largest size we can specify is 2047. This is important because * MCLBYTES is 2048, so we have to subtract one otherwise we'll * overflow the field and make a mess. */ static int vr_newbuf(struct vr_softc *sc, int idx) { struct vr_desc *desc; struct vr_rxdesc *rxd; struct mbuf *m; bus_dma_segment_t segs[1]; bus_dmamap_t map; int nsegs; m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MCLBYTES; m_adj(m, sizeof(uint64_t)); if (bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_rx_tag, sc->vr_cdata.vr_rx_sparemap, m, segs, &nsegs, 0) != 0) { m_freem(m); return (ENOBUFS); } KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); rxd = &sc->vr_cdata.vr_rxdesc[idx]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap); } map = rxd->rx_dmamap; rxd->rx_dmamap = sc->vr_cdata.vr_rx_sparemap; sc->vr_cdata.vr_rx_sparemap = map; bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_PREREAD); rxd->rx_m = m; desc = rxd->desc; desc->vr_data = htole32(VR_ADDR_LO(segs[0].ds_addr)); desc->vr_ctl = htole32(VR_RXCTL | segs[0].ds_len); desc->vr_status = htole32(VR_RXSTAT_OWN); return (0); } #ifndef __NO_STRICT_ALIGNMENT static __inline void vr_fixup_rx(struct mbuf *m) { uint16_t *src, *dst; int i; src = mtod(m, uint16_t *); dst = src - 1; for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) *dst++ = *src++; m->m_data -= ETHER_ALIGN; } #endif /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. 
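 *
 * A note on vr_fixup_rx() above for strict-alignment machines: the chip
 * must DMA into a 32-bit aligned buffer, but a 14-byte Ethernet header
 * at an aligned address leaves the IP header misaligned. The fixup
 * therefore slides the whole frame ETHER_ALIGN (2) bytes toward the
 * buffer start with an overlapping, ascending 16-bit copy:
 *
 *	src = mtod(m, uint16_t *);
 *	dst = src - 1;
 *	for (i = 0; i < words; i++)
 *		*dst++ = *src++;
 *	m->m_data -= ETHER_ALIGN;
 *
 * The copy is safe despite the overlap because dst always trails src;
 * afterwards the payload beyond the Ethernet header is 32-bit aligned.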
*/ -static void +static int vr_rxeof(struct vr_softc *sc) { struct vr_rxdesc *rxd; struct mbuf *m; struct ifnet *ifp; struct vr_desc *cur_rx; - int cons, prog, total_len; + int cons, prog, total_len, rx_npkts; uint32_t rxstat, rxctl; VR_LOCK_ASSERT(sc); ifp = sc->vr_ifp; cons = sc->vr_cdata.vr_rx_cons; + rx_npkts = 0; bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag, sc->vr_cdata.vr_rx_ring_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (prog = 0; prog < VR_RX_RING_CNT; VR_INC(cons, VR_RX_RING_CNT)) { #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { if (sc->rxcycles <= 0) break; sc->rxcycles--; } #endif cur_rx = &sc->vr_rdata.vr_rx_ring[cons]; rxstat = le32toh(cur_rx->vr_status); rxctl = le32toh(cur_rx->vr_ctl); if ((rxstat & VR_RXSTAT_OWN) == VR_RXSTAT_OWN) break; prog++; rxd = &sc->vr_cdata.vr_rxdesc[cons]; m = rxd->rx_m; /* * If an error occurs, update stats, clear the * status word and leave the mbuf cluster in place: * it should simply get re-used next time this descriptor * comes up in the ring. * We don't support SG in Rx path yet, so discard * partial frame. */ if ((rxstat & VR_RXSTAT_RX_OK) == 0 || (rxstat & (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) != (VR_RXSTAT_FIRSTFRAG | VR_RXSTAT_LASTFRAG)) { ifp->if_ierrors++; sc->vr_stat.rx_errors++; if (rxstat & VR_RXSTAT_CRCERR) sc->vr_stat.rx_crc_errors++; if (rxstat & VR_RXSTAT_FRAMEALIGNERR) sc->vr_stat.rx_alignment++; if (rxstat & VR_RXSTAT_FIFOOFLOW) sc->vr_stat.rx_fifo_overflows++; if (rxstat & VR_RXSTAT_GIANT) sc->vr_stat.rx_giants++; if (rxstat & VR_RXSTAT_RUNT) sc->vr_stat.rx_runts++; if (rxstat & VR_RXSTAT_BUFFERR) sc->vr_stat.rx_no_buffers++; #ifdef VR_SHOW_ERRORS device_printf(sc->vr_dev, "%s: receive error = 0x%b\n", __func__, rxstat & 0xff, VR_RXSTAT_ERR_BITS); #endif vr_discard_rxbuf(rxd); continue; } if (vr_newbuf(sc, cons) != 0) { ifp->if_iqdrops++; sc->vr_stat.rx_errors++; sc->vr_stat.rx_no_mbufs++; vr_discard_rxbuf(rxd); continue; } /* * XXX The VIA Rhine chip includes the CRC with every * received frame, and there's no way to turn this * behavior off (at least, I can't find anything in * the manual that explains how to do it) so we have * to trim off the CRC manually. */ total_len = VR_RXBYTES(rxstat); total_len -= ETHER_CRC_LEN; m->m_pkthdr.len = m->m_len = total_len; #ifndef __NO_STRICT_ALIGNMENT /* * RX buffers must be 32-bit aligned. * Ignore the alignment problems on the non-strict alignment * platform. The performance hit incurred due to unaligned * accesses is much smaller than the hit produced by forcing * buffer copies all the time. */ vr_fixup_rx(m); #endif m->m_pkthdr.rcvif = ifp; ifp->if_ipackets++; sc->vr_stat.rx_ok++; if ((ifp->if_capenable & IFCAP_RXCSUM) != 0 && (rxstat & VR_RXSTAT_FRAG) == 0 && (rxctl & VR_RXCTL_IP) != 0) { /* Checksum is valid for non-fragmented IP packets. */ m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if ((rxctl & VR_RXCTL_IPOK) == VR_RXCTL_IPOK) { m->m_pkthdr.csum_flags |= CSUM_IP_VALID; if (rxctl & (VR_RXCTL_TCP | VR_RXCTL_UDP)) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; if ((rxctl & VR_RXCTL_TCPUDPOK) != 0) m->m_pkthdr.csum_data = 0xffff; } } } VR_UNLOCK(sc); (*ifp->if_input)(ifp, m); VR_LOCK(sc); + rx_npkts++; } if (prog > 0) { sc->vr_cdata.vr_rx_cons = cons; bus_dmamap_sync(sc->vr_cdata.vr_rx_ring_tag, sc->vr_cdata.vr_rx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } + return (rx_npkts); } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. 
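 *
 * A note on the signature changes in this revision: vr_rxeof() and the
 * polling entry points now return the number of frames processed,
 * matching an updated poll_handler_t contract that lets the polling
 * framework gauge per-interface load. Every converted handler follows
 * roughly this shape (the foo_* names are placeholders, not driver
 * code):
 *
 *	static int
 *	foo_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
 *	{
 *		int rx_npkts = 0;
 *
 *		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
 *			rx_npkts = foo_rxeof(ifp->if_softc);
 *		return (rx_npkts);
 *	}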
*/ static void vr_txeof(struct vr_softc *sc) { struct vr_txdesc *txd; struct vr_desc *cur_tx; struct ifnet *ifp; uint32_t txctl, txstat; int cons, prod; VR_LOCK_ASSERT(sc); cons = sc->vr_cdata.vr_tx_cons; prod = sc->vr_cdata.vr_tx_prod; if (cons == prod) return; bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, sc->vr_cdata.vr_tx_ring_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); ifp = sc->vr_ifp; /* * Go through our tx list and free mbufs for those * frames that have been transmitted. */ for (; cons != prod; VR_INC(cons, VR_TX_RING_CNT)) { cur_tx = &sc->vr_rdata.vr_tx_ring[cons]; txctl = le32toh(cur_tx->vr_ctl); txstat = le32toh(cur_tx->vr_status); if ((txstat & VR_TXSTAT_OWN) == VR_TXSTAT_OWN) break; sc->vr_cdata.vr_tx_cnt--; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; /* Only the first descriptor in the chain is valid. */ if ((txctl & VR_TXCTL_FIRSTFRAG) == 0) continue; txd = &sc->vr_cdata.vr_txdesc[cons]; KASSERT(txd->tx_m != NULL, ("%s: accessing NULL mbuf!\n", __func__)); if ((txstat & VR_TXSTAT_ERRSUM) != 0) { ifp->if_oerrors++; sc->vr_stat.tx_errors++; if ((txstat & VR_TXSTAT_ABRT) != 0) { /* Give up and restart Tx. */ sc->vr_stat.tx_abort++; bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap); m_freem(txd->tx_m); txd->tx_m = NULL; VR_INC(cons, VR_TX_RING_CNT); sc->vr_cdata.vr_tx_cons = cons; if (vr_tx_stop(sc) != 0) { device_printf(sc->vr_dev, "%s: Tx shutdown error -- " "resetting\n", __func__); sc->vr_flags |= VR_F_RESTART; return; } vr_tx_start(sc); break; } if ((sc->vr_revid < REV_ID_VT3071_A && (txstat & VR_TXSTAT_UNDERRUN)) || (txstat & (VR_TXSTAT_UDF | VR_TXSTAT_TBUFF))) { sc->vr_stat.tx_underrun++; /* Retry and restart Tx. */ sc->vr_cdata.vr_tx_cnt++; sc->vr_cdata.vr_tx_cons = cons; cur_tx->vr_status = htole32(VR_TXSTAT_OWN); bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, sc->vr_cdata.vr_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); vr_tx_underrun(sc); return; } if ((txstat & VR_TXSTAT_DEFER) != 0) { ifp->if_collisions++; sc->vr_stat.tx_collisions++; } if ((txstat & VR_TXSTAT_LATECOLL) != 0) { ifp->if_collisions++; sc->vr_stat.tx_late_collisions++; } } else { sc->vr_stat.tx_ok++; ifp->if_opackets++; } bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap); if (sc->vr_revid < REV_ID_VT3071_A) { ifp->if_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3; sc->vr_stat.tx_collisions += (txstat & VR_TXSTAT_COLLCNT) >> 3; } else { ifp->if_collisions += (txstat & 0x0f); sc->vr_stat.tx_collisions += (txstat & 0x0f); } m_freem(txd->tx_m); txd->tx_m = NULL; } sc->vr_cdata.vr_tx_cons = cons; if (sc->vr_cdata.vr_tx_cnt == 0) sc->vr_watchdog_timer = 0; } static void vr_tick(void *xsc) { struct vr_softc *sc; struct mii_data *mii; sc = (struct vr_softc *)xsc; VR_LOCK_ASSERT(sc); if ((sc->vr_flags & VR_F_RESTART) != 0) { device_printf(sc->vr_dev, "restarting\n"); sc->vr_stat.num_restart++; vr_stop(sc); vr_reset(sc); vr_init_locked(sc); sc->vr_flags &= ~VR_F_RESTART; } mii = device_get_softc(sc->vr_miibus); mii_tick(mii); vr_watchdog(sc); callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc); } #ifdef DEVICE_POLLING static poll_handler_t vr_poll; static poll_handler_t vr_poll_locked; -static void +static int vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct vr_softc *sc; + int rx_npkts; sc = ifp->if_softc; + rx_npkts = 0; VR_LOCK(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) - vr_poll_locked(ifp, 
cmd, count); + rx_npkts = vr_poll_locked(ifp, cmd, count); VR_UNLOCK(sc); + return (rx_npkts); } -static void +static int vr_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct vr_softc *sc; + int rx_npkts; sc = ifp->if_softc; VR_LOCK_ASSERT(sc); sc->rxcycles = count; - vr_rxeof(sc); + rx_npkts = vr_rxeof(sc); vr_txeof(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) vr_start_locked(ifp); if (cmd == POLL_AND_CHECK_STATUS) { uint16_t status; /* Also check status register. */ status = CSR_READ_2(sc, VR_ISR); if (status) CSR_WRITE_2(sc, VR_ISR, status); if ((status & VR_INTRS) == 0) - return; + return (rx_npkts); if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | VR_ISR_STATSOFLOW)) != 0) { if (vr_error(sc, status) != 0) - return; + return (rx_npkts); } if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) { #ifdef VR_SHOW_ERRORS device_printf(sc->vr_dev, "%s: receive error : 0x%b\n", __func__, status, VR_ISR_ERR_BITS); #endif vr_rx_start(sc); } } + return (rx_npkts); } #endif /* DEVICE_POLLING */ /* Back off the transmit threshold. */ static void vr_tx_underrun(struct vr_softc *sc) { int thresh; device_printf(sc->vr_dev, "Tx underrun -- "); if (sc->vr_txthresh < VR_TXTHRESH_MAX) { thresh = sc->vr_txthresh; sc->vr_txthresh++; if (sc->vr_txthresh >= VR_TXTHRESH_MAX) { sc->vr_txthresh = VR_TXTHRESH_MAX; printf("using store and forward mode\n"); } else printf("increasing Tx threshold(%d -> %d)\n", vr_tx_threshold_tables[thresh].value, vr_tx_threshold_tables[thresh + 1].value); } else printf("\n"); sc->vr_stat.tx_underrun++; if (vr_tx_stop(sc) != 0) { device_printf(sc->vr_dev, "%s: Tx shutdown error -- " "resetting\n", __func__); sc->vr_flags |= VR_F_RESTART; return; } vr_tx_start(sc); } static void vr_intr(void *arg) { struct vr_softc *sc; struct ifnet *ifp; uint16_t status; sc = (struct vr_softc *)arg; VR_LOCK(sc); if (sc->vr_suspended != 0) goto done_locked; status = CSR_READ_2(sc, VR_ISR); if (status == 0 || status == 0xffff || (status & VR_INTRS) == 0) goto done_locked; ifp = sc->vr_ifp; #ifdef DEVICE_POLLING if ((ifp->if_capenable & IFCAP_POLLING) != 0) goto done_locked; #endif /* Suppress unwanted interrupts. */ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || (sc->vr_flags & VR_F_RESTART) != 0) { CSR_WRITE_2(sc, VR_IMR, 0); CSR_WRITE_2(sc, VR_ISR, status); goto done_locked; } /* Disable interrupts. */ CSR_WRITE_2(sc, VR_IMR, 0x0000); for (; (status & VR_INTRS) != 0;) { CSR_WRITE_2(sc, VR_ISR, status); if ((status & (VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | VR_ISR_STATSOFLOW)) != 0) { if (vr_error(sc, status) != 0) { VR_UNLOCK(sc); return; } } vr_rxeof(sc); if ((status & (VR_ISR_RX_NOBUF | VR_ISR_RX_OFLOW)) != 0) { #ifdef VR_SHOW_ERRORS device_printf(sc->vr_dev, "%s: receive error = 0x%b\n", __func__, status, VR_ISR_ERR_BITS); #endif /* Restart Rx if RxDMA SM was stopped. */ vr_rx_start(sc); } vr_txeof(sc); status = CSR_READ_2(sc, VR_ISR); } /* Re-enable interrupts. */ CSR_WRITE_2(sc, VR_IMR, VR_INTRS); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) vr_start_locked(ifp); done_locked: VR_UNLOCK(sc); } static int vr_error(struct vr_softc *sc, uint16_t status) { uint16_t pcis; status &= VR_ISR_BUSERR | VR_ISR_LINKSTAT2 | VR_ISR_STATSOFLOW; if ((status & VR_ISR_BUSERR) != 0) { status &= ~VR_ISR_BUSERR; sc->vr_stat.bus_errors++; /* Disable further interrupts. 
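 *
 * For reference, vr_intr() above uses the usual pattern for a
 * level-triggered NIC: mask everything via VR_IMR, then loop reading
 * VR_ISR and writing the value straight back to acknowledge it,
 * apparently write-one-to-clear semantics. Condensed:
 *
 *	CSR_WRITE_2(sc, VR_IMR, 0);
 *	while (((status = CSR_READ_2(sc, VR_ISR)) & VR_INTRS) != 0) {
 *		CSR_WRITE_2(sc, VR_ISR, status);
 *		... service Rx, Tx, and error bits ...
 *	}
 *	CSR_WRITE_2(sc, VR_IMR, VR_INTRS);
 *
 * The write-one-to-clear reading is inferred from the ack-by-write-back
 * usage here, not from a datasheet.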
*/ CSR_WRITE_2(sc, VR_IMR, 0); pcis = pci_read_config(sc->vr_dev, PCIR_STATUS, 2); device_printf(sc->vr_dev, "PCI bus error(0x%04x) -- " "resetting\n", pcis); pci_write_config(sc->vr_dev, PCIR_STATUS, pcis, 2); sc->vr_flags |= VR_F_RESTART; return (EAGAIN); } if ((status & VR_ISR_LINKSTAT2) != 0) { /* Link state change, duplex changes etc. */ status &= ~VR_ISR_LINKSTAT2; } if ((status & VR_ISR_STATSOFLOW) != 0) { status &= ~VR_ISR_STATSOFLOW; if (sc->vr_revid >= REV_ID_VT6105M_A0) { /* Update MIB counters. */ } } if (status != 0) device_printf(sc->vr_dev, "unhandled interrupt, status = 0x%04x\n", status); return (0); } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. */ static int vr_encap(struct vr_softc *sc, struct mbuf **m_head) { struct vr_txdesc *txd; struct vr_desc *desc; struct mbuf *m; bus_dma_segment_t txsegs[VR_MAXFRAGS]; uint32_t csum_flags, txctl; int error, i, nsegs, prod, si; int padlen; VR_LOCK_ASSERT(sc); M_ASSERTPKTHDR((*m_head)); /* * Some VIA Rhine chips want packet buffers to be longword * aligned, but very often our mbufs aren't. Rather than * waste time trying to decide when to copy and when not * to copy, just do it all the time. */ if ((sc->vr_quirks & VR_Q_NEEDALIGN) != 0) { m = m_defrag(*m_head, M_DONTWAIT); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m; } /* * The Rhine chip doesn't auto-pad, so we have to make * sure to pad short frames out to the minimum frame length * ourselves. */ if ((*m_head)->m_pkthdr.len < VR_MIN_FRAMELEN) { m = *m_head; padlen = VR_MIN_FRAMELEN - m->m_pkthdr.len; if (M_WRITABLE(m) == 0) { /* Get a writable copy. */ m = m_dup(*m_head, M_DONTWAIT); m_freem(*m_head); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } *m_head = m; } if (m->m_next != NULL || M_TRAILINGSPACE(m) < padlen) { m = m_defrag(m, M_DONTWAIT); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } } /* * Manually pad short frames, and zero the pad space * to avoid leaking data. */ bzero(mtod(m, char *) + m->m_pkthdr.len, padlen); m->m_pkthdr.len += padlen; m->m_len = m->m_pkthdr.len; *m_head = m; } prod = sc->vr_cdata.vr_tx_prod; txd = &sc->vr_cdata.vr_txdesc[prod]; error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { m = m_collapse(*m_head, M_DONTWAIT, VR_MAXFRAGS); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m; error = bus_dmamap_load_mbuf_sg(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { m_freem(*m_head); *m_head = NULL; return (error); } } else if (error != 0) return (error); if (nsegs == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } /* Check number of available descriptors. */ if (sc->vr_cdata.vr_tx_cnt + nsegs >= (VR_TX_RING_CNT - 1)) { bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap); return (ENOBUFS); } txd->tx_m = *m_head; bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, BUS_DMASYNC_PREWRITE); /* Set checksum offload. 
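 *
 * On the manual padding in vr_encap() above: a short frame -- say a
 * 42-byte ARP request -- has to grow to VR_MIN_FRAMELEN before the chip
 * sees it, and the pad bytes are explicitly zeroed so stale mbuf
 * contents never leak onto the wire. The essential steps, lifted from
 * the code above:
 *
 *	padlen = VR_MIN_FRAMELEN - m->m_pkthdr.len;
 *	bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
 *	m->m_pkthdr.len += padlen;
 *	m->m_len = m->m_pkthdr.len;
 *
 * The m_dup()/m_defrag() dance beforehand just guarantees a writable
 * mbuf with padlen bytes of contiguous trailing space.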
*/ csum_flags = 0; if (((*m_head)->m_pkthdr.csum_flags & VR_CSUM_FEATURES) != 0) { if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) csum_flags |= VR_TXCTL_IPCSUM; if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP) csum_flags |= VR_TXCTL_TCPCSUM; if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP) csum_flags |= VR_TXCTL_UDPCSUM; } /* * Quite contrary to the VIA Rhine datasheet, the VR_TXCTL_TLINK bit * is required in all descriptors, whether a frame occupies a single * buffer or several. Also, the VR_TXSTAT_OWN bit is valid only in * the first descriptor of a multi-fragment frame. Without * that, the VIA Rhine chip generates Tx underrun interrupts and can't * send any frames. */ si = prod; for (i = 0; i < nsegs; i++) { desc = &sc->vr_rdata.vr_tx_ring[prod]; desc->vr_status = 0; txctl = txsegs[i].ds_len | VR_TXCTL_TLINK | csum_flags; if (i == 0) txctl |= VR_TXCTL_FIRSTFRAG; desc->vr_ctl = htole32(txctl); desc->vr_data = htole32(VR_ADDR_LO(txsegs[i].ds_addr)); sc->vr_cdata.vr_tx_cnt++; VR_INC(prod, VR_TX_RING_CNT); } /* Update producer index. */ sc->vr_cdata.vr_tx_prod = prod; prod = (prod + VR_TX_RING_CNT - 1) % VR_TX_RING_CNT; desc = &sc->vr_rdata.vr_tx_ring[prod]; /* * Set EOP on the last descriptor and request a Tx completion * interrupt for every VR_TX_INTR_THRESH-th frame. */ VR_INC(sc->vr_cdata.vr_tx_pkts, VR_TX_INTR_THRESH); if (sc->vr_cdata.vr_tx_pkts == 0) desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG | VR_TXCTL_FINT); else desc->vr_ctl |= htole32(VR_TXCTL_LASTFRAG); /* Lastly, turn ownership of the first descriptor over to the hardware. */ desc = &sc->vr_rdata.vr_tx_ring[si]; desc->vr_status |= htole32(VR_TXSTAT_OWN); /* Sync descriptors. */ bus_dmamap_sync(sc->vr_cdata.vr_tx_ring_tag, sc->vr_cdata.vr_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } static void vr_start(struct ifnet *ifp) { struct vr_softc *sc; sc = ifp->if_softc; VR_LOCK(sc); vr_start_locked(ifp); VR_UNLOCK(sc); } static void vr_start_locked(struct ifnet *ifp) { struct vr_softc *sc; struct mbuf *m_head; int enq; sc = ifp->if_softc; VR_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || sc->vr_link == 0) return; for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->vr_cdata.vr_tx_cnt < VR_TX_RING_CNT - 2; ) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* * Pack the data into the transmit ring. If we * don't have room, set the OACTIVE flag and wait * for the NIC to drain the ring. */ if (vr_encap(sc, &m_head)) { if (m_head == NULL) break; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } enq++; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ ETHER_BPF_MTAP(ifp, m_head); } if (enq > 0) { /* Tell the chip to start transmitting. */ VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO); /* Set a timeout in case the chip goes out to lunch. */ sc->vr_watchdog_timer = 5; } } static void vr_init(void *xsc) { struct vr_softc *sc; sc = (struct vr_softc *)xsc; VR_LOCK(sc); vr_init_locked(sc); VR_UNLOCK(sc); } static void vr_init_locked(struct vr_softc *sc) { struct ifnet *ifp; struct mii_data *mii; bus_addr_t addr; int i; VR_LOCK_ASSERT(sc); ifp = sc->vr_ifp; mii = device_get_softc(sc->vr_miibus); /* Cancel pending I/O and free all RX/TX buffers. */ vr_stop(sc); vr_reset(sc); /* Set our station address. */ for (i = 0; i < ETHER_ADDR_LEN; i++) CSR_WRITE_1(sc, VR_PAR0 + i, IF_LLADDR(sc->vr_ifp)[i]); /* Set DMA size. 
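 *
 * On the VR_TXCTL_FINT logic above: vr_tx_pkts is advanced modulo
 * VR_TX_INTR_THRESH (that is presumably what VR_INC(x, n) expands to),
 * so a Tx-completion interrupt is requested on only every Nth frame.
 * Equivalently:
 *
 *	sc->vr_cdata.vr_tx_pkts =
 *	    (sc->vr_cdata.vr_tx_pkts + 1) % VR_TX_INTR_THRESH;
 *	if (sc->vr_cdata.vr_tx_pkts == 0)
 *		desc->vr_ctl |= htole32(VR_TXCTL_FINT);
 *
 * Frames completed in between are reclaimed opportunistically by the
 * vr_txeof() calls made from the interrupt path and by the watchdog.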
*/ VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH); VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD); /* * BCR0 and BCR1 can override the RXCFG and TXCFG registers, * so we must set both. */ VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH); VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES); VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH); VR_SETBIT(sc, VR_BCR1, vr_tx_threshold_tables[sc->vr_txthresh].bcr_cfg); VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH); VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES); VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH); VR_SETBIT(sc, VR_TXCFG, vr_tx_threshold_tables[sc->vr_txthresh].tx_cfg); /* Init circular RX list. */ if (vr_rx_ring_init(sc) != 0) { device_printf(sc->vr_dev, "initialization failed: no memory for rx buffers\n"); vr_stop(sc); return; } /* Init tx descriptors. */ vr_tx_ring_init(sc); if ((sc->vr_quirks & VR_Q_CAM) != 0) { uint8_t vcam[2] = { 0, 0 }; /* Disable VLAN hardware tag insertion/stripping. */ VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TXTAGEN | VR_TXCFG_RXTAGCTL); /* Disable VLAN hardware filtering. */ VR_CLRBIT(sc, VR_BCR1, VR_BCR1_VLANFILT_ENB); /* Disable all CAM entries. */ vr_cam_mask(sc, VR_MCAST_CAM, 0); vr_cam_mask(sc, VR_VLAN_CAM, 0); /* Enable the first VLAN CAM. */ vr_cam_data(sc, VR_VLAN_CAM, 0, vcam); vr_cam_mask(sc, VR_VLAN_CAM, 1); } /* * Set up receive filter. */ vr_set_filter(sc); /* * Load the address of the RX ring. */ addr = VR_RX_RING_ADDR(sc, 0); CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr)); /* * Load the address of the TX ring. */ addr = VR_TX_RING_ADDR(sc, 0); CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr)); /* Default : full-duplex, no Tx poll. */ CSR_WRITE_1(sc, VR_CR1, VR_CR1_FULLDUPLEX | VR_CR1_TX_NOPOLL); /* Set flow-control parameters for Rhine III. */ if (sc->vr_revid >= REV_ID_VT6105_A0) { /* Rx buffer count available for incoming packet. */ CSR_WRITE_1(sc, VR_FLOWCR0, VR_RX_RING_CNT); /* * Tx pause low threshold : 16 free receive buffers * Tx pause XON high threshold : 48 free receive buffers */ CSR_WRITE_1(sc, VR_FLOWCR1, VR_FLOWCR1_TXLO16 | VR_FLOWCR1_TXHI48 | VR_FLOWCR1_XONXOFF); /* Set Tx pause timer. */ CSR_WRITE_2(sc, VR_PAUSETIMER, 0xffff); } /* Enable receiver and transmitter. */ CSR_WRITE_1(sc, VR_CR0, VR_CR0_START | VR_CR0_TX_ON | VR_CR0_RX_ON | VR_CR0_RX_GO); CSR_WRITE_2(sc, VR_ISR, 0xFFFF); #ifdef DEVICE_POLLING /* * Disable interrupts if we are polling. */ if (ifp->if_capenable & IFCAP_POLLING) CSR_WRITE_2(sc, VR_IMR, 0); else #endif /* * Enable interrupts and disable MII intrs. */ CSR_WRITE_2(sc, VR_IMR, VR_INTRS); if (sc->vr_revid > REV_ID_VT6102_A) CSR_WRITE_2(sc, VR_MII_IMR, 0); sc->vr_link = 0; mii_mediachg(mii); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&sc->vr_stat_callout, hz, vr_tick, sc); } /* * Set media options. */ static int vr_ifmedia_upd(struct ifnet *ifp) { struct vr_softc *sc; struct mii_data *mii; struct mii_softc *miisc; int error; sc = ifp->if_softc; VR_LOCK(sc); mii = device_get_softc(sc->vr_miibus); if (mii->mii_instance) { LIST_FOREACH(miisc, &mii->mii_phys, mii_list) mii_phy_reset(miisc); } error = mii_mediachg(mii); VR_UNLOCK(sc); return (error); } /* * Report current media status. 
*/ static void vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct vr_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->vr_miibus); VR_LOCK(sc); mii_pollstat(mii); VR_UNLOCK(sc); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static int vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct vr_softc *sc; struct ifreq *ifr; struct mii_data *mii; int error, mask; sc = ifp->if_softc; ifr = (struct ifreq *)data; error = 0; switch (command) { case SIOCSIFFLAGS: VR_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { if ((ifp->if_flags ^ sc->vr_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) vr_set_filter(sc); } else { if (sc->vr_detach == 0) vr_init_locked(sc); } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) vr_stop(sc); } sc->vr_if_flags = ifp->if_flags; VR_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: VR_LOCK(sc); vr_set_filter(sc); VR_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->vr_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ ifp->if_capenable; #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { error = ether_poll_register(vr_poll, ifp); if (error != 0) break; VR_LOCK(sc); /* Disable interrupts. */ CSR_WRITE_2(sc, VR_IMR, 0x0000); ifp->if_capenable |= IFCAP_POLLING; VR_UNLOCK(sc); } else { error = ether_poll_deregister(ifp); /* Enable interrupts. */ VR_LOCK(sc); CSR_WRITE_2(sc, VR_IMR, VR_INTRS); ifp->if_capenable &= ~IFCAP_POLLING; VR_UNLOCK(sc); } } #endif /* DEVICE_POLLING */ if ((mask & IFCAP_TXCSUM) != 0 && (IFCAP_TXCSUM & ifp->if_capabilities) != 0) { ifp->if_capenable ^= IFCAP_TXCSUM; if ((IFCAP_TXCSUM & ifp->if_capenable) != 0) ifp->if_hwassist |= VR_CSUM_FEATURES; else ifp->if_hwassist &= ~VR_CSUM_FEATURES; } if ((mask & IFCAP_RXCSUM) != 0 && (IFCAP_RXCSUM & ifp->if_capabilities) != 0) ifp->if_capenable ^= IFCAP_RXCSUM; if ((mask & IFCAP_WOL_UCAST) != 0 && (ifp->if_capabilities & IFCAP_WOL_UCAST) != 0) ifp->if_capenable ^= IFCAP_WOL_UCAST; if ((mask & IFCAP_WOL_MAGIC) != 0 && (ifp->if_capabilities & IFCAP_WOL_MAGIC) != 0) ifp->if_capenable ^= IFCAP_WOL_MAGIC; break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void vr_watchdog(struct vr_softc *sc) { struct ifnet *ifp; VR_LOCK_ASSERT(sc); if (sc->vr_watchdog_timer == 0 || --sc->vr_watchdog_timer) return; ifp = sc->vr_ifp; /* * Reclaim first as we don't request an interrupt for every packet. 
*/ vr_txeof(sc); if (sc->vr_cdata.vr_tx_cnt == 0) return; if (sc->vr_link == 0) { if (bootverbose) if_printf(sc->vr_ifp, "watchdog timeout " "(missed link)\n"); ifp->if_oerrors++; vr_init_locked(sc); return; } ifp->if_oerrors++; if_printf(ifp, "watchdog timeout\n"); vr_stop(sc); vr_reset(sc); vr_init_locked(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) vr_start_locked(ifp); } static void vr_tx_start(struct vr_softc *sc) { bus_addr_t addr; uint8_t cmd; cmd = CSR_READ_1(sc, VR_CR0); if ((cmd & VR_CR0_TX_ON) == 0) { addr = VR_TX_RING_ADDR(sc, sc->vr_cdata.vr_tx_cons); CSR_WRITE_4(sc, VR_TXADDR, VR_ADDR_LO(addr)); cmd |= VR_CR0_TX_ON; CSR_WRITE_1(sc, VR_CR0, cmd); } if (sc->vr_cdata.vr_tx_cnt != 0) { sc->vr_watchdog_timer = 5; VR_SETBIT(sc, VR_CR0, VR_CR0_TX_GO); } } static void vr_rx_start(struct vr_softc *sc) { bus_addr_t addr; uint8_t cmd; cmd = CSR_READ_1(sc, VR_CR0); if ((cmd & VR_CR0_RX_ON) == 0) { addr = VR_RX_RING_ADDR(sc, sc->vr_cdata.vr_rx_cons); CSR_WRITE_4(sc, VR_RXADDR, VR_ADDR_LO(addr)); cmd |= VR_CR0_RX_ON; CSR_WRITE_1(sc, VR_CR0, cmd); } CSR_WRITE_1(sc, VR_CR0, cmd | VR_CR0_RX_GO); } static int vr_tx_stop(struct vr_softc *sc) { int i; uint8_t cmd; cmd = CSR_READ_1(sc, VR_CR0); if ((cmd & VR_CR0_TX_ON) != 0) { cmd &= ~VR_CR0_TX_ON; CSR_WRITE_1(sc, VR_CR0, cmd); for (i = VR_TIMEOUT; i > 0; i--) { DELAY(5); cmd = CSR_READ_1(sc, VR_CR0); if ((cmd & VR_CR0_TX_ON) == 0) break; } if (i == 0) return (ETIMEDOUT); } return (0); } static int vr_rx_stop(struct vr_softc *sc) { int i; uint8_t cmd; cmd = CSR_READ_1(sc, VR_CR0); if ((cmd & VR_CR0_RX_ON) != 0) { cmd &= ~VR_CR0_RX_ON; CSR_WRITE_1(sc, VR_CR0, cmd); for (i = VR_TIMEOUT; i > 0; i--) { DELAY(5); cmd = CSR_READ_1(sc, VR_CR0); if ((cmd & VR_CR0_RX_ON) == 0) break; } if (i == 0) return (ETIMEDOUT); } return (0); } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void vr_stop(struct vr_softc *sc) { struct vr_txdesc *txd; struct vr_rxdesc *rxd; struct ifnet *ifp; int i; VR_LOCK_ASSERT(sc); ifp = sc->vr_ifp; sc->vr_watchdog_timer = 0; callout_stop(&sc->vr_stat_callout); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); CSR_WRITE_1(sc, VR_CR0, VR_CR0_STOP); if (vr_rx_stop(sc) != 0) device_printf(sc->vr_dev, "%s: Rx shutdown error\n", __func__); if (vr_tx_stop(sc) != 0) device_printf(sc->vr_dev, "%s: Tx shutdown error\n", __func__); /* Clear pending interrupts. */ CSR_WRITE_2(sc, VR_ISR, 0xFFFF); CSR_WRITE_2(sc, VR_IMR, 0x0000); CSR_WRITE_4(sc, VR_TXADDR, 0x00000000); CSR_WRITE_4(sc, VR_RXADDR, 0x00000000); /* * Free RX and TX mbufs still in the queues. */ for (i = 0; i < VR_RX_RING_CNT; i++) { rxd = &sc->vr_cdata.vr_rxdesc[i]; if (rxd->rx_m != NULL) { bus_dmamap_sync(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->vr_cdata.vr_rx_tag, rxd->rx_dmamap); m_freem(rxd->rx_m); rxd->rx_m = NULL; } } for (i = 0; i < VR_TX_RING_CNT; i++) { txd = &sc->vr_cdata.vr_txdesc[i]; if (txd->tx_m != NULL) { bus_dmamap_sync(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->vr_cdata.vr_tx_tag, txd->tx_dmamap); m_freem(txd->tx_m); txd->tx_m = NULL; } } } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. 
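 *
 * vr_tx_stop() and vr_rx_stop() above share one idiom worth naming:
 * clear the ON bit, then poll CR0 in small DELAY() steps, bounded by
 * VR_TIMEOUT so a wedged DMA engine produces ETIMEDOUT rather than a
 * hang. Condensed:
 *
 *	CSR_WRITE_1(sc, VR_CR0, cmd & ~bit);
 *	for (i = VR_TIMEOUT; i > 0; i--) {
 *		DELAY(5);
 *		if ((CSR_READ_1(sc, VR_CR0) & bit) == 0)
 *			break;
 *	}
 *	return (i == 0 ? ETIMEDOUT : 0);
 *
 * Callers such as vr_stop() only log the failure, while the Tx paths
 * escalate to a full reset via VR_F_RESTART when the engine won't stop.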
*/ static int vr_shutdown(device_t dev) { return (vr_suspend(dev)); } static int vr_suspend(device_t dev) { struct vr_softc *sc; sc = device_get_softc(dev); VR_LOCK(sc); vr_stop(sc); vr_setwol(sc); sc->vr_suspended = 1; VR_UNLOCK(sc); return (0); } static int vr_resume(device_t dev) { struct vr_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); VR_LOCK(sc); ifp = sc->vr_ifp; vr_clrwol(sc); vr_reset(sc); if (ifp->if_flags & IFF_UP) vr_init_locked(sc); sc->vr_suspended = 0; VR_UNLOCK(sc); return (0); } static void vr_setwol(struct vr_softc *sc) { struct ifnet *ifp; int pmc; uint16_t pmstat; uint8_t v; VR_LOCK_ASSERT(sc); if (sc->vr_revid < REV_ID_VT6102_A || pci_find_extcap(sc->vr_dev, PCIY_PMG, &pmc) != 0) return; ifp = sc->vr_ifp; /* Clear WOL configuration. */ CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF); CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_SAB | VR_WOLCFG_SAM); CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF); CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN); if (sc->vr_revid > REV_ID_VT6105_B0) { /* Newer Rhine III supports two additional patterns. */ CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE); CSR_WRITE_1(sc, VR_TESTREG_CLR, 3); CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3); } if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_UCAST); if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_MAGIC); /* * It seems that multicast wakeup frames require programming pattern * registers and a valid CRC as well as a pattern mask for each pattern. * While it's possible to set up such a pattern it would complicate * WOL configuration so ignore multicast wakeup frames. */ if ((ifp->if_capenable & IFCAP_WOL) != 0) { CSR_WRITE_1(sc, VR_WOLCFG_SET, VR_WOLCFG_SAB | VR_WOLCFG_SAM); v = CSR_READ_1(sc, VR_STICKHW); CSR_WRITE_1(sc, VR_STICKHW, v | VR_STICKHW_WOL_ENB); CSR_WRITE_1(sc, VR_PWRCFG_SET, VR_PWRCFG_WOLEN); } /* Put the hardware to sleep. */ v = CSR_READ_1(sc, VR_STICKHW); v |= VR_STICKHW_DS0 | VR_STICKHW_DS1; CSR_WRITE_1(sc, VR_STICKHW, v); /* Request PME if WOL is requested. */ pmstat = pci_read_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, 2); pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); if ((ifp->if_capenable & IFCAP_WOL) != 0) pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; pci_write_config(sc->vr_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); } static void vr_clrwol(struct vr_softc *sc) { uint8_t v; VR_LOCK_ASSERT(sc); if (sc->vr_revid < REV_ID_VT6102_A) return; /* Take the hardware out of sleep. */ v = CSR_READ_1(sc, VR_STICKHW); v &= ~(VR_STICKHW_DS0 | VR_STICKHW_DS1 | VR_STICKHW_WOL_ENB); CSR_WRITE_1(sc, VR_STICKHW, v); /* Clear WOL configuration as WOL may interfere with normal operation. */ CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF); CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_SAB | VR_WOLCFG_SAM | VR_WOLCFG_PMEOVR); CSR_WRITE_1(sc, VR_PWRCSR_CLR, 0xFF); CSR_WRITE_1(sc, VR_PWRCFG_CLR, VR_PWRCFG_WOLEN); if (sc->vr_revid > REV_ID_VT6105_B0) { /* Newer Rhine III supports two additional patterns. 
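 *
 * The paired VR_*_SET/VR_*_CLR register names used throughout
 * vr_setwol() and vr_clrwol() suggest the WOL block decodes separate
 * set and clear addresses per register, so individual bits can be
 * flipped without a read-modify-write cycle:
 *
 *	CSR_WRITE_1(sc, VR_WOLCR_SET, VR_WOLCR_MAGIC);
 *	CSR_WRITE_1(sc, VR_WOLCR_CLR, 0xFF);
 *
 * The first line arms magic-packet wakeup; the second clears every
 * WOLCR bit. That reading is inferred from usage in this file, not
 * from a datasheet.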
*/ CSR_WRITE_1(sc, VR_WOLCFG_CLR, VR_WOLCFG_PATTERN_PAGE); CSR_WRITE_1(sc, VR_TESTREG_CLR, 3); CSR_WRITE_1(sc, VR_PWRCSR1_CLR, 3); } } static int vr_sysctl_stats(SYSCTL_HANDLER_ARGS) { struct vr_softc *sc; struct vr_statistics *stat; int error; int result; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error != 0 || req->newptr == NULL) return (error); if (result == 1) { sc = (struct vr_softc *)arg1; stat = &sc->vr_stat; printf("%s statistics:\n", device_get_nameunit(sc->vr_dev)); printf("Outbound good frames : %ju\n", (uintmax_t)stat->tx_ok); printf("Inbound good frames : %ju\n", (uintmax_t)stat->rx_ok); printf("Outbound errors : %u\n", stat->tx_errors); printf("Inbound errors : %u\n", stat->rx_errors); printf("Inbound no buffers : %u\n", stat->rx_no_buffers); printf("Inbound no mbuf clusters: %d\n", stat->rx_no_mbufs); printf("Inbound FIFO overflows : %d\n", stat->rx_fifo_overflows); printf("Inbound CRC errors : %u\n", stat->rx_crc_errors); printf("Inbound frame alignment errors : %u\n", stat->rx_alignment); printf("Inbound giant frames : %u\n", stat->rx_giants); printf("Inbound runt frames : %u\n", stat->rx_runts); printf("Outbound aborted with excessive collisions : %u\n", stat->tx_abort); printf("Outbound collisions : %u\n", stat->tx_collisions); printf("Outbound late collisions : %u\n", stat->tx_late_collisions); printf("Outbound underrun : %u\n", stat->tx_underrun); printf("PCI bus errors : %u\n", stat->bus_errors); printf("driver restarted due to Rx/Tx shutdown failure : %u\n", stat->num_restart); } return (error); } Index: head/sys/dev/xl/if_xl.c =================================================================== --- head/sys/dev/xl/if_xl.c (revision 193095) +++ head/sys/dev/xl/if_xl.c (revision 193096) @@ -1,3359 +1,3366 @@ /*- * Copyright (c) 1997, 1998, 1999 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /* * 3Com 3c90x Etherlink XL PCI NIC driver * * Supports the 3Com "boomerang", "cyclone" and "hurricane" PCI * bus-master chips (3c90x cards and embedded controllers) including * the following: * * 3Com 3c900-TPO 10Mbps/RJ-45 * 3Com 3c900-COMBO 10Mbps/RJ-45,AUI,BNC * 3Com 3c905-TX 10/100Mbps/RJ-45 * 3Com 3c905-T4 10/100Mbps/RJ-45 * 3Com 3c900B-TPO 10Mbps/RJ-45 * 3Com 3c900B-COMBO 10Mbps/RJ-45,AUI,BNC * 3Com 3c900B-TPC 10Mbps/RJ-45,BNC * 3Com 3c900B-FL 10Mbps/Fiber-optic * 3Com 3c905B-COMBO 10/100Mbps/RJ-45,AUI,BNC * 3Com 3c905B-TX 10/100Mbps/RJ-45 * 3Com 3c905B-FL/FX 10/100Mbps/Fiber-optic * 3Com 3c905C-TX 10/100Mbps/RJ-45 (Tornado ASIC) * 3Com 3c980-TX 10/100Mbps server adapter (Hurricane ASIC) * 3Com 3c980C-TX 10/100Mbps server adapter (Tornado ASIC) * 3Com 3cSOHO100-TX 10/100Mbps/RJ-45 (Hurricane ASIC) * 3Com 3c450-TX 10/100Mbps/RJ-45 (Tornado ASIC) * 3Com 3c555 10/100Mbps/RJ-45 (MiniPCI, Laptop Hurricane) * 3Com 3c556 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC) * 3Com 3c556B 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC) * 3Com 3c575TX 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) * 3Com 3c575B 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) * 3Com 3c575C 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) * 3Com 3cxfem656 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) * 3Com 3cxfem656b 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) * 3Com 3cxfem656c 10/100Mbps/RJ-45 (Cardbus, Tornado ASIC) * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45 * Dell on-board 3c920 10/100Mbps/RJ-45 * Dell Precision on-board 3c905B 10/100Mbps/RJ-45 * Dell Latitude laptop docking station embedded 3c905-TX * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The 3c90x series chips use a bus-master DMA interface for transferring * packets to and from the controller chip. Some of the "vortex" cards * (3c59x) also supported a bus master mode, however for those chips * you could only DMA packets to/from a contiguous memory buffer. For * transmission this would mean copying the contents of the queued mbuf * chain into an mbuf cluster and then DMAing the cluster. This extra * copy would sort of defeat the purpose of the bus master support for * any packet that doesn't fit into a single mbuf. * * By contrast, the 3c90x cards support a fragment-based bus master * mode where mbuf chains can be encapsulated using TX descriptors. * This is similar to other PCI chips such as the Texas Instruments * ThunderLAN and the Intel 82557/82558. * * The "vortex" driver (if_vx.c) happens to work for the "boomerang" * bus master chips because they maintain the old PIO interface for * backwards compatibility, but starting with the 3c905B and the * "cyclone" chips, the compatibility interface has been dropped. * Since using bus master DMA is a big win, we use this driver to * support the PCI "boomerang" chips even though they work with the * "vortex" driver in order to obtain better performance. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(xl, pci, 1, 1, 1); MODULE_DEPEND(xl, ether, 1, 1, 1); MODULE_DEPEND(xl, miibus, 1, 1, 1); /* "device miibus" required. See GENERIC if you get errors here. 
*/ #include "miibus_if.h" #include /* * TX Checksumming is disabled by default for two reasons: * - TX Checksumming will occasionally produce corrupt packets * - TX Checksumming seems to reduce performance * * Only 905B/C cards were reported to have this problem, it is possible * that later chips _may_ be immune. */ #define XL905B_TXCSUM_BROKEN 1 #ifdef XL905B_TXCSUM_BROKEN #define XL905B_CSUM_FEATURES 0 #else #define XL905B_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) #endif /* * Various supported device vendors/types and their names. */ static const struct xl_type xl_devs[] = { { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT, "3Com 3c900-TPO Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT_COMBO, "3Com 3c900-COMBO Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_BOOMERANG_10_100BT, "3Com 3c905-TX Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_BOOMERANG_100BT4, "3Com 3c905-T4 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT, "3Com 3c900B-TPO Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_COMBO, "3Com 3c900B-COMBO Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_TPC, "3Com 3c900B-TPC Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_CYCLONE_10FL, "3Com 3c900B-FL Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT, "3Com 3c905B-TX Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100BT4, "3Com 3c905B-T4 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100FX, "3Com 3c905B-FX/SC Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100_COMBO, "3Com 3c905B-COMBO Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT, "3Com 3c905C-TX Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B, "3Com 3c920B-EMB Integrated Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B_WNM, "3Com 3c920B-EMB-WNM Integrated Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT_SERV, "3Com 3c980 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_SERV, "3Com 3c980C Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_SOHO100TX, "3Com 3cSOHO100-TX OfficeConnect" }, { TC_VENDORID, TC_DEVICEID_TORNADO_HOMECONNECT, "3Com 3c450-TX HomeConnect" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_555, "3Com 3c555 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_556, "3Com 3c556 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_556B, "3Com 3c556B Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_575A, "3Com 3c575TX Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_575B, "3Com 3c575B Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_575C, "3Com 3c575C Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_656, "3Com 3c656 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_656B, "3Com 3c656B Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_TORNADO_656C, "3Com 3c656C Fast Etherlink XL" }, { 0, 0, NULL } }; static int xl_probe(device_t); static int xl_attach(device_t); static int xl_detach(device_t); static int xl_newbuf(struct xl_softc *, struct xl_chain_onefrag *); static void xl_stats_update(void *); static void xl_stats_update_locked(struct xl_softc *); static int xl_encap(struct xl_softc *, struct xl_chain *, struct mbuf **); -static void xl_rxeof(struct xl_softc *); +static int xl_rxeof(struct xl_softc *); static void xl_rxeof_task(void *, int); static int xl_rx_resync(struct xl_softc *); static void xl_txeof(struct xl_softc *); static void xl_txeof_90xB(struct xl_softc *); static void xl_txeoc(struct xl_softc *); static void 
xl_intr(void *); static void xl_start(struct ifnet *); static void xl_start_locked(struct ifnet *); static void xl_start_90xB_locked(struct ifnet *); static int xl_ioctl(struct ifnet *, u_long, caddr_t); static void xl_init(void *); static void xl_init_locked(struct xl_softc *); static void xl_stop(struct xl_softc *); static int xl_watchdog(struct xl_softc *); static int xl_shutdown(device_t); static int xl_suspend(device_t); static int xl_resume(device_t); #ifdef DEVICE_POLLING -static void xl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); -static void xl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count); +static int xl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); +static int xl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count); #endif static int xl_ifmedia_upd(struct ifnet *); static void xl_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int xl_eeprom_wait(struct xl_softc *); static int xl_read_eeprom(struct xl_softc *, caddr_t, int, int, int); static void xl_mii_sync(struct xl_softc *); static void xl_mii_send(struct xl_softc *, u_int32_t, int); static int xl_mii_readreg(struct xl_softc *, struct xl_mii_frame *); static int xl_mii_writereg(struct xl_softc *, struct xl_mii_frame *); static void xl_setcfg(struct xl_softc *); static void xl_setmode(struct xl_softc *, int); static void xl_setmulti(struct xl_softc *); static void xl_setmulti_hash(struct xl_softc *); static void xl_reset(struct xl_softc *); static int xl_list_rx_init(struct xl_softc *); static int xl_list_tx_init(struct xl_softc *); static int xl_list_tx_init_90xB(struct xl_softc *); static void xl_wait(struct xl_softc *); static void xl_mediacheck(struct xl_softc *); static void xl_choose_media(struct xl_softc *sc, int *media); static void xl_choose_xcvr(struct xl_softc *, int); static void xl_dma_map_addr(void *, bus_dma_segment_t *, int, int); #ifdef notdef static void xl_testpacket(struct xl_softc *); #endif static int xl_miibus_readreg(device_t, int, int); static int xl_miibus_writereg(device_t, int, int, int); static void xl_miibus_statchg(device_t); static void xl_miibus_mediainit(device_t); static device_method_t xl_methods[] = { /* Device interface */ DEVMETHOD(device_probe, xl_probe), DEVMETHOD(device_attach, xl_attach), DEVMETHOD(device_detach, xl_detach), DEVMETHOD(device_shutdown, xl_shutdown), DEVMETHOD(device_suspend, xl_suspend), DEVMETHOD(device_resume, xl_resume), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, xl_miibus_readreg), DEVMETHOD(miibus_writereg, xl_miibus_writereg), DEVMETHOD(miibus_statchg, xl_miibus_statchg), DEVMETHOD(miibus_mediainit, xl_miibus_mediainit), { 0, 0 } }; static driver_t xl_driver = { "xl", xl_methods, sizeof(struct xl_softc) }; static devclass_t xl_devclass; DRIVER_MODULE(xl, pci, xl_driver, xl_devclass, 0, 0); DRIVER_MODULE(miibus, xl, miibus_driver, miibus_devclass, 0, 0); static void xl_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { u_int32_t *paddr; paddr = arg; *paddr = segs->ds_addr; } /* * Murphy's law says that it's possible the chip can wedge and * the 'command in progress' bit may never clear. Hence, we wait * only a finite amount of time to avoid getting caught in an * infinite loop. Normally this delay routine would be a macro, * but it isn't called during normal operation so we can afford * to make it a function. 
*/ static void xl_wait(struct xl_softc *sc) { register int i; for (i = 0; i < XL_TIMEOUT; i++) { if ((CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY) == 0) break; } if (i == XL_TIMEOUT) device_printf(sc->xl_dev, "command never completed!\n"); } /* * MII access routines are provided for adapters with external * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in * autoneg logic that's faked up to look like a PHY (3c905B-TX). * Note: if you don't perform the MDIO operations just right, * it's possible to end up with code that works correctly with * some chips/CPUs/processor speeds/bus speeds/etc but not * with others. */ #define MII_SET(x) \ CSR_WRITE_2(sc, XL_W4_PHY_MGMT, \ CSR_READ_2(sc, XL_W4_PHY_MGMT) | (x)) #define MII_CLR(x) \ CSR_WRITE_2(sc, XL_W4_PHY_MGMT, \ CSR_READ_2(sc, XL_W4_PHY_MGMT) & ~(x)) /* * Sync the PHYs by setting the data bit and strobing the clock 32 times. */ static void xl_mii_sync(struct xl_softc *sc) { register int i; XL_SEL_WIN(4); MII_SET(XL_MII_DIR|XL_MII_DATA); for (i = 0; i < 32; i++) { MII_SET(XL_MII_CLK); MII_SET(XL_MII_DATA); MII_SET(XL_MII_DATA); MII_CLR(XL_MII_CLK); MII_SET(XL_MII_DATA); MII_SET(XL_MII_DATA); } } /* * Clock a series of bits through the MII. */ static void xl_mii_send(struct xl_softc *sc, u_int32_t bits, int cnt) { int i; XL_SEL_WIN(4); MII_CLR(XL_MII_CLK); for (i = (0x1 << (cnt - 1)); i; i >>= 1) { if (bits & i) { MII_SET(XL_MII_DATA); } else { MII_CLR(XL_MII_DATA); } MII_CLR(XL_MII_CLK); MII_SET(XL_MII_CLK); } } /* * Read a PHY register through the MII. */ static int xl_mii_readreg(struct xl_softc *sc, struct xl_mii_frame *frame) { int i, ack; /* Set up frame for RX. */ frame->mii_stdelim = XL_MII_STARTDELIM; frame->mii_opcode = XL_MII_READOP; frame->mii_turnaround = 0; frame->mii_data = 0; /* Select register window 4. */ XL_SEL_WIN(4); CSR_WRITE_2(sc, XL_W4_PHY_MGMT, 0); /* Turn on data xmit. */ MII_SET(XL_MII_DIR); xl_mii_sync(sc); /* Send command/address info. */ xl_mii_send(sc, frame->mii_stdelim, 2); xl_mii_send(sc, frame->mii_opcode, 2); xl_mii_send(sc, frame->mii_phyaddr, 5); xl_mii_send(sc, frame->mii_regaddr, 5); /* Idle bit */ MII_CLR((XL_MII_CLK|XL_MII_DATA)); MII_SET(XL_MII_CLK); /* Turn off xmit. */ MII_CLR(XL_MII_DIR); /* Check for ack */ MII_CLR(XL_MII_CLK); ack = CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA; MII_SET(XL_MII_CLK); /* * Now try reading data bits. If the ack failed, we still * need to clock through 16 cycles to keep the PHY(s) in sync. */ if (ack) { for (i = 0; i < 16; i++) { MII_CLR(XL_MII_CLK); MII_SET(XL_MII_CLK); } goto fail; } for (i = 0x8000; i; i >>= 1) { MII_CLR(XL_MII_CLK); if (!ack) { if (CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA) frame->mii_data |= i; } MII_SET(XL_MII_CLK); } fail: MII_CLR(XL_MII_CLK); MII_SET(XL_MII_CLK); return (ack ? 1 : 0); } /* * Write to a PHY register through the MII. */ static int xl_mii_writereg(struct xl_softc *sc, struct xl_mii_frame *frame) { /* Set up frame for TX. */ frame->mii_stdelim = XL_MII_STARTDELIM; frame->mii_opcode = XL_MII_WRITEOP; frame->mii_turnaround = XL_MII_TURNAROUND; /* Select register window 4. */ XL_SEL_WIN(4); /* Turn on data output. */ MII_SET(XL_MII_DIR); xl_mii_sync(sc); xl_mii_send(sc, frame->mii_stdelim, 2); xl_mii_send(sc, frame->mii_opcode, 2); xl_mii_send(sc, frame->mii_phyaddr, 5); xl_mii_send(sc, frame->mii_regaddr, 5); xl_mii_send(sc, frame->mii_turnaround, 2); xl_mii_send(sc, frame->mii_data, 16); /* Idle bit. */ MII_SET(XL_MII_CLK); MII_CLR(XL_MII_CLK); /* Turn off xmit. 
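 *
 * The bit-banged sequence above follows standard IEEE 802.3 clause-22
 * MII framing: a 2-bit start delimiter, 2-bit opcode, 5-bit PHY address,
 * 5-bit register address, 2-bit turnaround, then 16 data bits. For a
 * register write the whole frame is clocked out as:
 *
 *	xl_mii_send(sc, XL_MII_STARTDELIM, 2);
 *	xl_mii_send(sc, XL_MII_WRITEOP, 2);
 *	xl_mii_send(sc, frame->mii_phyaddr, 5);
 *	xl_mii_send(sc, frame->mii_regaddr, 5);
 *	xl_mii_send(sc, XL_MII_TURNAROUND, 2);
 *	xl_mii_send(sc, frame->mii_data, 16);
 *
 * Reads differ only in the opcode and in tri-stating the data line at
 * the turnaround so the PHY can drive it back.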
*/ MII_CLR(XL_MII_DIR); return (0); } static int xl_miibus_readreg(device_t dev, int phy, int reg) { struct xl_softc *sc; struct xl_mii_frame frame; sc = device_get_softc(dev); /* * Pretend that PHYs are only available at MII address 24. * This is to guard against problems with certain 3Com ASIC * revisions that incorrectly map the internal transceiver * control registers at all MII addresses. This can cause * the miibus code to attach the same PHY several times over. */ if ((sc->xl_flags & XL_FLAG_PHYOK) == 0 && phy != 24) return (0); bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; xl_mii_readreg(sc, &frame); return (frame.mii_data); } static int xl_miibus_writereg(device_t dev, int phy, int reg, int data) { struct xl_softc *sc; struct xl_mii_frame frame; sc = device_get_softc(dev); if ((sc->xl_flags & XL_FLAG_PHYOK) == 0 && phy != 24) return (0); bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; frame.mii_data = data; xl_mii_writereg(sc, &frame); return (0); } static void xl_miibus_statchg(device_t dev) { struct xl_softc *sc; struct mii_data *mii; sc = device_get_softc(dev); mii = device_get_softc(sc->xl_miibus); xl_setcfg(sc); /* Set ASIC's duplex mode to match the PHY. */ XL_SEL_WIN(3); if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX); else CSR_WRITE_1(sc, XL_W3_MAC_CTRL, (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX)); } /* * Special support for the 3c905B-COMBO. This card has 10/100 support * plus BNC and AUI ports. This means we will have both a miibus attached * and some non-MII media settings. In order to allow this, we have to * add the extra media to the miibus's ifmedia struct, but we can't do * that during xl_attach() because the miibus hasn't been attached yet. * So instead, we wait until the miibus probe/attach is done, at which * point we will get a callback telling us that it's safe to add our * extra media. */ static void xl_miibus_mediainit(device_t dev) { struct xl_softc *sc; struct mii_data *mii; struct ifmedia *ifm; sc = device_get_softc(dev); mii = device_get_softc(sc->xl_miibus); ifm = &mii->mii_media; if (sc->xl_media & (XL_MEDIAOPT_AUI | XL_MEDIAOPT_10FL)) { /* * Check for a 10baseFL board in disguise. */ if (sc->xl_type == XL_TYPE_905B && sc->xl_media == XL_MEDIAOPT_10FL) { if (bootverbose) device_printf(sc->xl_dev, "found 10baseFL\n"); ifmedia_add(ifm, IFM_ETHER | IFM_10_FL, 0, NULL); ifmedia_add(ifm, IFM_ETHER | IFM_10_FL|IFM_HDX, 0, NULL); if (sc->xl_caps & XL_CAPS_FULL_DUPLEX) ifmedia_add(ifm, IFM_ETHER | IFM_10_FL | IFM_FDX, 0, NULL); } else { if (bootverbose) device_printf(sc->xl_dev, "found AUI\n"); ifmedia_add(ifm, IFM_ETHER | IFM_10_5, 0, NULL); } } if (sc->xl_media & XL_MEDIAOPT_BNC) { if (bootverbose) device_printf(sc->xl_dev, "found BNC\n"); ifmedia_add(ifm, IFM_ETHER | IFM_10_2, 0, NULL); } } /* * The EEPROM is slow: give it time to come ready after issuing * it a command. */ static int xl_eeprom_wait(struct xl_softc *sc) { int i; for (i = 0; i < 100; i++) { if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY) DELAY(162); else break; } if (i == 100) { device_printf(sc->xl_dev, "eeprom failed to come ready\n"); return (1); } return (0); } /* * Read a sequence of words from the EEPROM. Note that ethernet address * data is stored in the EEPROM in network byte order.
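 *
 * Usage sketch, mirroring the attach path further below: the station
 * address is stored big-endian, so it is read with swapping enabled,
 * and a nonzero return means the EEPROM never came ready:
 *
 *	u_char eaddr[ETHER_ADDR_LEN];
 *	if (xl_read_eeprom(sc, (caddr_t)&eaddr, XL_EE_OEM_ADR0, 3, 1))
 *		return (ENXIO);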
*/ static int xl_read_eeprom(struct xl_softc *sc, caddr_t dest, int off, int cnt, int swap) { int err = 0, i; u_int16_t word = 0, *ptr; #define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F)) #define EEPROM_8BIT_OFFSET(A) ((A) & 0x003F) /* * XXX: WARNING! DANGER! * It's easy to accidentally overwrite the rom content! * Note: the 3c575 uses 8bit EEPROM offsets. */ XL_SEL_WIN(0); if (xl_eeprom_wait(sc)) return (1); if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30) off += 0x30; for (i = 0; i < cnt; i++) { if (sc->xl_flags & XL_FLAG_8BITROM) CSR_WRITE_2(sc, XL_W0_EE_CMD, XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i)); else CSR_WRITE_2(sc, XL_W0_EE_CMD, XL_EE_READ | EEPROM_5BIT_OFFSET(off + i)); err = xl_eeprom_wait(sc); if (err) break; word = CSR_READ_2(sc, XL_W0_EE_DATA); ptr = (u_int16_t *)(dest + (i * 2)); if (swap) *ptr = ntohs(word); else *ptr = word; } return (err ? 1 : 0); } /* * NICs older than the 3c905B have only one multicast option, which * is to enable reception of all multicast frames. */ static void xl_setmulti(struct xl_softc *sc) { struct ifnet *ifp = sc->xl_ifp; struct ifmultiaddr *ifma; u_int8_t rxfilt; int mcnt = 0; XL_LOCK_ASSERT(sc); XL_SEL_WIN(5); rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER); if (ifp->if_flags & IFF_ALLMULTI) { rxfilt |= XL_RXFILTER_ALLMULTI; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); return; } IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) mcnt++; IF_ADDR_UNLOCK(ifp); if (mcnt) rxfilt |= XL_RXFILTER_ALLMULTI; else rxfilt &= ~XL_RXFILTER_ALLMULTI; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); } /* * 3c905B adapters have a hash filter that we can program. */ static void xl_setmulti_hash(struct xl_softc *sc) { struct ifnet *ifp = sc->xl_ifp; int h = 0, i; struct ifmultiaddr *ifma; u_int8_t rxfilt; int mcnt = 0; XL_LOCK_ASSERT(sc); XL_SEL_WIN(5); rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER); if (ifp->if_flags & IFF_ALLMULTI) { rxfilt |= XL_RXFILTER_ALLMULTI; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); return; } else rxfilt &= ~XL_RXFILTER_ALLMULTI; /* first, zot all the existing hash bits */ for (i = 0; i < XL_HASHFILT_SIZE; i++) CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|i); /* now program new ones */ IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; /* * Note: the 3c905B currently only supports a 64-bit hash * table, which means we really only need 6 bits, but the * manual indicates that future chip revisions will have a * 256-bit hash table, hence the routine is set up to * calculate 8 bits of position info in case we need it some * day. * Note II, The Sequel: _CURRENT_ versions of the 3c905B have * a 256 bit hash table. This means we have to use all 8 bits * regardless. On older cards, the upper 2 bits will be * ignored. Grrrr.... 
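 *
 * Restated, for each address the bucket index is the low byte of the
 * big-endian CRC-32 of the six address octets, and each bucket is set
 * with a single command write, i.e. (lladdr standing in for the
 * link-level address bytes):
 *
 *	h = ether_crc32_be(lladdr, ETHER_ADDR_LEN) & 0xFF;
 *	CSR_WRITE_2(sc, XL_COMMAND, h | XL_CMD_RX_SET_HASH | XL_HASH_SET);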
*/ h = ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) & 0xFF; CSR_WRITE_2(sc, XL_COMMAND, h | XL_CMD_RX_SET_HASH | XL_HASH_SET); mcnt++; } IF_ADDR_UNLOCK(ifp); if (mcnt) rxfilt |= XL_RXFILTER_MULTIHASH; else rxfilt &= ~XL_RXFILTER_MULTIHASH; CSR_WRITE_2(sc, XL_COMMAND, rxfilt | XL_CMD_RX_SET_FILT); } static void xl_setcfg(struct xl_softc *sc) { u_int32_t icfg; /*XL_LOCK_ASSERT(sc);*/ XL_SEL_WIN(3); icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG); icfg &= ~XL_ICFG_CONNECTOR_MASK; if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BT4) icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS); if (sc->xl_media & XL_MEDIAOPT_BTX) icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS); CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP); } static void xl_setmode(struct xl_softc *sc, int media) { u_int32_t icfg; u_int16_t mediastat; char *pmsg = "", *dmsg = ""; XL_LOCK_ASSERT(sc); XL_SEL_WIN(4); mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS); XL_SEL_WIN(3); icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG); if (sc->xl_media & XL_MEDIAOPT_BT) { if (IFM_SUBTYPE(media) == IFM_10_T) { pmsg = "10baseT transceiver"; sc->xl_xcvr = XL_XCVR_10BT; icfg &= ~XL_ICFG_CONNECTOR_MASK; icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS); mediastat |= XL_MEDIASTAT_LINKBEAT | XL_MEDIASTAT_JABGUARD; mediastat &= ~XL_MEDIASTAT_SQEENB; } } if (sc->xl_media & XL_MEDIAOPT_BFX) { if (IFM_SUBTYPE(media) == IFM_100_FX) { pmsg = "100baseFX port"; sc->xl_xcvr = XL_XCVR_100BFX; icfg &= ~XL_ICFG_CONNECTOR_MASK; icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS); mediastat |= XL_MEDIASTAT_LINKBEAT; mediastat &= ~XL_MEDIASTAT_SQEENB; } } if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) { if (IFM_SUBTYPE(media) == IFM_10_5) { pmsg = "AUI port"; sc->xl_xcvr = XL_XCVR_AUI; icfg &= ~XL_ICFG_CONNECTOR_MASK; icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS); mediastat &= ~(XL_MEDIASTAT_LINKBEAT | XL_MEDIASTAT_JABGUARD); mediastat |= ~XL_MEDIASTAT_SQEENB; } if (IFM_SUBTYPE(media) == IFM_10_FL) { pmsg = "10baseFL transceiver"; sc->xl_xcvr = XL_XCVR_AUI; icfg &= ~XL_ICFG_CONNECTOR_MASK; icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS); mediastat &= ~(XL_MEDIASTAT_LINKBEAT | XL_MEDIASTAT_JABGUARD); mediastat |= ~XL_MEDIASTAT_SQEENB; } } if (sc->xl_media & XL_MEDIAOPT_BNC) { if (IFM_SUBTYPE(media) == IFM_10_2) { pmsg = "AUI port"; sc->xl_xcvr = XL_XCVR_COAX; icfg &= ~XL_ICFG_CONNECTOR_MASK; icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS); mediastat &= ~(XL_MEDIASTAT_LINKBEAT | XL_MEDIASTAT_JABGUARD | XL_MEDIASTAT_SQEENB); } } if ((media & IFM_GMASK) == IFM_FDX || IFM_SUBTYPE(media) == IFM_100_FX) { dmsg = "full"; XL_SEL_WIN(3); CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX); } else { dmsg = "half"; XL_SEL_WIN(3); CSR_WRITE_1(sc, XL_W3_MAC_CTRL, (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX)); } if (IFM_SUBTYPE(media) == IFM_10_2) CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START); else CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP); CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg); XL_SEL_WIN(4); CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat); DELAY(800); XL_SEL_WIN(7); device_printf(sc->xl_dev, "selecting %s, %s duplex\n", pmsg, dmsg); } static void xl_reset(struct xl_softc *sc) { register int i; XL_LOCK_ASSERT(sc); XL_SEL_WIN(0); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET | ((sc->xl_flags & XL_FLAG_WEIRDRESET) ? XL_RESETOPT_DISADVFD:0)); /* * If we're using memory mapped register mode, pause briefly * after issuing the reset command before trying to access any * other registers. 
With my 3c575C cardbus card, failing to do * this results in the system locking up while trying to poll * the command busy bit in the status register. */ if (sc->xl_flags & XL_FLAG_USE_MMIO) DELAY(100000); for (i = 0; i < XL_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY)) break; } if (i == XL_TIMEOUT) device_printf(sc->xl_dev, "reset didn't complete\n"); /* Reset TX and RX. */ /* Note: the RX reset takes an absurd amount of time * on newer versions of the Tornado chips such as those * on the 3c905CX and newer 3c908C cards. We wait an * extra amount of time so that xl_wait() doesn't complain * and annoy the users. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET); DELAY(100000); xl_wait(sc); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); xl_wait(sc); if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR || sc->xl_flags & XL_FLAG_INVERT_MII_PWR) { XL_SEL_WIN(2); CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS, CSR_READ_2(sc, XL_W2_RESET_OPTIONS) | ((sc->xl_flags & XL_FLAG_INVERT_LED_PWR) ? XL_RESETOPT_INVERT_LED : 0) | ((sc->xl_flags & XL_FLAG_INVERT_MII_PWR) ? XL_RESETOPT_INVERT_MII : 0)); } /* Wait a little while for the chip to get its brains in order. */ DELAY(100000); } /* * Probe for a 3Com Etherlink XL chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int xl_probe(device_t dev) { const struct xl_type *t; t = xl_devs; while (t->xl_name != NULL) { if ((pci_get_vendor(dev) == t->xl_vid) && (pci_get_device(dev) == t->xl_did)) { device_set_desc(dev, t->xl_name); return (BUS_PROBE_DEFAULT); } t++; } return (ENXIO); } /* * This routine is a kludge to work around possible hardware faults * or manufacturing defects that can cause the media options register * (or reset options register, as it's called for the first generation * 3c90x adapters) to return an incorrect result. I have encountered * one Dell Latitude laptop docking station with an integrated 3c905-TX * which doesn't have any of the 'mediaopt' bits set. This screws up * the attach routine pretty badly because it doesn't know what media * to look for. If we find ourselves in this predicament, this routine * will try to guess the media options values and warn the user of a * possible manufacturing defect with his adapter/system/whatever. */ static void xl_mediacheck(struct xl_softc *sc) { /* * If some of the media options bits are set, assume they are * correct. If not, try to figure it out down below. * XXX I should check for 10baseFL, but I don't have an adapter * to test with. */ if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) { /* * Check the XCVR value. If it's not in the normal range * of values, we need to fake it up here. */ if (sc->xl_xcvr <= XL_XCVR_AUTO) return; else { device_printf(sc->xl_dev, "bogus xcvr value in EEPROM (%x)\n", sc->xl_xcvr); device_printf(sc->xl_dev, "choosing new default based on card type\n"); } } else { if (sc->xl_type == XL_TYPE_905B && sc->xl_media & XL_MEDIAOPT_10FL) return; device_printf(sc->xl_dev, "WARNING: no media options bits set in the media options register!!\n"); device_printf(sc->xl_dev, "this could be a manufacturing defect in your adapter or system\n"); device_printf(sc->xl_dev, "attempting to guess media type; you should probably consult your vendor\n"); } xl_choose_xcvr(sc, 1); } static void xl_choose_xcvr(struct xl_softc *sc, int verbose) { u_int16_t devid; /* * Read the device ID from the EEPROM. 
* This is what's loaded into the PCI device ID register, so it has * to be correct otherwise we wouldn't have gotten this far. */ xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0); switch (devid) { case TC_DEVICEID_BOOMERANG_10BT: /* 3c900-TPO */ case TC_DEVICEID_KRAKATOA_10BT: /* 3c900B-TPO */ sc->xl_media = XL_MEDIAOPT_BT; sc->xl_xcvr = XL_XCVR_10BT; if (verbose) device_printf(sc->xl_dev, "guessing 10BaseT transceiver\n"); break; case TC_DEVICEID_BOOMERANG_10BT_COMBO: /* 3c900-COMBO */ case TC_DEVICEID_KRAKATOA_10BT_COMBO: /* 3c900B-COMBO */ sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI; sc->xl_xcvr = XL_XCVR_10BT; if (verbose) device_printf(sc->xl_dev, "guessing COMBO (AUI/BNC/TP)\n"); break; case TC_DEVICEID_KRAKATOA_10BT_TPC: /* 3c900B-TPC */ sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC; sc->xl_xcvr = XL_XCVR_10BT; if (verbose) device_printf(sc->xl_dev, "guessing TPC (BNC/TP)\n"); break; case TC_DEVICEID_CYCLONE_10FL: /* 3c900B-FL */ sc->xl_media = XL_MEDIAOPT_10FL; sc->xl_xcvr = XL_XCVR_AUI; if (verbose) device_printf(sc->xl_dev, "guessing 10baseFL\n"); break; case TC_DEVICEID_BOOMERANG_10_100BT: /* 3c905-TX */ case TC_DEVICEID_HURRICANE_555: /* 3c555 */ case TC_DEVICEID_HURRICANE_556: /* 3c556 */ case TC_DEVICEID_HURRICANE_556B: /* 3c556B */ case TC_DEVICEID_HURRICANE_575A: /* 3c575TX */ case TC_DEVICEID_HURRICANE_575B: /* 3c575B */ case TC_DEVICEID_HURRICANE_575C: /* 3c575C */ case TC_DEVICEID_HURRICANE_656: /* 3c656 */ case TC_DEVICEID_HURRICANE_656B: /* 3c656B */ case TC_DEVICEID_TORNADO_656C: /* 3c656C */ case TC_DEVICEID_TORNADO_10_100BT_920B: /* 3c920B-EMB */ case TC_DEVICEID_TORNADO_10_100BT_920B_WNM: /* 3c920B-EMB-WNM */ sc->xl_media = XL_MEDIAOPT_MII; sc->xl_xcvr = XL_XCVR_MII; if (verbose) device_printf(sc->xl_dev, "guessing MII\n"); break; case TC_DEVICEID_BOOMERANG_100BT4: /* 3c905-T4 */ case TC_DEVICEID_CYCLONE_10_100BT4: /* 3c905B-T4 */ sc->xl_media = XL_MEDIAOPT_BT4; sc->xl_xcvr = XL_XCVR_MII; if (verbose) device_printf(sc->xl_dev, "guessing 100baseT4/MII\n"); break; case TC_DEVICEID_HURRICANE_10_100BT: /* 3c905B-TX */ case TC_DEVICEID_HURRICANE_10_100BT_SERV:/*3c980-TX */ case TC_DEVICEID_TORNADO_10_100BT_SERV: /* 3c980C-TX */ case TC_DEVICEID_HURRICANE_SOHO100TX: /* 3cSOHO100-TX */ case TC_DEVICEID_TORNADO_10_100BT: /* 3c905C-TX */ case TC_DEVICEID_TORNADO_HOMECONNECT: /* 3c450-TX */ sc->xl_media = XL_MEDIAOPT_BTX; sc->xl_xcvr = XL_XCVR_AUTO; if (verbose) device_printf(sc->xl_dev, "guessing 10/100 internal\n"); break; case TC_DEVICEID_CYCLONE_10_100_COMBO: /* 3c905B-COMBO */ sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI; sc->xl_xcvr = XL_XCVR_AUTO; if (verbose) device_printf(sc->xl_dev, "guessing 10/100 plus BNC/AUI\n"); break; default: device_printf(sc->xl_dev, "unknown device ID: %x -- defaulting to 10baseT\n", devid); sc->xl_media = XL_MEDIAOPT_BT; break; } } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. 
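 *
 * The ordering below matters: map the registers and allocate the
 * interrupt, reset the chip, pull the station address out of the
 * EEPROM, create the DMA tags and descriptor rings, sort out the
 * media/PHY, and only then ether_ifattach() and hook up the interrupt
 * handler, so the interface is fully formed before the first
 * interrupt can arrive.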
*/ static int xl_attach(device_t dev) { u_char eaddr[ETHER_ADDR_LEN]; u_int16_t xcvr[2]; struct xl_softc *sc; struct ifnet *ifp; int media; int unit, error = 0, rid, res; uint16_t did; sc = device_get_softc(dev); sc->xl_dev = dev; unit = device_get_unit(dev); mtx_init(&sc->xl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts); did = pci_get_device(dev); sc->xl_flags = 0; if (did == TC_DEVICEID_HURRICANE_555) sc->xl_flags |= XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_PHYOK; if (did == TC_DEVICEID_HURRICANE_556 || did == TC_DEVICEID_HURRICANE_556B) sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK | XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_WEIRDRESET | XL_FLAG_INVERT_LED_PWR | XL_FLAG_INVERT_MII_PWR; if (did == TC_DEVICEID_HURRICANE_555 || did == TC_DEVICEID_HURRICANE_556) sc->xl_flags |= XL_FLAG_8BITROM; if (did == TC_DEVICEID_HURRICANE_556B) sc->xl_flags |= XL_FLAG_NO_XCVR_PWR; if (did == TC_DEVICEID_HURRICANE_575B || did == TC_DEVICEID_HURRICANE_575C || did == TC_DEVICEID_HURRICANE_656B || did == TC_DEVICEID_TORNADO_656C) sc->xl_flags |= XL_FLAG_FUNCREG; if (did == TC_DEVICEID_HURRICANE_575A || did == TC_DEVICEID_HURRICANE_575B || did == TC_DEVICEID_HURRICANE_575C || did == TC_DEVICEID_HURRICANE_656B || did == TC_DEVICEID_TORNADO_656C) sc->xl_flags |= XL_FLAG_PHYOK | XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_8BITROM; if (did == TC_DEVICEID_HURRICANE_656) sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK; if (did == TC_DEVICEID_HURRICANE_575B) sc->xl_flags |= XL_FLAG_INVERT_LED_PWR; if (did == TC_DEVICEID_HURRICANE_575C) sc->xl_flags |= XL_FLAG_INVERT_MII_PWR; if (did == TC_DEVICEID_TORNADO_656C) sc->xl_flags |= XL_FLAG_INVERT_MII_PWR; if (did == TC_DEVICEID_HURRICANE_656 || did == TC_DEVICEID_HURRICANE_656B) sc->xl_flags |= XL_FLAG_INVERT_MII_PWR | XL_FLAG_INVERT_LED_PWR; if (did == TC_DEVICEID_TORNADO_10_100BT_920B || did == TC_DEVICEID_TORNADO_10_100BT_920B_WNM) sc->xl_flags |= XL_FLAG_PHYOK; switch (did) { case TC_DEVICEID_BOOMERANG_10_100BT: /* 3c905-TX */ case TC_DEVICEID_HURRICANE_575A: case TC_DEVICEID_HURRICANE_575B: case TC_DEVICEID_HURRICANE_575C: sc->xl_flags |= XL_FLAG_NO_MMIO; break; default: break; } /* * Map control/status registers. */ pci_enable_busmaster(dev); if ((sc->xl_flags & XL_FLAG_NO_MMIO) == 0) { rid = XL_PCI_LOMEM; res = SYS_RES_MEMORY; sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE); } if (sc->xl_res != NULL) { sc->xl_flags |= XL_FLAG_USE_MMIO; if (bootverbose) device_printf(dev, "using memory mapped I/O\n"); } else { rid = XL_PCI_LOIO; res = SYS_RES_IOPORT; sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE); if (sc->xl_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); error = ENXIO; goto fail; } if (bootverbose) device_printf(dev, "using port I/O\n"); } sc->xl_btag = rman_get_bustag(sc->xl_res); sc->xl_bhandle = rman_get_bushandle(sc->xl_res); if (sc->xl_flags & XL_FLAG_FUNCREG) { rid = XL_PCI_FUNCMEM; sc->xl_fres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->xl_fres == NULL) { device_printf(dev, "couldn't map funcreg memory\n"); error = ENXIO; goto fail; } sc->xl_ftag = rman_get_bustag(sc->xl_fres); sc->xl_fhandle = rman_get_bushandle(sc->xl_fres); } /* Allocate interrupt */ rid = 0; sc->xl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->xl_irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } /* Initialize interface name. 
*/ ifp = sc->xl_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); /* Reset the adapter. */ XL_LOCK(sc); xl_reset(sc); XL_UNLOCK(sc); /* * Get station address from the EEPROM. */ if (xl_read_eeprom(sc, (caddr_t)&eaddr, XL_EE_OEM_ADR0, 3, 1)) { device_printf(dev, "failed to read station address\n"); error = ENXIO; goto fail; } callout_init_mtx(&sc->xl_stat_callout, &sc->xl_mtx, 0); TASK_INIT(&sc->xl_task, 0, xl_rxeof_task, sc); /* * Now allocate a tag for the DMA descriptor lists and a chunk * of DMA-able memory based on the tag. Also obtain the DMA * addresses of the RX and TX ring, which we'll need later. * All of our lists are allocated as a contiguous block * of memory. */ error = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, XL_RX_LIST_SZ, 1, XL_RX_LIST_SZ, 0, NULL, NULL, &sc->xl_ldata.xl_rx_tag); if (error) { device_printf(dev, "failed to allocate rx dma tag\n"); goto fail; } error = bus_dmamem_alloc(sc->xl_ldata.xl_rx_tag, (void **)&sc->xl_ldata.xl_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->xl_ldata.xl_rx_dmamap); if (error) { device_printf(dev, "no memory for rx list buffers!\n"); bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag); sc->xl_ldata.xl_rx_tag = NULL; goto fail; } error = bus_dmamap_load(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap, sc->xl_ldata.xl_rx_list, XL_RX_LIST_SZ, xl_dma_map_addr, &sc->xl_ldata.xl_rx_dmaaddr, BUS_DMA_NOWAIT); if (error) { device_printf(dev, "cannot get dma address of the rx ring!\n"); bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list, sc->xl_ldata.xl_rx_dmamap); bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag); sc->xl_ldata.xl_rx_tag = NULL; goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, XL_TX_LIST_SZ, 1, XL_TX_LIST_SZ, 0, NULL, NULL, &sc->xl_ldata.xl_tx_tag); if (error) { device_printf(dev, "failed to allocate tx dma tag\n"); goto fail; } error = bus_dmamem_alloc(sc->xl_ldata.xl_tx_tag, (void **)&sc->xl_ldata.xl_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->xl_ldata.xl_tx_dmamap); if (error) { device_printf(dev, "no memory for list buffers!\n"); bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag); sc->xl_ldata.xl_tx_tag = NULL; goto fail; } error = bus_dmamap_load(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap, sc->xl_ldata.xl_tx_list, XL_TX_LIST_SZ, xl_dma_map_addr, &sc->xl_ldata.xl_tx_dmaaddr, BUS_DMA_NOWAIT); if (error) { device_printf(dev, "cannot get dma address of the tx ring!\n"); bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list, sc->xl_ldata.xl_tx_dmamap); bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag); sc->xl_ldata.xl_tx_tag = NULL; goto fail; } /* * Allocate a DMA tag for the mapping of mbufs. */ error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * XL_MAXFRAGS, XL_MAXFRAGS, MCLBYTES, 0, NULL, NULL, &sc->xl_mtag); if (error) { device_printf(dev, "failed to allocate mbuf dma tag\n"); goto fail; } /* We need a spare DMA map for the RX ring. */ error = bus_dmamap_create(sc->xl_mtag, 0, &sc->xl_tmpmap); if (error) goto fail; /* * Figure out the card type. 3c905B adapters have the * 'supportsNoTxLength' bit set in the capabilities * word in the EEPROM. * Note: my 3c575C cardbus card lies. It returns a value * of 0x1578 for its capabilities word, which is somewhat * nonsensical. 
Another way to distinguish a 3c90x chip * from a 3c90xB/C chip is to check for the 'supportsLargePackets' * bit. This will only be set for 3c90x boomerang chips. */ xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0); if (sc->xl_caps & XL_CAPS_NO_TXLENGTH || !(sc->xl_caps & XL_CAPS_LARGE_PKTS)) sc->xl_type = XL_TYPE_905B; else sc->xl_type = XL_TYPE_90X; /* Set the TX start threshold for best performance. */ sc->xl_tx_thresh = XL_MIN_FRAMELEN; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = xl_ioctl; ifp->if_capabilities = IFCAP_VLAN_MTU; if (sc->xl_type == XL_TYPE_905B) { ifp->if_hwassist = XL905B_CSUM_FEATURES; #ifdef XL905B_TXCSUM_BROKEN ifp->if_capabilities |= IFCAP_RXCSUM; #else ifp->if_capabilities |= IFCAP_HWCSUM; #endif } ifp->if_capenable = ifp->if_capabilities; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif ifp->if_start = xl_start; ifp->if_init = xl_init; IFQ_SET_MAXLEN(&ifp->if_snd, XL_TX_LIST_CNT - 1); ifp->if_snd.ifq_drv_maxlen = XL_TX_LIST_CNT - 1; IFQ_SET_READY(&ifp->if_snd); /* * Now we have to see what sort of media we have. * This includes probing for an MII interface and a * possible PHY. */ XL_SEL_WIN(3); sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT); if (bootverbose) device_printf(dev, "media options word: %x\n", sc->xl_media); xl_read_eeprom(sc, (char *)&xcvr, XL_EE_ICFG_0, 2, 0); sc->xl_xcvr = xcvr[0] | xcvr[1] << 16; sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK; sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS; xl_mediacheck(sc); if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX || sc->xl_media & XL_MEDIAOPT_BT4) { if (bootverbose) device_printf(dev, "found MII/AUTO\n"); xl_setcfg(sc); if (mii_phy_probe(dev, &sc->xl_miibus, xl_ifmedia_upd, xl_ifmedia_sts)) { device_printf(dev, "no PHY found!\n"); error = ENXIO; goto fail; } goto done; } /* * Sanity check. If the user has selected "auto" and this isn't * a 10/100 card of some kind, we need to force the transceiver * type to something sane. */ if (sc->xl_xcvr == XL_XCVR_AUTO) xl_choose_xcvr(sc, bootverbose); /* * Do ifmedia setup. */ if (sc->xl_media & XL_MEDIAOPT_BT) { if (bootverbose) device_printf(dev, "found 10baseT\n"); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL); if (sc->xl_caps & XL_CAPS_FULL_DUPLEX) ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); } if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) { /* * Check for a 10baseFL board in disguise.
*/ if (sc->xl_type == XL_TYPE_905B && sc->xl_media == XL_MEDIAOPT_10FL) { if (bootverbose) device_printf(dev, "found 10baseFL\n"); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL|IFM_HDX, 0, NULL); if (sc->xl_caps & XL_CAPS_FULL_DUPLEX) ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL); } else { if (bootverbose) device_printf(dev, "found AUI\n"); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL); } } if (sc->xl_media & XL_MEDIAOPT_BNC) { if (bootverbose) device_printf(dev, "found BNC\n"); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL); } if (sc->xl_media & XL_MEDIAOPT_BFX) { if (bootverbose) device_printf(dev, "found 100baseFX\n"); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_FX, 0, NULL); } media = IFM_ETHER|IFM_100_TX|IFM_FDX; xl_choose_media(sc, &media); if (sc->xl_miibus == NULL) ifmedia_set(&sc->ifmedia, media); done: if (sc->xl_flags & XL_FLAG_NO_XCVR_PWR) { XL_SEL_WIN(0); CSR_WRITE_2(sc, XL_W0_MFG_ID, XL_NO_XCVR_PWR_MAGICBITS); } /* * Call MI attach routine. */ ether_ifattach(ifp, eaddr); error = bus_setup_intr(dev, sc->xl_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, xl_intr, sc, &sc->xl_intrhand); if (error) { device_printf(dev, "couldn't set up irq\n"); ether_ifdetach(ifp); goto fail; } fail: if (error) xl_detach(dev); return (error); } /* * Choose a default media. * XXX This is a leaf function only called by xl_attach() and * acquires/releases the non-recursible driver mutex to * satisfy lock assertions. */ static void xl_choose_media(struct xl_softc *sc, int *media) { XL_LOCK(sc); switch (sc->xl_xcvr) { case XL_XCVR_10BT: *media = IFM_ETHER|IFM_10_T; xl_setmode(sc, *media); break; case XL_XCVR_AUI: if (sc->xl_type == XL_TYPE_905B && sc->xl_media == XL_MEDIAOPT_10FL) { *media = IFM_ETHER|IFM_10_FL; xl_setmode(sc, *media); } else { *media = IFM_ETHER|IFM_10_5; xl_setmode(sc, *media); } break; case XL_XCVR_COAX: *media = IFM_ETHER|IFM_10_2; xl_setmode(sc, *media); break; case XL_XCVR_AUTO: case XL_XCVR_100BTX: case XL_XCVR_MII: /* Chosen by miibus */ break; case XL_XCVR_100BFX: *media = IFM_ETHER|IFM_100_FX; break; default: device_printf(sc->xl_dev, "unknown XCVR type: %d\n", sc->xl_xcvr); /* * This will probably be wrong, but it prevents * the ifmedia code from panicking. */ *media = IFM_ETHER|IFM_10_T; break; } XL_UNLOCK(sc); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. 
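 *
 * Teardown runs roughly in reverse of attach: stop the chip and drain
 * the taskqueue and callout, detach the ifnet, tear down the
 * interrupt and bus resources, free the descriptor rings and DMA
 * tags, and destroy the mutex last.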
*/ static int xl_detach(device_t dev) { struct xl_softc *sc; struct ifnet *ifp; int rid, res; sc = device_get_softc(dev); ifp = sc->xl_ifp; KASSERT(mtx_initialized(&sc->xl_mtx), ("xl mutex not initialized")); #ifdef DEVICE_POLLING if (ifp && ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif if (sc->xl_flags & XL_FLAG_USE_MMIO) { rid = XL_PCI_LOMEM; res = SYS_RES_MEMORY; } else { rid = XL_PCI_LOIO; res = SYS_RES_IOPORT; } /* These should only be active if attach succeeded */ if (device_is_attached(dev)) { XL_LOCK(sc); xl_reset(sc); xl_stop(sc); XL_UNLOCK(sc); taskqueue_drain(taskqueue_swi, &sc->xl_task); callout_drain(&sc->xl_stat_callout); ether_ifdetach(ifp); } if (sc->xl_miibus) device_delete_child(dev, sc->xl_miibus); bus_generic_detach(dev); ifmedia_removeall(&sc->ifmedia); if (sc->xl_intrhand) bus_teardown_intr(dev, sc->xl_irq, sc->xl_intrhand); if (sc->xl_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->xl_irq); if (sc->xl_fres != NULL) bus_release_resource(dev, SYS_RES_MEMORY, XL_PCI_FUNCMEM, sc->xl_fres); if (sc->xl_res) bus_release_resource(dev, res, rid, sc->xl_res); if (ifp) if_free(ifp); if (sc->xl_mtag) { bus_dmamap_destroy(sc->xl_mtag, sc->xl_tmpmap); bus_dma_tag_destroy(sc->xl_mtag); } if (sc->xl_ldata.xl_rx_tag) { bus_dmamap_unload(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap); bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list, sc->xl_ldata.xl_rx_dmamap); bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag); } if (sc->xl_ldata.xl_tx_tag) { bus_dmamap_unload(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap); bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list, sc->xl_ldata.xl_tx_dmamap); bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag); } mtx_destroy(&sc->xl_mtx); return (0); } /* * Initialize the transmit descriptors. */ static int xl_list_tx_init(struct xl_softc *sc) { struct xl_chain_data *cd; struct xl_list_data *ld; int error, i; XL_LOCK_ASSERT(sc); cd = &sc->xl_cdata; ld = &sc->xl_ldata; for (i = 0; i < XL_TX_LIST_CNT; i++) { cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i]; error = bus_dmamap_create(sc->xl_mtag, 0, &cd->xl_tx_chain[i].xl_map); if (error) return (error); cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr + i * sizeof(struct xl_list); if (i == (XL_TX_LIST_CNT - 1)) cd->xl_tx_chain[i].xl_next = NULL; else cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1]; } cd->xl_tx_free = &cd->xl_tx_chain[0]; cd->xl_tx_tail = cd->xl_tx_head = NULL; bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE); return (0); } /* * Initialize the transmit descriptors. 
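 * This is the 90xB flavor: unlike xl_list_tx_init() above it links
 * the descriptors into a closed ring with both next and prev
 * pointers and marks slot 0 empty, since the 90xB download engine
 * polls the ring (see the XL_DOWN_POLL writes elsewhere) instead of
 * being handed one chain at a time.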
*/ static int xl_list_tx_init_90xB(struct xl_softc *sc) { struct xl_chain_data *cd; struct xl_list_data *ld; int error, i; XL_LOCK_ASSERT(sc); cd = &sc->xl_cdata; ld = &sc->xl_ldata; for (i = 0; i < XL_TX_LIST_CNT; i++) { cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i]; error = bus_dmamap_create(sc->xl_mtag, 0, &cd->xl_tx_chain[i].xl_map); if (error) return (error); cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr + i * sizeof(struct xl_list); if (i == (XL_TX_LIST_CNT - 1)) cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[0]; else cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1]; if (i == 0) cd->xl_tx_chain[i].xl_prev = &cd->xl_tx_chain[XL_TX_LIST_CNT - 1]; else cd->xl_tx_chain[i].xl_prev = &cd->xl_tx_chain[i - 1]; } bzero(ld->xl_tx_list, XL_TX_LIST_SZ); ld->xl_tx_list[0].xl_status = htole32(XL_TXSTAT_EMPTY); cd->xl_tx_prod = 1; cd->xl_tx_cons = 1; cd->xl_tx_cnt = 0; bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE); return (0); } /* * Initialize the RX descriptors and allocate mbufs for them. Note that * we arrange the descriptors in a closed ring, so that the last descriptor * points back to the first. */ static int xl_list_rx_init(struct xl_softc *sc) { struct xl_chain_data *cd; struct xl_list_data *ld; int error, i, next; u_int32_t nextptr; XL_LOCK_ASSERT(sc); cd = &sc->xl_cdata; ld = &sc->xl_ldata; for (i = 0; i < XL_RX_LIST_CNT; i++) { cd->xl_rx_chain[i].xl_ptr = &ld->xl_rx_list[i]; error = bus_dmamap_create(sc->xl_mtag, 0, &cd->xl_rx_chain[i].xl_map); if (error) return (error); error = xl_newbuf(sc, &cd->xl_rx_chain[i]); if (error) return (error); if (i == (XL_RX_LIST_CNT - 1)) next = 0; else next = i + 1; nextptr = ld->xl_rx_dmaaddr + next * sizeof(struct xl_list_onefrag); cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[next]; ld->xl_rx_list[i].xl_next = htole32(nextptr); } bus_dmamap_sync(ld->xl_rx_tag, ld->xl_rx_dmamap, BUS_DMASYNC_PREWRITE); cd->xl_rx_head = &cd->xl_rx_chain[0]; return (0); } /* * Initialize an RX descriptor and attach an MBUF cluster. * If we fail to do so, we need to leave the old mbuf and * the old DMA map untouched so that it can be reused. */ static int xl_newbuf(struct xl_softc *sc, struct xl_chain_onefrag *c) { struct mbuf *m_new = NULL; bus_dmamap_t map; bus_dma_segment_t segs[1]; int error, nseg; XL_LOCK_ASSERT(sc); m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (m_new == NULL) return (ENOBUFS); m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; /* Force longword alignment for packet payload. 
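(ETHER_ALIGN is 2: dropping two leading bytes makes the 14-byte Ethernet header end on a longword boundary, so the IP header that follows lands 4-byte aligned.)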
*/ m_adj(m_new, ETHER_ALIGN); error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, sc->xl_tmpmap, m_new, segs, &nseg, BUS_DMA_NOWAIT); if (error) { m_freem(m_new); device_printf(sc->xl_dev, "can't map mbuf (error %d)\n", error); return (error); } KASSERT(nseg == 1, ("%s: too many DMA segments (%d)", __func__, nseg)); bus_dmamap_unload(sc->xl_mtag, c->xl_map); map = c->xl_map; c->xl_map = sc->xl_tmpmap; sc->xl_tmpmap = map; c->xl_mbuf = m_new; c->xl_ptr->xl_frag.xl_len = htole32(m_new->m_len | XL_LAST_FRAG); c->xl_ptr->xl_status = 0; c->xl_ptr->xl_frag.xl_addr = htole32(segs->ds_addr); bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREREAD); return (0); } static int xl_rx_resync(struct xl_softc *sc) { struct xl_chain_onefrag *pos; int i; XL_LOCK_ASSERT(sc); pos = sc->xl_cdata.xl_rx_head; for (i = 0; i < XL_RX_LIST_CNT; i++) { if (pos->xl_ptr->xl_status) break; pos = pos->xl_next; } if (i == XL_RX_LIST_CNT) return (0); sc->xl_cdata.xl_rx_head = pos; return (EAGAIN); } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. */ -static void +static int xl_rxeof(struct xl_softc *sc) { struct mbuf *m; struct ifnet *ifp = sc->xl_ifp; struct xl_chain_onefrag *cur_rx; int total_len = 0; + int rx_npkts = 0; u_int32_t rxstat; XL_LOCK_ASSERT(sc); again: bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_POSTREAD); while ((rxstat = le32toh(sc->xl_cdata.xl_rx_head->xl_ptr->xl_status))) { #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { if (sc->rxcycles <= 0) break; sc->rxcycles--; } #endif cur_rx = sc->xl_cdata.xl_rx_head; sc->xl_cdata.xl_rx_head = cur_rx->xl_next; total_len = rxstat & XL_RXSTAT_LENMASK; /* * Since we have told the chip to allow large frames, * we need to trap giant frame errors in software. We allow * a little more than the normal frame size to account for * frames with VLAN tags. */ if (total_len > XL_MAX_FRAMELEN) rxstat |= (XL_RXSTAT_UP_ERROR|XL_RXSTAT_OVERSIZE); /* * If an error occurs, update stats, clear the * status word and leave the mbuf cluster in place: * it should simply get re-used next time this descriptor * comes up in the ring. */ if (rxstat & XL_RXSTAT_UP_ERROR) { ifp->if_ierrors++; cur_rx->xl_ptr->xl_status = 0; bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE); continue; } /* * If the error bit was not set, the upload complete * bit should be set which means we have a valid packet. * If not, something truly strange has happened. */ if (!(rxstat & XL_RXSTAT_UP_CMPLT)) { device_printf(sc->xl_dev, "bad receive status -- packet dropped\n"); ifp->if_ierrors++; cur_rx->xl_ptr->xl_status = 0; bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE); continue; } /* No errors; receive the packet. */ bus_dmamap_sync(sc->xl_mtag, cur_rx->xl_map, BUS_DMASYNC_POSTREAD); m = cur_rx->xl_mbuf; /* * Try to conjure up a new mbuf cluster. If that * fails, it means we have an out of memory condition and * should leave the buffer in place and continue. This will * result in a lost packet, but there's little else we * can do in this situation. 
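 * Recycling is safe because the failure path below only clears the
 * descriptor status word; cur_rx->xl_mbuf and its DMA map are left
 * untouched, so the old cluster is simply re-armed.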
*/ if (xl_newbuf(sc, cur_rx)) { ifp->if_ierrors++; cur_rx->xl_ptr->xl_status = 0; bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE); continue; } bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE); ifp->if_ipackets++; m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = total_len; if (ifp->if_capenable & IFCAP_RXCSUM) { /* Do IP checksum checking. */ if (rxstat & XL_RXSTAT_IPCKOK) m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if (!(rxstat & XL_RXSTAT_IPCKERR)) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; if ((rxstat & XL_RXSTAT_TCPCOK && !(rxstat & XL_RXSTAT_TCPCKERR)) || (rxstat & XL_RXSTAT_UDPCKOK && !(rxstat & XL_RXSTAT_UDPCKERR))) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID|CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } XL_UNLOCK(sc); (*ifp->if_input)(ifp, m); XL_LOCK(sc); + rx_npkts++; /* * If we are running from the taskqueue, the interface * might have been stopped while we were passing the last * packet up the network stack. */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) - return; + return (rx_npkts); } /* * Handle the 'end of channel' condition. When the upload * engine hits the end of the RX ring, it will stall. This * is our cue to flush the RX ring, reload the uplist pointer * register and unstall the engine. * XXX This is actually a little goofy. With the ThunderLAN * chip, you get an interrupt when the receiver hits the end * of the receive ring, which tells you exactly when * you need to reload the ring pointer. Here we have to * fake it. I'm mad at myself for not being clever enough * to avoid the use of a goto here. */ if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 || CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL); xl_wait(sc); CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr); sc->xl_cdata.xl_rx_head = &sc->xl_cdata.xl_rx_chain[0]; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL); goto again; } + return (rx_npkts); } /* * Taskqueue wrapper for xl_rxeof(). */ static void xl_rxeof_task(void *arg, int pending) { struct xl_softc *sc = (struct xl_softc *)arg; XL_LOCK(sc); if (sc->xl_ifp->if_drv_flags & IFF_DRV_RUNNING) xl_rxeof(sc); XL_UNLOCK(sc); } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. */ static void xl_txeof(struct xl_softc *sc) { struct xl_chain *cur_tx; struct ifnet *ifp = sc->xl_ifp; XL_LOCK_ASSERT(sc); /* * Go through our tx list and free mbufs for those * frames that have been downloaded. Note: the 3c905B * sets a special bit in the status word to let us * know that a frame has been downloaded, but the * original 3c900/3c905 adapters don't do that. * Consequently, we have to use a different test if * xl_type != XL_TYPE_905B.
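 * (For the older chips the test is simply whether XL_DOWNLIST_PTR is
 * still nonzero, i.e. whether the download engine is still working
 * through the list; that is the early break at the top of the loop
 * below.)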
*/ while (sc->xl_cdata.xl_tx_head != NULL) { cur_tx = sc->xl_cdata.xl_tx_head; if (CSR_READ_4(sc, XL_DOWNLIST_PTR)) break; sc->xl_cdata.xl_tx_head = cur_tx->xl_next; bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map); m_freem(cur_tx->xl_mbuf); cur_tx->xl_mbuf = NULL; ifp->if_opackets++; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; cur_tx->xl_next = sc->xl_cdata.xl_tx_free; sc->xl_cdata.xl_tx_free = cur_tx; } if (sc->xl_cdata.xl_tx_head == NULL) { sc->xl_wdog_timer = 0; sc->xl_cdata.xl_tx_tail = NULL; } else { if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED || !CSR_READ_4(sc, XL_DOWNLIST_PTR)) { CSR_WRITE_4(sc, XL_DOWNLIST_PTR, sc->xl_cdata.xl_tx_head->xl_phys); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); } } } static void xl_txeof_90xB(struct xl_softc *sc) { struct xl_chain *cur_tx = NULL; struct ifnet *ifp = sc->xl_ifp; int idx; XL_LOCK_ASSERT(sc); bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap, BUS_DMASYNC_POSTREAD); idx = sc->xl_cdata.xl_tx_cons; while (idx != sc->xl_cdata.xl_tx_prod) { cur_tx = &sc->xl_cdata.xl_tx_chain[idx]; if (!(le32toh(cur_tx->xl_ptr->xl_status) & XL_TXSTAT_DL_COMPLETE)) break; if (cur_tx->xl_mbuf != NULL) { bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map); m_freem(cur_tx->xl_mbuf); cur_tx->xl_mbuf = NULL; } ifp->if_opackets++; sc->xl_cdata.xl_tx_cnt--; XL_INC(idx, XL_TX_LIST_CNT); } if (sc->xl_cdata.xl_tx_cnt == 0) sc->xl_wdog_timer = 0; sc->xl_cdata.xl_tx_cons = idx; if (cur_tx != NULL) ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } /* * TX 'end of channel' interrupt handler. Actually, we should * only get a 'TX complete' interrupt if there's a transmit error, * so this is really a TX error handler. */ static void xl_txeoc(struct xl_softc *sc) { u_int8_t txstat; XL_LOCK_ASSERT(sc); while ((txstat = CSR_READ_1(sc, XL_TX_STATUS))) { if (txstat & XL_TXSTATUS_UNDERRUN || txstat & XL_TXSTATUS_JABBER || txstat & XL_TXSTATUS_RECLAIM) { device_printf(sc->xl_dev, "transmission error: %x\n", txstat); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); xl_wait(sc); if (sc->xl_type == XL_TYPE_905B) { if (sc->xl_cdata.xl_tx_cnt) { int i; struct xl_chain *c; i = sc->xl_cdata.xl_tx_cons; c = &sc->xl_cdata.xl_tx_chain[i]; CSR_WRITE_4(sc, XL_DOWNLIST_PTR, c->xl_phys); CSR_WRITE_1(sc, XL_DOWN_POLL, 64); } } else { if (sc->xl_cdata.xl_tx_head != NULL) CSR_WRITE_4(sc, XL_DOWNLIST_PTR, sc->xl_cdata.xl_tx_head->xl_phys); } /* * Remember to set this for the * first generation 3c90X chips. */ CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8); if (txstat & XL_TXSTATUS_UNDERRUN && sc->xl_tx_thresh < XL_PACKET_SIZE) { sc->xl_tx_thresh += XL_MIN_FRAMELEN; device_printf(sc->xl_dev, "tx underrun, increasing tx start threshold to %d bytes\n", sc->xl_tx_thresh); } CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh); if (sc->xl_type == XL_TYPE_905B) { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4)); } CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); } else { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); } /* * Write an arbitrary byte to the TX_STATUS register * to clear this interrupt/error and advance to the next.
*/ CSR_WRITE_1(sc, XL_TX_STATUS, 0x01); } } static void xl_intr(void *arg) { struct xl_softc *sc = arg; struct ifnet *ifp = sc->xl_ifp; u_int16_t status; XL_LOCK(sc); #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { XL_UNLOCK(sc); return; } #endif while ((status = CSR_READ_2(sc, XL_STATUS)) & XL_INTRS && status != 0xFFFF) { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|(status & XL_INTRS)); if (status & XL_STAT_UP_COMPLETE) { int curpkts; curpkts = ifp->if_ipackets; xl_rxeof(sc); if (curpkts == ifp->if_ipackets) { while (xl_rx_resync(sc)) xl_rxeof(sc); } } if (status & XL_STAT_DOWN_COMPLETE) { if (sc->xl_type == XL_TYPE_905B) xl_txeof_90xB(sc); else xl_txeof(sc); } if (status & XL_STAT_TX_COMPLETE) { ifp->if_oerrors++; xl_txeoc(sc); } if (status & XL_STAT_ADFAIL) { xl_reset(sc); xl_init_locked(sc); } if (status & XL_STAT_STATSOFLOW) { sc->xl_stats_no_timeout = 1; xl_stats_update_locked(sc); sc->xl_stats_no_timeout = 0; } } if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { if (sc->xl_type == XL_TYPE_905B) xl_start_90xB_locked(ifp); else xl_start_locked(ifp); } XL_UNLOCK(sc); } #ifdef DEVICE_POLLING -static void +static int xl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct xl_softc *sc = ifp->if_softc; + int rx_npkts = 0; XL_LOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) - xl_poll_locked(ifp, cmd, count); + rx_npkts = xl_poll_locked(ifp, cmd, count); XL_UNLOCK(sc); + return (rx_npkts); } -static void +static int xl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct xl_softc *sc = ifp->if_softc; + int rx_npkts; XL_LOCK_ASSERT(sc); sc->rxcycles = count; - xl_rxeof(sc); + rx_npkts = xl_rxeof(sc); if (sc->xl_type == XL_TYPE_905B) xl_txeof_90xB(sc); else xl_txeof(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { if (sc->xl_type == XL_TYPE_905B) xl_start_90xB_locked(ifp); else xl_start_locked(ifp); } if (cmd == POLL_AND_CHECK_STATUS) { u_int16_t status; status = CSR_READ_2(sc, XL_STATUS); if (status & XL_INTRS && status != 0xFFFF) { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|(status & XL_INTRS)); if (status & XL_STAT_TX_COMPLETE) { ifp->if_oerrors++; xl_txeoc(sc); } if (status & XL_STAT_ADFAIL) { xl_reset(sc); xl_init_locked(sc); } if (status & XL_STAT_STATSOFLOW) { sc->xl_stats_no_timeout = 1; xl_stats_update_locked(sc); sc->xl_stats_no_timeout = 0; } } } + return (rx_npkts); } #endif /* DEVICE_POLLING */ /* * XXX: This is an entry point for callout which needs to take the lock. */ static void xl_stats_update(void *xsc) { struct xl_softc *sc = xsc; XL_LOCK_ASSERT(sc); if (xl_watchdog(sc) == EJUSTRETURN) return; xl_stats_update_locked(sc); } static void xl_stats_update_locked(struct xl_softc *sc) { struct ifnet *ifp = sc->xl_ifp; struct xl_stats xl_stats; u_int8_t *p; int i; struct mii_data *mii = NULL; XL_LOCK_ASSERT(sc); bzero((char *)&xl_stats, sizeof(struct xl_stats)); if (sc->xl_miibus != NULL) mii = device_get_softc(sc->xl_miibus); p = (u_int8_t *)&xl_stats; /* Read all the stats registers. */ XL_SEL_WIN(6); for (i = 0; i < 16; i++) *p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i); ifp->if_ierrors += xl_stats.xl_rx_overrun; ifp->if_collisions += xl_stats.xl_tx_multi_collision + xl_stats.xl_tx_single_collision + xl_stats.xl_tx_late_collision; /* * Boomerang and cyclone chips have an extra stats counter * in window 4 (BadSSD). We have to read this too in order * to clear out all the stats registers and avoid a statsoflow * interrupt. 
*/ XL_SEL_WIN(4); CSR_READ_1(sc, XL_W4_BADSSD); if ((mii != NULL) && (!sc->xl_stats_no_timeout)) mii_tick(mii); XL_SEL_WIN(7); if (!sc->xl_stats_no_timeout) callout_reset(&sc->xl_stat_callout, hz, xl_stats_update, sc); } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. */ static int xl_encap(struct xl_softc *sc, struct xl_chain *c, struct mbuf **m_head) { struct mbuf *m_new; struct ifnet *ifp = sc->xl_ifp; int error, i, nseg, total_len; u_int32_t status; XL_LOCK_ASSERT(sc); error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, c->xl_map, *m_head, sc->xl_cdata.xl_tx_segs, &nseg, BUS_DMA_NOWAIT); if (error && error != EFBIG) { if_printf(ifp, "can't map mbuf (error %d)\n", error); return (error); } /* * Handle special case: we used up all 63 fragments, * but we have more mbufs left in the chain. Copy the * data into an mbuf cluster. Note that we don't * bother clearing the values in the other fragment * pointers/counters; it wouldn't gain us anything, * and would waste cycles. */ if (error) { m_new = m_collapse(*m_head, M_DONTWAIT, XL_MAXFRAGS); if (m_new == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m_new; error = bus_dmamap_load_mbuf_sg(sc->xl_mtag, c->xl_map, *m_head, sc->xl_cdata.xl_tx_segs, &nseg, BUS_DMA_NOWAIT); if (error) { m_freem(*m_head); *m_head = NULL; if_printf(ifp, "can't map mbuf (error %d)\n", error); return (error); } } KASSERT(nseg <= XL_MAXFRAGS, ("%s: too many DMA segments (%d)", __func__, nseg)); if (nseg == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } total_len = 0; for (i = 0; i < nseg; i++) { KASSERT(sc->xl_cdata.xl_tx_segs[i].ds_len <= MCLBYTES, ("segment size too large")); c->xl_ptr->xl_frag[i].xl_addr = htole32(sc->xl_cdata.xl_tx_segs[i].ds_addr); c->xl_ptr->xl_frag[i].xl_len = htole32(sc->xl_cdata.xl_tx_segs[i].ds_len); total_len += sc->xl_cdata.xl_tx_segs[i].ds_len; } c->xl_ptr->xl_frag[nseg - 1].xl_len = htole32(sc->xl_cdata.xl_tx_segs[nseg - 1].ds_len | XL_LAST_FRAG); c->xl_ptr->xl_status = htole32(total_len); c->xl_ptr->xl_next = 0; if (sc->xl_type == XL_TYPE_905B) { status = XL_TXSTAT_RND_DEFEAT; #ifndef XL905B_TXCSUM_BROKEN if ((*m_head)->m_pkthdr.csum_flags) { if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) status |= XL_TXSTAT_IPCKSUM; if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP) status |= XL_TXSTAT_TCPCKSUM; if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP) status |= XL_TXSTAT_UDPCKSUM; } #endif c->xl_ptr->xl_status = htole32(status); } c->xl_mbuf = *m_head; bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREWRITE); return (0); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit lists. We also save a * copy of the pointers since the transmit list fragment pointers are * physical addresses. */ static void xl_start(struct ifnet *ifp) { struct xl_softc *sc = ifp->if_softc; XL_LOCK(sc); if (sc->xl_type == XL_TYPE_905B) xl_start_90xB_locked(ifp); else xl_start_locked(ifp); XL_UNLOCK(sc); } static void xl_start_locked(struct ifnet *ifp) { struct xl_softc *sc = ifp->if_softc; struct mbuf *m_head = NULL; struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx; u_int32_t status; int error; XL_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; /* * Check for an available queue slot. If there are none, * punt.
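 * ("Punt" here means: reclaim completed descriptors once via
 * xl_txeoc()/xl_txeof(), and set IFF_DRV_OACTIVE only if the free
 * list is still empty afterwards.)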
*/ if (sc->xl_cdata.xl_tx_free == NULL) { xl_txeoc(sc); xl_txeof(sc); if (sc->xl_cdata.xl_tx_free == NULL) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; return; } } start_tx = sc->xl_cdata.xl_tx_free; for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->xl_cdata.xl_tx_free != NULL;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* Pick a descriptor off the free list. */ cur_tx = sc->xl_cdata.xl_tx_free; /* Pack the data into the descriptor. */ error = xl_encap(sc, cur_tx, &m_head); if (error) { if (m_head == NULL) break; ifp->if_drv_flags |= IFF_DRV_OACTIVE; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); break; } sc->xl_cdata.xl_tx_free = cur_tx->xl_next; cur_tx->xl_next = NULL; /* Chain it together. */ if (prev != NULL) { prev->xl_next = cur_tx; prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys); } prev = cur_tx; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, cur_tx->xl_mbuf); } /* * If there are no packets queued, bail. */ if (cur_tx == NULL) return; /* * Place the request for the upload interrupt * in the last descriptor in the chain. This way, if * we're chaining several packets at once, we'll only * get an interrupt once for the whole chain rather than * once for each packet. */ cur_tx->xl_ptr->xl_status = htole32(le32toh(cur_tx->xl_ptr->xl_status) | XL_TXSTAT_DL_INTR); bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap, BUS_DMASYNC_PREWRITE); /* * Queue the packets. If the TX channel is clear, update * the downlist pointer register. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL); xl_wait(sc); if (sc->xl_cdata.xl_tx_head != NULL) { sc->xl_cdata.xl_tx_tail->xl_next = start_tx; sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next = htole32(start_tx->xl_phys); status = sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status; sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status = htole32(le32toh(status) & ~XL_TXSTAT_DL_INTR); sc->xl_cdata.xl_tx_tail = cur_tx; } else { sc->xl_cdata.xl_tx_head = start_tx; sc->xl_cdata.xl_tx_tail = cur_tx; } if (!CSR_READ_4(sc, XL_DOWNLIST_PTR)) CSR_WRITE_4(sc, XL_DOWNLIST_PTR, start_tx->xl_phys); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); XL_SEL_WIN(7); /* * Set a timeout in case the chip goes out to lunch. */ sc->xl_wdog_timer = 5; /* * XXX Under certain conditions, usually on slower machines * where interrupts may be dropped, it's possible for the * adapter to chew up all the buffers in the receive ring * and stall, without us being able to do anything about it. * To guard against this, we need to make a pass over the * RX queue to make sure there aren't any packets pending. * Doing it here means we can flush the receive ring at the * same time the chip is DMAing the transmit descriptors we * just gave it. * * 3Com goes to some lengths to emphasize the Parallel Tasking (tm) * nature of their chips in all their marketing literature; * we may as well take advantage of it. 
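 * The taskqueue_enqueue() below is that pass: xl_rxeof_task() re-runs
 * xl_rxeof() under the driver lock while the chip is busy fetching
 * the transmit descriptors we just queued.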
:) */ taskqueue_enqueue(taskqueue_swi, &sc->xl_task); } static void xl_start_90xB_locked(struct ifnet *ifp) { struct xl_softc *sc = ifp->if_softc; struct mbuf *m_head = NULL; struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx; int error, idx; XL_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; idx = sc->xl_cdata.xl_tx_prod; start_tx = &sc->xl_cdata.xl_tx_chain[idx]; for (; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL;) { if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; cur_tx = &sc->xl_cdata.xl_tx_chain[idx]; /* Pack the data into the descriptor. */ error = xl_encap(sc, cur_tx, &m_head); if (error) { if (m_head == NULL) break; ifp->if_drv_flags |= IFF_DRV_OACTIVE; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); break; } /* Chain it together. */ if (prev != NULL) prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys); prev = cur_tx; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, cur_tx->xl_mbuf); XL_INC(idx, XL_TX_LIST_CNT); sc->xl_cdata.xl_tx_cnt++; } /* * If there are no packets queued, bail. */ if (cur_tx == NULL) return; /* * Place the request for the upload interrupt * in the last descriptor in the chain. This way, if * we're chaining several packets at once, we'll only * get an interrupt once for the whole chain rather than * once for each packet. */ cur_tx->xl_ptr->xl_status = htole32(le32toh(cur_tx->xl_ptr->xl_status) | XL_TXSTAT_DL_INTR); bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap, BUS_DMASYNC_PREWRITE); /* Start transmission */ sc->xl_cdata.xl_tx_prod = idx; start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys); /* * Set a timeout in case the chip goes out to lunch. */ sc->xl_wdog_timer = 5; } static void xl_init(void *xsc) { struct xl_softc *sc = xsc; XL_LOCK(sc); xl_init_locked(sc); XL_UNLOCK(sc); } static void xl_init_locked(struct xl_softc *sc) { struct ifnet *ifp = sc->xl_ifp; int error, i; u_int16_t rxfilt = 0; struct mii_data *mii = NULL; XL_LOCK_ASSERT(sc); /* * Cancel pending I/O and free all RX/TX buffers. */ xl_stop(sc); if (sc->xl_miibus == NULL) { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET); xl_wait(sc); } CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); xl_wait(sc); DELAY(10000); if (sc->xl_miibus != NULL) mii = device_get_softc(sc->xl_miibus); /* Init our MAC address */ XL_SEL_WIN(2); for (i = 0; i < ETHER_ADDR_LEN; i++) { CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i, IF_LLADDR(sc->xl_ifp)[i]); } /* Clear the station mask. */ for (i = 0; i < 3; i++) CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0); #ifdef notdef /* Reset TX and RX. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET); xl_wait(sc); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); xl_wait(sc); #endif /* Init circular RX list. */ error = xl_list_rx_init(sc); if (error) { device_printf(sc->xl_dev, "initialization of the rx ring failed (%d)\n", error); xl_stop(sc); return; } /* Init TX descriptors. */ if (sc->xl_type == XL_TYPE_905B) error = xl_list_tx_init_90xB(sc); else error = xl_list_tx_init(sc); if (error) { device_printf(sc->xl_dev, "initialization of the tx ring failed (%d)\n", error); xl_stop(sc); return; } /* * Set the TX freethresh value. * Note that this has no effect on 3c905B "cyclone" * cards but is required for 3c900/3c905 "boomerang" * cards in order to enable the download engine. 
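 * (The threshold register counts 256-byte units, hence the >> 8 in
 * the write below.)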
*/ CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8); /* Set the TX start threshold for best performance. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh); /* * If this is a 3c905B, also set the tx reclaim threshold. * This helps cut down on the number of tx reclaim errors * that could happen on a busy network. The chip multiplies * the register value by 16 to obtain the actual threshold * in bytes, so we divide by 16 when setting the value here. * The existing threshold value can be examined by reading * the register at offset 9 in window 5. */ if (sc->xl_type == XL_TYPE_905B) { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4)); } /* Set RX filter bits. */ XL_SEL_WIN(5); rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER); /* Set the individual bit to receive frames for this host only. */ rxfilt |= XL_RXFILTER_INDIVIDUAL; /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) { rxfilt |= XL_RXFILTER_ALLFRAMES; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); } else { rxfilt &= ~XL_RXFILTER_ALLFRAMES; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); } /* * Set capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) { rxfilt |= XL_RXFILTER_BROADCAST; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); } else { rxfilt &= ~XL_RXFILTER_BROADCAST; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); } /* * Program the multicast filter, if necessary. */ if (sc->xl_type == XL_TYPE_905B) xl_setmulti_hash(sc); else xl_setmulti(sc); /* * Load the address of the RX list. We have to * stall the upload engine before we can manipulate * the uplist pointer register, then unstall it when * we're finished. We also have to wait for the * stall command to complete before proceeding. * Note that we have to do this after any RX resets * have completed since the uplist register is cleared * by a reset. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL); xl_wait(sc); CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL); xl_wait(sc); if (sc->xl_type == XL_TYPE_905B) { /* Set polling interval */ CSR_WRITE_1(sc, XL_DOWN_POLL, 64); /* Load the address of the TX list */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL); xl_wait(sc); CSR_WRITE_4(sc, XL_DOWNLIST_PTR, sc->xl_cdata.xl_tx_chain[0].xl_phys); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); xl_wait(sc); } /* * If the coax transceiver is on, make sure to enable * the DC-DC converter. */ XL_SEL_WIN(3); if (sc->xl_xcvr == XL_XCVR_COAX) CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START); else CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP); /* * increase packet size to allow reception of 802.1q or ISL packets. * For the 3c90x chip, set the 'allow large packets' bit in the MAC * control register. For 3c90xB/C chips, use the RX packet size * register. */ if (sc->xl_type == XL_TYPE_905B) CSR_WRITE_2(sc, XL_W3_MAXPKTSIZE, XL_PACKET_SIZE); else { u_int8_t macctl; macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL); macctl |= XL_MACCTRL_ALLOW_LARGE_PACK; CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl); } /* Clear out the stats counters. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE); sc->xl_stats_no_timeout = 1; xl_stats_update_locked(sc); sc->xl_stats_no_timeout = 0; XL_SEL_WIN(4); CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE); /* * Enable interrupts. 
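 * Ack anything pending first, then unmask the XL_INTRS set.  When
 * polling is enabled, the CPU-side enable is written as zero below,
 * so the chip still latches status but never asserts the interrupt
 * line.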
*/ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS); #ifdef DEVICE_POLLING /* Disable interrupts if we are polling. */ if (ifp->if_capenable & IFCAP_POLLING) CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0); else #endif CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS); if (sc->xl_flags & XL_FLAG_FUNCREG) bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000); /* Set the RX early threshold */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >>2)); CSR_WRITE_2(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY); /* Enable receiver and transmitter. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE); xl_wait(sc); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE); xl_wait(sc); /* XXX Downcall to miibus. */ if (mii != NULL) mii_mediachg(mii); /* Select window 7 for normal operations. */ XL_SEL_WIN(7); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->xl_wdog_timer = 0; callout_reset(&sc->xl_stat_callout, hz, xl_stats_update, sc); } /* * Set media options. */ static int xl_ifmedia_upd(struct ifnet *ifp) { struct xl_softc *sc = ifp->if_softc; struct ifmedia *ifm = NULL; struct mii_data *mii = NULL; XL_LOCK(sc); if (sc->xl_miibus != NULL) mii = device_get_softc(sc->xl_miibus); if (mii == NULL) ifm = &sc->ifmedia; else ifm = &mii->mii_media; switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_100_FX: case IFM_10_FL: case IFM_10_2: case IFM_10_5: xl_setmode(sc, ifm->ifm_media); XL_UNLOCK(sc); return (0); } if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX || sc->xl_media & XL_MEDIAOPT_BT4) { xl_init_locked(sc); } else { xl_setmode(sc, ifm->ifm_media); } XL_UNLOCK(sc); return (0); } /* * Report current media status. */ static void xl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct xl_softc *sc = ifp->if_softc; u_int32_t icfg; u_int16_t status = 0; struct mii_data *mii = NULL; XL_LOCK(sc); if (sc->xl_miibus != NULL) mii = device_get_softc(sc->xl_miibus); XL_SEL_WIN(4); status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS); XL_SEL_WIN(3); icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK; icfg >>= XL_ICFG_CONNECTOR_BITS; ifmr->ifm_active = IFM_ETHER; ifmr->ifm_status = IFM_AVALID; if ((status & XL_MEDIASTAT_CARRIER) == 0) ifmr->ifm_status |= IFM_ACTIVE; switch (icfg) { case XL_XCVR_10BT: ifmr->ifm_active = IFM_ETHER|IFM_10_T; if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; break; case XL_XCVR_AUI: if (sc->xl_type == XL_TYPE_905B && sc->xl_media == XL_MEDIAOPT_10FL) { ifmr->ifm_active = IFM_ETHER|IFM_10_FL; if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; } else ifmr->ifm_active = IFM_ETHER|IFM_10_5; break; case XL_XCVR_COAX: ifmr->ifm_active = IFM_ETHER|IFM_10_2; break; /* * XXX MII and BTX/AUTO should be separate cases. 
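 * For now all three fall through to the same code, which simply
 * defers to mii_pollstat() whenever a PHY driver is attached.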
*/ case XL_XCVR_100BTX: case XL_XCVR_AUTO: case XL_XCVR_MII: if (mii != NULL) { mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } break; case XL_XCVR_100BFX: ifmr->ifm_active = IFM_ETHER|IFM_100_FX; break; default: if_printf(ifp, "unknown XCVR type: %d\n", icfg); break; } XL_UNLOCK(sc); } static int xl_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct xl_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; int error = 0; struct mii_data *mii = NULL; u_int8_t rxfilt; switch (command) { case SIOCSIFFLAGS: XL_LOCK(sc); XL_SEL_WIN(5); rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER); if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->xl_if_flags & IFF_PROMISC)) { rxfilt |= XL_RXFILTER_ALLFRAMES; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); XL_SEL_WIN(7); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->xl_if_flags & IFF_PROMISC) { rxfilt &= ~XL_RXFILTER_ALLFRAMES; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); XL_SEL_WIN(7); } else { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) xl_init_locked(sc); } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) xl_stop(sc); } sc->xl_if_flags = ifp->if_flags; XL_UNLOCK(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: /* XXX Downcall from if_addmulti() possibly with locks held. */ XL_LOCK(sc); if (sc->xl_type == XL_TYPE_905B) xl_setmulti_hash(sc); else xl_setmulti(sc); XL_UNLOCK(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: if (sc->xl_miibus != NULL) mii = device_get_softc(sc->xl_miibus); if (mii == NULL) error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); else error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: #ifdef DEVICE_POLLING if (ifr->ifr_reqcap & IFCAP_POLLING && !(ifp->if_capenable & IFCAP_POLLING)) { error = ether_poll_register(xl_poll, ifp); if (error) return(error); XL_LOCK(sc); /* Disable interrupts */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0); ifp->if_capenable |= IFCAP_POLLING; XL_UNLOCK(sc); return (error); } if (!(ifr->ifr_reqcap & IFCAP_POLLING) && ifp->if_capenable & IFCAP_POLLING) { error = ether_poll_deregister(ifp); /* Enable interrupts. 
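 * again -- the mirror image of the register path above: once the
 * handler is deregistered the chip must fall back to interrupt-driven
 * operation, so pending events are acked and the normal mask restored.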
*/ XL_LOCK(sc); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS); if (sc->xl_flags & XL_FLAG_FUNCREG) bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000); ifp->if_capenable &= ~IFCAP_POLLING; XL_UNLOCK(sc); return (error); } #endif /* DEVICE_POLLING */ XL_LOCK(sc); ifp->if_capenable = ifr->ifr_reqcap; if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist = XL905B_CSUM_FEATURES; else ifp->if_hwassist = 0; XL_UNLOCK(sc); break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static int xl_watchdog(struct xl_softc *sc) { struct ifnet *ifp = sc->xl_ifp; u_int16_t status = 0; int misintr; XL_LOCK_ASSERT(sc); if (sc->xl_wdog_timer == 0 || --sc->xl_wdog_timer != 0) return (0); xl_rxeof(sc); xl_txeoc(sc); misintr = 0; if (sc->xl_type == XL_TYPE_905B) { xl_txeof_90xB(sc); if (sc->xl_cdata.xl_tx_cnt == 0) misintr++; } else { xl_txeof(sc); if (sc->xl_cdata.xl_tx_head == NULL) misintr++; } if (misintr != 0) { device_printf(sc->xl_dev, "watchdog timeout (missed Tx interrupts) -- recovering\n"); return (0); } ifp->if_oerrors++; XL_SEL_WIN(4); status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS); device_printf(sc->xl_dev, "watchdog timeout\n"); if (status & XL_MEDIASTAT_CARRIER) device_printf(sc->xl_dev, "no carrier - transceiver cable problem?\n"); xl_reset(sc); xl_init_locked(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { if (sc->xl_type == XL_TYPE_905B) xl_start_90xB_locked(ifp); else xl_start_locked(ifp); } return (EJUSTRETURN); } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void xl_stop(struct xl_softc *sc) { register int i; struct ifnet *ifp = sc->xl_ifp; XL_LOCK_ASSERT(sc); sc->xl_wdog_timer = 0; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD); xl_wait(sc); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP); DELAY(800); #ifdef foo CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET); xl_wait(sc); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); xl_wait(sc); #endif CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0); if (sc->xl_flags & XL_FLAG_FUNCREG) bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000); /* Stop the stats updater. */ callout_stop(&sc->xl_stat_callout); /* * Free data in the RX lists. */ for (i = 0; i < XL_RX_LIST_CNT; i++) { if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) { bus_dmamap_unload(sc->xl_mtag, sc->xl_cdata.xl_rx_chain[i].xl_map); bus_dmamap_destroy(sc->xl_mtag, sc->xl_cdata.xl_rx_chain[i].xl_map); m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf); sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL; } } if (sc->xl_ldata.xl_rx_list != NULL) bzero(sc->xl_ldata.xl_rx_list, XL_RX_LIST_SZ); /* * Free the TX list buffers. 
*/ for (i = 0; i < XL_TX_LIST_CNT; i++) { if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) { bus_dmamap_unload(sc->xl_mtag, sc->xl_cdata.xl_tx_chain[i].xl_map); bus_dmamap_destroy(sc->xl_mtag, sc->xl_cdata.xl_tx_chain[i].xl_map); m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf); sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL; } } if (sc->xl_ldata.xl_tx_list != NULL) bzero(sc->xl_ldata.xl_tx_list, XL_TX_LIST_SZ); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static int xl_shutdown(device_t dev) { struct xl_softc *sc; sc = device_get_softc(dev); XL_LOCK(sc); xl_reset(sc); xl_stop(sc); XL_UNLOCK(sc); return (0); } static int xl_suspend(device_t dev) { struct xl_softc *sc; sc = device_get_softc(dev); XL_LOCK(sc); xl_stop(sc); XL_UNLOCK(sc); return (0); } static int xl_resume(device_t dev) { struct xl_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->xl_ifp; XL_LOCK(sc); xl_reset(sc); if (ifp->if_flags & IFF_UP) xl_init_locked(sc); XL_UNLOCK(sc); return (0); } Index: head/sys/net/if.h =================================================================== --- head/sys/net/if.h (revision 193095) +++ head/sys/net/if.h (revision 193096) @@ -1,459 +1,460 @@ /*- * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)if.h 8.1 (Berkeley) 6/10/93 * $FreeBSD$ */ #ifndef _NET_IF_H_ #define _NET_IF_H_ #include #ifdef _KERNEL #include #endif #if __BSD_VISIBLE /* * does not depend on on most other systems. This * helps userland compatibility. (struct timeval ifi_lastchange) */ #ifndef _KERNEL #include #endif struct ifnet; #endif /* * Length of interface external name, including terminating '\0'. * Note: this is the same size as a generic device's external name. */ #define IF_NAMESIZE 16 #if __BSD_VISIBLE #define IFNAMSIZ IF_NAMESIZE #define IF_MAXUNIT 0x7fff /* historical value */ #endif #if __BSD_VISIBLE /* * Structure used to query names of interface cloners. 
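 * A userland sketch of the usual two-pass query against this structure
 * (the structure itself is defined just below; ex_list_cloners() is a
 * hypothetical helper and assumes SIOCIFGCLONERS from <sys/sockio.h>,
 * plus <stdlib.h>, <string.h> and an open socket for 's'):
 */
#if 0	/* illustrative sketch, not part of this header */
static int
ex_list_cloners(int s)
{
	struct if_clonereq ifcr;
	char *buf;

	memset(&ifcr, 0, sizeof(ifcr));
	if (ioctl(s, SIOCIFGCLONERS, &ifcr) == -1)	/* sizing pass */
		return (-1);
	buf = calloc(ifcr.ifcr_total, IFNAMSIZ);
	if (buf == NULL)
		return (-1);
	ifcr.ifcr_count = ifcr.ifcr_total;
	ifcr.ifcr_buffer = buf;
	if (ioctl(s, SIOCIFGCLONERS, &ifcr) == -1) {	/* fetch pass */
		free(buf);
		return (-1);
	}
	/* cloner name i lives at buf + i * IFNAMSIZ */
	free(buf);
	return (0);
}
#endif
/*
 * Structure used to query names of interface cloners.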
*/ struct if_clonereq { int ifcr_total; /* total cloners (out) */ int ifcr_count; /* room for this many in user buffer */ char *ifcr_buffer; /* buffer for cloner names */ }; /* * Structure describing information about an interface * which may be of interest to management entities. */ struct if_data { /* generic interface information */ u_char ifi_type; /* ethernet, tokenring, etc */ u_char ifi_physical; /* e.g., AUI, Thinnet, 10base-T, etc */ u_char ifi_addrlen; /* media address length */ u_char ifi_hdrlen; /* media header length */ u_char ifi_link_state; /* current link state */ u_char ifi_spare_char1; /* spare byte */ u_char ifi_spare_char2; /* spare byte */ u_char ifi_datalen; /* length of this data struct */ u_long ifi_mtu; /* maximum transmission unit */ u_long ifi_metric; /* routing metric (external only) */ u_long ifi_baudrate; /* linespeed */ /* volatile statistics */ u_long ifi_ipackets; /* packets received on interface */ u_long ifi_ierrors; /* input errors on interface */ u_long ifi_opackets; /* packets sent on interface */ u_long ifi_oerrors; /* output errors on interface */ u_long ifi_collisions; /* collisions on csma interfaces */ u_long ifi_ibytes; /* total number of octets received */ u_long ifi_obytes; /* total number of octets sent */ u_long ifi_imcasts; /* packets received via multicast */ u_long ifi_omcasts; /* packets sent via multicast */ u_long ifi_iqdrops; /* dropped on input, this interface */ u_long ifi_noproto; /* destined for unsupported protocol */ u_long ifi_hwassist; /* HW offload capabilities, see IFCAP */ time_t ifi_epoch; /* uptime at attach or stat reset */ struct timeval ifi_lastchange; /* time of last administrative change */ }; /*- * Interface flags are of two types: network stack owned flags, and driver * owned flags. Historically, these values were stored in the same ifnet * flags field, but with the advent of fine-grained locking, they have been * broken out such that the network stack is responsible for synchronizing * the stack-owned fields, and the device driver the device-owned fields. * Both halves can perform lockless reads of the other half's field, subject * to accepting the involved races. * * Both sets of flags come from the same number space, and should not be * permitted to conflict, as they are exposed to user space via a single * field. * * The following symbols identify read and write requirements for fields: * * (i) if_flags field set by device driver before attach, read-only there * after. * (n) if_flags field written only by the network stack, read by either the * stack or driver. * (d) if_drv_flags field written only by the device driver, read by either * the stack or driver. 
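 *
 * A kernel-side sketch of that split (hypothetical driver routine; the
 * ifnet fields live in net/if_var.h, and the IFF_* values are defined
 * just below):
 */
#if 0	/* illustrative sketch, not part of this header */
static void
ex_start(struct ifnet *ifp)
{

	/* Driver-owned bit, written only under the driver's own lock. */
	if (ifp->if_drv_flags & IFF_DRV_OACTIVE)
		return;
	/* Stack-owned bit; the lockless read (and its race) is accepted. */
	if ((ifp->if_flags & IFF_UP) == 0)
		return;
	/* ... dequeue packets and hand them to the hardware ... */
}
#endif
/*
 * The IFF_* values follow.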
*/ #define IFF_UP 0x1 /* (n) interface is up */ #define IFF_BROADCAST 0x2 /* (i) broadcast address valid */ #define IFF_DEBUG 0x4 /* (n) turn on debugging */ #define IFF_LOOPBACK 0x8 /* (i) is a loopback net */ #define IFF_POINTOPOINT 0x10 /* (i) is a point-to-point link */ #define IFF_SMART 0x20 /* (i) interface manages own routes */ #define IFF_DRV_RUNNING 0x40 /* (d) resources allocated */ #define IFF_NOARP 0x80 /* (n) no address resolution protocol */ #define IFF_PROMISC 0x100 /* (n) receive all packets */ #define IFF_ALLMULTI 0x200 /* (n) receive all multicast packets */ #define IFF_DRV_OACTIVE 0x400 /* (d) tx hardware queue is full */ #define IFF_SIMPLEX 0x800 /* (i) can't hear own transmissions */ #define IFF_LINK0 0x1000 /* per link layer defined bit */ #define IFF_LINK1 0x2000 /* per link layer defined bit */ #define IFF_LINK2 0x4000 /* per link layer defined bit */ #define IFF_ALTPHYS IFF_LINK2 /* use alternate physical connection */ #define IFF_MULTICAST 0x8000 /* (i) supports multicast */ /* 0x10000 */ #define IFF_PPROMISC 0x20000 /* (n) user-requested promisc mode */ #define IFF_MONITOR 0x40000 /* (n) user-requested monitor mode */ #define IFF_STATICARP 0x80000 /* (n) static ARP */ #define IFF_DYING 0x200000 /* (n) interface is winding down */ /* * Old names for driver flags so that user space tools can continue to use * the old (portable) names. */ #ifndef _KERNEL #define IFF_RUNNING IFF_DRV_RUNNING #define IFF_OACTIVE IFF_DRV_OACTIVE #endif /* flags set internally only: */ #define IFF_CANTCHANGE \ (IFF_BROADCAST|IFF_POINTOPOINT|IFF_DRV_RUNNING|IFF_DRV_OACTIVE|\ IFF_SIMPLEX|IFF_MULTICAST|IFF_ALLMULTI|IFF_SMART|IFF_PROMISC|\ IFF_DYING) /* * Values for if_link_state. */ #define LINK_STATE_UNKNOWN 0 /* link invalid/unknown */ #define LINK_STATE_DOWN 1 /* link is down */ #define LINK_STATE_UP 2 /* link is up */ /* * Some convenience macros used for setting ifi_baudrate. * XXX 1000 vs. 1024? --thorpej@netbsd.org */ #define IF_Kbps(x) ((x) * 1000) /* kilobits/sec. */ #define IF_Mbps(x) (IF_Kbps((x) * 1000)) /* megabits/sec. */ #define IF_Gbps(x) (IF_Mbps((x) * 1000)) /* gigabits/sec. */ /* * Capabilities that interfaces can advertise. * * struct ifnet.if_capabilities * contains the optional features & capabilities a particular interface * supports (not only the driver but also the detected hw revision). * Capabilities are defined by IFCAP_* below. * struct ifnet.if_capenable * contains the enabled (either by default or through ifconfig) optional * features & capabilities on this interface. * Capabilities are defined by IFCAP_* below. * struct if_data.ifi_hwassist in mbuf CSUM_ flag form, controlled by above * contains the enabled optional feature & capabilities that can be used * individually per packet and are specified in the mbuf pkthdr.csum_flags * field. IFCAP_* and CSUM_* do not match one to one and CSUM_* may be * more detailed or differentiated than IFCAP_*.
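 *
 * A sketch of the usual driver pattern tying the three together
 * (hypothetical routine; IFCAP_* are defined just below, CSUM_* in
 * sys/mbuf.h):
 */
#if 0	/* illustrative sketch, not part of this header */
static void
ex_enable_txcsum(struct ifnet *ifp, int on)
{

	if (on && (ifp->if_capabilities & IFCAP_TXCSUM)) {
		ifp->if_capenable |= IFCAP_TXCSUM;
		ifp->if_hwassist = CSUM_IP | CSUM_TCP | CSUM_UDP;
	} else {
		ifp->if_capenable &= ~IFCAP_TXCSUM;
		ifp->if_hwassist = 0;
	}
}
#endif
/*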
* Hwassist features are defined CSUM_* in sys/mbuf.h */ #define IFCAP_RXCSUM 0x00001 /* can offload checksum on RX */ #define IFCAP_TXCSUM 0x00002 /* can offload checksum on TX */ #define IFCAP_NETCONS 0x00004 /* can be a network console */ #define IFCAP_VLAN_MTU 0x00008 /* VLAN-compatible MTU */ #define IFCAP_VLAN_HWTAGGING 0x00010 /* hardware VLAN tag support */ #define IFCAP_JUMBO_MTU 0x00020 /* 9000 byte MTU supported */ #define IFCAP_POLLING 0x00040 /* driver supports polling */ #define IFCAP_VLAN_HWCSUM 0x00080 /* can do IFCAP_HWCSUM on VLANs */ #define IFCAP_TSO4 0x00100 /* can do TCP Segmentation Offload */ #define IFCAP_TSO6 0x00200 /* can do TCP6 Segmentation Offload */ #define IFCAP_LRO 0x00400 /* can do Large Receive Offload */ #define IFCAP_WOL_UCAST 0x00800 /* wake on any unicast frame */ #define IFCAP_WOL_MCAST 0x01000 /* wake on any multicast frame */ #define IFCAP_WOL_MAGIC 0x02000 /* wake on any Magic Packet */ #define IFCAP_TOE4 0x04000 /* interface can offload TCP */ #define IFCAP_TOE6 0x08000 /* interface can offload TCP6 */ #define IFCAP_VLAN_HWFILTER 0x10000 /* interface hw can filter vlan tag */ +#define IFCAP_POLLING_NOCOUNT 0x20000 /* polling ticks cannot be fragmented */ #define IFCAP_HWCSUM (IFCAP_RXCSUM | IFCAP_TXCSUM) #define IFCAP_TSO (IFCAP_TSO4 | IFCAP_TSO6) #define IFCAP_WOL (IFCAP_WOL_UCAST | IFCAP_WOL_MCAST | IFCAP_WOL_MAGIC) #define IFCAP_TOE (IFCAP_TOE4 | IFCAP_TOE6) #define IFQ_MAXLEN 50 #define IFNET_SLOWHZ 1 /* granularity is 1 second */ /* * Message format for use in obtaining information about interfaces * from getkerninfo and the routing socket */ struct if_msghdr { u_short ifm_msglen; /* to skip over non-understood messages */ u_char ifm_version; /* future binary compatibility */ u_char ifm_type; /* message type */ int ifm_addrs; /* like rtm_addrs */ int ifm_flags; /* value of if_flags */ u_short ifm_index; /* index for associated ifp */ struct if_data ifm_data;/* statistics and other data about if */ }; /* * Message format for use in obtaining information about interface addresses * from getkerninfo and the routing socket */ struct ifa_msghdr { u_short ifam_msglen; /* to skip over non-understood messages */ u_char ifam_version; /* future binary compatibility */ u_char ifam_type; /* message type */ int ifam_addrs; /* like rtm_addrs */ int ifam_flags; /* value of ifa_flags */ u_short ifam_index; /* index for associated ifp */ int ifam_metric; /* value of ifa_metric */ }; /* * Message format for use in obtaining information about multicast addresses * from the routing socket */ struct ifma_msghdr { u_short ifmam_msglen; /* to skip over non-understood messages */ u_char ifmam_version; /* future binary compatibility */ u_char ifmam_type; /* message type */ int ifmam_addrs; /* like rtm_addrs */ int ifmam_flags; /* value of ifa_flags */ u_short ifmam_index; /* index for associated ifp */ }; /* * Message format announcing the arrival or departure of a network interface. */ struct if_announcemsghdr { u_short ifan_msglen; /* to skip over non-understood messages */ u_char ifan_version; /* future binary compatibility */ u_char ifan_type; /* message type */ u_short ifan_index; /* index for associated ifp */ char ifan_name[IFNAMSIZ]; /* if name, e.g. "en0" */ u_short ifan_what; /* what type of announcement */ }; #define IFAN_ARRIVAL 0 /* interface arrival */ #define IFAN_DEPARTURE 1 /* interface departure */ /* * Interface request structure used for socket * ioctl's. All interface ioctl's must have parameter * definitions which begin with ifr_name. 
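 * (A userland sketch of consuming the message headers above off a
 * routing socket, stepping by the msglen field that every type
 * carries; RTM_IFINFO is from <net/route.h>:)
 */
#if 0	/* illustrative sketch, not part of this header */
static void
ex_walk_rtsock(const char *buf, size_t len)
{
	const struct if_msghdr *ifm;
	const char *next, *lim;

	lim = buf + len;
	for (next = buf; next < lim; next += ifm->ifm_msglen) {
		ifm = (const struct if_msghdr *)(const void *)next;
		if (ifm->ifm_msglen == 0)	/* guard against bad data */
			break;
		if (ifm->ifm_type == RTM_IFINFO) {
			/* ifm->ifm_data carries the if_data block */
		}
	}
}
#endif
/*
 * (continuing the ifreq note:)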
The * remainder may be interface specific. */ struct ifreq { char ifr_name[IFNAMSIZ]; /* if name, e.g. "en0" */ union { struct sockaddr ifru_addr; struct sockaddr ifru_dstaddr; struct sockaddr ifru_broadaddr; short ifru_flags[2]; short ifru_index; int ifru_metric; int ifru_mtu; int ifru_phys; int ifru_media; caddr_t ifru_data; int ifru_cap[2]; } ifr_ifru; #define ifr_addr ifr_ifru.ifru_addr /* address */ #define ifr_dstaddr ifr_ifru.ifru_dstaddr /* other end of p-to-p link */ #define ifr_broadaddr ifr_ifru.ifru_broadaddr /* broadcast address */ #define ifr_flags ifr_ifru.ifru_flags[0] /* flags (low 16 bits) */ #define ifr_flagshigh ifr_ifru.ifru_flags[1] /* flags (high 16 bits) */ #define ifr_metric ifr_ifru.ifru_metric /* metric */ #define ifr_mtu ifr_ifru.ifru_mtu /* mtu */ #define ifr_phys ifr_ifru.ifru_phys /* physical wire */ #define ifr_media ifr_ifru.ifru_media /* physical media */ #define ifr_data ifr_ifru.ifru_data /* for use by interface */ #define ifr_reqcap ifr_ifru.ifru_cap[0] /* requested capabilities */ #define ifr_curcap ifr_ifru.ifru_cap[1] /* current capabilities */ #define ifr_index ifr_ifru.ifru_index /* interface index */ }; #define _SIZEOF_ADDR_IFREQ(ifr) \ ((ifr).ifr_addr.sa_len > sizeof(struct sockaddr) ? \ (sizeof(struct ifreq) - sizeof(struct sockaddr) + \ (ifr).ifr_addr.sa_len) : sizeof(struct ifreq)) struct ifaliasreq { char ifra_name[IFNAMSIZ]; /* if name, e.g. "en0" */ struct sockaddr ifra_addr; struct sockaddr ifra_broadaddr; struct sockaddr ifra_mask; }; struct ifmediareq { char ifm_name[IFNAMSIZ]; /* if name, e.g. "en0" */ int ifm_current; /* current media options */ int ifm_mask; /* don't care mask */ int ifm_status; /* media status */ int ifm_active; /* active options */ int ifm_count; /* # entries in ifm_ulist array */ int *ifm_ulist; /* media words */ }; struct ifdrv { char ifd_name[IFNAMSIZ]; /* if name, e.g. "en0" */ unsigned long ifd_cmd; size_t ifd_len; void *ifd_data; }; /* * Structure used to retrieve aux status data from interfaces. * Kernel suppliers to this interface should respect the formatting * needed by ifconfig(8): each line starts with a TAB and ends with * a newline. The canonical example to copy and paste is in if_tun.c. */ #define IFSTATMAX 800 /* 10 lines of text */ struct ifstat { char ifs_name[IFNAMSIZ]; /* if name, e.g. "en0" */ char ascii[IFSTATMAX + 1]; }; /* * Structure used in SIOCGIFCONF request. * Used to retrieve interface configuration * for machine (useful for programs which * must know all networks accessible). */ struct ifconf { int ifc_len; /* size of associated buffer */ union { caddr_t ifcu_buf; struct ifreq *ifcu_req; } ifc_ifcu; #define ifc_buf ifc_ifcu.ifcu_buf /* buffer address */ #define ifc_req ifc_ifcu.ifcu_req /* array of structures returned */ }; #if defined (__amd64__) struct ifconf32 { int ifc_len; /* size of associated buffer */ union { u_int ifcu_buf; u_int ifcu_req; } ifc_ifcu; }; #endif /* * interface groups */ #define IFG_ALL "all" /* group contains all interfaces */ /* XXX: will we implement this? 
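 *
 * (Unrelated to groups: a userland sketch of the ifreq conventions
 * defined above -- hypothetical helper; SIOCGIFFLAGS comes from
 * <sys/sockio.h>, strlcpy from <string.h>:)
 */
#if 0	/* illustrative sketch, not part of this header */
static int
ex_get_if_flags(int s, const char *name, int *flags)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, name, sizeof(ifr.ifr_name));
	if (ioctl(s, SIOCGIFFLAGS, &ifr) == -1)
		return (-1);
	/* the 32 flag bits are split across ifru_flags[2] */
	*flags = (ifr.ifr_flags & 0xffff) | (ifr.ifr_flagshigh << 16);
	return (0);
}
#endif
/*
 * XXX: will we implement this?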
*/ #define IFG_EGRESS "egress" /* if(s) default route(s) point to */ struct ifg_req { union { char ifgrqu_group[IFNAMSIZ]; char ifgrqu_member[IFNAMSIZ]; } ifgrq_ifgrqu; #define ifgrq_group ifgrq_ifgrqu.ifgrqu_group #define ifgrq_member ifgrq_ifgrqu.ifgrqu_member }; /* * Used to lookup groups for an interface */ struct ifgroupreq { char ifgr_name[IFNAMSIZ]; u_int ifgr_len; union { char ifgru_group[IFNAMSIZ]; struct ifg_req *ifgru_groups; } ifgr_ifgru; #define ifgr_group ifgr_ifgru.ifgru_group #define ifgr_groups ifgr_ifgru.ifgru_groups }; /* * Structure for SIOC[AGD]LIFADDR */ struct if_laddrreq { char iflr_name[IFNAMSIZ]; u_int flags; #define IFLR_PREFIX 0x8000 /* in: prefix given out: kernel fills id */ u_int prefixlen; /* in/out */ struct sockaddr_storage addr; /* in/out */ struct sockaddr_storage dstaddr; /* out */ }; #endif /* __BSD_VISIBLE */ #ifdef _KERNEL #ifdef MALLOC_DECLARE MALLOC_DECLARE(M_IFADDR); MALLOC_DECLARE(M_IFMADDR); #endif #endif #ifndef _KERNEL struct if_nameindex { unsigned int if_index; /* 1, 2, ... */ char *if_name; /* null terminated name: "le0", ... */ }; __BEGIN_DECLS void if_freenameindex(struct if_nameindex *); char *if_indextoname(unsigned int, char *); struct if_nameindex *if_nameindex(void); unsigned int if_nametoindex(const char *); __END_DECLS #endif #ifdef _KERNEL /* XXX - this should go away soon. */ #include #endif #endif /* !_NET_IF_H_ */ Index: head/sys/net/if_var.h =================================================================== --- head/sys/net/if_var.h (revision 193095) +++ head/sys/net/if_var.h (revision 193096) @@ -1,817 +1,817 @@ /*- * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * From: @(#)if.h 8.1 (Berkeley) 6/10/93 * $FreeBSD$ */ #ifndef _NET_IF_VAR_H_ #define _NET_IF_VAR_H_ /* * Structures defining a network interface, providing a packet * transport mechanism (ala level 0 of the PUP protocols). * * Each interface accepts output datagrams of a specified maximum * length, and provides higher level routines with input datagrams * received from its medium. 
* * Output occurs when the routine if_output is called, with three parameters: * (*ifp->if_output)(ifp, m, dst, rt) * Here m is the mbuf chain to be sent and dst is the destination address. * The output routine encapsulates the supplied datagram if necessary, * and then transmits it on its medium. * * On input, each interface unwraps the data received by it, and either * places it on the input queue of an internetwork datagram routine * and posts the associated software interrupt, or passes the datagram to a raw * packet input routine. * * Routines exist for locating interfaces by their addresses * or for locating an interface on a certain network, as well as more general * routing and gateway routines maintaining information used to locate * interfaces. These routines live in the files if.c and route.c */ #ifdef __STDC__ /* * Forward structure declarations for function prototypes [sic]. */ struct mbuf; struct thread; struct rtentry; struct rt_addrinfo; struct socket; struct ether_header; struct carp_if; struct ifvlantrunk; struct route; #endif #include /* get TAILQ macros */ #ifdef _KERNEL #include #include #include #endif /* _KERNEL */ #include /* XXX */ #include /* XXX */ #include /* XXX */ #include /* XXX */ #include #define IF_DUNIT_NONE -1 #include TAILQ_HEAD(ifnethead, ifnet); /* we use TAILQs so that the order of */ TAILQ_HEAD(ifaddrhead, ifaddr); /* instantiation is preserved in the list */ TAILQ_HEAD(ifprefixhead, ifprefix); TAILQ_HEAD(ifmultihead, ifmultiaddr); TAILQ_HEAD(ifgrouphead, ifg_group); /* * Structure defining a queue for a network interface. */ struct ifqueue { struct mbuf *ifq_head; struct mbuf *ifq_tail; int ifq_len; int ifq_maxlen; int ifq_drops; struct mtx ifq_mtx; }; /* * Structure defining a network interface. * * (Would like to call this struct ``if'', but C isn't PL/1.) */ struct ifnet { void *if_softc; /* pointer to driver state */ void *if_l2com; /* pointer to protocol bits */ struct vnet *if_vnet; /* pointer to network stack instance */ TAILQ_ENTRY(ifnet) if_link; /* all struct ifnets are chained */ char if_xname[IFNAMSIZ]; /* external name (name + unit) */ const char *if_dname; /* driver name */ int if_dunit; /* unit or IF_DUNIT_NONE */ u_int if_refcount; /* reference count */ struct ifaddrhead if_addrhead; /* linked list of addresses per if */ /* * if_addrhead is the list of all addresses associated to * an interface. * Some code in the kernel assumes that first element * of the list has type AF_LINK, and contains sockaddr_dl * addresses which store the link-level address and the name * of the interface. * However, access to the AF_LINK address through this * field is deprecated. Use if_addr or ifaddr_byindex() instead. */ struct knlist if_klist; /* events attached to this if */ int if_pcount; /* number of promiscuous listeners */ struct carp_if *if_carp; /* carp interface structure */ struct bpf_if *if_bpf; /* packet filter structure */ u_short if_index; /* numeric abbreviation for this if */ short if_timer; /* time 'til if_watchdog called */ struct ifvlantrunk *if_vlantrunk; /* pointer to 802.1q data */ int if_flags; /* up/down, broadcast, etc. 
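 * (these are the stack-owned IFF_* bits; the driver-owned IFF_DRV_*
 * bits live in if_drv_flags below -- see the (i)/(n)/(d) locking key
 * in net/if.h)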
*/ int if_capabilities; /* interface features & capabilities */ int if_capenable; /* enabled features & capabilities */ void *if_linkmib; /* link-type-specific MIB data */ size_t if_linkmiblen; /* length of above data */ struct if_data if_data; struct ifmultihead if_multiaddrs; /* multicast addresses configured */ int if_amcount; /* number of all-multicast requests */ /* procedure handles */ int (*if_output) /* output routine (enqueue) */ (struct ifnet *, struct mbuf *, struct sockaddr *, struct route *); void (*if_input) /* input routine (from h/w driver) */ (struct ifnet *, struct mbuf *); void (*if_start) /* initiate output routine */ (struct ifnet *); int (*if_ioctl) /* ioctl routine */ (struct ifnet *, u_long, caddr_t); void (*if_watchdog) /* timer routine */ (struct ifnet *); void (*if_init) /* Init routine */ (void *); int (*if_resolvemulti) /* validate/resolve multicast */ (struct ifnet *, struct sockaddr **, struct sockaddr *); void (*if_qflush) /* flush any queues */ (struct ifnet *); int (*if_transmit) /* initiate output routine */ (struct ifnet *, struct mbuf *); struct ifaddr *if_addr; /* pointer to link-level address */ void *if_llsoftc; /* link layer softc */ int if_drv_flags; /* driver-managed status flags */ struct ifaltq if_snd; /* output queue (includes altq) */ const u_int8_t *if_broadcastaddr; /* linklevel broadcast bytestring */ void *if_bridge; /* bridge glue */ struct label *if_label; /* interface MAC label */ /* these are only used by IPv6 */ struct ifprefixhead if_prefixhead; /* list of prefixes per if */ void *if_afdata[AF_MAX]; int if_afdata_initialized; struct rwlock if_afdata_lock; struct task if_linktask; /* task for link change events */ struct mtx if_addr_mtx; /* mutex to protect address lists */ LIST_ENTRY(ifnet) if_clones; /* interfaces of a cloner */ TAILQ_HEAD(, ifg_list) if_groups; /* linked list of groups per if */ /* protected by if_addr_mtx */ void *if_pf_kif; void *if_lagg; /* lagg glue */ u_char if_alloctype; /* if_type at time of allocation */ /* * Spare fields are added so that we can modify sensitive data * structures without changing the kernel binary interface, and must * be used with care where binary compatibility is required. */ char if_cspare[3]; void *if_pspare[8]; int if_ispare[4]; }; typedef void if_init_f_t(void *); /* * XXX These aliases are terribly dangerous because they could apply * to anything. */ #define if_mtu if_data.ifi_mtu #define if_type if_data.ifi_type #define if_physical if_data.ifi_physical #define if_addrlen if_data.ifi_addrlen #define if_hdrlen if_data.ifi_hdrlen #define if_metric if_data.ifi_metric #define if_link_state if_data.ifi_link_state #define if_baudrate if_data.ifi_baudrate #define if_hwassist if_data.ifi_hwassist #define if_ipackets if_data.ifi_ipackets #define if_ierrors if_data.ifi_ierrors #define if_opackets if_data.ifi_opackets #define if_oerrors if_data.ifi_oerrors #define if_collisions if_data.ifi_collisions #define if_ibytes if_data.ifi_ibytes #define if_obytes if_data.ifi_obytes #define if_imcasts if_data.ifi_imcasts #define if_omcasts if_data.ifi_omcasts #define if_iqdrops if_data.ifi_iqdrops #define if_noproto if_data.ifi_noproto #define if_lastchange if_data.ifi_lastchange #define if_rawoutput(if, m, sa) if_output(if, m, sa, (struct rtentry *)NULL) /* for compatibility with other BSDs */ #define if_addrlist if_addrhead #define if_list if_link #define if_name(ifp) ((ifp)->if_xname) /* * Locks for address lists on the network interface. 
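 *
 * (First, a sketch of dispatching through the if_output hook declared
 * in the structure above -- the routine consumes the mbuf chain on
 * both success and failure:)
 */
#if 0	/* illustrative sketch, not part of this header */
static int
ex_send(struct ifnet *ifp, struct mbuf *m, struct sockaddr *dst,
    struct route *ro)
{

	return ((*ifp->if_output)(ifp, m, dst, ro));
}
#endif
/*
 * Locks for address lists on the network interface.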
*/ #define IF_ADDR_LOCK_INIT(if) mtx_init(&(if)->if_addr_mtx, \ "if_addr_mtx", NULL, MTX_DEF) #define IF_ADDR_LOCK_DESTROY(if) mtx_destroy(&(if)->if_addr_mtx) #define IF_ADDR_LOCK(if) mtx_lock(&(if)->if_addr_mtx) #define IF_ADDR_UNLOCK(if) mtx_unlock(&(if)->if_addr_mtx) #define IF_ADDR_LOCK_ASSERT(if) mtx_assert(&(if)->if_addr_mtx, MA_OWNED) /* * Output queues (ifp->if_snd) and slow device input queues (*ifp->if_slowq) * are queues of messages stored on ifqueue structures * (defined above). Entries are added to and deleted from these structures * by these macros, which should be called with ipl raised to splimp(). */ #define IF_LOCK(ifq) mtx_lock(&(ifq)->ifq_mtx) #define IF_UNLOCK(ifq) mtx_unlock(&(ifq)->ifq_mtx) #define IF_LOCK_ASSERT(ifq) mtx_assert(&(ifq)->ifq_mtx, MA_OWNED) #define _IF_QFULL(ifq) ((ifq)->ifq_len >= (ifq)->ifq_maxlen) #define _IF_DROP(ifq) ((ifq)->ifq_drops++) #define _IF_QLEN(ifq) ((ifq)->ifq_len) #define _IF_ENQUEUE(ifq, m) do { \ (m)->m_nextpkt = NULL; \ if ((ifq)->ifq_tail == NULL) \ (ifq)->ifq_head = m; \ else \ (ifq)->ifq_tail->m_nextpkt = m; \ (ifq)->ifq_tail = m; \ (ifq)->ifq_len++; \ } while (0) #define IF_ENQUEUE(ifq, m) do { \ IF_LOCK(ifq); \ _IF_ENQUEUE(ifq, m); \ IF_UNLOCK(ifq); \ } while (0) #define _IF_PREPEND(ifq, m) do { \ (m)->m_nextpkt = (ifq)->ifq_head; \ if ((ifq)->ifq_tail == NULL) \ (ifq)->ifq_tail = (m); \ (ifq)->ifq_head = (m); \ (ifq)->ifq_len++; \ } while (0) #define IF_PREPEND(ifq, m) do { \ IF_LOCK(ifq); \ _IF_PREPEND(ifq, m); \ IF_UNLOCK(ifq); \ } while (0) #define _IF_DEQUEUE(ifq, m) do { \ (m) = (ifq)->ifq_head; \ if (m) { \ if (((ifq)->ifq_head = (m)->m_nextpkt) == NULL) \ (ifq)->ifq_tail = NULL; \ (m)->m_nextpkt = NULL; \ (ifq)->ifq_len--; \ } \ } while (0) #define IF_DEQUEUE(ifq, m) do { \ IF_LOCK(ifq); \ _IF_DEQUEUE(ifq, m); \ IF_UNLOCK(ifq); \ } while (0) #define _IF_POLL(ifq, m) ((m) = (ifq)->ifq_head) #define IF_POLL(ifq, m) _IF_POLL(ifq, m) #define _IF_DRAIN(ifq) do { \ struct mbuf *m; \ for (;;) { \ _IF_DEQUEUE(ifq, m); \ if (m == NULL) \ break; \ m_freem(m); \ } \ } while (0) #define IF_DRAIN(ifq) do { \ IF_LOCK(ifq); \ _IF_DRAIN(ifq); \ IF_UNLOCK(ifq); \ } while(0) #ifdef _KERNEL /* interface address change event */ typedef void (*ifaddr_event_handler_t)(void *, struct ifnet *); EVENTHANDLER_DECLARE(ifaddr_event, ifaddr_event_handler_t); /* new interface arrival event */ typedef void (*ifnet_arrival_event_handler_t)(void *, struct ifnet *); EVENTHANDLER_DECLARE(ifnet_arrival_event, ifnet_arrival_event_handler_t); /* interface departure event */ typedef void (*ifnet_departure_event_handler_t)(void *, struct ifnet *); EVENTHANDLER_DECLARE(ifnet_departure_event, ifnet_departure_event_handler_t); /* * interface groups */ struct ifg_group { char ifg_group[IFNAMSIZ]; u_int ifg_refcnt; void *ifg_pf_kif; TAILQ_HEAD(, ifg_member) ifg_members; TAILQ_ENTRY(ifg_group) ifg_next; }; struct ifg_member { TAILQ_ENTRY(ifg_member) ifgm_next; struct ifnet *ifgm_ifp; }; struct ifg_list { struct ifg_group *ifgl_group; TAILQ_ENTRY(ifg_list) ifgl_next; }; /* group attach event */ typedef void (*group_attach_event_handler_t)(void *, struct ifg_group *); EVENTHANDLER_DECLARE(group_attach_event, group_attach_event_handler_t); /* group detach event */ typedef void (*group_detach_event_handler_t)(void *, struct ifg_group *); EVENTHANDLER_DECLARE(group_detach_event, group_detach_event_handler_t); /* group change event */ typedef void (*group_change_event_handler_t)(void *, const char *); EVENTHANDLER_DECLARE(group_change_event, 
group_change_event_handler_t); #define IF_AFDATA_LOCK_INIT(ifp) \ rw_init(&(ifp)->if_afdata_lock, "if_afdata") #define IF_AFDATA_WLOCK(ifp) rw_wlock(&(ifp)->if_afdata_lock) #define IF_AFDATA_RLOCK(ifp) rw_rlock(&(ifp)->if_afdata_lock) #define IF_AFDATA_WUNLOCK(ifp) rw_wunlock(&(ifp)->if_afdata_lock) #define IF_AFDATA_RUNLOCK(ifp) rw_runlock(&(ifp)->if_afdata_lock) #define IF_AFDATA_LOCK(ifp) IF_AFDATA_WLOCK(ifp) #define IF_AFDATA_UNLOCK(ifp) IF_AFDATA_WUNLOCK(ifp) #define IF_AFDATA_TRYLOCK(ifp) rw_try_wlock(&(ifp)->if_afdata_lock) #define IF_AFDATA_DESTROY(ifp) rw_destroy(&(ifp)->if_afdata_lock) #define IF_AFDATA_LOCK_ASSERT(ifp) rw_assert(&(ifp)->if_afdata_lock, RA_LOCKED) #define IF_AFDATA_UNLOCK_ASSERT(ifp) rw_assert(&(ifp)->if_afdata_lock, RA_UNLOCKED) int if_handoff(struct ifqueue *ifq, struct mbuf *m, struct ifnet *ifp, int adjust); #define IF_HANDOFF(ifq, m, ifp) \ if_handoff((struct ifqueue *)ifq, m, ifp, 0) #define IF_HANDOFF_ADJ(ifq, m, ifp, adj) \ if_handoff((struct ifqueue *)ifq, m, ifp, adj) void if_start(struct ifnet *); #define IFQ_ENQUEUE(ifq, m, err) \ do { \ IF_LOCK(ifq); \ if (ALTQ_IS_ENABLED(ifq)) \ ALTQ_ENQUEUE(ifq, m, NULL, err); \ else { \ if (_IF_QFULL(ifq)) { \ m_freem(m); \ (err) = ENOBUFS; \ } else { \ _IF_ENQUEUE(ifq, m); \ (err) = 0; \ } \ } \ if (err) \ (ifq)->ifq_drops++; \ IF_UNLOCK(ifq); \ } while (0) #define IFQ_DEQUEUE_NOLOCK(ifq, m) \ do { \ if (TBR_IS_ENABLED(ifq)) \ (m) = tbr_dequeue_ptr(ifq, ALTDQ_REMOVE); \ else if (ALTQ_IS_ENABLED(ifq)) \ ALTQ_DEQUEUE(ifq, m); \ else \ _IF_DEQUEUE(ifq, m); \ } while (0) #define IFQ_DEQUEUE(ifq, m) \ do { \ IF_LOCK(ifq); \ IFQ_DEQUEUE_NOLOCK(ifq, m); \ IF_UNLOCK(ifq); \ } while (0) #define IFQ_POLL_NOLOCK(ifq, m) \ do { \ if (TBR_IS_ENABLED(ifq)) \ (m) = tbr_dequeue_ptr(ifq, ALTDQ_POLL); \ else if (ALTQ_IS_ENABLED(ifq)) \ ALTQ_POLL(ifq, m); \ else \ _IF_POLL(ifq, m); \ } while (0) #define IFQ_POLL(ifq, m) \ do { \ IF_LOCK(ifq); \ IFQ_POLL_NOLOCK(ifq, m); \ IF_UNLOCK(ifq); \ } while (0) #define IFQ_PURGE_NOLOCK(ifq) \ do { \ if (ALTQ_IS_ENABLED(ifq)) { \ ALTQ_PURGE(ifq); \ } else \ _IF_DRAIN(ifq); \ } while (0) #define IFQ_PURGE(ifq) \ do { \ IF_LOCK(ifq); \ IFQ_PURGE_NOLOCK(ifq); \ IF_UNLOCK(ifq); \ } while (0) #define IFQ_SET_READY(ifq) \ do { ((ifq)->altq_flags |= ALTQF_READY); } while (0) #define IFQ_LOCK(ifq) IF_LOCK(ifq) #define IFQ_UNLOCK(ifq) IF_UNLOCK(ifq) #define IFQ_LOCK_ASSERT(ifq) IF_LOCK_ASSERT(ifq) #define IFQ_IS_EMPTY(ifq) ((ifq)->ifq_len == 0) #define IFQ_INC_LEN(ifq) ((ifq)->ifq_len++) #define IFQ_DEC_LEN(ifq) (--(ifq)->ifq_len) #define IFQ_INC_DROPS(ifq) ((ifq)->ifq_drops++) #define IFQ_SET_MAXLEN(ifq, len) ((ifq)->ifq_maxlen = (len)) /* * The IFF_DRV_OACTIVE test should really occur in the device driver, not in * the handoff logic, as that flag is locked by the device driver. 
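 *
 * (A sketch of the enqueue side as seen from a driver-agnostic caller;
 * on failure IFQ_ENQUEUE above has already freed the mbuf and counted
 * the drop, so the caller must not touch 'm' again:)
 */
#if 0	/* illustrative sketch, not part of this header */
static int
ex_handoff(struct ifnet *ifp, struct mbuf *m)
{
	int error;

	IFQ_ENQUEUE(&ifp->if_snd, m, error);
	if (error == 0 && (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0)
		if_start(ifp);
	return (error);
}
#endif
/*
 * The IFF_DRV_OACTIVE caveat above applies equally to the macros below.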
*/ #define IFQ_HANDOFF_ADJ(ifp, m, adj, err) \ do { \ int len; \ short mflags; \ \ len = (m)->m_pkthdr.len; \ mflags = (m)->m_flags; \ IFQ_ENQUEUE(&(ifp)->if_snd, m, err); \ if ((err) == 0) { \ (ifp)->if_obytes += len + (adj); \ if (mflags & M_MCAST) \ (ifp)->if_omcasts++; \ if (((ifp)->if_drv_flags & IFF_DRV_OACTIVE) == 0) \ if_start(ifp); \ } \ } while (0) #define IFQ_HANDOFF(ifp, m, err) \ IFQ_HANDOFF_ADJ(ifp, m, 0, err) #define IFQ_DRV_DEQUEUE(ifq, m) \ do { \ (m) = (ifq)->ifq_drv_head; \ if (m) { \ if (((ifq)->ifq_drv_head = (m)->m_nextpkt) == NULL) \ (ifq)->ifq_drv_tail = NULL; \ (m)->m_nextpkt = NULL; \ (ifq)->ifq_drv_len--; \ } else { \ IFQ_LOCK(ifq); \ IFQ_DEQUEUE_NOLOCK(ifq, m); \ while ((ifq)->ifq_drv_len < (ifq)->ifq_drv_maxlen) { \ struct mbuf *m0; \ IFQ_DEQUEUE_NOLOCK(ifq, m0); \ if (m0 == NULL) \ break; \ m0->m_nextpkt = NULL; \ if ((ifq)->ifq_drv_tail == NULL) \ (ifq)->ifq_drv_head = m0; \ else \ (ifq)->ifq_drv_tail->m_nextpkt = m0; \ (ifq)->ifq_drv_tail = m0; \ (ifq)->ifq_drv_len++; \ } \ IFQ_UNLOCK(ifq); \ } \ } while (0) #define IFQ_DRV_PREPEND(ifq, m) \ do { \ (m)->m_nextpkt = (ifq)->ifq_drv_head; \ if ((ifq)->ifq_drv_tail == NULL) \ (ifq)->ifq_drv_tail = (m); \ (ifq)->ifq_drv_head = (m); \ (ifq)->ifq_drv_len++; \ } while (0) #define IFQ_DRV_IS_EMPTY(ifq) \ (((ifq)->ifq_drv_len == 0) && ((ifq)->ifq_len == 0)) #define IFQ_DRV_PURGE(ifq) \ do { \ struct mbuf *m, *n = (ifq)->ifq_drv_head; \ while((m = n) != NULL) { \ n = m->m_nextpkt; \ m_freem(m); \ } \ (ifq)->ifq_drv_head = (ifq)->ifq_drv_tail = NULL; \ (ifq)->ifq_drv_len = 0; \ IFQ_PURGE(ifq); \ } while (0) #ifdef _KERNEL static __inline void drbr_stats_update(struct ifnet *ifp, int len, int mflags) { ifp->if_obytes += len; if (mflags & M_MCAST) ifp->if_omcasts++; } static __inline int drbr_enqueue(struct ifnet *ifp, struct buf_ring *br, struct mbuf *m) { int error = 0; int len = m->m_pkthdr.len; int mflags = m->m_flags; #ifdef ALTQ if (ALTQ_IS_ENABLED(&ifp->if_snd)) { IFQ_ENQUEUE(&ifp->if_snd, m, error); return (error); } #endif if ((error = buf_ring_enqueue(br, m)) == ENOBUFS) { br->br_drops++; _IF_DROP(&ifp->if_snd); m_freem(m); } else drbr_stats_update(ifp, len, mflags); return (error); } static __inline void drbr_free(struct buf_ring *br, struct malloc_type *type) { struct mbuf *m; while ((m = buf_ring_dequeue_sc(br)) != NULL) m_freem(m); buf_ring_free(br, type); } static __inline struct mbuf * drbr_dequeue(struct ifnet *ifp, struct buf_ring *br) { #ifdef ALTQ struct mbuf *m; if (ALTQ_IS_ENABLED(&ifp->if_snd)) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); return (m); } #endif return (buf_ring_dequeue_sc(br)); } static __inline int drbr_empty(struct ifnet *ifp, struct buf_ring *br) { #ifdef ALTQ if (ALTQ_IS_ENABLED(&ifp->if_snd)) return (IFQ_DRV_IS_EMPTY(&ifp->if_snd)); #endif return (buf_ring_empty(br)); } #endif /* * 72 was chosen below because it is the size of a TCP/IP * header (40) + the minimum mss (32). */ #define IF_MINMTU 72 #define IF_MAXMTU 65535 #endif /* _KERNEL */ /* * The ifaddr structure contains information about one address * of an interface. They are maintained by the different address families, * are allocated and attached when an address is set, and are linked * together so all addresses for an interface can be located. * * NOTE: a 'struct ifaddr' is always at the beginning of a larger * chunk of malloc'ed memory, where we store the three addresses * (ifa_addr, ifa_dstaddr and ifa_netmask) referenced here. 
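 *
 * (Before that: a sketch of draining the buf_ring helpers above from a
 * driver transmit path; ex_encap() is hypothetical and hands one
 * packet to the hardware:)
 */
#if 0	/* illustrative sketch, not part of this header */
static int ex_encap(void *softc, struct mbuf **mp);	/* hypothetical */

static void
ex_txq_drain(struct ifnet *ifp, struct buf_ring *br)
{
	struct mbuf *m;

	while ((m = drbr_dequeue(ifp, br)) != NULL) {
		if (ex_encap(ifp->if_softc, &m) != 0) {
			/* encap may have freed or kept the mbuf */
			if (m != NULL)
				m_freem(m);
			break;
		}
	}
}
#endif
/*
 * (struct ifaddr, as documented above:)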
*/ struct ifaddr { struct sockaddr *ifa_addr; /* address of interface */ struct sockaddr *ifa_dstaddr; /* other end of p-to-p link */ #define ifa_broadaddr ifa_dstaddr /* broadcast address interface */ struct sockaddr *ifa_netmask; /* used to determine subnet */ struct if_data if_data; /* not all members are meaningful */ struct ifnet *ifa_ifp; /* back-pointer to interface */ TAILQ_ENTRY(ifaddr) ifa_link; /* queue macro glue */ void (*ifa_rtrequest) /* check or clean routes (+ or -)'d */ (int, struct rtentry *, struct rt_addrinfo *); u_short ifa_flags; /* mostly rt_flags for cloning */ u_int ifa_refcnt; /* references to this structure */ int ifa_metric; /* cost of going out this interface */ int (*ifa_claim_addr) /* check if an addr goes to this if */ (struct ifaddr *, struct sockaddr *); struct mtx ifa_mtx; }; #define IFA_ROUTE RTF_UP /* route installed */ /* for compatibility with other BSDs */ #define ifa_list ifa_link #define IFA_LOCK_INIT(ifa) \ mtx_init(&(ifa)->ifa_mtx, "ifaddr", NULL, MTX_DEF) #define IFA_LOCK(ifa) mtx_lock(&(ifa)->ifa_mtx) #define IFA_UNLOCK(ifa) mtx_unlock(&(ifa)->ifa_mtx) #define IFA_DESTROY(ifa) mtx_destroy(&(ifa)->ifa_mtx) /* * The prefix structure contains information about one prefix * of an interface. They are maintained by the different address families, * are allocated and attached when a prefix or an address is set, * and are linked together so all prefixes for an interface can be located. */ struct ifprefix { struct sockaddr *ifpr_prefix; /* prefix of interface */ struct ifnet *ifpr_ifp; /* back-pointer to interface */ TAILQ_ENTRY(ifprefix) ifpr_list; /* queue macro glue */ u_char ifpr_plen; /* prefix length in bits */ u_char ifpr_type; /* protocol dependent prefix type */ }; /* * Multicast address structure. This is analogous to the ifaddr * structure except that it keeps track of multicast addresses. */ struct ifmultiaddr { TAILQ_ENTRY(ifmultiaddr) ifma_link; /* queue macro glue */ struct sockaddr *ifma_addr; /* address this membership is for */ struct sockaddr *ifma_lladdr; /* link-layer translation, if any */ struct ifnet *ifma_ifp; /* back-pointer to interface */ u_int ifma_refcount; /* reference count */ void *ifma_protospec; /* protocol-specific state, if any */ struct ifmultiaddr *ifma_llifma; /* pointer to ifma for ifma_lladdr */ }; #ifdef _KERNEL #define IFAFREE(ifa) \ do { \ IFA_LOCK(ifa); \ KASSERT((ifa)->ifa_refcnt > 0, \ ("ifa %p !(ifa_refcnt > 0)", ifa)); \ if (--(ifa)->ifa_refcnt == 0) { \ IFA_DESTROY(ifa); \ free(ifa, M_IFADDR); \ } else \ IFA_UNLOCK(ifa); \ } while (0) #define IFAREF(ifa) \ do { \ IFA_LOCK(ifa); \ ++(ifa)->ifa_refcnt; \ IFA_UNLOCK(ifa); \ } while (0) extern struct rwlock ifnet_lock; #define IFNET_LOCK_INIT() \ rw_init_flags(&ifnet_lock, "ifnet", RW_RECURSE) #define IFNET_WLOCK() rw_wlock(&ifnet_lock) #define IFNET_WUNLOCK() rw_wunlock(&ifnet_lock) #define IFNET_WLOCK_ASSERT() rw_assert(&ifnet_lock, RA_LOCKED) #define IFNET_RLOCK() rw_rlock(&ifnet_lock) #define IFNET_RUNLOCK() rw_runlock(&ifnet_lock) struct ifindex_entry { struct ifnet *ife_ifnet; struct cdev *ife_dev; }; /* * Look up an ifnet given its index; the _ref variant also acquires a * reference that must be freed using if_rele(). It is almost always a bug * to call ifnet_byindex() instead of ifnet_byindex_ref(). */ struct ifnet *ifnet_byindex(u_short idx); struct ifnet *ifnet_byindex_locked(u_short idx); struct ifnet *ifnet_byindex_ref(u_short idx); /* * Given the index, ifaddr_byindex() returns the one and only * link-level ifaddr for the interface.
You are not supposed to use * it to traverse the list of addresses associated to the interface. */ struct ifaddr *ifaddr_byindex(u_short idx); struct cdev *ifdev_byindex(u_short idx); #ifdef VIMAGE_GLOBALS extern struct ifnethead ifnet; extern struct ifnet *loif; /* first loopback interface */ extern int if_index; #endif extern int ifqmaxlen; int if_addgroup(struct ifnet *, const char *); int if_delgroup(struct ifnet *, const char *); int if_addmulti(struct ifnet *, struct sockaddr *, struct ifmultiaddr **); int if_allmulti(struct ifnet *, int); struct ifnet* if_alloc(u_char); void if_attach(struct ifnet *); void if_dead(struct ifnet *); void if_grow(void); int if_delmulti(struct ifnet *, struct sockaddr *); void if_delmulti_ifma(struct ifmultiaddr *); void if_detach(struct ifnet *); void if_vmove(struct ifnet *, struct vnet *); void if_purgeaddrs(struct ifnet *); void if_purgemaddrs(struct ifnet *); void if_down(struct ifnet *); struct ifmultiaddr * if_findmulti(struct ifnet *, struct sockaddr *); void if_free(struct ifnet *); void if_free_type(struct ifnet *, u_char); void if_initname(struct ifnet *, const char *, int); void if_link_state_change(struct ifnet *, int); int if_printf(struct ifnet *, const char *, ...) __printflike(2, 3); void if_qflush(struct ifnet *); void if_ref(struct ifnet *); void if_rele(struct ifnet *); int if_setlladdr(struct ifnet *, const u_char *, int); void if_up(struct ifnet *); /*void ifinit(void);*/ /* declared in systm.h for main() */ int ifioctl(struct socket *, u_long, caddr_t, struct thread *); int ifpromisc(struct ifnet *, int); struct ifnet *ifunit(const char *); struct ifnet *ifunit_ref(const char *); void ifq_attach(struct ifaltq *, struct ifnet *ifp); void ifq_detach(struct ifaltq *); struct ifaddr *ifa_ifwithaddr(struct sockaddr *); struct ifaddr *ifa_ifwithbroadaddr(struct sockaddr *); struct ifaddr *ifa_ifwithdstaddr(struct sockaddr *); struct ifaddr *ifa_ifwithnet(struct sockaddr *); struct ifaddr *ifa_ifwithroute(int, struct sockaddr *, struct sockaddr *); struct ifaddr *ifa_ifwithroute_fib(int, struct sockaddr *, struct sockaddr *, u_int); struct ifaddr *ifaof_ifpforaddr(struct sockaddr *, struct ifnet *); int if_simloop(struct ifnet *ifp, struct mbuf *m, int af, int hlen); typedef void *if_com_alloc_t(u_char type, struct ifnet *ifp); typedef void if_com_free_t(void *com, u_char type); void if_register_com_alloc(u_char type, if_com_alloc_t *a, if_com_free_t *f); void if_deregister_com_alloc(u_char type); #define IF_LLADDR(ifp) \ LLADDR((struct sockaddr_dl *)((ifp)->if_addr->ifa_addr)) #ifdef DEVICE_POLLING enum poll_cmd { POLL_ONLY, POLL_AND_CHECK_STATUS }; -typedef void poll_handler_t(struct ifnet *ifp, enum poll_cmd cmd, int count); +typedef int poll_handler_t(struct ifnet *ifp, enum poll_cmd cmd, int count); int ether_poll_register(poll_handler_t *h, struct ifnet *ifp); int ether_poll_deregister(struct ifnet *ifp); #endif /* DEVICE_POLLING */ #endif /* _KERNEL */ #endif /* !_NET_IF_VAR_H_ */ Index: head/sys/pci/if_rl.c =================================================================== --- head/sys/pci/if_rl.c (revision 193095) +++ head/sys/pci/if_rl.c (revision 193096) @@ -1,2111 +1,2118 @@ /*- * Copyright (c) 1997, 1998 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * RealTek 8129/8139 PCI NIC driver * * Supports several extremely cheap PCI 10/100 adapters based on * the RealTek chipset. Datasheets can be obtained from * www.realtek.com.tw. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is * probably the worst PCI ethernet controller ever made, with the possible * exception of the FEAST chip made by SMC. The 8139 supports bus-master * DMA, but it has a terrible interface that nullifies any performance * gains that bus-master DMA usually offers. * * For transmission, the chip offers a series of four TX descriptor * registers. Each transmit frame must be in a contiguous buffer, aligned * on a longword (32-bit) boundary. This means we almost always have to * do mbuf copies in order to transmit a frame, except in the unlikely * case where a) the packet fits into a single mbuf, and b) the packet * is 32-bit aligned within the mbuf's data area. The presence of only * four descriptor registers means that we can never have more than four * packets queued for transmission at any one time. * * Reception is not much better. The driver has to allocate a single large * buffer area (up to 64K in size) into which the chip will DMA received * frames. Because we don't know where within this region received packets * will begin or end, we have no choice but to copy data from the buffer * area into mbufs in order to pass the packets up to the higher protocol * levels. * * It's impossible given this rotten design to really achieve decent * performance at 100Mbps, unless you happen to have a 400Mhz PII or * some equally overmuscled CPU to drive it. * * On the bright side, the 8139 does have a built-in PHY, although * rather than using an MDIO serial interface like most other NICs, the * PHY registers are directly accessible through the 8139's register * space. The 8139 supports autonegotiation, as well as a 64-bit multicast * filter. * * The 8129 chip is an older version of the 8139 that uses an external PHY * chip. 
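 *
 * With this revision the polling entry points (rl_poll() below) return
 * the number of packets processed instead of void; a minimal sketch of
 * the new contract, with hypothetical helper names:
 */
#if 0	/* illustrative sketch, not part of this driver */
static int ex_rxeof(struct ifnet *ifp, int count);	/* hypothetical */
static void ex_ack_status(struct ifnet *ifp);		/* hypothetical */

static int
ex_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	int rx_npkts;

	rx_npkts = ex_rxeof(ifp, count);	/* process up to 'count' */
	if (cmd == POLL_AND_CHECK_STATUS)
		ex_ack_status(ifp);		/* handle latched events */
	return (rx_npkts);
}
#endif
/*
 * (8129 PHY access, continued:)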
The 8129 has a serial MDIO interface for accessing the MII where * the 8139 lets you directly access the on-board PHY registers. We need * to select which interface to use depending on the chip type. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(rl, pci, 1, 1, 1); MODULE_DEPEND(rl, ether, 1, 1, 1); MODULE_DEPEND(rl, miibus, 1, 1, 1); /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* * Default to using PIO access for this driver. On SMP systems, * there appear to be problems with memory mapped mode: it looks like * doing too many memory mapped access back to back in rapid succession * can hang the bus. I'm inclined to blame this on crummy design/construction * on the part of RealTek. Memory mapped mode does appear to work on * uniprocessor systems though. */ #define RL_USEIOSPACE #include /* * Various supported device vendors/types and their names. */ static struct rl_type rl_devs[] = { { RT_VENDORID, RT_DEVICEID_8129, RL_8129, "RealTek 8129 10/100BaseTX" }, { RT_VENDORID, RT_DEVICEID_8139, RL_8139, "RealTek 8139 10/100BaseTX" }, { RT_VENDORID, RT_DEVICEID_8139D, RL_8139, "RealTek 8139 10/100BaseTX" }, { RT_VENDORID, RT_DEVICEID_8138, RL_8139, "RealTek 8139 10/100BaseTX CardBus" }, { RT_VENDORID, RT_DEVICEID_8100, RL_8139, "RealTek 8100 10/100BaseTX" }, { ACCTON_VENDORID, ACCTON_DEVICEID_5030, RL_8139, "Accton MPX 5030/5038 10/100BaseTX" }, { DELTA_VENDORID, DELTA_DEVICEID_8139, RL_8139, "Delta Electronics 8139 10/100BaseTX" }, { ADDTRON_VENDORID, ADDTRON_DEVICEID_8139, RL_8139, "Addtron Technology 8139 10/100BaseTX" }, { DLINK_VENDORID, DLINK_DEVICEID_530TXPLUS, RL_8139, "D-Link DFE-530TX+ 10/100BaseTX" }, { DLINK_VENDORID, DLINK_DEVICEID_690TXD, RL_8139, "D-Link DFE-690TXD 10/100BaseTX" }, { NORTEL_VENDORID, ACCTON_DEVICEID_5030, RL_8139, "Nortel Networks 10/100BaseTX" }, { COREGA_VENDORID, COREGA_DEVICEID_FETHERCBTXD, RL_8139, "Corega FEther CB-TXD" }, { COREGA_VENDORID, COREGA_DEVICEID_FETHERIICBTXD, RL_8139, "Corega FEtherII CB-TXD" }, { PEPPERCON_VENDORID, PEPPERCON_DEVICEID_ROLF, RL_8139, "Peppercon AG ROL-F" }, { PLANEX_VENDORID, PLANEX_DEVICEID_FNW3603TX, RL_8139, "Planex FNW-3603-TX" }, { PLANEX_VENDORID, PLANEX_DEVICEID_FNW3800TX, RL_8139, "Planex FNW-3800-TX" }, { CP_VENDORID, RT_DEVICEID_8139, RL_8139, "Compaq HNE-300" }, { LEVEL1_VENDORID, LEVEL1_DEVICEID_FPC0106TX, RL_8139, "LevelOne FPC-0106TX" }, { EDIMAX_VENDORID, EDIMAX_DEVICEID_EP4103DL, RL_8139, "Edimax EP-4103DL CardBus" } }; static int rl_attach(device_t); static int rl_detach(device_t); static void rl_dmamap_cb(void *, bus_dma_segment_t *, int, int); static int rl_dma_alloc(struct rl_softc *); static void rl_dma_free(struct rl_softc *); static void rl_eeprom_putbyte(struct rl_softc *, int); static void rl_eeprom_getword(struct rl_softc *, int, uint16_t *); static int rl_encap(struct rl_softc *, struct mbuf **); static int rl_list_tx_init(struct rl_softc *); static int rl_list_rx_init(struct rl_softc *); static int rl_ifmedia_upd(struct ifnet *); static void rl_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int rl_ioctl(struct ifnet *, u_long, caddr_t); static void rl_intr(void *); static void rl_init(void *); static void rl_init_locked(struct rl_softc *sc); static void rl_mii_send(struct rl_softc *, 
uint32_t, int); static void rl_mii_sync(struct rl_softc *); static int rl_mii_readreg(struct rl_softc *, struct rl_mii_frame *); static int rl_mii_writereg(struct rl_softc *, struct rl_mii_frame *); static int rl_miibus_readreg(device_t, int, int); static void rl_miibus_statchg(device_t); static int rl_miibus_writereg(device_t, int, int, int); #ifdef DEVICE_POLLING -static void rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); -static void rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count); +static int rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); +static int rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count); #endif static int rl_probe(device_t); static void rl_read_eeprom(struct rl_softc *, uint8_t *, int, int, int); static void rl_reset(struct rl_softc *); static int rl_resume(device_t); -static void rl_rxeof(struct rl_softc *); +static int rl_rxeof(struct rl_softc *); static void rl_setmulti(struct rl_softc *); static int rl_shutdown(device_t); static void rl_start(struct ifnet *); static void rl_start_locked(struct ifnet *); static void rl_stop(struct rl_softc *); static int rl_suspend(device_t); static void rl_tick(void *); static void rl_txeof(struct rl_softc *); static void rl_watchdog(struct rl_softc *); #ifdef RL_USEIOSPACE #define RL_RES SYS_RES_IOPORT #define RL_RID RL_PCI_LOIO #else #define RL_RES SYS_RES_MEMORY #define RL_RID RL_PCI_LOMEM #endif static device_method_t rl_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rl_probe), DEVMETHOD(device_attach, rl_attach), DEVMETHOD(device_detach, rl_detach), DEVMETHOD(device_suspend, rl_suspend), DEVMETHOD(device_resume, rl_resume), DEVMETHOD(device_shutdown, rl_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, rl_miibus_readreg), DEVMETHOD(miibus_writereg, rl_miibus_writereg), DEVMETHOD(miibus_statchg, rl_miibus_statchg), { 0, 0 } }; static driver_t rl_driver = { "rl", rl_methods, sizeof(struct rl_softc) }; static devclass_t rl_devclass; DRIVER_MODULE(rl, pci, rl_driver, rl_devclass, 0, 0); DRIVER_MODULE(rl, cardbus, rl_driver, rl_devclass, 0, 0); DRIVER_MODULE(miibus, rl, miibus_driver, miibus_devclass, 0, 0); #define EE_SET(x) \ CSR_WRITE_1(sc, RL_EECMD, \ CSR_READ_1(sc, RL_EECMD) | x) #define EE_CLR(x) \ CSR_WRITE_1(sc, RL_EECMD, \ CSR_READ_1(sc, RL_EECMD) & ~x) /* * Send a read command and address to the EEPROM, check for ACK. */ static void rl_eeprom_putbyte(struct rl_softc *sc, int addr) { register int d, i; d = addr | sc->rl_eecmd_read; /* * Feed in each bit and strobe the clock. */ for (i = 0x400; i; i >>= 1) { if (d & i) { EE_SET(RL_EE_DATAIN); } else { EE_CLR(RL_EE_DATAIN); } DELAY(100); EE_SET(RL_EE_CLK); DELAY(150); EE_CLR(RL_EE_CLK); DELAY(100); } } /* * Read a word of data stored in the EEPROM at address 'addr.' */ static void rl_eeprom_getword(struct rl_softc *sc, int addr, uint16_t *dest) { register int i; uint16_t word = 0; /* Enter EEPROM access mode. */ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL); /* * Send address of word we want to read. */ rl_eeprom_putbyte(sc, addr); CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL); /* * Start reading bits from EEPROM. */ for (i = 0x8000; i; i >>= 1) { EE_SET(RL_EE_CLK); DELAY(100); if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT) word |= i; EE_CLR(RL_EE_CLK); DELAY(100); } /* Turn off EEPROM access mode. 
*/
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM.
 */
static void
rl_read_eeprom(struct rl_softc *sc, uint8_t *dest, int off, int cnt, int swap)
{
	int i;
	uint16_t word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		rl_eeprom_getword(sc, off + i, &word);
		ptr = (uint16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
}

/*
 * MII access routines are provided for the 8129, which
 * doesn't have a built-in PHY. For the 8139, we fake things
 * up by diverting rl_phy_readreg()/rl_phy_writereg() to the
 * direct access PHY registers.
 */
#define MII_SET(x)					\
	CSR_WRITE_1(sc, RL_MII,				\
		CSR_READ_1(sc, RL_MII) | (x))

#define MII_CLR(x)					\
	CSR_WRITE_1(sc, RL_MII,				\
		CSR_READ_1(sc, RL_MII) & ~(x))

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
static void
rl_mii_sync(struct rl_softc *sc)
{
	register int i;

	MII_SET(RL_MII_DIR|RL_MII_DATAOUT);

	for (i = 0; i < 32; i++) {
		MII_SET(RL_MII_CLK);
		DELAY(1);
		MII_CLR(RL_MII_CLK);
		DELAY(1);
	}
}

/*
 * Clock a series of bits through the MII.
 */
static void
rl_mii_send(struct rl_softc *sc, uint32_t bits, int cnt)
{
	int i;

	MII_CLR(RL_MII_CLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			MII_SET(RL_MII_DATAOUT);
		} else {
			MII_CLR(RL_MII_DATAOUT);
		}
		DELAY(1);
		MII_CLR(RL_MII_CLK);
		DELAY(1);
		MII_SET(RL_MII_CLK);
	}
}

/*
 * Read a PHY register through the MII.
 */
static int
rl_mii_readreg(struct rl_softc *sc, struct rl_mii_frame *frame)
{
	int i, ack;

	/* Set up frame for RX. */
	frame->mii_stdelim = RL_MII_STARTDELIM;
	frame->mii_opcode = RL_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_2(sc, RL_MII, 0);

	/* Turn on data xmit. */
	MII_SET(RL_MII_DIR);

	rl_mii_sync(sc);

	/* Send command/address info. */
	rl_mii_send(sc, frame->mii_stdelim, 2);
	rl_mii_send(sc, frame->mii_opcode, 2);
	rl_mii_send(sc, frame->mii_phyaddr, 5);
	rl_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	MII_CLR((RL_MII_CLK|RL_MII_DATAOUT));
	DELAY(1);
	MII_SET(RL_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	MII_CLR(RL_MII_DIR);

	/* Check for ack */
	MII_CLR(RL_MII_CLK);
	DELAY(1);
	ack = CSR_READ_2(sc, RL_MII) & RL_MII_DATAIN;
	MII_SET(RL_MII_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for (i = 0; i < 16; i++) {
			MII_CLR(RL_MII_CLK);
			DELAY(1);
			MII_SET(RL_MII_CLK);
			DELAY(1);
		}
		goto fail;
	}

	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(RL_MII_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_2(sc, RL_MII) & RL_MII_DATAIN)
				frame->mii_data |= i;
			DELAY(1);
		}
		MII_SET(RL_MII_CLK);
		DELAY(1);
	}

fail:
	MII_CLR(RL_MII_CLK);
	DELAY(1);
	MII_SET(RL_MII_CLK);
	DELAY(1);

	return (ack ? 1 : 0);
}

/*
 * Write to a PHY register through the MII.
 */
static int
rl_mii_writereg(struct rl_softc *sc, struct rl_mii_frame *frame)
{

	/* Set up frame for TX. */
	frame->mii_stdelim = RL_MII_STARTDELIM;
	frame->mii_opcode = RL_MII_WRITEOP;
	frame->mii_turnaround = RL_MII_TURNAROUND;

	/* Turn on data output. */
	MII_SET(RL_MII_DIR);

	rl_mii_sync(sc);

	rl_mii_send(sc, frame->mii_stdelim, 2);
	rl_mii_send(sc, frame->mii_opcode, 2);
	rl_mii_send(sc, frame->mii_phyaddr, 5);
	rl_mii_send(sc, frame->mii_regaddr, 5);
	rl_mii_send(sc, frame->mii_turnaround, 2);
	rl_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(RL_MII_CLK);
	DELAY(1);
	MII_CLR(RL_MII_CLK);
	DELAY(1);

	/* Turn off xmit.
*/ MII_CLR(RL_MII_DIR); return (0); } static int rl_miibus_readreg(device_t dev, int phy, int reg) { struct rl_softc *sc; struct rl_mii_frame frame; uint16_t rval = 0; uint16_t rl8139_reg = 0; sc = device_get_softc(dev); if (sc->rl_type == RL_8139) { /* Pretend the internal PHY is only at address 0 */ if (phy) { return (0); } switch (reg) { case MII_BMCR: rl8139_reg = RL_BMCR; break; case MII_BMSR: rl8139_reg = RL_BMSR; break; case MII_ANAR: rl8139_reg = RL_ANAR; break; case MII_ANER: rl8139_reg = RL_ANER; break; case MII_ANLPAR: rl8139_reg = RL_LPAR; break; case MII_PHYIDR1: case MII_PHYIDR2: return (0); /* * Allow the rlphy driver to read the media status * register. If we have a link partner which does not * support NWAY, this is the register which will tell * us the results of parallel detection. */ case RL_MEDIASTAT: rval = CSR_READ_1(sc, RL_MEDIASTAT); return (rval); default: device_printf(sc->rl_dev, "bad phy register\n"); return (0); } rval = CSR_READ_2(sc, rl8139_reg); return (rval); } bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; rl_mii_readreg(sc, &frame); return (frame.mii_data); } static int rl_miibus_writereg(device_t dev, int phy, int reg, int data) { struct rl_softc *sc; struct rl_mii_frame frame; uint16_t rl8139_reg = 0; sc = device_get_softc(dev); if (sc->rl_type == RL_8139) { /* Pretend the internal PHY is only at address 0 */ if (phy) { return (0); } switch (reg) { case MII_BMCR: rl8139_reg = RL_BMCR; break; case MII_BMSR: rl8139_reg = RL_BMSR; break; case MII_ANAR: rl8139_reg = RL_ANAR; break; case MII_ANER: rl8139_reg = RL_ANER; break; case MII_ANLPAR: rl8139_reg = RL_LPAR; break; case MII_PHYIDR1: case MII_PHYIDR2: return (0); break; default: device_printf(sc->rl_dev, "bad phy register\n"); return (0); } CSR_WRITE_2(sc, rl8139_reg, data); return (0); } bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; frame.mii_data = data; rl_mii_writereg(sc, &frame); return (0); } static void rl_miibus_statchg(device_t dev) { struct rl_softc *sc; struct ifnet *ifp; struct mii_data *mii; sc = device_get_softc(dev); mii = device_get_softc(sc->rl_miibus); ifp = sc->rl_ifp; if (mii == NULL || ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; sc->rl_flags &= ~RL_FLAG_LINK; if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: sc->rl_flags |= RL_FLAG_LINK; break; default: break; } } /* * RealTek controllers do not provide any interface to * Tx/Rx MACs for resolved speed, duplex and flow-control * parameters. */ } /* * Program the 64-bit multicast hash filter. 
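 *
 * Each multicast address is hashed with ether_crc32_be() and the top
 * six bits of the CRC select one of the 64 filter bits, split across
 * the two 32-bit MAR registers. A sketch of the indexing, mirroring
 * the code below (not additional driver logic):
 *
 *   h = ether_crc32_be(lladdr, ETHER_ADDR_LEN) >> 26;  // 0..63
 *   if (h < 32)
 *       hashes[0] |= 1 << h;         // lands in RL_MAR0
 *   else
 *       hashes[1] |= 1 << (h - 32);  // lands in RL_MAR4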
*/ static void rl_setmulti(struct rl_softc *sc) { struct ifnet *ifp = sc->rl_ifp; int h = 0; uint32_t hashes[2] = { 0, 0 }; struct ifmultiaddr *ifma; uint32_t rxfilt; int mcnt = 0; RL_LOCK_ASSERT(sc); rxfilt = CSR_READ_4(sc, RL_RXCFG); if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { rxfilt |= RL_RXCFG_RX_MULTI; CSR_WRITE_4(sc, RL_RXCFG, rxfilt); CSR_WRITE_4(sc, RL_MAR0, 0xFFFFFFFF); CSR_WRITE_4(sc, RL_MAR4, 0xFFFFFFFF); return; } /* first, zot all the existing hash bits */ CSR_WRITE_4(sc, RL_MAR0, 0); CSR_WRITE_4(sc, RL_MAR4, 0); /* now program new ones */ IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); mcnt++; } IF_ADDR_UNLOCK(ifp); if (mcnt) rxfilt |= RL_RXCFG_RX_MULTI; else rxfilt &= ~RL_RXCFG_RX_MULTI; CSR_WRITE_4(sc, RL_RXCFG, rxfilt); CSR_WRITE_4(sc, RL_MAR0, hashes[0]); CSR_WRITE_4(sc, RL_MAR4, hashes[1]); } static void rl_reset(struct rl_softc *sc) { register int i; RL_LOCK_ASSERT(sc); CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET); for (i = 0; i < RL_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET)) break; } if (i == RL_TIMEOUT) device_printf(sc->rl_dev, "reset never completed!\n"); } /* * Probe for a RealTek 8129/8139 chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int rl_probe(device_t dev) { struct rl_type *t; uint16_t devid, revid, vendor; int i; vendor = pci_get_vendor(dev); devid = pci_get_device(dev); revid = pci_get_revid(dev); if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) { if (revid == 0x20) { /* 8139C+, let re(4) take care of this device. */ return (ENXIO); } } t = rl_devs; for (i = 0; i < sizeof(rl_devs) / sizeof(rl_devs[0]); i++, t++) { if (vendor == t->rl_vid && devid == t->rl_did) { device_set_desc(dev, t->rl_name); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } struct rl_dmamap_arg { bus_addr_t rl_busaddr; }; static void rl_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct rl_dmamap_arg *ctx; if (error != 0) return; KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); ctx = (struct rl_dmamap_arg *)arg; ctx->rl_busaddr = segs[0].ds_addr; } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int rl_attach(device_t dev) { uint8_t eaddr[ETHER_ADDR_LEN]; uint16_t as[3]; struct ifnet *ifp; struct rl_softc *sc; struct rl_type *t; struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; int error = 0, i, rid; int unit; uint16_t rl_did = 0; char tn[32]; sc = device_get_softc(dev); unit = device_get_unit(dev); sc->rl_dev = dev; sc->rl_twister_enable = 0; snprintf(tn, sizeof(tn), "dev.rl.%d.twister_enable", unit); TUNABLE_INT_FETCH(tn, &sc->rl_twister_enable); ctx = device_get_sysctl_ctx(sc->rl_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev)); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "twister_enable", CTLFLAG_RD, &sc->rl_twister_enable, 0, ""); mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0); pci_enable_busmaster(dev); /* Map control/status registers. 
*/ rid = RL_RID; sc->rl_res = bus_alloc_resource_any(dev, RL_RES, &rid, RF_ACTIVE); if (sc->rl_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); error = ENXIO; goto fail; } #ifdef notdef /* * Detect the Realtek 8139B. For some reason, this chip is very * unstable when left to autoselect the media * The best workaround is to set the device to the required * media type or to set it to the 10 Meg speed. */ if ((rman_get_end(sc->rl_res) - rman_get_start(sc->rl_res)) == 0xFF) device_printf(dev, "Realtek 8139B detected. Warning, this may be unstable in autoselect mode\n"); #endif sc->rl_btag = rman_get_bustag(sc->rl_res); sc->rl_bhandle = rman_get_bushandle(sc->rl_res); /* Allocate interrupt */ rid = 0; sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->rl_irq[0] == NULL) { device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } /* * Reset the adapter. Only take the lock here as it's needed in * order to call rl_reset(). */ RL_LOCK(sc); rl_reset(sc); RL_UNLOCK(sc); sc->rl_eecmd_read = RL_EECMD_READ_6BIT; rl_read_eeprom(sc, (uint8_t *)&rl_did, 0, 1, 0); if (rl_did != 0x8129) sc->rl_eecmd_read = RL_EECMD_READ_8BIT; /* * Get station address from the EEPROM. */ rl_read_eeprom(sc, (uint8_t *)as, RL_EE_EADDR, 3, 0); for (i = 0; i < 3; i++) { eaddr[(i * 2) + 0] = as[i] & 0xff; eaddr[(i * 2) + 1] = as[i] >> 8; } /* * Now read the exact device type from the EEPROM to find * out if it's an 8129 or 8139. */ rl_read_eeprom(sc, (uint8_t *)&rl_did, RL_EE_PCI_DID, 1, 0); t = rl_devs; sc->rl_type = 0; while(t->rl_name != NULL) { if (rl_did == t->rl_did) { sc->rl_type = t->rl_basetype; break; } t++; } if (sc->rl_type == 0) { device_printf(dev, "unknown device ID: %x assuming 8139\n", rl_did); sc->rl_type = RL_8139; /* * Read RL_IDR register to get ethernet address as accessing * EEPROM may not extract correct address. */ for (i = 0; i < ETHER_ADDR_LEN; i++) eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i); } if ((error = rl_dma_alloc(sc)) != 0) goto fail; ifp = sc->rl_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } /* Do MII setup */ if (mii_phy_probe(dev, &sc->rl_miibus, rl_ifmedia_upd, rl_ifmedia_sts)) { device_printf(dev, "MII without any phy!\n"); error = ENXIO; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = rl_ioctl; ifp->if_start = rl_start; ifp->if_init = rl_init; ifp->if_capabilities = IFCAP_VLAN_MTU; ifp->if_capenable = ifp->if_capabilities; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN; IFQ_SET_READY(&ifp->if_snd); /* * Call MI attach routine. */ ether_ifattach(ifp, eaddr); /* Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->rl_irq[0], INTR_TYPE_NET | INTR_MPSAFE, NULL, rl_intr, sc, &sc->rl_intrhand[0]); if (error) { device_printf(sc->rl_dev, "couldn't set up irq\n"); ether_ifdetach(ifp); } fail: if (error) rl_detach(dev); return (error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. 
*/ static int rl_detach(device_t dev) { struct rl_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->rl_ifp; KASSERT(mtx_initialized(&sc->rl_mtx), ("rl mutex not initialized")); #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif /* These should only be active if attach succeeded */ if (device_is_attached(dev)) { RL_LOCK(sc); rl_stop(sc); RL_UNLOCK(sc); callout_drain(&sc->rl_stat_callout); ether_ifdetach(ifp); } #if 0 sc->suspended = 1; #endif if (sc->rl_miibus) device_delete_child(dev, sc->rl_miibus); bus_generic_detach(dev); if (sc->rl_intrhand[0]) bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]); if (sc->rl_irq[0]) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq[0]); if (sc->rl_res) bus_release_resource(dev, RL_RES, RL_RID, sc->rl_res); if (ifp) if_free(ifp); rl_dma_free(sc); mtx_destroy(&sc->rl_mtx); return (0); } static int rl_dma_alloc(struct rl_softc *sc) { struct rl_dmamap_arg ctx; int error, i; /* * Allocate the parent bus DMA tag appropriate for PCI. */ error = bus_dma_tag_create(bus_get_dma_tag(sc->rl_dev), /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, 0, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->rl_parent_tag); if (error) { device_printf(sc->rl_dev, "failed to create parent DMA tag.\n"); goto fail; } /* Create DMA tag for Rx memory block. */ error = bus_dma_tag_create(sc->rl_parent_tag, /* parent */ RL_RX_8139_BUF_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, 1, /* maxsize,nsegments */ RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->rl_cdata.rl_rx_tag); if (error) { device_printf(sc->rl_dev, "failed to create Rx memory block DMA tag.\n"); goto fail; } /* Create DMA tag for Tx buffer. */ error = bus_dma_tag_create(sc->rl_parent_tag, /* parent */ RL_TX_8139_BUF_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, 1, /* maxsize, nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->rl_cdata.rl_tx_tag); if (error) { device_printf(sc->rl_dev, "failed to create Tx DMA tag.\n"); goto fail; } /* * Allocate DMA'able memory and load DMA map for Rx memory block. */ error = bus_dmamem_alloc(sc->rl_cdata.rl_rx_tag, (void **)&sc->rl_cdata.rl_rx_buf, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->rl_cdata.rl_rx_dmamap); if (error != 0) { device_printf(sc->rl_dev, "failed to allocate Rx DMA memory block.\n"); goto fail; } ctx.rl_busaddr = 0; error = bus_dmamap_load(sc->rl_cdata.rl_rx_tag, sc->rl_cdata.rl_rx_dmamap, sc->rl_cdata.rl_rx_buf, RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, rl_dmamap_cb, &ctx, BUS_DMA_NOWAIT); if (error != 0 || ctx.rl_busaddr == 0) { device_printf(sc->rl_dev, "could not load Rx DMA memory block.\n"); goto fail; } sc->rl_cdata.rl_rx_buf_paddr = ctx.rl_busaddr; /* Create DMA maps for Tx buffers. 
*/
	for (i = 0; i < RL_TX_LIST_CNT; i++) {
		sc->rl_cdata.rl_tx_chain[i] = NULL;
		sc->rl_cdata.rl_tx_dmamap[i] = NULL;
		error = bus_dmamap_create(sc->rl_cdata.rl_tx_tag, 0,
		    &sc->rl_cdata.rl_tx_dmamap[i]);
		if (error != 0) {
			device_printf(sc->rl_dev,
			    "could not create Tx dmamap.\n");
			goto fail;
		}
	}

	/* Leave a few bytes before the start of the RX ring buffer. */
	sc->rl_cdata.rl_rx_buf_ptr = sc->rl_cdata.rl_rx_buf;
	sc->rl_cdata.rl_rx_buf += RL_RX_8139_BUF_RESERVE;

fail:
	return (error);
}

static void
rl_dma_free(struct rl_softc *sc)
{
	int i;

	/* Rx memory block. */
	if (sc->rl_cdata.rl_rx_tag != NULL) {
		if (sc->rl_cdata.rl_rx_dmamap != NULL)
			bus_dmamap_unload(sc->rl_cdata.rl_rx_tag,
			    sc->rl_cdata.rl_rx_dmamap);
		if (sc->rl_cdata.rl_rx_dmamap != NULL &&
		    sc->rl_cdata.rl_rx_buf_ptr != NULL)
			bus_dmamem_free(sc->rl_cdata.rl_rx_tag,
			    sc->rl_cdata.rl_rx_buf_ptr,
			    sc->rl_cdata.rl_rx_dmamap);
		sc->rl_cdata.rl_rx_buf_ptr = NULL;
		sc->rl_cdata.rl_rx_buf = NULL;
		sc->rl_cdata.rl_rx_dmamap = NULL;
		bus_dma_tag_destroy(sc->rl_cdata.rl_rx_tag);
		sc->rl_cdata.rl_rx_tag = NULL;
	}

	/* Tx buffers. */
	if (sc->rl_cdata.rl_tx_tag != NULL) {
		for (i = 0; i < RL_TX_LIST_CNT; i++) {
			if (sc->rl_cdata.rl_tx_dmamap[i] != NULL) {
				bus_dmamap_destroy(
				    sc->rl_cdata.rl_tx_tag,
				    sc->rl_cdata.rl_tx_dmamap[i]);
				sc->rl_cdata.rl_tx_dmamap[i] = NULL;
			}
		}
		bus_dma_tag_destroy(sc->rl_cdata.rl_tx_tag);
		sc->rl_cdata.rl_tx_tag = NULL;
	}

	if (sc->rl_parent_tag != NULL) {
		bus_dma_tag_destroy(sc->rl_parent_tag);
		sc->rl_parent_tag = NULL;
	}
}

/*
 * Initialize the transmit descriptors.
 */
static int
rl_list_tx_init(struct rl_softc *sc)
{
	struct rl_chain_data *cd;
	int i;

	RL_LOCK_ASSERT(sc);

	cd = &sc->rl_cdata;
	for (i = 0; i < RL_TX_LIST_CNT; i++) {
		cd->rl_tx_chain[i] = NULL;
		CSR_WRITE_4(sc,
		    RL_TXADDR0 + (i * sizeof(uint32_t)), 0x0000000);
	}

	sc->rl_cdata.cur_tx = 0;
	sc->rl_cdata.last_tx = 0;

	return (0);
}

static int
rl_list_rx_init(struct rl_softc *sc)
{

	RL_LOCK_ASSERT(sc);

	bzero(sc->rl_cdata.rl_rx_buf_ptr,
	    RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ);
	bus_dmamap_sync(sc->rl_cdata.rl_rx_tag, sc->rl_cdata.rl_rx_dmamap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 *
 * You know there's something wrong with a PCI bus-master chip design
 * when you have to use m_devget().
 *
 * The receive operation is badly documented in the datasheet, so I'll
 * attempt to document it here. The driver provides a buffer area and
 * places its base address in the RX buffer start address register.
 * The chip then begins copying frames into the RX buffer. Each frame
 * is preceded by a 32-bit RX status word which specifies the length
 * of the frame and certain other status bits. Each frame (starting with
 * the status word) is also 32-bit aligned. The frame length is in the
 * first 16 bits of the status word; the lower 15 bits correspond with
 * the 'rx status register' mentioned in the datasheet.
 *
 * Note: to make the Alpha happy, the frame payload needs to be aligned
 * on a 32-bit boundary. To achieve this, we pass RL_ETHER_ALIGN (2 bytes)
 * as the offset argument to m_devget().
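 *
 * As a concrete illustration (status value hypothetical), a status
 * word fetched from the ring as rxstat = 0x0040xxxx decodes in the
 * code below as:
 *
 *   total_len = rxstat >> 16;            // 0x0040 = 64 bytes, CRC included
 *   ok = (rxstat & RL_RXSTAT_RXOK) != 0; // low bits carry status flags
 *
 * A length field of 0xfff0 (RL_RXSTAT_UNFINISHED) means the chip is
 * still copying the frame in and the driver must bail out and retry
 * later. With this change the function also returns rx_npkts, the
 * number of frames passed up, for use by the DEVICE_POLLING framework.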
*/ -static void +static int rl_rxeof(struct rl_softc *sc) { struct mbuf *m; struct ifnet *ifp = sc->rl_ifp; uint8_t *rxbufpos; int total_len = 0; int wrap = 0; + int rx_npkts = 0; uint32_t rxstat; uint16_t cur_rx; uint16_t limit; uint16_t max_bytes, rx_bytes = 0; RL_LOCK_ASSERT(sc); bus_dmamap_sync(sc->rl_cdata.rl_rx_tag, sc->rl_cdata.rl_rx_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); cur_rx = (CSR_READ_2(sc, RL_CURRXADDR) + 16) % RL_RXBUFLEN; /* Do not try to read past this point. */ limit = CSR_READ_2(sc, RL_CURRXBUF) % RL_RXBUFLEN; if (limit < cur_rx) max_bytes = (RL_RXBUFLEN - cur_rx) + limit; else max_bytes = limit - cur_rx; while((CSR_READ_1(sc, RL_COMMAND) & RL_CMD_EMPTY_RXBUF) == 0) { #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { if (sc->rxcycles <= 0) break; sc->rxcycles--; } #endif rxbufpos = sc->rl_cdata.rl_rx_buf + cur_rx; rxstat = le32toh(*(uint32_t *)rxbufpos); /* * Here's a totally undocumented fact for you. When the * RealTek chip is in the process of copying a packet into * RAM for you, the length will be 0xfff0. If you spot a * packet header with this value, you need to stop. The * datasheet makes absolutely no mention of this and * RealTek should be shot for this. */ total_len = rxstat >> 16; if (total_len == RL_RXSTAT_UNFINISHED) break; if (!(rxstat & RL_RXSTAT_RXOK) || total_len < ETHER_MIN_LEN || total_len > ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) { ifp->if_ierrors++; rl_init_locked(sc); - return; + return (rx_npkts); } /* No errors; receive the packet. */ rx_bytes += total_len + 4; /* * XXX The RealTek chip includes the CRC with every * received frame, and there's no way to turn this * behavior off (at least, I can't find anything in * the manual that explains how to do it) so we have * to trim off the CRC manually. */ total_len -= ETHER_CRC_LEN; /* * Avoid trying to read more bytes than we know * the chip has prepared for us. */ if (rx_bytes > max_bytes) break; rxbufpos = sc->rl_cdata.rl_rx_buf + ((cur_rx + sizeof(uint32_t)) % RL_RXBUFLEN); if (rxbufpos == (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN)) rxbufpos = sc->rl_cdata.rl_rx_buf; wrap = (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN) - rxbufpos; if (total_len > wrap) { m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp, NULL); if (m != NULL) m_copyback(m, wrap, total_len - wrap, sc->rl_cdata.rl_rx_buf); cur_rx = (total_len - wrap + ETHER_CRC_LEN); } else { m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp, NULL); cur_rx += total_len + 4 + ETHER_CRC_LEN; } /* Round up to 32-bit boundary. */ cur_rx = (cur_rx + 3) & ~3; CSR_WRITE_2(sc, RL_CURRXADDR, cur_rx - 16); if (m == NULL) { ifp->if_iqdrops++; continue; } ifp->if_ipackets++; RL_UNLOCK(sc); (*ifp->if_input)(ifp, m); RL_LOCK(sc); + rx_npkts++; } /* No need to sync Rx memory block as we didn't modify it. */ + return (rx_npkts); } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. */ static void rl_txeof(struct rl_softc *sc) { struct ifnet *ifp = sc->rl_ifp; uint32_t txstat; RL_LOCK_ASSERT(sc); /* * Go through our tx list and free mbufs for those * frames that have been uploaded. 
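 *
 * The TX status word read back below packs several fields: the
 * OK/underrun/abort flags and a collision count in the high byte
 * (hence the ">> 24"). The early-TX threshold it interacts with is
 * kept in units of 32 bytes in a 6-bit field, so the largest
 * programmable value is 63 * 32 = 2016 bytes, which is why the
 * underrun recovery stops bumping rl_txthresh at 2016.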
*/
	do {
		if (RL_LAST_TXMBUF(sc) == NULL)
			break;
		txstat = CSR_READ_4(sc, RL_LAST_TXSTAT(sc));
		if (!(txstat & (RL_TXSTAT_TX_OK|
		    RL_TXSTAT_TX_UNDERRUN|RL_TXSTAT_TXABRT)))
			break;

		ifp->if_collisions += (txstat & RL_TXSTAT_COLLCNT) >> 24;

		bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_LAST_DMAMAP(sc),
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->rl_cdata.rl_tx_tag,
		    RL_LAST_DMAMAP(sc));
		m_freem(RL_LAST_TXMBUF(sc));
		RL_LAST_TXMBUF(sc) = NULL;

		/*
		 * If there was a transmit underrun, bump the TX threshold.
		 * Make sure not to overflow the 63 * 32 bytes we can
		 * address with the 6 available bits.
		 */
		if ((txstat & RL_TXSTAT_TX_UNDERRUN) &&
		    (sc->rl_txthresh < 2016))
			sc->rl_txthresh += 32;

		if (txstat & RL_TXSTAT_TX_OK)
			ifp->if_opackets++;
		else {
			int oldthresh;

			ifp->if_oerrors++;
			if ((txstat & RL_TXSTAT_TXABRT) ||
			    (txstat & RL_TXSTAT_OUTOFWIN))
				CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
			oldthresh = sc->rl_txthresh;
			/* error recovery */
			rl_init_locked(sc);
			/* restore original threshold */
			sc->rl_txthresh = oldthresh;
			return;
		}
		RL_INC(sc->rl_cdata.last_tx);
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	} while (sc->rl_cdata.last_tx != sc->rl_cdata.cur_tx);

	if (RL_LAST_TXMBUF(sc) == NULL)
		sc->rl_watchdog_timer = 0;
}

static void
rl_twister_update(struct rl_softc *sc)
{
	uint16_t linktest;

	/*
	 * Table provided by RealTek (Kinston) for
	 * Linux driver. Values undocumented otherwise.
	 */
	static const uint32_t param[4][4] = {
		{0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43},
		{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
		{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
		{0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83}
	};

	/*
	 * Tune the so-called twister registers of the RTL8139. These
	 * are used to compensate for impedance mismatches. The
	 * method for tuning these registers is undocumented and the
	 * following procedure is collected from public sources.
	 */
	switch (sc->rl_twister) {
	case CHK_LINK:
		/*
		 * If we have a sufficient link, then we can proceed in
		 * the state machine to the next stage. If not, then
		 * disable further tuning after writing sane defaults.
		 */
		if (CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_LINK_OK) {
			CSR_WRITE_2(sc, RL_CSCFG,
			    RL_CSCFG_LINK_DOWN_OFF_CMD);
			sc->rl_twister = FIND_ROW;
		} else {
			CSR_WRITE_2(sc, RL_CSCFG, RL_CSCFG_LINK_DOWN_CMD);
			CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_CBL_TEST);
			CSR_WRITE_4(sc, RL_PARA78, RL_PARA78_DEF);
			CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_DEF);
			sc->rl_twister = DONE;
		}
		break;
	case FIND_ROW:
		/*
		 * Read how long it took to see the echo to find the tuning
		 * row to use.
		 */
		linktest = CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_STATUS;
		if (linktest == RL_CSCFG_ROW3)
			sc->rl_twist_row = 3;
		else if (linktest == RL_CSCFG_ROW2)
			sc->rl_twist_row = 2;
		else if (linktest == RL_CSCFG_ROW1)
			sc->rl_twist_row = 1;
		else
			sc->rl_twist_row = 0;
		sc->rl_twist_col = 0;
		sc->rl_twister = SET_PARAM;
		break;
	case SET_PARAM:
		if (sc->rl_twist_col == 0)
			CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_RESET);
		CSR_WRITE_4(sc, RL_PARA7C,
		    param[sc->rl_twist_row][sc->rl_twist_col]);
		if (++sc->rl_twist_col == 4) {
			if (sc->rl_twist_row == 3)
				sc->rl_twister = RECHK_LONG;
			else
				sc->rl_twister = DONE;
		}
		break;
	case RECHK_LONG:
		/*
		 * For long cables, we have to double check to make sure we
		 * don't mistune.
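 *
 * (Overall flow of this undocumented state machine as implemented
 * here: CHK_LINK -> FIND_ROW -> SET_PARAM, run once per column;
 * rows 0-2 then finish at DONE, while the long-cable row 3 is
 * re-checked here and either finishes or drops to RETUNE and runs
 * SET_PARAM again on the next row down.)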
*/ linktest = CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_STATUS; if (linktest == RL_CSCFG_ROW3) sc->rl_twister = DONE; else { CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_RETUNE); sc->rl_twister = RETUNE; } break; case RETUNE: /* Retune for a shorter cable (try column 2) */ CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_CBL_TEST); CSR_WRITE_4(sc, RL_PARA78, RL_PARA78_DEF); CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_DEF); CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_RESET); sc->rl_twist_row--; sc->rl_twist_col = 0; sc->rl_twister = SET_PARAM; break; case DONE: break; } } static void rl_tick(void *xsc) { struct rl_softc *sc = xsc; struct mii_data *mii; int ticks; RL_LOCK_ASSERT(sc); /* * If we're doing the twister cable calibration, then we need to defer * watchdog timeouts. This is a no-op in normal operations, but * can falsely trigger when the cable calibration takes a while and * there was traffic ready to go when rl was started. * * We don't defer mii_tick since that updates the mii status, which * helps the twister process, at least according to similar patches * for the Linux driver I found online while doing the fixes. Worst * case is a few extra mii reads during calibration. */ mii = device_get_softc(sc->rl_miibus); mii_tick(mii); if ((sc->rl_flags & RL_FLAG_LINK) == 0) rl_miibus_statchg(sc->rl_dev); if (sc->rl_twister_enable) { if (sc->rl_twister == DONE) rl_watchdog(sc); else rl_twister_update(sc); if (sc->rl_twister == DONE) ticks = hz; else ticks = hz / 10; } else { rl_watchdog(sc); ticks = hz; } callout_reset(&sc->rl_stat_callout, ticks, rl_tick, sc); } #ifdef DEVICE_POLLING -static void +static int rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct rl_softc *sc = ifp->if_softc; + int rx_npkts = 0; RL_LOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) - rl_poll_locked(ifp, cmd, count); + rx_npkts = rl_poll_locked(ifp, cmd, count); RL_UNLOCK(sc); + return (rx_npkts); } -static void +static int rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct rl_softc *sc = ifp->if_softc; + int rx_npkts; RL_LOCK_ASSERT(sc); sc->rxcycles = count; - rl_rxeof(sc); + rx_npkts = rl_rxeof(sc); rl_txeof(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) rl_start_locked(ifp); if (cmd == POLL_AND_CHECK_STATUS) { uint16_t status; /* We should also check the status register. */ status = CSR_READ_2(sc, RL_ISR); if (status == 0xffff) - return; + return (rx_npkts); if (status != 0) CSR_WRITE_2(sc, RL_ISR, status); /* XXX We should check behaviour on receiver stalls. */ if (status & RL_ISR_SYSTEM_ERR) rl_init_locked(sc); } + return (rx_npkts); } #endif /* DEVICE_POLLING */ static void rl_intr(void *arg) { struct rl_softc *sc = arg; struct ifnet *ifp = sc->rl_ifp; uint16_t status; RL_LOCK(sc); if (sc->suspended) goto done_locked; #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) goto done_locked; #endif for (;;) { status = CSR_READ_2(sc, RL_ISR); /* If the card has gone away, the read returns 0xffff. */ if (status == 0xffff) break; if (status != 0) CSR_WRITE_2(sc, RL_ISR, status); if ((status & RL_INTRS) == 0) break; if (status & RL_ISR_RX_OK) rl_rxeof(sc); if (status & RL_ISR_RX_ERR) rl_rxeof(sc); if ((status & RL_ISR_TX_OK) || (status & RL_ISR_TX_ERR)) rl_txeof(sc); if (status & RL_ISR_SYSTEM_ERR) rl_init_locked(sc); } if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) rl_start_locked(ifp); done_locked: RL_UNLOCK(sc); } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. 
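 *
 * The chip imposes two constraints handled below: the frame must sit
 * in a single, longword-aligned buffer, and short frames must be
 * padded out to RL_MIN_FRAMELEN by software. For example (numbers
 * hypothetical), a 46-byte frame gets padlen = RL_MIN_FRAMELEN - 46
 * bytes of zeros appended before being handed to
 * bus_dmamap_load_mbuf_sg().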
*/
static int
rl_encap(struct rl_softc *sc, struct mbuf **m_head)
{
	struct mbuf *m;
	bus_dma_segment_t txsegs[1];
	int error, nsegs, padlen;

	RL_LOCK_ASSERT(sc);

	m = *m_head;
	padlen = 0;

	/*
	 * Hardware doesn't auto-pad, so we have to make sure to
	 * pad short frames out to the minimum frame length.
	 */
	if (m->m_pkthdr.len < RL_MIN_FRAMELEN)
		padlen = RL_MIN_FRAMELEN - m->m_pkthdr.len;

	/*
	 * The RealTek is brain damaged and wants longword-aligned
	 * TX buffers, plus we can only have one fragment buffer
	 * per packet. We have to copy pretty much all the time.
	 */
	if (m->m_next != NULL || (mtod(m, uintptr_t) & 3) != 0 ||
	    (padlen > 0 && M_TRAILINGSPACE(m) < padlen)) {
		m = m_defrag(*m_head, M_DONTWAIT);
		if (m == NULL) {
			m_freem(*m_head);
			*m_head = NULL;
			return (ENOMEM);
		}
	}
	*m_head = m;

	if (padlen > 0) {
		/*
		 * Make security-conscious people happy: zero out the
		 * bytes in the pad area, since we don't know what
		 * this mbuf cluster buffer's previous user might
		 * have left in it.
		 */
		bzero(mtod(m, char *) + m->m_pkthdr.len, padlen);
		m->m_pkthdr.len += padlen;
		m->m_len = m->m_pkthdr.len;
	}

	error = bus_dmamap_load_mbuf_sg(sc->rl_cdata.rl_tx_tag,
	    RL_CUR_DMAMAP(sc), m, txsegs, &nsegs, 0);
	if (error != 0)
		return (error);
	if (nsegs == 0) {
		m_freem(*m_head);
		*m_head = NULL;
		return (EIO);
	}

	RL_CUR_TXMBUF(sc) = m;
	bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_CUR_DMAMAP(sc),
	    BUS_DMASYNC_PREWRITE);
	CSR_WRITE_4(sc, RL_CUR_TXADDR(sc), RL_ADDR_LO(txsegs[0].ds_addr));

	return (0);
}

/*
 * Main transmit routine.
 */
static void
rl_start(struct ifnet *ifp)
{
	struct rl_softc *sc = ifp->if_softc;

	RL_LOCK(sc);
	rl_start_locked(ifp);
	RL_UNLOCK(sc);
}

static void
rl_start_locked(struct ifnet *ifp)
{
	struct rl_softc *sc = ifp->if_softc;
	struct mbuf *m_head = NULL;

	RL_LOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0)
		return;

	while (RL_CUR_TXMBUF(sc) == NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);

		if (m_head == NULL)
			break;

		if (rl_encap(sc, &m_head)) {
			if (m_head == NULL)
				break;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		/* Pass a copy of this mbuf chain to the bpf subsystem. */
		BPF_MTAP(ifp, RL_CUR_TXMBUF(sc));

		/* Transmit the frame. */
		CSR_WRITE_4(sc, RL_CUR_TXSTAT(sc),
		    RL_TXTHRESH(sc->rl_txthresh) |
		    RL_CUR_TXMBUF(sc)->m_pkthdr.len);

		RL_INC(sc->rl_cdata.cur_tx);

		/* Set a timeout in case the chip goes out to lunch. */
		sc->rl_watchdog_timer = 5;
	}

	/*
	 * We broke out of the loop because all our TX slots are
	 * full. Mark the NIC as busy until it drains some of the
	 * packets from the queue.
	 */
	if (RL_CUR_TXMBUF(sc) != NULL)
		ifp->if_drv_flags |= IFF_DRV_OACTIVE;
}

static void
rl_init(void *xsc)
{
	struct rl_softc *sc = xsc;

	RL_LOCK(sc);
	rl_init_locked(sc);
	RL_UNLOCK(sc);
}

static void
rl_init_locked(struct rl_softc *sc)
{
	struct ifnet *ifp = sc->rl_ifp;
	struct mii_data *mii;
	uint32_t rxcfg = 0;
	uint32_t eaddr[2];

	RL_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->rl_miibus);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	rl_stop(sc);
	rl_reset(sc);
	if (sc->rl_twister_enable) {
		/*
		 * Reset twister register tuning state. The twister
		 * registers and their tuning are undocumented, but
		 * are necessary to cope with bad links. rl_twister =
		 * DONE here will disable this entirely.
		 */
		sc->rl_twister = CHK_LINK;
	}

	/*
	 * Init our MAC address. Even though the chipset
	 * documentation doesn't mention it, we need to enter "Config
	 * register write enable" mode to modify the ID registers.
*/ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); bzero(eaddr, sizeof(eaddr)); bcopy(IF_LLADDR(sc->rl_ifp), eaddr, ETHER_ADDR_LEN); CSR_WRITE_STREAM_4(sc, RL_IDR0, eaddr[0]); CSR_WRITE_STREAM_4(sc, RL_IDR4, eaddr[1]); CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); /* Init the RX memory block pointer register. */ CSR_WRITE_4(sc, RL_RXADDR, sc->rl_cdata.rl_rx_buf_paddr + RL_RX_8139_BUF_RESERVE); /* Init TX descriptors. */ rl_list_tx_init(sc); /* Init Rx memory block. */ rl_list_rx_init(sc); /* * Enable transmit and receive. */ CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); /* * Set the initial TX and RX configuration. */ CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG); /* Set the individual bit to receive frames for this host only. */ rxcfg = CSR_READ_4(sc, RL_RXCFG); rxcfg |= RL_RXCFG_RX_INDIV; /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) { rxcfg |= RL_RXCFG_RX_ALLPHYS; CSR_WRITE_4(sc, RL_RXCFG, rxcfg); } else { rxcfg &= ~RL_RXCFG_RX_ALLPHYS; CSR_WRITE_4(sc, RL_RXCFG, rxcfg); } /* Set capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) { rxcfg |= RL_RXCFG_RX_BROAD; CSR_WRITE_4(sc, RL_RXCFG, rxcfg); } else { rxcfg &= ~RL_RXCFG_RX_BROAD; CSR_WRITE_4(sc, RL_RXCFG, rxcfg); } /* Program the multicast filter, if necessary. */ rl_setmulti(sc); #ifdef DEVICE_POLLING /* Disable interrupts if we are polling. */ if (ifp->if_capenable & IFCAP_POLLING) CSR_WRITE_2(sc, RL_IMR, 0); else #endif /* Enable interrupts. */ CSR_WRITE_2(sc, RL_IMR, RL_INTRS); /* Set initial TX threshold */ sc->rl_txthresh = RL_TX_THRESH_INIT; /* Start RX/TX process. */ CSR_WRITE_4(sc, RL_MISSEDPKT, 0); /* Enable receiver and transmitter. */ CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); sc->rl_flags &= ~RL_FLAG_LINK; mii_mediachg(mii); CSR_WRITE_1(sc, RL_CFG1, RL_CFG1_DRVLOAD|RL_CFG1_FULLDUPLEX); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&sc->rl_stat_callout, hz, rl_tick, sc); } /* * Set media options. */ static int rl_ifmedia_upd(struct ifnet *ifp) { struct rl_softc *sc = ifp->if_softc; struct mii_data *mii; mii = device_get_softc(sc->rl_miibus); RL_LOCK(sc); mii_mediachg(mii); RL_UNLOCK(sc); return (0); } /* * Report current media status. 
*/ static void rl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct rl_softc *sc = ifp->if_softc; struct mii_data *mii; mii = device_get_softc(sc->rl_miibus); RL_LOCK(sc); mii_pollstat(mii); RL_UNLOCK(sc); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static int rl_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct ifreq *ifr = (struct ifreq *)data; struct mii_data *mii; struct rl_softc *sc = ifp->if_softc; int error = 0; switch (command) { case SIOCSIFFLAGS: RL_LOCK(sc); if (ifp->if_flags & IFF_UP) { rl_init_locked(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) rl_stop(sc); } RL_UNLOCK(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: RL_LOCK(sc); rl_setmulti(sc); RL_UNLOCK(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->rl_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: #ifdef DEVICE_POLLING if (ifr->ifr_reqcap & IFCAP_POLLING && !(ifp->if_capenable & IFCAP_POLLING)) { error = ether_poll_register(rl_poll, ifp); if (error) return(error); RL_LOCK(sc); /* Disable interrupts */ CSR_WRITE_2(sc, RL_IMR, 0x0000); ifp->if_capenable |= IFCAP_POLLING; RL_UNLOCK(sc); return (error); } if (!(ifr->ifr_reqcap & IFCAP_POLLING) && ifp->if_capenable & IFCAP_POLLING) { error = ether_poll_deregister(ifp); /* Enable interrupts. */ RL_LOCK(sc); CSR_WRITE_2(sc, RL_IMR, RL_INTRS); ifp->if_capenable &= ~IFCAP_POLLING; RL_UNLOCK(sc); return (error); } #endif /* DEVICE_POLLING */ break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void rl_watchdog(struct rl_softc *sc) { RL_LOCK_ASSERT(sc); if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer >0) return; device_printf(sc->rl_dev, "watchdog timeout\n"); sc->rl_ifp->if_oerrors++; rl_txeof(sc); rl_rxeof(sc); rl_init_locked(sc); } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void rl_stop(struct rl_softc *sc) { register int i; struct ifnet *ifp = sc->rl_ifp; RL_LOCK_ASSERT(sc); sc->rl_watchdog_timer = 0; callout_stop(&sc->rl_stat_callout); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); sc->rl_flags &= ~RL_FLAG_LINK; CSR_WRITE_1(sc, RL_COMMAND, 0x00); CSR_WRITE_2(sc, RL_IMR, 0x0000); for (i = 0; i < RL_TIMEOUT; i++) { DELAY(10); if ((CSR_READ_1(sc, RL_COMMAND) & (RL_CMD_RX_ENB | RL_CMD_TX_ENB)) == 0) break; } if (i == RL_TIMEOUT) device_printf(sc->rl_dev, "Unable to stop Tx/Rx MAC\n"); /* * Free the TX list buffers. */ for (i = 0; i < RL_TX_LIST_CNT; i++) { if (sc->rl_cdata.rl_tx_chain[i] != NULL) { if (sc->rl_cdata.rl_tx_chain[i] != NULL) { bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, sc->rl_cdata.rl_tx_dmamap[i], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->rl_cdata.rl_tx_tag, sc->rl_cdata.rl_tx_dmamap[i]); m_freem(sc->rl_cdata.rl_tx_chain[i]); sc->rl_cdata.rl_tx_chain[i] = NULL; } CSR_WRITE_4(sc, RL_TXADDR0 + (i * sizeof(uint32_t)), 0x0000000); } } } /* * Device suspend routine. Stop the interface and save some PCI * settings in case the BIOS doesn't restore them properly on * resume. */ static int rl_suspend(device_t dev) { struct rl_softc *sc; sc = device_get_softc(dev); RL_LOCK(sc); rl_stop(sc); sc->suspended = 1; RL_UNLOCK(sc); return (0); } /* * Device resume routine. Restore some PCI settings in case the BIOS * doesn't, re-enable busmastering, and restart the interface if * appropriate. 
*/ static int rl_resume(device_t dev) { struct rl_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->rl_ifp; RL_LOCK(sc); /* reinitialize interface if necessary */ if (ifp->if_flags & IFF_UP) rl_init_locked(sc); sc->suspended = 0; RL_UNLOCK(sc); return (0); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static int rl_shutdown(device_t dev) { struct rl_softc *sc; sc = device_get_softc(dev); RL_LOCK(sc); rl_stop(sc); RL_UNLOCK(sc); return (0); } Index: head/sys/sys/param.h =================================================================== --- head/sys/sys/param.h (revision 193095) +++ head/sys/sys/param.h (revision 193096) @@ -1,318 +1,318 @@ /*- * Copyright (c) 1982, 1986, 1989, 1993 * The Regents of the University of California. All rights reserved. * (c) UNIX System Laboratories, Inc. * All or some portions of this file are derived from material licensed * to the University of California by American Telephone and Telegraph * Co. or Unix System Laboratories, Inc. and are reproduced herein with * the permission of UNIX System Laboratories, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)param.h 8.3 (Berkeley) 4/4/95 * $FreeBSD$ */ #ifndef _SYS_PARAM_H_ #define _SYS_PARAM_H_ #include #define BSD 199506 /* System version (year & month). */ #define BSD4_3 1 #define BSD4_4 1 /* * __FreeBSD_version numbers are documented in the Porter's Handbook. * If you bump the version for any reason, you should update the documentation * there. * Currently this lives here: * * doc/en_US.ISO8859-1/books/porters-handbook/book.sgml * * scheme is: Rxx * 'R' is 0 if release branch or x.0-CURRENT before RELENG_*_0 * is created, otherwise 1. */ #undef __FreeBSD_version -#define __FreeBSD_version 800094 /* Master, propagated to newvers */ +#define __FreeBSD_version 800095 /* Master, propagated to newvers */ #ifndef LOCORE #include #endif /* * Machine-independent constants (some used in following include files). * Redefined constants are from POSIX 1003.1 limits file. 
* * MAXCOMLEN should be >= sizeof(ac_comm) (see ) * MAXLOGNAME should be == UT_NAMESIZE+1 (see ) */ #include #define MAXCOMLEN 19 /* max command name remembered */ #define MAXINTERP 32 /* max interpreter file name length */ #define MAXLOGNAME 17 /* max login name length (incl. NUL) */ #define MAXUPRC CHILD_MAX /* max simultaneous processes */ #define NCARGS ARG_MAX /* max bytes for an exec function */ #define NGROUPS NGROUPS_MAX /* max number groups */ #define NOFILE OPEN_MAX /* max open files per process */ #define NOGROUP 65535 /* marker for empty group set member */ #define MAXHOSTNAMELEN 256 /* max hostname size */ #define SPECNAMELEN 63 /* max length of devicename */ /* More types and definitions used throughout the kernel. */ #ifdef _KERNEL #include #include #ifndef LOCORE #include #include #endif #ifndef FALSE #define FALSE 0 #endif #ifndef TRUE #define TRUE 1 #endif #endif #ifndef _KERNEL /* Signals. */ #include #endif /* Machine type dependent parameters. */ #include #ifndef _KERNEL #include #endif #ifndef _NO_NAMESPACE_POLLUTION #ifndef DEV_BSHIFT #define DEV_BSHIFT 9 /* log2(DEV_BSIZE) */ #endif #define DEV_BSIZE (1<>PAGE_SHIFT) #endif /* * btodb() is messy and perhaps slow because `bytes' may be an off_t. We * want to shift an unsigned type to avoid sign extension and we don't * want to widen `bytes' unnecessarily. Assume that the result fits in * a daddr_t. */ #ifndef btodb #define btodb(bytes) /* calculates (bytes / DEV_BSIZE) */ \ (sizeof (bytes) > sizeof(long) \ ? (daddr_t)((unsigned long long)(bytes) >> DEV_BSHIFT) \ : (daddr_t)((unsigned long)(bytes) >> DEV_BSHIFT)) #endif #ifndef dbtob #define dbtob(db) /* calculates (db * DEV_BSIZE) */ \ ((off_t)(db) << DEV_BSHIFT) #endif #endif /* _NO_NAMESPACE_POLLUTION */ #define PRIMASK 0x0ff #define PCATCH 0x100 /* OR'd with pri for tsleep to check signals */ #define PDROP 0x200 /* OR'd with pri to stop re-entry of interlock mutex */ #define NZERO 0 /* default "nice" */ #define NBBY 8 /* number of bits in a byte */ #define NBPW sizeof(int) /* number of bytes per word (integer) */ #define CMASK 022 /* default file mask: S_IWGRP|S_IWOTH */ #define NODEV (dev_t)(-1) /* non-existent device */ #define CBLOCK 128 /* Clist block size, must be a power of 2. */ /* Data chars/clist. */ #define CBSIZE (CBLOCK - sizeof(struct cblock *)) #define CROUND (CBLOCK - 1) /* Clist rounding. */ /* * File system parameters and macros. * * MAXBSIZE - Filesystems are made out of blocks of at most MAXBSIZE bytes * per block. MAXBSIZE may be made larger without effecting * any existing filesystems as long as it does not exceed MAXPHYS, * and may be made smaller at the risk of not being able to use * filesystems which require a block size exceeding MAXBSIZE. * * BKVASIZE - Nominal buffer space per buffer, in bytes. BKVASIZE is the * minimum KVM memory reservation the kernel is willing to make. * Filesystems can of course request smaller chunks. Actual * backing memory uses a chunk size of a page (PAGE_SIZE). * * If you make BKVASIZE too small you risk seriously fragmenting * the buffer KVM map which may slow things down a bit. If you * make it too big the kernel will not be able to optimally use * the KVM memory reserved for the buffer cache and will wind * up with too-few buffers. * * The default is 16384, roughly 2x the block size used by a * normal UFS filesystem. 
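 *
 * As a worked example of the tradeoff (buffer count hypothetical):
 * with BKVASIZE = 16384, a system sizing the buffer cache at, say,
 * 1000 buffers reserves about 1000 * 16384 bytes (~16 MB) of KVM;
 * halving BKVASIZE would double the number of buffers addressable
 * in the same reservation, at the cost of more KVM map fragmentation.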
*/ #define MAXBSIZE 65536 /* must be power of 2 */ #define BKVASIZE 16384 /* must be power of 2 */ #define BKVAMASK (BKVASIZE-1) /* * MAXPATHLEN defines the longest permissible path length after expanding * symbolic links. It is used to allocate a temporary buffer from the buffer * pool in which to do the name expansion, hence should be a power of two, * and must be less than or equal to MAXBSIZE. MAXSYMLINKS defines the * maximum number of symbolic links that may be expanded in a path name. * It should be set high enough to allow all legitimate uses, but halt * infinite loops reasonably quickly. */ #define MAXPATHLEN PATH_MAX #define MAXSYMLINKS 32 /* Bit map related macros. */ #define setbit(a,i) (((unsigned char *)(a))[(i)/NBBY] |= 1<<((i)%NBBY)) #define clrbit(a,i) (((unsigned char *)(a))[(i)/NBBY] &= ~(1<<((i)%NBBY))) #define isset(a,i) \ (((const unsigned char *)(a))[(i)/NBBY] & (1<<((i)%NBBY))) #define isclr(a,i) \ ((((const unsigned char *)(a))[(i)/NBBY] & (1<<((i)%NBBY))) == 0) /* Macros for counting and rounding. */ #ifndef howmany #define howmany(x, y) (((x)+((y)-1))/(y)) #endif #define rounddown(x, y) (((x)/(y))*(y)) #define roundup(x, y) ((((x)+((y)-1))/(y))*(y)) /* to any y */ #define roundup2(x, y) (((x)+((y)-1))&(~((y)-1))) /* if y is powers of two */ #define powerof2(x) ((((x)-1)&(x))==0) /* Macros for min/max. */ #define MIN(a,b) (((a)<(b))?(a):(b)) #define MAX(a,b) (((a)>(b))?(a):(b)) #ifdef _KERNEL /* * Basic byte order function prototypes for non-inline functions. */ #ifndef LOCORE #ifndef _BYTEORDER_PROTOTYPED #define _BYTEORDER_PROTOTYPED __BEGIN_DECLS __uint32_t htonl(__uint32_t); __uint16_t htons(__uint16_t); __uint32_t ntohl(__uint32_t); __uint16_t ntohs(__uint16_t); __END_DECLS #endif #endif #ifndef lint #ifndef _BYTEORDER_FUNC_DEFINED #define _BYTEORDER_FUNC_DEFINED #define htonl(x) __htonl(x) #define htons(x) __htons(x) #define ntohl(x) __ntohl(x) #define ntohs(x) __ntohs(x) #endif /* !_BYTEORDER_FUNC_DEFINED */ #endif /* lint */ #endif /* _KERNEL */ /* * Scale factor for scaled integers used to count %cpu time and load avgs. * * The number of CPU `tick's that map to a unique `%age' can be expressed * by the formula (1 / (2 ^ (FSHIFT - 11))). The maximum load average that * can be calculated (assuming 32 bits) can be closely approximated using * the formula (2 ^ (2 * (16 - FSHIFT))) for (FSHIFT < 15). * * For the scheduler to maintain a 1:1 mapping of CPU `tick' to `%age', * FSHIFT must be at least 11; this gives us a maximum load avg of ~1024. */ #define FSHIFT 11 /* bits to right of fixed binary point */ #define FSCALE (1<> (PAGE_SHIFT - DEV_BSHIFT)) #define ctodb(db) /* calculates pages to devblks */ \ ((db) << (PAGE_SHIFT - DEV_BSHIFT)) /* * Given the pointer x to the member m of the struct s, return * a pointer to the containing structure. */ #define member2struct(s, m, x) \ ((struct s *)(void *)((char *)(x) - offsetof(struct s, m))) #endif /* _SYS_PARAM_H_ */
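For reference, a minimal standalone sketch of how the counting and
rounding macros above behave (userland C with the macros copied
locally; the input values are illustrative, not part of the change):

#include <stdio.h>

/* Local copies of the param.h macros exercised below. */
#define howmany(x, y)   (((x)+((y)-1))/(y))
#define rounddown(x, y) (((x)/(y))*(y))
#define roundup(x, y)   ((((x)+((y)-1))/(y))*(y))
#define roundup2(x, y)  (((x)+((y)-1))&(~((y)-1)))
#define powerof2(x)     ((((x)-1)&(x))==0)

int
main(void)
{
	/* 1000 bytes span two 512-byte device blocks: howmany() rounds up. */
	printf("%d\n", howmany(1000, 512));   /* 2 */
	printf("%d\n", rounddown(1000, 512)); /* 512 */
	printf("%d\n", roundup(1000, 512));   /* 1024 */
	/* roundup2() is valid only when the alignment is a power of two. */
	printf("%d\n", roundup2(1000, 512));  /* 1024 */
	printf("%d\n", powerof2(512));        /* 1 (true) */
	return (0);
}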