Index: stable/6/sys/dev/sk/if_sk.c =================================================================== --- stable/6/sys/dev/sk/if_sk.c (revision 159562) +++ stable/6/sys/dev/sk/if_sk.c (revision 159563) @@ -1,3065 +1,4170 @@ /* $OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $ */ /*- * Copyright (c) 1997, 1998, 1999, 2000 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ /*- * Copyright (c) 2003 Nathan L. Binkert * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /* * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports * the SK-984x series adapters, both single port and dual port. * References: * The XaQti XMAC II datasheet, * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf * The SysKonnect GEnesis manual, http://www.syskonnect.com * * Note: XaQti has been aquired by Vitesse, and Vitesse does not have the * XMAC II datasheet online. I have put my copy at people.freebsd.org as a * convenience to others until Vitesse corrects this problem: * * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf * * Written by Bill Paul * Department of Electrical Engineering * Columbia University, New York City */ /* * The SysKonnect gigabit ethernet adapters consist of two main * components: the SysKonnect GEnesis controller chip and the XaQti Corp. * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC * components and a PHY while the GEnesis controller provides a PCI * interface with DMA support. Each card may have between 512K and * 2MB of SRAM on board depending on the configuration. 
* * The SysKonnect GEnesis controller can have either one or two XMAC * chips connected to it, allowing single or dual port NIC configurations. * SysKonnect has the distinction of being the only vendor on the market * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs, * dual DMA queues, packet/MAC/transmit arbiters and direct access to the * XMAC registers. This driver takes advantage of these features to allow * both XMACs to operate as independent interfaces. */ #include #include -#include +#include +#include #include #include #include #include #include +#include #include #include +#include +#include #include #include -#include #include #include #include +#include -#include +#include +#include +#include -#include /* for vtophys */ -#include /* for vtophys */ #include +#include #include -#include #include #include #include #include #include #include #if 0 #define SK_USEIOSPACE #endif #include #include #include MODULE_DEPEND(sk, pci, 1, 1, 1); MODULE_DEPEND(sk, ether, 1, 1, 1); MODULE_DEPEND(sk, miibus, 1, 1, 1); /* "device miibus" required. See GENERIC if you get errors here. 
*/ #include "miibus_if.h" #ifndef lint static const char rcsid[] = "$FreeBSD$"; #endif static struct sk_type sk_devs[] = { { VENDORID_SK, DEVICEID_SK_V1, "SysKonnect Gigabit Ethernet (V1.0)" }, { VENDORID_SK, DEVICEID_SK_V2, "SysKonnect Gigabit Ethernet (V2.0)" }, { VENDORID_MARVELL, DEVICEID_SK_V2, "Marvell Gigabit Ethernet" }, +#ifdef not_yet { VENDORID_MARVELL, + DEVICEID_MRVL_4360, + "Marvell 88E8052 Gigabit Ethernet Controller" + }, + { + VENDORID_MARVELL, + DEVICEID_MRVL_4361, + "Marvell 88E8050 Gigabit Ethernet Controller" + }, + { + VENDORID_MARVELL, + DEVICEID_MRVL_4362, + "Marvell 88E8053 Gigabit Ethernet Controller" + }, +#endif + { + VENDORID_MARVELL, DEVICEID_BELKIN_5005, "Belkin F5D5005 Gigabit Ethernet" }, { VENDORID_3COM, DEVICEID_3COM_3C940, "3Com 3C940 Gigabit Ethernet" }, { VENDORID_LINKSYS, DEVICEID_LINKSYS_EG1032, "Linksys EG1032 Gigabit Ethernet" }, { VENDORID_DLINK, DEVICEID_DLINK_DGE530T, "D-Link DGE-530T Gigabit Ethernet" }, { 0, 0, NULL } }; static int skc_probe(device_t); static int skc_attach(device_t); static int skc_detach(device_t); static void skc_shutdown(device_t); +static int skc_suspend(device_t); +static int skc_resume(device_t); static int sk_detach(device_t); static int sk_probe(device_t); static int sk_attach(device_t); static void sk_tick(void *); +static void sk_yukon_tick(void *); static void sk_intr(void *); static void sk_intr_xmac(struct sk_if_softc *); static void sk_intr_bcom(struct sk_if_softc *); static void sk_intr_yukon(struct sk_if_softc *); +static __inline void sk_rxcksum(struct ifnet *, struct mbuf *, u_int32_t); +static __inline int sk_rxvalid(struct sk_softc *, u_int32_t, u_int32_t); static void sk_rxeof(struct sk_if_softc *); +static void sk_jumbo_rxeof(struct sk_if_softc *); static void sk_txeof(struct sk_if_softc *); -static int sk_encap(struct sk_if_softc *, struct mbuf *, - u_int32_t *); +static void sk_txcksum(struct ifnet *, struct mbuf *, struct sk_tx_desc *); +static int sk_encap(struct sk_if_softc 
*, struct mbuf **); static void sk_start(struct ifnet *); static void sk_start_locked(struct ifnet *); static int sk_ioctl(struct ifnet *, u_long, caddr_t); static void sk_init(void *); static void sk_init_locked(struct sk_if_softc *); static void sk_init_xmac(struct sk_if_softc *); static void sk_init_yukon(struct sk_if_softc *); static void sk_stop(struct sk_if_softc *); static void sk_watchdog(struct ifnet *); static int sk_ifmedia_upd(struct ifnet *); static void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *); static void sk_reset(struct sk_softc *); -static int sk_newbuf(struct sk_if_softc *, - struct sk_chain *, struct mbuf *); -static int sk_alloc_jumbo_mem(struct sk_if_softc *); -static void sk_free_jumbo_mem(struct sk_if_softc *); +static __inline void sk_discard_rxbuf(struct sk_if_softc *, int); +static __inline void sk_discard_jumbo_rxbuf(struct sk_if_softc *, int); +static int sk_newbuf(struct sk_if_softc *, int); +static int sk_jumbo_newbuf(struct sk_if_softc *, int); +static void sk_dmamap_cb(void *, bus_dma_segment_t *, int, int); +static int sk_dma_alloc(struct sk_if_softc *); +static void sk_dma_free(struct sk_if_softc *); static void *sk_jalloc(struct sk_if_softc *); static void sk_jfree(void *, void *); static int sk_init_rx_ring(struct sk_if_softc *); +static int sk_init_jumbo_rx_ring(struct sk_if_softc *); static void sk_init_tx_ring(struct sk_if_softc *); static u_int32_t sk_win_read_4(struct sk_softc *, int); static u_int16_t sk_win_read_2(struct sk_softc *, int); static u_int8_t sk_win_read_1(struct sk_softc *, int); static void sk_win_write_4(struct sk_softc *, int, u_int32_t); static void sk_win_write_2(struct sk_softc *, int, u_int32_t); static void sk_win_write_1(struct sk_softc *, int, u_int32_t); static u_int8_t sk_vpd_readbyte(struct sk_softc *, int); static void sk_vpd_read_res(struct sk_softc *, struct vpd_res *, int); static void sk_vpd_read(struct sk_softc *); static int sk_miibus_readreg(device_t, int, int); static int 
sk_miibus_writereg(device_t, int, int, int); static void sk_miibus_statchg(device_t); static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int); static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int, int); static void sk_xmac_miibus_statchg(struct sk_if_softc *); static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int); static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int, int); static void sk_marv_miibus_statchg(struct sk_if_softc *); static uint32_t sk_xmchash(const uint8_t *); static uint32_t sk_gmchash(const uint8_t *); -static void sk_setfilt(struct sk_if_softc *, caddr_t, int); +static void sk_setfilt(struct sk_if_softc *, u_int16_t *, int); static void sk_setmulti(struct sk_if_softc *); static void sk_setpromisc(struct sk_if_softc *); static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high); static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS); #ifdef SK_USEIOSPACE #define SK_RES SYS_RES_IOPORT #define SK_RID SK_PCI_LOIO #else #define SK_RES SYS_RES_MEMORY #define SK_RID SK_PCI_LOMEM #endif /* + * It seems that SK-NET GENESIS supports very simple checksum offload + * capability for Tx and I believe it can generate 0 checksum value for + * UDP packets in Tx as the hardware can't differentiate UDP packets from + * TCP packets. 0 checksum value for UDP packet is an invalid one as it + * means the sender didn't perform checksum computation. For safety I + * disabled UDP checksum offload capability at the moment. Alternatively + * we can introduce a LINK0/LINK1 flag as hme(4) did in its Tx checksum + * offload routine. + */ +#define SK_CSUM_FEATURES (CSUM_TCP) + +/* * Note that we have newbus methods for both the GEnesis controller * itself and the XMAC(s). The XMACs are children of the GEnesis, and * the miibus code is a child of the XMACs. We need to do it this way * so that the miibus drivers can access the PHY registers on the * right PHY. 
It's not quite what I had in mind, but it's the only * design that achieves the desired effect. */ static device_method_t skc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, skc_probe), DEVMETHOD(device_attach, skc_attach), DEVMETHOD(device_detach, skc_detach), + DEVMETHOD(device_suspend, skc_suspend), + DEVMETHOD(device_resume, skc_resume), DEVMETHOD(device_shutdown, skc_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), { 0, 0 } }; static driver_t skc_driver = { "skc", skc_methods, sizeof(struct sk_softc) }; static devclass_t skc_devclass; static device_method_t sk_methods[] = { /* Device interface */ DEVMETHOD(device_probe, sk_probe), DEVMETHOD(device_attach, sk_attach), DEVMETHOD(device_detach, sk_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, sk_miibus_readreg), DEVMETHOD(miibus_writereg, sk_miibus_writereg), DEVMETHOD(miibus_statchg, sk_miibus_statchg), { 0, 0 } }; static driver_t sk_driver = { "sk", sk_methods, sizeof(struct sk_if_softc) }; static devclass_t sk_devclass; -DRIVER_MODULE(sk, pci, skc_driver, skc_devclass, 0, 0); +DRIVER_MODULE(skc, pci, skc_driver, skc_devclass, 0, 0); DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0); DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0); #define SK_SETBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x) #define SK_CLRBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x) #define SK_WIN_SETBIT_4(sc, reg, x) \ sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x) #define SK_WIN_CLRBIT_4(sc, reg, x) \ sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x) #define SK_WIN_SETBIT_2(sc, reg, x) \ sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x) #define SK_WIN_CLRBIT_2(sc, reg, x) \ sk_win_write_2(sc, 
reg, sk_win_read_2(sc, reg) & ~x) static u_int32_t sk_win_read_4(sc, reg) struct sk_softc *sc; int reg; { #ifdef SK_USEIOSPACE CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg))); #else return(CSR_READ_4(sc, reg)); #endif } static u_int16_t sk_win_read_2(sc, reg) struct sk_softc *sc; int reg; { #ifdef SK_USEIOSPACE CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg))); #else return(CSR_READ_2(sc, reg)); #endif } static u_int8_t sk_win_read_1(sc, reg) struct sk_softc *sc; int reg; { #ifdef SK_USEIOSPACE CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg))); #else return(CSR_READ_1(sc, reg)); #endif } static void sk_win_write_4(sc, reg, val) struct sk_softc *sc; int reg; u_int32_t val; { #ifdef SK_USEIOSPACE CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val); #else CSR_WRITE_4(sc, reg, val); #endif return; } static void sk_win_write_2(sc, reg, val) struct sk_softc *sc; int reg; u_int32_t val; { #ifdef SK_USEIOSPACE CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val); #else CSR_WRITE_2(sc, reg, val); #endif return; } static void sk_win_write_1(sc, reg, val) struct sk_softc *sc; int reg; u_int32_t val; { #ifdef SK_USEIOSPACE CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg)); CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val); #else CSR_WRITE_1(sc, reg, val); #endif return; } /* * The VPD EEPROM contains Vital Product Data, as suggested in * the PCI 2.1 specification. The VPD data is separared into areas * denoted by resource IDs. The SysKonnect VPD contains an ID string * resource (the name of the adapter), a read-only area resource * containing various key/data fields and a read/write area which * can be used to store asset management information or log messages. * We read the ID string and read-only into buffers attached to * the controller softc structure for later use. 
At the moment, * we only use the ID string during skc_attach(). */ static u_int8_t sk_vpd_readbyte(sc, addr) struct sk_softc *sc; int addr; { int i; sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr); for (i = 0; i < SK_TIMEOUT; i++) { - DELAY(1); + /* ASUS LOM takes a very long time to read VPD. */ + DELAY(100); if (sk_win_read_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG) break; } if (i == SK_TIMEOUT) return(0); return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA))); } static void sk_vpd_read_res(sc, res, addr) struct sk_softc *sc; struct vpd_res *res; int addr; { int i; u_int8_t *ptr; ptr = (u_int8_t *)res; for (i = 0; i < sizeof(struct vpd_res); i++) ptr[i] = sk_vpd_readbyte(sc, i + addr); return; } static void sk_vpd_read(sc) struct sk_softc *sc; { int pos = 0, i; struct vpd_res res; + /* Check VPD capability */ + if (sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_CAPID)) != PCIY_VPD) + return; if (sc->sk_vpd_prodname != NULL) free(sc->sk_vpd_prodname, M_DEVBUF); if (sc->sk_vpd_readonly != NULL) free(sc->sk_vpd_readonly, M_DEVBUF); sc->sk_vpd_prodname = NULL; sc->sk_vpd_readonly = NULL; sc->sk_vpd_readonly_len = 0; sk_vpd_read_res(sc, &res, pos); /* * Bail out quietly if the eeprom appears to be missing or empty. 
*/ if (res.vr_id == 0xff && res.vr_len == 0xff && res.vr_pad == 0xff) return; if (res.vr_id != VPD_RES_ID) { - printf("skc%d: bad VPD resource id: expected %x got %x\n", - sc->sk_unit, VPD_RES_ID, res.vr_id); + device_printf(sc->sk_dev, "bad VPD resource id: expected %x " + "got %x\n", VPD_RES_ID, res.vr_id); return; } pos += sizeof(res); sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT); if (sc->sk_vpd_prodname != NULL) { for (i = 0; i < res.vr_len; i++) sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos); sc->sk_vpd_prodname[i] = '\0'; } pos += res.vr_len; sk_vpd_read_res(sc, &res, pos); if (res.vr_id != VPD_RES_READ) { - printf("skc%d: bad VPD resource id: expected %x got %x\n", - sc->sk_unit, VPD_RES_READ, res.vr_id); + device_printf(sc->sk_dev, "bad VPD resource id: expected %x " + "got %x\n", VPD_RES_READ, res.vr_id); return; } pos += sizeof(res); sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT); for (i = 0; i < res.vr_len; i++) sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos); sc->sk_vpd_readonly_len = res.vr_len; return; } static int sk_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct sk_if_softc *sc_if; + int v; sc_if = device_get_softc(dev); + SK_IF_MII_LOCK(sc_if); switch(sc_if->sk_softc->sk_type) { case SK_GENESIS: - return(sk_xmac_miibus_readreg(sc_if, phy, reg)); + v = sk_xmac_miibus_readreg(sc_if, phy, reg); + break; case SK_YUKON: case SK_YUKON_LITE: case SK_YUKON_LP: - return(sk_marv_miibus_readreg(sc_if, phy, reg)); + case SK_YUKON_EC: + v = sk_marv_miibus_readreg(sc_if, phy, reg); + break; + default: + v = 0; + break; } + SK_IF_MII_UNLOCK(sc_if); - return(0); + return (v); } static int sk_miibus_writereg(dev, phy, reg, val) device_t dev; int phy, reg, val; { struct sk_if_softc *sc_if; + int v; sc_if = device_get_softc(dev); + SK_IF_MII_LOCK(sc_if); switch(sc_if->sk_softc->sk_type) { case SK_GENESIS: - return(sk_xmac_miibus_writereg(sc_if, phy, reg, val)); + v = sk_xmac_miibus_writereg(sc_if, 
phy, reg, val); + break; case SK_YUKON: case SK_YUKON_LITE: case SK_YUKON_LP: - return(sk_marv_miibus_writereg(sc_if, phy, reg, val)); + case SK_YUKON_EC: + v = sk_marv_miibus_writereg(sc_if, phy, reg, val); + break; + default: + v = 0; + break; } + SK_IF_MII_UNLOCK(sc_if); - return(0); + return (v); } static void sk_miibus_statchg(dev) device_t dev; { struct sk_if_softc *sc_if; sc_if = device_get_softc(dev); + SK_IF_MII_LOCK(sc_if); switch(sc_if->sk_softc->sk_type) { case SK_GENESIS: sk_xmac_miibus_statchg(sc_if); break; case SK_YUKON: case SK_YUKON_LITE: case SK_YUKON_LP: + case SK_YUKON_EC: sk_marv_miibus_statchg(sc_if); break; } + SK_IF_MII_UNLOCK(sc_if); return; } static int sk_xmac_miibus_readreg(sc_if, phy, reg) struct sk_if_softc *sc_if; int phy, reg; { int i; if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0) return(0); - SK_IF_LOCK(sc_if); SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8)); SK_XM_READ_2(sc_if, XM_PHY_DATA); if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) { for (i = 0; i < SK_TIMEOUT; i++) { DELAY(1); if (SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYDATARDY) break; } if (i == SK_TIMEOUT) { - printf("sk%d: phy failed to come ready\n", - sc_if->sk_unit); - SK_IF_UNLOCK(sc_if); + if_printf(sc_if->sk_ifp, "phy failed to come ready\n"); return(0); } } DELAY(1); i = SK_XM_READ_2(sc_if, XM_PHY_DATA); - SK_IF_UNLOCK(sc_if); + return(i); } static int sk_xmac_miibus_writereg(sc_if, phy, reg, val) struct sk_if_softc *sc_if; int phy, reg, val; { int i; - SK_IF_LOCK(sc_if); SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8)); for (i = 0; i < SK_TIMEOUT; i++) { if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY)) break; } if (i == SK_TIMEOUT) { - printf("sk%d: phy failed to come ready\n", sc_if->sk_unit); - SK_IF_UNLOCK(sc_if); - return(ETIMEDOUT); + if_printf(sc_if->sk_ifp, "phy failed to come ready\n"); + return (ETIMEDOUT); } SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val); for (i = 0; i < SK_TIMEOUT; i++) { DELAY(1); if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & 
XM_MMUCMD_PHYBUSY)) break; } - SK_IF_UNLOCK(sc_if); if (i == SK_TIMEOUT) - printf("sk%d: phy write timed out\n", sc_if->sk_unit); + if_printf(sc_if->sk_ifp, "phy write timed out\n"); return(0); } static void sk_xmac_miibus_statchg(sc_if) struct sk_if_softc *sc_if; { struct mii_data *mii; mii = device_get_softc(sc_if->sk_miibus); - SK_IF_LOCK(sc_if); /* * If this is a GMII PHY, manually set the XMAC's * duplex mode accordingly. */ if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) { if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); } else { SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); } } - SK_IF_UNLOCK(sc_if); - - return; } static int sk_marv_miibus_readreg(sc_if, phy, reg) struct sk_if_softc *sc_if; int phy, reg; { u_int16_t val; int i; if (phy != 0 || (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER && sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) { return(0); } - SK_IF_LOCK(sc_if); SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) | YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ); for (i = 0; i < SK_TIMEOUT; i++) { DELAY(1); val = SK_YU_READ_2(sc_if, YUKON_SMICR); if (val & YU_SMICR_READ_VALID) break; } if (i == SK_TIMEOUT) { - printf("sk%d: phy failed to come ready\n", - sc_if->sk_unit); - SK_IF_UNLOCK(sc_if); + if_printf(sc_if->sk_ifp, "phy failed to come ready\n"); return(0); } val = SK_YU_READ_2(sc_if, YUKON_SMIDR); - SK_IF_UNLOCK(sc_if); return(val); } static int sk_marv_miibus_writereg(sc_if, phy, reg, val) struct sk_if_softc *sc_if; int phy, reg, val; { int i; - SK_IF_LOCK(sc_if); SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val); SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) | YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE); for (i = 0; i < SK_TIMEOUT; i++) { DELAY(1); if (SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY) break; } - SK_IF_UNLOCK(sc_if); + if (i == SK_TIMEOUT) { + if_printf(sc_if->sk_ifp, "phy write timeout\n"); + return (0); + } return(0); } static void sk_marv_miibus_statchg(sc_if) struct sk_if_softc 
*sc_if; { return; } #define HASH_BITS 6 static u_int32_t sk_xmchash(addr) const uint8_t *addr; { uint32_t crc; /* Compute CRC for the address value. */ crc = ether_crc32_le(addr, ETHER_ADDR_LEN); return (~crc & ((1 << HASH_BITS) - 1)); } /* gmchash is just a big endian crc */ static u_int32_t sk_gmchash(addr) const uint8_t *addr; { uint32_t crc; /* Compute CRC for the address value. */ crc = ether_crc32_be(addr, ETHER_ADDR_LEN); return (crc & ((1 << HASH_BITS) - 1)); } static void sk_setfilt(sc_if, addr, slot) struct sk_if_softc *sc_if; - caddr_t addr; + u_int16_t *addr; int slot; { int base; base = XM_RXFILT_ENTRY(slot); - SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0])); - SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2])); - SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4])); + SK_XM_WRITE_2(sc_if, base, addr[0]); + SK_XM_WRITE_2(sc_if, base + 2, addr[1]); + SK_XM_WRITE_2(sc_if, base + 4, addr[2]); return; } static void sk_setmulti(sc_if) struct sk_if_softc *sc_if; { struct sk_softc *sc = sc_if->sk_softc; struct ifnet *ifp = sc_if->sk_ifp; u_int32_t hashes[2] = { 0, 0 }; int h = 0, i; struct ifmultiaddr *ifma; - u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 }; + u_int16_t dummy[] = { 0, 0, 0 }; + u_int16_t maddr[(ETHER_ADDR_LEN+1)/2]; SK_IF_LOCK_ASSERT(sc_if); /* First, zot all the existing filters. */ switch(sc->sk_type) { case SK_GENESIS: for (i = 1; i < XM_RXFILT_MAX; i++) - sk_setfilt(sc_if, (caddr_t)&dummy, i); + sk_setfilt(sc_if, dummy, i); SK_XM_WRITE_4(sc_if, XM_MAR0, 0); SK_XM_WRITE_4(sc_if, XM_MAR2, 0); break; case SK_YUKON: case SK_YUKON_LITE: case SK_YUKON_LP: + case SK_YUKON_EC: SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0); SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0); SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0); SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0); break; } /* Now program new ones. 
*/ if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { hashes[0] = 0xFFFFFFFF; hashes[1] = 0xFFFFFFFF; } else { i = 1; IF_ADDR_LOCK(ifp); TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; /* * Program the first XM_RXFILT_MAX multicast groups * into the perfect filter. For all others, * use the hash table. */ if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) { - sk_setfilt(sc_if, - LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i); + bcopy(LLADDR( + (struct sockaddr_dl *)ifma->ifma_addr), + maddr, ETHER_ADDR_LEN); + sk_setfilt(sc_if, maddr, i); i++; continue; } switch(sc->sk_type) { case SK_GENESIS: - h = sk_xmchash( - LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); + bcopy(LLADDR( + (struct sockaddr_dl *)ifma->ifma_addr), + maddr, ETHER_ADDR_LEN); + h = sk_xmchash((const uint8_t *)maddr); break; case SK_YUKON: case SK_YUKON_LITE: case SK_YUKON_LP: - h = sk_gmchash( - LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); + case SK_YUKON_EC: + bcopy(LLADDR( + (struct sockaddr_dl *)ifma->ifma_addr), + maddr, ETHER_ADDR_LEN); + h = sk_gmchash((const uint8_t *)maddr); break; } if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); } IF_ADDR_UNLOCK(ifp); } switch(sc->sk_type) { case SK_GENESIS: SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH| XM_MODE_RX_USE_PERFECT); SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]); SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]); break; case SK_YUKON: case SK_YUKON_LITE: case SK_YUKON_LP: + case SK_YUKON_EC: SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff); SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff); SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff); SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff); break; } return; } static void sk_setpromisc(sc_if) struct sk_if_softc *sc_if; { struct sk_softc *sc = sc_if->sk_softc; struct ifnet *ifp = sc_if->sk_ifp; SK_IF_LOCK_ASSERT(sc_if); switch(sc->sk_type) { case 
SK_GENESIS: if (ifp->if_flags & IFF_PROMISC) { SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC); } else { SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC); } break; case SK_YUKON: case SK_YUKON_LITE: case SK_YUKON_LP: + case SK_YUKON_EC: if (ifp->if_flags & IFF_PROMISC) { SK_YU_CLRBIT_2(sc_if, YUKON_RCR, YU_RCR_UFLEN | YU_RCR_MUFLEN); } else { SK_YU_SETBIT_2(sc_if, YUKON_RCR, YU_RCR_UFLEN | YU_RCR_MUFLEN); } break; } return; } static int sk_init_rx_ring(sc_if) struct sk_if_softc *sc_if; { - struct sk_chain_data *cd = &sc_if->sk_cdata; - struct sk_ring_data *rd = sc_if->sk_rdata; + struct sk_ring_data *rd; + bus_addr_t addr; + u_int32_t csum_start; int i; - bzero((char *)rd->sk_rx_ring, - sizeof(struct sk_rx_desc) * SK_RX_RING_CNT); + sc_if->sk_cdata.sk_rx_cons = 0; + csum_start = (ETHER_HDR_LEN + sizeof(struct ip)) << 16 | + ETHER_HDR_LEN; + rd = &sc_if->sk_rdata; + bzero(rd->sk_rx_ring, sizeof(struct sk_rx_desc) * SK_RX_RING_CNT); for (i = 0; i < SK_RX_RING_CNT; i++) { - cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i]; - if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS) - return(ENOBUFS); - if (i == (SK_RX_RING_CNT - 1)) { - cd->sk_rx_chain[i].sk_next = - &cd->sk_rx_chain[0]; - rd->sk_rx_ring[i].sk_next = - vtophys(&rd->sk_rx_ring[0]); - } else { - cd->sk_rx_chain[i].sk_next = - &cd->sk_rx_chain[i + 1]; - rd->sk_rx_ring[i].sk_next = - vtophys(&rd->sk_rx_ring[i + 1]); - } + if (sk_newbuf(sc_if, i) != 0) + return (ENOBUFS); + if (i == (SK_RX_RING_CNT - 1)) + addr = SK_RX_RING_ADDR(sc_if, 0); + else + addr = SK_RX_RING_ADDR(sc_if, i + 1); + rd->sk_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr)); + rd->sk_rx_ring[i].sk_csum_start = htole32(csum_start); } - sc_if->sk_cdata.sk_rx_prod = 0; - sc_if->sk_cdata.sk_rx_cons = 0; + bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag, + sc_if->sk_cdata.sk_rx_ring_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return(0); } -static void -sk_init_tx_ring(sc_if) +static int +sk_init_jumbo_rx_ring(sc_if) struct 
sk_if_softc *sc_if; { - struct sk_chain_data *cd = &sc_if->sk_cdata; - struct sk_ring_data *rd = sc_if->sk_rdata; + struct sk_ring_data *rd; + bus_addr_t addr; + u_int32_t csum_start; int i; - bzero((char *)sc_if->sk_rdata->sk_tx_ring, - sizeof(struct sk_tx_desc) * SK_TX_RING_CNT); + sc_if->sk_cdata.sk_jumbo_rx_cons = 0; - for (i = 0; i < SK_TX_RING_CNT; i++) { - cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i]; - if (i == (SK_TX_RING_CNT - 1)) { - cd->sk_tx_chain[i].sk_next = - &cd->sk_tx_chain[0]; - rd->sk_tx_ring[i].sk_next = - vtophys(&rd->sk_tx_ring[0]); - } else { - cd->sk_tx_chain[i].sk_next = - &cd->sk_tx_chain[i + 1]; - rd->sk_tx_ring[i].sk_next = - vtophys(&rd->sk_tx_ring[i + 1]); - } + csum_start = ((ETHER_HDR_LEN + sizeof(struct ip)) << 16) | + ETHER_HDR_LEN; + rd = &sc_if->sk_rdata; + bzero(rd->sk_jumbo_rx_ring, + sizeof(struct sk_rx_desc) * SK_JUMBO_RX_RING_CNT); + for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) { + if (sk_jumbo_newbuf(sc_if, i) != 0) + return (ENOBUFS); + if (i == (SK_JUMBO_RX_RING_CNT - 1)) + addr = SK_JUMBO_RX_RING_ADDR(sc_if, 0); + else + addr = SK_JUMBO_RX_RING_ADDR(sc_if, i + 1); + rd->sk_jumbo_rx_ring[i].sk_next = htole32(SK_ADDR_LO(addr)); + rd->sk_jumbo_rx_ring[i].sk_csum_start = htole32(csum_start); } - sc_if->sk_cdata.sk_tx_prod = 0; - sc_if->sk_cdata.sk_tx_cons = 0; - sc_if->sk_cdata.sk_tx_cnt = 0; + bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, + sc_if->sk_cdata.sk_jumbo_rx_ring_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); - return; + return (0); } -static int -sk_newbuf(sc_if, c, m) +static void +sk_init_tx_ring(sc_if) struct sk_if_softc *sc_if; - struct sk_chain *c; - struct mbuf *m; { - struct mbuf *m_new = NULL; - struct sk_rx_desc *r; + struct sk_ring_data *rd; + struct sk_txdesc *txd; + bus_addr_t addr; + int i; - if (m == NULL) { - caddr_t *buf = NULL; + STAILQ_INIT(&sc_if->sk_cdata.sk_txfreeq); + STAILQ_INIT(&sc_if->sk_cdata.sk_txbusyq); - MGETHDR(m_new, M_DONTWAIT, MT_DATA); - if (m_new == NULL) - 
return(ENOBUFS); + sc_if->sk_cdata.sk_tx_prod = 0; + sc_if->sk_cdata.sk_tx_cons = 0; + sc_if->sk_cdata.sk_tx_cnt = 0; - /* Allocate the jumbo buffer */ - buf = sk_jalloc(sc_if); - if (buf == NULL) { - m_freem(m_new); -#ifdef SK_VERBOSE - printf("sk%d: jumbo allocation failed " - "-- packet dropped!\n", sc_if->sk_unit); -#endif - return(ENOBUFS); - } - - /* Attach the buffer to the mbuf */ - MEXTADD(m_new, buf, SK_JLEN, sk_jfree, - (struct sk_if_softc *)sc_if, 0, EXT_NET_DRV); - m_new->m_data = (void *)buf; - m_new->m_pkthdr.len = m_new->m_len = SK_JLEN; - } else { - /* - * We're re-using a previously allocated mbuf; - * be sure to re-init pointers and lengths to - * default values. - */ - m_new = m; - m_new->m_len = m_new->m_pkthdr.len = SK_JLEN; - m_new->m_data = m_new->m_ext.ext_buf; + rd = &sc_if->sk_rdata; + bzero(rd->sk_tx_ring, sizeof(struct sk_tx_desc) * SK_TX_RING_CNT); + for (i = 0; i < SK_TX_RING_CNT; i++) { + if (i == (SK_TX_RING_CNT - 1)) + addr = SK_TX_RING_ADDR(sc_if, 0); + else + addr = SK_TX_RING_ADDR(sc_if, i + 1); + rd->sk_tx_ring[i].sk_next = htole32(SK_ADDR_LO(addr)); + txd = &sc_if->sk_cdata.sk_txdesc[i]; + STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q); } - /* - * Adjust alignment so packet payload begins on a - * longword boundary. Mandatory for Alpha, useful on - * x86 too. - */ - m_adj(m_new, ETHER_ALIGN); - - r = c->sk_desc; - c->sk_mbuf = m_new; - r->sk_data_lo = vtophys(mtod(m_new, caddr_t)); - r->sk_ctl = m_new->m_len | SK_RXSTAT; - - return(0); + bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag, + sc_if->sk_cdata.sk_tx_ring_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } -/* - * Allocate jumbo buffer storage. The SysKonnect adapters support - * "jumbograms" (9K frames), although SysKonnect doesn't currently - * use them in their drivers. In order for us to use them, we need - * large 9K receive buffers, however standard mbuf clusters are only - * 2048 bytes in size. 
Consequently, we need to allocate and manage - * our own jumbo buffer pool. Fortunately, this does not require an - * excessive amount of additional code. - */ -static int -sk_alloc_jumbo_mem(sc_if) +static __inline void +sk_discard_rxbuf(sc_if, idx) struct sk_if_softc *sc_if; + int idx; { - caddr_t ptr; - register int i; - struct sk_jpool_entry *entry; + struct sk_rx_desc *r; + struct sk_rxdesc *rxd; + struct mbuf *m; - /* Grab a big chunk o' storage. */ - sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF, - M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); - if (sc_if->sk_cdata.sk_jumbo_buf == NULL) { - printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit); - return(ENOBUFS); - } + r = &sc_if->sk_rdata.sk_rx_ring[idx]; + rxd = &sc_if->sk_cdata.sk_rxdesc[idx]; + m = rxd->rx_m; + r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM); +} - mtx_init(&sc_if->sk_jlist_mtx, "sk_jlist_mtx", NULL, MTX_DEF); +static __inline void +sk_discard_jumbo_rxbuf(sc_if, idx) + struct sk_if_softc *sc_if; + int idx; +{ + struct sk_rx_desc *r; + struct sk_rxdesc *rxd; + struct mbuf *m; - SLIST_INIT(&sc_if->sk_jfree_listhead); - SLIST_INIT(&sc_if->sk_jinuse_listhead); - - /* - * Now divide it up into 9K pieces and save the addresses - * in an array. 
- */ - ptr = sc_if->sk_cdata.sk_jumbo_buf; - for (i = 0; i < SK_JSLOTS; i++) { - sc_if->sk_cdata.sk_jslots[i] = ptr; - ptr += SK_JLEN; - entry = malloc(sizeof(struct sk_jpool_entry), - M_DEVBUF, M_NOWAIT); - if (entry == NULL) { - sk_free_jumbo_mem(sc_if); - sc_if->sk_cdata.sk_jumbo_buf = NULL; - printf("sk%d: no memory for jumbo " - "buffer queue!\n", sc_if->sk_unit); - return(ENOBUFS); - } - entry->slot = i; - SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, - entry, jpool_entries); - } - - return(0); + r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx]; + rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx]; + m = rxd->rx_m; + r->sk_ctl = htole32(m->m_len | SK_RXSTAT | SK_OPCODE_CSUM); } -static void -sk_free_jumbo_mem(sc_if) +static int +sk_newbuf(sc_if, idx) struct sk_if_softc *sc_if; + int idx; { - struct sk_jpool_entry *entry; + struct sk_rx_desc *r; + struct sk_rxdesc *rxd; + struct mbuf *m; + bus_dma_segment_t segs[1]; + bus_dmamap_t map; + int nsegs; - SK_JLIST_LOCK(sc_if); + m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); + if (m == NULL) + return (ENOBUFS); + m->m_len = m->m_pkthdr.len = MCLBYTES; + m_adj(m, ETHER_ALIGN); - /* We cannot release external mbuf storage while in use. 
*/ - if (!SLIST_EMPTY(&sc_if->sk_jinuse_listhead)) { - printf("sk%d: will leak jumbo buffer memory!\n", sc_if->sk_unit); - SK_JLIST_UNLOCK(sc_if); - return; + if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_rx_tag, + sc_if->sk_cdata.sk_rx_sparemap, m, segs, &nsegs, 0) != 0) { + m_freem(m); + return (ENOBUFS); } + KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); - while (!SLIST_EMPTY(&sc_if->sk_jfree_listhead)) { - entry = SLIST_FIRST(&sc_if->sk_jfree_listhead); - SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries); - free(entry, M_DEVBUF); + rxd = &sc_if->sk_cdata.sk_rxdesc[idx]; + if (rxd->rx_m != NULL) { + bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap, + BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap); } + map = rxd->rx_dmamap; + rxd->rx_dmamap = sc_if->sk_cdata.sk_rx_sparemap; + sc_if->sk_cdata.sk_rx_sparemap = map; + bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, rxd->rx_dmamap, + BUS_DMASYNC_PREREAD); + rxd->rx_m = m; + r = &sc_if->sk_rdata.sk_rx_ring[idx]; + r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr)); + r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr)); + r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | SK_OPCODE_CSUM); - SK_JLIST_UNLOCK(sc_if); - - mtx_destroy(&sc_if->sk_jlist_mtx); - - contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF); - - return; + return (0); } -/* - * Allocate a jumbo buffer. 
- */ -static void * -sk_jalloc(sc_if) +static int +sk_jumbo_newbuf(sc_if, idx) struct sk_if_softc *sc_if; + int idx; { - struct sk_jpool_entry *entry; + struct sk_rx_desc *r; + struct sk_rxdesc *rxd; + struct mbuf *m; + bus_dma_segment_t segs[1]; + bus_dmamap_t map; + int nsegs; + void *buf; - SK_JLIST_LOCK(sc_if); + MGETHDR(m, M_DONTWAIT, MT_DATA); + if (m == NULL) + return (ENOBUFS); + buf = sk_jalloc(sc_if); + if (buf == NULL) { + m_freem(m); + return (ENOBUFS); + } + /* Attach the buffer to the mbuf */ + MEXTADD(m, buf, SK_JLEN, sk_jfree, (struct sk_if_softc *)sc_if, 0, + EXT_NET_DRV); + if ((m->m_flags & M_EXT) == 0) { + m_freem(m); + return (ENOBUFS); + } + m->m_pkthdr.len = m->m_len = SK_JLEN; + /* + * Adjust alignment so packet payload begins on a + * longword boundary. Mandatory for Alpha, useful on + * x86 too. + */ + m_adj(m, ETHER_ALIGN); - entry = SLIST_FIRST(&sc_if->sk_jfree_listhead); + if (bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_jumbo_rx_tag, + sc_if->sk_cdata.sk_jumbo_rx_sparemap, m, segs, &nsegs, 0) != 0) { + m_freem(m); + return (ENOBUFS); + } + KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); - if (entry == NULL) { -#ifdef SK_VERBOSE - printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit); -#endif - SK_JLIST_UNLOCK(sc_if); - return(NULL); + rxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[idx]; + if (rxd->rx_m != NULL) { + bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap, + BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag, + rxd->rx_dmamap); } + map = rxd->rx_dmamap; + rxd->rx_dmamap = sc_if->sk_cdata.sk_jumbo_rx_sparemap; + sc_if->sk_cdata.sk_jumbo_rx_sparemap = map; + bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, rxd->rx_dmamap, + BUS_DMASYNC_PREREAD); + rxd->rx_m = m; + r = &sc_if->sk_rdata.sk_jumbo_rx_ring[idx]; + r->sk_data_lo = htole32(SK_ADDR_LO(segs[0].ds_addr)); + r->sk_data_hi = htole32(SK_ADDR_HI(segs[0].ds_addr)); + r->sk_ctl = htole32(segs[0].ds_len | SK_RXSTAT | 
SK_OPCODE_CSUM); - SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries); - SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries); - - SK_JLIST_UNLOCK(sc_if); - - return(sc_if->sk_cdata.sk_jslots[entry->slot]); + return (0); } /* - * Release a jumbo buffer. - */ -static void -sk_jfree(buf, args) - void *buf; - void *args; -{ - struct sk_if_softc *sc_if; - int i; - struct sk_jpool_entry *entry; - - /* Extract the softc struct pointer. */ - sc_if = (struct sk_if_softc *)args; - if (sc_if == NULL) - panic("sk_jfree: didn't get softc pointer!"); - - SK_JLIST_LOCK(sc_if); - - /* calculate the slot this buffer belongs to */ - i = ((vm_offset_t)buf - - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN; - - if ((i < 0) || (i >= SK_JSLOTS)) - panic("sk_jfree: asked to free buffer that we don't manage!"); - - entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead); - if (entry == NULL) - panic("sk_jfree: buffer not in use!"); - entry->slot = i; - SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries); - SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries); - if (SLIST_EMPTY(&sc_if->sk_jinuse_listhead)) - wakeup(sc_if); - - SK_JLIST_UNLOCK(sc_if); - return; -} - -/* * Set media options. */ static int sk_ifmedia_upd(ifp) struct ifnet *ifp; { struct sk_if_softc *sc_if = ifp->if_softc; struct mii_data *mii; mii = device_get_softc(sc_if->sk_miibus); sk_init(sc_if); mii_mediachg(mii); return(0); } /* * Report current media status. 
*/ static void sk_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct sk_if_softc *sc_if; struct mii_data *mii; sc_if = ifp->if_softc; mii = device_get_softc(sc_if->sk_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; return; } static int sk_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct sk_if_softc *sc_if = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; - int error = 0; + int error, mask; struct mii_data *mii; + error = 0; switch(command) { case SIOCSIFMTU: SK_IF_LOCK(sc_if); if (ifr->ifr_mtu > SK_JUMBO_MTU) error = EINVAL; else { ifp->if_mtu = ifr->ifr_mtu; ifp->if_drv_flags &= ~IFF_DRV_RUNNING; sk_init_locked(sc_if); } SK_IF_UNLOCK(sc_if); break; case SIOCSIFFLAGS: SK_IF_LOCK(sc_if); if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { if ((ifp->if_flags ^ sc_if->sk_if_flags) & IFF_PROMISC) { sk_setpromisc(sc_if); sk_setmulti(sc_if); } } else sk_init_locked(sc_if); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) sk_stop(sc_if); } sc_if->sk_if_flags = ifp->if_flags; SK_IF_UNLOCK(sc_if); - error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: SK_IF_LOCK(sc_if); - if (ifp->if_drv_flags & IFF_DRV_RUNNING) { + if (ifp->if_drv_flags & IFF_DRV_RUNNING) sk_setmulti(sc_if); - error = 0; - } SK_IF_UNLOCK(sc_if); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc_if->sk_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; + case SIOCSIFCAP: + SK_IF_LOCK(sc_if); + if (sc_if->sk_softc->sk_type == SK_GENESIS) { + SK_IF_UNLOCK(sc_if); + break; + } + mask = ifr->ifr_reqcap ^ ifp->if_capenable; + if (mask & IFCAP_HWCSUM) { + ifp->if_capenable ^= IFCAP_HWCSUM; + if (IFCAP_HWCSUM & ifp->if_capenable && + IFCAP_HWCSUM & ifp->if_capabilities) + ifp->if_hwassist = SK_CSUM_FEATURES; + else + ifp->if_hwassist = 0; + } + SK_IF_UNLOCK(sc_if); + break; default: error = ether_ioctl(ifp, 
command, data); break; } - return(error); + return (error); } /* * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int skc_probe(dev) device_t dev; { struct sk_type *t = sk_devs; while(t->sk_name != NULL) { if ((pci_get_vendor(dev) == t->sk_vid) && (pci_get_device(dev) == t->sk_did)) { /* * Only attach to rev. 2 of the Linksys EG1032 adapter. * Rev. 3 is supported by re(4). */ if ((t->sk_vid == VENDORID_LINKSYS) && (t->sk_did == DEVICEID_LINKSYS_EG1032) && (pci_get_subdevice(dev) != SUBDEVICEID_LINKSYS_EG1032_REV2)) { t++; continue; } device_set_desc(dev, t->sk_name); return (BUS_PROBE_DEFAULT); } t++; } return(ENXIO); } /* * Force the GEnesis into reset, then bring it out of reset. */ static void sk_reset(sc) struct sk_softc *sc; { + CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET); CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET); if (SK_YUKON_FAMILY(sc->sk_type)) CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET); DELAY(1000); CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET); DELAY(2); CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET); if (SK_YUKON_FAMILY(sc->sk_type)) CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR); if (sc->sk_type == SK_GENESIS) { /* Configure packet arbiter */ sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET); sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT); sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT); sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT); sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT); } /* Enable RAM interface */ sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET); /* * Configure interrupt moderation. The moderation timer * defers interrupts specified in the interrupt moderation * timer mask based on the timeout specified in the interrupt * moderation timer init register. 
Each bit in the timer * register represents one tick, so to specify a timeout in * microseconds, we have to multiply by the correct number of * ticks-per-microsecond. */ switch (sc->sk_type) { case SK_GENESIS: sc->sk_int_ticks = SK_IMTIMER_TICKS_GENESIS; break; + case SK_YUKON_EC: + sc->sk_int_ticks = SK_IMTIMER_TICKS_YUKON_EC; + break; default: sc->sk_int_ticks = SK_IMTIMER_TICKS_YUKON; break; } if (bootverbose) - printf("skc%d: interrupt moderation is %d us\n", - sc->sk_unit, sc->sk_int_mod); + device_printf(sc->sk_dev, "interrupt moderation is %d us\n", + sc->sk_int_mod); sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod, sc->sk_int_ticks)); sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF| SK_ISR_RX1_EOF|SK_ISR_RX2_EOF); sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START); return; } static int sk_probe(dev) device_t dev; { struct sk_softc *sc; sc = device_get_softc(device_get_parent(dev)); /* * Not much to do here. We always know there will be * at least one XMAC present, and if there are two, * skc_attach() will create a second device instance * for us. */ switch (sc->sk_type) { case SK_GENESIS: device_set_desc(dev, "XaQti Corp. XMAC II"); break; case SK_YUKON: case SK_YUKON_LITE: case SK_YUKON_LP: + case SK_YUKON_EC: device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon"); break; } return (BUS_PROBE_DEFAULT); } /* * Each XMAC chip is attached as a separate logical IP interface. * Single port cards will have only one logical interface of course. 
*/ static int sk_attach(dev) device_t dev; { struct sk_softc *sc; struct sk_if_softc *sc_if; struct ifnet *ifp; int i, port, error; u_char eaddr[6]; if (dev == NULL) return(EINVAL); error = 0; sc_if = device_get_softc(dev); sc = device_get_softc(device_get_parent(dev)); port = *(int *)device_get_ivars(dev); - sc_if->sk_dev = dev; - sc_if->sk_unit = device_get_unit(dev); + sc_if->sk_if_dev = dev; sc_if->sk_port = port; sc_if->sk_softc = sc; sc->sk_if[port] = sc_if; if (port == SK_PORT_A) sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0; if (port == SK_PORT_B) sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1; - /* Allocate the descriptor queues. */ - sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF, - M_NOWAIT, M_ZERO, 0xffffffff, PAGE_SIZE, 0); + callout_init_mtx(&sc_if->sk_tick_ch, &sc_if->sk_softc->sk_mtx, 0); - if (sc_if->sk_rdata == NULL) { - printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit); + if (sk_dma_alloc(sc_if) != 0) { error = ENOMEM; goto fail; } - /* Try to allocate memory for jumbo buffers. */ - if (sk_alloc_jumbo_mem(sc_if)) { - printf("sk%d: jumbo buffer allocation failed\n", - sc_if->sk_unit); - error = ENOMEM; - goto fail; - } - ifp = sc_if->sk_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { - printf("sk%d: can not if_alloc()\n", sc_if->sk_unit); + device_printf(sc_if->sk_if_dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } ifp->if_softc = sc_if; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; /* - * The hardware should be ready for VLAN_MTU by default: - * XMAC II has 0x8100 in VLAN Tag Level 1 register initially; - * YU_SMR_MFL_VLAN is set by this driver in Yukon. + * SK_GENESIS has a bug in checksum offload - From linux. 
*/ - ifp->if_capabilities = ifp->if_capenable = IFCAP_VLAN_MTU; + if (sc_if->sk_softc->sk_type != SK_GENESIS) { + ifp->if_capabilities = IFCAP_HWCSUM; + ifp->if_hwassist = SK_CSUM_FEATURES; + } else { + ifp->if_capabilities = 0; + ifp->if_hwassist = 0; + } + ifp->if_capenable = ifp->if_capabilities; ifp->if_ioctl = sk_ioctl; ifp->if_start = sk_start; ifp->if_watchdog = sk_watchdog; ifp->if_init = sk_init; IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1); ifp->if_snd.ifq_drv_maxlen = SK_TX_RING_CNT - 1; IFQ_SET_READY(&ifp->if_snd); - callout_handle_init(&sc_if->sk_tick_ch); - /* * Get station address for this interface. Note that * dual port cards actually come with three station * addresses: one for each port, plus an extra. The * extra one is used by the SysKonnect driver software * as a 'virtual' station address for when both ports * are operating in failover mode. Currently we don't * use this extra address. */ - SK_LOCK(sc); + SK_IF_LOCK(sc_if); for (i = 0; i < ETHER_ADDR_LEN; i++) eaddr[i] = sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i); /* * Set up RAM buffer addresses. The NIC will have a certain * amount of SRAM on it, somewhere between 512K and 2MB. We * need to divide this up a) between the transmitter and * receiver and b) between the two XMACs, if this is a * dual port NIC. Our algotithm is to divide up the memory * evenly so that everyone gets a fair share. + * + * Just to be contrary, Yukon2 appears to have separate memory + * for each MAC. 
*/ - if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) { + if (SK_IS_YUKON2(sc) || + sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) { u_int32_t chunk, val; chunk = sc->sk_ramsize / 2; val = sc->sk_rboff / sizeof(u_int64_t); sc_if->sk_rx_ramstart = val; val += (chunk / sizeof(u_int64_t)); sc_if->sk_rx_ramend = val - 1; sc_if->sk_tx_ramstart = val; val += (chunk / sizeof(u_int64_t)); sc_if->sk_tx_ramend = val - 1; } else { u_int32_t chunk, val; chunk = sc->sk_ramsize / 4; val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) / sizeof(u_int64_t); sc_if->sk_rx_ramstart = val; val += (chunk / sizeof(u_int64_t)); sc_if->sk_rx_ramend = val - 1; sc_if->sk_tx_ramstart = val; val += (chunk / sizeof(u_int64_t)); sc_if->sk_tx_ramend = val - 1; } /* Read and save PHY type and set PHY address */ sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF; - switch(sc_if->sk_phytype) { - case SK_PHYTYPE_XMAC: - sc_if->sk_phyaddr = SK_PHYADDR_XMAC; - break; - case SK_PHYTYPE_BCOM: - sc_if->sk_phyaddr = SK_PHYADDR_BCOM; - break; - case SK_PHYTYPE_MARV_COPPER: + if (!SK_YUKON_FAMILY(sc->sk_type)) { + switch(sc_if->sk_phytype) { + case SK_PHYTYPE_XMAC: + sc_if->sk_phyaddr = SK_PHYADDR_XMAC; + break; + case SK_PHYTYPE_BCOM: + sc_if->sk_phyaddr = SK_PHYADDR_BCOM; + break; + default: + device_printf(sc->sk_dev, "unsupported PHY type: %d\n", + sc_if->sk_phytype); + error = ENODEV; + SK_IF_UNLOCK(sc_if); + goto fail; + } + } else { + if (sc_if->sk_phytype < SK_PHYTYPE_MARV_COPPER && + sc->sk_pmd != 'S') { + /* not initialized, punt */ + sc_if->sk_phytype = SK_PHYTYPE_MARV_COPPER; + sc->sk_coppertype = 1; + } + sc_if->sk_phyaddr = SK_PHYADDR_MARV; - break; - default: - printf("skc%d: unsupported PHY type: %d\n", - sc->sk_unit, sc_if->sk_phytype); - error = ENODEV; - SK_UNLOCK(sc); - goto fail; + + if (!(sc->sk_coppertype)) + sc_if->sk_phytype = SK_PHYTYPE_MARV_FIBER; } - /* * Call MI attach routine. Can't hold locks when calling into ether_*. 
*/ - SK_UNLOCK(sc); + SK_IF_UNLOCK(sc_if); ether_ifattach(ifp, eaddr); - SK_LOCK(sc); + SK_IF_LOCK(sc_if); /* + * The hardware should be ready for VLAN_MTU by default: + * XMAC II has 0x8100 in VLAN Tag Level 1 register initially; + * YU_SMR_MFL_VLAN is set by this driver in Yukon. + * + */ + ifp->if_capabilities |= IFCAP_VLAN_MTU; + ifp->if_capenable |= IFCAP_VLAN_MTU; + /* + * Tell the upper layer(s) we support long frames. + * Must appear after the call to ether_ifattach() because + * ether_ifattach() sets ifi_hdrlen to the default value. + */ + ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); + + /* * Do miibus setup. */ switch (sc->sk_type) { case SK_GENESIS: sk_init_xmac(sc_if); break; case SK_YUKON: case SK_YUKON_LITE: case SK_YUKON_LP: + case SK_YUKON_EC: sk_init_yukon(sc_if); break; } - SK_UNLOCK(sc); + SK_IF_UNLOCK(sc_if); if (mii_phy_probe(dev, &sc_if->sk_miibus, sk_ifmedia_upd, sk_ifmedia_sts)) { - printf("skc%d: no PHY found!\n", sc_if->sk_unit); + device_printf(sc_if->sk_if_dev, "no PHY found!\n"); ether_ifdetach(ifp); error = ENXIO; goto fail; } fail: if (error) { /* Access should be ok even though lock has been dropped */ sc->sk_if[port] = NULL; sk_detach(dev); } return(error); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int skc_attach(dev) device_t dev; { struct sk_softc *sc; - int unit, error = 0, rid, *port; + int error = 0, rid, *port, sk_macs; uint8_t skrs; char *pname, *revstr; sc = device_get_softc(dev); - unit = device_get_unit(dev); + sc->sk_dev = dev; mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, - MTX_DEF | MTX_RECURSE); + MTX_DEF); + mtx_init(&sc->sk_mii_mtx, "sk_mii_mutex", NULL, MTX_DEF); /* * Map control/status registers. 
*/ pci_enable_busmaster(dev); rid = SK_RID; sc->sk_res = bus_alloc_resource_any(dev, SK_RES, &rid, RF_ACTIVE); if (sc->sk_res == NULL) { - printf("sk%d: couldn't map ports/memory\n", unit); + device_printf(dev, "couldn't map ports/memory\n"); error = ENXIO; goto fail; } sc->sk_btag = rman_get_bustag(sc->sk_res); sc->sk_bhandle = rman_get_bushandle(sc->sk_res); sc->sk_type = sk_win_read_1(sc, SK_CHIPVER); sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4) & 0xf; /* Bail out if chip is not recognized. */ if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) { - printf("skc%d: unknown device: chipver=%02x, rev=%x\n", - unit, sc->sk_type, sc->sk_rev); + device_printf(dev, "unknown device: chipver=%02x, rev=%x\n", + sc->sk_type, sc->sk_rev); error = ENXIO; goto fail; } /* Allocate interrupt */ rid = 0; sc->sk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->sk_irq == NULL) { - printf("skc%d: couldn't map interrupt\n", unit); + device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "int_mod", CTLTYPE_INT|CTLFLAG_RW, &sc->sk_int_mod, 0, sysctl_hw_sk_int_mod, "I", "SK interrupt moderation"); /* Pull in device tunables. */ sc->sk_int_mod = SK_IM_DEFAULT; - error = resource_int_value(device_get_name(dev), unit, + error = resource_int_value(device_get_name(dev), device_get_unit(dev), "int_mod", &sc->sk_int_mod); if (error == 0) { if (sc->sk_int_mod < SK_IM_MIN || sc->sk_int_mod > SK_IM_MAX) { - printf("skc%d: int_mod value out of range; " - "using default: %d\n", unit, SK_IM_DEFAULT); + device_printf(dev, "int_mod value out of range; " + "using default: %d\n", SK_IM_DEFAULT); sc->sk_int_mod = SK_IM_DEFAULT; } } /* Reset the adapter. */ sk_reset(sc); - sc->sk_unit = unit; - /* Read and save vital product data from EEPROM. 
*/ sk_vpd_read(sc); skrs = sk_win_read_1(sc, SK_EPROM0); if (sc->sk_type == SK_GENESIS) { /* Read and save RAM size and RAMbuffer offset */ switch(skrs) { case SK_RAMSIZE_512K_64: sc->sk_ramsize = 0x80000; sc->sk_rboff = SK_RBOFF_0; break; case SK_RAMSIZE_1024K_64: sc->sk_ramsize = 0x100000; sc->sk_rboff = SK_RBOFF_80000; break; case SK_RAMSIZE_1024K_128: sc->sk_ramsize = 0x100000; sc->sk_rboff = SK_RBOFF_0; break; case SK_RAMSIZE_2048K_128: sc->sk_ramsize = 0x200000; sc->sk_rboff = SK_RBOFF_0; break; default: - printf("skc%d: unknown ram size: %d\n", - sc->sk_unit, skrs); + device_printf(dev, "unknown ram size: %d\n", skrs); error = ENXIO; goto fail; } } else { /* SK_YUKON_FAMILY */ if (skrs == 0x00) sc->sk_ramsize = 0x20000; else sc->sk_ramsize = skrs * (1<<12); sc->sk_rboff = SK_RBOFF_0; } /* Read and save physical media type */ - switch(sk_win_read_1(sc, SK_PMDTYPE)) { - case SK_PMD_1000BASESX: - sc->sk_pmd = IFM_1000_SX; - break; - case SK_PMD_1000BASELX: - sc->sk_pmd = IFM_1000_LX; - break; - case SK_PMD_1000BASECX: - sc->sk_pmd = IFM_1000_CX; - break; - case SK_PMD_1000BASETX: - sc->sk_pmd = IFM_1000_T; - break; - default: - printf("skc%d: unknown media type: 0x%x\n", - sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE)); - error = ENXIO; - goto fail; - } + sc->sk_pmd = sk_win_read_1(sc, SK_PMDTYPE); + if (sc->sk_pmd == 'T' || sc->sk_pmd == '1') + sc->sk_coppertype = 1; + else + sc->sk_coppertype = 0; + /* Determine whether to name it with VPD PN or just make it up. * Marvell Yukon VPD PN seems to freqently be bogus. */ switch (pci_get_device(dev)) { case DEVICEID_SK_V1: case DEVICEID_BELKIN_5005: case DEVICEID_3COM_3C940: case DEVICEID_LINKSYS_EG1032: case DEVICEID_DLINK_DGE530T: /* Stay with VPD PN. */ pname = sc->sk_vpd_prodname; break; case DEVICEID_SK_V2: + case DEVICEID_MRVL_4360: + case DEVICEID_MRVL_4361: + case DEVICEID_MRVL_4362: /* YUKON VPD PN might bear no resemblance to reality. */ switch (sc->sk_type) { case SK_GENESIS: /* Stay with VPD PN. 
*/ pname = sc->sk_vpd_prodname; break; case SK_YUKON: pname = "Marvell Yukon Gigabit Ethernet"; break; case SK_YUKON_LITE: pname = "Marvell Yukon Lite Gigabit Ethernet"; break; case SK_YUKON_LP: pname = "Marvell Yukon LP Gigabit Ethernet"; break; + case SK_YUKON_EC: + pname = "Marvell Yukon-2 EC Gigabit Ethernet"; + break; default: pname = "Marvell Yukon (Unknown) Gigabit Ethernet"; break; } /* Yukon Lite Rev. A0 needs special test. */ if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) { u_int32_t far; u_int8_t testbyte; /* Save flash address register before testing. */ far = sk_win_read_4(sc, SK_EP_ADDR); sk_win_write_1(sc, SK_EP_ADDR+0x03, 0xff); testbyte = sk_win_read_1(sc, SK_EP_ADDR+0x03); if (testbyte != 0x00) { /* Yukon Lite Rev. A0 detected. */ sc->sk_type = SK_YUKON_LITE; sc->sk_rev = SK_YUKON_LITE_REV_A0; /* Restore flash address register. */ sk_win_write_4(sc, SK_EP_ADDR, far); } } break; default: device_printf(dev, "unknown device: vendor=%04x, device=%04x, " "chipver=%02x, rev=%x\n", pci_get_vendor(dev), pci_get_device(dev), sc->sk_type, sc->sk_rev); error = ENXIO; goto fail; } if (sc->sk_type == SK_YUKON_LITE) { switch (sc->sk_rev) { case SK_YUKON_LITE_REV_A0: revstr = "A0"; break; case SK_YUKON_LITE_REV_A1: revstr = "A1"; break; case SK_YUKON_LITE_REV_A3: revstr = "A3"; break; default: revstr = ""; break; } + } else if (sc->sk_type == SK_YUKON_EC) { + switch (sc->sk_rev) { + case SK_YUKON_EC_REV_A1: + revstr = "A1"; + break; + case SK_YUKON_EC_REV_A2: + revstr = "A2"; + break; + case SK_YUKON_EC_REV_A3: + revstr = "A3"; + break; + default: + revstr = ""; + break; + } } else { revstr = ""; } /* Announce the product name and more VPD data if there. */ device_printf(dev, "%s rev. %s(0x%x)\n", pname != NULL ? 
pname : "", revstr, sc->sk_rev); if (bootverbose) { if (sc->sk_vpd_readonly != NULL && sc->sk_vpd_readonly_len != 0) { char buf[256]; char *dp = sc->sk_vpd_readonly; uint16_t l, len = sc->sk_vpd_readonly_len; while (len >= 3) { if ((*dp == 'P' && *(dp+1) == 'N') || (*dp == 'E' && *(dp+1) == 'C') || (*dp == 'M' && *(dp+1) == 'N') || (*dp == 'S' && *(dp+1) == 'N')) { l = 0; while (l < *(dp+2)) { buf[l] = *(dp+3+l); ++l; } buf[l] = '\0'; device_printf(dev, "%c%c: %s\n", *dp, *(dp+1), buf); len -= (3 + l); dp += (3 + l); } else { len -= (3 + *(dp+2)); dp += (3 + *(dp+2)); } } } device_printf(dev, "chip ver = 0x%02x\n", sc->sk_type); device_printf(dev, "chip rev = 0x%02x\n", sc->sk_rev); device_printf(dev, "SK_EPROM0 = 0x%02x\n", skrs); device_printf(dev, "SRAM size = 0x%06x\n", sc->sk_ramsize); } sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1); if (sc->sk_devs[SK_PORT_A] == NULL) { device_printf(dev, "failed to add child for PORT_A\n"); error = ENXIO; goto fail; } port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT); if (port == NULL) { device_printf(dev, "failed to allocate memory for " "ivars of PORT_A\n"); error = ENXIO; goto fail; } *port = SK_PORT_A; device_set_ivars(sc->sk_devs[SK_PORT_A], port); - if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) { + sk_macs = 1; + + if (SK_IS_YUKON2(sc)) { + u_int8_t hw; + + hw = sk_win_read_1(sc, SK_Y2_HWRES); + if ((hw & SK_Y2_HWRES_LINK_MASK) == SK_Y2_HWRES_LINK_DUAL) { + if ((sk_win_read_1(sc, SK_Y2_CLKGATE) & + SK_Y2_CLKGATE_LINK2_INACTIVE) == 0) + sk_macs++; + } + } else { + if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) + sk_macs++; + } + + if (sk_macs > 1) { sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1); if (sc->sk_devs[SK_PORT_B] == NULL) { device_printf(dev, "failed to add child for PORT_B\n"); error = ENXIO; goto fail; } port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT); if (port == NULL) { device_printf(dev, "failed to allocate memory for " "ivars of PORT_B\n"); error = ENXIO; goto 
fail; } *port = SK_PORT_B; device_set_ivars(sc->sk_devs[SK_PORT_B], port); } /* Turn on the 'driver is loaded' LED. */ CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON); error = bus_generic_attach(dev); if (error) { device_printf(dev, "failed to attach port(s)\n"); goto fail; } /* Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET|INTR_MPSAFE, sk_intr, sc, &sc->sk_intrhand); if (error) { - printf("skc%d: couldn't set up irq\n", unit); + device_printf(dev, "couldn't set up irq\n"); goto fail; } fail: if (error) skc_detach(dev); return(error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. */ static int sk_detach(dev) device_t dev; { struct sk_if_softc *sc_if; struct ifnet *ifp; sc_if = device_get_softc(dev); KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx), ("sk mutex not initialized in sk_detach")); SK_IF_LOCK(sc_if); ifp = sc_if->sk_ifp; /* These should only be active if attach_xmac succeeded */ if (device_is_attached(dev)) { sk_stop(sc_if); /* Can't hold locks while calling detach */ SK_IF_UNLOCK(sc_if); + callout_drain(&sc_if->sk_tick_ch); ether_ifdetach(ifp); SK_IF_LOCK(sc_if); } if (ifp) if_free(ifp); /* * We're generally called from skc_detach() which is using * device_delete_child() to get to here. It's already trashed * miibus for us, so don't do it here or we'll panic. 
*/ /* if (sc_if->sk_miibus != NULL) device_delete_child(dev, sc_if->sk_miibus); */ bus_generic_detach(dev); - if (sc_if->sk_cdata.sk_jumbo_buf != NULL) - sk_free_jumbo_mem(sc_if); - if (sc_if->sk_rdata != NULL) { - contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data), - M_DEVBUF); - } + sk_dma_free(sc_if); SK_IF_UNLOCK(sc_if); return(0); } static int skc_detach(dev) device_t dev; { struct sk_softc *sc; sc = device_get_softc(dev); KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized")); if (device_is_alive(dev)) { if (sc->sk_devs[SK_PORT_A] != NULL) { free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF); device_delete_child(dev, sc->sk_devs[SK_PORT_A]); } if (sc->sk_devs[SK_PORT_B] != NULL) { free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF); device_delete_child(dev, sc->sk_devs[SK_PORT_B]); } bus_generic_detach(dev); } if (sc->sk_vpd_prodname != NULL) free(sc->sk_vpd_prodname, M_DEVBUF); if (sc->sk_vpd_readonly != NULL) free(sc->sk_vpd_readonly, M_DEVBUF); if (sc->sk_intrhand) bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand); if (sc->sk_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq); if (sc->sk_res) bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res); + mtx_destroy(&sc->sk_mii_mtx); mtx_destroy(&sc->sk_mtx); return(0); } +struct sk_dmamap_arg { + bus_addr_t sk_busaddr; +}; + +static void +sk_dmamap_cb(arg, segs, nseg, error) + void *arg; + bus_dma_segment_t *segs; + int nseg; + int error; +{ + struct sk_dmamap_arg *ctx; + + if (error != 0) + return; + + ctx = arg; + ctx->sk_busaddr = segs[0].ds_addr; +} + +/* + * Allocate jumbo buffer storage. The SysKonnect adapters support + * "jumbograms" (9K frames), although SysKonnect doesn't currently + * use them in their drivers. In order for us to use them, we need + * large 9K receive buffers, however standard mbuf clusters are only + * 2048 bytes in size. Consequently, we need to allocate and manage + * our own jumbo buffer pool. 
Fortunately, this does not require an + * excessive amount of additional code. + */ static int -sk_encap(sc_if, m_head, txidx) +sk_dma_alloc(sc_if) + struct sk_if_softc *sc_if; +{ + struct sk_dmamap_arg ctx; + struct sk_txdesc *txd; + struct sk_rxdesc *rxd; + struct sk_rxdesc *jrxd; + u_int8_t *ptr; + struct sk_jpool_entry *entry; + int error, i; + + mtx_init(&sc_if->sk_jlist_mtx, "sk_jlist_mtx", NULL, MTX_DEF); + SLIST_INIT(&sc_if->sk_jfree_listhead); + SLIST_INIT(&sc_if->sk_jinuse_listhead); + + /* create parent tag */ + /* + * XXX + * This driver should use BUS_SPACE_MAXADDR for lowaddr argument + * in bus_dma_tag_create(9) as the NIC would support DAC mode. + * However bz@ reported that it does not work on amd64 with > 4GB + * RAM. Until we have more clues of the breakage, disable DAC mode + * by limiting DMA address to be in 32bit address space. + */ + error = bus_dma_tag_create(NULL, /* parent */ + 1, 0, /* algnmnt, boundary */ + BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ + 0, /* nsegments */ + BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &sc_if->sk_cdata.sk_parent_tag); + if (error != 0) { + device_printf(sc_if->sk_if_dev, + "failed to create parent DMA tag\n"); + goto fail; + } + /* create tag for Tx ring */ + error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ + SK_RING_ALIGN, 0, /* algnmnt, boundary */ + BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + SK_TX_RING_SZ, /* maxsize */ + 1, /* nsegments */ + SK_TX_RING_SZ, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &sc_if->sk_cdata.sk_tx_ring_tag); + if (error != 0) { + device_printf(sc_if->sk_if_dev, + "failed to allocate Tx ring DMA tag\n"); + goto fail; + } + + /* create tag for Rx ring */ + error = 
bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ + SK_RING_ALIGN, 0, /* algnmnt, boundary */ + BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + SK_RX_RING_SZ, /* maxsize */ + 1, /* nsegments */ + SK_RX_RING_SZ, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &sc_if->sk_cdata.sk_rx_ring_tag); + if (error != 0) { + device_printf(sc_if->sk_if_dev, + "failed to allocate Rx ring DMA tag\n"); + goto fail; + } + + /* create tag for jumbo Rx ring */ + error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ + SK_RING_ALIGN, 0, /* algnmnt, boundary */ + BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + SK_JUMBO_RX_RING_SZ, /* maxsize */ + 1, /* nsegments */ + SK_JUMBO_RX_RING_SZ, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &sc_if->sk_cdata.sk_jumbo_rx_ring_tag); + if (error != 0) { + device_printf(sc_if->sk_if_dev, + "failed to allocate jumbo Rx ring DMA tag\n"); + goto fail; + } + + /* create tag for jumbo buffer blocks */ + error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ + PAGE_SIZE, 0, /* algnmnt, boundary */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + SK_JMEM, /* maxsize */ + 1, /* nsegments */ + SK_JMEM, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &sc_if->sk_cdata.sk_jumbo_tag); + if (error != 0) { + device_printf(sc_if->sk_if_dev, + "failed to allocate jumbo Rx buffer block DMA tag\n"); + goto fail; + } + + /* create tag for Tx buffers */ + error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ + 1, 0, /* algnmnt, boundary */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + MCLBYTES * SK_MAXTXSEGS, /* maxsize */ + SK_MAXTXSEGS, /* nsegments */ + MCLBYTES, /* 
maxsegsize */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &sc_if->sk_cdata.sk_tx_tag); + if (error != 0) { + device_printf(sc_if->sk_if_dev, + "failed to allocate Tx DMA tag\n"); + goto fail; + } + + /* create tag for Rx buffers */ + error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ + 1, 0, /* algnmnt, boundary */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + MCLBYTES, /* maxsize */ + 1, /* nsegments */ + MCLBYTES, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &sc_if->sk_cdata.sk_rx_tag); + if (error != 0) { + device_printf(sc_if->sk_if_dev, + "failed to allocate Rx DMA tag\n"); + goto fail; + } + + /* create tag for jumbo Rx buffers */ + error = bus_dma_tag_create(sc_if->sk_cdata.sk_parent_tag,/* parent */ + PAGE_SIZE, 0, /* algnmnt, boundary */ + BUS_SPACE_MAXADDR, /* lowaddr */ + BUS_SPACE_MAXADDR, /* highaddr */ + NULL, NULL, /* filter, filterarg */ + MCLBYTES * SK_MAXRXSEGS, /* maxsize */ + SK_MAXRXSEGS, /* nsegments */ + SK_JLEN, /* maxsegsize */ + 0, /* flags */ + NULL, NULL, /* lockfunc, lockarg */ + &sc_if->sk_cdata.sk_jumbo_rx_tag); + if (error != 0) { + device_printf(sc_if->sk_if_dev, + "failed to allocate jumbo Rx DMA tag\n"); + goto fail; + } + + /* allocate DMA'able memory and load the DMA map for Tx ring */ + error = bus_dmamem_alloc(sc_if->sk_cdata.sk_tx_ring_tag, + (void **)&sc_if->sk_rdata.sk_tx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO, + &sc_if->sk_cdata.sk_tx_ring_map); + if (error != 0) { + device_printf(sc_if->sk_if_dev, + "failed to allocate DMA'able memory for Tx ring\n"); + goto fail; + } + + ctx.sk_busaddr = 0; + error = bus_dmamap_load(sc_if->sk_cdata.sk_tx_ring_tag, + sc_if->sk_cdata.sk_tx_ring_map, sc_if->sk_rdata.sk_tx_ring, + SK_TX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT); + if (error != 0) { + device_printf(sc_if->sk_if_dev, + "failed to load DMA'able memory for Tx ring\n"); + goto fail; + } + 
sc_if->sk_rdata.sk_tx_ring_paddr = ctx.sk_busaddr; + + /* allocate DMA'able memory and load the DMA map for Rx ring */ + error = bus_dmamem_alloc(sc_if->sk_cdata.sk_rx_ring_tag, + (void **)&sc_if->sk_rdata.sk_rx_ring, BUS_DMA_NOWAIT | BUS_DMA_ZERO, + &sc_if->sk_cdata.sk_rx_ring_map); + if (error != 0) { + device_printf(sc_if->sk_if_dev, + "failed to allocate DMA'able memory for Rx ring\n"); + goto fail; + } + + ctx.sk_busaddr = 0; + error = bus_dmamap_load(sc_if->sk_cdata.sk_rx_ring_tag, + sc_if->sk_cdata.sk_rx_ring_map, sc_if->sk_rdata.sk_rx_ring, + SK_RX_RING_SZ, sk_dmamap_cb, &ctx, BUS_DMA_NOWAIT); + if (error != 0) { + device_printf(sc_if->sk_if_dev, + "failed to load DMA'able memory for Rx ring\n"); + goto fail; + } + sc_if->sk_rdata.sk_rx_ring_paddr = ctx.sk_busaddr; + + /* allocate DMA'able memory and load the DMA map for jumbo Rx ring */ + error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, + (void **)&sc_if->sk_rdata.sk_jumbo_rx_ring, + BUS_DMA_NOWAIT|BUS_DMA_ZERO, &sc_if->sk_cdata.sk_jumbo_rx_ring_map); + if (error != 0) { + device_printf(sc_if->sk_if_dev, + "failed to allocate DMA'able memory for jumbo Rx ring\n"); + goto fail; + } + + ctx.sk_busaddr = 0; + error = bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, + sc_if->sk_cdata.sk_jumbo_rx_ring_map, + sc_if->sk_rdata.sk_jumbo_rx_ring, SK_JUMBO_RX_RING_SZ, sk_dmamap_cb, + &ctx, BUS_DMA_NOWAIT); + if (error != 0) { + device_printf(sc_if->sk_if_dev, + "failed to load DMA'able memory for jumbo Rx ring\n"); + goto fail; + } + sc_if->sk_rdata.sk_jumbo_rx_ring_paddr = ctx.sk_busaddr; + + /* create DMA maps for Tx buffers */ + for (i = 0; i < SK_TX_RING_CNT; i++) { + txd = &sc_if->sk_cdata.sk_txdesc[i]; + txd->tx_m = NULL; + txd->tx_dmamap = 0; + error = bus_dmamap_create(sc_if->sk_cdata.sk_tx_tag, 0, + &txd->tx_dmamap); + if (error != 0) { + device_printf(sc_if->sk_if_dev, + "failed to create Tx dmamap\n"); + goto fail; + } + } + /* create DMA maps for Rx buffers */ + if ((error = 
bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0, + &sc_if->sk_cdata.sk_rx_sparemap)) != 0) { + device_printf(sc_if->sk_if_dev, + "failed to create spare Rx dmamap\n"); + goto fail; + } + for (i = 0; i < SK_RX_RING_CNT; i++) { + rxd = &sc_if->sk_cdata.sk_rxdesc[i]; + rxd->rx_m = NULL; + rxd->rx_dmamap = 0; + error = bus_dmamap_create(sc_if->sk_cdata.sk_rx_tag, 0, + &rxd->rx_dmamap); + if (error != 0) { + device_printf(sc_if->sk_if_dev, + "failed to create Rx dmamap\n"); + goto fail; + } + } + /* create DMA maps for jumbo Rx buffers */ + if ((error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0, + &sc_if->sk_cdata.sk_jumbo_rx_sparemap)) != 0) { + device_printf(sc_if->sk_if_dev, + "failed to create spare jumbo Rx dmamap\n"); + goto fail; + } + for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) { + jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i]; + jrxd->rx_m = NULL; + jrxd->rx_dmamap = 0; + error = bus_dmamap_create(sc_if->sk_cdata.sk_jumbo_rx_tag, 0, + &jrxd->rx_dmamap); + if (error != 0) { + device_printf(sc_if->sk_if_dev, + "failed to create jumbo Rx dmamap\n"); + goto fail; + } + } + + /* allocate DMA'able memory and load the DMA map for jumbo buf */ + error = bus_dmamem_alloc(sc_if->sk_cdata.sk_jumbo_tag, + (void **)&sc_if->sk_rdata.sk_jumbo_buf, + BUS_DMA_NOWAIT|BUS_DMA_ZERO, &sc_if->sk_cdata.sk_jumbo_map); + if (error != 0) { + device_printf(sc_if->sk_if_dev, + "failed to allocate DMA'able memory for jumbo buf\n"); + goto fail; + } + + ctx.sk_busaddr = 0; + error = bus_dmamap_load(sc_if->sk_cdata.sk_jumbo_tag, + sc_if->sk_cdata.sk_jumbo_map, + sc_if->sk_rdata.sk_jumbo_buf, SK_JMEM, sk_dmamap_cb, + &ctx, BUS_DMA_NOWAIT); + if (error != 0) { + device_printf(sc_if->sk_if_dev, + "failed to load DMA'able memory for jumbobuf\n"); + goto fail; + } + sc_if->sk_rdata.sk_jumbo_buf_paddr = ctx.sk_busaddr; + + /* + * Now divide it up into 9K pieces and save the addresses + * in an array. 
+ */ + ptr = sc_if->sk_rdata.sk_jumbo_buf; + for (i = 0; i < SK_JSLOTS; i++) { + sc_if->sk_cdata.sk_jslots[i] = ptr; + ptr += SK_JLEN; + entry = malloc(sizeof(struct sk_jpool_entry), + M_DEVBUF, M_NOWAIT); + if (entry == NULL) { + device_printf(sc_if->sk_if_dev, + "no memory for jumbo buffers!\n"); + error = ENOMEM; + goto fail; + } + entry->slot = i; + SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, + jpool_entries); + } + +fail: + return (error); +} + +static void +sk_dma_free(sc_if) + struct sk_if_softc *sc_if; +{ + struct sk_txdesc *txd; + struct sk_rxdesc *rxd; + struct sk_rxdesc *jrxd; + struct sk_jpool_entry *entry; + int i; + + SK_JLIST_LOCK(sc_if); + while ((entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead))) { + device_printf(sc_if->sk_if_dev, + "asked to free buffer that is in use!\n"); + SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries); + SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, + jpool_entries); + } + + while (!SLIST_EMPTY(&sc_if->sk_jfree_listhead)) { + entry = SLIST_FIRST(&sc_if->sk_jfree_listhead); + SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries); + free(entry, M_DEVBUF); + } + SK_JLIST_UNLOCK(sc_if); + + /* destroy jumbo buffer block */ + if (sc_if->sk_cdata.sk_jumbo_map) + bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_tag, + sc_if->sk_cdata.sk_jumbo_map); + + if (sc_if->sk_rdata.sk_jumbo_buf) { + bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_tag, + sc_if->sk_rdata.sk_jumbo_buf, + sc_if->sk_cdata.sk_jumbo_map); + sc_if->sk_rdata.sk_jumbo_buf = NULL; + sc_if->sk_cdata.sk_jumbo_map = 0; + } + + /* Tx ring */ + if (sc_if->sk_cdata.sk_tx_ring_tag) { + if (sc_if->sk_cdata.sk_tx_ring_map) + bus_dmamap_unload(sc_if->sk_cdata.sk_tx_ring_tag, + sc_if->sk_cdata.sk_tx_ring_map); + if (sc_if->sk_cdata.sk_tx_ring_map && + sc_if->sk_rdata.sk_tx_ring) + bus_dmamem_free(sc_if->sk_cdata.sk_tx_ring_tag, + sc_if->sk_rdata.sk_tx_ring, + sc_if->sk_cdata.sk_tx_ring_map); + sc_if->sk_rdata.sk_tx_ring = NULL; + sc_if->sk_cdata.sk_tx_ring_map 
= 0; + bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_ring_tag); + sc_if->sk_cdata.sk_tx_ring_tag = NULL; + } + /* Rx ring */ + if (sc_if->sk_cdata.sk_rx_ring_tag) { + if (sc_if->sk_cdata.sk_rx_ring_map) + bus_dmamap_unload(sc_if->sk_cdata.sk_rx_ring_tag, + sc_if->sk_cdata.sk_rx_ring_map); + if (sc_if->sk_cdata.sk_rx_ring_map && + sc_if->sk_rdata.sk_rx_ring) + bus_dmamem_free(sc_if->sk_cdata.sk_rx_ring_tag, + sc_if->sk_rdata.sk_rx_ring, + sc_if->sk_cdata.sk_rx_ring_map); + sc_if->sk_rdata.sk_rx_ring = NULL; + sc_if->sk_cdata.sk_rx_ring_map = 0; + bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_ring_tag); + sc_if->sk_cdata.sk_rx_ring_tag = NULL; + } + /* jumbo Rx ring */ + if (sc_if->sk_cdata.sk_jumbo_rx_ring_tag) { + if (sc_if->sk_cdata.sk_jumbo_rx_ring_map) + bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, + sc_if->sk_cdata.sk_jumbo_rx_ring_map); + if (sc_if->sk_cdata.sk_jumbo_rx_ring_map && + sc_if->sk_rdata.sk_jumbo_rx_ring) + bus_dmamem_free(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, + sc_if->sk_rdata.sk_jumbo_rx_ring, + sc_if->sk_cdata.sk_jumbo_rx_ring_map); + sc_if->sk_rdata.sk_jumbo_rx_ring = NULL; + sc_if->sk_cdata.sk_jumbo_rx_ring_map = 0; + bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_ring_tag); + sc_if->sk_cdata.sk_jumbo_rx_ring_tag = NULL; + } + /* Tx buffers */ + if (sc_if->sk_cdata.sk_tx_tag) { + for (i = 0; i < SK_TX_RING_CNT; i++) { + txd = &sc_if->sk_cdata.sk_txdesc[i]; + if (txd->tx_dmamap) { + bus_dmamap_destroy(sc_if->sk_cdata.sk_tx_tag, + txd->tx_dmamap); + txd->tx_dmamap = 0; + } + } + bus_dma_tag_destroy(sc_if->sk_cdata.sk_tx_tag); + sc_if->sk_cdata.sk_tx_tag = NULL; + } + /* Rx buffers */ + if (sc_if->sk_cdata.sk_rx_tag) { + for (i = 0; i < SK_RX_RING_CNT; i++) { + rxd = &sc_if->sk_cdata.sk_rxdesc[i]; + if (rxd->rx_dmamap) { + bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag, + rxd->rx_dmamap); + rxd->rx_dmamap = 0; + } + } + if (sc_if->sk_cdata.sk_rx_sparemap) { + bus_dmamap_destroy(sc_if->sk_cdata.sk_rx_tag, + sc_if->sk_cdata.sk_rx_sparemap); + 
sc_if->sk_cdata.sk_rx_sparemap = 0; + } + bus_dma_tag_destroy(sc_if->sk_cdata.sk_rx_tag); + sc_if->sk_cdata.sk_rx_tag = NULL; + } + /* jumbo Rx buffers */ + if (sc_if->sk_cdata.sk_jumbo_rx_tag) { + for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) { + jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i]; + if (jrxd->rx_dmamap) { + bus_dmamap_destroy( + sc_if->sk_cdata.sk_jumbo_rx_tag, + jrxd->rx_dmamap); + jrxd->rx_dmamap = 0; + } + } + if (sc_if->sk_cdata.sk_jumbo_rx_sparemap) { + bus_dmamap_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag, + sc_if->sk_cdata.sk_jumbo_rx_sparemap); + sc_if->sk_cdata.sk_jumbo_rx_sparemap = 0; + } + bus_dma_tag_destroy(sc_if->sk_cdata.sk_jumbo_rx_tag); + sc_if->sk_cdata.sk_jumbo_rx_tag = NULL; + } + + if (sc_if->sk_cdata.sk_parent_tag) { + bus_dma_tag_destroy(sc_if->sk_cdata.sk_parent_tag); + sc_if->sk_cdata.sk_parent_tag = NULL; + } + mtx_destroy(&sc_if->sk_jlist_mtx); +} + +/* + * Allocate a jumbo buffer. + */ +static void * +sk_jalloc(sc_if) + struct sk_if_softc *sc_if; +{ + struct sk_jpool_entry *entry; + + SK_JLIST_LOCK(sc_if); + + entry = SLIST_FIRST(&sc_if->sk_jfree_listhead); + + if (entry == NULL) { + SK_JLIST_UNLOCK(sc_if); + return (NULL); + } + + SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries); + SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries); + + SK_JLIST_UNLOCK(sc_if); + + return (sc_if->sk_cdata.sk_jslots[entry->slot]); +} + +/* + * Release a jumbo buffer. + */ +static void +sk_jfree(buf, args) + void *buf; + void *args; +{ + struct sk_if_softc *sc_if; + struct sk_jpool_entry *entry; + int i; + + /* Extract the softc struct pointer. 
*/ + sc_if = (struct sk_if_softc *)args; + KASSERT(sc_if != NULL, ("%s: can't find softc pointer!", __func__)); + + SK_JLIST_LOCK(sc_if); + /* calculate the slot this buffer belongs to */ + i = ((vm_offset_t)buf + - (vm_offset_t)sc_if->sk_rdata.sk_jumbo_buf) / SK_JLEN; + KASSERT(i >= 0 && i < SK_JSLOTS, + ("%s: asked to free buffer that we don't manage!", __func__)); + + entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead); + KASSERT(entry != NULL, ("%s: buffer not in use!", __func__)); + entry->slot = i; + SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries); + SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries); + if (SLIST_EMPTY(&sc_if->sk_jinuse_listhead)) + wakeup(sc_if); + + SK_JLIST_UNLOCK(sc_if); +} + +static void +sk_txcksum(ifp, m, f) + struct ifnet *ifp; + struct mbuf *m; + struct sk_tx_desc *f; +{ + struct ip *ip; + u_int16_t offset; + u_int8_t *p; + + offset = sizeof(struct ip) + ETHER_HDR_LEN; + for(; m && m->m_len == 0; m = m->m_next) + ; + if (m == NULL || m->m_len < ETHER_HDR_LEN) { + if_printf(ifp, "%s: m_len < ETHER_HDR_LEN\n", __func__); + /* checksum may be corrupted */ + goto sendit; + } + if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) { + if (m->m_len != ETHER_HDR_LEN) { + if_printf(ifp, "%s: m_len != ETHER_HDR_LEN\n", + __func__); + /* checksum may be corrupted */ + goto sendit; + } + for(m = m->m_next; m && m->m_len == 0; m = m->m_next) + ; + if (m == NULL) { + offset = sizeof(struct ip) + ETHER_HDR_LEN; + /* checksum may be corrupted */ + goto sendit; + } + ip = mtod(m, struct ip *); + } else { + p = mtod(m, u_int8_t *); + p += ETHER_HDR_LEN; + ip = (struct ip *)p; + } + offset = (ip->ip_hl << 2) + ETHER_HDR_LEN; + +sendit: + f->sk_csum_startval = 0; + f->sk_csum_start = htole32(((offset + m->m_pkthdr.csum_data) & 0xffff) | + (offset << 16)); +} + +static int +sk_encap(sc_if, m_head) struct sk_if_softc *sc_if; - struct mbuf *m_head; - u_int32_t *txidx; + struct mbuf **m_head; { + struct sk_txdesc *txd; struct 
sk_tx_desc *f = NULL; - struct mbuf *m; - u_int32_t frag, cur, cnt = 0; + struct mbuf *m, *n; + bus_dma_segment_t txsegs[SK_MAXTXSEGS]; + u_int32_t cflags, frag, si, sk_ctl; + int error, i, nseg; SK_IF_LOCK_ASSERT(sc_if); - m = m_head; - cur = frag = *txidx; + if ((txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txfreeq)) == NULL) + return (ENOBUFS); - /* - * Start packing the mbufs in this chain into - * the fragment pointers. Stop when we run out - * of fragments or hit the end of the mbuf chain. - */ - for (m = m_head; m != NULL; m = m->m_next) { - if (m->m_len != 0) { - if ((SK_TX_RING_CNT - - (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2) - return(ENOBUFS); - f = &sc_if->sk_rdata->sk_tx_ring[frag]; - f->sk_data_lo = vtophys(mtod(m, vm_offset_t)); - f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT; - if (cnt == 0) - f->sk_ctl |= SK_TXCTL_FIRSTFRAG; - else - f->sk_ctl |= SK_TXCTL_OWN; - cur = frag; - SK_INC(frag, SK_TX_RING_CNT); - cnt++; + m = *m_head; + error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag, + txd->tx_dmamap, m, txsegs, &nseg, 0); + if (error == EFBIG) { + n = m_defrag(m, M_DONTWAIT); + if (n == NULL) { + m_freem(m); + m = NULL; + return (ENOMEM); } + m = n; + error = bus_dmamap_load_mbuf_sg(sc_if->sk_cdata.sk_tx_tag, + txd->tx_dmamap, m, txsegs, &nseg, 0); + if (error != 0) { + m_freem(m); + m = NULL; + return (error); + } + } else if (error != 0) + return (error); + if (nseg == 0) { + m_freem(m); + m = NULL; + return (EIO); } + if (sc_if->sk_cdata.sk_tx_cnt + nseg >= SK_TX_RING_CNT) { + bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap); + return (ENOBUFS); + } - if (m != NULL) - return(ENOBUFS); + if ((m->m_pkthdr.csum_flags & sc_if->sk_ifp->if_hwassist) != 0) + cflags = SK_OPCODE_CSUM; + else + cflags = SK_OPCODE_DEFAULT; + si = frag = sc_if->sk_cdata.sk_tx_prod; + for (i = 0; i < nseg; i++) { + f = &sc_if->sk_rdata.sk_tx_ring[frag]; + f->sk_data_lo = htole32(SK_ADDR_LO(txsegs[i].ds_addr)); + f->sk_data_hi = htole32(SK_ADDR_HI(txsegs[i].ds_addr)); + 
sk_ctl = txsegs[i].ds_len | cflags; + if (i == 0) { + if (cflags == SK_OPCODE_CSUM) + sk_txcksum(sc_if->sk_ifp, m, f); + sk_ctl |= SK_TXCTL_FIRSTFRAG; + } else + sk_ctl |= SK_TXCTL_OWN; + f->sk_ctl = htole32(sk_ctl); + sc_if->sk_cdata.sk_tx_cnt++; + SK_INC(frag, SK_TX_RING_CNT); + } + sc_if->sk_cdata.sk_tx_prod = frag; - sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |= - SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR; - sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head; - sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN; - sc_if->sk_cdata.sk_tx_cnt += cnt; + /* set EOF on the last desciptor */ + frag = (frag + SK_TX_RING_CNT - 1) % SK_TX_RING_CNT; + f = &sc_if->sk_rdata.sk_tx_ring[frag]; + f->sk_ctl |= htole32(SK_TXCTL_LASTFRAG | SK_TXCTL_EOF_INTR); - *txidx = frag; + /* turn the first descriptor ownership to NIC */ + f = &sc_if->sk_rdata.sk_tx_ring[si]; + f->sk_ctl |= htole32(SK_TXCTL_OWN); - return(0); + STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txfreeq, tx_q); + STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txbusyq, txd, tx_q); + txd->tx_m = m; + + /* sync descriptors */ + bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap, + BUS_DMASYNC_PREWRITE); + bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag, + sc_if->sk_cdata.sk_tx_ring_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + + return (0); } static void sk_start(ifp) struct ifnet *ifp; { struct sk_if_softc *sc_if; sc_if = ifp->if_softc; SK_IF_LOCK(sc_if); sk_start_locked(ifp); SK_IF_UNLOCK(sc_if); return; } static void sk_start_locked(ifp) struct ifnet *ifp; { struct sk_softc *sc; struct sk_if_softc *sc_if; - struct mbuf *m_head = NULL; - u_int32_t idx; + struct mbuf *m_head; + int enq; sc_if = ifp->if_softc; sc = sc_if->sk_softc; SK_IF_LOCK_ASSERT(sc_if); - idx = sc_if->sk_cdata.sk_tx_prod; - - while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) { + for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && + sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 1; ) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) 
break; /* * Pack the data into the transmit ring. If we * don't have room, set the OACTIVE flag and wait * for the NIC to drain the ring. */ - if (sk_encap(sc_if, m_head, &idx)) { + if (sk_encap(sc_if, &m_head)) { + if (m_head == NULL) + break; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } + enq++; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, m_head); } - /* Transmit */ - if (idx != sc_if->sk_cdata.sk_tx_prod) { - sc_if->sk_cdata.sk_tx_prod = idx; + if (enq > 0) { + /* Transmit */ CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START); /* Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; } - - return; } static void sk_watchdog(ifp) struct ifnet *ifp; { struct sk_if_softc *sc_if; sc_if = ifp->if_softc; - printf("sk%d: watchdog timeout\n", sc_if->sk_unit); SK_IF_LOCK(sc_if); - ifp->if_drv_flags &= ~IFF_DRV_RUNNING; - sk_init_locked(sc_if); + /* + * Reclaim first as there is a possibility of loosing Tx completion + * interrupt. + */ + sk_txeof(sc_if); + if (sc_if->sk_cdata.sk_tx_cnt != 0) { + if_printf(sc_if->sk_ifp, "watchdog timeout\n"); + ifp->if_oerrors++; + ifp->if_drv_flags &= ~IFF_DRV_RUNNING; + sk_init_locked(sc_if); + } SK_IF_UNLOCK(sc_if); return; } static void skc_shutdown(dev) device_t dev; { struct sk_softc *sc; sc = device_get_softc(dev); SK_LOCK(sc); /* Turn off the 'driver is loaded' LED. */ CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF); /* * Reset the GEnesis controller. Doing this should also * assert the resets on the attached XMAC(s). 
*/ sk_reset(sc); SK_UNLOCK(sc); return; } +static int +skc_suspend(dev) + device_t dev; +{ + struct sk_softc *sc; + struct sk_if_softc *sc_if0, *sc_if1; + struct ifnet *ifp0 = NULL, *ifp1 = NULL; + + sc = device_get_softc(dev); + + SK_LOCK(sc); + + sc_if0 = sc->sk_if[SK_PORT_A]; + sc_if1 = sc->sk_if[SK_PORT_B]; + if (sc_if0 != NULL) + ifp0 = sc_if0->sk_ifp; + if (sc_if1 != NULL) + ifp1 = sc_if1->sk_ifp; + if (ifp0 != NULL) + sk_stop(sc_if0); + if (ifp1 != NULL) + sk_stop(sc_if1); + sc->sk_suspended = 1; + + SK_UNLOCK(sc); + + return (0); +} + +static int +skc_resume(dev) + device_t dev; +{ + struct sk_softc *sc; + struct sk_if_softc *sc_if0, *sc_if1; + struct ifnet *ifp0 = NULL, *ifp1 = NULL; + + sc = device_get_softc(dev); + + SK_LOCK(sc); + + sc_if0 = sc->sk_if[SK_PORT_A]; + sc_if1 = sc->sk_if[SK_PORT_B]; + if (sc_if0 != NULL) + ifp0 = sc_if0->sk_ifp; + if (sc_if1 != NULL) + ifp1 = sc_if1->sk_ifp; + if (ifp0 != NULL && ifp0->if_flags & IFF_UP) + sk_init_locked(sc_if0); + if (ifp1 != NULL && ifp1->if_flags & IFF_UP) + sk_init_locked(sc_if1); + sc->sk_suspended = 0; + + SK_UNLOCK(sc); + + return (0); +} + +/* + * According to the data sheet from SK-NET GENESIS the hardware can compute + * two Rx checksums at the same time(Each checksum start position is + * programmed in Rx descriptors). However it seems that TCP/UDP checksum + * does not work at least on my Yukon hardware. I tried every possible ways + * to get correct checksum value but couldn't get correct one. So TCP/UDP + * checksum offload was disabled at the moment and only IP checksum offload + * was enabled. + * As nomral IP header size is 20 bytes I can't expect it would give an + * increase in throughput. However it seems it doesn't hurt performance in + * my testing. If there is a more detailed information for checksum secret + * of the hardware in question please contact yongari@FreeBSD.org to add + * TCP/UDP checksum offload support. 
+ */ +static __inline void +sk_rxcksum(ifp, m, csum) + struct ifnet *ifp; + struct mbuf *m; + u_int32_t csum; +{ + struct ether_header *eh; + struct ip *ip; + int32_t hlen, len, pktlen; + u_int16_t csum1, csum2, ipcsum; + + pktlen = m->m_pkthdr.len; + if (pktlen < sizeof(struct ether_header) + sizeof(struct ip)) + return; + eh = mtod(m, struct ether_header *); + if (eh->ether_type != htons(ETHERTYPE_IP)) + return; + ip = (struct ip *)(eh + 1); + if (ip->ip_v != IPVERSION) + return; + hlen = ip->ip_hl << 2; + pktlen -= sizeof(struct ether_header); + if (hlen < sizeof(struct ip)) + return; + if (ntohs(ip->ip_len) < hlen) + return; + if (ntohs(ip->ip_len) != pktlen) + return; + + csum1 = htons(csum & 0xffff); + csum2 = htons((csum >> 16) & 0xffff); + ipcsum = in_addword(csum1, ~csum2 & 0xffff); + /* checksum fixup for IP options */ + len = hlen - sizeof(struct ip); + if (len > 0) { + /* + * If the second checksum value is correct we can compute IP + * checksum with simple math. Unfortunately the second checksum + * value is wrong so we can't verify the checksum from the + * value(It seems there is some magic here to get correct + * value). If the second checksum value is correct it also + * means we can get TCP/UDP checksum) here. However, it still + * needs pseudo header checksum calculation due to hardware + * limitations. 
+ */ + return; + } + m->m_pkthdr.csum_flags = CSUM_IP_CHECKED; + if (ipcsum == 0xffff) + m->m_pkthdr.csum_flags |= CSUM_IP_VALID; +} + +static __inline int +sk_rxvalid(sc, stat, len) + struct sk_softc *sc; + u_int32_t stat, len; +{ + + if (sc->sk_type == SK_GENESIS) { + if ((stat & XM_RXSTAT_ERRFRAME) == XM_RXSTAT_ERRFRAME || + XM_RXSTAT_BYTES(stat) != len) + return (0); + } else { + if ((stat & (YU_RXSTAT_CRCERR | YU_RXSTAT_LONGERR | + YU_RXSTAT_MIIERR | YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | + YU_RXSTAT_JABBER)) != 0 || + (stat & YU_RXSTAT_RXOK) != YU_RXSTAT_RXOK || + YU_RXSTAT_BYTES(stat) != len) + return (0); + } + + return (1); +} + static void sk_rxeof(sc_if) struct sk_if_softc *sc_if; { struct sk_softc *sc; struct mbuf *m; struct ifnet *ifp; - struct sk_chain *cur_rx; - int total_len = 0; - int i; - u_int32_t rxstat; + struct sk_rx_desc *cur_rx; + struct sk_rxdesc *rxd; + int cons, prog; + u_int32_t csum, rxstat, sk_ctl; sc = sc_if->sk_softc; ifp = sc_if->sk_ifp; - i = sc_if->sk_cdata.sk_rx_prod; - cur_rx = &sc_if->sk_cdata.sk_rx_chain[i]; - SK_LOCK_ASSERT(sc); + SK_IF_LOCK_ASSERT(sc_if); - while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) { + bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag, + sc_if->sk_cdata.sk_rx_ring_map, BUS_DMASYNC_POSTREAD); - cur_rx = &sc_if->sk_cdata.sk_rx_chain[i]; - rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat; - m = cur_rx->sk_mbuf; - cur_rx->sk_mbuf = NULL; - total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl); - SK_INC(i, SK_RX_RING_CNT); + prog = 0; + for (cons = sc_if->sk_cdata.sk_rx_cons; prog < SK_RX_RING_CNT; + prog++, SK_INC(cons, SK_RX_RING_CNT)) { + cur_rx = &sc_if->sk_rdata.sk_rx_ring[cons]; + sk_ctl = le32toh(cur_rx->sk_ctl); + if ((sk_ctl & SK_RXCTL_OWN) != 0) + break; + rxd = &sc_if->sk_cdata.sk_rxdesc[cons]; + rxstat = le32toh(cur_rx->sk_xmac_rxstat); - if (rxstat & XM_RXSTAT_ERRFRAME) { + if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG | + SK_RXCTL_LASTFRAG)) != 
(SK_RXCTL_STATUS_VALID | + SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) || + SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN || + SK_RXBYTES(sk_ctl) > SK_MAX_FRAMELEN || + sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) { ifp->if_ierrors++; - sk_newbuf(sc_if, cur_rx, m); + sk_discard_rxbuf(sc_if, cons); continue; } - /* - * Try to allocate a new jumbo buffer. If that - * fails, copy the packet to mbufs and put the - * jumbo buffer back in the ring so it can be - * re-used. If allocating mbufs fails, then we - * have to drop the packet. - */ - if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) { - struct mbuf *m0; - m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, - ifp, NULL); - sk_newbuf(sc_if, cur_rx, m); - if (m0 == NULL) { - printf("sk%d: no receive buffers " - "available -- packet dropped!\n", - sc_if->sk_unit); - ifp->if_ierrors++; - continue; - } - m = m0; - } else { - m->m_pkthdr.rcvif = ifp; - m->m_pkthdr.len = m->m_len = total_len; + m = rxd->rx_m; + csum = le32toh(cur_rx->sk_csum); + if (sk_newbuf(sc_if, cons) != 0) { + ifp->if_iqdrops++; + /* reuse old buffer */ + sk_discard_rxbuf(sc_if, cons); + continue; } - + m->m_pkthdr.rcvif = ifp; + m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl); ifp->if_ipackets++; - SK_UNLOCK(sc); + if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) + sk_rxcksum(ifp, m, csum); + SK_IF_UNLOCK(sc_if); (*ifp->if_input)(ifp, m); - SK_LOCK(sc); + SK_IF_LOCK(sc_if); } - sc_if->sk_cdata.sk_rx_prod = i; + if (prog > 0) { + sc_if->sk_cdata.sk_rx_cons = cons; + bus_dmamap_sync(sc_if->sk_cdata.sk_rx_ring_tag, + sc_if->sk_cdata.sk_rx_ring_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + } +} - return; +static void +sk_jumbo_rxeof(sc_if) + struct sk_if_softc *sc_if; +{ + struct sk_softc *sc; + struct mbuf *m; + struct ifnet *ifp; + struct sk_rx_desc *cur_rx; + struct sk_rxdesc *jrxd; + int cons, prog; + u_int32_t csum, rxstat, sk_ctl; + + sc = sc_if->sk_softc; + ifp = sc_if->sk_ifp; + + SK_IF_LOCK_ASSERT(sc_if); + + 
bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, + sc_if->sk_cdata.sk_jumbo_rx_ring_map, BUS_DMASYNC_POSTREAD); + + prog = 0; + for (cons = sc_if->sk_cdata.sk_jumbo_rx_cons; + prog < SK_JUMBO_RX_RING_CNT; + prog++, SK_INC(cons, SK_JUMBO_RX_RING_CNT)) { + cur_rx = &sc_if->sk_rdata.sk_jumbo_rx_ring[cons]; + sk_ctl = le32toh(cur_rx->sk_ctl); + if ((sk_ctl & SK_RXCTL_OWN) != 0) + break; + jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[cons]; + rxstat = le32toh(cur_rx->sk_xmac_rxstat); + + if ((sk_ctl & (SK_RXCTL_STATUS_VALID | SK_RXCTL_FIRSTFRAG | + SK_RXCTL_LASTFRAG)) != (SK_RXCTL_STATUS_VALID | + SK_RXCTL_FIRSTFRAG | SK_RXCTL_LASTFRAG) || + SK_RXBYTES(sk_ctl) < SK_MIN_FRAMELEN || + SK_RXBYTES(sk_ctl) > SK_JUMBO_FRAMELEN || + sk_rxvalid(sc, rxstat, SK_RXBYTES(sk_ctl)) == 0) { + ifp->if_ierrors++; + sk_discard_jumbo_rxbuf(sc_if, cons); + continue; + } + + m = jrxd->rx_m; + csum = le32toh(cur_rx->sk_csum); + if (sk_jumbo_newbuf(sc_if, cons) != 0) { + ifp->if_iqdrops++; + /* reuse old buffer */ + sk_discard_jumbo_rxbuf(sc_if, cons); + continue; + } + m->m_pkthdr.rcvif = ifp; + m->m_pkthdr.len = m->m_len = SK_RXBYTES(sk_ctl); + ifp->if_ipackets++; + if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) + sk_rxcksum(ifp, m, csum); + SK_IF_UNLOCK(sc_if); + (*ifp->if_input)(ifp, m); + SK_IF_LOCK(sc_if); + } + + if (prog > 0) { + sc_if->sk_cdata.sk_jumbo_rx_cons = cons; + bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_ring_tag, + sc_if->sk_cdata.sk_jumbo_rx_ring_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); + } } static void sk_txeof(sc_if) struct sk_if_softc *sc_if; { struct sk_softc *sc; + struct sk_txdesc *txd; struct sk_tx_desc *cur_tx; struct ifnet *ifp; - u_int32_t idx; + u_int32_t idx, sk_ctl; sc = sc_if->sk_softc; ifp = sc_if->sk_ifp; + txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq); + if (txd == NULL) + return; + bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag, + sc_if->sk_cdata.sk_tx_ring_map, BUS_DMASYNC_POSTREAD); /* * Go through our tx ring and free mbufs for those * 
frames that have been sent. */ - idx = sc_if->sk_cdata.sk_tx_cons; - while(idx != sc_if->sk_cdata.sk_tx_prod) { - cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx]; - if (cur_tx->sk_ctl & SK_TXCTL_OWN) + for (idx = sc_if->sk_cdata.sk_tx_cons;; SK_INC(idx, SK_TX_RING_CNT)) { + if (sc_if->sk_cdata.sk_tx_cnt <= 0) break; - if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG) - ifp->if_opackets++; - if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) { - m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf); - sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL; - } + cur_tx = &sc_if->sk_rdata.sk_tx_ring[idx]; + sk_ctl = le32toh(cur_tx->sk_ctl); + if (sk_ctl & SK_TXCTL_OWN) + break; sc_if->sk_cdata.sk_tx_cnt--; - SK_INC(idx, SK_TX_RING_CNT); - } - - if (sc_if->sk_cdata.sk_tx_cnt == 0) { - ifp->if_timer = 0; - } else /* nudge chip to keep tx ring moving */ - CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START); - - if (sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 2) ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + if ((sk_ctl & SK_TXCTL_LASTFRAG) == 0) + continue; + bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, txd->tx_dmamap); + ifp->if_opackets++; + m_freem(txd->tx_m); + txd->tx_m = NULL; + STAILQ_REMOVE_HEAD(&sc_if->sk_cdata.sk_txbusyq, tx_q); + STAILQ_INSERT_TAIL(&sc_if->sk_cdata.sk_txfreeq, txd, tx_q); + txd = STAILQ_FIRST(&sc_if->sk_cdata.sk_txbusyq); + } sc_if->sk_cdata.sk_tx_cons = idx; + ifp->if_timer = sc_if->sk_cdata.sk_tx_cnt > 0 ? 
5 : 0; + + bus_dmamap_sync(sc_if->sk_cdata.sk_tx_ring_tag, + sc_if->sk_cdata.sk_tx_ring_map, + BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } static void sk_tick(xsc_if) void *xsc_if; { struct sk_if_softc *sc_if; struct mii_data *mii; struct ifnet *ifp; int i; sc_if = xsc_if; - SK_IF_LOCK(sc_if); ifp = sc_if->sk_ifp; mii = device_get_softc(sc_if->sk_miibus); - if (!(ifp->if_flags & IFF_UP)) { - SK_IF_UNLOCK(sc_if); + if (!(ifp->if_flags & IFF_UP)) return; - } if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { sk_intr_bcom(sc_if); - SK_IF_UNLOCK(sc_if); return; } /* * According to SysKonnect, the correct way to verify that * the link has come back up is to poll bit 0 of the GPIO * register three times. This pin has the signal from the * link_sync pin connected to it; if we read the same link * state 3 times in a row, we know the link is up. */ for (i = 0; i < 3; i++) { if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET) break; } if (i != 3) { - sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); - SK_IF_UNLOCK(sc_if); + callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if); return; } /* Turn the GP0 interrupt back on. */ SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); SK_XM_READ_2(sc_if, XM_ISR); mii_tick(mii); - untimeout(sk_tick, sc_if, sc_if->sk_tick_ch); + callout_stop(&sc_if->sk_tick_ch); +} - SK_IF_UNLOCK(sc_if); - return; +static void +sk_yukon_tick(xsc_if) + void *xsc_if; +{ + struct sk_if_softc *sc_if; + struct mii_data *mii; + + sc_if = xsc_if; + mii = device_get_softc(sc_if->sk_miibus); + + mii_tick(mii); + callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if); } static void sk_intr_bcom(sc_if) struct sk_if_softc *sc_if; { struct mii_data *mii; struct ifnet *ifp; int status; mii = device_get_softc(sc_if->sk_miibus); ifp = sc_if->sk_ifp; SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); /* * Read the PHY interrupt register to make sure * we clear any pending interrupts. 
*/ status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { sk_init_xmac(sc_if); return; } if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) { int lstat; lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_AUXSTS); if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) { mii_mediachg(mii); /* Turn off the link LED. */ SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF); sc_if->sk_link = 0; } else if (status & BRGPHY_ISR_LNK_CHG) { sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_IMR, 0xFF00); mii_tick(mii); sc_if->sk_link = 1; /* Turn on the link LED. */ SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF| SK_LINKLED_BLINK_OFF); } else { mii_tick(mii); - sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); + callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if); } } SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); return; } static void sk_intr_xmac(sc_if) struct sk_if_softc *sc_if; { struct sk_softc *sc; u_int16_t status; sc = sc_if->sk_softc; status = SK_XM_READ_2(sc_if, XM_ISR); /* * Link has gone down. Start MII tick timeout to * watch for link resync. 
*/ if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) { if (status & XM_ISR_GP0_SET) { SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); - sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); + callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if); } if (status & XM_ISR_AUTONEG_DONE) { - sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); + callout_reset(&sc_if->sk_tick_ch, hz, sk_tick, sc_if); } } if (status & XM_IMR_TX_UNDERRUN) SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO); if (status & XM_IMR_RX_OVERRUN) SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO); status = SK_XM_READ_2(sc_if, XM_ISR); return; } static void sk_intr_yukon(sc_if) struct sk_if_softc *sc_if; { - int status; + u_int8_t status; - status = SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR); - - return; + status = SK_IF_READ_1(sc_if, 0, SK_GMAC_ISR); + /* RX overrun */ + if ((status & SK_GMAC_INT_RX_OVER) != 0) { + SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, + SK_RFCTL_RX_FIFO_OVER); + } + /* TX underrun */ + if ((status & SK_GMAC_INT_TX_UNDER) != 0) { + SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, + SK_TFCTL_TX_FIFO_UNDER); + } } static void sk_intr(xsc) void *xsc; { struct sk_softc *sc = xsc; - struct sk_if_softc *sc_if0 = NULL, *sc_if1 = NULL; + struct sk_if_softc *sc_if0, *sc_if1; struct ifnet *ifp0 = NULL, *ifp1 = NULL; u_int32_t status; SK_LOCK(sc); + status = CSR_READ_4(sc, SK_ISSR); + if (status == 0 || status == 0xffffffff || sc->sk_suspended) + goto done_locked; + sc_if0 = sc->sk_if[SK_PORT_A]; sc_if1 = sc->sk_if[SK_PORT_B]; if (sc_if0 != NULL) ifp0 = sc_if0->sk_ifp; if (sc_if1 != NULL) ifp1 = sc_if1->sk_ifp; - for (;;) { - status = CSR_READ_4(sc, SK_ISSR); - if (!(status & sc->sk_intrmask)) - break; - + for (; (status &= sc->sk_intrmask) != 0;) { /* Handle receive interrupts first. 
*/ if (status & SK_ISR_RX1_EOF) { - sk_rxeof(sc_if0); + if (ifp0->if_mtu > SK_MAX_FRAMELEN) + sk_jumbo_rxeof(sc_if0); + else + sk_rxeof(sc_if0); CSR_WRITE_4(sc, SK_BMU_RX_CSR0, SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); } if (status & SK_ISR_RX2_EOF) { - sk_rxeof(sc_if1); + if (ifp1->if_mtu > SK_MAX_FRAMELEN) + sk_jumbo_rxeof(sc_if1); + else + sk_rxeof(sc_if1); CSR_WRITE_4(sc, SK_BMU_RX_CSR1, SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); } /* Then transmit interrupts. */ if (status & SK_ISR_TX1_S_EOF) { sk_txeof(sc_if0); - CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, - SK_TXBMU_CLR_IRQ_EOF); + CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, SK_TXBMU_CLR_IRQ_EOF); } if (status & SK_ISR_TX2_S_EOF) { sk_txeof(sc_if1); - CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, - SK_TXBMU_CLR_IRQ_EOF); + CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, SK_TXBMU_CLR_IRQ_EOF); } /* Then MAC interrupts. */ if (status & SK_ISR_MAC1 && ifp0->if_drv_flags & IFF_DRV_RUNNING) { if (sc->sk_type == SK_GENESIS) sk_intr_xmac(sc_if0); else sk_intr_yukon(sc_if0); } if (status & SK_ISR_MAC2 && ifp1->if_drv_flags & IFF_DRV_RUNNING) { if (sc->sk_type == SK_GENESIS) sk_intr_xmac(sc_if1); else sk_intr_yukon(sc_if1); } if (status & SK_ISR_EXTERNAL_REG) { if (ifp0 != NULL && sc_if0->sk_phytype == SK_PHYTYPE_BCOM) sk_intr_bcom(sc_if0); if (ifp1 != NULL && sc_if1->sk_phytype == SK_PHYTYPE_BCOM) sk_intr_bcom(sc_if1); } + status = CSR_READ_4(sc, SK_ISSR); } CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); if (ifp0 != NULL && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd)) sk_start_locked(ifp0); if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd)) sk_start_locked(ifp1); +done_locked: SK_UNLOCK(sc); - - return; } static void sk_init_xmac(sc_if) struct sk_if_softc *sc_if; { struct sk_softc *sc; struct ifnet *ifp; + u_int16_t eaddr[(ETHER_ADDR_LEN+1)/2]; struct sk_bcom_hack bhack[] = { { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 
0x0420 }, { 0, 0 } }; + SK_IF_LOCK_ASSERT(sc_if); + sc = sc_if->sk_softc; ifp = sc_if->sk_ifp; /* Unreset the XMAC. */ SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET); DELAY(1000); /* Reset the XMAC's internal state. */ SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC); /* Save the XMAC II revision */ sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID)); /* * Perform additional initialization for external PHYs, * namely for the 1000baseTX cards that use the XMAC's * GMII mode. */ if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { int i = 0; u_int32_t val; /* Take PHY out of reset. */ val = sk_win_read_4(sc, SK_GPIO); if (sc_if->sk_port == SK_PORT_A) val |= SK_GPIO_DIR0|SK_GPIO_DAT0; else val |= SK_GPIO_DIR2|SK_GPIO_DAT2; sk_win_write_4(sc, SK_GPIO, val); /* Enable GMII mode on the XMAC. */ SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE); sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET); DELAY(10000); sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_IMR, 0xFFF0); /* * Early versions of the BCM5400 apparently have * a bug that requires them to have their reserved * registers initialized to some magic values. I don't * know what the numbers do, I'm just the messenger. 
*/ if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03) == 0x6041) { while(bhack[i].reg) { sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, bhack[i].reg, bhack[i].val); i++; } } } /* Set station address */ - SK_XM_WRITE_2(sc_if, XM_PAR0, - *(u_int16_t *)(&IF_LLADDR(sc_if->sk_ifp)[0])); - SK_XM_WRITE_2(sc_if, XM_PAR1, - *(u_int16_t *)(&IF_LLADDR(sc_if->sk_ifp)[2])); - SK_XM_WRITE_2(sc_if, XM_PAR2, - *(u_int16_t *)(&IF_LLADDR(sc_if->sk_ifp)[4])); + bcopy(IF_LLADDR(sc_if->sk_ifp), eaddr, ETHER_ADDR_LEN); + SK_XM_WRITE_2(sc_if, XM_PAR0, eaddr[0]); + SK_XM_WRITE_2(sc_if, XM_PAR1, eaddr[1]); + SK_XM_WRITE_2(sc_if, XM_PAR2, eaddr[2]); SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION); if (ifp->if_flags & IFF_BROADCAST) { SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD); } else { SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD); } /* We don't need the FCS appended to the packet. */ SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS); /* We want short frames padded to 60 bytes. */ SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD); /* * Enable the reception of all error frames. This is is * a necessary evil due to the design of the XMAC. The * XMAC's receive FIFO is only 8K in size, however jumbo * frames can be up to 9000 bytes in length. When bad * frame filtering is enabled, the XMAC's RX FIFO operates * in 'store and forward' mode. For this to work, the * entire frame has to fit into the FIFO, but that means * that jumbo frames larger than 8192 bytes will be * truncated. Disabling all bad frame filtering causes * the RX FIFO to operate in streaming mode, in which * case the XMAC will start transfering frames out of the * RX FIFO as soon as the FIFO threshold is reached. 
*/ - SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES| - XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS| - XM_MODE_RX_INRANGELEN); - - if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) + if (ifp->if_mtu > SK_MAX_FRAMELEN) { + SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES| + XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS| + XM_MODE_RX_INRANGELEN); SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK); - else + } else SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK); /* * Bump up the transmit threshold. This helps hold off transmit * underruns when we're blasting traffic from both ports at once. */ SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH); /* Set promiscuous mode */ sk_setpromisc(sc_if); /* Set multicast filter */ sk_setmulti(sc_if); /* Clear and enable interrupts */ SK_XM_READ_2(sc_if, XM_ISR); if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS); else SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF); /* Configure MAC arbiter */ switch(sc_if->sk_xmac_rev) { case XM_XMAC_REV_B2: sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2); sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2); sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2); sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2); sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2); sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2); sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2); sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2); sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2); break; case XM_XMAC_REV_C1: sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1); sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1); sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1); sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1); sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1); sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1); sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1); sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1); 
sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2); break; default: break; } sk_win_write_2(sc, SK_MACARB_CTL, SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF); sc_if->sk_link = 1; return; } static void sk_init_yukon(sc_if) struct sk_if_softc *sc_if; { - u_int32_t phy; + u_int32_t phy, v; u_int16_t reg; struct sk_softc *sc; struct ifnet *ifp; int i; + SK_IF_LOCK_ASSERT(sc_if); + sc = sc_if->sk_softc; ifp = sc_if->sk_ifp; if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev >= SK_YUKON_LITE_REV_A3) { - /* Take PHY out of reset. */ - sk_win_write_4(sc, SK_GPIO, - (sk_win_read_4(sc, SK_GPIO) | SK_GPIO_DIR9) & ~SK_GPIO_DAT9); + /* + * Workaround code for COMA mode, set PHY reset. + * Otherwise it will not correctly take chip out of + * powerdown (coma) + */ + v = sk_win_read_4(sc, SK_GPIO); + v |= SK_GPIO_DIR9 | SK_GPIO_DAT9; + sk_win_write_4(sc, SK_GPIO, v); } /* GMAC and GPHY Reset */ SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET); SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET); DELAY(1000); - SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_CLEAR); - SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET); - DELAY(1000); + if (sc->sk_type == SK_YUKON_LITE && + sc->sk_rev >= SK_YUKON_LITE_REV_A3) { + /* + * Workaround code for COMA mode, clear PHY reset + */ + v = sk_win_read_4(sc, SK_GPIO); + v |= SK_GPIO_DIR9; + v &= ~SK_GPIO_DAT9; + sk_win_write_4(sc, SK_GPIO, v); + } + phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP | SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE; - switch(sc_if->sk_softc->sk_pmd) { - case IFM_1000_SX: - case IFM_1000_LX: + if (sc->sk_coppertype) + phy |= SK_GPHY_COPPER; + else phy |= SK_GPHY_FIBER; - break; - case IFM_1000_CX: - case IFM_1000_T: - phy |= SK_GPHY_COPPER; - break; - } - SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET); DELAY(1000); SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR); SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF | SK_GMAC_PAUSE_ON | 
SK_GMAC_RESET_CLEAR); /* unused read of the interrupt source register */ SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR); reg = SK_YU_READ_2(sc_if, YUKON_PAR); /* MIB Counter Clear Mode set */ reg |= YU_PAR_MIB_CLR; SK_YU_WRITE_2(sc_if, YUKON_PAR, reg); /* MIB Counter Clear Mode clear */ reg &= ~YU_PAR_MIB_CLR; SK_YU_WRITE_2(sc_if, YUKON_PAR, reg); /* receive control reg */ SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR); /* transmit parameter register */ SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) | YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) ); /* serial mode register */ reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e); - if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) + if (ifp->if_mtu > SK_MAX_FRAMELEN) reg |= YU_SMR_MFL_JUMBO; SK_YU_WRITE_2(sc_if, YUKON_SMR, reg); /* Setup Yukon's address */ for (i = 0; i < 3; i++) { /* Write Source Address 1 (unicast filter) */ SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4, IF_LLADDR(sc_if->sk_ifp)[i * 2] | IF_LLADDR(sc_if->sk_ifp)[i * 2 + 1] << 8); } for (i = 0; i < 3; i++) { reg = sk_win_read_2(sc_if->sk_softc, SK_MAC1_0 + i * 2 + sc_if->sk_port * 8); SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg); } /* Set promiscuous mode */ sk_setpromisc(sc_if); /* Set multicast filter */ sk_setmulti(sc_if); /* enable interrupt mask for counter overflows */ SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0); SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0); SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0); + /* Configure RX MAC FIFO Flush Mask */ + v = YU_RXSTAT_FOFL | YU_RXSTAT_CRCERR | YU_RXSTAT_MIIERR | + YU_RXSTAT_BADFC | YU_RXSTAT_GOODFC | YU_RXSTAT_RUNT | + YU_RXSTAT_JABBER; + SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_MASK, v); + + /* Disable RX MAC FIFO Flush for YUKON-Lite Rev. 
A0 only */ + if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A0) + v = SK_TFCTL_OPERATION_ON; + else + v = SK_TFCTL_OPERATION_ON | SK_RFCTL_FIFO_FLUSH_ON; /* Configure RX MAC FIFO */ SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR); - SK_IF_WRITE_4(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON); + SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_CTRL_TEST, v); + /* Increase flush threshould to 64 bytes */ + SK_IF_WRITE_2(sc_if, 0, SK_RXMF1_FLUSH_THRESHOLD, + SK_RFCTL_FIFO_THRESHOLD + 1); + /* Configure TX MAC FIFO */ SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR); - SK_IF_WRITE_4(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON); + SK_IF_WRITE_2(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON); } /* * Note that to properly initialize any part of the GEnesis chip, * you first have to take it out of reset mode. */ static void sk_init(xsc) void *xsc; { struct sk_if_softc *sc_if = xsc; SK_IF_LOCK(sc_if); sk_init_locked(sc_if); SK_IF_UNLOCK(sc_if); return; } static void sk_init_locked(sc_if) struct sk_if_softc *sc_if; { struct sk_softc *sc; struct ifnet *ifp; struct mii_data *mii; u_int16_t reg; u_int32_t imr; + int error; SK_IF_LOCK_ASSERT(sc_if); ifp = sc_if->sk_ifp; sc = sc_if->sk_softc; mii = device_get_softc(sc_if->sk_miibus); if (ifp->if_drv_flags & IFF_DRV_RUNNING) return; /* Cancel pending I/O and free all RX/TX buffers. */ sk_stop(sc_if); if (sc->sk_type == SK_GENESIS) { /* Configure LINK_SYNC LED */ SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON); SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_ON); /* Configure RX LED */ SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_START); /* Configure TX LED */ SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_TXLEDCTL_COUNTER_START); } + /* + * Configure descriptor poll timer + * + * SK-NET GENESIS data sheet says that possibility of losing Start + * transmit command due to CPU/cache related interim storage problems + * under certain conditions. 
The document recommends a polling + * mechanism to send a Start transmit command to initiate transfer + * of ready descriptors regulary. To cope with this issue sk(4) now + * enables descriptor poll timer to initiate descriptor processing + * periodically as defined by SK_DPT_TIMER_MAX. However sk(4) still + * issue SK_TXBMU_TX_START to Tx BMU to get fast execution of Tx + * command instead of waiting for next descriptor polling time. + * The same rule may apply to Rx side too but it seems that is not + * needed at the moment. + * Since sk(4) uses descriptor polling as a last resort there is no + * need to set smaller polling time than maximum allowable one. + */ + SK_IF_WRITE_4(sc_if, 0, SK_DPT_INIT, SK_DPT_TIMER_MAX); + /* Configure I2C registers */ /* Configure XMAC(s) */ switch (sc->sk_type) { case SK_GENESIS: sk_init_xmac(sc_if); break; case SK_YUKON: case SK_YUKON_LITE: case SK_YUKON_LP: + case SK_YUKON_EC: sk_init_yukon(sc_if); break; } mii_mediachg(mii); if (sc->sk_type == SK_GENESIS) { /* Configure MAC FIFOs */ SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET); SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END); SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON); SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET); SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END); SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON); } /* Configure transmit arbiter(s) */ SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON); /* Configure RAMbuffers */ SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET); SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart); SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart); SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart); SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend); SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON); 
SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON); /* Configure BMUs */ SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE); - SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO, - vtophys(&sc_if->sk_rdata->sk_rx_ring[0])); - SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0); + if (ifp->if_mtu > SK_MAX_FRAMELEN) { + SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO, + SK_ADDR_LO(SK_JUMBO_RX_RING_ADDR(sc_if, 0))); + SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, + SK_ADDR_HI(SK_JUMBO_RX_RING_ADDR(sc_if, 0))); + } else { + SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO, + SK_ADDR_LO(SK_RX_RING_ADDR(sc_if, 0))); + SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, + SK_ADDR_HI(SK_RX_RING_ADDR(sc_if, 0))); + } SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE); SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO, - vtophys(&sc_if->sk_rdata->sk_tx_ring[0])); - SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0); + SK_ADDR_LO(SK_TX_RING_ADDR(sc_if, 0))); + SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, + SK_ADDR_HI(SK_TX_RING_ADDR(sc_if, 0))); /* Init descriptors */ - if (sk_init_rx_ring(sc_if) == ENOBUFS) { - printf("sk%d: initialization failed: no " - "memory for rx buffers\n", sc_if->sk_unit); + if (ifp->if_mtu > SK_MAX_FRAMELEN) + error = sk_init_jumbo_rx_ring(sc_if); + else + error = sk_init_rx_ring(sc_if); + if (error != 0) { + device_printf(sc_if->sk_if_dev, + "initialization failed: no memory for rx buffers\n"); sk_stop(sc_if); return; } sk_init_tx_ring(sc_if); /* Set interrupt moderation if changed via sysctl. 
*/ - /* SK_LOCK(sc); */ imr = sk_win_read_4(sc, SK_IMTIMERINIT); if (imr != SK_IM_USECS(sc->sk_int_mod, sc->sk_int_ticks)) { sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod, sc->sk_int_ticks)); if (bootverbose) - printf("skc%d: interrupt moderation is %d us\n", - sc->sk_unit, sc->sk_int_mod); + device_printf(sc_if->sk_if_dev, + "interrupt moderation is %d us.\n", + sc->sk_int_mod); } - /* SK_UNLOCK(sc); */ /* Configure interrupt handling */ CSR_READ_4(sc, SK_ISSR); if (sc_if->sk_port == SK_PORT_A) sc->sk_intrmask |= SK_INTRS1; else sc->sk_intrmask |= SK_INTRS2; sc->sk_intrmask |= SK_ISR_EXTERNAL_REG; CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); /* Start BMUs. */ SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START); switch(sc->sk_type) { case SK_GENESIS: /* Enable XMACs TX and RX state machines */ SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE); SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); break; case SK_YUKON: case SK_YUKON_LITE: case SK_YUKON_LP: + case SK_YUKON_EC: reg = SK_YU_READ_2(sc_if, YUKON_GPCR); reg |= YU_GPCR_TXEN | YU_GPCR_RXEN; - reg &= ~(YU_GPCR_SPEED_EN | YU_GPCR_DPLX_EN); +#if 0 + /* XXX disable 100Mbps and full duplex mode? 
*/ + reg &= ~(YU_GPCR_SPEED | YU_GPCR_DPLX_DIS); +#endif SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg); } + /* Activate descriptor polling timer */ + SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_START); + /* start transfer of Tx descriptors */ + CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START); + ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; + switch (sc->sk_type) { + case SK_YUKON: + case SK_YUKON_LITE: + case SK_YUKON_LP: + case SK_YUKON_EC: + callout_reset(&sc_if->sk_tick_ch, hz, sk_yukon_tick, sc_if); + break; + } + return; } static void sk_stop(sc_if) struct sk_if_softc *sc_if; { int i; struct sk_softc *sc; + struct sk_txdesc *txd; + struct sk_rxdesc *rxd; + struct sk_rxdesc *jrxd; struct ifnet *ifp; + u_int32_t val; SK_IF_LOCK_ASSERT(sc_if); sc = sc_if->sk_softc; ifp = sc_if->sk_ifp; - untimeout(sk_tick, sc_if, sc_if->sk_tick_ch); + callout_stop(&sc_if->sk_tick_ch); - if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { - u_int32_t val; + /* stop Tx descriptor polling timer */ + SK_IF_WRITE_4(sc_if, 0, SK_DPT_TIMER_CTRL, SK_DPT_TCTL_STOP); + /* stop transfer of Tx descriptors */ + CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_STOP); + for (i = 0; i < SK_TIMEOUT; i++) { + val = CSR_READ_4(sc, sc_if->sk_tx_bmu); + if ((val & SK_TXBMU_TX_STOP) == 0) + break; + DELAY(1); + } + if (i == SK_TIMEOUT) + device_printf(sc_if->sk_if_dev, + "can not stop transfer of Tx descriptor\n"); + /* stop transfer of Rx descriptors */ + SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_STOP); + for (i = 0; i < SK_TIMEOUT; i++) { + val = SK_IF_READ_4(sc_if, 0, SK_RXQ1_BMU_CSR); + if ((val & SK_RXBMU_RX_STOP) == 0) + break; + DELAY(1); + } + if (i == SK_TIMEOUT) + device_printf(sc_if->sk_if_dev, + "can not stop transfer of Rx descriptor\n"); + if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { /* Put PHY back into reset. 
*/ val = sk_win_read_4(sc, SK_GPIO); if (sc_if->sk_port == SK_PORT_A) { val |= SK_GPIO_DIR0; val &= ~SK_GPIO_DAT0; } else { val |= SK_GPIO_DIR2; val &= ~SK_GPIO_DAT2; } sk_win_write_4(sc, SK_GPIO, val); } /* Turn off various components of this interface. */ SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC); switch (sc->sk_type) { case SK_GENESIS: SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET); SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET); break; case SK_YUKON: case SK_YUKON_LITE: case SK_YUKON_LP: + case SK_YUKON_EC: SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET); SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET); break; } SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE); SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF); SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF); SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF); SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP); SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP); SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF); SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF); /* Disable interrupts */ if (sc_if->sk_port == SK_PORT_A) sc->sk_intrmask &= ~SK_INTRS1; else sc->sk_intrmask &= ~SK_INTRS2; CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); SK_XM_READ_2(sc_if, XM_ISR); SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF); /* Free RX and TX mbufs still in the queues. 
*/ for (i = 0; i < SK_RX_RING_CNT; i++) { - if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) { - m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf); - sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL; + rxd = &sc_if->sk_cdata.sk_rxdesc[i]; + if (rxd->rx_m != NULL) { + bus_dmamap_sync(sc_if->sk_cdata.sk_rx_tag, + rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(sc_if->sk_cdata.sk_rx_tag, + rxd->rx_dmamap); + m_freem(rxd->rx_m); + rxd->rx_m = NULL; } } - + for (i = 0; i < SK_JUMBO_RX_RING_CNT; i++) { + jrxd = &sc_if->sk_cdata.sk_jumbo_rxdesc[i]; + if (jrxd->rx_m != NULL) { + bus_dmamap_sync(sc_if->sk_cdata.sk_jumbo_rx_tag, + jrxd->rx_dmamap, BUS_DMASYNC_POSTREAD); + bus_dmamap_unload(sc_if->sk_cdata.sk_jumbo_rx_tag, + jrxd->rx_dmamap); + m_freem(jrxd->rx_m); + jrxd->rx_m = NULL; + } + } for (i = 0; i < SK_TX_RING_CNT; i++) { - if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) { - m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf); - sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL; + txd = &sc_if->sk_cdata.sk_txdesc[i]; + if (txd->tx_m != NULL) { + bus_dmamap_sync(sc_if->sk_cdata.sk_tx_tag, + txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(sc_if->sk_cdata.sk_tx_tag, + txd->tx_dmamap); + m_freem(txd->tx_m); + txd->tx_m = NULL; } } ifp->if_drv_flags &= ~(IFF_DRV_RUNNING|IFF_DRV_OACTIVE); return; } static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) { int error, value; if (!arg1) return (EINVAL); value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if (error || !req->newptr) return (error); if (value < low || value > high) return (EINVAL); *(int *)arg1 = value; return (0); } static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, SK_IM_MIN, SK_IM_MAX)); } Index: stable/6/sys/dev/sk/if_skreg.h =================================================================== --- stable/6/sys/dev/sk/if_skreg.h (revision 159562) +++ stable/6/sys/dev/sk/if_skreg.h (revision 159563) @@ -1,1507 
+1,1586 @@ /* $OpenBSD: if_skreg.h,v 1.10 2003/08/12 05:23:06 nate Exp $ */ /*- * Copyright (c) 1997, 1998, 1999, 2000 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /*- * Copyright (c) 2003 Nathan L. 
Binkert * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* Values to keep the different chip revisions apart (SK_CHIPVER). */ #define SK_GENESIS 0x0A #define SK_YUKON 0xB0 #define SK_YUKON_LITE 0xB1 #define SK_YUKON_LP 0xB2 +#define SK_YUKON_XL 0xB3 +#define SK_YUKON_EC_U 0xB4 +#define SK_YUKON_EC 0xB6 +#define SK_YUKON_FE 0xB7 #define SK_YUKON_FAMILY(x) ((x) & 0xB0) +#define SK_IS_YUKON2(sc) \ + ((sc)->sk_type >= SK_YUKON_XL && (sc)->sk_type <= SK_YUKON_FE) /* Known revisions in SK_CONFIG. */ #define SK_YUKON_LITE_REV_A0 0x0 /* invented, see test in skc_attach. 
*/ #define SK_YUKON_LITE_REV_A1 0x3 #define SK_YUKON_LITE_REV_A3 0x7 +#define SK_YUKON_EC_REV_A1 0x0 +#define SK_YUKON_EC_REV_A2 0x1 +#define SK_YUKON_EC_REV_A3 0x2 + /* * SysKonnect PCI vendor ID */ #define VENDORID_SK 0x1148 /* * Marvell PCI vendor ID */ #define VENDORID_MARVELL 0x11AB /* * SK-NET gigabit ethernet device IDs */ #define DEVICEID_SK_V1 0x4300 #define DEVICEID_SK_V2 0x4320 /* + * Marvell gigabit ethernet device IDs + */ +#define DEVICEID_MRVL_4360 0x4360 +#define DEVICEID_MRVL_4361 0x4361 +#define DEVICEID_MRVL_4362 0x4362 + +/* * Belkin F5D5005 */ #define DEVICEID_BELKIN_5005 0x5005 /* * 3Com PCI vendor ID */ #define VENDORID_3COM 0x10b7 /* * 3Com gigabit ethernet device ID */ #define DEVICEID_3COM_3C940 0x1700 /* * Linksys PCI vendor ID */ #define VENDORID_LINKSYS 0x1737 /* * Linksys gigabit ethernet device ID */ #define DEVICEID_LINKSYS_EG1032 0x1032 /* * Linksys gigabit ethernet rev 2 sub-device ID */ #define SUBDEVICEID_LINKSYS_EG1032_REV2 0x0015 /* * D-Link PCI vendor ID */ #define VENDORID_DLINK 0x1186 /* * D-Link gigabit ethernet device ID */ #define DEVICEID_DLINK_DGE530T 0x4c00 /* * GEnesis registers. The GEnesis chip has a 256-byte I/O window * but internally it has a 16K register space. This 16K space is * divided into 128-byte blocks. The first 128 bytes of the I/O * window represent the first block, which is permanently mapped * at the start of the window. The other 127 blocks can be mapped * to the second 128 bytes of the I/O window by setting the desired * block value in the RAP register in block 0. Not all of the 127 * blocks are actually used. Most registers are 32 bits wide, but * there are a few 16-bit and 8-bit ones as well. */ /* Start of remappable register window. 
*/ #define SK_WIN_BASE 0x0080 /* Size of a window */ #define SK_WIN_LEN 0x80 #define SK_WIN_MASK 0x3F80 #define SK_REG_MASK 0x7F /* Compute the window of a given register (for the RAP register) */ #define SK_WIN(reg) (((reg) & SK_WIN_MASK) / SK_WIN_LEN) /* Compute the relative offset of a register within the window */ #define SK_REG(reg) ((reg) & SK_REG_MASK) #define SK_PORT_A 0 #define SK_PORT_B 1 /* * Compute offset of port-specific register. Since there are two * ports, there are two of some GEnesis modules (e.g. two sets of * DMA queues, two sets of FIFO control registers, etc...). Normally, * the block for port 0 is at offset 0x0 and the block for port 1 is * at offset 0x80 (i.e. the next page over). However for the transmit * BMUs and RAMbuffers, there are two blocks for each port: one for * the sync transmit queue and one for the async queue (which we don't * use). However instead of ordering them like this: * TX sync 1 / TX sync 2 / TX async 1 / TX async 2 * SysKonnect has instead ordered them like this: * TX sync 1 / TX async 1 / TX sync 2 / TX async 2 * This means that when referencing the TX BMU and RAMbuffer registers, * we have to double the block offset (0x80 * 2) in order to reach the * second queue. This prevents us from using the same formula * (sk_port * 0x80) to compute the offsets for all of the port-specific * blocks: we need an extra offset for the BMU and RAMbuffer registers. * The simplest thing is to provide an extra argument to these macros: * the 'skip' parameter. The 'skip' value is the number of extra pages * for skip when computing the port0/port1 offsets. For most registers, * the skip value is 0; for the BMU and RAMbuffer registers, it's 1. 
*/ #define SK_IF_READ_4(sc_if, skip, reg) \ sk_win_read_4(sc_if->sk_softc, reg + \ ((sc_if->sk_port * (skip + 1)) * SK_WIN_LEN)) #define SK_IF_READ_2(sc_if, skip, reg) \ sk_win_read_2(sc_if->sk_softc, reg + \ ((sc_if->sk_port * (skip + 1)) * SK_WIN_LEN)) #define SK_IF_READ_1(sc_if, skip, reg) \ sk_win_read_1(sc_if->sk_softc, reg + \ ((sc_if->sk_port * (skip + 1)) * SK_WIN_LEN)) #define SK_IF_WRITE_4(sc_if, skip, reg, val) \ sk_win_write_4(sc_if->sk_softc, \ reg + ((sc_if->sk_port * (skip + 1)) * SK_WIN_LEN), val) #define SK_IF_WRITE_2(sc_if, skip, reg, val) \ sk_win_write_2(sc_if->sk_softc, \ reg + ((sc_if->sk_port * (skip + 1)) * SK_WIN_LEN), val) #define SK_IF_WRITE_1(sc_if, skip, reg, val) \ sk_win_write_1(sc_if->sk_softc, \ reg + ((sc_if->sk_port * (skip + 1)) * SK_WIN_LEN), val) /* Block 0 registers, permanently mapped at iobase. */ #define SK_RAP 0x0000 #define SK_CSR 0x0004 #define SK_LED 0x0006 #define SK_ISR 0x0008 /* interrupt source */ #define SK_IMR 0x000C /* interrupt mask */ #define SK_IESR 0x0010 /* interrupt hardware error source */ #define SK_IEMR 0x0014 /* interrupt hardware error mask */ #define SK_ISSR 0x0018 /* special interrupt source */ #define SK_XM_IMR0 0x0020 #define SK_XM_ISR0 0x0028 #define SK_XM_PHYADDR0 0x0030 #define SK_XM_PHYDATA0 0x0034 #define SK_XM_IMR1 0x0040 #define SK_XM_ISR1 0x0048 #define SK_XM_PHYADDR1 0x0050 #define SK_XM_PHYDATA1 0x0054 #define SK_BMU_RX_CSR0 0x0060 #define SK_BMU_RX_CSR1 0x0064 #define SK_BMU_TXS_CSR0 0x0068 #define SK_BMU_TXA_CSR0 0x006C #define SK_BMU_TXS_CSR1 0x0070 #define SK_BMU_TXA_CSR1 0x0074 /* SK_CSR register */ #define SK_CSR_SW_RESET 0x0001 #define SK_CSR_SW_UNRESET 0x0002 #define SK_CSR_MASTER_RESET 0x0004 #define SK_CSR_MASTER_UNRESET 0x0008 #define SK_CSR_MASTER_STOP 0x0010 #define SK_CSR_MASTER_DONE 0x0020 #define SK_CSR_SW_IRQ_CLEAR 0x0040 #define SK_CSR_SW_IRQ_SET 0x0080 #define SK_CSR_SLOTSIZE 0x0100 /* 1 == 64 bits, 0 == 32 */ #define SK_CSR_BUSCLOCK 0x0200 /* 1 == 33/66 Mhz, = 33 */ /* 
SK_LED register */ #define SK_LED_GREEN_OFF 0x01 #define SK_LED_GREEN_ON 0x02 /* SK_ISR register */ #define SK_ISR_TX2_AS_CHECK 0x00000001 #define SK_ISR_TX2_AS_EOF 0x00000002 #define SK_ISR_TX2_AS_EOB 0x00000004 #define SK_ISR_TX2_S_CHECK 0x00000008 #define SK_ISR_TX2_S_EOF 0x00000010 #define SK_ISR_TX2_S_EOB 0x00000020 #define SK_ISR_TX1_AS_CHECK 0x00000040 #define SK_ISR_TX1_AS_EOF 0x00000080 #define SK_ISR_TX1_AS_EOB 0x00000100 #define SK_ISR_TX1_S_CHECK 0x00000200 #define SK_ISR_TX1_S_EOF 0x00000400 #define SK_ISR_TX1_S_EOB 0x00000800 #define SK_ISR_RX2_CHECK 0x00001000 #define SK_ISR_RX2_EOF 0x00002000 #define SK_ISR_RX2_EOB 0x00004000 #define SK_ISR_RX1_CHECK 0x00008000 #define SK_ISR_RX1_EOF 0x00010000 #define SK_ISR_RX1_EOB 0x00020000 #define SK_ISR_LINK2_OFLOW 0x00040000 #define SK_ISR_MAC2 0x00080000 #define SK_ISR_LINK1_OFLOW 0x00100000 #define SK_ISR_MAC1 0x00200000 #define SK_ISR_TIMER 0x00400000 #define SK_ISR_EXTERNAL_REG 0x00800000 #define SK_ISR_SW 0x01000000 #define SK_ISR_I2C_RDY 0x02000000 #define SK_ISR_TX2_TIMEO 0x04000000 #define SK_ISR_TX1_TIMEO 0x08000000 #define SK_ISR_RX2_TIMEO 0x10000000 #define SK_ISR_RX1_TIMEO 0x20000000 #define SK_ISR_RSVD 0x40000000 #define SK_ISR_HWERR 0x80000000 /* SK_IMR register */ #define SK_IMR_TX2_AS_CHECK 0x00000001 #define SK_IMR_TX2_AS_EOF 0x00000002 #define SK_IMR_TX2_AS_EOB 0x00000004 #define SK_IMR_TX2_S_CHECK 0x00000008 #define SK_IMR_TX2_S_EOF 0x00000010 #define SK_IMR_TX2_S_EOB 0x00000020 #define SK_IMR_TX1_AS_CHECK 0x00000040 #define SK_IMR_TX1_AS_EOF 0x00000080 #define SK_IMR_TX1_AS_EOB 0x00000100 #define SK_IMR_TX1_S_CHECK 0x00000200 #define SK_IMR_TX1_S_EOF 0x00000400 #define SK_IMR_TX1_S_EOB 0x00000800 #define SK_IMR_RX2_CHECK 0x00001000 #define SK_IMR_RX2_EOF 0x00002000 #define SK_IMR_RX2_EOB 0x00004000 #define SK_IMR_RX1_CHECK 0x00008000 #define SK_IMR_RX1_EOF 0x00010000 #define SK_IMR_RX1_EOB 0x00020000 #define SK_IMR_LINK2_OFLOW 0x00040000 #define SK_IMR_MAC2 0x00080000 #define 
SK_IMR_LINK1_OFLOW 0x00100000 #define SK_IMR_MAC1 0x00200000 #define SK_IMR_TIMER 0x00400000 #define SK_IMR_EXTERNAL_REG 0x00800000 #define SK_IMR_SW 0x01000000 #define SK_IMR_I2C_RDY 0x02000000 #define SK_IMR_TX2_TIMEO 0x04000000 #define SK_IMR_TX1_TIMEO 0x08000000 #define SK_IMR_RX2_TIMEO 0x10000000 #define SK_IMR_RX1_TIMEO 0x20000000 #define SK_IMR_RSVD 0x40000000 #define SK_IMR_HWERR 0x80000000 #define SK_INTRS1 \ (SK_IMR_RX1_EOF|SK_IMR_TX1_S_EOF|SK_IMR_MAC1) #define SK_INTRS2 \ (SK_IMR_RX2_EOF|SK_IMR_TX2_S_EOF|SK_IMR_MAC2) /* SK_IESR register */ #define SK_IESR_PAR_RX2 0x00000001 #define SK_IESR_PAR_RX1 0x00000002 #define SK_IESR_PAR_MAC2 0x00000004 #define SK_IESR_PAR_MAC1 0x00000008 #define SK_IESR_PAR_WR_RAM 0x00000010 #define SK_IESR_PAR_RD_RAM 0x00000020 #define SK_IESR_NO_TSTAMP_MAC2 0x00000040 #define SK_IESR_NO_TSTAMO_MAC1 0x00000080 #define SK_IESR_NO_STS_MAC2 0x00000100 #define SK_IESR_NO_STS_MAC1 0x00000200 #define SK_IESR_IRQ_STS 0x00000400 #define SK_IESR_MASTERERR 0x00000800 /* SK_IEMR register */ #define SK_IEMR_PAR_RX2 0x00000001 #define SK_IEMR_PAR_RX1 0x00000002 #define SK_IEMR_PAR_MAC2 0x00000004 #define SK_IEMR_PAR_MAC1 0x00000008 #define SK_IEMR_PAR_WR_RAM 0x00000010 #define SK_IEMR_PAR_RD_RAM 0x00000020 #define SK_IEMR_NO_TSTAMP_MAC2 0x00000040 #define SK_IEMR_NO_TSTAMO_MAC1 0x00000080 #define SK_IEMR_NO_STS_MAC2 0x00000100 #define SK_IEMR_NO_STS_MAC1 0x00000200 #define SK_IEMR_IRQ_STS 0x00000400 #define SK_IEMR_MASTERERR 0x00000800 /* Block 2 */ #define SK_MAC0_0 0x0100 #define SK_MAC0_1 0x0104 #define SK_MAC1_0 0x0108 #define SK_MAC1_1 0x010C #define SK_MAC2_0 0x0110 #define SK_MAC2_1 0x0114 #define SK_CONNTYPE 0x0118 #define SK_PMDTYPE 0x0119 #define SK_CONFIG 0x011A #define SK_CHIPVER 0x011B #define SK_EPROM0 0x011C -#define SK_EPROM1 0x011D -#define SK_EPROM2 0x011E +#define SK_EPROM1 0x011D /* yukon/genesis */ +#define SK_Y2_CLKGATE 0x011D /* yukon 2 */ +#define SK_EPROM2 0x011E /* yukon/genesis */ +#define SK_Y2_HWRES 0x011E /* 
yukon 2 */ #define SK_EPROM3 0x011F #define SK_EP_ADDR 0x0120 #define SK_EP_DATA 0x0124 #define SK_EP_LOADCTL 0x0128 #define SK_EP_LOADTST 0x0129 #define SK_TIMERINIT 0x0130 #define SK_TIMER 0x0134 #define SK_TIMERCTL 0x0138 #define SK_TIMERTST 0x0139 #define SK_IMTIMERINIT 0x0140 #define SK_IMTIMER 0x0144 #define SK_IMTIMERCTL 0x0148 #define SK_IMTIMERTST 0x0149 #define SK_IMMR 0x014C #define SK_IHWEMR 0x0150 #define SK_TESTCTL1 0x0158 #define SK_TESTCTL2 0x0159 #define SK_GPIO 0x015C #define SK_I2CHWCTL 0x0160 #define SK_I2CHWDATA 0x0164 #define SK_I2CHWIRQ 0x0168 #define SK_I2CSW 0x016C #define SK_BLNKINIT 0x0170 #define SK_BLNKCOUNT 0x0174 #define SK_BLNKCTL 0x0178 #define SK_BLNKSTS 0x0179 #define SK_BLNKTST 0x017A #define SK_IMCTL_STOP 0x02 #define SK_IMCTL_START 0x04 #define SK_IMTIMER_TICKS_GENESIS 53 #define SK_IMTIMER_TICKS_YUKON 78 #define SK_IMTIMER_TICKS_YUKON_EC 125 #define SK_IM_USECS(x, t) ((x) * (t)) #define SK_IM_MIN 10 #define SK_IM_DEFAULT 100 #define SK_IM_MAX 10000 /* * The SK_EPROM0 register contains a byte that describes the * amount of SRAM mounted on the NIC. The value also tells if * the chips are 64K or 128K. This affects the RAMbuffer address * offset that we need to use. */ #define SK_RAMSIZE_512K_64 0x1 #define SK_RAMSIZE_1024K_128 0x2 #define SK_RAMSIZE_1024K_64 0x3 #define SK_RAMSIZE_2048K_128 0x4 #define SK_RBOFF_0 0x0 #define SK_RBOFF_80000 0x80000 /* * SK_EEPROM1 contains the PHY type, which may be XMAC for * fiber-based cards or BCOM for 1000baseT cards with a Broadcom * PHY. */ #define SK_PHYTYPE_XMAC 0 /* integrated XMAC II PHY */ #define SK_PHYTYPE_BCOM 1 /* Broadcom BCM5400 */ #define SK_PHYTYPE_LONE 2 /* Level One LXT1000 */ #define SK_PHYTYPE_NAT 3 /* National DP83891 */ #define SK_PHYTYPE_MARV_COPPER 4 /* Marvell 88E1011S */ #define SK_PHYTYPE_MARV_FIBER 5 /* Marvell 88E1011S (fiber) */ /* * PHY addresses.
*/ #define SK_PHYADDR_XMAC 0x0 #define SK_PHYADDR_BCOM 0x1 #define SK_PHYADDR_LONE 0x3 #define SK_PHYADDR_NAT 0x0 #define SK_PHYADDR_MARV 0x0 #define SK_CONFIG_SINGLEMAC 0x01 #define SK_CONFIG_DIS_DSL_CLK 0x02 #define SK_PMD_1000BASELX 0x4C #define SK_PMD_1000BASESX 0x53 #define SK_PMD_1000BASECX 0x43 #define SK_PMD_1000BASETX 0x54 /* GPIO bits */ #define SK_GPIO_DAT0 0x00000001 #define SK_GPIO_DAT1 0x00000002 #define SK_GPIO_DAT2 0x00000004 #define SK_GPIO_DAT3 0x00000008 #define SK_GPIO_DAT4 0x00000010 #define SK_GPIO_DAT5 0x00000020 #define SK_GPIO_DAT6 0x00000040 #define SK_GPIO_DAT7 0x00000080 #define SK_GPIO_DAT8 0x00000100 #define SK_GPIO_DAT9 0x00000200 #define SK_GPIO_DIR0 0x00010000 #define SK_GPIO_DIR1 0x00020000 #define SK_GPIO_DIR2 0x00040000 #define SK_GPIO_DIR3 0x00080000 #define SK_GPIO_DIR4 0x00100000 #define SK_GPIO_DIR5 0x00200000 #define SK_GPIO_DIR6 0x00400000 #define SK_GPIO_DIR7 0x00800000 #define SK_GPIO_DIR8 0x01000000 #define SK_GPIO_DIR9 0x02000000 +#define SK_Y2_CLKGATE_LINK2_INACTIVE 0x80 /* port 2 inactive */ + +#define SK_Y2_HWRES_LINK_1 0x01 +#define SK_Y2_HWRES_LINK_2 0x02 +#define SK_Y2_HWRES_LINK_MASK (SK_Y2_HWRES_LINK_1 | SK_Y2_HWRES_LINK_2) +#define SK_Y2_HWRES_LINK_DUAL (SK_Y2_HWRES_LINK_1 | SK_Y2_HWRES_LINK_2) + /* Block 3 Ram interface and MAC arbiter registers */ #define SK_RAMADDR 0x0180 #define SK_RAMDATA0 0x0184 #define SK_RAMDATA1 0x0188 #define SK_TO0 0x0190 #define SK_TO1 0x0191 #define SK_TO2 0x0192 #define SK_TO3 0x0193 #define SK_TO4 0x0194 #define SK_TO5 0x0195 #define SK_TO6 0x0196 #define SK_TO7 0x0197 #define SK_TO8 0x0198 #define SK_TO9 0x0199 #define SK_TO10 0x019A #define SK_TO11 0x019B #define SK_RITIMEO_TMR 0x019C #define SK_RAMCTL 0x01A0 #define SK_RITIMER_TST 0x01A2 #define SK_RAMCTL_RESET 0x0001 #define SK_RAMCTL_UNRESET 0x0002 #define SK_RAMCTL_CLR_IRQ_WPAR 0x0100 #define SK_RAMCTL_CLR_IRQ_RPAR 0x0200 /* Mac arbiter registers */ #define SK_MINIT_RX1 0x01B0 #define SK_MINIT_RX2 0x01B1 #define 
SK_MINIT_TX1 0x01B2 #define SK_MINIT_TX2 0x01B3 #define SK_MTIMEO_RX1 0x01B4 #define SK_MTIMEO_RX2 0x01B5 #define SK_MTIMEO_TX1 0x01B6 #define SK_MTIEMO_TX2 0x01B7 #define SK_MACARB_CTL 0x01B8 #define SK_MTIMER_TST 0x01BA #define SK_RCINIT_RX1 0x01C0 #define SK_RCINIT_RX2 0x01C1 #define SK_RCINIT_TX1 0x01C2 #define SK_RCINIT_TX2 0x01C3 #define SK_RCTIMEO_RX1 0x01C4 #define SK_RCTIMEO_RX2 0x01C5 #define SK_RCTIMEO_TX1 0x01C6 #define SK_RCTIMEO_TX2 0x01C7 #define SK_RECOVERY_CTL 0x01C8 #define SK_RCTIMER_TST 0x01CA /* Packet arbiter registers */ #define SK_RXPA1_TINIT 0x01D0 #define SK_RXPA2_TINIT 0x01D4 #define SK_TXPA1_TINIT 0x01D8 #define SK_TXPA2_TINIT 0x01DC #define SK_RXPA1_TIMEO 0x01E0 #define SK_RXPA2_TIMEO 0x01E4 #define SK_TXPA1_TIMEO 0x01E8 #define SK_TXPA2_TIMEO 0x01EC #define SK_PKTARB_CTL 0x01F0 #define SK_PKTATB_TST 0x01F2 #define SK_PKTARB_TIMEOUT 0x2000 #define SK_PKTARBCTL_RESET 0x0001 #define SK_PKTARBCTL_UNRESET 0x0002 #define SK_PKTARBCTL_RXTO1_OFF 0x0004 #define SK_PKTARBCTL_RXTO1_ON 0x0008 #define SK_PKTARBCTL_RXTO2_OFF 0x0010 #define SK_PKTARBCTL_RXTO2_ON 0x0020 #define SK_PKTARBCTL_TXTO1_OFF 0x0040 #define SK_PKTARBCTL_TXTO1_ON 0x0080 #define SK_PKTARBCTL_TXTO2_OFF 0x0100 #define SK_PKTARBCTL_TXTO2_ON 0x0200 #define SK_PKTARBCTL_CLR_IRQ_RXTO1 0x0400 #define SK_PKTARBCTL_CLR_IRQ_RXTO2 0x0800 #define SK_PKTARBCTL_CLR_IRQ_TXTO1 0x1000 #define SK_PKTARBCTL_CLR_IRQ_TXTO2 0x2000 #define SK_MINIT_XMAC_B2 54 #define SK_MINIT_XMAC_C1 63 #define SK_MACARBCTL_RESET 0x0001 #define SK_MACARBCTL_UNRESET 0x0002 #define SK_MACARBCTL_FASTOE_OFF 0x0004 #define SK_MACARBCRL_FASTOE_ON 0x0008 #define SK_RCINIT_XMAC_B2 54 #define SK_RCINIT_XMAC_C1 0 #define SK_RECOVERYCTL_RX1_OFF 0x0001 #define SK_RECOVERYCTL_RX1_ON 0x0002 #define SK_RECOVERYCTL_RX2_OFF 0x0004 #define SK_RECOVERYCTL_RX2_ON 0x0008 #define SK_RECOVERYCTL_TX1_OFF 0x0010 #define SK_RECOVERYCTL_TX1_ON 0x0020 #define SK_RECOVERYCTL_TX2_OFF 0x0040 #define SK_RECOVERYCTL_TX2_ON 0x0080 #define 
SK_RECOVERY_XMAC_B2 \ (SK_RECOVERYCTL_RX1_ON|SK_RECOVERYCTL_RX2_ON| \ SK_RECOVERYCTL_TX1_ON|SK_RECOVERYCTL_TX2_ON) #define SK_RECOVERY_XMAC_C1 \ (SK_RECOVERYCTL_RX1_OFF|SK_RECOVERYCTL_RX2_OFF| \ SK_RECOVERYCTL_TX1_OFF|SK_RECOVERYCTL_TX2_OFF) /* Block 4 -- TX Arbiter MAC 1 */ #define SK_TXAR1_TIMERINIT 0x0200 #define SK_TXAR1_TIMERVAL 0x0204 #define SK_TXAR1_LIMITINIT 0x0208 #define SK_TXAR1_LIMITCNT 0x020C #define SK_TXAR1_COUNTERCTL 0x0210 #define SK_TXAR1_COUNTERTST 0x0212 #define SK_TXAR1_COUNTERSTS 0x0212 /* Block 5 -- TX Arbiter MAC 2 */ #define SK_TXAR2_TIMERINIT 0x0280 #define SK_TXAR2_TIMERVAL 0x0284 #define SK_TXAR2_LIMITINIT 0x0288 #define SK_TXAR2_LIMITCNT 0x028C #define SK_TXAR2_COUNTERCTL 0x0290 #define SK_TXAR2_COUNTERTST 0x0291 #define SK_TXAR2_COUNTERSTS 0x0292 #define SK_TXARCTL_OFF 0x01 #define SK_TXARCTL_ON 0x02 #define SK_TXARCTL_RATECTL_OFF 0x04 #define SK_TXARCTL_RATECTL_ON 0x08 #define SK_TXARCTL_ALLOC_OFF 0x10 #define SK_TXARCTL_ALLOC_ON 0x20 #define SK_TXARCTL_FSYNC_OFF 0x40 #define SK_TXARCTL_FSYNC_ON 0x80 /* Block 6 -- External registers */ #define SK_EXTREG_BASE 0x300 #define SK_EXTREG_END 0x37C /* Block 7 -- PCI config registers */ #define SK_PCI_BASE 0x0380 #define SK_PCI_END 0x03FC /* Compute offset of mirrored PCI register */ #define SK_PCI_REG(reg) ((reg) + SK_PCI_BASE) /* Block 8 -- RX queue 1 */ #define SK_RXQ1_BUFCNT 0x0400 #define SK_RXQ1_BUFCTL 0x0402 #define SK_RXQ1_NEXTDESC 0x0404 #define SK_RXQ1_RXBUF_LO 0x0408 #define SK_RXQ1_RXBUF_HI 0x040C #define SK_RXQ1_RXSTAT 0x0410 #define SK_RXQ1_TIMESTAMP 0x0414 #define SK_RXQ1_CSUM1 0x0418 #define SK_RXQ1_CSUM2 0x041A #define SK_RXQ1_CSUM1_START 0x041C #define SK_RXQ1_CSUM2_START 0x041E #define SK_RXQ1_CURADDR_LO 0x0420 #define SK_RXQ1_CURADDR_HI 0x0424 #define SK_RXQ1_CURCNT_LO 0x0428 #define SK_RXQ1_CURCNT_HI 0x042C #define SK_RXQ1_CURBYTES 0x0430 #define SK_RXQ1_BMU_CSR 0x0434 #define SK_RXQ1_WATERMARK 0x0438 #define SK_RXQ1_FLAG 0x043A #define SK_RXQ1_TEST1 0x043C #define 
SK_RXQ1_TEST2 0x0440 #define SK_RXQ1_TEST3 0x0444 /* Block 9 -- RX queue 2 */ #define SK_RXQ2_BUFCNT 0x0480 #define SK_RXQ2_BUFCTL 0x0482 #define SK_RXQ2_NEXTDESC 0x0484 #define SK_RXQ2_RXBUF_LO 0x0488 #define SK_RXQ2_RXBUF_HI 0x048C #define SK_RXQ2_RXSTAT 0x0490 #define SK_RXQ2_TIMESTAMP 0x0494 #define SK_RXQ2_CSUM1 0x0498 #define SK_RXQ2_CSUM2 0x049A #define SK_RXQ2_CSUM1_START 0x049C #define SK_RXQ2_CSUM2_START 0x049E #define SK_RXQ2_CURADDR_LO 0x04A0 #define SK_RXQ2_CURADDR_HI 0x04A4 #define SK_RXQ2_CURCNT_LO 0x04A8 #define SK_RXQ2_CURCNT_HI 0x04AC #define SK_RXQ2_CURBYTES 0x04B0 #define SK_RXQ2_BMU_CSR 0x04B4 #define SK_RXQ2_WATERMARK 0x04B8 #define SK_RXQ2_FLAG 0x04BA #define SK_RXQ2_TEST1 0x04BC #define SK_RXQ2_TEST2 0x04C0 #define SK_RXQ2_TEST3 0x04C4 #define SK_RXBMU_CLR_IRQ_ERR 0x00000001 #define SK_RXBMU_CLR_IRQ_EOF 0x00000002 #define SK_RXBMU_CLR_IRQ_EOB 0x00000004 #define SK_RXBMU_CLR_IRQ_PAR 0x00000008 #define SK_RXBMU_RX_START 0x00000010 #define SK_RXBMU_RX_STOP 0x00000020 #define SK_RXBMU_POLL_OFF 0x00000040 #define SK_RXBMU_POLL_ON 0x00000080 #define SK_RXBMU_TRANSFER_SM_RESET 0x00000100 #define SK_RXBMU_TRANSFER_SM_UNRESET 0x00000200 #define SK_RXBMU_DESCWR_SM_RESET 0x00000400 #define SK_RXBMU_DESCWR_SM_UNRESET 0x00000800 #define SK_RXBMU_DESCRD_SM_RESET 0x00001000 #define SK_RXBMU_DESCRD_SM_UNRESET 0x00002000 #define SK_RXBMU_SUPERVISOR_SM_RESET 0x00004000 #define SK_RXBMU_SUPERVISOR_SM_UNRESET 0x00008000 #define SK_RXBMU_PFI_SM_RESET 0x00010000 #define SK_RXBMU_PFI_SM_UNRESET 0x00020000 #define SK_RXBMU_FIFO_RESET 0x00040000 #define SK_RXBMU_FIFO_UNRESET 0x00080000 #define SK_RXBMU_DESC_RESET 0x00100000 #define SK_RXBMU_DESC_UNRESET 0x00200000 #define SK_RXBMU_SUPERVISOR_IDLE 0x01000000 #define SK_RXBMU_ONLINE \ (SK_RXBMU_TRANSFER_SM_UNRESET|SK_RXBMU_DESCWR_SM_UNRESET| \ SK_RXBMU_DESCRD_SM_UNRESET|SK_RXBMU_SUPERVISOR_SM_UNRESET| \ SK_RXBMU_PFI_SM_UNRESET|SK_RXBMU_FIFO_UNRESET| \ SK_RXBMU_DESC_UNRESET) #define SK_RXBMU_OFFLINE \ 
(SK_RXBMU_TRANSFER_SM_RESET|SK_RXBMU_DESCWR_SM_RESET| \ SK_RXBMU_DESCRD_SM_RESET|SK_RXBMU_SUPERVISOR_SM_RESET| \ SK_RXBMU_PFI_SM_RESET|SK_RXBMU_FIFO_RESET| \ SK_RXBMU_DESC_RESET) /* Block 12 -- TX sync queue 1 */ #define SK_TXQS1_BUFCNT 0x0600 #define SK_TXQS1_BUFCTL 0x0602 #define SK_TXQS1_NEXTDESC 0x0604 #define SK_TXQS1_RXBUF_LO 0x0608 #define SK_TXQS1_RXBUF_HI 0x060C #define SK_TXQS1_RXSTAT 0x0610 #define SK_TXQS1_CSUM_STARTVAL 0x0614 #define SK_TXQS1_CSUM_STARTPOS 0x0618 #define SK_TXQS1_CSUM_WRITEPOS 0x061A #define SK_TXQS1_CURADDR_LO 0x0620 #define SK_TXQS1_CURADDR_HI 0x0624 #define SK_TXQS1_CURCNT_LO 0x0628 #define SK_TXQS1_CURCNT_HI 0x062C #define SK_TXQS1_CURBYTES 0x0630 #define SK_TXQS1_BMU_CSR 0x0634 #define SK_TXQS1_WATERMARK 0x0638 #define SK_TXQS1_FLAG 0x063A #define SK_TXQS1_TEST1 0x063C #define SK_TXQS1_TEST2 0x0640 #define SK_TXQS1_TEST3 0x0644 /* Block 13 -- TX async queue 1 */ #define SK_TXQA1_BUFCNT 0x0680 #define SK_TXQA1_BUFCTL 0x0682 #define SK_TXQA1_NEXTDESC 0x0684 #define SK_TXQA1_RXBUF_LO 0x0688 #define SK_TXQA1_RXBUF_HI 0x068C #define SK_TXQA1_RXSTAT 0x0690 #define SK_TXQA1_CSUM_STARTVAL 0x0694 #define SK_TXQA1_CSUM_STARTPOS 0x0698 #define SK_TXQA1_CSUM_WRITEPOS 0x069A #define SK_TXQA1_CURADDR_LO 0x06A0 #define SK_TXQA1_CURADDR_HI 0x06A4 #define SK_TXQA1_CURCNT_LO 0x06A8 #define SK_TXQA1_CURCNT_HI 0x06AC #define SK_TXQA1_CURBYTES 0x06B0 #define SK_TXQA1_BMU_CSR 0x06B4 #define SK_TXQA1_WATERMARK 0x06B8 #define SK_TXQA1_FLAG 0x06BA #define SK_TXQA1_TEST1 0x06BC #define SK_TXQA1_TEST2 0x06C0 #define SK_TXQA1_TEST3 0x06C4 /* Block 14 -- TX sync queue 2 */ #define SK_TXQS2_BUFCNT 0x0700 #define SK_TXQS2_BUFCTL 0x0702 #define SK_TXQS2_NEXTDESC 0x0704 #define SK_TXQS2_RXBUF_LO 0x0708 #define SK_TXQS2_RXBUF_HI 0x070C #define SK_TXQS2_RXSTAT 0x0710 #define SK_TXQS2_CSUM_STARTVAL 0x0714 #define SK_TXQS2_CSUM_STARTPOS 0x0718 #define SK_TXQS2_CSUM_WRITEPOS 0x071A #define SK_TXQS2_CURADDR_LO 0x0720 #define SK_TXQS2_CURADDR_HI 0x0724 #define 
SK_TXQS2_CURCNT_LO 0x0728 #define SK_TXQS2_CURCNT_HI 0x072C #define SK_TXQS2_CURBYTES 0x0730 #define SK_TXQS2_BMU_CSR 0x0734 #define SK_TXQS2_WATERMARK 0x0738 #define SK_TXQS2_FLAG 0x073A #define SK_TXQS2_TEST1 0x073C #define SK_TXQS2_TEST2 0x0740 #define SK_TXQS2_TEST3 0x0744 /* Block 15 -- TX async queue 2 */ #define SK_TXQA2_BUFCNT 0x0780 #define SK_TXQA2_BUFCTL 0x0782 #define SK_TXQA2_NEXTDESC 0x0784 #define SK_TXQA2_RXBUF_LO 0x0788 #define SK_TXQA2_RXBUF_HI 0x078C #define SK_TXQA2_RXSTAT 0x0790 #define SK_TXQA2_CSUM_STARTVAL 0x0794 #define SK_TXQA2_CSUM_STARTPOS 0x0798 #define SK_TXQA2_CSUM_WRITEPOS 0x079A #define SK_TXQA2_CURADDR_LO 0x07A0 #define SK_TXQA2_CURADDR_HI 0x07A4 #define SK_TXQA2_CURCNT_LO 0x07A8 #define SK_TXQA2_CURCNT_HI 0x07AC #define SK_TXQA2_CURBYTES 0x07B0 #define SK_TXQA2_BMU_CSR 0x07B4 #define SK_TXQA2_WATERMARK 0x07B8 #define SK_TXQA2_FLAG 0x07BA #define SK_TXQA2_TEST1 0x07BC #define SK_TXQA2_TEST2 0x07C0 #define SK_TXQA2_TEST3 0x07C4 #define SK_TXBMU_CLR_IRQ_ERR 0x00000001 #define SK_TXBMU_CLR_IRQ_EOF 0x00000002 #define SK_TXBMU_CLR_IRQ_EOB 0x00000004 #define SK_TXBMU_TX_START 0x00000010 #define SK_TXBMU_TX_STOP 0x00000020 #define SK_TXBMU_POLL_OFF 0x00000040 #define SK_TXBMU_POLL_ON 0x00000080 #define SK_TXBMU_TRANSFER_SM_RESET 0x00000100 #define SK_TXBMU_TRANSFER_SM_UNRESET 0x00000200 #define SK_TXBMU_DESCWR_SM_RESET 0x00000400 #define SK_TXBMU_DESCWR_SM_UNRESET 0x00000800 #define SK_TXBMU_DESCRD_SM_RESET 0x00001000 #define SK_TXBMU_DESCRD_SM_UNRESET 0x00002000 #define SK_TXBMU_SUPERVISOR_SM_RESET 0x00004000 #define SK_TXBMU_SUPERVISOR_SM_UNRESET 0x00008000 #define SK_TXBMU_PFI_SM_RESET 0x00010000 #define SK_TXBMU_PFI_SM_UNRESET 0x00020000 #define SK_TXBMU_FIFO_RESET 0x00040000 #define SK_TXBMU_FIFO_UNRESET 0x00080000 #define SK_TXBMU_DESC_RESET 0x00100000 #define SK_TXBMU_DESC_UNRESET 0x00200000 #define SK_TXBMU_SUPERVISOR_IDLE 0x01000000 #define SK_TXBMU_ONLINE \ (SK_TXBMU_TRANSFER_SM_UNRESET|SK_TXBMU_DESCWR_SM_UNRESET| \ 
SK_TXBMU_DESCRD_SM_UNRESET|SK_TXBMU_SUPERVISOR_SM_UNRESET| \ SK_TXBMU_PFI_SM_UNRESET|SK_TXBMU_FIFO_UNRESET| \ - SK_TXBMU_DESC_UNRESET) + SK_TXBMU_DESC_UNRESET|SK_TXBMU_POLL_ON) #define SK_TXBMU_OFFLINE \ (SK_TXBMU_TRANSFER_SM_RESET|SK_TXBMU_DESCWR_SM_RESET| \ SK_TXBMU_DESCRD_SM_RESET|SK_TXBMU_SUPERVISOR_SM_RESET| \ SK_TXBMU_PFI_SM_RESET|SK_TXBMU_FIFO_RESET| \ - SK_TXBMU_DESC_RESET) + SK_TXBMU_DESC_RESET|SK_TXBMU_POLL_OFF) /* Block 16 -- Receive RAMbuffer 1 */ #define SK_RXRB1_START 0x0800 #define SK_RXRB1_END 0x0804 #define SK_RXRB1_WR_PTR 0x0808 #define SK_RXRB1_RD_PTR 0x080C #define SK_RXRB1_UTHR_PAUSE 0x0810 #define SK_RXRB1_LTHR_PAUSE 0x0814 #define SK_RXRB1_UTHR_HIPRIO 0x0818 #define SK_RXRB1_UTHR_LOPRIO 0x081C #define SK_RXRB1_PKTCNT 0x0820 #define SK_RXRB1_LVL 0x0824 #define SK_RXRB1_CTLTST 0x0828 /* Block 17 -- Receive RAMbuffer 2 */ #define SK_RXRB2_START 0x0880 #define SK_RXRB2_END 0x0884 #define SK_RXRB2_WR_PTR 0x0888 #define SK_RXRB2_RD_PTR 0x088C #define SK_RXRB2_UTHR_PAUSE 0x0890 #define SK_RXRB2_LTHR_PAUSE 0x0894 #define SK_RXRB2_UTHR_HIPRIO 0x0898 #define SK_RXRB2_UTHR_LOPRIO 0x089C #define SK_RXRB2_PKTCNT 0x08A0 #define SK_RXRB2_LVL 0x08A4 #define SK_RXRB2_CTLTST 0x08A8 /* Block 20 -- Sync. Transmit RAMbuffer 1 */ #define SK_TXRBS1_START 0x0A00 #define SK_TXRBS1_END 0x0A04 #define SK_TXRBS1_WR_PTR 0x0A08 #define SK_TXRBS1_RD_PTR 0x0A0C #define SK_TXRBS1_PKTCNT 0x0A20 #define SK_TXRBS1_LVL 0x0A24 #define SK_TXRBS1_CTLTST 0x0A28 /* Block 21 -- Async. Transmit RAMbuffer 1 */ #define SK_TXRBA1_START 0x0A80 #define SK_TXRBA1_END 0x0A84 #define SK_TXRBA1_WR_PTR 0x0A88 #define SK_TXRBA1_RD_PTR 0x0A8C #define SK_TXRBA1_PKTCNT 0x0AA0 #define SK_TXRBA1_LVL 0x0AA4 #define SK_TXRBA1_CTLTST 0x0AA8 /* Block 22 -- Sync. 
Transmit RAMbuffer 2 */ #define SK_TXRBS2_START 0x0B00 #define SK_TXRBS2_END 0x0B04 #define SK_TXRBS2_WR_PTR 0x0B08 #define SK_TXRBS2_RD_PTR 0x0B0C #define SK_TXRBS2_PKTCNT 0x0B20 #define SK_TXRBS2_LVL 0x0B24 #define SK_TXRBS2_CTLTST 0x0B28 /* Block 23 -- Async. Transmit RAMbuffer 2 */ #define SK_TXRBA2_START 0x0B80 #define SK_TXRBA2_END 0x0B84 #define SK_TXRBA2_WR_PTR 0x0B88 #define SK_TXRBA2_RD_PTR 0x0B8C #define SK_TXRBA2_PKTCNT 0x0BA0 #define SK_TXRBA2_LVL 0x0BA4 #define SK_TXRBA2_CTLTST 0x0BA8 #define SK_RBCTL_RESET 0x00000001 #define SK_RBCTL_UNRESET 0x00000002 #define SK_RBCTL_OFF 0x00000004 #define SK_RBCTL_ON 0x00000008 #define SK_RBCTL_STORENFWD_OFF 0x00000010 #define SK_RBCTL_STORENFWD_ON 0x00000020 /* Block 24 -- RX MAC FIFO 1 registers and LINK_SYNC counter */ #define SK_RXF1_END 0x0C00 #define SK_RXF1_WPTR 0x0C04 #define SK_RXF1_RPTR 0x0C0C #define SK_RXF1_PKTCNT 0x0C10 #define SK_RXF1_LVL 0x0C14 #define SK_RXF1_MACCTL 0x0C18 #define SK_RXF1_CTL 0x0C1C #define SK_RXLED1_CNTINIT 0x0C20 #define SK_RXLED1_COUNTER 0x0C24 #define SK_RXLED1_CTL 0x0C28 #define SK_RXLED1_TST 0x0C29 #define SK_LINK_SYNC1_CINIT 0x0C30 #define SK_LINK_SYNC1_COUNTER 0x0C34 #define SK_LINK_SYNC1_CTL 0x0C38 #define SK_LINK_SYNC1_TST 0x0C39 #define SK_LINKLED1_CTL 0x0C3C #define SK_FIFO_END 0x3F /* Receive MAC FIFO 1 (Yukon Only) */ #define SK_RXMF1_END 0x0C40 #define SK_RXMF1_THRESHOLD 0x0C44 #define SK_RXMF1_CTRL_TEST 0x0C48 +#define SK_RXMF1_FLUSH_MASK 0x0C4C +#define SK_RXMF1_FLUSH_THRESHOLD 0x0C50 #define SK_RXMF1_WRITE_PTR 0x0C60 #define SK_RXMF1_WRITE_LEVEL 0x0C68 #define SK_RXMF1_READ_PTR 0x0C70 #define SK_RXMF1_READ_LEVEL 0x0C78 +/* Receive MAC FIFO 1 Control/Test */ #define SK_RFCTL_WR_PTR_TST_ON 0x00004000 /* Write pointer test on*/ #define SK_RFCTL_WR_PTR_TST_OFF 0x00002000 /* Write pointer test off */ #define SK_RFCTL_WR_PTR_STEP 0x00001000 /* Write pointer increment */ #define SK_RFCTL_RD_PTR_TST_ON 0x00000400 /* Read pointer test on */ #define SK_RFCTL_RD_PTR_TST_OFF
0x00000200 /* Read pointer test off */ #define SK_RFCTL_RD_PTR_STEP 0x00000100 /* Read pointer increment */ -#define SK_RFCTL_RX_FIFO_OVER 0x00000040 /* Clear IRQ RX FIFO Overrun */ +#define SK_RFCTL_FIFO_FLUSH_OFF 0x00000080 /* RX FIFO Flush mode off */ +#define SK_RFCTL_FIFO_FLUSH_ON 0x00000040 /* RX FIFO Flush mode on */ +#define SK_RFCTL_RX_FIFO_OVER 0x00000020 /* Clear IRQ RX FIFO Overrun */ #define SK_RFCTL_FRAME_RX_DONE 0x00000010 /* Clear IRQ Frame RX Done */ #define SK_RFCTL_OPERATION_ON 0x00000008 /* Operational mode on */ #define SK_RFCTL_OPERATION_OFF 0x00000004 /* Operational mode off */ #define SK_RFCTL_RESET_CLEAR 0x00000002 /* MAC FIFO Reset Clear */ #define SK_RFCTL_RESET_SET 0x00000001 /* MAC FIFO Reset Set */ +#define SK_RFCTL_FIFO_THRESHOLD 0x0a /* flush threshold (default) */ + /* Block 25 -- RX MAC FIFO 2 registers and LINK_SYNC counter */ #define SK_RXF2_END 0x0C80 #define SK_RXF2_WPTR 0x0C84 #define SK_RXF2_RPTR 0x0C8C #define SK_RXF2_PKTCNT 0x0C90 #define SK_RXF2_LVL 0x0C94 #define SK_RXF2_MACCTL 0x0C98 #define SK_RXF2_CTL 0x0C9C #define SK_RXLED2_CNTINIT 0x0CA0 #define SK_RXLED2_COUNTER 0x0CA4 #define SK_RXLED2_CTL 0x0CA8 #define SK_RXLED2_TST 0x0CA9 #define SK_LINK_SYNC2_CINIT 0x0CB0 #define SK_LINK_SYNC2_COUNTER 0x0CB4 #define SK_LINK_SYNC2_CTL 0x0CB8 #define SK_LINK_SYNC2_TST 0x0CB9 #define SK_LINKLED2_CTL 0x0CBC #define SK_RXMACCTL_CLR_IRQ_NOSTS 0x00000001 #define SK_RXMACCTL_CLR_IRQ_NOTSTAMP 0x00000002 #define SK_RXMACCTL_TSTAMP_OFF 0x00000004 #define SK_RXMACCTL_RSTAMP_ON 0x00000008 #define SK_RXMACCTL_FLUSH_OFF 0x00000010 #define SK_RXMACCTL_FLUSH_ON 0x00000020 #define SK_RXMACCTL_PAUSE_OFF 0x00000040 #define SK_RXMACCTL_PAUSE_ON 0x00000080 #define SK_RXMACCTL_AFULL_OFF 0x00000100 #define SK_RXMACCTL_AFULL_ON 0x00000200 #define SK_RXMACCTL_VALIDTIME_PATCH_OFF 0x00000400 #define SK_RXMACCTL_VALIDTIME_PATCH_ON 0x00000800 #define SK_RXMACCTL_RXRDY_PATCH_OFF 0x00001000 #define SK_RXMACCTL_RXRDY_PATCH_ON 0x00002000 #define
SK_RXMACCTL_STS_TIMEO 0x00FF0000 #define SK_RXMACCTL_TSTAMP_TIMEO 0xFF000000 #define SK_RXLEDCTL_ENABLE 0x0001 #define SK_RXLEDCTL_COUNTER_STOP 0x0002 #define SK_RXLEDCTL_COUNTER_START 0x0004 #define SK_LINKLED_OFF 0x0001 #define SK_LINKLED_ON 0x0002 #define SK_LINKLED_LINKSYNC_OFF 0x0004 #define SK_LINKLED_LINKSYNC_ON 0x0008 #define SK_LINKLED_BLINK_OFF 0x0010 #define SK_LINKLED_BLINK_ON 0x0020 /* Block 26 -- TX MAC FIFO 1 registers */ #define SK_TXF1_END 0x0D00 #define SK_TXF1_WPTR 0x0D04 #define SK_TXF1_RPTR 0x0D0C #define SK_TXF1_PKTCNT 0x0D10 #define SK_TXF1_LVL 0x0D14 #define SK_TXF1_MACCTL 0x0D18 #define SK_TXF1_CTL 0x0D1C #define SK_TXLED1_CNTINIT 0x0D20 #define SK_TXLED1_COUNTER 0x0D24 #define SK_TXLED1_CTL 0x0D28 #define SK_TXLED1_TST 0x0D29 -/* Receive MAC FIFO 1 (Yukon Only) */ +/* Transmit MAC FIFO 1 (Yukon Only) */ #define SK_TXMF1_END 0x0D40 #define SK_TXMF1_THRESHOLD 0x0D44 #define SK_TXMF1_CTRL_TEST 0x0D48 #define SK_TXMF1_WRITE_PTR 0x0D60 #define SK_TXMF1_WRITE_SHADOW 0x0D64 #define SK_TXMF1_WRITE_LEVEL 0x0D68 #define SK_TXMF1_READ_PTR 0x0D70 #define SK_TXMF1_RESTART_PTR 0x0D74 #define SK_TXMF1_READ_LEVEL 0x0D78 +/* Transmit MAC FIFO Control/Test */ #define SK_TFCTL_WR_PTR_TST_ON 0x00004000 /* Write pointer test on*/ #define SK_TFCTL_WR_PTR_TST_OFF 0x00002000 /* Write pointer test off */ #define SK_TFCTL_WR_PTR_STEP 0x00001000 /* Write pointer increment */ #define SK_TFCTL_RD_PTR_TST_ON 0x00000400 /* Read pointer test on */ #define SK_TFCTL_RD_PTR_TST_OFF 0x00000200 /* Read pointer test off */ #define SK_TFCTL_RD_PTR_STEP 0x00000100 /* Read pointer increment */ #define SK_TFCTL_TX_FIFO_UNDER 0x00000040 /* Clear IRQ TX FIFO Under */ #define SK_TFCTL_FRAME_TX_DONE 0x00000020 /* Clear IRQ Frame TX Done */ #define SK_TFCTL_IRQ_PARITY_ER 0x00000010 /* Clear IRQ Parity Error */ #define SK_TFCTL_OPERATION_ON 0x00000008 /* Operational mode on */ #define SK_TFCTL_OPERATION_OFF 0x00000004 /* Operational mode off */ #define SK_TFCTL_RESET_CLEAR 0x00000002 /*
MAC FIFO Reset Clear */ #define SK_TFCTL_RESET_SET 0x00000001 /* MAC FIFO Reset Set */ /* Block 27 -- TX MAC FIFO 2 registers */ #define SK_TXF2_END 0x0D80 #define SK_TXF2_WPTR 0x0D84 #define SK_TXF2_RPTR 0x0D8C #define SK_TXF2_PKTCNT 0x0D90 #define SK_TXF2_LVL 0x0D94 #define SK_TXF2_MACCTL 0x0D98 #define SK_TXF2_CTL 0x0D9C #define SK_TXLED2_CNTINIT 0x0DA0 #define SK_TXLED2_COUNTER 0x0DA4 #define SK_TXLED2_CTL 0x0DA8 #define SK_TXLED2_TST 0x0DA9 #define SK_TXMACCTL_XMAC_RESET 0x00000001 #define SK_TXMACCTL_XMAC_UNRESET 0x00000002 #define SK_TXMACCTL_LOOP_OFF 0x00000004 #define SK_TXMACCTL_LOOP_ON 0x00000008 #define SK_TXMACCTL_FLUSH_OFF 0x00000010 #define SK_TXMACCTL_FLUSH_ON 0x00000020 #define SK_TXMACCTL_WAITEMPTY_OFF 0x00000040 #define SK_TXMACCTL_WAITEMPTY_ON 0x00000080 #define SK_TXMACCTL_AFULL_OFF 0x00000100 #define SK_TXMACCTL_AFULL_ON 0x00000200 #define SK_TXMACCTL_TXRDY_PATCH_OFF 0x00000400 #define SK_TXMACCTL_RXRDY_PATCH_ON 0x00000800 #define SK_TXMACCTL_PKT_RECOVERY_OFF 0x00001000 #define SK_TXMACCTL_PKT_RECOVERY_ON 0x00002000 #define SK_TXMACCTL_CLR_IRQ_PERR 0x00008000 #define SK_TXMACCTL_WAITAFTERFLUSH 0x00010000 #define SK_TXLEDCTL_ENABLE 0x0001 #define SK_TXLEDCTL_COUNTER_STOP 0x0002 #define SK_TXLEDCTL_COUNTER_START 0x0004 #define SK_FIFO_RESET 0x00000001 #define SK_FIFO_UNRESET 0x00000002 #define SK_FIFO_OFF 0x00000004 #define SK_FIFO_ON 0x00000008 /* Block 28 -- Descriptor Poll Timer */ #define SK_DPT_INIT 0x0e00 /* Initial value 24 bits */ #define SK_DPT_TIMER 0x0e04 /* Mul of 78.12MHz clk (24b) */ +#define SK_DPT_TIMER_MAX 0x00ffffffff /* 214.75ms at 78.125MHz; NOTE(review): literal sets 32 bits but the timer is documented as 24-bit above, and 214.75ms at 78.125MHz corresponds to 0x00ffffff -- verify intended mask */ + #define SK_DPT_TIMER_CTRL 0x0e08 /* Timer Control 16 bits */ #define SK_DPT_TCTL_STOP 0x0001 /* Stop Timer */ #define SK_DPT_TCTL_START 0x0002 /* Start Timer */ #define SK_DPT_TIMER_TEST 0x0e0a /* Timer Test 16 bits */ #define SK_DPT_TTEST_STEP 0x0001 /* Timer Decrement */ #define SK_DPT_TTEST_OFF 0x0002 /* Test Mode Off */ #define SK_DPT_TTEST_ON 0x0004 /* Test Mode On */ /* Block 29
-- reserved */ /* Block 30 -- GMAC/GPHY Control Registers (Yukon Only)*/ #define SK_GMAC_CTRL 0x0f00 /* GMAC Control Register */ #define SK_GPHY_CTRL 0x0f04 /* GPHY Control Register */ #define SK_GMAC_ISR 0x0f08 /* GMAC Interrupt Source Register */ -#define SK_GMAC_IMR 0x0f08 /* GMAC Interrupt Mask Register */ +#define SK_GMAC_IMR 0x0f0c /* GMAC Interrupt Mask Register */ #define SK_LINK_CTRL 0x0f10 /* Link Control Register (LCR) */ #define SK_WOL_CTRL 0x0f20 /* Wake on LAN Control Register */ #define SK_MAC_ADDR_LOW 0x0f24 /* MAC Address Registers LOW */ #define SK_MAC_ADDR_HIGH 0x0f28 /* MAC Address Registers HIGH */ #define SK_PAT_READ_PTR 0x0f2c /* Pattern Read Pointer Register */ #define SK_PAT_LEN_REG0 0x0f30 /* Pattern Length Register 0 */ #define SK_PAT_LEN0 0x0f30 /* Pattern Length 0 */ #define SK_PAT_LEN1 0x0f31 /* Pattern Length 1 */ #define SK_PAT_LEN2 0x0f32 /* Pattern Length 2 */ #define SK_PAT_LEN3 0x0f33 /* Pattern Length 3 */ #define SK_PAT_LEN_REG1 0x0f34 /* Pattern Length Register 1 */ #define SK_PAT_LEN4 0x0f34 /* Pattern Length 4 */ #define SK_PAT_LEN5 0x0f35 /* Pattern Length 5 */ #define SK_PAT_LEN6 0x0f36 /* Pattern Length 6 */ #define SK_PAT_LEN7 0x0f37 /* Pattern Length 7 */ #define SK_PAT_CTR_REG0 0x0f38 /* Pattern Counter Register 0 */ #define SK_PAT_CTR0 0x0f38 /* Pattern Counter 0 */ #define SK_PAT_CTR1 0x0f39 /* Pattern Counter 1 */ #define SK_PAT_CTR2 0x0f3a /* Pattern Counter 2 */ #define SK_PAT_CTR3 0x0f3b /* Pattern Counter 3 */ #define SK_PAT_CTR_REG1 0x0f3c /* Pattern Counter Register 1 */ #define SK_PAT_CTR4 0x0f3c /* Pattern Counter 4 */ #define SK_PAT_CTR5 0x0f3d /* Pattern Counter 5 */ #define SK_PAT_CTR6 0x0f3e /* Pattern Counter 6 */ #define SK_PAT_CTR7 0x0f3f /* Pattern Counter 7 */ #define SK_GMAC_LOOP_ON 0x00000020 /* Loopback mode for testing */ #define SK_GMAC_LOOP_OFF 0x00000010 /* purposes */ #define SK_GMAC_PAUSE_ON 0x00000008 /* enable forward of pause */ #define SK_GMAC_PAUSE_OFF 0x00000004 /* signal to GMAC */
#define SK_GMAC_RESET_CLEAR 0x00000002 /* Clear GMAC Reset */ #define SK_GMAC_RESET_SET 0x00000001 /* Set GMAC Reset */ #define SK_GPHY_SEL_BDT 0x10000000 /* Select Bidirectional xfer */ #define SK_GPHY_INT_POL_HI 0x08000000 /* IRQ Polarity Active */ #define SK_GPHY_75_OHM 0x04000000 /* Use 75 Ohm Termination */ #define SK_GPHY_DIS_FC 0x02000000 /* Disable Auto Fiber/Copper */ #define SK_GPHY_DIS_SLEEP 0x01000000 /* Disable Energy Detect */ #define SK_GPHY_HWCFG_M_3 0x00800000 /* HWCFG_MODE[3] */ #define SK_GPHY_HWCFG_M_2 0x00400000 /* HWCFG_MODE[2] */ #define SK_GPHY_HWCFG_M_1 0x00200000 /* HWCFG_MODE[1] */ #define SK_GPHY_HWCFG_M_0 0x00100000 /* HWCFG_MODE[0] */ #define SK_GPHY_ANEG_0 0x00080000 /* ANEG[0] */ #define SK_GPHY_ENA_XC 0x00040000 /* Enable MDI Crossover */ #define SK_GPHY_DIS_125 0x00020000 /* Disable 125MHz Clock */ #define SK_GPHY_ANEG_3 0x00010000 /* ANEG[3] */ #define SK_GPHY_ANEG_2 0x00008000 /* ANEG[2] */ #define SK_GPHY_ANEG_1 0x00004000 /* ANEG[1] */ #define SK_GPHY_ENA_PAUSE 0x00002000 /* Enable Pause */ #define SK_GPHY_PHYADDR_4 0x00001000 /* Bit 4 of Phy Addr */ #define SK_GPHY_PHYADDR_3 0x00000800 /* Bit 3 of Phy Addr */ #define SK_GPHY_PHYADDR_2 0x00000400 /* Bit 2 of Phy Addr */ #define SK_GPHY_PHYADDR_1 0x00000200 /* Bit 1 of Phy Addr */ #define SK_GPHY_PHYADDR_0 0x00000100 /* Bit 0 of Phy Addr */ #define SK_GPHY_RESET_CLEAR 0x00000002 /* Clear GPHY Reset */ #define SK_GPHY_RESET_SET 0x00000001 /* Set GPHY Reset */ #define SK_GPHY_COPPER (SK_GPHY_HWCFG_M_0 | SK_GPHY_HWCFG_M_1 | \ SK_GPHY_HWCFG_M_2 | SK_GPHY_HWCFG_M_3 ) #define SK_GPHY_FIBER (SK_GPHY_HWCFG_M_0 | SK_GPHY_HWCFG_M_1 | \ SK_GPHY_HWCFG_M_2 ) #define SK_GPHY_ANEG_ALL (SK_GPHY_ANEG_0 | SK_GPHY_ANEG_1 | \ SK_GPHY_ANEG_2 | SK_GPHY_ANEG_3 ) #define SK_GMAC_INT_TX_OFLOW 0x20 /* Transmit Counter Overflow */ #define SK_GMAC_INT_RX_OFLOW 0x10 /* Receiver Overflow */ #define SK_GMAC_INT_TX_UNDER 0x08 /* Transmit FIFO Underrun */ #define SK_GMAC_INT_TX_DONE 0x04 /* Transmit Complete */ 
#define SK_GMAC_INT_RX_OVER 0x02 /* Receive FIFO Overrun */ #define SK_GMAC_INT_RX_DONE 0x01 /* Receive Complete */ #define SK_LINK_RESET_CLEAR 0x0002 /* Link Reset Clear */ #define SK_LINK_RESET_SET 0x0001 /* Link Reset Set */ /* Block 31 -- reserved */ /* Block 32-33 -- Pattern Ram */ #define SK_WOL_PRAM 0x1000 /* Block 0x22 - 0x3f -- reserved */ /* Block 0x40 to 0x4F -- XMAC 1 registers */ #define SK_XMAC1_BASE 0x2000 /* Block 0x50 to 0x5F -- MARV 1 registers */ #define SK_MARV1_BASE 0x2800 /* Block 0x60 to 0x6F -- XMAC 2 registers */ #define SK_XMAC2_BASE 0x3000 /* Block 0x70 to 0x7F -- MARV 2 registers */ #define SK_MARV2_BASE 0x3800 /* Compute relative offset of an XMAC register in the XMAC window(s). */ #define SK_XMAC_REG(sc, reg) (((reg) * 2) + SK_XMAC1_BASE + \ (((sc)->sk_port) * (SK_XMAC2_BASE - SK_XMAC1_BASE))) #if 0 #define SK_XM_READ_4(sc, reg) \ ((sk_win_read_2(sc->sk_softc, \ SK_XMAC_REG(sc, reg)) & 0xFFFF) | \ ((sk_win_read_2(sc->sk_softc, \ SK_XMAC_REG(sc, reg + 2)) & 0xFFFF) << 16)) #define SK_XM_WRITE_4(sc, reg, val) \ sk_win_write_2(sc->sk_softc, SK_XMAC_REG(sc, reg), \ ((val) & 0xFFFF)); \ sk_win_write_2(sc->sk_softc, SK_XMAC_REG(sc, reg + 2), \ ((val) >> 16) & 0xFFFF) #else #define SK_XM_READ_4(sc, reg) \ sk_win_read_4(sc->sk_softc, SK_XMAC_REG(sc, reg)) #define SK_XM_WRITE_4(sc, reg, val) \ sk_win_write_4(sc->sk_softc, SK_XMAC_REG(sc, reg), (val)) #endif #define SK_XM_READ_2(sc, reg) \ sk_win_read_2(sc->sk_softc, SK_XMAC_REG(sc, reg)) #define SK_XM_WRITE_2(sc, reg, val) \ sk_win_write_2(sc->sk_softc, SK_XMAC_REG(sc, reg), val) #define SK_XM_SETBIT_4(sc, reg, x) \ SK_XM_WRITE_4(sc, reg, (SK_XM_READ_4(sc, reg)) | (x)) #define SK_XM_CLRBIT_4(sc, reg, x) \ SK_XM_WRITE_4(sc, reg, (SK_XM_READ_4(sc, reg)) & ~(x)) #define SK_XM_SETBIT_2(sc, reg, x) \ SK_XM_WRITE_2(sc, reg, (SK_XM_READ_2(sc, reg)) | (x)) #define SK_XM_CLRBIT_2(sc, reg, x) \ SK_XM_WRITE_2(sc, reg, (SK_XM_READ_2(sc, reg)) & ~(x)) /* Compute relative offset of an MARV register in the 
MARV window(s). */ #define SK_YU_REG(sc, reg) \ ((reg) + SK_MARV1_BASE + \ (((sc)->sk_port) * (SK_MARV2_BASE - SK_MARV1_BASE))) #define SK_YU_READ_4(sc, reg) \ sk_win_read_4((sc)->sk_softc, SK_YU_REG((sc), (reg))) #define SK_YU_READ_2(sc, reg) \ sk_win_read_2((sc)->sk_softc, SK_YU_REG((sc), (reg))) #define SK_YU_WRITE_4(sc, reg, val) \ sk_win_write_4((sc)->sk_softc, SK_YU_REG((sc), (reg)), (val)) #define SK_YU_WRITE_2(sc, reg, val) \ sk_win_write_2((sc)->sk_softc, SK_YU_REG((sc), (reg)), (val)) #define SK_YU_SETBIT_4(sc, reg, x) \ SK_YU_WRITE_4(sc, reg, (SK_YU_READ_4(sc, reg)) | (x)) #define SK_YU_CLRBIT_4(sc, reg, x) \ SK_YU_WRITE_4(sc, reg, (SK_YU_READ_4(sc, reg)) & ~(x)) #define SK_YU_SETBIT_2(sc, reg, x) \ SK_YU_WRITE_2(sc, reg, (SK_YU_READ_2(sc, reg)) | (x)) #define SK_YU_CLRBIT_2(sc, reg, x) \ SK_YU_WRITE_2(sc, reg, (SK_YU_READ_2(sc, reg)) & ~(x)) /* * The default FIFO threshold on the XMAC II is 4 bytes. On * dual port NICs, this often leads to transmit underruns, so we * bump the threshold a little. 
*/ #define SK_XM_TX_FIFOTHRESH 512 #define SK_PCI_VENDOR_ID 0x0000 #define SK_PCI_DEVICE_ID 0x0002 #define SK_PCI_COMMAND 0x0004 #define SK_PCI_STATUS 0x0006 #define SK_PCI_REVID 0x0008 #define SK_PCI_CLASSCODE 0x0009 #define SK_PCI_CACHELEN 0x000C #define SK_PCI_LATENCY_TIMER 0x000D #define SK_PCI_HEADER_TYPE 0x000E #define SK_PCI_LOMEM 0x0010 #define SK_PCI_LOIO 0x0014 #define SK_PCI_SUBVEN_ID 0x002C #define SK_PCI_SYBSYS_ID 0x002E #define SK_PCI_BIOSROM 0x0030 #define SK_PCI_INTLINE 0x003C #define SK_PCI_INTPIN 0x003D #define SK_PCI_MINGNT 0x003E #define SK_PCI_MINLAT 0x003F /* device specific PCI registers */ #define SK_PCI_OURREG1 0x0040 #define SK_PCI_OURREG2 0x0044 #define SK_PCI_CAPID 0x0048 /* 8 bits */ #define SK_PCI_NEXTPTR 0x0049 /* 8 bits */ #define SK_PCI_PWRMGMTCAP 0x004A /* 16 bits */ #define SK_PCI_PWRMGMTCTRL 0x004C /* 16 bits */ #define SK_PCI_PME_EVENT 0x004F #define SK_PCI_VPD_CAPID 0x0050 #define SK_PCI_VPD_NEXTPTR 0x0051 #define SK_PCI_VPD_ADDR 0x0052 #define SK_PCI_VPD_DATA 0x0054 #define SK_PSTATE_MASK 0x0003 #define SK_PSTATE_D0 0x0000 #define SK_PSTATE_D1 0x0001 #define SK_PSTATE_D2 0x0002 #define SK_PSTATE_D3 0x0003 #define SK_PME_EN 0x0010 #define SK_PME_STATUS 0x8000 /* * VPD flag bit. Set to 0 to initiate a read, will become 1 when * read is complete. Set to 1 to initiate a write, will become 0 * when write is finished. 
*/ #define SK_VPD_FLAG 0x8000 /* VPD structures */ struct vpd_res { u_int8_t vr_id; u_int8_t vr_len; u_int8_t vr_pad; }; struct vpd_key { char vk_key[2]; u_int8_t vk_len; }; #define VPD_RES_ID 0x82 /* ID string */ #define VPD_RES_READ 0x90 /* start of read only area */ #define VPD_RES_WRITE 0x81 /* start of read/write area */ #define VPD_RES_END 0x78 /* end tag */ #define CSR_WRITE_4(sc, reg, val) \ bus_space_write_4((sc)->sk_btag, (sc)->sk_bhandle, (reg), (val)) #define CSR_WRITE_2(sc, reg, val) \ bus_space_write_2((sc)->sk_btag, (sc)->sk_bhandle, (reg), (val)) #define CSR_WRITE_1(sc, reg, val) \ bus_space_write_1((sc)->sk_btag, (sc)->sk_bhandle, (reg), (val)) #define CSR_READ_4(sc, reg) \ bus_space_read_4((sc)->sk_btag, (sc)->sk_bhandle, (reg)) #define CSR_READ_2(sc, reg) \ bus_space_read_2((sc)->sk_btag, (sc)->sk_bhandle, (reg)) #define CSR_READ_1(sc, reg) \ bus_space_read_1((sc)->sk_btag, (sc)->sk_bhandle, (reg)) struct sk_type { u_int16_t sk_vid; u_int16_t sk_did; char *sk_name; }; +#define SK_ADDR_LO(x) ((u_int64_t) (x) & 0xffffffff) +#define SK_ADDR_HI(x) ((u_int64_t) (x) >> 32) + +#define SK_RING_ALIGN 64 + /* RX queue descriptor data structure */ struct sk_rx_desc { u_int32_t sk_ctl; u_int32_t sk_next; u_int32_t sk_data_lo; u_int32_t sk_data_hi; u_int32_t sk_xmac_rxstat; u_int32_t sk_timestamp; - u_int16_t sk_csum2; - u_int16_t sk_csum1; - u_int16_t sk_csum2_start; - u_int16_t sk_csum1_start; + u_int32_t sk_csum; + u_int32_t sk_csum_start; }; #define SK_OPCODE_DEFAULT 0x00550000 #define SK_OPCODE_CSUM 0x00560000 #define SK_RXCTL_LEN 0x0000FFFF #define SK_RXCTL_OPCODE 0x00FF0000 #define SK_RXCTL_TSTAMP_VALID 0x01000000 #define SK_RXCTL_STATUS_VALID 0x02000000 #define SK_RXCTL_DEV0 0x04000000 #define SK_RXCTL_EOF_INTR 0x08000000 #define SK_RXCTL_EOB_INTR 0x10000000 #define SK_RXCTL_LASTFRAG 0x20000000 #define SK_RXCTL_FIRSTFRAG 0x40000000 #define SK_RXCTL_OWN 0x80000000 #define SK_RXSTAT \ - (SK_OPCODE_DEFAULT|SK_RXCTL_EOF_INTR|SK_RXCTL_LASTFRAG| \ - 
SK_RXCTL_FIRSTFRAG|SK_RXCTL_OWN) + (SK_RXCTL_EOF_INTR|SK_RXCTL_LASTFRAG|SK_RXCTL_FIRSTFRAG|SK_RXCTL_OWN) struct sk_tx_desc { u_int32_t sk_ctl; u_int32_t sk_next; u_int32_t sk_data_lo; u_int32_t sk_data_hi; u_int32_t sk_xmac_txstat; - u_int16_t sk_rsvd0; - u_int16_t sk_csum_startval; - u_int16_t sk_csum_startpos; - u_int16_t sk_csum_writepos; + u_int32_t sk_csum_startval; + u_int32_t sk_csum_start; u_int32_t sk_rsvd1; }; #define SK_TXCTL_LEN 0x0000FFFF #define SK_TXCTL_OPCODE 0x00FF0000 #define SK_TXCTL_SW 0x01000000 #define SK_TXCTL_NOCRC 0x02000000 #define SK_TXCTL_STORENFWD 0x04000000 #define SK_TXCTL_EOF_INTR 0x08000000 #define SK_TXCTL_EOB_INTR 0x10000000 #define SK_TXCTL_LASTFRAG 0x20000000 #define SK_TXCTL_FIRSTFRAG 0x40000000 #define SK_TXCTL_OWN 0x80000000 #define SK_TXSTAT \ (SK_OPCODE_DEFAULT|SK_TXCTL_EOF_INTR|SK_TXCTL_LASTFRAG|SK_TXCTL_OWN) -#define SK_RXBYTES(x) (x) & 0x0000FFFF; +#define SK_RXBYTES(x) ((x) & 0x0000FFFF) #define SK_TXBYTES SK_RXBYTES #define SK_TX_RING_CNT 512 #define SK_RX_RING_CNT 256 +#define SK_JUMBO_RX_RING_CNT 256 +#define SK_MAXTXSEGS 32 +#define SK_MAXRXSEGS 32 /* * Jumbo buffer stuff. Note that we must allocate more jumbo * buffers than there are descriptors in the receive ring. This * is because we don't know how long it will take for a packet * to be released after we hand it off to the upper protocol * layers. To be safe, we allocate 1.5 times the number of * receive descriptors. 
*/ #define SK_JUMBO_FRAMELEN 9018 #define SK_JUMBO_MTU (SK_JUMBO_FRAMELEN-ETHER_HDR_LEN-ETHER_CRC_LEN) +#define SK_MAX_FRAMELEN \ + (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN - ETHER_CRC_LEN) +#define SK_MIN_FRAMELEN (ETHER_MIN_LEN - ETHER_CRC_LEN) #define SK_JSLOTS ((SK_RX_RING_CNT * 3) / 2) #define SK_JRAWLEN (SK_JUMBO_FRAMELEN + ETHER_ALIGN) #define SK_JLEN (SK_JRAWLEN + (sizeof(u_int64_t) - \ (SK_JRAWLEN % sizeof(u_int64_t)))) #define SK_JPAGESZ PAGE_SIZE #define SK_RESID (SK_JPAGESZ - (SK_JLEN * SK_JSLOTS) % SK_JPAGESZ) #define SK_JMEM ((SK_JLEN * SK_JSLOTS) + SK_RESID) struct sk_jpool_entry { int slot; SLIST_ENTRY(sk_jpool_entry) jpool_entries; }; -struct sk_chain { - void *sk_desc; - struct mbuf *sk_mbuf; - struct sk_chain *sk_next; +struct sk_txdesc { + struct mbuf *tx_m; + bus_dmamap_t tx_dmamap; + STAILQ_ENTRY(sk_txdesc) tx_q; }; +STAILQ_HEAD(sk_txdq, sk_txdesc); + +struct sk_rxdesc { + struct mbuf *rx_m; + bus_dmamap_t rx_dmamap; +}; + struct sk_chain_data { - struct sk_chain sk_tx_chain[SK_TX_RING_CNT]; - struct sk_chain sk_rx_chain[SK_RX_RING_CNT]; + bus_dma_tag_t sk_parent_tag; + bus_dma_tag_t sk_tx_tag; + struct sk_txdesc sk_txdesc[SK_TX_RING_CNT]; + struct sk_txdq sk_txfreeq; + struct sk_txdq sk_txbusyq; + bus_dma_tag_t sk_rx_tag; + struct sk_rxdesc sk_rxdesc[SK_RX_RING_CNT]; + bus_dma_tag_t sk_tx_ring_tag; + bus_dma_tag_t sk_rx_ring_tag; + bus_dmamap_t sk_tx_ring_map; + bus_dmamap_t sk_rx_ring_map; + bus_dmamap_t sk_rx_sparemap; + bus_dma_tag_t sk_jumbo_rx_tag; + bus_dma_tag_t sk_jumbo_tag; + bus_dmamap_t sk_jumbo_map; + bus_dma_tag_t sk_jumbo_mtag; + caddr_t sk_jslots[SK_JSLOTS]; + struct sk_rxdesc sk_jumbo_rxdesc[SK_JUMBO_RX_RING_CNT]; + bus_dma_tag_t sk_jumbo_rx_ring_tag; + bus_dmamap_t sk_jumbo_rx_ring_map; + bus_dmamap_t sk_jumbo_rx_sparemap; int sk_tx_prod; int sk_tx_cons; int sk_tx_cnt; - int sk_rx_prod; int sk_rx_cons; - int sk_rx_cnt; - /* Stick the jumbo mem management stuff here too. 
*/ - caddr_t sk_jslots[SK_JSLOTS]; - void *sk_jumbo_buf; - + int sk_jumbo_rx_cons; }; struct sk_ring_data { - struct sk_tx_desc sk_tx_ring[SK_TX_RING_CNT]; - struct sk_rx_desc sk_rx_ring[SK_RX_RING_CNT]; + struct sk_tx_desc *sk_tx_ring; + bus_addr_t sk_tx_ring_paddr; + struct sk_rx_desc *sk_rx_ring; + bus_addr_t sk_rx_ring_paddr; + struct sk_rx_desc *sk_jumbo_rx_ring; + bus_addr_t sk_jumbo_rx_ring_paddr; + void *sk_jumbo_buf; + bus_addr_t sk_jumbo_buf_paddr; }; +#define SK_TX_RING_ADDR(sc, i) \ + ((sc)->sk_rdata.sk_tx_ring_paddr + sizeof(struct sk_tx_desc) * (i)) +#define SK_RX_RING_ADDR(sc, i) \ + ((sc)->sk_rdata.sk_rx_ring_paddr + sizeof(struct sk_rx_desc) * (i)) +#define SK_JUMBO_RX_RING_ADDR(sc, i) \ + ((sc)->sk_rdata.sk_jumbo_rx_ring_paddr + sizeof(struct sk_rx_desc) * (i)) + +#define SK_TX_RING_SZ \ + (sizeof(struct sk_tx_desc) * SK_TX_RING_CNT) +#define SK_RX_RING_SZ \ + (sizeof(struct sk_rx_desc) * SK_RX_RING_CNT) +#define SK_JUMBO_RX_RING_SZ \ + (sizeof(struct sk_rx_desc) * SK_JUMBO_RX_RING_CNT) + struct sk_bcom_hack { int reg; int val; }; #define SK_INC(x, y) (x) = (x + 1) % y /* Forward decl. */ struct sk_if_softc; /* Softc for the GEnesis controller. 
*/ struct sk_softc { bus_space_handle_t sk_bhandle; /* bus space handle */ bus_space_tag_t sk_btag; /* bus space tag */ void *sk_intrhand; /* irq handler handle */ struct resource *sk_irq; /* IRQ resource handle */ struct resource *sk_res; /* I/O or shared mem handle */ - u_int8_t sk_unit; /* controller number */ + device_t sk_dev; u_int8_t sk_type; u_int8_t sk_rev; u_int8_t spare; char *sk_vpd_prodname; char *sk_vpd_readonly; uint16_t sk_vpd_readonly_len; u_int32_t sk_rboff; /* RAMbuffer offset */ u_int32_t sk_ramsize; /* amount of RAM on NIC */ u_int32_t sk_pmd; /* physical media type */ + u_int32_t sk_coppertype; u_int32_t sk_intrmask; int sk_int_mod; int sk_int_ticks; + int sk_suspended; struct sk_if_softc *sk_if[2]; device_t sk_devs[2]; + struct mtx sk_mii_mtx; struct mtx sk_mtx; }; #define SK_LOCK(_sc) mtx_lock(&(_sc)->sk_mtx) #define SK_UNLOCK(_sc) mtx_unlock(&(_sc)->sk_mtx) #define SK_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->sk_mtx, MA_OWNED) #define SK_IF_LOCK(_sc) SK_LOCK((_sc)->sk_softc) #define SK_IF_UNLOCK(_sc) SK_UNLOCK((_sc)->sk_softc) #define SK_IF_LOCK_ASSERT(_sc) SK_LOCK_ASSERT((_sc)->sk_softc) +#define SK_IF_MII_LOCK(_sc) mtx_lock(&(_sc)->sk_softc->sk_mii_mtx) +#define SK_IF_MII_UNLOCK(_sc) mtx_unlock(&(_sc)->sk_softc->sk_mii_mtx) /* Softc for each logical interface */ struct sk_if_softc { struct ifnet *sk_ifp; /* interface info */ device_t sk_miibus; - u_int8_t sk_unit; /* interface number */ + device_t sk_if_dev; u_int8_t sk_port; /* port # on controller */ u_int8_t sk_xmac_rev; /* XMAC chip rev (B2 or C1) */ u_int32_t sk_rx_ramstart; u_int32_t sk_rx_ramend; u_int32_t sk_tx_ramstart; u_int32_t sk_tx_ramend; int sk_phytype; int sk_phyaddr; - device_t sk_dev; - int sk_cnt; int sk_link; - struct callout_handle sk_tick_ch; + struct callout sk_tick_ch; struct sk_chain_data sk_cdata; - struct sk_ring_data *sk_rdata; + struct sk_ring_data sk_rdata; struct sk_softc *sk_softc; /* parent controller */ int sk_tx_bmu; /* TX BMU register */ int sk_if_flags; 
SLIST_HEAD(__sk_jfreehead, sk_jpool_entry) sk_jfree_listhead; SLIST_HEAD(__sk_jinusehead, sk_jpool_entry) sk_jinuse_listhead; struct mtx sk_jlist_mtx; }; #define SK_JLIST_LOCK(_sc) mtx_lock(&(_sc)->sk_jlist_mtx) #define SK_JLIST_UNLOCK(_sc) mtx_unlock(&(_sc)->sk_jlist_mtx) -#define SK_MAXUNIT 256 #define SK_TIMEOUT 1000 -#define ETHER_ALIGN 2 - -#ifdef __alpha__ -#undef vtophys -#define vtophys(va) alpha_XXX_dmamap((vm_offset_t)va) -#endif Index: stable/6/sys/dev/sk/xmaciireg.h =================================================================== --- stable/6/sys/dev/sk/xmaciireg.h (revision 159562) +++ stable/6/sys/dev/sk/xmaciireg.h (revision 159563) @@ -1,403 +1,406 @@ /*- * Copyright (c) 1997, 1998, 1999, 2000 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * Registers and data structures for the XaQti Corporation XMAC II * Gigabit Ethernet MAC. Datasheet is available from http://www.xaqti.com. * The XMAC can be programmed for 16-bit or 32-bit register access modes. * The SysKonnect gigabit ethernet adapters use 16-bit mode, so that's * how the registers are laid out here. */ #define XM_DEVICEID 0x00E0AE20 #define XM_XAQTI_OUI 0x00E0AE #define XM_XMAC_REV(x) (((x) & 0x000000E0) >> 5) #define XM_XMAC_REV_B2 0x0 #define XM_XMAC_REV_C1 0x1 #define XM_MMUCMD 0x0000 #define XM_POFF 0x0008 #define XM_BURST 0x000C #define XM_VLAN_TAGLEV1 0x0010 #define XM_VLAN_TAGLEV2 0x0014 #define XM_TXCMD 0x0020 #define XM_TX_RETRYLIMIT 0x0024 #define XM_TX_SLOTTIME 0x0028 #define XM_TX_IPG 0x003C #define XM_RXCMD 0x0030 #define XM_PHY_ADDR 0x0034 #define XM_PHY_DATA 0x0038 #define XM_GPIO 0x0040 #define XM_IMR 0x0044 #define XM_ISR 0x0048 #define XM_HWCFG 0x004C #define XM_TX_LOWAT 0x0060 #define XM_TX_HIWAT 0x0062 #define XM_TX_REQTHRESH_LO 0x0064 #define XM_TX_REQTHRESH_HI 0x0066 #define XM_TX_REQTHRESH XM_TX_REQTHRESH_LO #define XM_PAUSEDST0 0x0068 #define XM_PAUSEDST1 0x006A #define XM_PAUSEDST2 0x006C #define XM_CTLPARM_LO 0x0070 #define XM_CTLPARM_HI 0x0072 #define XM_CTLPARM XM_CTLPARM_LO #define XM_OPCODE_PAUSE_TIMER 0x0074 #define XM_TXSTAT_LIFO 0x0078 /* * Perfect filter registers. The XMAC has a table of 16 perfect * filter entries, spaced 8 bytes apart. 
This is in addition to * the station address registers, which appear below. */ #define XM_RXFILT_BASE 0x0080 #define XM_RXFILT_END 0x0107 #define XM_RXFILT_MAX 16 #define XM_RXFILT_ENTRY(ent) (XM_RXFILT_BASE + ((ent * 8))) /* Primary station address. */ #define XM_PAR0 0x0108 #define XM_PAR1 0x010A #define XM_PAR2 0x010C /* 64-bit multicast hash table registers */ #define XM_MAR0 0x0110 #define XM_MAR1 0x0112 #define XM_MAR2 0x0114 #define XM_MAR3 0x0116 #define XM_RX_LOWAT 0x0118 #define XM_RX_HIWAT 0x011A #define XM_RX_REQTHRESH_LO 0x011C #define XM_RX_REQTHRESH_HI 0x011E #define XM_RX_REQTHRESH XM_RX_REQTHRESH_LO #define XM_DEVID_LO 0x0120 #define XM_DEVID_HI 0x0122 #define XM_DEVID XM_DEVID_LO #define XM_MODE_LO 0x0124 #define XM_MODE_HI 0x0126 #define XM_MODE XM_MODE_LO #define XM_LASTSRC0 0x0128 #define XM_LASTSRC1 0x012A #define XM_LASTSRC2 0x012C #define XM_TSTAMP_READ 0x0130 #define XM_TSTAMP_LOAD 0x0134 #define XM_STATS_CMD 0x0200 #define XM_RXCNT_EVENT_LO 0x0204 #define XM_RXCNT_EVENT_HI 0x0206 #define XM_RXCNT_EVENT XM_RXCNT_EVENT_LO #define XM_TXCNT_EVENT_LO 0x0208 #define XM_TXCNT_EVENT_HI 0x020A #define XM_TXCNT_EVENT XM_TXCNT_EVENT_LO #define XM_RXCNT_EVMASK_LO 0x020C #define XM_RXCNT_EVMASK_HI 0x020E #define XM_RXCNT_EVMASK XM_RXCNT_EVMASK_LO #define XM_TXCNT_EVMASK_LO 0x0210 #define XM_TXCNT_EVMASK_HI 0x0212 #define XM_TXCNT_EVMASK XM_TXCNT_EVMASK_LO /* Statistics command register */ #define XM_STATCMD_CLR_TX 0x0001 #define XM_STATCMD_CLR_RX 0x0002 #define XM_STATCMD_COPY_TX 0x0004 #define XM_STATCMD_COPY_RX 0x0008 #define XM_STATCMD_SNAP_TX 0x0010 #define XM_STATCMD_SNAP_RX 0x0020 /* TX statistics registers */ #define XM_TXSTATS_PKTSOK 0x280 #define XM_TXSTATS_BYTESOK_HI 0x284 #define XM_TXSTATS_BYTESOK_LO 0x288 #define XM_TXSTATS_BCASTSOK 0x28C #define XM_TXSTATS_MCASTSOK 0x290 #define XM_TXSTATS_UCASTSOK 0x294 #define XM_TXSTATS_GIANTS 0x298 #define XM_TXSTATS_BURSTCNT 0x29C #define XM_TXSTATS_PAUSEPKTS 0x2A0 #define XM_TXSTATS_MACCTLPKTS 0x2A4 
#define XM_TXSTATS_SINGLECOLS 0x2A8 #define XM_TXSTATS_MULTICOLS 0x2AC #define XM_TXSTATS_EXCESSCOLS 0x2B0 #define XM_TXSTATS_LATECOLS 0x2B4 #define XM_TXSTATS_DEFER 0x2B8 #define XM_TXSTATS_EXCESSDEFER 0x2BC #define XM_TXSTATS_UNDERRUN 0x2C0 #define XM_TXSTATS_CARRIERSENSE 0x2C4 #define XM_TXSTATS_UTILIZATION 0x2C8 #define XM_TXSTATS_64 0x2D0 #define XM_TXSTATS_65_127 0x2D4 #define XM_TXSTATS_128_255 0x2D8 #define XM_TXSTATS_256_511 0x2DC #define XM_TXSTATS_512_1023 0x2E0 #define XM_TXSTATS_1024_MAX 0x2E4 /* RX statistics registers */ #define XM_RXSTATS_PKTSOK 0x300 #define XM_RXSTATS_BYTESOK_HI 0x304 #define XM_RXSTATS_BYTESOK_LO 0x308 #define XM_RXSTATS_BCASTSOK 0x30C #define XM_RXSTATS_MCASTSOK 0x310 #define XM_RXSTATS_UCASTSOK 0x314 #define XM_RXSTATS_PAUSEPKTS 0x318 #define XM_RXSTATS_MACCTLPKTS 0x31C #define XM_RXSTATS_BADPAUSEPKTS 0x320 #define XM_RXSTATS_BADMACCTLPKTS 0x324 #define XM_RXSTATS_BURSTCNT 0x328 #define XM_RXSTATS_MISSEDPKTS 0x32C #define XM_RXSTATS_FRAMEERRS 0x330 #define XM_RXSTATS_OVERRUN 0x334 #define XM_RXSTATS_JABBER 0x338 #define XM_RXSTATS_CARRLOSS 0x33C #define XM_RXSTATS_INRNGLENERR 0x340 #define XM_RXSTATS_SYMERR 0x344 #define XM_RXSTATS_SHORTEVENT 0x348 #define XM_RXSTATS_RUNTS 0x34C #define XM_RXSTATS_GIANTS 0x350 #define XM_RXSTATS_CRCERRS 0x354 #define XM_RXSTATS_CEXTERRS 0x35C #define XM_RXSTATS_UTILIZATION 0x360 #define XM_RXSTATS_64 0x368 #define XM_RXSTATS_65_127 0x36C #define XM_RXSTATS_128_255 0x370 #define XM_RXSTATS_256_511 0x374 #define XM_RXSTATS_512_1023 0x378 #define XM_RXSTATS_1024_MAX 0x37C #define XM_MMUCMD_TX_ENB 0x0001 #define XM_MMUCMD_RX_ENB 0x0002 #define XM_MMUCMD_GMIILOOP 0x0004 #define XM_MMUCMD_RATECTL 0x0008 #define XM_MMUCMD_GMIIFDX 0x0010 #define XM_MMUCMD_NO_MGMT_PRMB 0x0020 #define XM_MMUCMD_SIMCOL 0x0040 #define XM_MMUCMD_FORCETX 0x0080 #define XM_MMUCMD_LOOPENB 0x0200 #define XM_MMUCMD_IGNPAUSE 0x0400 #define XM_MMUCMD_PHYBUSY 0x0800 #define XM_MMUCMD_PHYDATARDY 0x1000 #define XM_TXCMD_AUTOPAD 
0x0001 #define XM_TXCMD_NOCRC 0x0002 #define XM_TXCMD_NOPREAMBLE 0x0004 #define XM_TXCMD_NOGIGAMODE 0x0008 #define XM_TXCMD_SAMPLELINE 0x0010 #define XM_TXCMD_ENCBYPASS 0x0020 #define XM_TXCMD_XMITBK2BK 0x0040 #define XM_TXCMD_FAIRSHARE 0x0080 #define XM_RXCMD_DISABLE_CEXT 0x0001 #define XM_RXCMD_STRIPPAD 0x0002 #define XM_RXCMD_SAMPLELINE 0x0004 #define XM_RXCMD_SELFRX 0x0008 #define XM_RXCMD_STRIPFCS 0x0010 #define XM_RXCMD_TRANSPARENT 0x0020 #define XM_RXCMD_IPGCAPTURE 0x0040 #define XM_RXCMD_BIGPKTOK 0x0080 #define XM_RXCMD_LENERROK 0x0100 #define XM_GPIO_GP0_SET 0x0001 #define XM_GPIO_RESETSTATS 0x0004 #define XM_GPIO_RESETMAC 0x0008 #define XM_GPIO_FORCEINT 0x0020 #define XM_GPIO_ANEGINPROG 0x0040 #define XM_IMR_RX_EOF 0x0001 #define XM_IMR_TX_EOF 0x0002 #define XM_IMR_TX_UNDERRUN 0x0004 #define XM_IMR_RX_OVERRUN 0x0008 #define XM_IMR_TX_STATS_OFLOW 0x0010 #define XM_IMR_RX_STATS_OFLOW 0x0020 #define XM_IMR_TSTAMP_OFLOW 0x0040 #define XM_IMR_AUTONEG_DONE 0x0080 #define XM_IMR_NEXTPAGE_RDY 0x0100 #define XM_IMR_PAGE_RECEIVED 0x0200 #define XM_IMR_LP_REQCFG 0x0400 #define XM_IMR_GP0_SET 0x0800 #define XM_IMR_FORCEINTR 0x1000 #define XM_IMR_TX_ABORT 0x2000 #define XM_IMR_LINKEVENT 0x4000 #define XM_INTRS \ (~(XM_IMR_GP0_SET|XM_IMR_AUTONEG_DONE|XM_IMR_TX_UNDERRUN)) #define XM_ISR_RX_EOF 0x0001 #define XM_ISR_TX_EOF 0x0002 #define XM_ISR_TX_UNDERRUN 0x0004 #define XM_ISR_RX_OVERRUN 0x0008 #define XM_ISR_TX_STATS_OFLOW 0x0010 #define XM_ISR_RX_STATS_OFLOW 0x0020 #define XM_ISR_TSTAMP_OFLOW 0x0040 #define XM_ISR_AUTONEG_DONE 0x0080 #define XM_ISR_NEXTPAGE_RDY 0x0100 #define XM_ISR_PAGE_RECEIVED 0x0200 #define XM_ISR_LP_REQCFG 0x0400 #define XM_ISR_GP0_SET 0x0800 #define XM_ISR_FORCEINTR 0x1000 #define XM_ISR_TX_ABORT 0x2000 #define XM_ISR_LINKEVENT 0x4000 #define XM_HWCFG_GENEOP 0x0008 #define XM_HWCFG_SIGSTATCKH 0x0004 #define XM_HWCFG_GMIIMODE 0x0001 #define XM_MODE_FLUSH_RXFIFO 0x00000001 #define XM_MODE_FLUSH_TXFIFO 0x00000002 #define XM_MODE_BIGENDIAN 
0x00000004 #define XM_MODE_RX_PROMISC 0x00000008 #define XM_MODE_RX_NOBROAD 0x00000010 #define XM_MODE_RX_NOMULTI 0x00000020 #define XM_MODE_RX_NOUNI 0x00000040 #define XM_MODE_RX_BADFRAMES 0x00000080 #define XM_MODE_RX_CRCERRS 0x00000100 #define XM_MODE_RX_GIANTS 0x00000200 #define XM_MODE_RX_INRANGELEN 0x00000400 #define XM_MODE_RX_RUNTS 0x00000800 #define XM_MODE_RX_MACCTL 0x00001000 #define XM_MODE_RX_USE_PERFECT 0x00002000 #define XM_MODE_RX_USE_STATION 0x00004000 #define XM_MODE_RX_USE_HASH 0x00008000 #define XM_MODE_RX_ADDRPAIR 0x00010000 #define XM_MODE_PAUSEONHI 0x00020000 #define XM_MODE_PAUSEONLO 0x00040000 #define XM_MODE_TIMESTAMP 0x00080000 #define XM_MODE_SENDPAUSE 0x00100000 #define XM_MODE_SENDCONTINUOUS 0x00200000 #define XM_MODE_LE_STATUSWORD 0x00400000 #define XM_MODE_AUTOFIFOPAUSE 0x00800000 #define XM_MODE_EXPAUSEGEN 0x02000000 #define XM_MODE_RX_INVERSE 0x04000000 #define XM_RXSTAT_MACCTL 0x00000001 #define XM_RXSTAT_ERRFRAME 0x00000002 #define XM_RXSTAT_CRCERR 0x00000004 #define XM_RXSTAT_GIANT 0x00000008 #define XM_RXSTAT_RUNT 0x00000010 #define XM_RXSTAT_FRAMEERR 0x00000020 #define XM_RXSTAT_INRANGEERR 0x00000040 #define XM_RXSTAT_CARRIERERR 0x00000080 #define XM_RXSTAT_COLLERR 0x00000100 #define XM_RXSTAT_802_3 0x00000200 #define XM_RXSTAT_CARREXTERR 0x00000400 #define XM_RXSTAT_BURSTMODE 0x00000800 #define XM_RXSTAT_UNICAST 0x00002000 #define XM_RXSTAT_MULTICAST 0x00004000 #define XM_RXSTAT_BROADCAST 0x00008000 #define XM_RXSTAT_VLAN_LEV1 0x00010000 #define XM_RXSTAT_VLAN_LEV2 0x00020000 #define XM_RXSTAT_LEN 0xFFFC0000 +#define XM_RXSTAT_LENSHIFT 18 + +#define XM_RXSTAT_BYTES(x) ((x) >> XM_RXSTAT_LENSHIFT) /* * XMAC PHY registers, indirectly accessed through * XM_PHY_ADDR and XM_PHY_REG. 
*/ #define XM_PHY_BMCR 0x0000 /* control */ #define XM_PHY_BMSR 0x0001 /* status */ #define XM_PHY_VENID 0x0002 /* vendor id */ #define XM_PHY_DEVID 0x0003 /* device id */ #define XM_PHY_ANAR 0x0004 /* autoneg advertisenemt */ #define XM_PHY_LPAR 0x0005 /* link partner ability */ #define XM_PHY_ANEXP 0x0006 /* autoneg expansion */ #define XM_PHY_NEXTP 0x0007 /* nextpage */ #define XM_PHY_LPNEXTP 0x0008 /* link partner's nextpage */ #define XM_PHY_EXTSTS 0x000F /* extented status */ #define XM_PHY_RESAB 0x0010 /* resolved ability */ #define XM_BMCR_DUPLEX 0x0100 #define XM_BMCR_RENEGOTIATE 0x0200 #define XM_BMCR_AUTONEGENBL 0x1000 #define XM_BMCR_LOOPBACK 0x4000 #define XM_BMCR_RESET 0x8000 #define XM_BMSR_EXTCAP 0x0001 #define XM_BMSR_LINKSTAT 0x0004 #define XM_BMSR_AUTONEGABLE 0x0008 #define XM_BMSR_REMFAULT 0x0010 #define XM_BMSR_AUTONEGDONE 0x0020 #define XM_BMSR_EXTSTAT 0x0100 #define XM_VENID_XAQTI 0xD14C #define XM_DEVID_XMAC 0x0002 #define XM_ANAR_FULLDUPLEX 0x0020 #define XM_ANAR_HALFDUPLEX 0x0040 #define XM_ANAR_PAUSEBITS 0x0180 #define XM_ANAR_REMFAULTBITS 0x1800 #define XM_ANAR_ACK 0x4000 #define XM_ANAR_NEXTPAGE 0x8000 #define XM_LPAR_FULLDUPLEX 0x0020 #define XM_LPAR_HALFDUPLEX 0x0040 #define XM_LPAR_PAUSEBITS 0x0180 #define XM_LPAR_REMFAULTBITS 0x1800 #define XM_LPAR_ACK 0x4000 #define XM_LPAR_NEXTPAGE 0x8000 #define XM_PAUSE_NOPAUSE 0x0000 #define XM_PAUSE_SYMPAUSE 0x0080 #define XM_PAUSE_ASYMPAUSE 0x0100 #define XM_PAUSE_BOTH 0x0180 #define XM_REMFAULT_LINKOK 0x0000 #define XM_REMFAULT_LINKFAIL 0x0800 #define XM_REMFAULT_OFFLINE 0x1000 #define XM_REMFAULT_ANEGERR 0x1800 #define XM_ANEXP_GOTPAGE 0x0002 #define XM_ANEXP_NEXTPAGE_SELF 0x0004 #define XM_ANEXP_NEXTPAGE_LP 0x0008 #define XM_NEXTP_MESSAGE 0x07FF #define XM_NEXTP_TOGGLE 0x0800 #define XM_NEXTP_ACK2 0x1000 #define XM_NEXTP_MPAGE 0x2000 #define XM_NEXTP_ACK1 0x4000 #define XM_NEXTP_NPAGE 0x8000 #define XM_LPNEXTP_MESSAGE 0x07FF #define XM_LPNEXTP_TOGGLE 0x0800 #define XM_LPNEXTP_ACK2 0x1000 
#define XM_LPNEXTP_MPAGE 0x2000 #define XM_LPNEXTP_ACK1 0x4000 #define XM_LPNEXTP_NPAGE 0x8000 #define XM_EXTSTS_HALFDUPLEX 0x4000 #define XM_EXTSTS_FULLDUPLEX 0x8000 #define XM_RESAB_PAUSEMISMATCH 0x0008 #define XM_RESAB_ABLMISMATCH 0x0010 #define XM_RESAB_FDMODESEL 0x0020 #define XM_RESAB_HDMODESEL 0x0040 #define XM_RESAB_PAUSEBITS 0x0180 Index: stable/6/sys/dev/sk/yukonreg.h =================================================================== --- stable/6/sys/dev/sk/yukonreg.h (revision 159562) +++ stable/6/sys/dev/sk/yukonreg.h (revision 159563) @@ -1,171 +1,190 @@ /* $OpenBSD: yukonreg.h,v 1.2 2003/08/12 05:23:06 nate Exp $ */ /*- * Copyright (c) 2003 Nathan L. Binkert * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
* * $FreeBSD$ */ /* General Purpose Status Register (GPSR) */ #define YUKON_GPSR 0x0000 #define YU_GPSR_SPEED 0x8000 /* speed 0 - 10Mbps, 1 - 100Mbps */ #define YU_GPSR_DUPLEX 0x4000 /* 0 - half duplex, 1 - full duplex */ -#define YU_GPSR_FCTL_TX 0x2000 /* flow control */ +#define YU_GPSR_FCTL_TX 0x2000 /* Tx flow control, 1 - disabled */ #define YU_GPSR_LINK 0x1000 /* link status (down/up) */ #define YU_GPSR_PAUSE 0x0800 /* flow control enable/disable */ #define YU_GPSR_TX_IN_PROG 0x0400 /* transmit in progress */ #define YU_GPSR_EXCESS_COL 0x0200 /* excessive collisions occurred */ #define YU_GPSR_LATE_COL 0x0100 /* late collision occurred */ #define YU_GPSR_MII_PHY_STC 0x0020 /* MII PHY status change */ #define YU_GPSR_GIG_SPEED 0x0010 /* Gigabit Speed (0 - use speed bit) */ #define YU_GPSR_PARTITION 0x0008 /* partition mode */ -#define YU_GPSR_FCTL_RX 0x0004 /* flow control enable/disable */ -#define YU_GPSR_PROMS_EN 0x0002 /* promiscuous mode enable/disable */ +#define YU_GPSR_FCTL_RX 0x0004 /* Rx flow control, 1 - disabled */ +#define YU_GPSR_PROMS_EN 0x0002 /* promiscuous mode, 1 - enabled */ /* General Purpose Control Register (GPCR) */ #define YUKON_GPCR 0x0004 -#define YU_GPCR_FCTL_TX 0x2000 /* Transmit flow control 802.3x */ +#define YU_GPCR_FCTL_TX_DIS 0x2000 /* Disable Tx flow control 802.3x */ #define YU_GPCR_TXEN 0x1000 /* Transmit Enable */ #define YU_GPCR_RXEN 0x0800 /* Receive Enable */ -#define YU_GPCR_LPBK 0x0200 /* Loopback Enable */ +#define YU_GPCR_BURSTEN 0x0400 /* Burst Mode Enable */ +#define YU_GPCR_LPBK 0x0200 /* MAC Loopback Enable */ #define YU_GPCR_PAR 0x0100 /* Partition Enable */ -#define YU_GPCR_GIG 0x0080 /* Gigabit Speed */ +#define YU_GPCR_GIG 0x0080 /* Gigabit Speed 1000Mbps */ #define YU_GPCR_FLP 0x0040 /* Force Link Pass */ #define YU_GPCR_DUPLEX 0x0020 /* Duplex Enable */ -#define YU_GPCR_FCTL_RX 0x0010 /* Receive flow control 802.3x */ -#define YU_GPCR_SPEED 0x0008 /* Port Speed */ -#define YU_GPCR_DPLX_EN 0x0004 /* Enable 
Auto-Update for duplex */ -#define YU_GPCR_FCTL_EN 0x0002 /* Enabel Auto-Update for 802.3x */ -#define YU_GPCR_SPEED_EN 0x0001 /* Enable Auto-Update for speed */ +#define YU_GPCR_FCTL_RX_DIS 0x0010 /* Disable Rx flow control 802.3x */ +#define YU_GPCR_SPEED 0x0008 /* Port Speed 100Mbps */ +#define YU_GPCR_DPLX_DIS 0x0004 /* Disable Auto-Update for duplex */ +#define YU_GPCR_FCTL_DIS 0x0002 /* Disable Auto-Update for 802.3x */ +#define YU_GPCR_SPEED_DIS 0x0001 /* Disable Auto-Update for speed */ /* Transmit Control Register (TCR) */ #define YUKON_TCR 0x0008 #define YU_TCR_FJ 0x8000 /* force jam / flow control */ #define YU_TCR_CRCD 0x4000 /* insert CRC (0 - enable) */ #define YU_TCR_PADD 0x2000 /* pad packets to 64b (0 - enable) */ #define YU_TCR_COLTH 0x1c00 /* collision threshold */ /* Receive Control Register (RCR) */ #define YUKON_RCR 0x000c #define YU_RCR_UFLEN 0x8000 /* unicast filter enable */ #define YU_RCR_MUFLEN 0x4000 /* multicast filter enable */ #define YU_RCR_CRCR 0x2000 /* remove CRC */ #define YU_RCR_PASSFC 0x1000 /* pass flow control packets */ /* Transmit Flow Control Register (TFCR) */ #define YUKON_TFCR 0x0010 /* Pause Time */ /* Transmit Parameter Register (TPR) */ #define YUKON_TPR 0x0014 #define YU_TPR_JAM_LEN(x) (((x) & 0x3) << 14) #define YU_TPR_JAM_IPG(x) (((x) & 0x1f) << 9) #define YU_TPR_JAM2DATA_IPG(x) (((x) & 0x1f) << 4) /* Serial Mode Register (SMR) */ #define YUKON_SMR 0x0018 #define YU_SMR_DATA_BLIND(x) (((x) & 0x1f) << 11) #define YU_SMR_LIMIT4 0x0400 /* reset after 16 / 4 collisions */ #define YU_SMR_MFL_JUMBO 0x0100 /* max frame length for jumbo frames */ #define YU_SMR_MFL_VLAN 0x0200 /* max frame length + vlan tag */ #define YU_SMR_IPG_DATA(x) ((x) & 0x1f) /* Source Address Low #1 (SAL1) */ #define YUKON_SAL1 0x001c /* SA1[15:0] */ /* Source Address Middle #1 (SAM1) */ #define YUKON_SAM1 0x0020 /* SA1[31:16] */ /* Source Address High #1 (SAH1) */ #define YUKON_SAH1 0x0024 /* SA1[47:32] */ /* Source Address Low #2 (SAL2) */ 
#define YUKON_SAL2 0x0028 /* SA2[15:0] */ /* Source Address Middle #2 (SAM2) */ #define YUKON_SAM2 0x002c /* SA2[31:16] */ /* Source Address High #2 (SAH2) */ #define YUKON_SAH2 0x0030 /* SA2[47:32] */ /* Multicatst Address Hash Register 1 (MCAH1) */ #define YUKON_MCAH1 0x0034 /* Multicatst Address Hash Register 2 (MCAH2) */ #define YUKON_MCAH2 0x0038 /* Multicatst Address Hash Register 3 (MCAH3) */ #define YUKON_MCAH3 0x003c /* Multicatst Address Hash Register 4 (MCAH4) */ #define YUKON_MCAH4 0x0040 /* Transmit Interrupt Register (TIR) */ #define YUKON_TIR 0x0044 #define YU_TIR_OUT_UNICAST 0x0001 /* Num Unicast Packets Transmitted */ #define YU_TIR_OUT_BROADCAST 0x0002 /* Num Broadcast Packets Transmitted */ #define YU_TIR_OUT_PAUSE 0x0004 /* Num Pause Packets Transmitted */ #define YU_TIR_OUT_MULTICAST 0x0008 /* Num Multicast Packets Transmitted */ #define YU_TIR_OUT_OCTETS 0x0030 /* Num Bytes Transmitted */ #define YU_TIR_OUT_64_OCTETS 0x0000 /* Num Packets Transmitted */ #define YU_TIR_OUT_127_OCTETS 0x0000 /* Num Packets Transmitted */ #define YU_TIR_OUT_255_OCTETS 0x0000 /* Num Packets Transmitted */ #define YU_TIR_OUT_511_OCTETS 0x0000 /* Num Packets Transmitted */ #define YU_TIR_OUT_1023_OCTETS 0x0000 /* Num Packets Transmitted */ #define YU_TIR_OUT_1518_OCTETS 0x0000 /* Num Packets Transmitted */ #define YU_TIR_OUT_MAX_OCTETS 0x0000 /* Num Packets Transmitted */ #define YU_TIR_OUT_SPARE 0x0000 /* Num Packets Transmitted */ #define YU_TIR_OUT_COLLISIONS 0x0000 /* Num Packets Transmitted */ #define YU_TIR_OUT_LATE 0x0000 /* Num Packets Transmitted */ /* Receive Interrupt Register (RIR) */ #define YUKON_RIR 0x0048 /* Transmit and Receive Interrupt Register (TRIR) */ #define YUKON_TRIR 0x004c /* Transmit Interrupt Mask Register (TIMR) */ #define YUKON_TIMR 0x0050 /* Receive Interrupt Mask Register (RIMR) */ #define YUKON_RIMR 0x0054 /* Transmit and Receive Interrupt Mask Register (TRIMR) */ #define YUKON_TRIMR 0x0058 /* SMI Control Register (SMICR) */ #define 
YUKON_SMICR 0x0080 #define YU_SMICR_PHYAD(x) (((x) & 0x1f) << 11) #define YU_SMICR_REGAD(x) (((x) & 0x1f) << 6) #define YU_SMICR_OPCODE 0x0020 /* opcode (0 - write, 1 - read) */ #define YU_SMICR_OP_READ 0x0020 /* opcode read */ #define YU_SMICR_OP_WRITE 0x0000 /* opcode write */ #define YU_SMICR_READ_VALID 0x0010 /* read valid */ #define YU_SMICR_BUSY 0x0008 /* busy (writing) */ /* SMI Data Register (SMIDR) */ #define YUKON_SMIDR 0x0084 /* PHY Addres Register (PAR) */ #define YUKON_PAR 0x0088 #define YU_PAR_MIB_CLR 0x0020 /* MIB Counters Clear Mode */ #define YU_PAR_LOAD_TSTCNT 0x0010 /* Load count 0xfffffff0 into cntr */ + +/* Receive status */ +#define YU_RXSTAT_FOFL 0x00000001 /* Rx FIFO overflow */ +#define YU_RXSTAT_CRCERR 0x00000002 /* CRC error */ +#define YU_RXSTAT_FRAGMENT 0x00000008 /* fragment */ +#define YU_RXSTAT_LONGERR 0x00000010 /* too long packet */ +#define YU_RXSTAT_MIIERR 0x00000020 /* MII error */ +#define YU_RXSTAT_BADFC 0x00000040 /* bad flow-control packet */ +#define YU_RXSTAT_GOODFC 0x00000080 /* good flow-control packet */ +#define YU_RXSTAT_RXOK 0x00000100 /* receive OK (Good packet) */ +#define YU_RXSTAT_BROADCAST 0x00000200 /* broadcast packet */ +#define YU_RXSTAT_MULTICAST 0x00000400 /* multicast packet */ +#define YU_RXSTAT_RUNT 0x00000800 /* undersize packet */ +#define YU_RXSTAT_JABBER 0x00001000 /* jabber packet */ +#define YU_RXSTAT_VLAN 0x00002000 /* VLAN packet */ +#define YU_RXSTAT_LENSHIFT 16 + +#define YU_RXSTAT_BYTES(x) ((x) >> YU_RXSTAT_LENSHIFT)