Index: head/sys/dev/dc/if_dc.c =================================================================== --- head/sys/dev/dc/if_dc.c (revision 71961) +++ head/sys/dev/dc/if_dc.c (revision 71962) @@ -1,3371 +1,3367 @@ /* * Copyright (c) 1997, 1998, 1999 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * DEC "tulip" clone ethernet driver. 
Supports the DEC/Intel 21143 * series chips and several workalikes including the following: * * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com) * Macronix/Lite-On 82c115 PNIC II (www.macronix.com) * Lite-On 82c168/82c169 PNIC (www.litecom.com) * ASIX Electronics AX88140A (www.asix.com.tw) * ASIX Electronics AX88141 (www.asix.com.tw) * ADMtek AL981 (www.admtek.com.tw) * ADMtek AN985 (www.admtek.com.tw) * Davicom DM9100, DM9102, DM9102A (www.davicom8.com) * Accton EN1217 (www.accton.com) * Xircom X3201 (www.xircom.com) * Abocom FE2500 * * Datasheets for the 21143 are available at developer.intel.com. * Datasheets for the clone parts can be found at their respective sites. * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.) * The PNIC II is essentially a Macronix 98715A chip; the only difference * worth noting is that its multicast hash table is only 128 bits wide * instead of 512. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The Intel 21143 is the successor to the DEC 21140. It is basically * the same as the 21140 but with a few new features. The 21143 supports * three kinds of media attachments: * * o MII port, for 10Mbps and 100Mbps support and NWAY * autonegotiation provided by an external PHY. * o SYM port, for symbol mode 100Mbps support. * o 10baseT port. * o AUI/BNC port. * * The 100Mbps SYM port and 10baseT port can be used together in * combination with the internal NWAY support to create a 10/100 * autosensing configuration. * * Note that not all tulip workalikes are handled in this driver: we only * deal with those which are relatively well behaved. The Winbond is * handled separately due to its different register offsets and the * special handling needed for its various bugs. The PNIC is handled * here, but I'm not thrilled about it. 
* * All of the workalike chips use some form of MII transceiver support * with the exception of the Macronix chips, which also have a SYM port. * The ASIX AX88140A is also documented to have a SYM port, but all * the cards I've seen use an MII transceiver, probably because the * AX88140A doesn't support internal NWAY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include #include #include #include #include #include #include #define DC_USEIOSPACE #ifdef __alpha__ #define SRM_MEDIA #endif #include MODULE_DEPEND(dc, miibus, 1, 1, 1); /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" #ifndef lint static const char rcsid[] = "$FreeBSD$"; #endif /* * Various supported device vendors/types and their names. */ static struct dc_type dc_devs[] = { { DC_VENDORID_DEC, DC_DEVICEID_21143, "Intel 21143 10/100BaseTX" }, { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9100, "Davicom DM9100 10/100BaseTX" }, { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102, "Davicom DM9102 10/100BaseTX" }, { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102, "Davicom DM9102A 10/100BaseTX" }, { DC_VENDORID_ADMTEK, DC_DEVICEID_AL981, "ADMtek AL981 10/100BaseTX" }, { DC_VENDORID_ADMTEK, DC_DEVICEID_AN985, "ADMtek AN985 10/100BaseTX" }, { DC_VENDORID_ASIX, DC_DEVICEID_AX88140A, "ASIX AX88140A 10/100BaseTX" }, { DC_VENDORID_ASIX, DC_DEVICEID_AX88140A, "ASIX AX88141 10/100BaseTX" }, { DC_VENDORID_MX, DC_DEVICEID_98713, "Macronix 98713 10/100BaseTX" }, { DC_VENDORID_MX, DC_DEVICEID_98713, "Macronix 98713A 10/100BaseTX" }, { DC_VENDORID_CP, DC_DEVICEID_98713_CP, "Compex RL100-TX 10/100BaseTX" }, { DC_VENDORID_CP, DC_DEVICEID_98713_CP, "Compex RL100-TX 10/100BaseTX" }, { DC_VENDORID_MX, DC_DEVICEID_987x5, "Macronix 98715/98715A 10/100BaseTX" }, { DC_VENDORID_MX, DC_DEVICEID_987x5, "Macronix 98715AEC-C 10/100BaseTX" }, { DC_VENDORID_MX, 
DC_DEVICEID_987x5, "Macronix 98725 10/100BaseTX" }, { DC_VENDORID_MX, DC_DEVICEID_98727, "Macronix 98727/98732 10/100BaseTX" }, { DC_VENDORID_LO, DC_DEVICEID_82C115, "LC82C115 PNIC II 10/100BaseTX" }, { DC_VENDORID_LO, DC_DEVICEID_82C168, "82c168 PNIC 10/100BaseTX" }, { DC_VENDORID_LO, DC_DEVICEID_82C168, "82c169 PNIC 10/100BaseTX" }, { DC_VENDORID_ACCTON, DC_DEVICEID_EN1217, "Accton EN1217 10/100BaseTX" }, { DC_VENDORID_ACCTON, DC_DEVICEID_EN2242, "Accton EN2242 MiniPCI 10/100BaseTX" }, { DC_VENDORID_XIRCOM, DC_DEVICEID_X3201, "Xircom X3201 10/100BaseTX" }, { DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500, "Abocom FE2500 10/100BaseTX" }, { 0, 0, NULL } }; static int dc_probe __P((device_t)); static int dc_attach __P((device_t)); static int dc_detach __P((device_t)); static void dc_acpi __P((device_t)); static struct dc_type *dc_devtype __P((device_t)); static int dc_newbuf __P((struct dc_softc *, int, struct mbuf *)); static int dc_encap __P((struct dc_softc *, struct mbuf *, u_int32_t *)); static int dc_coal __P((struct dc_softc *, struct mbuf **)); static void dc_pnic_rx_bug_war __P((struct dc_softc *, int)); static int dc_rx_resync __P((struct dc_softc *)); static void dc_rxeof __P((struct dc_softc *)); static void dc_txeof __P((struct dc_softc *)); static void dc_tick __P((void *)); static void dc_intr __P((void *)); static void dc_start __P((struct ifnet *)); static int dc_ioctl __P((struct ifnet *, u_long, caddr_t)); static void dc_init __P((void *)); static void dc_stop __P((struct dc_softc *)); static void dc_watchdog __P((struct ifnet *)); static void dc_shutdown __P((device_t)); static int dc_ifmedia_upd __P((struct ifnet *)); static void dc_ifmedia_sts __P((struct ifnet *, struct ifmediareq *)); static void dc_delay __P((struct dc_softc *)); static void dc_eeprom_idle __P((struct dc_softc *)); static void dc_eeprom_putbyte __P((struct dc_softc *, int)); static void dc_eeprom_getword __P((struct dc_softc *, int, u_int16_t *)); static void dc_eeprom_getword_pnic 
__P((struct dc_softc *, int, u_int16_t *)); static void dc_eeprom_getword_xircom __P((struct dc_softc *, int, u_int16_t *)); static void dc_read_eeprom __P((struct dc_softc *, caddr_t, int, int, int)); static void dc_mii_writebit __P((struct dc_softc *, int)); static int dc_mii_readbit __P((struct dc_softc *)); static void dc_mii_sync __P((struct dc_softc *)); static void dc_mii_send __P((struct dc_softc *, u_int32_t, int)); static int dc_mii_readreg __P((struct dc_softc *, struct dc_mii_frame *)); static int dc_mii_writereg __P((struct dc_softc *, struct dc_mii_frame *)); static int dc_miibus_readreg __P((device_t, int, int)); static int dc_miibus_writereg __P((device_t, int, int, int)); static void dc_miibus_statchg __P((device_t)); static void dc_miibus_mediainit __P((device_t)); static void dc_setcfg __P((struct dc_softc *, int)); static u_int32_t dc_crc_le __P((struct dc_softc *, caddr_t)); static u_int32_t dc_crc_be __P((caddr_t)); static void dc_setfilt_21143 __P((struct dc_softc *)); static void dc_setfilt_asix __P((struct dc_softc *)); static void dc_setfilt_admtek __P((struct dc_softc *)); static void dc_setfilt_xircom __P((struct dc_softc *)); static void dc_setfilt __P((struct dc_softc *)); static void dc_reset __P((struct dc_softc *)); static int dc_list_rx_init __P((struct dc_softc *)); static int dc_list_tx_init __P((struct dc_softc *)); static void dc_parse_21143_srom __P((struct dc_softc *)); static void dc_decode_leaf_sia __P((struct dc_softc *, struct dc_eblock_sia *)); static void dc_decode_leaf_mii __P((struct dc_softc *, struct dc_eblock_mii *)); static void dc_decode_leaf_sym __P((struct dc_softc *, struct dc_eblock_sym *)); static void dc_apply_fixup __P((struct dc_softc *, int)); #ifdef DC_USEIOSPACE #define DC_RES SYS_RES_IOPORT #define DC_RID DC_PCI_CFBIO #else #define DC_RES SYS_RES_MEMORY #define DC_RID DC_PCI_CFBMA #endif static device_method_t dc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, dc_probe), 
DEVMETHOD(device_attach, dc_attach), DEVMETHOD(device_detach, dc_detach), DEVMETHOD(device_shutdown, dc_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, dc_miibus_readreg), DEVMETHOD(miibus_writereg, dc_miibus_writereg), DEVMETHOD(miibus_statchg, dc_miibus_statchg), DEVMETHOD(miibus_mediainit, dc_miibus_mediainit), { 0, 0 } }; static driver_t dc_driver = { "dc", dc_methods, sizeof(struct dc_softc) }; static devclass_t dc_devclass; DRIVER_MODULE(if_dc, cardbus, dc_driver, dc_devclass, 0, 0); DRIVER_MODULE(if_dc, pci, dc_driver, dc_devclass, 0, 0); DRIVER_MODULE(miibus, dc, miibus_driver, miibus_devclass, 0, 0); #define DC_SETBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x)) #define DC_CLRBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x)) #define SIO_SET(x) DC_SETBIT(sc, DC_SIO, (x)) #define SIO_CLR(x) DC_CLRBIT(sc, DC_SIO, (x)) #define IS_MPSAFE 0 static void dc_delay(sc) struct dc_softc *sc; { int idx; for (idx = (300 / 33) + 1; idx > 0; idx--) CSR_READ_4(sc, DC_BUSCTL); } static void dc_eeprom_idle(sc) struct dc_softc *sc; { register int i; CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); dc_delay(sc); DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); dc_delay(sc); for (i = 0; i < 25; i++) { DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); } DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS); dc_delay(sc); CSR_WRITE_4(sc, DC_SIO, 0x00000000); return; } /* * Send a read command and address to the EEPROM, check for ACK. */ static void dc_eeprom_putbyte(sc, addr) struct dc_softc *sc; int addr; { register int d, i; /* * The AN985 has a 93C66 EEPROM on it instead of * a 93C46. 
It uses a different bit sequence for * specifying the "read" opcode. */ if (DC_IS_CENTAUR(sc)) d = addr | (DC_EECMD_READ << 2); else d = addr | DC_EECMD_READ; /* * Feed in each bit and strobe the clock. */ for (i = 0x400; i; i >>= 1) { if (d & i) { SIO_SET(DC_SIO_EE_DATAIN); } else { SIO_CLR(DC_SIO_EE_DATAIN); } dc_delay(sc); SIO_SET(DC_SIO_EE_CLK); dc_delay(sc); SIO_CLR(DC_SIO_EE_CLK); dc_delay(sc); } return; } /* * Read a word of data stored in the EEPROM at address 'addr.' * The PNIC 82c168/82c169 has its own non-standard way to read * the EEPROM. */ static void dc_eeprom_getword_pnic(sc, addr, dest) struct dc_softc *sc; int addr; u_int16_t *dest; { register int i; u_int32_t r; CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ|addr); for (i = 0; i < DC_TIMEOUT; i++) { DELAY(1); r = CSR_READ_4(sc, DC_SIO); if (!(r & DC_PN_SIOCTL_BUSY)) { *dest = (u_int16_t)(r & 0xFFFF); return; } } return; } /* * Read a word of data stored in the EEPROM at address 'addr.' * The Xircom X3201 has its own non-standard way to read * the EEPROM, too. */ static void dc_eeprom_getword_xircom(sc, addr, dest) struct dc_softc *sc; int addr; u_int16_t *dest; { SIO_SET(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ); addr *= 2; CSR_WRITE_4(sc, DC_ROM, addr | 0x160); *dest = (u_int16_t)CSR_READ_4(sc, DC_SIO)&0xff; addr += 1; CSR_WRITE_4(sc, DC_ROM, addr | 0x160); *dest |= ((u_int16_t)CSR_READ_4(sc, DC_SIO)&0xff) << 8; SIO_CLR(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ); return; } /* * Read a word of data stored in the EEPROM at address 'addr.' */ static void dc_eeprom_getword(sc, addr, dest) struct dc_softc *sc; int addr; u_int16_t *dest; { register int i; u_int16_t word = 0; /* Force EEPROM to idle state. */ dc_eeprom_idle(sc); /* Enter EEPROM access mode. */ CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); dc_delay(sc); DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); dc_delay(sc); /* * Send address of word we want to read. 
*/ dc_eeprom_putbyte(sc, addr); /* * Start reading bits from EEPROM. */ for (i = 0x8000; i; i >>= 1) { SIO_SET(DC_SIO_EE_CLK); dc_delay(sc); if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT) word |= i; dc_delay(sc); SIO_CLR(DC_SIO_EE_CLK); dc_delay(sc); } /* Turn off EEPROM access mode. */ dc_eeprom_idle(sc); *dest = word; return; } /* * Read a sequence of words from the EEPROM. */ static void dc_read_eeprom(sc, dest, off, cnt, swap) struct dc_softc *sc; caddr_t dest; int off; int cnt; int swap; { int i; u_int16_t word = 0, *ptr; for (i = 0; i < cnt; i++) { if (DC_IS_PNIC(sc)) dc_eeprom_getword_pnic(sc, off + i, &word); else if (DC_IS_XIRCOM(sc)) dc_eeprom_getword_xircom(sc, off + i, &word); else dc_eeprom_getword(sc, off + i, &word); ptr = (u_int16_t *)(dest + (i * 2)); if (swap) *ptr = ntohs(word); else *ptr = word; } return; } /* * The following two routines are taken from the Macronix 98713 * Application Notes pp.19-21. */ /* * Write a bit to the MII bus. */ static void dc_mii_writebit(sc, bit) struct dc_softc *sc; int bit; { if (bit) CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE|DC_SIO_MII_DATAOUT); else CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE); DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK); DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK); return; } /* * Read a bit from the MII bus. */ static int dc_mii_readbit(sc) struct dc_softc *sc; { CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_READ|DC_SIO_MII_DIR); CSR_READ_4(sc, DC_SIO); DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK); DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK); if (CSR_READ_4(sc, DC_SIO) & DC_SIO_MII_DATAIN) return(1); return(0); } /* * Sync the PHYs by setting data bit and strobing the clock 32 times. */ static void dc_mii_sync(sc) struct dc_softc *sc; { register int i; CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE); for (i = 0; i < 32; i++) dc_mii_writebit(sc, 1); return; } /* * Clock a series of bits through the MII. 
*/ static void dc_mii_send(sc, bits, cnt) struct dc_softc *sc; u_int32_t bits; int cnt; { int i; for (i = (0x1 << (cnt - 1)); i; i >>= 1) dc_mii_writebit(sc, bits & i); } /* * Read an PHY register through the MII. */ static int dc_mii_readreg(sc, frame) struct dc_softc *sc; struct dc_mii_frame *frame; { int i, ack; DC_LOCK(sc); /* * Set up frame for RX. */ frame->mii_stdelim = DC_MII_STARTDELIM; frame->mii_opcode = DC_MII_READOP; frame->mii_turnaround = 0; frame->mii_data = 0; /* * Sync the PHYs. */ dc_mii_sync(sc); /* * Send command/address info. */ dc_mii_send(sc, frame->mii_stdelim, 2); dc_mii_send(sc, frame->mii_opcode, 2); dc_mii_send(sc, frame->mii_phyaddr, 5); dc_mii_send(sc, frame->mii_regaddr, 5); #ifdef notdef /* Idle bit */ dc_mii_writebit(sc, 1); dc_mii_writebit(sc, 0); #endif /* Check for ack */ ack = dc_mii_readbit(sc); /* * Now try reading data bits. If the ack failed, we still * need to clock through 16 cycles to keep the PHY(s) in sync. */ if (ack) { for(i = 0; i < 16; i++) { dc_mii_readbit(sc); } goto fail; } for (i = 0x8000; i; i >>= 1) { if (!ack) { if (dc_mii_readbit(sc)) frame->mii_data |= i; } } fail: dc_mii_writebit(sc, 0); dc_mii_writebit(sc, 0); DC_UNLOCK(sc); if (ack) return(1); return(0); } /* * Write to a PHY register through the MII. */ static int dc_mii_writereg(sc, frame) struct dc_softc *sc; struct dc_mii_frame *frame; { DC_LOCK(sc); /* * Set up frame for TX. */ frame->mii_stdelim = DC_MII_STARTDELIM; frame->mii_opcode = DC_MII_WRITEOP; frame->mii_turnaround = DC_MII_TURNAROUND; /* * Sync the PHYs. */ dc_mii_sync(sc); dc_mii_send(sc, frame->mii_stdelim, 2); dc_mii_send(sc, frame->mii_opcode, 2); dc_mii_send(sc, frame->mii_phyaddr, 5); dc_mii_send(sc, frame->mii_regaddr, 5); dc_mii_send(sc, frame->mii_turnaround, 2); dc_mii_send(sc, frame->mii_data, 16); /* Idle bit. 
*/ dc_mii_writebit(sc, 0); dc_mii_writebit(sc, 0); DC_UNLOCK(sc); return(0); } static int dc_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct dc_mii_frame frame; struct dc_softc *sc; int i, rval, phy_reg = 0; sc = device_get_softc(dev); bzero((char *)&frame, sizeof(frame)); /* * Note: both the AL981 and AN985 have internal PHYs, * however the AL981 provides direct access to the PHY * registers while the AN985 uses a serial MII interface. * The AN985's MII interface is also buggy in that you * can read from any MII address (0 to 31), but only address 1 * behaves normally. To deal with both cases, we pretend * that the PHY is at MII address 1. */ if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR) return(0); if (sc->dc_pmode != DC_PMODE_MII) { if (phy == (MII_NPHY - 1)) { switch(reg) { case MII_BMSR: /* * Fake something to make the probe * code think there's a PHY here. */ return(BMSR_MEDIAMASK); break; case MII_PHYIDR1: if (DC_IS_PNIC(sc)) return(DC_VENDORID_LO); return(DC_VENDORID_DEC); break; case MII_PHYIDR2: if (DC_IS_PNIC(sc)) return(DC_DEVICEID_82C168); return(DC_DEVICEID_21143); break; default: return(0); break; } } else return(0); } if (DC_IS_PNIC(sc)) { CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ | (phy << 23) | (reg << 18)); for (i = 0; i < DC_TIMEOUT; i++) { DELAY(1); rval = CSR_READ_4(sc, DC_PN_MII); if (!(rval & DC_PN_MII_BUSY)) { rval &= 0xFFFF; return(rval == 0xFFFF ? 
0 : rval); } } return(0); } if (DC_IS_COMET(sc)) { switch(reg) { case MII_BMCR: phy_reg = DC_AL_BMCR; break; case MII_BMSR: phy_reg = DC_AL_BMSR; break; case MII_PHYIDR1: phy_reg = DC_AL_VENID; break; case MII_PHYIDR2: phy_reg = DC_AL_DEVID; break; case MII_ANAR: phy_reg = DC_AL_ANAR; break; case MII_ANLPAR: phy_reg = DC_AL_LPAR; break; case MII_ANER: phy_reg = DC_AL_ANER; break; default: printf("dc%d: phy_read: bad phy register %x\n", sc->dc_unit, reg); return(0); break; } rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF; if (rval == 0xFFFF) return(0); return(rval); } frame.mii_phyaddr = phy; frame.mii_regaddr = reg; if (sc->dc_type == DC_TYPE_98713) { phy_reg = CSR_READ_4(sc, DC_NETCFG); CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL); } dc_mii_readreg(sc, &frame); if (sc->dc_type == DC_TYPE_98713) CSR_WRITE_4(sc, DC_NETCFG, phy_reg); return(frame.mii_data); } static int dc_miibus_writereg(dev, phy, reg, data) device_t dev; int phy, reg, data; { struct dc_softc *sc; struct dc_mii_frame frame; int i, phy_reg = 0; sc = device_get_softc(dev); bzero((char *)&frame, sizeof(frame)); if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR) return(0); if (DC_IS_PNIC(sc)) { CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE | (phy << 23) | (reg << 10) | data); for (i = 0; i < DC_TIMEOUT; i++) { if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY)) break; } return(0); } if (DC_IS_COMET(sc)) { switch(reg) { case MII_BMCR: phy_reg = DC_AL_BMCR; break; case MII_BMSR: phy_reg = DC_AL_BMSR; break; case MII_PHYIDR1: phy_reg = DC_AL_VENID; break; case MII_PHYIDR2: phy_reg = DC_AL_DEVID; break; case MII_ANAR: phy_reg = DC_AL_ANAR; break; case MII_ANLPAR: phy_reg = DC_AL_LPAR; break; case MII_ANER: phy_reg = DC_AL_ANER; break; default: printf("dc%d: phy_write: bad phy register %x\n", sc->dc_unit, reg); return(0); break; } CSR_WRITE_4(sc, phy_reg, data); return(0); } frame.mii_phyaddr = phy; frame.mii_regaddr = reg; frame.mii_data = data; if (sc->dc_type == DC_TYPE_98713) { phy_reg = 
CSR_READ_4(sc, DC_NETCFG); CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL); } dc_mii_writereg(sc, &frame); if (sc->dc_type == DC_TYPE_98713) CSR_WRITE_4(sc, DC_NETCFG, phy_reg); return(0); } static void dc_miibus_statchg(dev) device_t dev; { struct dc_softc *sc; struct mii_data *mii; struct ifmedia *ifm; sc = device_get_softc(dev); if (DC_IS_ADMTEK(sc)) return; mii = device_get_softc(sc->dc_miibus); ifm = &mii->mii_media; if (DC_IS_DAVICOM(sc) && IFM_SUBTYPE(ifm->ifm_media) == IFM_homePNA) { dc_setcfg(sc, ifm->ifm_media); sc->dc_if_media = ifm->ifm_media; } else { dc_setcfg(sc, mii->mii_media_active); sc->dc_if_media = mii->mii_media_active; } return; } /* * Special support for DM9102A cards with HomePNA PHYs. Note: * with the Davicom DM9102A/DM9801 eval board that I have, it seems * to be impossible to talk to the management interface of the DM9801 * PHY (its MDIO pin is not connected to anything). Consequently, * the driver has to just 'know' about the additional mode and deal * with it itself. *sigh* */ static void dc_miibus_mediainit(dev) device_t dev; { struct dc_softc *sc; struct mii_data *mii; struct ifmedia *ifm; int rev; rev = pci_read_config(dev, DC_PCI_CFRV, 4) & 0xFF; sc = device_get_softc(dev); mii = device_get_softc(sc->dc_miibus); ifm = &mii->mii_media; if (DC_IS_DAVICOM(sc) && rev >= DC_REVISION_DM9102A) ifmedia_add(ifm, IFM_ETHER|IFM_homePNA, 0, NULL); return; } #define DC_POLY 0xEDB88320 #define DC_BITS_512 9 #define DC_BITS_128 7 #define DC_BITS_64 6 static u_int32_t dc_crc_le(sc, addr) struct dc_softc *sc; caddr_t addr; { u_int32_t idx, bit, data, crc; /* Compute CRC for the address value. */ crc = 0xFFFFFFFF; /* initial value */ for (idx = 0; idx < 6; idx++) { for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) crc = (crc >> 1) ^ (((crc ^ data) & 1) ? DC_POLY : 0); } /* * The hash table on the PNIC II and the MX98715AEC-C/D/E * chips is only 128 bits wide. 
*/ if (sc->dc_flags & DC_128BIT_HASH) return (crc & ((1 << DC_BITS_128) - 1)); /* The hash table on the MX98715BEC is only 64 bits wide. */ if (sc->dc_flags & DC_64BIT_HASH) return (crc & ((1 << DC_BITS_64) - 1)); /* Xircom's hash filtering table is different (read: weird) */ /* Xircom uses the LEAST significant bits */ if (DC_IS_XIRCOM(sc)) { if ((crc & 0x180) == 0x180) return (crc & 0x0F) + (crc & 0x70)*3 + (14 << 4); else return (crc & 0x1F) + ((crc>>1) & 0xF0)*3 + (12 << 4); } return (crc & ((1 << DC_BITS_512) - 1)); } /* * Calculate CRC of a multicast group address, return the lower 6 bits. */ static u_int32_t dc_crc_be(addr) caddr_t addr; { u_int32_t crc, carry; int i, j; u_int8_t c; /* Compute CRC for the address value. */ crc = 0xFFFFFFFF; /* initial value */ for (i = 0; i < 6; i++) { c = *(addr + i); for (j = 0; j < 8; j++) { carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01); crc <<= 1; c >>= 1; if (carry) crc = (crc ^ 0x04c11db6) | carry; } } /* return the filter bit position */ return((crc >> 26) & 0x0000003F); } /* * 21143-style RX filter setup routine. Filter programming is done by * downloading a special setup frame into the TX engine. 21143, Macronix, * PNIC, PNIC II and Davicom chips are programmed this way. * * We always program the chip using 'hash perfect' mode, i.e. one perfect * address (our node address) and a 512-bit hash filter for multicast * frames. We also sneak the broadcast address into the hash filter since * we need that too. 
*/ void dc_setfilt_21143(sc) struct dc_softc *sc; { struct dc_desc *sframe; u_int32_t h, *sp; struct ifmultiaddr *ifma; struct ifnet *ifp; int i; ifp = &sc->arpcom.ac_if; i = sc->dc_cdata.dc_tx_prod; DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT); sc->dc_cdata.dc_tx_cnt++; sframe = &sc->dc_ldata->dc_tx_list[i]; sp = (u_int32_t *)&sc->dc_cdata.dc_sbuf; bzero((char *)sp, DC_SFRAME_LEN); sframe->dc_data = vtophys(&sc->dc_cdata.dc_sbuf); sframe->dc_ctl = DC_SFRAME_LEN | DC_TXCTL_SETUP | DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT; sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)&sc->dc_cdata.dc_sbuf; /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); else DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); if (ifp->if_flags & IFF_ALLMULTI) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); else DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); - for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; - ifma = ifma->ifma_link.le_next) { + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = dc_crc_le(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); sp[h >> 4] |= 1 << (h & 0xF); } if (ifp->if_flags & IFF_BROADCAST) { h = dc_crc_le(sc, (caddr_t)ðerbroadcastaddr); sp[h >> 4] |= 1 << (h & 0xF); } /* Set our MAC address */ sp[39] = ((u_int16_t *)sc->arpcom.ac_enaddr)[0]; sp[40] = ((u_int16_t *)sc->arpcom.ac_enaddr)[1]; sp[41] = ((u_int16_t *)sc->arpcom.ac_enaddr)[2]; sframe->dc_status = DC_TXSTAT_OWN; CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); /* * The PNIC takes an exceedingly long time to process its * setup frame; wait 10ms after posting the setup frame * before proceeding, just so it has time to swallow its * medicine. 
*/ DELAY(10000); ifp->if_timer = 5; return; } void dc_setfilt_admtek(sc) struct dc_softc *sc; { struct ifnet *ifp; int h = 0; u_int32_t hashes[2] = { 0, 0 }; struct ifmultiaddr *ifma; ifp = &sc->arpcom.ac_if; /* Init our MAC address */ CSR_WRITE_4(sc, DC_AL_PAR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0])); CSR_WRITE_4(sc, DC_AL_PAR1, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4])); /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); else DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); if (ifp->if_flags & IFF_ALLMULTI) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); else DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); /* first, zot all the existing hash bits */ CSR_WRITE_4(sc, DC_AL_MAR0, 0); CSR_WRITE_4(sc, DC_AL_MAR1, 0); /* * If we're already in promisc or allmulti mode, we * don't have to bother programming the multicast filter. */ if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) return; /* now program new ones */ - for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; - ifma = ifma->ifma_link.le_next) { + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = dc_crc_be(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); } CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0]); CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1]); return; } void dc_setfilt_asix(sc) struct dc_softc *sc; { struct ifnet *ifp; int h = 0; u_int32_t hashes[2] = { 0, 0 }; struct ifmultiaddr *ifma; ifp = &sc->arpcom.ac_if; /* Init our MAC address */ CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0); CSR_WRITE_4(sc, DC_AX_FILTDATA, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0])); CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1); CSR_WRITE_4(sc, DC_AX_FILTDATA, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4])); /* If we want promiscuous mode, set the allframes bit. 
*/ if (ifp->if_flags & IFF_PROMISC) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); else DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); if (ifp->if_flags & IFF_ALLMULTI) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); else DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); /* * The ASIX chip has a special bit to enable reception * of broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD); else DC_CLRBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD); /* first, zot all the existing hash bits */ CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0); CSR_WRITE_4(sc, DC_AX_FILTDATA, 0); CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1); CSR_WRITE_4(sc, DC_AX_FILTDATA, 0); /* * If we're already in promisc or allmulti mode, we * don't have to bother programming the multicast filter. */ if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) return; /* now program new ones */ - for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; - ifma = ifma->ifma_link.le_next) { + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = dc_crc_be(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); } CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0); CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]); CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1); CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]); return; } void dc_setfilt_xircom(sc) struct dc_softc *sc; { struct dc_desc *sframe; u_int32_t h, *sp; struct ifmultiaddr *ifma; struct ifnet *ifp; int i; ifp = &sc->arpcom.ac_if; DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON)); i = sc->dc_cdata.dc_tx_prod; DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT); sc->dc_cdata.dc_tx_cnt++; sframe = &sc->dc_ldata->dc_tx_list[i]; sp = (u_int32_t *)&sc->dc_cdata.dc_sbuf; bzero((char *)sp, DC_SFRAME_LEN); sframe->dc_data = vtophys(&sc->dc_cdata.dc_sbuf); sframe->dc_ctl = DC_SFRAME_LEN | DC_TXCTL_SETUP | 
DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT; sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)&sc->dc_cdata.dc_sbuf; /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); else DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); if (ifp->if_flags & IFF_ALLMULTI) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); else DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); - for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; - ifma = ifma->ifma_link.le_next) { + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = dc_crc_le(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); sp[h >> 4] |= 1 << (h & 0xF); } if (ifp->if_flags & IFF_BROADCAST) { h = dc_crc_le(sc, (caddr_t)ðerbroadcastaddr); sp[h >> 4] |= 1 << (h & 0xF); } /* Set our MAC address */ sp[0] = ((u_int16_t *)sc->arpcom.ac_enaddr)[0]; sp[1] = ((u_int16_t *)sc->arpcom.ac_enaddr)[1]; sp[2] = ((u_int16_t *)sc->arpcom.ac_enaddr)[2]; DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON); ifp->if_flags |= IFF_RUNNING; sframe->dc_status = DC_TXSTAT_OWN; CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); /* * wait some time... */ DELAY(1000); ifp->if_timer = 5; return; } static void dc_setfilt(sc) struct dc_softc *sc; { if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) || DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc)) dc_setfilt_21143(sc); if (DC_IS_ASIX(sc)) dc_setfilt_asix(sc); if (DC_IS_ADMTEK(sc)) dc_setfilt_admtek(sc); if (DC_IS_XIRCOM(sc)) dc_setfilt_xircom(sc); return; } /* * In order to fiddle with the * 'full-duplex' and '100Mbps' bits in the netconfig register, we * first have to put the transmit and/or receive logic in the idle state. 
 */
static void dc_setcfg(sc, media)
	struct dc_softc		*sc;
	int			media;
{
	int			i, restart = 0;
	u_int32_t		isr;

	if (IFM_SUBTYPE(media) == IFM_NONE)
		return;

	/* Idle the TX/RX engines before touching the media bits. */
	if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON)) {
		restart = 1;
		DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));

		for (i = 0; i < DC_TIMEOUT; i++) {
			DELAY(10);
			isr = CSR_READ_4(sc, DC_ISR);
			if (isr & DC_ISR_TX_IDLE ||
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED)
				break;
		}

		if (i == DC_TIMEOUT)
			printf("dc%d: failed to force tx and "
			    "rx to idle state\n", sc->dc_unit);
	}

	if (IFM_SUBTYPE(media) == IFM_100_TX) {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			int	watchdogreg;

			if (DC_IS_INTEL(sc)) {
			/* there's a write enable bit here that reads as 1 */
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
				    DC_NETCFG_SCRAMBLER));
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_100_TX|IFM_FDX : IFM_100_TX);
		}
	}

	if (IFM_SUBTYPE(media) == IFM_10_T) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			int	watchdogreg;

			/* there's a write enable bit here that reads as 1 */
			if (DC_IS_INTEL(sc)) {
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc)) {
				DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
				if ((media & IFM_GMASK) == IFM_FDX)
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D);
				else
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F);
				DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL,
				    DC_TCTL_AUTONEGENBL);
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_10_T|IFM_FDX : IFM_10_T);
				DELAY(20000);
			}
		}
	}

	/*
	 * If this is a Davicom DM9102A card with a DM9801 HomePNA
	 * PHY and we want HomePNA mode, set the portsel bit to turn
	 * on the external MII port.
	 */
	if (DC_IS_DAVICOM(sc)) {
		if (IFM_SUBTYPE(media) == IFM_homePNA) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			sc->dc_link = 1;
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
		}
	}

	if ((media & IFM_GMASK) == IFM_FDX) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	} else {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	}

	/* Restart the TX/RX engines if we idled them above. */
	if (restart)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON|DC_NETCFG_RX_ON);

	return;
}

/*
 * Issue a software reset and put the chip registers back into a
 * known state.
 */
static void dc_reset(sc)
	struct dc_softc		*sc;
{
	register int		i;

	DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);

	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET))
			break;
	}

	/*
	 * These chips never deassert the reset bit on their own;
	 * clear it by hand and zero i to suppress the timeout
	 * warning below.
	 */
	if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_XIRCOM(sc) ||
	    DC_IS_INTEL(sc)) {
		DELAY(10000);
		DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);
		i = 0;
	}

	if (i == DC_TIMEOUT)
		printf("dc%d: reset never completed!\n", sc->dc_unit);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000);
	CSR_WRITE_4(sc, DC_NETCFG, 0x00000000);

	/*
	 * Bring the SIA out of reset. In some cases, it looks
	 * like failing to unreset the SIA soon enough gets it
	 * into a state where it will never come out of reset
	 * until we reset the whole chip again.
	 */
	if (DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
		CSR_WRITE_4(sc, DC_10BTCTRL, 0);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}

	return;
}

/*
 * Look up a device in the supported-devices table by PCI vendor/device
 * ID. Some table entries share a device ID and are distinguished by
 * PCI revision; for those we step forward to the revision-specific
 * entry before returning.
 */
static struct dc_type *dc_devtype(dev)
	device_t		dev;
{
	struct dc_type		*t;
	u_int32_t		rev;

	t = dc_devs;

	while(t->dc_name != NULL) {
		if ((pci_get_vendor(dev) == t->dc_vid) &&
		    (pci_get_device(dev) == t->dc_did)) {
			/* Check the PCI revision */
			rev = pci_read_config(dev, DC_PCI_CFRV, 4) & 0xFF;
			if (t->dc_did == DC_DEVICEID_98713 &&
			    rev >= DC_REVISION_98713A)
				t++;
			if (t->dc_did == DC_DEVICEID_98713_CP &&
			    rev >= DC_REVISION_98713A)
				t++;
			if (t->dc_did == DC_DEVICEID_987x5 &&
			    rev >= DC_REVISION_98715AEC_C)
				t++;
			if (t->dc_did == DC_DEVICEID_987x5 &&
			    rev >= DC_REVISION_98725)
				t++;
			if (t->dc_did == DC_DEVICEID_AX88140A &&
			    rev >= DC_REVISION_88141)
				t++;
			if (t->dc_did == DC_DEVICEID_82C168 &&
			    rev >= DC_REVISION_82C169)
				t++;
			if (t->dc_did == DC_DEVICEID_DM9102 &&
			    rev >= DC_REVISION_DM9102A)
				t++;
			return(t);
		}
		t++;
	}

	return(NULL);
}

/*
 * Probe for a 21143 or clone chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 * We do a little bit of extra work to identify the exact type of
 * chip. The MX98713 and MX98713A have the same PCI vendor/device ID,
 * but different revision IDs. The same is true for 98715/98715A
 * chips and the 98725, as well as the ASIX and ADMtek chips. In some
 * cases, the exact chip revision affects driver behavior.
 */
static int dc_probe(dev)
	device_t		dev;
{
	struct dc_type		*t;

	t = dc_devtype(dev);

	if (t != NULL) {
		device_set_desc(dev, t->dc_name);
		return(0);
	}

	return(ENXIO);
}

/*
 * If the device was left in a low-power state (e.g. by ACPI or the
 * firmware), bring it back to D0, preserving the BARs and interrupt
 * line across the power-state change.
 */
static void dc_acpi(dev)
	device_t		dev;
{
	int			unit;

	unit = device_get_unit(dev);

	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		u_int32_t		iobase, membase, irq;

		/* Save important PCI config data. */
		iobase = pci_read_config(dev, DC_PCI_CFBIO, 4);
		membase = pci_read_config(dev, DC_PCI_CFBMA, 4);
		irq = pci_read_config(dev, DC_PCI_CFIT, 4);

		/* Reset the power state.
		 */
		printf("dc%d: chip is in D%d power mode "
		    "-- setting to D0\n", unit,
		    pci_get_powerstate(dev));
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, DC_PCI_CFBIO, iobase, 4);
		pci_write_config(dev, DC_PCI_CFBMA, membase, 4);
		pci_write_config(dev, DC_PCI_CFIT, irq, 4);
	}

	return;
}

/*
 * Replay the SROM "general purpose" and reset sequences recorded for
 * the given media type by writing them to the watchdog/GPIO register.
 */
static void dc_apply_fixup(sc, media)
	struct dc_softc		*sc;
	int			media;
{
	struct dc_mediainfo	*m;
	u_int8_t		*p;
	int			i;
	u_int32_t		reg;

	m = sc->dc_mi;
	while (m != NULL) {
		if (m->dc_media == media)
			break;
		m = m->dc_next;
	}

	if (m == NULL)
		return;

	for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) {
		reg = (p[0] | (p[1] << 8)) << 16;
		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
	}

	for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) {
		reg = (p[0] | (p[1] << 8)) << 16;
		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
	}

	return;
}

/*
 * Decode an SROM SIA leaf block into a dc_mediainfo record and
 * prepend it to the softc's media list.
 */
static void dc_decode_leaf_sia(sc, l)
	struct dc_softc		*sc;
	struct dc_eblock_sia	*l;
{
	struct dc_mediainfo	*m;

	/* NOTE(review): M_NOWAIT may return NULL; m is not checked here. */
	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT);
	bzero(m, sizeof(struct dc_mediainfo));
	if (l->dc_sia_code == DC_SIA_CODE_10BT)
		m->dc_media = IFM_10_T;

	if (l->dc_sia_code == DC_SIA_CODE_10BT_FDX)
		m->dc_media = IFM_10_T|IFM_FDX;

	if (l->dc_sia_code == DC_SIA_CODE_10B2)
		m->dc_media = IFM_10_2;

	if (l->dc_sia_code == DC_SIA_CODE_10B5)
		m->dc_media = IFM_10_5;

	m->dc_gp_len = 2;
	m->dc_gp_ptr = (u_int8_t *)&l->dc_sia_gpio_ctl;

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	sc->dc_pmode = DC_PMODE_SIA;

	return;
}

/*
 * Decode an SROM SYM (symbol mode, 100Mbps) leaf block into a
 * dc_mediainfo record and prepend it to the softc's media list.
 */
static void dc_decode_leaf_sym(sc, l)
	struct dc_softc		*sc;
	struct dc_eblock_sym	*l;
{
	struct dc_mediainfo	*m;

	/* NOTE(review): M_NOWAIT may return NULL; m is not checked here. */
	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT);
	bzero(m, sizeof(struct dc_mediainfo));
	if (l->dc_sym_code == DC_SYM_CODE_100BT)
		m->dc_media = IFM_100_TX;

	if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX)
		m->dc_media = IFM_100_TX|IFM_FDX;

	m->dc_gp_len = 2;
	m->dc_gp_ptr = (u_int8_t *)&l->dc_sym_gpio_ctl;

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	sc->dc_pmode = DC_PMODE_SYM;

	return;
}

/*
 * Decode an SROM MII leaf block. The GPIO and reset sequences follow
 * the fixed-size header, so they are located by pointer arithmetic.
 */
static void
dc_decode_leaf_mii(sc, l)
	struct dc_softc		*sc;
	struct dc_eblock_mii	*l;
{
	u_int8_t		*p;
	struct dc_mediainfo	*m;

	/* NOTE(review): M_NOWAIT may return NULL; m is not checked here. */
	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT);
	bzero(m, sizeof(struct dc_mediainfo));
	/* We abuse IFM_AUTO to represent MII. */
	m->dc_media = IFM_AUTO;
	m->dc_gp_len = l->dc_gpr_len;

	p = (u_int8_t *)l;
	p += sizeof(struct dc_eblock_mii);
	m->dc_gp_ptr = p;
	p += 2 * l->dc_gpr_len;
	m->dc_reset_len = *p;
	p++;
	m->dc_reset_ptr = p;

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	return;
}

/*
 * Walk the 21143 SROM leaf for this adapter and decode each media
 * extension block we understand (MII, SIA, SYM).
 */
static void dc_parse_21143_srom(sc)
	struct dc_softc		*sc;
{
	struct dc_leaf_hdr	*lhdr;
	struct dc_eblock_hdr	*hdr;
	int			i, loff;
	char			*ptr;

	loff = sc->dc_srom[27];
	lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]);

	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		switch(hdr->dc_type) {
		case DC_EBLOCK_MII:
			dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr);
			break;
		case DC_EBLOCK_SIA:
			dc_decode_leaf_sia(sc, (struct dc_eblock_sia *)hdr);
			break;
		case DC_EBLOCK_SYM:
			dc_decode_leaf_sym(sc, (struct dc_eblock_sym *)hdr);
			break;
		default:
			/* Don't care. Yet. */
			break;
		}
		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}

	return;
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int dc_attach(dev)
	device_t		dev;
{
	int			tmp = 0;
	u_char			eaddr[ETHER_ADDR_LEN];
	u_int32_t		command;
	struct dc_softc		*sc;
	struct ifnet		*ifp;
	u_int32_t		revision;
	int			unit, error = 0, rid, mac_offset;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	bzero(sc, sizeof(struct dc_softc));

	mtx_init(&sc->dc_mtx, device_get_nameunit(dev), MTX_DEF | MTX_RECURSE);
	DC_LOCK(sc);

	/*
	 * Handle power management nonsense.
	 */
	dc_acpi(dev);

	/*
	 * Map control/status registers.
	 */
	command = pci_read_config(dev, PCIR_COMMAND, 4);
	command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
	pci_write_config(dev, PCIR_COMMAND, command, 4);
	command = pci_read_config(dev, PCIR_COMMAND, 4);

#ifdef DC_USEIOSPACE
	if (!(command & PCIM_CMD_PORTEN)) {
		printf("dc%d: failed to enable I/O ports!\n", unit);
		error = ENXIO;
		goto fail;
	}
#else
	if (!(command & PCIM_CMD_MEMEN)) {
		printf("dc%d: failed to enable memory mapping!\n", unit);
		error = ENXIO;
		goto fail;
	}
#endif

	rid = DC_RID;
	sc->dc_res = bus_alloc_resource(dev, DC_RES, &rid,
	    0, ~0, 1, RF_ACTIVE);

	if (sc->dc_res == NULL) {
		printf("dc%d: couldn't map ports/memory\n", unit);
		error = ENXIO;
		goto fail;
	}

	sc->dc_btag = rman_get_bustag(sc->dc_res);
	sc->dc_bhandle = rman_get_bushandle(sc->dc_res);

	/* Allocate interrupt */
	rid = 0;
	sc->dc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->dc_irq == NULL) {
		printf("dc%d: couldn't map interrupt\n", unit);
		bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res);
		error = ENXIO;
		goto fail;
	}

	error = bus_setup_intr(dev, sc->dc_irq, INTR_TYPE_NET |
	    (IS_MPSAFE ? INTR_MPSAFE : 0),
	    dc_intr, sc, &sc->dc_intrhand);

	if (error) {
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->dc_irq);
		bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res);
		printf("dc%d: couldn't set up irq\n", unit);
		goto fail;
	}

	/* Need this info to decide on a chip type. */
	sc->dc_info = dc_devtype(dev);
	revision = pci_read_config(dev, DC_PCI_CFRV, 4) & 0x000000FF;

	/* Per-chip quirk flags and workarounds. */
	switch(sc->dc_info->dc_did) {
	case DC_DEVICEID_21143:
		sc->dc_type = DC_TYPE_21143;
		sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		/* Save EEPROM contents so we can parse them later. */
		dc_read_eeprom(sc, (caddr_t)&sc->dc_srom, 0, 512, 0);
		break;
	case DC_DEVICEID_DM9100:
	case DC_DEVICEID_DM9102:
		sc->dc_type = DC_TYPE_DM9102;
		sc->dc_flags |= DC_TX_COALESCE|DC_TX_INTR_ALWAYS;
		sc->dc_flags |= DC_REDUCED_MII_POLL|DC_TX_STORENFWD;
		sc->dc_pmode = DC_PMODE_MII;
		/* Increase the latency timer value. */
		command = pci_read_config(dev, DC_PCI_CFLT, 4);
		command &= 0xFFFF00FF;
		command |= 0x00008000;
		pci_write_config(dev, DC_PCI_CFLT, command, 4);
		break;
	case DC_DEVICEID_AL981:
		sc->dc_type = DC_TYPE_AL981;
		sc->dc_flags |= DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_TX_ADMTEK_WAR;
		sc->dc_pmode = DC_PMODE_MII;
		break;
	case DC_DEVICEID_AN985:
	case DC_DEVICEID_FE2500:
	case DC_DEVICEID_EN2242:
		sc->dc_type = DC_TYPE_AN985;
		sc->dc_flags |= DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_TX_ADMTEK_WAR;
		sc->dc_pmode = DC_PMODE_MII;
		break;
	case DC_DEVICEID_98713:
	case DC_DEVICEID_98713_CP:
		if (revision < DC_REVISION_98713A) {
			sc->dc_type = DC_TYPE_98713;
		}
		if (revision >= DC_REVISION_98713A) {
			sc->dc_type = DC_TYPE_98713A;
			sc->dc_flags |= DC_21143_NWAY;
		}
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR;
		break;
	case DC_DEVICEID_987x5:
	case DC_DEVICEID_EN1217:
		/*
		 * Macronix MX98715AEC-C/D/E parts have only a
		 * 128-bit hash table. We need to deal with these
		 * in the same manner as the PNIC II so that we
		 * get the right number of bits out of the
		 * CRC routine.
		 */
		if (revision >= DC_REVISION_98715AEC_C &&
		    revision < DC_REVISION_98725)
			sc->dc_flags |= DC_128BIT_HASH;
		sc->dc_type = DC_TYPE_987x5;
		sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_REDUCED_MII_POLL|DC_21143_NWAY;
		break;
	case DC_DEVICEID_98727:
		sc->dc_type = DC_TYPE_987x5;
		sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_REDUCED_MII_POLL|DC_21143_NWAY;
		break;
	case DC_DEVICEID_82C115:
		sc->dc_type = DC_TYPE_PNICII;
		sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR|DC_128BIT_HASH;
		sc->dc_flags |= DC_REDUCED_MII_POLL|DC_21143_NWAY;
		break;
	case DC_DEVICEID_82C168:
		sc->dc_type = DC_TYPE_PNIC;
		sc->dc_flags |= DC_TX_STORENFWD|DC_TX_INTR_ALWAYS;
		sc->dc_flags |= DC_PNIC_RX_BUG_WAR;
		/* NOTE(review): M_NOWAIT result not checked for NULL here. */
		sc->dc_pnic_rx_buf = malloc(DC_RXLEN * 5, M_DEVBUF, M_NOWAIT);
		if (revision < DC_REVISION_82C169)
			sc->dc_pmode = DC_PMODE_SYM;
		break;
	case DC_DEVICEID_AX88140A:
		sc->dc_type = DC_TYPE_ASIX;
		sc->dc_flags |= DC_TX_USE_TX_INTR|DC_TX_INTR_FIRSTFRAG;
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		sc->dc_pmode = DC_PMODE_MII;
		break;
	case DC_DEVICEID_X3201:
		sc->dc_type = DC_TYPE_XIRCOM;
		sc->dc_flags |= DC_TX_INTR_ALWAYS | DC_TX_COALESCE;
		/*
		 * We don't actually need to coalesce, but we're doing
		 * it to obtain a double word aligned buffer.
		 */
		break;
	default:
		/*
		 * NOTE(review): sc->dc_unit is not assigned until later
		 * in this function, so this printf reports unit 0 (the
		 * softc was bzero'd above); 'unit' would be correct.
		 */
		printf("dc%d: unknown device: %x\n", sc->dc_unit,
		    sc->dc_info->dc_did);
		break;
	}

	/* Save the cache line size. */
	if (DC_IS_DAVICOM(sc))
		sc->dc_cachesize = 0;
	else
		sc->dc_cachesize = pci_read_config(dev,
		    DC_PCI_CFLT, 4) & 0xFF;

	/* Reset the adapter. */
	dc_reset(sc);

	/* Take 21143 out of snooze mode */
	if (DC_IS_INTEL(sc) || DC_IS_XIRCOM(sc)) {
		command = pci_read_config(dev, DC_PCI_CFDD, 4);
		command &= ~(DC_CFDD_SNOOZE_MODE|DC_CFDD_SLEEP_MODE);
		pci_write_config(dev, DC_PCI_CFDD, command, 4);
	}

	/*
	 * Try to learn something about the supported media.
	 * We know that ASIX and ADMtek and Davicom devices
	 * will *always* be using MII media, so that's a no-brainer.
	 * The tricky ones are the Macronix/PNIC II and the
	 * Intel 21143.
	 */
	if (DC_IS_INTEL(sc))
		dc_parse_21143_srom(sc);
	else if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) {
		if (sc->dc_type == DC_TYPE_98713)
			sc->dc_pmode = DC_PMODE_MII;
		else
			sc->dc_pmode = DC_PMODE_SYM;
	} else if (!sc->dc_pmode)
		sc->dc_pmode = DC_PMODE_MII;

	/*
	 * Get station address from the EEPROM.
	 */
	switch(sc->dc_type) {
	case DC_TYPE_98713:
	case DC_TYPE_98713A:
	case DC_TYPE_987x5:
	case DC_TYPE_PNICII:
		dc_read_eeprom(sc, (caddr_t)&mac_offset,
		    (DC_EE_NODEADDR_OFFSET / 2), 1, 0);
		dc_read_eeprom(sc, (caddr_t)&eaddr, (mac_offset / 2), 3, 0);
		break;
	case DC_TYPE_PNIC:
		dc_read_eeprom(sc, (caddr_t)&eaddr, 0, 3, 1);
		break;
	case DC_TYPE_DM9102:
	case DC_TYPE_21143:
	case DC_TYPE_ASIX:
		dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0);
		break;
	case DC_TYPE_AL981:
	case DC_TYPE_AN985:
		dc_read_eeprom(sc, (caddr_t)&eaddr, DC_AL_EE_NODEADDR, 3, 0);
		break;
	case DC_TYPE_XIRCOM:
		dc_read_eeprom(sc, (caddr_t)&eaddr, 3, 3, 0);
		break;
	default:
		dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0);
		break;
	}

	/*
	 * A 21143 or clone chip was detected. Inform the world.
	 */
	printf("dc%d: Ethernet address: %6D\n", unit, eaddr, ":");

	sc->dc_unit = unit;
	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

	/* Descriptor lists must be physically contiguous for DMA. */
	sc->dc_ldata = contigmalloc(sizeof(struct dc_list_data), M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->dc_ldata == NULL) {
		printf("dc%d: no memory for list buffers!\n", unit);
		bus_teardown_intr(dev, sc->dc_irq, sc->dc_intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->dc_irq);
		bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res);
		error = ENXIO;
		goto fail;
	}

	bzero(sc->dc_ldata, sizeof(struct dc_list_data));

	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_unit = unit;
	ifp->if_name = "dc";
	/* XXX: bleah, MTU gets overwritten in ether_ifattach() */
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dc_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = dc_start;
	ifp->if_watchdog = dc_watchdog;
	ifp->if_init = dc_init;
	ifp->if_baudrate = 10000000;
	ifp->if_snd.ifq_maxlen = DC_TX_LIST_CNT - 1;
	ifp->if_mpsafe = IS_MPSAFE;

	/*
	 * Do MII setup. If this is a 21143, check for a PHY on the
	 * MII bus after applying any necessary fixups to twiddle the
	 * GPIO bits. If we don't end up finding a PHY, restore the
	 * old selection (SIA only or SIA/SYM) and attach the dcphy
	 * driver instead.
	 */
	if (DC_IS_INTEL(sc)) {
		dc_apply_fixup(sc, IFM_AUTO);
		tmp = sc->dc_pmode;
		sc->dc_pmode = DC_PMODE_MII;
	}

	error = mii_phy_probe(dev, &sc->dc_miibus,
	    dc_ifmedia_upd, dc_ifmedia_sts);

	if (error && DC_IS_INTEL(sc)) {
		sc->dc_pmode = tmp;
		if (sc->dc_pmode != DC_PMODE_SIA)
			sc->dc_pmode = DC_PMODE_SYM;
		sc->dc_flags |= DC_21143_NWAY;
		mii_phy_probe(dev, &sc->dc_miibus,
		    dc_ifmedia_upd, dc_ifmedia_sts);
		/*
		 * For non-MII cards, we need to have the 21143
		 * drive the LEDs. Except there are some systems
		 * like the NEC VersaPro NoteBook PC which have no
		 * LEDs, and twiddling these bits has adverse effects
		 * on them. (I.e. you suddenly can't get a link.)
		 */
		if (pci_read_config(dev, DC_PCI_CSID, 4) != 0x80281033)
			sc->dc_flags |= DC_TULIP_LEDS;
		error = 0;
	}

	if (error) {
		printf("dc%d: MII without any PHY!\n", sc->dc_unit);
		bus_teardown_intr(dev, sc->dc_irq, sc->dc_intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->dc_irq);
		bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res);
		error = ENXIO;
		goto fail;
	}

	if (DC_IS_XIRCOM(sc)) {
		/*
		 * setup General Purpose Port mode and data so the tulip
		 * can talk to the MII.
		 */
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
	callout_init(&sc->dc_stat_ch, IS_MPSAFE);

#ifdef SRM_MEDIA
	sc->dc_srm_media = 0;

	/* Remember the SRM console media setting */
	if (DC_IS_INTEL(sc)) {
		command = pci_read_config(dev, DC_PCI_CFDD, 4);
		command &= ~(DC_CFDD_SNOOZE_MODE|DC_CFDD_SLEEP_MODE);
		switch ((command >> 8) & 0xff) {
		case 3:
			sc->dc_srm_media = IFM_10_T;
			break;
		case 4:
			sc->dc_srm_media = IFM_10_T | IFM_FDX;
			break;
		case 5:
			sc->dc_srm_media = IFM_100_TX;
			break;
		case 6:
			sc->dc_srm_media = IFM_100_TX | IFM_FDX;
			break;
		}
		if (sc->dc_srm_media)
			sc->dc_srm_media |= IFM_ACTIVE | IFM_ETHER;
	}
#endif

	DC_UNLOCK(sc);
	return(0);

fail:
	DC_UNLOCK(sc);
	mtx_destroy(&sc->dc_mtx);
	return(error);
}

/*
 * Detach the interface: stop the chip, tear down the MII bus, release
 * bus resources and free driver-allocated memory.
 */
static int dc_detach(dev)
	device_t		dev;
{
	struct dc_softc		*sc;
	struct ifnet		*ifp;
	struct dc_mediainfo	*m;

	sc = device_get_softc(dev);
	DC_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	dc_stop(sc);
	ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);

	bus_generic_detach(dev);
	device_delete_child(dev, sc->dc_miibus);

	bus_teardown_intr(dev, sc->dc_irq, sc->dc_intrhand);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->dc_irq);
	bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res);

	contigfree(sc->dc_ldata, sizeof(struct dc_list_data), M_DEVBUF);
	if (sc->dc_pnic_rx_buf != NULL)
		free(sc->dc_pnic_rx_buf, M_DEVBUF);

	/* Free the media-info list built by the SROM leaf decoders. */
	while(sc->dc_mi != NULL) {
		m = sc->dc_mi->dc_next;
		free(sc->dc_mi, M_DEVBUF);
		sc->dc_mi = m;
	}

	DC_UNLOCK(sc);
	mtx_destroy(&sc->dc_mtx);

	return(0);
}

/*
 * Initialize the transmit descriptors.
 */
static int dc_list_tx_init(sc)
	struct dc_softc		*sc;
{
	struct dc_chain_data	*cd;
	struct dc_list_data	*ld;
	int			i;

	cd = &sc->dc_cdata;
	ld = sc->dc_ldata;
	for (i = 0; i < DC_TX_LIST_CNT; i++) {
		/* Chain the descriptors into a closed ring. */
		if (i == (DC_TX_LIST_CNT - 1)) {
			ld->dc_tx_list[i].dc_next =
			    vtophys(&ld->dc_tx_list[0]);
		} else {
			ld->dc_tx_list[i].dc_next =
			    vtophys(&ld->dc_tx_list[i + 1]);
		}
		cd->dc_tx_chain[i] = NULL;
		ld->dc_tx_list[i].dc_data = 0;
		ld->dc_tx_list[i].dc_ctl = 0;
	}

	cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0;

	return(0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int dc_list_rx_init(sc)
	struct dc_softc		*sc;
{
	struct dc_chain_data	*cd;
	struct dc_list_data	*ld;
	int			i;

	cd = &sc->dc_cdata;
	ld = sc->dc_ldata;

	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		if (dc_newbuf(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
		if (i == (DC_RX_LIST_CNT - 1)) {
			ld->dc_rx_list[i].dc_next =
			    vtophys(&ld->dc_rx_list[0]);
		} else {
			ld->dc_rx_list[i].dc_next =
			    vtophys(&ld->dc_rx_list[i + 1]);
		}
	}

	cd->dc_rx_prod = 0;

	return(0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int dc_newbuf(sc, i, m)
	struct dc_softc		*sc;
	int			i;
	struct mbuf		*m;
{
	struct mbuf		*m_new = NULL;
	struct dc_desc		*c;

	c = &sc->dc_ldata->dc_rx_list[i];

	if (m == NULL) {
		/* No recycled mbuf supplied: allocate a fresh cluster. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("dc%d: no memory for rx list "
			    "-- packet dropped!\n", sc->dc_unit);
			return(ENOBUFS);
		}

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			printf("dc%d: no memory for rx list "
			    "-- packet dropped!\n", sc->dc_unit);
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		/* Reuse the caller's mbuf; reset length and data pointer. */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	m_adj(m_new, sizeof(u_int64_t));

	/*
	 * If this is a PNIC chip, zero the buffer. This is part
	 * of the workaround for the receive bug in the 82c168 and
	 * 82c169 chips.
	 */
	if (sc->dc_flags & DC_PNIC_RX_BUG_WAR)
		bzero((char *)mtod(m_new, char *), m_new->m_len);

	sc->dc_cdata.dc_rx_chain[i] = m_new;
	c->dc_data = vtophys(mtod(m_new, caddr_t));
	c->dc_ctl = DC_RXCTL_RLINK | DC_RXLEN;
	c->dc_status = DC_RXSTAT_OWN;

	return(0);
}

/*
 * Grrrrr.
 * The PNIC chip has a terrible bug in it that manifests itself during
 * periods of heavy activity. The exact mode of failure is difficult to
 * pinpoint: sometimes it only happens in promiscuous mode, sometimes it
 * will happen on slow machines. The bug is that sometimes instead of
 * uploading one complete frame during reception, it uploads what looks
 * like the entire contents of its FIFO memory. The frame we want is at
 * the end of the whole mess, but we never know exactly how much data has
 * been uploaded, so salvaging the frame is hard.
 *
 * There is only one way to do it reliably, and it's disgusting.
 * Here's what we know:
 *
 * - We know there will always be somewhere between one and three extra
 *   descriptors uploaded.
 *
 * - We know the desired received frame will always be at the end of the
 *   total data upload.
* * - We know the size of the desired received frame because it will be * provided in the length field of the status word in the last descriptor. * * Here's what we do: * * - When we allocate buffers for the receive ring, we bzero() them. * This means that we know that the buffer contents should be all * zeros, except for data uploaded by the chip. * * - We also force the PNIC chip to upload frames that include the * ethernet CRC at the end. * * - We gather all of the bogus frame data into a single buffer. * * - We then position a pointer at the end of this buffer and scan * backwards until we encounter the first non-zero byte of data. * This is the end of the received frame. We know we will encounter * some data at the end of the frame because the CRC will always be * there, so even if the sender transmits a packet of all zeros, * we won't be fooled. * * - We know the size of the actual received frame, so we subtract * that value from the current pointer location. This brings us * to the start of the actual received packet. * * - We copy this into an mbuf and pass it on, along with the actual * frame length. * * The performance hit is tremendous, but it beats dropping frames all * the time. */ #define DC_WHOLEFRAME (DC_RXSTAT_FIRSTFRAG|DC_RXSTAT_LASTFRAG) static void dc_pnic_rx_bug_war(sc, idx) struct dc_softc *sc; int idx; { struct dc_desc *cur_rx; struct dc_desc *c = NULL; struct mbuf *m = NULL; unsigned char *ptr; int i, total_len; u_int32_t rxstat = 0; i = sc->dc_pnic_rx_bug_save; cur_rx = &sc->dc_ldata->dc_rx_list[idx]; ptr = sc->dc_pnic_rx_buf; bzero(ptr, sizeof(DC_RXLEN * 5)); /* Copy all the bytes from the bogus buffers. */ while (1) { c = &sc->dc_ldata->dc_rx_list[i]; rxstat = c->dc_status; m = sc->dc_cdata.dc_rx_chain[i]; bcopy(mtod(m, char *), ptr, DC_RXLEN); ptr += DC_RXLEN; /* If this is the last buffer, break out. 
*/ if (i == idx || rxstat & DC_RXSTAT_LASTFRAG) break; dc_newbuf(sc, i, m); DC_INC(i, DC_RX_LIST_CNT); } /* Find the length of the actual receive frame. */ total_len = DC_RXBYTES(rxstat); /* Scan backwards until we hit a non-zero byte. */ while(*ptr == 0x00) ptr--; /* Round off. */ if ((uintptr_t)(ptr) & 0x3) ptr -= 1; /* Now find the start of the frame. */ ptr -= total_len; if (ptr < sc->dc_pnic_rx_buf) ptr = sc->dc_pnic_rx_buf; /* * Now copy the salvaged frame to the last mbuf and fake up * the status word to make it look like a successful * frame reception. */ dc_newbuf(sc, i, m); bcopy(ptr, mtod(m, char *), total_len); cur_rx->dc_status = rxstat | DC_RXSTAT_FIRSTFRAG; return; } /* * This routine searches the RX ring for dirty descriptors in the * event that the rxeof routine falls out of sync with the chip's * current descriptor pointer. This may happen sometimes as a result * of a "no RX buffer available" condition that happens when the chip * consumes all of the RX buffers before the driver has a chance to * process the RX ring. This routine may need to be called more than * once to bring the driver back in sync with the chip, however we * should still be getting RX DONE interrupts to drive the search * for new packets in the RX ring, so we should catch up eventually. */ static int dc_rx_resync(sc) struct dc_softc *sc; { int i, pos; struct dc_desc *cur_rx; pos = sc->dc_cdata.dc_rx_prod; for (i = 0; i < DC_RX_LIST_CNT; i++) { cur_rx = &sc->dc_ldata->dc_rx_list[pos]; if (!(cur_rx->dc_status & DC_RXSTAT_OWN)) break; DC_INC(pos, DC_RX_LIST_CNT); } /* If the ring really is empty, then just return. */ if (i == DC_RX_LIST_CNT) return(0); /* We've fallen behing the chip: catch it. */ sc->dc_cdata.dc_rx_prod = pos; return(EAGAIN); } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. 
 */
static void dc_rxeof(sc)
	struct dc_softc		*sc;
{
	struct ether_header	*eh;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct dc_desc		*cur_rx;
	int			i, total_len = 0;
	u_int32_t		rxstat;

	ifp = &sc->arpcom.ac_if;
	i = sc->dc_cdata.dc_rx_prod;

	/* Walk the ring until we reach a descriptor the chip still owns. */
	while(!(sc->dc_ldata->dc_rx_list[i].dc_status & DC_RXSTAT_OWN)) {
		struct mbuf		*m0 = NULL;

		cur_rx = &sc->dc_ldata->dc_rx_list[i];
		rxstat = cur_rx->dc_status;
		m = sc->dc_cdata.dc_rx_chain[i];
		total_len = DC_RXBYTES(rxstat);

		if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) {
			/*
			 * A frame split across descriptors means the PNIC
			 * FIFO-dump bug fired; salvage the real frame with
			 * dc_pnic_rx_bug_war() once the last fragment shows.
			 */
			if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) {
				if (rxstat & DC_RXSTAT_FIRSTFRAG)
					sc->dc_pnic_rx_bug_save = i;
				if ((rxstat & DC_RXSTAT_LASTFRAG) == 0) {
					DC_INC(i, DC_RX_LIST_CNT);
					continue;
				}
				dc_pnic_rx_bug_war(sc, i);
				rxstat = cur_rx->dc_status;
				total_len = DC_RXBYTES(rxstat);
			}
		}

		sc->dc_cdata.dc_rx_chain[i] = NULL;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & DC_RXSTAT_RXERR) {
			ifp->if_ierrors++;
			if (rxstat & DC_RXSTAT_COLLSEEN)
				ifp->if_collisions++;
			dc_newbuf(sc, i, m);
			if (rxstat & DC_RXSTAT_CRCERR) {
				DC_INC(i, DC_RX_LIST_CNT);
				continue;
			} else {
				/* Non-CRC errors: reinitialize the chip. */
				dc_init(sc);
				return;
			}
		}

		/* No errors; receive the packet. */
		total_len -= ETHER_CRC_LEN;

		m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
		    total_len + ETHER_ALIGN, 0, ifp, NULL);
		dc_newbuf(sc, i, m);
		DC_INC(i, DC_RX_LIST_CNT);
		if (m0 == NULL) {
			ifp->if_ierrors++;
			continue;
		}
		m_adj(m0, ETHER_ALIGN);
		m = m0;

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);

		/* Remove header from mbuf and pass it on. */
		m_adj(m, sizeof(struct ether_header));
		ether_input(ifp, eh, m);
	}

	sc->dc_cdata.dc_rx_prod = i;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void dc_txeof(sc)
	struct dc_softc		*sc;
{
	struct dc_desc		*cur_tx = NULL;
	struct ifnet		*ifp;
	int			idx;

	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->dc_cdata.dc_tx_cons;
	while(idx != sc->dc_cdata.dc_tx_prod) {
		u_int32_t		txstat;

		cur_tx = &sc->dc_ldata->dc_tx_list[idx];
		txstat = cur_tx->dc_status;

		if (txstat & DC_TXSTAT_OWN)
			break;

		/* Intermediate fragments and setup frames: just account. */
		if (!(cur_tx->dc_ctl & DC_TXCTL_LASTFRAG) ||
		    cur_tx->dc_ctl & DC_TXCTL_SETUP) {
			sc->dc_cdata.dc_tx_cnt--;
			if (cur_tx->dc_ctl & DC_TXCTL_SETUP) {
				/*
				 * Yes, the PNIC is so brain damaged
				 * that it will sometimes generate a TX
				 * underrun error while DMAing the RX
				 * filter setup frame. If we detect this,
				 * we have to send the setup frame again,
				 * or else the filter won't be programmed
				 * correctly.
				 */
				if (DC_IS_PNIC(sc)) {
					if (txstat & DC_TXSTAT_ERRSUM)
						dc_setfilt(sc);
				}
				sc->dc_cdata.dc_tx_chain[idx] = NULL;
			}
			DC_INC(idx, DC_TX_LIST_CNT);
			continue;
		}

		if (DC_IS_XIRCOM(sc)) {
			/*
			 * XXX: Why does my Xircom taunt me so?
			 * For some reason it likes setting the CARRLOST flag
			 * even when the carrier is there. wtf?!?
			 */
			if (/*sc->dc_type == DC_TYPE_21143 &&*/
			    sc->dc_pmode == DC_PMODE_MII &&
			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM|
			    DC_TXSTAT_NOCARRIER)))
				txstat &= ~DC_TXSTAT_ERRSUM;
		} else {
			if (/*sc->dc_type == DC_TYPE_21143 &&*/
			    sc->dc_pmode == DC_PMODE_MII &&
			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM|
			    DC_TXSTAT_NOCARRIER|DC_TXSTAT_CARRLOST)))
				txstat &= ~DC_TXSTAT_ERRSUM;
		}

		if (txstat & DC_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & DC_TXSTAT_EXCESSCOLL)
				ifp->if_collisions++;
			if (txstat & DC_TXSTAT_LATECOLL)
				ifp->if_collisions++;
			if (!(txstat & DC_TXSTAT_UNDERRUN)) {
				dc_init(sc);
				return;
			}
		}

		ifp->if_collisions += (txstat & DC_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
		if (sc->dc_cdata.dc_tx_chain[idx] != NULL) {
			m_freem(sc->dc_cdata.dc_tx_chain[idx]);
			sc->dc_cdata.dc_tx_chain[idx] = NULL;
		}

		sc->dc_cdata.dc_tx_cnt--;
		DC_INC(idx, DC_TX_LIST_CNT);
	}

	sc->dc_cdata.dc_tx_cons = idx;
	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;

	return;
}

/*
 * Periodic timer: poll link state (with chip-specific reduced-polling
 * workarounds) and kick off queued transmissions once a link appears.
 */
static void dc_tick(xsc)
	void			*xsc;
{
	struct dc_softc		*sc;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	u_int32_t		r;

	sc = xsc;
	DC_LOCK(sc);
	ifp = &sc->arpcom.ac_if;
	mii = device_get_softc(sc->dc_miibus);

	if (sc->dc_flags & DC_REDUCED_MII_POLL) {
		if (sc->dc_flags & DC_21143_NWAY) {
			r = CSR_READ_4(sc, DC_10BTSTAT);
			if (IFM_SUBTYPE(mii->mii_media_active) ==
			    IFM_100_TX && (r & DC_TSTAT_LS100)) {
				sc->dc_link = 0;
				mii_mediachg(mii);
			}
			if (IFM_SUBTYPE(mii->mii_media_active) ==
			    IFM_10_T && (r & DC_TSTAT_LS10)) {
				sc->dc_link = 0;
				mii_mediachg(mii);
			}
			if (sc->dc_link == 0)
				mii_tick(mii);
		} else {
			r = CSR_READ_4(sc, DC_ISR);
			if ((r & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT &&
			    sc->dc_cdata.dc_tx_cnt == 0)
				mii_tick(mii);
			if (!(mii->mii_media_status & IFM_ACTIVE))
				sc->dc_link = 0;
		}
	} else
		mii_tick(mii);

	/*
	 * When the init routine completes, we expect to be able to send
	 * packets right away, and in fact the network code will send a
	 * gratuitous ARP the moment the init routine marks the interface
	 * as running.
	 * However, even though the MAC may have been initialized,
	 * there may be a delay of a few seconds before the PHY completes
	 * autonegotiation and the link is brought up. Any transmissions
	 * made during that delay will be lost. Dealing with this is tricky:
	 * we can't just pause in the init routine while waiting for the
	 * PHY to come ready since that would bring the whole system to
	 * a screeching halt for several seconds.
	 *
	 * What we do here is prevent the TX start routine from sending
	 * any packets until a link has been established. After the
	 * interface has been initialized, the tick routine will poll
	 * the state of the PHY until the IFM_ACTIVE flag is set. Until
	 * that time, packets will stay in the send queue, and once the
	 * link comes up, they will be flushed out to the wire.
	 */
	if (!sc->dc_link) {
		mii_pollstat(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->dc_link++;
			if (ifp->if_snd.ifq_head != NULL)
				dc_start(ifp);
		}
	}

	/* Poll faster while NWAY autonegotiation is still in progress. */
	if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link)
		callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc);
	else
		callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc);

	DC_UNLOCK(sc);

	return;
}

/*
 * Interrupt handler: service RX/TX completions, TX underruns and bus
 * errors until the chip's interrupt status register is quiescent.
 */
static void dc_intr(arg)
	void			*arg;
{
	struct dc_softc		*sc;
	struct ifnet		*ifp;
	u_int32_t		status;

	sc = arg;
	DC_LOCK(sc);

	ifp = &sc->arpcom.ac_if;

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_UP)) {
		if (CSR_READ_4(sc, DC_ISR) & DC_INTRS)
			dc_stop(sc);
		DC_UNLOCK(sc);
		return;
	}

	/* Disable interrupts. */
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);

	/* 0xFFFFFFFF means the card has likely been removed. */
	while(((status = CSR_READ_4(sc, DC_ISR)) & DC_INTRS)
	    && status != 0xFFFFFFFF) {

		/* Writing the status bits back acknowledges them. */
		CSR_WRITE_4(sc, DC_ISR, status);

		if (status & DC_ISR_RX_OK) {
			int		curpkts;
			curpkts = ifp->if_ipackets;
			dc_rxeof(sc);
			/* No packets seen: resync with the chip's pointer. */
			if (curpkts == ifp->if_ipackets) {
				while(dc_rx_resync(sc))
					dc_rxeof(sc);
			}
		}

		if (status & (DC_ISR_TX_OK|DC_ISR_TX_NOBUF))
			dc_txeof(sc);

		if (status & DC_ISR_TX_IDLE) {
			dc_txeof(sc);
			if (sc->dc_cdata.dc_tx_cnt) {
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
				CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
			}
		}

		if (status & DC_ISR_TX_UNDERRUN) {
			u_int32_t		cfg;

			printf("dc%d: TX underrun -- ", sc->dc_unit);
			if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc))
				dc_init(sc);
			cfg = CSR_READ_4(sc, DC_NETCFG);
			cfg &= ~DC_NETCFG_TX_THRESH;
			if (sc->dc_txthresh == DC_TXTHRESH_160BYTES) {
				printf("using store and forward mode\n");
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
			} else if (sc->dc_flags & DC_TX_STORENFWD) {
				printf("resetting\n");
			} else {
				/* Bump the TX FIFO threshold one notch. */
				sc->dc_txthresh += 0x4000;
				printf("increasing TX threshold\n");
				CSR_WRITE_4(sc, DC_NETCFG, cfg);
				DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
				DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
			}
		}

		if ((status & DC_ISR_RX_WATDOGTIMEO)
		    || (status & DC_ISR_RX_NOBUF)) {
			int		curpkts;
			curpkts = ifp->if_ipackets;
			dc_rxeof(sc);
			if (curpkts == ifp->if_ipackets) {
				while(dc_rx_resync(sc))
					dc_rxeof(sc);
			}
		}

		if (status & DC_ISR_BUS_ERR) {
			dc_reset(sc);
			dc_init(sc);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);

	if (ifp->if_snd.ifq_head != NULL)
		dc_start(ifp);

	DC_UNLOCK(sc);

	return;
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int dc_encap(sc, m_head, txidx)
	struct dc_softc		*sc;
	struct mbuf		*m_head;
	u_int32_t		*txidx;
{
	struct dc_desc		*f = NULL;
	struct mbuf		*m;
	int			frag, cur, cnt = 0;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	cur = frag = *txidx;

	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if (sc->dc_flags & DC_TX_ADMTEK_WAR) {
				/*
				 * ADMtek workaround: don't let a packet
				 * wrap around the end of the descriptor
				 * ring.
				 */
				if (*txidx != sc->dc_cdata.dc_tx_prod &&
				    frag == (DC_TX_LIST_CNT - 1))
					return(ENOBUFS);
			}
			/* Keep a few descriptors in reserve. */
			if ((DC_TX_LIST_CNT -
			    (sc->dc_cdata.dc_tx_cnt + cnt)) < 5)
				return(ENOBUFS);

			f = &sc->dc_ldata->dc_tx_list[frag];
			f->dc_ctl = DC_TXCTL_TLINK | m->m_len;
			if (cnt == 0) {
				/*
				 * Don't hand the first descriptor to the
				 * chip until the whole chain is set up.
				 */
				f->dc_status = 0;
				f->dc_ctl |= DC_TXCTL_FIRSTFRAG;
			} else
				f->dc_status = DC_TXSTAT_OWN;
			f->dc_data = vtophys(mtod(m, vm_offset_t));
			cur = frag;
			DC_INC(frag, DC_TX_LIST_CNT);
			cnt++;
		}
	}

	if (m != NULL)
		return(ENOBUFS);

	sc->dc_cdata.dc_tx_cnt += cnt;
	sc->dc_cdata.dc_tx_chain[cur] = m_head;
	sc->dc_ldata->dc_tx_list[cur].dc_ctl |= DC_TXCTL_LASTFRAG;
	if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG)
		sc->dc_ldata->dc_tx_list[*txidx].dc_ctl |= DC_TXCTL_FINT;
	if (sc->dc_flags & DC_TX_INTR_ALWAYS)
		sc->dc_ldata->dc_tx_list[cur].dc_ctl |= DC_TXCTL_FINT;
	if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64)
		sc->dc_ldata->dc_tx_list[cur].dc_ctl |= DC_TXCTL_FINT;
	/* Finally hand the first descriptor to the chip. */
	sc->dc_ldata->dc_tx_list[*txidx].dc_status = DC_TXSTAT_OWN;
	*txidx = frag;

	return(0);
}

/*
 * Coalesce an mbuf chain into a single mbuf cluster buffer.
 * Needed for some really badly behaved chips that just can't
 * do scatter/gather correctly.
 */
static int dc_coal(sc, m_head)
	struct dc_softc		*sc;
	struct mbuf		**m_head;
{
	struct mbuf		*m_new, *m;

	m = *m_head;
	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
	if (m_new == NULL) {
		printf("dc%d: no memory for tx list", sc->dc_unit);
		return(ENOBUFS);
	}
	if (m->m_pkthdr.len > MHLEN) {
		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			printf("dc%d: no memory for tx list", sc->dc_unit);
			return(ENOBUFS);
		}
	}
	m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, caddr_t));
	m_new->m_pkthdr.len = m_new->m_len = m->m_pkthdr.len;
	m_freem(m);
	*m_head = m_new;

	return(0);
}

/*
 * Main transmit routine.
To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit lists. We also save a * copy of the pointers since the transmit list fragment pointers are * physical addresses. */ static void dc_start(ifp) struct ifnet *ifp; { struct dc_softc *sc; struct mbuf *m_head = NULL; int idx; sc = ifp->if_softc; DC_LOCK(sc); if (!sc->dc_link) { DC_UNLOCK(sc); return; } if (ifp->if_flags & IFF_OACTIVE) { DC_UNLOCK(sc); return; } idx = sc->dc_cdata.dc_tx_prod; while(sc->dc_cdata.dc_tx_chain[idx] == NULL) { IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; if (sc->dc_flags & DC_TX_COALESCE) { if (dc_coal(sc, &m_head)) { IF_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; break; } } if (dc_encap(sc, m_head, &idx)) { IF_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; break; } /* * If there's a BPF listener, bounce a copy of this frame * to him. */ if (ifp->if_bpf) bpf_mtap(ifp, m_head); if (sc->dc_flags & DC_TX_ONE) { ifp->if_flags |= IFF_OACTIVE; break; } } /* Transmit */ sc->dc_cdata.dc_tx_prod = idx; if (!(sc->dc_flags & DC_TX_POLL)) CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); /* * Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; DC_UNLOCK(sc); return; } static void dc_init(xsc) void *xsc; { struct dc_softc *sc = xsc; struct ifnet *ifp = &sc->arpcom.ac_if; struct mii_data *mii; DC_LOCK(sc); mii = device_get_softc(sc->dc_miibus); /* * Cancel pending I/O and free all RX/TX buffers. */ dc_stop(sc); dc_reset(sc); /* * Set cache alignment and burst length. 
*/ if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc)) CSR_WRITE_4(sc, DC_BUSCTL, 0); else CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME|DC_BUSCTL_MRLE); if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) { DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA); } else { DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG); } if (sc->dc_flags & DC_TX_POLL) DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1); switch(sc->dc_cachesize) { case 32: DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG); break; case 16: DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG); break; case 8: DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG); break; case 0: default: DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE); break; } if (sc->dc_flags & DC_TX_STORENFWD) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); else { if (sc->dc_txthresh == DC_TXTHRESH_160BYTES) { DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); } else { DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh); } } DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC); DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF); if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) { /* * The app notes for the 98713 and 98715A say that * in order to have the chips operate properly, a magic * number must be written to CSR16. Macronix does not * document the meaning of these bits so there's no way * to know exactly what they do. The 98713 has a magic * number all its own; the rest all use a different one. */ DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000); if (sc->dc_type == DC_TYPE_98713) DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713); else DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715); } if (DC_IS_XIRCOM(sc)) { /* * setup General Purpose Port mode and data so the tulip * can talk to the MII. 
*/ CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN | DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); DELAY(10); CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN | DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); DELAY(10); } DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH); DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_72BYTES); /* Init circular RX list. */ if (dc_list_rx_init(sc) == ENOBUFS) { printf("dc%d: initialization failed: no " "memory for rx buffers\n", sc->dc_unit); dc_stop(sc); DC_UNLOCK(sc); return; } /* * Init tx descriptors. */ dc_list_tx_init(sc); /* * Load the address of the RX list. */ CSR_WRITE_4(sc, DC_RXADDR, vtophys(&sc->dc_ldata->dc_rx_list[0])); CSR_WRITE_4(sc, DC_TXADDR, vtophys(&sc->dc_ldata->dc_tx_list[0])); /* * Enable interrupts. */ CSR_WRITE_4(sc, DC_IMR, DC_INTRS); CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF); /* Enable transmitter. */ DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); /* * If this is an Intel 21143 and we're not using the * MII port, program the LED control pins so we get * link and activity indications. */ if (sc->dc_flags & DC_TULIP_LEDS) { CSR_WRITE_4(sc, DC_WATCHDOG, DC_WDOG_CTLWREN|DC_WDOG_LINK|DC_WDOG_ACTIVITY); CSR_WRITE_4(sc, DC_WATCHDOG, 0); } /* * Load the RX/multicast filter. We do this sort of late * because the filter programming scheme on the 21143 and * some clones requires DMAing a setup frame via the TX * engine, and we need the transmitter enabled for that. */ dc_setfilt(sc); /* Enable receiver. */ DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON); CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF); mii_mediachg(mii); dc_setcfg(sc, sc->dc_if_media); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; /* Don't start the ticker if this is a homePNA link. 
*/ if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_homePNA) sc->dc_link = 1; else { if (sc->dc_flags & DC_21143_NWAY) callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc); else callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc); } #ifdef SRM_MEDIA if(sc->dc_srm_media) { struct ifreq ifr; ifr.ifr_media = sc->dc_srm_media; ifmedia_ioctl(ifp, &ifr, &mii->mii_media, SIOCSIFMEDIA); sc->dc_srm_media = 0; } #endif DC_UNLOCK(sc); return; } /* * Set media options. */ static int dc_ifmedia_upd(ifp) struct ifnet *ifp; { struct dc_softc *sc; struct mii_data *mii; struct ifmedia *ifm; sc = ifp->if_softc; mii = device_get_softc(sc->dc_miibus); mii_mediachg(mii); ifm = &mii->mii_media; if (DC_IS_DAVICOM(sc) && IFM_SUBTYPE(ifm->ifm_media) == IFM_homePNA) dc_setcfg(sc, ifm->ifm_media); else sc->dc_link = 0; return(0); } /* * Report current media status. */ static void dc_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct dc_softc *sc; struct mii_data *mii; struct ifmedia *ifm; sc = ifp->if_softc; mii = device_get_softc(sc->dc_miibus); mii_pollstat(mii); ifm = &mii->mii_media; if (DC_IS_DAVICOM(sc)) { if (IFM_SUBTYPE(ifm->ifm_media) == IFM_homePNA) { ifmr->ifm_active = ifm->ifm_media; ifmr->ifm_status = 0; return; } } ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; return; } static int dc_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct dc_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int error = 0; DC_LOCK(sc); switch(command) { case SIOCSIFADDR: case SIOCGIFADDR: case SIOCSIFMTU: error = ether_ioctl(ifp, command, data); break; case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->dc_if_flags & IFF_PROMISC)) { dc_setfilt(sc); } else if (ifp->if_flags & IFF_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->dc_if_flags & IFF_PROMISC) { dc_setfilt(sc); } else if (!(ifp->if_flags & 
IFF_RUNNING)) { sc->dc_txthresh = 0; dc_init(sc); } } else { if (ifp->if_flags & IFF_RUNNING) dc_stop(sc); } sc->dc_if_flags = ifp->if_flags; error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: dc_setfilt(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->dc_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); #ifdef SRM_MEDIA if (sc->dc_srm_media) sc->dc_srm_media = 0; #endif break; default: error = EINVAL; break; } DC_UNLOCK(sc); return(error); } static void dc_watchdog(ifp) struct ifnet *ifp; { struct dc_softc *sc; sc = ifp->if_softc; DC_LOCK(sc); ifp->if_oerrors++; printf("dc%d: watchdog timeout\n", sc->dc_unit); dc_stop(sc); dc_reset(sc); dc_init(sc); if (ifp->if_snd.ifq_head != NULL) dc_start(ifp); DC_UNLOCK(sc); return; } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void dc_stop(sc) struct dc_softc *sc; { register int i; struct ifnet *ifp; DC_LOCK(sc); ifp = &sc->arpcom.ac_if; ifp->if_timer = 0; callout_stop(&sc->dc_stat_ch); DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON|DC_NETCFG_TX_ON)); CSR_WRITE_4(sc, DC_IMR, 0x00000000); CSR_WRITE_4(sc, DC_TXADDR, 0x00000000); CSR_WRITE_4(sc, DC_RXADDR, 0x00000000); sc->dc_link = 0; /* * Free data in the RX lists. */ for (i = 0; i < DC_RX_LIST_CNT; i++) { if (sc->dc_cdata.dc_rx_chain[i] != NULL) { m_freem(sc->dc_cdata.dc_rx_chain[i]); sc->dc_cdata.dc_rx_chain[i] = NULL; } } bzero((char *)&sc->dc_ldata->dc_rx_list, sizeof(sc->dc_ldata->dc_rx_list)); /* * Free the TX list buffers. 
*/ for (i = 0; i < DC_TX_LIST_CNT; i++) { if (sc->dc_cdata.dc_tx_chain[i] != NULL) { if (sc->dc_ldata->dc_tx_list[i].dc_ctl & DC_TXCTL_SETUP) { sc->dc_cdata.dc_tx_chain[i] = NULL; continue; } m_freem(sc->dc_cdata.dc_tx_chain[i]); sc->dc_cdata.dc_tx_chain[i] = NULL; } } bzero((char *)&sc->dc_ldata->dc_tx_list, sizeof(sc->dc_ldata->dc_tx_list)); ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); DC_UNLOCK(sc); return; } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static void dc_shutdown(dev) device_t dev; { struct dc_softc *sc; sc = device_get_softc(dev); dc_stop(sc); return; } Index: head/sys/dev/fxp/if_fxp.c =================================================================== --- head/sys/dev/fxp/if_fxp.c (revision 71961) +++ head/sys/dev/fxp/if_fxp.c (revision 71962) @@ -1,1951 +1,1950 @@ /* * Copyright (c) 1995, David Greenman * All rights reserved. * * Modifications to support media selection: * Copyright (c) 1997 Jason R. Thorpe. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Intel EtherExpress Pro/100B PCI Fast Ethernet driver */ #include #include #include #include #include #include #include #include #include #include #ifdef NS #include #include #endif #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include /* for PCIM_CMD_xxx */ #include #include #ifdef __alpha__ /* XXX */ /* XXX XXX NEED REAL DMA MAPPING SUPPORT XXX XXX */ #undef vtophys #define vtophys(va) alpha_XXX_dmamap((vm_offset_t)(va)) #endif /* __alpha__ */ /* * NOTE! On the Alpha, we have an alignment constraint. The * card DMAs the packet immediately following the RFA. However, * the first thing in the packet is a 14-byte Ethernet header. * This means that the packet is misaligned. To compensate, * we actually offset the RFA 2 bytes into the cluster. This * alignes the packet after the Ethernet header at a 32-bit * boundary. HOWEVER! This means that the RFA is misaligned! */ #define RFA_ALIGNMENT_FUDGE 2 /* * Inline function to copy a 16-bit aligned 32-bit quantity. */ static __inline void fxp_lwcopy __P((volatile u_int32_t *, volatile u_int32_t *)); static __inline void fxp_lwcopy(src, dst) volatile u_int32_t *src, *dst; { #ifdef __i386__ *dst = *src; #else volatile u_int16_t *a = (volatile u_int16_t *)src; volatile u_int16_t *b = (volatile u_int16_t *)dst; b[0] = a[0]; b[1] = a[1]; #endif } /* * Template for default configuration parameters. 
* See struct fxp_cb_config for the bit definitions. */ static u_char fxp_cb_config_template[] = { 0x0, 0x0, /* cb_status */ 0x80, 0x2, /* cb_command */ 0xff, 0xff, 0xff, 0xff, /* link_addr */ 0x16, /* 0 */ 0x8, /* 1 */ 0x0, /* 2 */ 0x0, /* 3 */ 0x0, /* 4 */ 0x80, /* 5 */ 0xb2, /* 6 */ 0x3, /* 7 */ 0x1, /* 8 */ 0x0, /* 9 */ 0x26, /* 10 */ 0x0, /* 11 */ 0x60, /* 12 */ 0x0, /* 13 */ 0xf2, /* 14 */ 0x48, /* 15 */ 0x0, /* 16 */ 0x40, /* 17 */ 0xf3, /* 18 */ 0x0, /* 19 */ 0x3f, /* 20 */ 0x5 /* 21 */ }; /* Supported media types. */ struct fxp_supported_media { const int fsm_phy; /* PHY type */ const int *fsm_media; /* the media array */ const int fsm_nmedia; /* the number of supported media */ const int fsm_defmedia; /* default media for this PHY */ }; static const int fxp_media_standard[] = { IFM_ETHER|IFM_10_T, IFM_ETHER|IFM_10_T|IFM_FDX, IFM_ETHER|IFM_100_TX, IFM_ETHER|IFM_100_TX|IFM_FDX, IFM_ETHER|IFM_AUTO, }; #define FXP_MEDIA_STANDARD_DEFMEDIA (IFM_ETHER|IFM_AUTO) static const int fxp_media_default[] = { IFM_ETHER|IFM_MANUAL, /* XXX IFM_AUTO ? 
*/ }; #define FXP_MEDIA_DEFAULT_DEFMEDIA (IFM_ETHER|IFM_MANUAL) static const struct fxp_supported_media fxp_media[] = { { FXP_PHY_DP83840, fxp_media_standard, sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]), FXP_MEDIA_STANDARD_DEFMEDIA }, { FXP_PHY_DP83840A, fxp_media_standard, sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]), FXP_MEDIA_STANDARD_DEFMEDIA }, { FXP_PHY_82553A, fxp_media_standard, sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]), FXP_MEDIA_STANDARD_DEFMEDIA }, { FXP_PHY_82553C, fxp_media_standard, sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]), FXP_MEDIA_STANDARD_DEFMEDIA }, { FXP_PHY_82555, fxp_media_standard, sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]), FXP_MEDIA_STANDARD_DEFMEDIA }, { FXP_PHY_82555B, fxp_media_standard, sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]), FXP_MEDIA_STANDARD_DEFMEDIA }, { FXP_PHY_80C24, fxp_media_default, sizeof(fxp_media_default) / sizeof(fxp_media_default[0]), FXP_MEDIA_DEFAULT_DEFMEDIA }, }; #define NFXPMEDIA (sizeof(fxp_media) / sizeof(fxp_media[0])) static int fxp_mediachange __P((struct ifnet *)); static void fxp_mediastatus __P((struct ifnet *, struct ifmediareq *)); static void fxp_set_media __P((struct fxp_softc *, int)); static __inline void fxp_scb_wait __P((struct fxp_softc *)); static __inline void fxp_dma_wait __P((volatile u_int16_t *, struct fxp_softc *sc)); static void fxp_intr __P((void *)); static void fxp_start __P((struct ifnet *)); static int fxp_ioctl __P((struct ifnet *, u_long, caddr_t)); static void fxp_init __P((void *)); static void fxp_stop __P((struct fxp_softc *)); static void fxp_watchdog __P((struct ifnet *)); static int fxp_add_rfabuf __P((struct fxp_softc *, struct mbuf *)); static int fxp_mdi_read __P((struct fxp_softc *, int, int)); static void fxp_mdi_write __P((struct fxp_softc *, int, int, int)); static void fxp_autosize_eeprom __P((struct fxp_softc *)); static void fxp_read_eeprom __P((struct fxp_softc *, 
u_int16_t *, int, int)); static int fxp_attach_common __P((struct fxp_softc *, u_int8_t *)); static void fxp_stats_update __P((void *)); static void fxp_mc_setup __P((struct fxp_softc *)); /* * Set initial transmit threshold at 64 (512 bytes). This is * increased by 64 (512 bytes) at a time, to maximum of 192 * (1536 bytes), if an underrun occurs. */ static int tx_threshold = 64; /* * Number of transmit control blocks. This determines the number * of transmit buffers that can be chained in the CB list. * This must be a power of two. */ #define FXP_NTXCB 128 /* * Number of completed TX commands at which point an interrupt * will be generated to garbage collect the attached buffers. * Must be at least one less than FXP_NTXCB, and should be * enough less so that the transmitter doesn't becomes idle * during the buffer rundown (which would reduce performance). */ #define FXP_CXINT_THRESH 120 /* * TxCB list index mask. This is used to do list wrap-around. */ #define FXP_TXCB_MASK (FXP_NTXCB - 1) /* * Number of receive frame area buffers. These are large so chose * wisely. */ #define FXP_NRFABUFS 64 /* * Maximum number of seconds that the receiver can be idle before we * assume it's dead and attempt to reset it by reprogramming the * multicast filter. This is part of a work-around for a bug in the * NIC. See fxp_stats_update(). */ #define FXP_MAX_RX_IDLE 15 /* * Wait for the previous command to be accepted (but not necessarily * completed). */ static __inline void fxp_scb_wait(sc) struct fxp_softc *sc; { int i = 10000; while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i) DELAY(2); if (i == 0) printf("fxp%d: SCB timeout\n", FXP_UNIT(sc)); } static __inline void fxp_dma_wait(status, sc) volatile u_int16_t *status; struct fxp_softc *sc; { int i = 10000; while (!(*status & FXP_CB_STATUS_C) && --i) DELAY(2); if (i == 0) printf("fxp%d: DMA timeout\n", FXP_UNIT(sc)); } /* * Return identification string if this is device is ours. 
*/ static int fxp_probe(device_t dev) { if (pci_get_vendor(dev) == FXP_VENDORID_INTEL) { switch (pci_get_device(dev)) { case FXP_DEVICEID_i82557: device_set_desc(dev, "Intel Pro 10/100B/100+ Ethernet"); return 0; case FXP_DEVICEID_i82559: device_set_desc(dev, "Intel InBusiness 10/100 Ethernet"); return 0; case FXP_DEVICEID_i82559ER: device_set_desc(dev, "Intel Embedded 10/100 Ethernet"); return 0; case FXP_DEVICEID_i82562: device_set_desc(dev, "Intel PLC 10/100 Ethernet"); return 0; default: break; } } return ENXIO; } static int fxp_attach(device_t dev) { int error = 0; struct fxp_softc *sc = device_get_softc(dev); struct ifnet *ifp; u_int32_t val; int rid, m1, m2, ebitmap; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_DEF | MTX_RECURSE); callout_handle_init(&sc->stat_ch); FXP_LOCK(sc); /* * Enable bus mastering. Enable memory space too, in case * BIOS/Prom forgot about it. */ val = pci_read_config(dev, PCIR_COMMAND, 2); val |= (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN); pci_write_config(dev, PCIR_COMMAND, val, 2); val = pci_read_config(dev, PCIR_COMMAND, 2); if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { u_int32_t iobase, membase, irq; /* Save important PCI config data. */ iobase = pci_read_config(dev, FXP_PCI_IOBA, 4); membase = pci_read_config(dev, FXP_PCI_MMBA, 4); irq = pci_read_config(dev, PCIR_INTLINE, 4); /* Reset the power state. */ device_printf(dev, "chip is in D%d power mode " "-- setting to D0\n", pci_get_powerstate(dev)); pci_set_powerstate(dev, PCI_POWERSTATE_D0); /* Restore PCI config data. */ pci_write_config(dev, FXP_PCI_IOBA, iobase, 4); pci_write_config(dev, FXP_PCI_MMBA, membase, 4); pci_write_config(dev, PCIR_INTLINE, irq, 4); } /* * Figure out which we should try first - memory mapping or i/o mapping? * We default to memory mapping. Then we accept an override from the * command line. Then we check to see which one is enabled. 
*/ m1 = PCIM_CMD_MEMEN; m2 = PCIM_CMD_PORTEN; ebitmap = 0; if (getenv_int("fxp_iomap", &ebitmap)) { if (ebitmap & (1 << device_get_unit(dev))) { m1 = PCIM_CMD_PORTEN; m2 = PCIM_CMD_MEMEN; } } if (val & m1) { sc->rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; sc->rgd = (m1 == PCIM_CMD_MEMEN)? FXP_PCI_MMBA : FXP_PCI_IOBA; sc->mem = bus_alloc_resource(dev, sc->rtp, &sc->rgd, 0, ~0, 1, RF_ACTIVE); } if (sc->mem == NULL && (val & m2)) { sc->rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; sc->rgd = (m2 == PCIM_CMD_MEMEN)? FXP_PCI_MMBA : FXP_PCI_IOBA; sc->mem = bus_alloc_resource(dev, sc->rtp, &sc->rgd, 0, ~0, 1, RF_ACTIVE); } if (!sc->mem) { device_printf(dev, "could not map device registers\n"); error = ENXIO; goto fail; } if (bootverbose) { device_printf(dev, "using %s space register mapping\n", sc->rtp == SYS_RES_MEMORY? "memory" : "I/O"); } sc->sc_st = rman_get_bustag(sc->mem); sc->sc_sh = rman_get_bushandle(sc->mem); /* * Allocate our interrupt. */ rid = 0; sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE); if (sc->irq == NULL) { device_printf(dev, "could not map interrupt\n"); error = ENXIO; goto fail; } error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET, fxp_intr, sc, &sc->ih); if (error) { device_printf(dev, "could not setup irq\n"); goto fail; } /* Do generic parts of attach. */ if (fxp_attach_common(sc, sc->arpcom.ac_enaddr)) { /* Failed! */ bus_teardown_intr(dev, sc->irq, sc->ih); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq); bus_release_resource(dev, sc->rtp, sc->rgd, sc->mem); error = ENXIO; goto fail; } device_printf(dev, "Ethernet address %6D%s\n", sc->arpcom.ac_enaddr, ":", sc->phy_10Mbps_only ? 
", 10Mbps" : ""); ifp = &sc->arpcom.ac_if; ifp->if_unit = device_get_unit(dev); ifp->if_name = "fxp"; ifp->if_output = ether_output; ifp->if_baudrate = 100000000; ifp->if_init = fxp_init; ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = fxp_ioctl; ifp->if_start = fxp_start; ifp->if_watchdog = fxp_watchdog; /* * Attach the interface. */ ether_ifattach(ifp, ETHER_BPF_SUPPORTED); /* * Let the system queue as many packets as we have available * TX descriptors. */ ifp->if_snd.ifq_maxlen = FXP_NTXCB - 1; FXP_UNLOCK(sc); return 0; fail: FXP_UNLOCK(sc); mtx_destroy(&sc->sc_mtx); return error; } /* * Detach interface. */ static int fxp_detach(device_t dev) { struct fxp_softc *sc = device_get_softc(dev); FXP_LOCK(sc); /* * Close down routes etc. */ ether_ifdetach(&sc->arpcom.ac_if, ETHER_BPF_SUPPORTED); /* * Stop DMA and drop transmit queue. */ fxp_stop(sc); /* * Deallocate resources. */ bus_teardown_intr(dev, sc->irq, sc->ih); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq); bus_release_resource(dev, sc->rtp, sc->rgd, sc->mem); /* * Free all the receive buffers. */ if (sc->rfa_headm != NULL) m_freem(sc->rfa_headm); /* * Free all media structures. */ ifmedia_removeall(&sc->sc_media); /* * Free anciliary structures. */ free(sc->cbl_base, M_DEVBUF); free(sc->fxp_stats, M_DEVBUF); free(sc->mcsp, M_DEVBUF); FXP_UNLOCK(sc); mtx_destroy(&sc->sc_mtx); return 0; } /* * Device shutdown routine. Called at system shutdown after sync. The * main purpose of this routine is to shut off receiver DMA so that * kernel memory doesn't get clobbered during warmboot. */ static int fxp_shutdown(device_t dev) { /* * Make sure that DMA is disabled prior to reboot. Not doing * do could allow DMA to corrupt kernel memory during the * reboot before the driver initializes. */ fxp_stop((struct fxp_softc *) device_get_softc(dev)); return 0; } /* * Device suspend routine. 
Stop the interface and save some PCI * settings in case the BIOS doesn't restore them properly on * resume. */ static int fxp_suspend(device_t dev) { struct fxp_softc *sc = device_get_softc(dev); int i; FXP_LOCK(sc); fxp_stop(sc); for (i=0; i<5; i++) sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i*4, 4); sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4); sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1); sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1); sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1); sc->suspended = 1; FXP_UNLOCK(sc); return 0; } /* * Device resume routine. Restore some PCI settings in case the BIOS * doesn't, re-enable busmastering, and restart the interface if * appropriate. */ static int fxp_resume(device_t dev) { struct fxp_softc *sc = device_get_softc(dev); struct ifnet *ifp = &sc->sc_if; u_int16_t pci_command; int i; FXP_LOCK(sc); /* better way to do this? */ for (i=0; i<5; i++) pci_write_config(dev, PCIR_MAPS + i*4, sc->saved_maps[i], 4); pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4); pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1); pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1); pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1); /* reenable busmastering */ pci_command = pci_read_config(dev, PCIR_COMMAND, 2); pci_command |= (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN); pci_write_config(dev, PCIR_COMMAND, pci_command, 2); CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET); DELAY(10); /* reinitialize interface if necessary */ if (ifp->if_flags & IFF_UP) fxp_init(sc); sc->suspended = 0; FXP_UNLOCK(sc); return 0; } static device_method_t fxp_methods[] = { /* Device interface */ DEVMETHOD(device_probe, fxp_probe), DEVMETHOD(device_attach, fxp_attach), DEVMETHOD(device_detach, fxp_detach), DEVMETHOD(device_shutdown, fxp_shutdown), DEVMETHOD(device_suspend, fxp_suspend), DEVMETHOD(device_resume, fxp_resume), { 0, 0 } }; static driver_t fxp_driver = { "fxp", 
fxp_methods, sizeof(struct fxp_softc), }; static devclass_t fxp_devclass; DRIVER_MODULE(if_fxp, pci, fxp_driver, fxp_devclass, 0, 0); DRIVER_MODULE(if_fxp, cardbus, fxp_driver, fxp_devclass, 0, 0); /* * Do generic parts of attach. */ static int fxp_attach_common(sc, enaddr) struct fxp_softc *sc; u_int8_t *enaddr; { u_int16_t data; int i, nmedia, defmedia; const int *media; /* * Reset to a stable state. */ CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET); DELAY(10); sc->cbl_base = malloc(sizeof(struct fxp_cb_tx) * FXP_NTXCB, M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->cbl_base == NULL) goto fail; sc->fxp_stats = malloc(sizeof(struct fxp_stats), M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->fxp_stats == NULL) goto fail; sc->mcsp = malloc(sizeof(struct fxp_cb_mcs), M_DEVBUF, M_NOWAIT); if (sc->mcsp == NULL) goto fail; /* * Pre-allocate our receive buffers. */ for (i = 0; i < FXP_NRFABUFS; i++) { if (fxp_add_rfabuf(sc, NULL) != 0) { goto fail; } } /* * Find out how large of an SEEPROM we have. */ fxp_autosize_eeprom(sc); /* * Get info about the primary PHY */ fxp_read_eeprom(sc, (u_int16_t *)&data, 6, 1); sc->phy_primary_addr = data & 0xff; sc->phy_primary_device = (data >> 8) & 0x3f; sc->phy_10Mbps_only = data >> 15; /* * Read MAC address. */ fxp_read_eeprom(sc, (u_int16_t *)enaddr, 0, 3); /* * Initialize the media structures. 
*/ media = fxp_media_default; nmedia = sizeof(fxp_media_default) / sizeof(fxp_media_default[0]); defmedia = FXP_MEDIA_DEFAULT_DEFMEDIA; for (i = 0; i < NFXPMEDIA; i++) { if (sc->phy_primary_device == fxp_media[i].fsm_phy) { media = fxp_media[i].fsm_media; nmedia = fxp_media[i].fsm_nmedia; defmedia = fxp_media[i].fsm_defmedia; } } ifmedia_init(&sc->sc_media, 0, fxp_mediachange, fxp_mediastatus); for (i = 0; i < nmedia; i++) { if (IFM_SUBTYPE(media[i]) == IFM_100_TX && sc->phy_10Mbps_only) continue; ifmedia_add(&sc->sc_media, media[i], 0, NULL); } ifmedia_set(&sc->sc_media, defmedia); return (0); fail: printf("fxp%d: Failed to malloc memory\n", FXP_UNIT(sc)); if (sc->cbl_base) free(sc->cbl_base, M_DEVBUF); if (sc->fxp_stats) free(sc->fxp_stats, M_DEVBUF); if (sc->mcsp) free(sc->mcsp, M_DEVBUF); /* frees entire chain */ if (sc->rfa_headm) m_freem(sc->rfa_headm); return (ENOMEM); } /* * From NetBSD: * * Figure out EEPROM size. * * 559's can have either 64-word or 256-word EEPROMs, the 558 * datasheet only talks about 64-word EEPROMs, and the 557 datasheet * talks about the existance of 16 to 256 word EEPROMs. * * The only known sizes are 64 and 256, where the 256 version is used * by CardBus cards to store CIS information. * * The address is shifted in msb-to-lsb, and after the last * address-bit the EEPROM is supposed to output a `dummy zero' bit, * after which follows the actual data. We try to detect this zero, by * probing the data-out bit in the EEPROM control register just after * having shifted in a bit. If the bit is zero, we assume we've * shifted enough address bits. The data-out should be tri-state, * before this, which should translate to a logical one. * * Other ways to do this would be to try to read a register with known * contents with a varying number of address bits, but no such * register seem to be available. The high bits of register 10 are 01 * on the 558 and 559, but apparently not on the 557. 
* * The Linux driver computes a checksum on the EEPROM data, but the * value of this checksum is not very well documented. */ static void fxp_autosize_eeprom(sc) struct fxp_softc *sc; { u_int16_t reg; int x; CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); /* * Shift in read opcode. */ for (x = 3; x > 0; x--) { if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) { reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI; } else { reg = FXP_EEPROM_EECS; } CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK); DELAY(1); CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); DELAY(1); } /* * Shift in address. * Wait for the dummy zero following a correct address shift. */ for (x = 1; x <= 8; x++) { CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS | FXP_EEPROM_EESK); DELAY(1); if ((CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO) == 0) break; CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); DELAY(1); } CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); DELAY(1); sc->eeprom_size = x; } /* * Read from the serial EEPROM. Basically, you manually shift in * the read opcode (one bit at a time) and then shift in the address, * and then you shift out the data (all of this one bit at a time). * The word size is 16 bits, so you have to provide the address for * every 16 bits of data. */ static void fxp_read_eeprom(sc, data, offset, words) struct fxp_softc *sc; u_short *data; int offset; int words; { u_int16_t reg; int i, x; for (i = 0; i < words; i++) { CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS); /* * Shift in read opcode. */ for (x = 3; x > 0; x--) { if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) { reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI; } else { reg = FXP_EEPROM_EECS; } CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK); DELAY(1); CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); DELAY(1); } /* * Shift in address. 
*/ for (x = sc->eeprom_size; x > 0; x--) { if ((i + offset) & (1 << (x - 1))) { reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI; } else { reg = FXP_EEPROM_EECS; } CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK); DELAY(1); CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); DELAY(1); } reg = FXP_EEPROM_EECS; data[i] = 0; /* * Shift out data. */ for (x = 16; x > 0; x--) { CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK); DELAY(1); if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO) data[i] |= (1 << (x - 1)); CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg); DELAY(1); } CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0); DELAY(1); } } /* * Start packet transmission on the interface. */ static void fxp_start(ifp) struct ifnet *ifp; { struct fxp_softc *sc = ifp->if_softc; struct fxp_cb_tx *txp; FXP_LOCK(sc); /* * See if we need to suspend xmit until the multicast filter * has been reprogrammed (which can only be done at the head * of the command chain). */ if (sc->need_mcsetup) { FXP_UNLOCK(sc); return; } txp = NULL; /* * We're finished if there is nothing more to add to the list or if * we're all filled up with buffers to transmit. * NOTE: One TxCB is reserved to guarantee that fxp_mc_setup() can add * a NOP command when needed. */ while (ifp->if_snd.ifq_head != NULL && sc->tx_queued < FXP_NTXCB - 1) { struct mbuf *m, *mb_head; int segment; /* * Grab a packet to transmit. */ IF_DEQUEUE(&ifp->if_snd, mb_head); /* * Get pointer to next available tx desc. */ txp = sc->cbl_last->next; /* * Go through each of the mbufs in the chain and initialize * the transmit buffer descriptors with the physical address * and size of the mbuf. 
*/ tbdinit: for (m = mb_head, segment = 0; m != NULL; m = m->m_next) { if (m->m_len != 0) { if (segment == FXP_NTXSEG) break; txp->tbd[segment].tb_addr = vtophys(mtod(m, vm_offset_t)); txp->tbd[segment].tb_size = m->m_len; segment++; } } if (m != NULL) { struct mbuf *mn; /* * We ran out of segments. We have to recopy this mbuf * chain first. Bail out if we can't get the new buffers. */ MGETHDR(mn, M_DONTWAIT, MT_DATA); if (mn == NULL) { m_freem(mb_head); break; } if (mb_head->m_pkthdr.len > MHLEN) { MCLGET(mn, M_DONTWAIT); if ((mn->m_flags & M_EXT) == 0) { m_freem(mn); m_freem(mb_head); break; } } m_copydata(mb_head, 0, mb_head->m_pkthdr.len, mtod(mn, caddr_t)); mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len; m_freem(mb_head); mb_head = mn; goto tbdinit; } txp->tbd_number = segment; txp->mb_head = mb_head; txp->cb_status = 0; if (sc->tx_queued != FXP_CXINT_THRESH - 1) { txp->cb_command = FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF | FXP_CB_COMMAND_S; } else { txp->cb_command = FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I; /* * Set a 5 second timer just in case we don't hear from the * card again. */ ifp->if_timer = 5; } txp->tx_threshold = tx_threshold; /* * Advance the end of list forward. */ #ifdef __alpha__ /* * On platforms which can't access memory in 16-bit * granularities, we must prevent the card from DMA'ing * up the status while we update the command field. * This could cause us to overwrite the completion status. */ atomic_clear_short(&sc->cbl_last->cb_command, FXP_CB_COMMAND_S); #else sc->cbl_last->cb_command &= ~FXP_CB_COMMAND_S; #endif /*__alpha__*/ sc->cbl_last = txp; /* * Advance the beginning of the list forward if there are * no other packets queued (when nothing is queued, cbl_first * sits on the last TxCB that was sent out). */ if (sc->tx_queued == 0) sc->cbl_first = txp; sc->tx_queued++; /* * Pass packet to bpf if there is a listener. */ if (ifp->if_bpf) bpf_mtap(ifp, mb_head); } /* * We're finished. 
If we added to the list, issue a RESUME to get DMA * going again if suspended. */ if (txp != NULL) { fxp_scb_wait(sc); CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_RESUME); } FXP_UNLOCK(sc); } /* * Process interface interrupts. */ static void fxp_intr(arg) void *arg; { struct fxp_softc *sc = arg; struct ifnet *ifp = &sc->sc_if; u_int8_t statack; FXP_LOCK(sc); if (sc->suspended) { FXP_UNLOCK(sc); return; } while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) { /* * First ACK all the interrupts in this pass. */ CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack); /* * Free any finished transmit mbuf chains. * * Handle the CNA event likt a CXTNO event. It used to * be that this event (control unit not ready) was not * encountered, but it is now with the SMPng modifications. * The exact sequence of events that occur when the interface * is brought up are different now, and if this event * goes unhandled, the configuration/rxfilter setup sequence * can stall for several seconds. The result is that no * packets go out onto the wire for about 5 to 10 seconds * after the interface is ifconfig'ed for the first time. */ if (statack & (FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA)) { struct fxp_cb_tx *txp; for (txp = sc->cbl_first; sc->tx_queued && (txp->cb_status & FXP_CB_STATUS_C) != 0; txp = txp->next) { if (txp->mb_head != NULL) { m_freem(txp->mb_head); txp->mb_head = NULL; } sc->tx_queued--; } sc->cbl_first = txp; ifp->if_timer = 0; if (sc->tx_queued == 0) { if (sc->need_mcsetup) fxp_mc_setup(sc); } /* * Try to start more packets transmitting. */ if (ifp->if_snd.ifq_head != NULL) fxp_start(ifp); } /* * Process receiver interrupts. If a no-resource (RNR) * condition exists, get whatever packets we can and * re-start the receiver. 
*/ if (statack & (FXP_SCB_STATACK_FR | FXP_SCB_STATACK_RNR)) { struct mbuf *m; struct fxp_rfa *rfa; rcvloop: m = sc->rfa_headm; rfa = (struct fxp_rfa *)(m->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE); if (rfa->rfa_status & FXP_RFA_STATUS_C) { /* * Remove first packet from the chain. */ sc->rfa_headm = m->m_next; m->m_next = NULL; /* * Add a new buffer to the receive chain. * If this fails, the old buffer is recycled * instead. */ if (fxp_add_rfabuf(sc, m) == 0) { struct ether_header *eh; int total_len; total_len = rfa->actual_size & (MCLBYTES - 1); if (total_len < sizeof(struct ether_header)) { m_freem(m); goto rcvloop; } m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = total_len; eh = mtod(m, struct ether_header *); m->m_data += sizeof(struct ether_header); m->m_len -= sizeof(struct ether_header); m->m_pkthdr.len = m->m_len; ether_input(ifp, eh, m); } goto rcvloop; } if (statack & FXP_SCB_STATACK_RNR) { fxp_scb_wait(sc); CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(sc->rfa_headm->m_ext.ext_buf) + RFA_ALIGNMENT_FUDGE); CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_RU_START); } } } FXP_UNLOCK(sc); } /* * Update packet in/out/collision statistics. The i82557 doesn't * allow you to access these counters without doing a fairly * expensive DMA to get _all_ of the statistics it maintains, so * we do this operation here only once per second. The statistics * counters in the kernel are updated from the previous dump-stats * DMA and then a new dump-stats DMA is started. The on-chip * counters are zeroed when the DMA completes. If we can't start * the DMA immediately, we don't wait - we just prepare to read * them again next time. 
*/ static void fxp_stats_update(arg) void *arg; { struct fxp_softc *sc = arg; struct ifnet *ifp = &sc->sc_if; struct fxp_stats *sp = sc->fxp_stats; struct fxp_cb_tx *txp; ifp->if_opackets += sp->tx_good; ifp->if_collisions += sp->tx_total_collisions; if (sp->rx_good) { ifp->if_ipackets += sp->rx_good; sc->rx_idle_secs = 0; } else { /* * Receiver's been idle for another second. */ sc->rx_idle_secs++; } ifp->if_ierrors += sp->rx_crc_errors + sp->rx_alignment_errors + sp->rx_rnr_errors + sp->rx_overrun_errors; /* * If any transmit underruns occured, bump up the transmit * threshold by another 512 bytes (64 * 8). */ if (sp->tx_underruns) { ifp->if_oerrors += sp->tx_underruns; if (tx_threshold < 192) tx_threshold += 64; } FXP_LOCK(sc); /* * Release any xmit buffers that have completed DMA. This isn't * strictly necessary to do here, but it's advantagous for mbufs * with external storage to be released in a timely manner rather * than being defered for a potentially long time. This limits * the delay to a maximum of one second. */ for (txp = sc->cbl_first; sc->tx_queued && (txp->cb_status & FXP_CB_STATUS_C) != 0; txp = txp->next) { if (txp->mb_head != NULL) { m_freem(txp->mb_head); txp->mb_head = NULL; } sc->tx_queued--; } sc->cbl_first = txp; /* * If we haven't received any packets in FXP_MAC_RX_IDLE seconds, * then assume the receiver has locked up and attempt to clear * the condition by reprogramming the multicast filter. This is * a work-around for a bug in the 82557 where the receiver locks * up if it gets certain types of garbage in the syncronization * bits prior to the packet header. This bug is supposed to only * occur in 10Mbps mode, but has been seen to occur in 100Mbps * mode as well (perhaps due to a 10/100 speed transition). */ if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) { sc->rx_idle_secs = 0; fxp_mc_setup(sc); } /* * If there is no pending command, start another stats * dump. Otherwise punt for now. 
*/ if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) { /* * Start another stats dump. */ CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_DUMPRESET); } else { /* * A previous command is still waiting to be accepted. * Just zero our copy of the stats and wait for the * next timer event to update them. */ sp->tx_good = 0; sp->tx_underruns = 0; sp->tx_total_collisions = 0; sp->rx_good = 0; sp->rx_crc_errors = 0; sp->rx_alignment_errors = 0; sp->rx_rnr_errors = 0; sp->rx_overrun_errors = 0; } FXP_UNLOCK(sc); /* * Schedule another timeout one second from now. */ sc->stat_ch = timeout(fxp_stats_update, sc, hz); } /* * Stop the interface. Cancels the statistics updater and resets * the interface. */ static void fxp_stop(sc) struct fxp_softc *sc; { struct ifnet *ifp = &sc->sc_if; struct fxp_cb_tx *txp; int i; FXP_LOCK(sc); ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); ifp->if_timer = 0; /* * Cancel stats updater. */ untimeout(fxp_stats_update, sc, sc->stat_ch); /* * Issue software reset */ CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET); DELAY(10); /* * Release any xmit buffers. */ txp = sc->cbl_base; if (txp != NULL) { for (i = 0; i < FXP_NTXCB; i++) { if (txp[i].mb_head != NULL) { m_freem(txp[i].mb_head); txp[i].mb_head = NULL; } } } sc->tx_queued = 0; /* * Free all the receive buffers then reallocate/reinitialize */ if (sc->rfa_headm != NULL) m_freem(sc->rfa_headm); sc->rfa_headm = NULL; sc->rfa_tailm = NULL; for (i = 0; i < FXP_NRFABUFS; i++) { if (fxp_add_rfabuf(sc, NULL) != 0) { /* * This "can't happen" - we're at splimp() * and we just freed all the buffers we need * above. */ panic("fxp_stop: no buffers!"); } } FXP_UNLOCK(sc); } /* * Watchdog/transmission transmit timeout handler. Called when a * transmission is started on the interface, but no interrupt is * received before the timeout. This usually indicates that the * card has wedged for some reason. 
*/ static void fxp_watchdog(ifp) struct ifnet *ifp; { struct fxp_softc *sc = ifp->if_softc; printf("fxp%d: device timeout\n", FXP_UNIT(sc)); ifp->if_oerrors++; fxp_init(sc); } static void fxp_init(xsc) void *xsc; { struct fxp_softc *sc = xsc; struct ifnet *ifp = &sc->sc_if; struct fxp_cb_config *cbp; struct fxp_cb_ias *cb_ias; struct fxp_cb_tx *txp; int i, prm; FXP_LOCK(sc); /* * Cancel any pending I/O */ fxp_stop(sc); prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0; /* * Initialize base of CBL and RFA memory. Loading with zero * sets it up for regular linear addressing. */ CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0); CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_BASE); fxp_scb_wait(sc); CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_RU_BASE); /* * Initialize base of dump-stats buffer. */ fxp_scb_wait(sc); CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(sc->fxp_stats)); CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_DUMP_ADR); /* * We temporarily use memory that contains the TxCB list to * construct the config CB. The TxCB list memory is rebuilt * later. */ cbp = (struct fxp_cb_config *) sc->cbl_base; /* * This bcopy is kind of disgusting, but there are a bunch of must be * zero and must be one bits in this structure and this is the easiest * way to initialize them all to proper values. 
*/ bcopy(fxp_cb_config_template, (void *)(uintptr_t)(volatile void *)&cbp->cb_status, sizeof(fxp_cb_config_template)); cbp->cb_status = 0; cbp->cb_command = FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL; cbp->link_addr = -1; /* (no) next command */ cbp->byte_count = 22; /* (22) bytes to config */ cbp->rx_fifo_limit = 8; /* rx fifo threshold (32 bytes) */ cbp->tx_fifo_limit = 0; /* tx fifo threshold (0 bytes) */ cbp->adaptive_ifs = 0; /* (no) adaptive interframe spacing */ cbp->rx_dma_bytecount = 0; /* (no) rx DMA max */ cbp->tx_dma_bytecount = 0; /* (no) tx DMA max */ cbp->dma_bce = 0; /* (disable) dma max counters */ cbp->late_scb = 0; /* (don't) defer SCB update */ cbp->tno_int = 0; /* (disable) tx not okay interrupt */ cbp->ci_int = 1; /* interrupt on CU idle */ cbp->save_bf = prm; /* save bad frames */ cbp->disc_short_rx = !prm; /* discard short packets */ cbp->underrun_retry = 1; /* retry mode (1) on DMA underrun */ cbp->mediatype = !sc->phy_10Mbps_only; /* interface mode */ cbp->nsai = 1; /* (don't) disable source addr insert */ cbp->preamble_length = 2; /* (7 byte) preamble */ cbp->loopback = 0; /* (don't) loopback */ cbp->linear_priority = 0; /* (normal CSMA/CD operation) */ cbp->linear_pri_mode = 0; /* (wait after xmit only) */ cbp->interfrm_spacing = 6; /* (96 bits of) interframe spacing */ cbp->promiscuous = prm; /* promiscuous mode */ cbp->bcast_disable = 0; /* (don't) disable broadcasts */ cbp->crscdt = 0; /* (CRS only) */ cbp->stripping = !prm; /* truncate rx packet to byte count */ cbp->padding = 1; /* (do) pad short tx packets */ cbp->rcv_crc_xfer = 0; /* (don't) xfer CRC to host */ cbp->force_fdx = 0; /* (don't) force full duplex */ cbp->fdx_pin_en = 1; /* (enable) FDX# pin */ cbp->multi_ia = 0; /* (don't) accept multiple IAs */ cbp->mc_all = sc->all_mcasts;/* accept all multicasts */ /* * Start the config command/DMA. 
*/ fxp_scb_wait(sc); CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(&cbp->cb_status)); CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START); /* ...and wait for it to complete. */ fxp_dma_wait(&cbp->cb_status, sc); /* * Now initialize the station address. Temporarily use the TxCB * memory area like we did above for the config CB. */ cb_ias = (struct fxp_cb_ias *) sc->cbl_base; cb_ias->cb_status = 0; cb_ias->cb_command = FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL; cb_ias->link_addr = -1; bcopy(sc->arpcom.ac_enaddr, (void *)(uintptr_t)(volatile void *)cb_ias->macaddr, sizeof(sc->arpcom.ac_enaddr)); /* * Start the IAS (Individual Address Setup) command/DMA. */ fxp_scb_wait(sc); CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START); /* ...and wait for it to complete. */ fxp_dma_wait(&cb_ias->cb_status, sc); /* * Initialize transmit control block (TxCB) list. */ txp = sc->cbl_base; bzero(txp, sizeof(struct fxp_cb_tx) * FXP_NTXCB); for (i = 0; i < FXP_NTXCB; i++) { txp[i].cb_status = FXP_CB_STATUS_C | FXP_CB_STATUS_OK; txp[i].cb_command = FXP_CB_COMMAND_NOP; txp[i].link_addr = vtophys(&txp[(i + 1) & FXP_TXCB_MASK].cb_status); txp[i].tbd_array_addr = vtophys(&txp[i].tbd[0]); txp[i].next = &txp[(i + 1) & FXP_TXCB_MASK]; } /* * Set the suspend flag on the first TxCB and start the control * unit. It will execute the NOP and then suspend. */ txp->cb_command = FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S; sc->cbl_first = sc->cbl_last = txp; sc->tx_queued = 1; fxp_scb_wait(sc); CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START); /* * Initialize receiver buffer area - RFA. */ fxp_scb_wait(sc); CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(sc->rfa_headm->m_ext.ext_buf) + RFA_ALIGNMENT_FUDGE); CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_RU_START); /* * Set current media. */ fxp_set_media(sc, sc->sc_media.ifm_cur->ifm_media); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; FXP_UNLOCK(sc); /* * Start stats updater. 
*/ sc->stat_ch = timeout(fxp_stats_update, sc, hz); } static void fxp_set_media(sc, media) struct fxp_softc *sc; int media; { switch (sc->phy_primary_device) { case FXP_PHY_DP83840: case FXP_PHY_DP83840A: fxp_mdi_write(sc, sc->phy_primary_addr, FXP_DP83840_PCR, fxp_mdi_read(sc, sc->phy_primary_addr, FXP_DP83840_PCR) | FXP_DP83840_PCR_LED4_MODE | /* LED4 always indicates duplex */ FXP_DP83840_PCR_F_CONNECT | /* force link disconnect bypass */ FXP_DP83840_PCR_BIT10); /* XXX I have no idea */ /* fall through */ case FXP_PHY_82553A: case FXP_PHY_82553C: /* untested */ case FXP_PHY_82555: case FXP_PHY_82555B: if (IFM_SUBTYPE(media) != IFM_AUTO) { int flags; flags = (IFM_SUBTYPE(media) == IFM_100_TX) ? FXP_PHY_BMCR_SPEED_100M : 0; flags |= (media & IFM_FDX) ? FXP_PHY_BMCR_FULLDUPLEX : 0; fxp_mdi_write(sc, sc->phy_primary_addr, FXP_PHY_BMCR, (fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_BMCR) & ~(FXP_PHY_BMCR_AUTOEN | FXP_PHY_BMCR_SPEED_100M | FXP_PHY_BMCR_FULLDUPLEX)) | flags); } else { fxp_mdi_write(sc, sc->phy_primary_addr, FXP_PHY_BMCR, (fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_BMCR) | FXP_PHY_BMCR_AUTOEN)); } break; /* * The Seeq 80c24 doesn't have a PHY programming interface, so do * nothing. */ case FXP_PHY_80C24: break; default: printf("fxp%d: warning: unsupported PHY, type = %d, addr = %d\n", FXP_UNIT(sc), sc->phy_primary_device, sc->phy_primary_addr); } } /* * Change media according to request. */ int fxp_mediachange(ifp) struct ifnet *ifp; { struct fxp_softc *sc = ifp->if_softc; struct ifmedia *ifm = &sc->sc_media; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); fxp_set_media(sc, ifm->ifm_media); return (0); } /* * Notify the world which media we're using. 
*/ void fxp_mediastatus(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct fxp_softc *sc = ifp->if_softc; int flags, stsflags; switch (sc->phy_primary_device) { case FXP_PHY_82555: case FXP_PHY_82555B: case FXP_PHY_DP83840: case FXP_PHY_DP83840A: ifmr->ifm_status = IFM_AVALID; /* IFM_ACTIVE will be valid */ ifmr->ifm_active = IFM_ETHER; /* * the following is not an error. * You need to read this register twice to get current * status. This is correct documented behaviour, the * first read gets latched values. */ stsflags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_STS); stsflags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_STS); if (stsflags & FXP_PHY_STS_LINK_STS) ifmr->ifm_status |= IFM_ACTIVE; /* * If we are in auto mode, then try report the result. */ flags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_BMCR); if (flags & FXP_PHY_BMCR_AUTOEN) { ifmr->ifm_active |= IFM_AUTO; /* XXX presently 0 */ if (stsflags & FXP_PHY_STS_AUTO_DONE) { /* * Intel and National parts report * differently on what they found. */ if ((sc->phy_primary_device == FXP_PHY_82555) || (sc->phy_primary_device == FXP_PHY_82555B)) { flags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_USC); if (flags & FXP_PHY_USC_SPEED) ifmr->ifm_active |= IFM_100_TX; else ifmr->ifm_active |= IFM_10_T; if (flags & FXP_PHY_USC_DUPLEX) ifmr->ifm_active |= IFM_FDX; } else { /* it's National. only know speed */ flags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_DP83840_PAR); if (flags & FXP_DP83840_PAR_SPEED_10) ifmr->ifm_active |= IFM_10_T; else ifmr->ifm_active |= IFM_100_TX; } } } else { /* in manual mode.. just report what we were set to */ if (flags & FXP_PHY_BMCR_SPEED_100M) ifmr->ifm_active |= IFM_100_TX; else ifmr->ifm_active |= IFM_10_T; if (flags & FXP_PHY_BMCR_FULLDUPLEX) ifmr->ifm_active |= IFM_FDX; } break; case FXP_PHY_80C24: default: ifmr->ifm_active = IFM_ETHER|IFM_MANUAL; /* XXX IFM_AUTO ? */ } } /* * Add a buffer to the end of the RFA buffer list. 
* Return 0 if successful, 1 for failure. A failure results in * adding the 'oldm' (if non-NULL) on to the end of the list - * tossing out its old contents and recycling it. * The RFA struct is stuck at the beginning of mbuf cluster and the * data pointer is fixed up to point just past it. */ static int fxp_add_rfabuf(sc, oldm) struct fxp_softc *sc; struct mbuf *oldm; { u_int32_t v; struct mbuf *m; struct fxp_rfa *rfa, *p_rfa; MGETHDR(m, M_DONTWAIT, MT_DATA); if (m != NULL) { MCLGET(m, M_DONTWAIT); if ((m->m_flags & M_EXT) == 0) { m_freem(m); if (oldm == NULL) return 1; m = oldm; m->m_data = m->m_ext.ext_buf; } } else { if (oldm == NULL) return 1; m = oldm; m->m_data = m->m_ext.ext_buf; } /* * Move the data pointer up so that the incoming data packet * will be 32-bit aligned. */ m->m_data += RFA_ALIGNMENT_FUDGE; /* * Get a pointer to the base of the mbuf cluster and move * data start past it. */ rfa = mtod(m, struct fxp_rfa *); m->m_data += sizeof(struct fxp_rfa); rfa->size = (u_int16_t)(MCLBYTES - sizeof(struct fxp_rfa) - RFA_ALIGNMENT_FUDGE); /* * Initialize the rest of the RFA. Note that since the RFA * is misaligned, we cannot store values directly. Instead, * we use an optimized, inline copy. */ rfa->rfa_status = 0; rfa->rfa_control = FXP_RFA_CONTROL_EL; rfa->actual_size = 0; v = -1; fxp_lwcopy(&v, (volatile u_int32_t *) rfa->link_addr); fxp_lwcopy(&v, (volatile u_int32_t *) rfa->rbd_addr); /* * If there are other buffers already on the list, attach this * one to the end by fixing up the tail to point to this one. 
*/ if (sc->rfa_headm != NULL) { p_rfa = (struct fxp_rfa *) (sc->rfa_tailm->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE); sc->rfa_tailm->m_next = m; v = vtophys(rfa); fxp_lwcopy(&v, (volatile u_int32_t *) p_rfa->link_addr); p_rfa->rfa_control = 0; } else { sc->rfa_headm = m; } sc->rfa_tailm = m; return (m == oldm); } static volatile int fxp_mdi_read(sc, phy, reg) struct fxp_softc *sc; int phy; int reg; { int count = 10000; int value; CSR_WRITE_4(sc, FXP_CSR_MDICONTROL, (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21)); while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0 && count--) DELAY(10); if (count <= 0) printf("fxp%d: fxp_mdi_read: timed out\n", FXP_UNIT(sc)); return (value & 0xffff); } static void fxp_mdi_write(sc, phy, reg, value) struct fxp_softc *sc; int phy; int reg; int value; { int count = 10000; CSR_WRITE_4(sc, FXP_CSR_MDICONTROL, (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) | (value & 0xffff)); while((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 && count--) DELAY(10); if (count <= 0) printf("fxp%d: fxp_mdi_write: timed out\n", FXP_UNIT(sc)); } static int fxp_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct fxp_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; int error = 0; FXP_LOCK(sc); switch (command) { case SIOCSIFADDR: case SIOCGIFADDR: case SIOCSIFMTU: error = ether_ioctl(ifp, command, data); break; case SIOCSIFFLAGS: sc->all_mcasts = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0; /* * If interface is marked up and not running, then start it. * If it is marked down and running, stop it. * XXX If it's up then re-initialize it. This is so flags * such as IFF_PROMISC are handled. */ if (ifp->if_flags & IFF_UP) { fxp_init(sc); } else { if (ifp->if_flags & IFF_RUNNING) fxp_stop(sc); } break; case SIOCADDMULTI: case SIOCDELMULTI: sc->all_mcasts = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0; /* * Multicast list has changed; set the hardware filter * accordingly. 
*/ if (!sc->all_mcasts) fxp_mc_setup(sc); /* * fxp_mc_setup() can turn on sc->all_mcasts, so check it * again rather than else {}. */ if (sc->all_mcasts) fxp_init(sc); error = 0; break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command); break; default: error = EINVAL; } FXP_UNLOCK(sc); return (error); } /* * Program the multicast filter. * * We have an artificial restriction that the multicast setup command * must be the first command in the chain, so we take steps to ensure * this. By requiring this, it allows us to keep up the performance of * the pre-initialized command ring (esp. link pointers) by not actually * inserting the mcsetup command in the ring - i.e. its link pointer * points to the TxCB ring, but the mcsetup descriptor itself is not part * of it. We then can do 'CU_START' on the mcsetup descriptor and have it * lead into the regular TxCB ring when it completes. * * This function must be called at splimp. */ static void fxp_mc_setup(sc) struct fxp_softc *sc; { struct fxp_cb_mcs *mcsp = sc->mcsp; struct ifnet *ifp = &sc->sc_if; struct ifmultiaddr *ifma; int nmcasts; int count; /* * If there are queued commands, we must wait until they are all * completed. If we are already waiting, then add a NOP command * with interrupt option so that we're notified when all commands * have been completed - fxp_start() ensures that no additional * TX commands will be added when need_mcsetup is true. */ if (sc->tx_queued) { struct fxp_cb_tx *txp; /* * need_mcsetup will be true if we are already waiting for the * NOP command to be completed (see below). In this case, bail. */ if (sc->need_mcsetup) return; sc->need_mcsetup = 1; /* * Add a NOP command with interrupt so that we are notified when all * TX commands have been processed. */ txp = sc->cbl_last->next; txp->mb_head = NULL; txp->cb_status = 0; txp->cb_command = FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I; /* * Advance the end of list forward. 
*/ sc->cbl_last->cb_command &= ~FXP_CB_COMMAND_S; sc->cbl_last = txp; sc->tx_queued++; /* * Issue a resume in case the CU has just suspended. */ fxp_scb_wait(sc); CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_RESUME); /* * Set a 5 second timer just in case we don't hear from the * card again. */ ifp->if_timer = 5; return; } sc->need_mcsetup = 0; /* * Initialize multicast setup descriptor. */ mcsp->next = sc->cbl_base; mcsp->mb_head = NULL; mcsp->cb_status = 0; mcsp->cb_command = FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I; mcsp->link_addr = vtophys(&sc->cbl_base->cb_status); nmcasts = 0; if (!sc->all_mcasts) { - for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; - ifma = ifma->ifma_link.le_next) { + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (nmcasts >= MAXMCADDR) { sc->all_mcasts = 1; nmcasts = 0; break; } bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), (void *)(uintptr_t)(volatile void *) &sc->mcsp->mc_addr[nmcasts][0], 6); nmcasts++; } } mcsp->mc_cnt = nmcasts * 6; sc->cbl_first = sc->cbl_last = (struct fxp_cb_tx *) mcsp; sc->tx_queued = 1; /* * Wait until command unit is not active. This should never * be the case when nothing is queued, but make sure anyway. */ count = 100; while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) == FXP_SCB_CUS_ACTIVE && --count) DELAY(10); if (count == 0) { printf("fxp%d: command queue timeout\n", FXP_UNIT(sc)); return; } /* * Start the multicast setup command. */ fxp_scb_wait(sc); CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(&mcsp->cb_status)); CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START); ifp->if_timer = 2; return; } Index: head/sys/dev/lnc/if_lnc.c =================================================================== --- head/sys/dev/lnc/if_lnc.c (revision 71961) +++ head/sys/dev/lnc/if_lnc.c (revision 71962) @@ -1,1571 +1,1570 @@ /*- * Copyright (c) 1994-2000 * Paul Richards. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * verbatim and that no modifications are made prior to this * point in the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name Paul Richards may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY PAUL RICHARDS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL PAUL RICHARDS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* #define DIAGNOSTIC #define DEBUG * * TODO ---- * * Check all the XXX comments -- some of them are just things I've left * unfinished rather than "difficult" problems that were hacked around. * * Check log settings. * * Check how all the arpcom flags get set and used. * * Re-inline and re-static all routines after debugging. * * Remember to assign iobase in SHMEM probe routines. 
* * Replace all occurences of LANCE-controller-card etc in prints by the name * strings of the appropriate type -- nifty window dressing * * Add DEPCA support -- mostly done. * */ #include "opt_inet.h" /* Some defines that should really be in generic locations */ #define FCS_LEN 4 #define MULTICAST_FILTER_LEN 8 #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include devclass_t lnc_devclass; static char const * const nic_ident[] = { "Unknown", "BICC", "NE2100", "DEPCA", "CNET98S", /* PC-98 */ }; static char const * const ic_ident[] = { "Unknown", "LANCE", "C-LANCE", "PCnet-ISA", "PCnet-ISA+", "PCnet-ISA II", "PCnet-32 VL-Bus", "PCnet-PCI", "PCnet-PCI II", "PCnet-FAST", "PCnet-FAST+", "PCnet-Home", }; static void lnc_setladrf __P((struct lnc_softc *sc)); static void lnc_reset __P((struct lnc_softc *sc)); static void lnc_free_mbufs __P((struct lnc_softc *sc)); static __inline int alloc_mbuf_cluster __P((struct lnc_softc *sc, struct host_ring_entry *desc)); static __inline struct mbuf *chain_mbufs __P((struct lnc_softc *sc, int start_of_packet, int pkt_len)); static __inline struct mbuf *mbuf_packet __P((struct lnc_softc *sc, int start_of_packet, int pkt_len)); static __inline void lnc_rint __P((struct lnc_softc *sc)); static __inline void lnc_tint __P((struct lnc_softc *sc)); static void lnc_init __P((void *)); static __inline int mbuf_to_buffer __P((struct mbuf *m, char *buffer)); static __inline struct mbuf *chain_to_cluster __P((struct mbuf *m)); static void lnc_start __P((struct ifnet *ifp)); static int lnc_ioctl __P((struct ifnet *ifp, u_long command, caddr_t data)); static void lnc_watchdog __P((struct ifnet *ifp)); #ifdef DEBUG void lnc_dump_state __P((struct lnc_softc *sc)); void mbuf_dump_chain __P((struct mbuf *m)); #endif void write_csr(struct lnc_softc *, u_short, u_short); u_short read_csr(struct lnc_softc 
*, u_short);
void lnc_release_resources(device_t);

/*
 * Read a controller CSR: latch the register number into the Register
 * Address Port (RAP), then read the value back through the Register
 * Data Port (RDP).
 */
u_short
read_csr(struct lnc_softc *sc, u_short port)
{
	bus_space_write_2(sc->lnc_btag, sc->lnc_bhandle, sc->rap, port);
	return(bus_space_read_2(sc->lnc_btag, sc->lnc_bhandle, sc->rdp));
}

/*
 * Write a controller CSR: latch the register number into RAP, then
 * write the value through RDP.
 */
void
write_csr(struct lnc_softc *sc, u_short port, u_short val)
{
	bus_space_write_2(sc->lnc_btag, sc->lnc_bhandle, sc->rap, port);
	bus_space_write_2(sc->lnc_btag, sc->lnc_bhandle, sc->rdp, val);
}

/*
 * Re-express the classic port-I/O helpers as bus-space accesses relative
 * to this softc's tag/handle.  A variable named "sc" must be in scope
 * wherever these macros are used.
 */
#undef inb
#define inb(port) bus_space_read_1(sc->lnc_btag, sc->lnc_bhandle, port)
#define inw(port) bus_space_read_2(sc->lnc_btag, sc->lnc_bhandle, port)
#define outw(port, val) bus_space_write_2(sc->lnc_btag, sc->lnc_bhandle, port, val)

/* Write a bus configuration register: select via RAP, write via BDP. */
static __inline void
write_bcr(struct lnc_softc *sc, u_short port, u_short val)
{
	outw(sc->rap, port);
	outw(sc->bdp, val);
}

/* Read a bus configuration register: select via RAP, read via BDP. */
static __inline u_short
read_bcr(struct lnc_softc *sc, u_short port)
{
	outw(sc->rap, port);
	return (inw(sc->bdp));
}

/*
 * Reflected CRC-32 (polynomial 0xEDB88320) over the 6-byte hardware
 * address; the caller uses the top 6 bits of the result to index the
 * 64-bit logical address filter.  Note the inner loop reuses
 * MULTICAST_FILTER_LEN (8) as a bits-per-byte count.
 */
static __inline u_long
ether_crc(const u_char *ether_addr)
{
#define POLYNOMIAL 0xEDB88320UL
	u_char i, j, addr;
	u_int crc = 0xFFFFFFFFUL;

	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		addr = *ether_addr++;
		for (j = 0; j < MULTICAST_FILTER_LEN; j++) {
			crc = (crc >> 1) ^ (((crc ^ addr) & 1) ? POLYNOMIAL : 0);
			addr >>= 1;
		}
	}
	return crc;
#undef POLYNOMIAL
}

/*
 * Release the bus resources (interrupt, I/O port, DRQ, DMA tag/map/memory)
 * acquired at attach time.  Safe with partially-allocated state: each
 * resource is released only if its handle is non-NULL.
 */
void
lnc_release_resources(device_t dev)
{
	lnc_softc_t *sc = device_get_softc(dev);

	if (sc->irqres) {
		bus_teardown_intr(dev, sc->irqres, sc->intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, sc->irqrid, sc->irqres);
	}

	if (sc->portres)
		bus_release_resource(dev, SYS_RES_IOPORT, sc->portrid, sc->portres);

	if (sc->drqres)
		bus_release_resource(dev, SYS_RES_DRQ, sc->drqrid, sc->drqres);

	if (sc->dmat) {
		if (sc->dmamap) {
			bus_dmamap_unload(sc->dmat, sc->dmamap);
			bus_dmamem_free(sc->dmat, sc->recv_ring, sc->dmamap);
		}
		bus_dma_tag_destroy(sc->dmat);
	}
}

/*
 * Set up the logical address filter for multicast packets
 */
static __inline void
lnc_setladrf(struct lnc_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ifmultiaddr *ifma;
	u_long index;
	int i;

	/*
	 * NOTE(review): IFF_ALLMULTI is an if_flags bit, yet it is tested
	 * here against sc->flags, which elsewhere carries LNC_* bits such
	 * as LNC_ALLMULTI (see lnc_ioctl) -- looks suspect; confirm intent.
	 */
	if (sc->flags & IFF_ALLMULTI) {
		for (i=0; i < MULTICAST_FILTER_LEN; i++)
			sc->init_block->ladrf[i] = 0xFF;
		return;
	}

	/*
	 * For each multicast address, calculate a crc for that address and
	 * then use the high order 6 bits of the crc as a hash code where
	 * bits 3-5 select the byte of the address filter and bits 0-2 select
	 * the bit within that byte.
	 */

	bzero(sc->init_block->ladrf, MULTICAST_FILTER_LEN);
-	for (ifma = ifp->if_multiaddrs.lh_first; ifma;
-	    ifma = ifma->ifma_link.le_next) {
+	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		index = ether_crc(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)) >> 26;
		sc->init_block->ladrf[index >> 3] |= 1 << (index & 7);
	}
}

/* Halt the controller by setting the STOP bit in CSR0. */
void
lnc_stop(struct lnc_softc *sc)
{
	write_csr(sc, CSR0, STOP);
}

/* Reset is simply a re-init; lnc_init() reprograms the chip from scratch. */
static void
lnc_reset(struct lnc_softc *sc)
{
	lnc_init(sc);
}

/*
 * Free every mbuf currently attached to the receive and transmit rings,
 * plus the driver's local cluster cache.
 */
static void
lnc_free_mbufs(struct lnc_softc *sc)
{
	int i;

	/*
	 * We rely on other routines to keep the buff.mbuf field valid. If
	 * it's not NULL then we assume it points to an allocated mbuf.
*/
	for (i = 0; i < NDESC(sc->nrdre); i++)
		if ((sc->recv_ring + i)->buff.mbuf)
			m_free((sc->recv_ring + i)->buff.mbuf);
	for (i = 0; i < NDESC(sc->ntdre); i++)
		if ((sc->trans_ring + i)->buff.mbuf)
			m_free((sc->trans_ring + i)->buff.mbuf);

	if (sc->mbuf_count)
		m_freem(sc->mbufs);
}

/*
 * Attach a fresh mbuf cluster to a receive descriptor and hand the buffer
 * to the LANCE (sets OWN in md1).  A small per-driver cache of clusters
 * (sc->mbufs / sc->mbuf_count) is tried before allocating a new one.
 * Returns 0 on success, 1 if no mbuf or cluster was available.
 */
static __inline int
alloc_mbuf_cluster(struct lnc_softc *sc, struct host_ring_entry *desc)
{
	register struct mds *md = desc->md;
	struct mbuf *m=0;
	int addr;

	/* Try and get cluster off local cache */
	if (sc->mbuf_count) {
		sc->mbuf_count--;
		m = sc->mbufs;
		sc->mbufs = m->m_next;
		/* XXX m->m_data = m->m_ext.ext_buf;*/
	} else {
		MGET(m, M_DONTWAIT, MT_DATA);
		if (!m)
			return(1);
		MCLGET(m, M_DONTWAIT);
		if (!m->m_ext.ext_buf) {
			m_free(m);
			return(1);
		}
	}

	desc->buff.mbuf = m;
	addr = kvtop(m->m_data);
	/* md0/md1 carry the 24-bit physical buffer address; md2 is the
	 * negated buffer byte count (two's complement, per LANCE layout). */
	md->md0 = addr;
	md->md1= ((addr >> 16) & 0xff) | OWN;
	md->md2 = -(short)(MCLBYTES - sizeof(struct pkthdr));
	md->md3 = 0;
	return(0);
}

/*
 * Link the cluster mbufs of a completed receive (from start_of_packet up
 * to recv_next) into a single packet chain, replenishing each descriptor
 * with a fresh cluster as it goes.  Returns NULL if replenishment fails.
 */
static __inline struct mbuf *
chain_mbufs(struct lnc_softc *sc, int start_of_packet, int pkt_len)
{
	struct mbuf *head, *m;
	struct host_ring_entry *desc;

	/*
	 * Turn head into a pkthdr mbuf --
	 * assumes a pkthdr type mbuf was
	 * allocated to the descriptor
	 * originally.
*/ desc = sc->recv_ring + start_of_packet; head = desc->buff.mbuf; head->m_flags |= M_PKTHDR; bzero(&head->m_pkthdr, sizeof(head->m_pkthdr)); m = head; do { m = desc->buff.mbuf; m->m_len = min((MCLBYTES - sizeof(struct pkthdr)), pkt_len); pkt_len -= m->m_len; if (alloc_mbuf_cluster(sc, desc)) return((struct mbuf *)NULL); INC_MD_PTR(start_of_packet, sc->nrdre) desc = sc->recv_ring + start_of_packet; m->m_next = desc->buff.mbuf; } while (start_of_packet != sc->recv_next); m->m_next = 0; return(head); } static __inline struct mbuf * mbuf_packet(struct lnc_softc *sc, int start_of_packet, int pkt_len) { struct host_ring_entry *start; struct mbuf *head,*m,*m_prev; char *data,*mbuf_data; short blen; int amount; /* Get a pkthdr mbuf for the start of packet */ MGETHDR(head, M_DONTWAIT, MT_DATA); if (!head) { LNCSTATS(drop_packet) return(0); } m = head; m->m_len = 0; start = sc->recv_ring + start_of_packet; /*blen = -(start->md->md2);*/ blen = RECVBUFSIZE; /* XXX More PCnet-32 crap */ data = start->buff.data; mbuf_data = m->m_data; while (start_of_packet != sc->recv_next) { /* * If the data left fits in a single buffer then set * blen to the size of the data left. */ if (pkt_len < blen) blen = pkt_len; /* * amount is least of data in current ring buffer and * amount of space left in current mbuf. 
*/ amount = min(blen, M_TRAILINGSPACE(m)); if (amount == 0) { /* mbuf must be empty */ m_prev = m; MGET(m, M_DONTWAIT, MT_DATA); if (!m) { m_freem(head); return(0); } if (pkt_len >= MINCLSIZE) MCLGET(m, M_DONTWAIT); m->m_len = 0; m_prev->m_next = m; amount = min(blen, M_TRAILINGSPACE(m)); mbuf_data = m->m_data; } bcopy(data, mbuf_data, amount); blen -= amount; pkt_len -= amount; m->m_len += amount; data += amount; mbuf_data += amount; if (blen == 0) { start->md->md1 &= HADR; start->md->md1 |= OWN; start->md->md2 = -RECVBUFSIZE; /* XXX - shouldn't be necessary */ INC_MD_PTR(start_of_packet, sc->nrdre) start = sc->recv_ring + start_of_packet; data = start->buff.data; /*blen = -(start->md->md2);*/ blen = RECVBUFSIZE; /* XXX More PCnet-32 crap */ } } return(head); } static __inline void lnc_rint(struct lnc_softc *sc) { struct host_ring_entry *next, *start; int start_of_packet; struct mbuf *head; struct ether_header *eh; int lookahead; int flags; int pkt_len; /* * The LANCE will issue a RINT interrupt when the ownership of the * last buffer of a receive packet has been relinquished by the LANCE. * Therefore, it can be assumed that a complete packet can be found * before hitting buffers that are still owned by the LANCE, if not * then there is a bug in the driver that is causing the descriptors * to get out of sync. */ #ifdef DIAGNOSTIC if ((sc->recv_ring + sc->recv_next)->md->md1 & OWN) { int unit = sc->arpcom.ac_if.if_unit; log(LOG_ERR, "lnc%d: Receive interrupt with buffer still owned by controller -- Resetting\n", unit); lnc_reset(sc); return; } if (!((sc->recv_ring + sc->recv_next)->md->md1 & STP)) { int unit = sc->arpcom.ac_if.if_unit; log(LOG_ERR, "lnc%d: Receive interrupt but not start of packet -- Resetting\n", unit); lnc_reset(sc); return; } #endif lookahead = 0; next = sc->recv_ring + sc->recv_next; while ((flags = next->md->md1) & STP) { /* Make a note of the start of the packet */ start_of_packet = sc->recv_next; /* * Find the end of the packet. 
Even if not data chaining, * jabber packets can overrun into a second descriptor. * If there is no error, then the ENP flag is set in the last * descriptor of the packet. If there is an error then the ERR * flag will be set in the descriptor where the error occured. * Therefore, to find the last buffer of a packet we search for * either ERR or ENP. */ if (!(flags & (ENP | MDERR))) { do { INC_MD_PTR(sc->recv_next, sc->nrdre) next = sc->recv_ring + sc->recv_next; flags = next->md->md1; } while (!(flags & (STP | OWN | ENP | MDERR))); if (flags & STP) { int unit = sc->arpcom.ac_if.if_unit; log(LOG_ERR, "lnc%d: Start of packet found before end of previous in receive ring -- Resetting\n", unit); lnc_reset(sc); return; } if (flags & OWN) { if (lookahead) { /* * Looked ahead into a packet still * being received */ sc->recv_next = start_of_packet; break; } else { int unit = sc->arpcom.ac_if.if_unit; log(LOG_ERR, "lnc%d: End of received packet not found-- Resetting\n", unit); lnc_reset(sc); return; } } } pkt_len = (next->md->md3 & MCNT) - FCS_LEN; /* Move pointer onto start of next packet */ INC_MD_PTR(sc->recv_next, sc->nrdre) next = sc->recv_ring + sc->recv_next; if (flags & MDERR) { int unit = sc->arpcom.ac_if.if_unit; if (flags & RBUFF) { LNCSTATS(rbuff) log(LOG_ERR, "lnc%d: Receive buffer error\n", unit); } if (flags & OFLO) { /* OFLO only valid if ENP is not set */ if (!(flags & ENP)) { LNCSTATS(oflo) log(LOG_ERR, "lnc%d: Receive overflow error \n", unit); } } else if (flags & ENP) { if ((sc->arpcom.ac_if.if_flags & IFF_PROMISC)==0) { /* * FRAM and CRC are valid only if ENP * is set and OFLO is not. 
*/ if (flags & FRAM) { LNCSTATS(fram) log(LOG_ERR, "lnc%d: Framing error\n", unit); /* * FRAM is only set if there's a CRC * error so avoid multiple messages */ } else if (flags & CRC) { LNCSTATS(crc) log(LOG_ERR, "lnc%d: Receive CRC error\n", unit); } } } /* Drop packet */ LNCSTATS(rerr) sc->arpcom.ac_if.if_ierrors++; while (start_of_packet != sc->recv_next) { start = sc->recv_ring + start_of_packet; start->md->md2 = -RECVBUFSIZE; /* XXX - shouldn't be necessary */ start->md->md1 &= HADR; start->md->md1 |= OWN; INC_MD_PTR(start_of_packet, sc->nrdre) } } else { /* Valid packet */ sc->arpcom.ac_if.if_ipackets++; if (sc->nic.mem_mode == DMA_MBUF) head = chain_mbufs(sc, start_of_packet, pkt_len); else head = mbuf_packet(sc, start_of_packet, pkt_len); if (head) { /* * First mbuf in packet holds the * ethernet and packet headers */ head->m_pkthdr.rcvif = &sc->arpcom.ac_if; head->m_pkthdr.len = pkt_len ; eh = (struct ether_header *) head->m_data; /* * vmware ethernet hardware emulation loops * packets back to itself, violates IFF_SIMPLEX. * drop it if it is from myself. */ if (bcmp(eh->ether_shost, sc->arpcom.ac_enaddr, ETHER_ADDR_LEN) == 0) { m_freem(head); } else { /* Skip over the ether header */ head->m_data += sizeof *eh; head->m_len -= sizeof *eh; head->m_pkthdr.len -= sizeof *eh; ether_input(&sc->arpcom.ac_if, eh, head); } } else { int unit = sc->arpcom.ac_if.if_unit; log(LOG_ERR,"lnc%d: Packet dropped, no mbufs\n",unit); LNCSTATS(drop_packet) } } lookahead++; } /* * At this point all completely received packets have been processed * so clear RINT since any packets that have arrived while we were in * here have been dealt with. */ outw(sc->rdp, RINT | INEA); } static __inline void lnc_tint(struct lnc_softc *sc) { struct host_ring_entry *next, *start; int start_of_packet; int lookahead; /* * If the driver is reset in this routine then we return immediately to * the interrupt driver routine. 
Any interrupts that have occured * since the reset will be dealt with there. sc->trans_next * should point to the start of the first packet that was awaiting * transmission after the last transmit interrupt was dealt with. The * LANCE should have relinquished ownership of that descriptor before * the interrupt. Therefore, sc->trans_next should point to a * descriptor with STP set and OWN cleared. If not then the driver's * pointers are out of sync with the LANCE, which signifies a bug in * the driver. Therefore, the following two checks are really * diagnostic, since if the driver is working correctly they should * never happen. */ #ifdef DIAGNOSTIC if ((sc->trans_ring + sc->trans_next)->md->md1 & OWN) { int unit = sc->arpcom.ac_if.if_unit; log(LOG_ERR, "lnc%d: Transmit interrupt with buffer still owned by controller -- Resetting\n", unit); lnc_reset(sc); return; } #endif /* * The LANCE will write the status information for the packet it just * tried to transmit in one of two places. If the packet was * transmitted successfully then the status will be written into the * last descriptor of the packet. If the transmit failed then the * status will be written into the descriptor that was being accessed * when the error occured and all subsequent descriptors in that * packet will have been relinquished by the LANCE. * * At this point we know that sc->trans_next points to the start * of a packet that the LANCE has just finished trying to transmit. * We now search for a buffer with either ENP or ERR set. */ lookahead = 0; do { start_of_packet = sc->trans_next; next = sc->trans_ring + sc->trans_next; #ifdef DIAGNOSTIC if (!(next->md->md1 & STP)) { int unit = sc->arpcom.ac_if.if_unit; log(LOG_ERR, "lnc%d: Transmit interrupt but not start of packet -- Resetting\n", unit); lnc_reset(sc); return; } #endif /* * Find end of packet. 
*/ if (!(next->md->md1 & (ENP | MDERR))) { do { INC_MD_PTR(sc->trans_next, sc->ntdre) next = sc->trans_ring + sc->trans_next; } while (!(next->md->md1 & (STP | OWN | ENP | MDERR))); if (next->md->md1 & STP) { int unit = sc->arpcom.ac_if.if_unit; log(LOG_ERR, "lnc%d: Start of packet found before end of previous in transmit ring -- Resetting\n", unit); lnc_reset(sc); return; } if (next->md->md1 & OWN) { if (lookahead) { /* * Looked ahead into a packet still * being transmitted */ sc->trans_next = start_of_packet; break; } else { int unit = sc->arpcom.ac_if.if_unit; log(LOG_ERR, "lnc%d: End of transmitted packet not found -- Resetting\n", unit); lnc_reset(sc); return; } } } /* * Check for ERR first since other flags are irrelevant if an * error occurred. */ if (next->md->md1 & MDERR) { int unit = sc->arpcom.ac_if.if_unit; LNCSTATS(terr) sc->arpcom.ac_if.if_oerrors++; if (next->md->md3 & LCOL) { LNCSTATS(lcol) log(LOG_ERR, "lnc%d: Transmit late collision -- Net error?\n", unit); sc->arpcom.ac_if.if_collisions++; /* * Clear TBUFF since it's not valid when LCOL * set */ next->md->md3 &= ~TBUFF; } if (next->md->md3 & LCAR) { LNCSTATS(lcar) log(LOG_ERR, "lnc%d: Loss of carrier during transmit -- Net error?\n", unit); } if (next->md->md3 & RTRY) { LNCSTATS(rtry) log(LOG_ERR, "lnc%d: Transmit of packet failed after 16 attempts -- TDR = %d\n", unit, ((sc->trans_ring + sc->trans_next)->md->md3 & TDR)); sc->arpcom.ac_if.if_collisions += 16; /* * Clear TBUFF since it's not valid when RTRY * set */ next->md->md3 &= ~TBUFF; } /* * TBUFF is only valid if neither LCOL nor RTRY are set. * We need to check UFLO after LCOL and RTRY so that we * know whether or not TBUFF is valid. If either are * set then TBUFF will have been cleared above. A * UFLO error will turn off the transmitter so we * have to reset. 
* */ if (next->md->md3 & UFLO) { LNCSTATS(uflo) /* * If an UFLO has occured it's possibly due * to a TBUFF error */ if (next->md->md3 & TBUFF) { LNCSTATS(tbuff) log(LOG_ERR, "lnc%d: Transmit buffer error -- Resetting\n", unit); } else log(LOG_ERR, "lnc%d: Transmit underflow error -- Resetting\n", unit); lnc_reset(sc); return; } do { INC_MD_PTR(sc->trans_next, sc->ntdre) next = sc->trans_ring + sc->trans_next; } while (!(next->md->md1 & STP) && (sc->trans_next != sc->next_to_send)); } else { /* * Since we check for ERR first then if we get here * the packet was transmitted correctly. There may * still have been non-fatal errors though. * Don't bother checking for DEF, waste of time. */ sc->arpcom.ac_if.if_opackets++; if (next->md->md1 & MORE) { LNCSTATS(more) sc->arpcom.ac_if.if_collisions += 2; } /* * ONE is invalid if LCOL is set. If LCOL was set then * ERR would have also been set and we would have * returned from lnc_tint above. Therefore we can * assume if we arrive here that ONE is valid. * */ if (next->md->md1 & ONE) { LNCSTATS(one) sc->arpcom.ac_if.if_collisions++; } INC_MD_PTR(sc->trans_next, sc->ntdre) next = sc->trans_ring + sc->trans_next; } /* * Clear descriptors and free any mbufs. */ do { start = sc->trans_ring + start_of_packet; start->md->md1 &= HADR; if (sc->nic.mem_mode == DMA_MBUF) { /* Cache clusters on a local queue */ if ((start->buff.mbuf->m_flags & M_EXT) && (sc->mbuf_count < MBUF_CACHE_LIMIT)) { if (sc->mbuf_count) { start->buff.mbuf->m_next = sc->mbufs; sc->mbufs = start->buff.mbuf; } else sc->mbufs = start->buff.mbuf; sc->mbuf_count++; start->buff.mbuf = 0; } else { struct mbuf *junk; MFREE(start->buff.mbuf, junk); start->buff.mbuf = 0; } } sc->pending_transmits--; INC_MD_PTR(start_of_packet, sc->ntdre) }while (start_of_packet != sc->trans_next); /* * There's now at least one free descriptor * in the ring so indicate that we can accept * more packets again. 
*/ sc->arpcom.ac_if.if_flags &= ~IFF_OACTIVE; lookahead++; } while (sc->pending_transmits && !(next->md->md1 & OWN)); /* * Clear TINT since we've dealt with all * the completed transmissions. */ outw(sc->rdp, TINT | INEA); } int lnc_attach_common(device_t dev) { int unit = device_get_unit(dev); lnc_softc_t *sc = device_get_softc(dev); int i; int skip; if (sc->nic.ident == BICC) { skip = 2; } else { skip = 1; } /* Set default mode */ sc->nic.mode = NORMAL; /* Fill in arpcom structure entries */ sc->arpcom.ac_if.if_softc = sc; sc->arpcom.ac_if.if_name = "lnc"; sc->arpcom.ac_if.if_unit = unit; sc->arpcom.ac_if.if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; sc->arpcom.ac_if.if_timer = 0; sc->arpcom.ac_if.if_output = ether_output; sc->arpcom.ac_if.if_start = lnc_start; sc->arpcom.ac_if.if_ioctl = lnc_ioctl; sc->arpcom.ac_if.if_watchdog = lnc_watchdog; sc->arpcom.ac_if.if_init = lnc_init; sc->arpcom.ac_if.if_snd.ifq_maxlen = IFQ_MAXLEN; /* Extract MAC address from PROM */ for (i = 0; i < ETHER_ADDR_LEN; i++) sc->arpcom.ac_enaddr[i] = inb(i * skip); /* * XXX -- should check return status of if_attach */ ether_ifattach(&sc->arpcom.ac_if, ETHER_BPF_SUPPORTED); printf("lnc%d: ", unit); if (sc->nic.ic == LANCE || sc->nic.ic == C_LANCE) printf("%s (%s)", nic_ident[sc->nic.ident], ic_ident[sc->nic.ic]); else printf("%s", ic_ident[sc->nic.ic]); printf(" address %6D\n", sc->arpcom.ac_enaddr, ":"); return (1); } static void lnc_init(xsc) void *xsc; { struct lnc_softc *sc = xsc; int s, i; char *lnc_mem; /* Check that interface has valid address */ if (TAILQ_EMPTY(&sc->arpcom.ac_if.if_addrhead)) { /* XXX unlikely */ printf("XXX no address?\n"); return; } /* Shut down interface */ s = splimp(); lnc_stop(sc); sc->arpcom.ac_if.if_flags |= IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; /* XXX??? */ /* * This sets up the memory area for the controller. 
Memory is set up for * the initialisation block (12 words of contiguous memory starting * on a word boundary),the transmit and receive ring structures (each * entry is 4 words long and must start on a quadword boundary) and * the data buffers. * * The alignment tests are particularly paranoid. */ sc->recv_next = 0; sc->trans_ring = sc->recv_ring + NDESC(sc->nrdre); sc->trans_next = 0; if (sc->nic.mem_mode == SHMEM) lnc_mem = (char *) sc->nic.iobase; else lnc_mem = (char *) (sc->trans_ring + NDESC(sc->ntdre)); lnc_mem = (char *)(((int)lnc_mem + 1) & ~1); sc->init_block = (struct init_block *) ((int) lnc_mem & ~1); lnc_mem = (char *) (sc->init_block + 1); lnc_mem = (char *)(((int)lnc_mem + 7) & ~7); /* Initialise pointers to descriptor entries */ for (i = 0; i < NDESC(sc->nrdre); i++) { (sc->recv_ring + i)->md = (struct mds *) lnc_mem; lnc_mem += sizeof(struct mds); } for (i = 0; i < NDESC(sc->ntdre); i++) { (sc->trans_ring + i)->md = (struct mds *) lnc_mem; lnc_mem += sizeof(struct mds); } /* Initialise the remaining ring entries */ if (sc->nic.mem_mode == DMA_MBUF) { sc->mbufs = 0; sc->mbuf_count = 0; /* Free previously allocated mbufs */ if (sc->flags & LNC_INITIALISED) lnc_free_mbufs(sc); for (i = 0; i < NDESC(sc->nrdre); i++) { if (alloc_mbuf_cluster(sc, sc->recv_ring+i)) { log(LOG_ERR, "Initialisation failed -- no mbufs\n"); splx(s); return; } } for (i = 0; i < NDESC(sc->ntdre); i++) { (sc->trans_ring + i)->buff.mbuf = 0; (sc->trans_ring + i)->md->md0 = 0; (sc->trans_ring + i)->md->md1 = 0; (sc->trans_ring + i)->md->md2 = 0; (sc->trans_ring + i)->md->md3 = 0; } } else { for (i = 0; i < NDESC(sc->nrdre); i++) { (sc->recv_ring + i)->md->md0 = kvtop(lnc_mem); (sc->recv_ring + i)->md->md1 = ((kvtop(lnc_mem) >> 16) & 0xff) | OWN; (sc->recv_ring + i)->md->md2 = -RECVBUFSIZE; (sc->recv_ring + i)->md->md3 = 0; (sc->recv_ring + i)->buff.data = lnc_mem; lnc_mem += RECVBUFSIZE; } for (i = 0; i < NDESC(sc->ntdre); i++) { (sc->trans_ring + i)->md->md0 = kvtop(lnc_mem); 
(sc->trans_ring + i)->md->md1 = ((kvtop(lnc_mem) >> 16) & 0xff); (sc->trans_ring + i)->md->md2 = 0; (sc->trans_ring + i)->md->md3 = 0; (sc->trans_ring + i)->buff.data = lnc_mem; lnc_mem += TRANSBUFSIZE; } } sc->next_to_send = 0; /* Set up initialisation block */ sc->init_block->mode = sc->nic.mode; for (i = 0; i < ETHER_ADDR_LEN; i++) sc->init_block->padr[i] = sc->arpcom.ac_enaddr[i]; lnc_setladrf(sc); sc->init_block->rdra = kvtop(sc->recv_ring->md); sc->init_block->rlen = ((kvtop(sc->recv_ring->md) >> 16) & 0xff) | (sc->nrdre << 13); sc->init_block->tdra = kvtop(sc->trans_ring->md); sc->init_block->tlen = ((kvtop(sc->trans_ring->md) >> 16) & 0xff) | (sc->ntdre << 13); /* Set flags to show that the memory area is valid */ sc->flags |= LNC_INITIALISED; sc->pending_transmits = 0; /* Give the LANCE the physical address of the initialisation block */ if (sc->nic.ic == PCnet_Home) { u_short media; /* Set PHY_SEL to HomeRun */ media = read_bcr(sc, BCR49); media &= ~3; media |= 1; write_bcr(sc, BCR49, media); } write_csr(sc, CSR1, kvtop(sc->init_block)); write_csr(sc, CSR2, (kvtop(sc->init_block) >> 16) & 0xff); /* * Depending on which controller this is, CSR3 has different meanings. * For the Am7990 it controls DMA operations, for the Am79C960 it * controls interrupt masks and transmitter algorithms. In either * case, none of the flags are set. * */ write_csr(sc, CSR3, 0); /* Let's see if it starts */ /* printf("Enabling lnc interrupts\n"); sc->arpcom.ac_if.if_timer = 10; write_csr(sc, CSR0, INIT|INEA); */ /* * Now that the initialisation is complete there's no reason to * access anything except CSR0, so we leave RAP pointing there * so we can just access RDP from now on, saving an outw each * time. */ write_csr(sc, CSR0, INIT); for(i=0; i < 1000; i++) if (read_csr(sc, CSR0) & IDON) break; if (read_csr(sc, CSR0) & IDON) { /* * Enable interrupts, start the LANCE, mark the interface as * running and transmit any pending packets. 
*/ write_csr(sc, CSR0, STRT | INEA); sc->arpcom.ac_if.if_flags |= IFF_RUNNING; sc->arpcom.ac_if.if_flags &= ~IFF_OACTIVE; lnc_start(&sc->arpcom.ac_if); } else log(LOG_ERR, "lnc%d: Initialisation failed\n", sc->arpcom.ac_if.if_unit); splx(s); } /* * The interrupt flag (INTR) will be set and provided that the interrupt enable * flag (INEA) is also set, the interrupt pin will be driven low when any of * the following occur: * * 1) Completion of the initialisation routine (IDON). 2) The reception of a * packet (RINT). 3) The transmission of a packet (TINT). 4) A transmitter * timeout error (BABL). 5) A missed packet (MISS). 6) A memory error (MERR). * * The interrupt flag is cleared when all of the above conditions are cleared. * * If the driver is reset from this routine then it first checks to see if any * interrupts have ocurred since the reset and handles them before returning. * This is because the NIC may signify a pending interrupt in CSR0 using the * INTR flag even if a hardware interrupt is currently inhibited (at least I * think it does from reading the data sheets). We may as well deal with * these pending interrupts now rather than get the overhead of another * hardware interrupt immediately upon returning from the interrupt handler. * */ void lncintr(void *arg) { lnc_softc_t *sc = arg; int unit = sc->arpcom.ac_if.if_unit; u_short csr0; /* * INEA is the only bit that can be cleared by writing a 0 to it so * we have to include it in any writes that clear other flags. */ while ((csr0 = inw(sc->rdp)) & INTR) { /* * Clear interrupt flags early to avoid race conditions. The * controller can still set these flags even while we're in * this interrupt routine. If the flag is still set from the * event that caused this interrupt any new events will * be missed. 
*/ outw(sc->rdp, csr0); /*outw(sc->rdp, IDON | CERR | BABL | MISS | MERR | RINT | TINT | INEA);*/ #ifdef notyet if (csr0 & IDON) { printf("IDON\n"); sc->arpcom.ac_if.if_timer = 0; write_csr(sc, CSR0, STRT | INEA); sc->arpcom.ac_if.if_flags |= IFF_RUNNING; sc->arpcom.ac_if.if_flags &= ~IFF_OACTIVE; lnc_start(&sc->arpcom.ac_if); continue; } #endif if (csr0 & ERR) { if (csr0 & CERR) { log(LOG_ERR, "lnc%d: Heartbeat error -- SQE test failed\n", unit); LNCSTATS(cerr) } if (csr0 & BABL) { log(LOG_ERR, "lnc%d: Babble error - more than 1519 bytes transmitted\n", unit); LNCSTATS(babl) sc->arpcom.ac_if.if_oerrors++; } if (csr0 & MISS) { log(LOG_ERR, "lnc%d: Missed packet -- no receive buffer\n", unit); LNCSTATS(miss) sc->arpcom.ac_if.if_ierrors++; } if (csr0 & MERR) { log(LOG_ERR, "lnc%d: Memory error -- Resetting\n", unit); LNCSTATS(merr) lnc_reset(sc); continue; } } if (csr0 & RINT) { LNCSTATS(rint) lnc_rint(sc); } if (csr0 & TINT) { LNCSTATS(tint) sc->arpcom.ac_if.if_timer = 0; lnc_tint(sc); } /* * If there's room in the transmit descriptor ring then queue * some more transmit packets. */ if (!(sc->arpcom.ac_if.if_flags & IFF_OACTIVE)) lnc_start(&sc->arpcom.ac_if); } } static __inline int mbuf_to_buffer(struct mbuf *m, char *buffer) { int len=0; for( ; m; m = m->m_next) { bcopy(mtod(m, caddr_t), buffer, m->m_len); buffer += m->m_len; len += m->m_len; } return(len); } static __inline struct mbuf * chain_to_cluster(struct mbuf *m) { struct mbuf *new; MGET(new, M_DONTWAIT, MT_DATA); if (new) { MCLGET(new, M_DONTWAIT); if (new->m_ext.ext_buf) { new->m_len = mbuf_to_buffer(m, new->m_data); m_freem(m); return(new); } else m_free(new); } return(0); } /* * IFF_OACTIVE and IFF_RUNNING are checked in ether_output so it's redundant * to check them again since we wouldn't have got here if they were not * appropriately set. This is also called from lnc_init and lncintr but the * flags should be ok at those points too. 
*/ static void lnc_start(struct ifnet *ifp) { struct lnc_softc *sc = ifp->if_softc; struct host_ring_entry *desc; int tmp; int end_of_packet; struct mbuf *head, *m; int len, chunk; int addr; int no_entries_needed; do { IF_DEQUEUE(&sc->arpcom.ac_if.if_snd, head); if (!head) return; if (sc->nic.mem_mode == DMA_MBUF) { no_entries_needed = 0; for (m=head; m; m = m->m_next) no_entries_needed++; /* * We try and avoid bcopy as much as possible * but there are two cases when we use it. * * 1) If there are not enough free entries in the ring * to hold each mbuf in the chain then compact the * chain into a single cluster. * * 2) The Am7990 and Am79C90 must not have less than * 100 bytes in the first descriptor of a chained * packet so it's necessary to shuffle the mbuf * contents to ensure this. */ if (no_entries_needed > (NDESC(sc->ntdre) - sc->pending_transmits)) { if (!(head = chain_to_cluster(head))) { log(LOG_ERR, "lnc%d: Couldn't get mbuf for transmit packet -- Resetting \n ",ifp->if_unit); lnc_reset(sc); return; } } else if ((sc->nic.ic == LANCE) || (sc->nic.ic == C_LANCE)) { if ((head->m_len < 100) && (head->m_next)) { len = 100 - head->m_len; if (M_TRAILINGSPACE(head) < len) { /* * Move data to start of data * area. We assume the first * mbuf has a packet header * and is not a cluster. */ bcopy((caddr_t)head->m_data, (caddr_t)head->m_pktdat, head->m_len); head->m_data = head->m_pktdat; } m = head->m_next; while (m && (len > 0)) { chunk = min(len, m->m_len); bcopy(mtod(m, caddr_t), mtod(head, caddr_t) + head->m_len, chunk); len -= chunk; head->m_len += chunk; m->m_len -= chunk; m->m_data += chunk; if (m->m_len <= 0) { MFREE(m, head->m_next); m = head->m_next; } } } } tmp = sc->next_to_send; /* * On entering this loop we know that tmp points to a * descriptor with a clear OWN bit. 
*/ desc = sc->trans_ring + tmp; len = ETHER_MIN_LEN; for (m = head; m; m = m->m_next) { desc->buff.mbuf = m; addr = kvtop(m->m_data); desc->md->md0 = addr; desc->md->md1 = ((addr >> 16) & 0xff); desc->md->md3 = 0; desc->md->md2 = -m->m_len; sc->pending_transmits++; len -= m->m_len; INC_MD_PTR(tmp, sc->ntdre) desc = sc->trans_ring + tmp; } end_of_packet = tmp; DEC_MD_PTR(tmp, sc->ntdre) desc = sc->trans_ring + tmp; desc->md->md1 |= ENP; if (len > 0) desc->md->md2 -= len; /* * Set OWN bits in reverse order, otherwise the Lance * could start sending the packet before all the * buffers have been relinquished by the host. */ while (tmp != sc->next_to_send) { desc->md->md1 |= OWN; DEC_MD_PTR(tmp, sc->ntdre) desc = sc->trans_ring + tmp; } sc->next_to_send = end_of_packet; desc->md->md1 |= STP | OWN; } else { sc->pending_transmits++; desc = sc->trans_ring + sc->next_to_send; len = mbuf_to_buffer(head, desc->buff.data); desc->md->md3 = 0; desc->md->md2 = -max(len, ETHER_MIN_LEN - ETHER_CRC_LEN); desc->md->md1 |= OWN | STP | ENP; INC_MD_PTR(sc->next_to_send, sc->ntdre) } /* Force an immediate poll of the transmit ring */ outw(sc->rdp, TDMD | INEA); /* * Set a timer so if the buggy Am7990.h shuts * down we can wake it up. */ ifp->if_timer = 2; if (sc->arpcom.ac_if.if_bpf) bpf_mtap(&sc->arpcom.ac_if, head); if (sc->nic.mem_mode != DMA_MBUF) m_freem(head); } while (sc->pending_transmits < NDESC(sc->ntdre)); /* * Transmit ring is full so set IFF_OACTIVE * since we can't buffer any more packets. 
*/ sc->arpcom.ac_if.if_flags |= IFF_OACTIVE; LNCSTATS(trans_ring_full) } static int lnc_ioctl(struct ifnet * ifp, u_long command, caddr_t data) { struct lnc_softc *sc = ifp->if_softc; int s, error = 0; s = splimp(); switch (command) { case SIOCSIFADDR: case SIOCGIFADDR: case SIOCSIFMTU: error = ether_ioctl(ifp, command, data); break; case SIOCSIFFLAGS: #ifdef DEBUG if (ifp->if_flags & IFF_DEBUG) sc->lnc_debug = 1; else sc->lnc_debug = 0; #endif if (ifp->if_flags & IFF_PROMISC) { if (!(sc->nic.mode & PROM)) { sc->nic.mode |= PROM; lnc_init(sc); } } else if (sc->nic.mode & PROM) { sc->nic.mode &= ~PROM; lnc_init(sc); } if ((ifp->if_flags & IFF_ALLMULTI) && !(sc->flags & LNC_ALLMULTI)) { sc->flags |= LNC_ALLMULTI; lnc_init(sc); } else if (!(ifp->if_flags & IFF_ALLMULTI) && (sc->flags & LNC_ALLMULTI)) { sc->flags &= ~LNC_ALLMULTI; lnc_init(sc); } if ((ifp->if_flags & IFF_UP) == 0 && (ifp->if_flags & IFF_RUNNING) != 0) { /* * If interface is marked down and it is running, * then stop it. */ lnc_stop(sc); ifp->if_flags &= ~IFF_RUNNING; } else if ((ifp->if_flags & IFF_UP) != 0 && (ifp->if_flags & IFF_RUNNING) == 0) { /* * If interface is marked up and it is stopped, then * start it. 
*/ lnc_init(sc); } break; case SIOCADDMULTI: case SIOCDELMULTI: lnc_init(sc); error = 0; break; default: error = EINVAL; } (void) splx(s); return error; } static void lnc_watchdog(struct ifnet *ifp) { log(LOG_ERR, "lnc%d: Device timeout -- Resetting\n", ifp->if_unit); ifp->if_oerrors++; lnc_reset(ifp->if_softc); } #ifdef DEBUG void lnc_dump_state(struct lnc_softc *sc) { int i; printf("\nDriver/NIC [%d] state dump\n", sc->arpcom.ac_if.if_unit); printf("Memory access mode: %b\n", sc->nic.mem_mode, MEM_MODES); printf("Host memory\n"); printf("-----------\n"); printf("Receive ring: base = %p, next = %p\n", (void *)sc->recv_ring, (void *)(sc->recv_ring + sc->recv_next)); for (i = 0; i < NDESC(sc->nrdre); i++) printf("\t%d:%p md = %p buff = %p\n", i, (void *)(sc->recv_ring + i), (void *)(sc->recv_ring + i)->md, (void *)(sc->recv_ring + i)->buff.data); printf("Transmit ring: base = %p, next = %p\n", (void *)sc->trans_ring, (void *)(sc->trans_ring + sc->trans_next)); for (i = 0; i < NDESC(sc->ntdre); i++) printf("\t%d:%p md = %p buff = %p\n", i, (void *)(sc->trans_ring + i), (void *)(sc->trans_ring + i)->md, (void *)(sc->trans_ring + i)->buff.data); printf("Lance memory (may be on host(DMA) or card(SHMEM))\n"); printf("Init block = %p\n", (void *)sc->init_block); printf("\tmode = %b rlen:rdra = %x:%x tlen:tdra = %x:%x\n", sc->init_block->mode, INIT_MODE, sc->init_block->rlen, sc->init_block->rdra, sc->init_block->tlen, sc->init_block->tdra); printf("Receive descriptor ring\n"); for (i = 0; i < NDESC(sc->nrdre); i++) printf("\t%d buffer = 0x%x%x, BCNT = %d,\tMCNT = %u,\tflags = %b\n", i, ((sc->recv_ring + i)->md->md1 & HADR), (sc->recv_ring + i)->md->md0, -(short) (sc->recv_ring + i)->md->md2, (sc->recv_ring + i)->md->md3, (((sc->recv_ring + i)->md->md1 & ~HADR) >> 8), RECV_MD1); printf("Transmit descriptor ring\n"); for (i = 0; i < NDESC(sc->ntdre); i++) printf("\t%d buffer = 0x%x%x, BCNT = %d,\tflags = %b %b\n", i, ((sc->trans_ring + i)->md->md1 & HADR), (sc->trans_ring + 
i)->md->md0, -(short) (sc->trans_ring + i)->md->md2, ((sc->trans_ring + i)->md->md1 >> 8), TRANS_MD1, ((sc->trans_ring + i)->md->md3 >> 10), TRANS_MD3); printf("\nnext_to_send = %x\n", sc->next_to_send); printf("\n CSR0 = %b CSR1 = %x CSR2 = %x CSR3 = %x\n\n", read_csr(sc, CSR0), CSR0_FLAGS, read_csr(sc, CSR1), read_csr(sc, CSR2), read_csr(sc, CSR3)); /* Set RAP back to CSR0 */ outw(sc->rap, CSR0); } void mbuf_dump_chain(struct mbuf * m) { #define MBUF_FLAGS \ "\20\1M_EXT\2M_PKTHDR\3M_EOR\4UNKNOWN\5M_BCAST\6M_MCAST" if (!m) log(LOG_DEBUG, "m == NULL\n"); do { log(LOG_DEBUG, "m = %p\n", (void *)m); log(LOG_DEBUG, "m_hdr.mh_next = %p\n", (void *)m->m_hdr.mh_next); log(LOG_DEBUG, "m_hdr.mh_nextpkt = %p\n", (void *)m->m_hdr.mh_nextpkt); log(LOG_DEBUG, "m_hdr.mh_len = %d\n", m->m_hdr.mh_len); log(LOG_DEBUG, "m_hdr.mh_data = %p\n", (void *)m->m_hdr.mh_data); log(LOG_DEBUG, "m_hdr.mh_type = %d\n", m->m_hdr.mh_type); log(LOG_DEBUG, "m_hdr.mh_flags = %b\n", m->m_hdr.mh_flags, MBUF_FLAGS); if (!(m->m_hdr.mh_flags & (M_PKTHDR | M_EXT))) log(LOG_DEBUG, "M_dat.M_databuf = %p\n", (void *)m->M_dat.M_databuf); else { if (m->m_hdr.mh_flags & M_PKTHDR) { log(LOG_DEBUG, "M_dat.MH.MH_pkthdr.len = %d\n", m->M_dat.MH.MH_pkthdr.len); log(LOG_DEBUG, "M_dat.MH.MH_pkthdr.rcvif = %p\n", (void *)m->M_dat.MH.MH_pkthdr.rcvif); if (!(m->m_hdr.mh_flags & M_EXT)) log(LOG_DEBUG, "M_dat.MH.MH_dat.MH_databuf = %p\n", (void *)m->M_dat.MH.MH_dat.MH_databuf); } if (m->m_hdr.mh_flags & M_EXT) { log(LOG_DEBUG, "M_dat.MH.MH_dat.MH_ext.ext_buff %p\n", (void *)m->M_dat.MH.MH_dat.MH_ext.ext_buf); log(LOG_DEBUG, "M_dat.MH.MH_dat.MH_ext.ext_free %p\n", (void *)m->M_dat.MH.MH_dat.MH_ext.ext_free); log(LOG_DEBUG, "M_dat.MH.MH_dat.MH_ext.ext_size %d\n", m->M_dat.MH.MH_dat.MH_ext.ext_size); } } } while ((m = m->m_next) != NULL); } #endif Index: head/sys/dev/ray/if_ray.c =================================================================== --- head/sys/dev/ray/if_ray.c (revision 71961) +++ head/sys/dev/ray/if_ray.c 
(revision 71962) @@ -1,3567 +1,3565 @@ /* * Copyright (C) 2000 * Dr. Duncan McLennan Barclay, dmlb@ragnet.demon.co.uk. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY DUNCAN BARCLAY AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL DUNCAN BARCLAY OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ * */ /* $NetBSD: if_ray.c,v 1.12 2000/02/07 09:36:27 augustss Exp $ */ /* * Copyright (c) 2000 Christian E. Hopps * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Card configuration * ================== * * This card is unusual in that it uses both common and attribute * memory whilst working. It should use common memory and an IO port. * * The bus resource allocations need to work around the brain deadness * of pccardd (where it reads the CIS for common memory, sets it all * up and then throws it all away assuming the card is an ed * driver...). Note that this could be dangerous (because it doesn't * interact with pccardd) if you use other memory mapped cards in the * same pccard slot as currently old mappings are not cleaned up very well * by the bus_release_resource methods or pccardd. * * There is no support for running this driver on 4.0. 
* * Ad-hoc and infra-structure modes * ================================ * * At present only the ad-hoc mode is tested. * * I have received an AP from Raylink and will be working on * infrastructure mode. * * The Linux driver also seems to have the capability to act as an AP. * I wonder what facilities the "AP" can provide within a driver? We can * probably use the BRIDGE code to form an ESS but I don't think * power saving etc. is easy. * * * Packet framing/encapsulation * ================================ * * Currently we only support the Webgear encapsulation * 802.11 header struct ieee80211_frame * 802.3 header struct ether_header * 802.2 LLC header * 802.2 SNAP header * * We should support whatever packet types the following drivers have * if_wi.c FreeBSD, RFC1042 * if_ray.c NetBSD Webgear, RFC1042 * rayctl.c Linux Webgear, RFC1042 * also whatever we can divine from the NDC Access points and Kanda's boxes. * * Most drivers appear to have a RFC1042 framing. The incoming packet is * 802.11 header struct ieee80211_frame * 802.2 LLC header * 802.2 SNAP header * * This is translated to * 802.3 header struct ether_header * 802.2 LLC header * 802.2 SNAP header * * Linux seems to look at the SNAP org_code and do some framings * for IPX and APPLEARP on that. This just may be how Linux does IPX * and NETATALK. Need to see how FreeBSD does these. * * Translation should be selected via if_media stuff or link types. * * * Authentication * ============== * * 802.11 provides two authentication mechanisms. The first is a very * simple host based mechanism (like xhost) called Open System and the * second is a more complex challenge/response called Shared Key built * ontop of WEP. * * This driver only supports Open System and does not implement any * host based control lists. In otherwords authentication is always * granted to hosts wanting to authenticate with this station. This is * the only sensible behaviour as the Open System mechanism uses MAC * addresses to identify hosts. 
Send me patches if you need it! */ /* * ***watchdog to catch screwed up removals? * ***error handling of RAY_COM_RUNQ * ***error handling of ECF command completions * ***can't seem to create a n/w that Win95 wants to see. * ***need decent association code * write up driver structure in comments above * UPDATE_PARAMS seems to return via an interrupt - maybe the timeout * is needed for wrong values? * havenet needs checking again * proper setting of mib_hop_seq_len with country code for v4 firmware * best done with raycontrol? * more translations * might be able to autodetect them * spinning in ray_com_ecf * countrycode setting is broken I think * userupdate should trap and do via startjoin etc. * fragmentation when rx level drops? * * infra mode stuff * proper handling of the basic rate set - see the manual * all ray_sj, ray_assoc sequencues need a "nicer" solution as we * remember association and authentication * need to consider WEP * acting as ap - should be able to get working from the manual * need to finish RAY_ECMD_REJOIN_DONE * * ray_nw_param * promisc in here too? - done * should be able to update the parameters before we download to the * device. This means we must attach a desired struct to the * runq entry and maybe have another big case statement to * move these desired into current when not running. * init must then use the current settings (pre-loaded * in attach now!) and pass to download. But we can't access * current nw params outside of the runq - ahhh * differeniate between parameters set in attach and init * sc_station_addr in here too (for changing mac address) * move desired into the command structure? * take downloaded MIB from a complete nw_param? 
* longer term need to attach a desired nw params to the runq entry * * * RAY_COM_RUNQ errors * * if sleeping in ccs_alloc with eintr/erestart/enxio/enodev * erestart try again from the top * XXX do not malloc more comqs * XXX ccs allocation hard * eintr clean up and return * enxio clean up and return - done in macro * * if sleeping in runq_arr itself with eintr/erestart/enxio/enodev * erestart try again from the top * XXX do not malloc more comqs * XXX ccs allocation hard * XXX reinsert comqs at head of list * eintr clean up and return * enxio clean up and return - done in macro */ #define XXX 0 #define XXX_ACTING_AP 0 #define XXX_INFRA 0 #define RAY_DEBUG ( \ /* RAY_DBG_SUBR | */ \ /* RAY_DBG_BOOTPARAM | */ \ /* RAY_DBG_STARTJOIN | */ \ /* RAY_DBG_CCS | */ \ /* RAY_DBG_IOCTL | */ \ /* RAY_DBG_MBUF | */ \ /* RAY_DBG_RX | */ \ /* RAY_DBG_CM | */ \ /* RAY_DBG_COM | */ \ /* RAY_DBG_STOP | */ \ /* RAY_DBG_CTL | */ \ /* RAY_DBG_MGT | */ \ /* RAY_DBG_TX | */ \ /* RAY_DBG_DCOM | */ \ 0 \ ) /* * XXX build options - move to LINT */ #define RAY_CM_RID 2 /* pccardd abuses windows 0 and 1 */ #define RAY_AM_RID 3 /* pccardd abuses windows 0 and 1 */ #define RAY_NEED_CM_REMAPPING 0 /* Needed until pccard maps more than one memory area */ #define RAY_NEED_AM_REMAPPING 0 /* Needed until pccard maps more than one memory area */ #define RAY_COM_TIMEOUT (hz/2) /* Timeout for CCS commands */ #define RAY_TX_TIMEOUT (hz/2) /* Timeout for rescheduling TX */ /* * XXX build options - move to LINT */ #ifndef RAY_DEBUG #define RAY_DEBUG 0x0000 #endif /* RAY_DEBUG */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "card_if.h" #include #include #include #include #include #include /* * Prototyping */ static int ray_attach (device_t); static int ray_ccs_alloc (struct ray_softc *sc, size_t *ccsp, char *wmesg); static void ray_ccs_fill (struct ray_softc *sc, size_t ccs, u_int 
cmd); static void ray_ccs_free (struct ray_softc *sc, size_t ccs); static int ray_ccs_tx (struct ray_softc *sc, size_t *ccsp, size_t *bufpp); static void ray_com_ecf (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_com_ecf_done (struct ray_softc *sc); static void ray_com_ecf_timo (void *xsc); static struct ray_comq_entry * ray_com_init (struct ray_comq_entry *com, ray_comqfn_t function, int flags, char *mesg); static struct ray_comq_entry * ray_com_malloc (ray_comqfn_t function, int flags, char *mesg); static void ray_com_runq (struct ray_softc *sc); static int ray_com_runq_add (struct ray_softc *sc, struct ray_comq_entry *com[], int ncom, char *wmesg); static void ray_com_runq_done (struct ray_softc *sc); static int ray_detach (device_t); static void ray_init (void *xsc); static int ray_init_user (struct ray_softc *sc); static void ray_init_assoc (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_init_assoc_done (struct ray_softc *sc, size_t ccs); static void ray_init_download (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_init_download_done (struct ray_softc *sc, size_t ccs); static void ray_init_download_v4 (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_init_download_v5 (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_init_mcast (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_init_sj (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_init_sj_done (struct ray_softc *sc, size_t ccs); static void ray_intr (void *xsc); static void ray_intr_ccs (struct ray_softc *sc, u_int8_t cmd, size_t ccs); static void ray_intr_rcs (struct ray_softc *sc, u_int8_t cmd, size_t ccs); static void ray_intr_updt_errcntrs (struct ray_softc *sc); static int ray_ioctl (struct ifnet *ifp, u_long command, caddr_t data); static void ray_mcast (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_mcast_done (struct ray_softc *sc, size_t ccs); 
static int ray_mcast_user (struct ray_softc *sc); static int ray_probe (device_t); static void ray_promisc (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_repparams (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_repparams_done (struct ray_softc *sc, size_t ccs); static int ray_repparams_user (struct ray_softc *sc, struct ray_param_req *pr); static int ray_repstats_user (struct ray_softc *sc, struct ray_stats_req *sr); static int ray_res_alloc_am (struct ray_softc *sc); static int ray_res_alloc_cm (struct ray_softc *sc); static int ray_res_alloc_irq (struct ray_softc *sc); static void ray_res_release (struct ray_softc *sc); static void ray_rx (struct ray_softc *sc, size_t rcs); static void ray_rx_ctl (struct ray_softc *sc, struct mbuf *m0); static void ray_rx_data (struct ray_softc *sc, struct mbuf *m0, u_int8_t siglev, u_int8_t antenna); static void ray_rx_mgt (struct ray_softc *sc, struct mbuf *m0); static void ray_rx_mgt_auth (struct ray_softc *sc, struct mbuf *m0); static void ray_rx_update_cache (struct ray_softc *sc, u_int8_t *src, u_int8_t siglev, u_int8_t antenna); static void ray_stop (struct ray_softc *sc, struct ray_comq_entry *com); static int ray_stop_user (struct ray_softc *sc); static void ray_tx (struct ifnet *ifp); static void ray_tx_done (struct ray_softc *sc, size_t ccs); static void ray_tx_timo (void *xsc); static int ray_tx_send (struct ray_softc *sc, size_t ccs, int pktlen, u_int8_t *dst); static size_t ray_tx_wrhdr (struct ray_softc *sc, size_t bufp, u_int8_t type, u_int8_t fc1, u_int8_t *addr1, u_int8_t *addr2, u_int8_t *addr3); static void ray_upparams (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_upparams_done (struct ray_softc *sc, size_t ccs); static int ray_upparams_user (struct ray_softc *sc, struct ray_param_req *pr); static void ray_watchdog (struct ifnet *ifp); static u_int8_t ray_tx_best_antenna (struct ray_softc *sc, u_int8_t *dst); #if RAY_DEBUG & RAY_DBG_COM static 
void ray_com_ecf_check (struct ray_softc *sc, size_t ccs, char *mesg); #endif /* RAY_DEBUG & RAY_DBG_COM */ #if RAY_DEBUG & RAY_DBG_MBUF static void ray_dump_mbuf (struct ray_softc *sc, struct mbuf *m, char *s); #endif /* RAY_DEBUG & RAY_DBG_MBUF */ /* * PC-Card (PCMCIA) driver definition */ static device_method_t ray_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ray_probe), DEVMETHOD(device_attach, ray_attach), DEVMETHOD(device_detach, ray_detach), { 0, 0 } }; static driver_t ray_driver = { "ray", ray_methods, sizeof(struct ray_softc) }; static devclass_t ray_devclass; DRIVER_MODULE(ray, pccard, ray_driver, ray_devclass, 0, 0); /* * Probe for the card by checking its startup results. * * Fixup any bugs/quirks for different firmware. */ static int ray_probe(device_t dev) { struct ray_softc *sc = device_get_softc(dev); struct ray_ecf_startup_v5 *ep = &sc->sc_ecf_startup; int error; sc->dev = dev; RAY_DPRINTF(sc, RAY_DBG_SUBR, ""); /* * Read startup results from the card. */ error = ray_res_alloc_cm(sc); if (error) return (error); RAY_MAP_CM(sc); SRAM_READ_REGION(sc, RAY_ECF_TO_HOST_BASE, ep, sizeof(sc->sc_ecf_startup)); ray_res_release(sc); /* * Check the card is okay and work out what version we are using. 
*/ if (ep->e_status != RAY_ECFS_CARD_OK) { RAY_PRINTF(sc, "card failed self test 0x%b", ep->e_status, RAY_ECFS_PRINTFB); return (ENXIO); } if (sc->sc_version != RAY_ECFS_BUILD_4 && sc->sc_version != RAY_ECFS_BUILD_5) { RAY_PRINTF(sc, "unsupported firmware version 0x%0x", ep->e_fw_build_string); return (ENXIO); } RAY_DPRINTF(sc, RAY_DBG_BOOTPARAM, "found a card"); sc->gone = 0; /* * Fixup tib size to be correct - on build 4 it is garbage */ if (sc->sc_version == RAY_ECFS_BUILD_4 && sc->sc_tibsize == 0x55) sc->sc_tibsize = sizeof(struct ray_tx_tib); return (0); } /* * Attach the card into the kernel */ static int ray_attach(device_t dev) { struct ray_softc *sc = device_get_softc(dev); struct ray_ecf_startup_v5 *ep = &sc->sc_ecf_startup; struct ifnet *ifp = &sc->arpcom.ac_if; size_t ccs; int i, error; RAY_DPRINTF(sc, RAY_DBG_SUBR, ""); if ((sc == NULL) || (sc->gone)) return (ENXIO); /* * Grab the resources I need */ error = ray_res_alloc_cm(sc); if (error) return (error); error = ray_res_alloc_am(sc); if (error) { ray_res_release(sc); return (error); } error = ray_res_alloc_irq(sc); if (error) { ray_res_release(sc); return (error); } /* * Reset any pending interrupts */ RAY_HCS_CLEAR_INTR(sc); /* * Set the parameters that will survive stop/init and * reset a few things on the card. 
* * Do not update these in ray_init_download's parameter setup * * XXX see the ray_init_download section for stuff to move */ RAY_MAP_CM(sc); bzero(&sc->sc_d, sizeof(struct ray_nw_param)); bzero(&sc->sc_c, sizeof(struct ray_nw_param)); /* Clear statistics counters */ sc->sc_rxoverflow = 0; sc->sc_rxcksum = 0; sc->sc_rxhcksum = 0; sc->sc_rxnoise = 0; /* Clear signal and antenna cache */ bzero(sc->sc_siglevs, sizeof(sc->sc_siglevs)); /* Set all ccs to be free */ bzero(sc->sc_ccsinuse, sizeof(sc->sc_ccsinuse)); ccs = RAY_CCS_ADDRESS(0); for (i = 0; i < RAY_CCS_LAST; ccs += RAY_CCS_SIZE, i++) RAY_CCS_FREE(sc, ccs); /* * Initialise the network interface structure */ bcopy((char *)&ep->e_station_addr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); ifp->if_softc = sc; ifp->if_name = "ray"; ifp->if_unit = device_get_unit(dev); ifp->if_timer = 0; ifp->if_flags = (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); ifp->if_hdrlen = sizeof(struct ieee80211_frame) + sizeof(struct ether_header); ifp->if_baudrate = 1000000; /* Is this baud or bps ;-) */ ifp->if_output = ether_output; ifp->if_start = ray_tx; ifp->if_ioctl = ray_ioctl; ifp->if_watchdog = ray_watchdog; ifp->if_init = ray_init; ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; ether_ifattach(ifp, ETHER_BPF_SUPPORTED); /* * Initialise the timers and driver */ callout_handle_init(&sc->com_timerh); callout_handle_init(&sc->tx_timerh); TAILQ_INIT(&sc->sc_comq); /* * Print out some useful information */ if (bootverbose || (RAY_DEBUG & RAY_DBG_BOOTPARAM)) { RAY_PRINTF(sc, "start up results"); if (sc->sc_version == RAY_ECFS_BUILD_4) printf(". Firmware version 4\n"); else printf(". Firmware version 5\n"); printf(". Status 0x%b\n", ep->e_status, RAY_ECFS_PRINTFB); printf(". Ether address %6D\n", ep->e_station_addr, ":"); if (sc->sc_version == RAY_ECFS_BUILD_4) { printf(". Program checksum %0x\n", ep->e_resv0); printf(". CIS checksum %0x\n", ep->e_rates[0]); } else { printf(". (reserved word) %0x\n", ep->e_resv0); printf(". 
Supported rates %8D\n", ep->e_rates, ":"); } printf(". Japan call sign %12D\n", ep->e_japan_callsign, ":"); if (sc->sc_version == RAY_ECFS_BUILD_5) { printf(". Program checksum %0x\n", ep->e_prg_cksum); printf(". CIS checksum %0x\n", ep->e_cis_cksum); printf(". Firmware version %0x\n", ep->e_fw_build_string); printf(". Firmware revision %0x\n", ep->e_fw_build); printf(". (reserved word) %0x\n", ep->e_fw_resv); printf(". ASIC version %0x\n", ep->e_asic_version); printf(". TIB size %0x\n", ep->e_tibsize); } } return (0); } /* * Detach the card * * This is usually called when the card is ejected, but * can be caused by a modunload of a controller driver. * The idea is to reset the driver's view of the device * and ensure that any driver entry points such as * read and write do not hang. */ static int ray_detach(device_t dev) { struct ray_softc *sc = device_get_softc(dev); struct ifnet *ifp = &sc->arpcom.ac_if; struct ray_comq_entry *com; int s; s = splimp(); RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STOP, ""); if ((sc == NULL) || (sc->gone)) return (0); /* * Mark as not running and detach the interface. * * N.B. if_detach can trigger ioctls so we do it first and * then clean the runq. */ sc->gone = 1; sc->sc_havenet = 0; ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); ether_ifdetach(ifp, ETHER_BPF_SUPPORTED); /* * Stop the runq and wake up anyone sleeping for us. 
*/ untimeout(ray_com_ecf_timo, sc, sc->com_timerh); untimeout(ray_tx_timo, sc, sc->tx_timerh); com = TAILQ_FIRST(&sc->sc_comq); for (com = TAILQ_FIRST(&sc->sc_comq); com != NULL; com = TAILQ_NEXT(com, c_chain)) { com->c_flags |= RAY_COM_FDETACHED; com->c_retval = 0; RAY_DPRINTF(sc, RAY_DBG_STOP, "looking at com %p %b", com, com->c_flags, RAY_COM_FLAGS_PRINTFB); if (com->c_flags & RAY_COM_FWOK) { RAY_DPRINTF(sc, RAY_DBG_STOP, "waking com %p", com); wakeup(com->c_wakeup); } } /* * Release resources */ ray_res_release(sc); RAY_DPRINTF(sc, RAY_DBG_STOP, "unloading complete"); splx(s); return (0); } /* * Network ioctl request. */ static int ray_ioctl(register struct ifnet *ifp, u_long command, caddr_t data) { struct ray_softc *sc = ifp->if_softc; struct ray_param_req pr; struct ray_stats_req sr; struct ifreq *ifr = (struct ifreq *)data; int s, error, error2; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_IOCTL, ""); if ((sc == NULL) || (sc->gone)) return (ENXIO); error = error2 = 0; s = splimp(); switch (command) { case SIOCGIFADDR: case SIOCSIFMTU: case SIOCSIFADDR: RAY_DPRINTF(sc, RAY_DBG_IOCTL, "GIFADDR/SIFMTU"); error = ether_ioctl(ifp, command, data); /* XXX SIFADDR used to fall through to SIOCSIFFLAGS */ break; case SIOCSIFFLAGS: RAY_DPRINTF(sc, RAY_DBG_IOCTL, "SIFFLAGS 0x%0x", ifp->if_flags); /* * If the interface is marked up we call ray_init_user. * This will deal with mcast and promisc flags as well as * initialising the hardware if it needs it. */ if (ifp->if_flags & IFF_UP) error = ray_init_user(sc); else error = ray_stop_user(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: RAY_DPRINTF(sc, RAY_DBG_IOCTL, "ADDMULTI/DELMULTI"); error = ray_mcast_user(sc); break; case SIOCSRAYPARAM: RAY_DPRINTF(sc, RAY_DBG_IOCTL, "SRAYPARAM"); if ((error = copyin(ifr->ifr_data, &pr, sizeof(pr)))) break; error = ray_upparams_user(sc, &pr); error2 = copyout(&pr, ifr->ifr_data, sizeof(pr)); error = error2 ? 
error2 : error; break; case SIOCGRAYPARAM: RAY_DPRINTF(sc, RAY_DBG_IOCTL, "GRAYPARAM"); if ((error = copyin(ifr->ifr_data, &pr, sizeof(pr)))) break; error = ray_repparams_user(sc, &pr); error2 = copyout(&pr, ifr->ifr_data, sizeof(pr)); error = error2 ? error2 : error; break; case SIOCGRAYSTATS: RAY_DPRINTF(sc, RAY_DBG_IOCTL, "GRAYSTATS"); error = ray_repstats_user(sc, &sr); error2 = copyout(&sr, ifr->ifr_data, sizeof(sr)); error = error2 ? error2 : error; break; case SIOCGRAYSIGLEV: RAY_DPRINTF(sc, RAY_DBG_IOCTL, "GRAYSIGLEV"); error = copyout(sc->sc_siglevs, ifr->ifr_data, sizeof(sc->sc_siglevs)); break; case SIOCGIFFLAGS: RAY_DPRINTF(sc, RAY_DBG_IOCTL, "GIFFLAGS"); error = EINVAL; break; case SIOCGIFMETRIC: RAY_DPRINTF(sc, RAY_DBG_IOCTL, "GIFMETRIC"); error = EINVAL; break; case SIOCGIFMTU: RAY_DPRINTF(sc, RAY_DBG_IOCTL, "GIFMTU"); error = EINVAL; break; case SIOCGIFPHYS: RAY_DPRINTF(sc, RAY_DBG_IOCTL, "GIFPYHS"); error = EINVAL; break; case SIOCSIFMEDIA: RAY_DPRINTF(sc, RAY_DBG_IOCTL, "SIFMEDIA"); error = EINVAL; break; case SIOCGIFMEDIA: RAY_DPRINTF(sc, RAY_DBG_IOCTL, "GIFMEDIA"); error = EINVAL; break; default: error = EINVAL; } splx(s); return (error); } /* * Ethernet layer entry to ray_init - discard errors */ static void ray_init(void *xsc) { struct ray_softc *sc = (struct ray_softc *)xsc; ray_init_user(sc); } /* * User land entry to network initialisation and changes in interface flags. * * We do a very little work here, just creating runq entries to * processes the actions needed to cope with interface flags. We do it * this way in case there are runq entries outstanding from earlier * ioctls that modify the interface flags. * * Returns values are either 0 for success, a varity of resource allocation * failures or errors in the command sent to the card. 
* * Note, IFF_RUNNING is eventually set by init_sj_done or init_assoc_done */ static int ray_init_user(struct ray_softc *sc) { struct ray_comq_entry *com[5]; int error, ncom; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, ""); /* * Create the following runq entries to bring the card up. * * init_download - download the network to the card * init_mcast - reset multicast list * init_sj - find or start a BSS * init_assoc - associate with a ESSID if needed * * They are only actually executed if the card is not running */ ncom = 0; com[ncom++] = RAY_COM_MALLOC(ray_init_download, RAY_COM_FCHKRUNNING); com[ncom++] = RAY_COM_MALLOC(ray_init_mcast, RAY_COM_FCHKRUNNING); com[ncom++] = RAY_COM_MALLOC(ray_init_sj, RAY_COM_FCHKRUNNING); com[ncom++] = RAY_COM_MALLOC(ray_init_assoc, RAY_COM_FCHKRUNNING); /* * Create runq entries to process flags * * promisc - set/reset PROMISC and ALLMULTI flags * * They are only actually executed if the card is running */ com[ncom++] = RAY_COM_MALLOC(ray_promisc, 0); RAY_COM_RUNQ(sc, com, ncom, "rayinit", error); /* XXX no real error processing from anything yet! */ RAY_COM_FREE(com, ncom); return (error); } /* * Runq entry for resetting driver and downloading start up structures to card */ static void ray_init_download(struct ray_softc *sc, struct ray_comq_entry *com) { struct ifnet *ifp = &sc->arpcom.ac_if; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, ""); /* * If card already running we don't need to download. */ if ((com->c_flags & RAY_COM_FCHKRUNNING) && (ifp->if_flags & IFF_RUNNING)) { ray_com_runq_done(sc); return; } /* * Reset instance variables * * The first set are network parameters that are read back when * the card starts or joins the network. * * The second set are network parameters that are downloaded to * the card. * * The third set are driver parameters. * * All of the variables in these sets can be updated by the * card or ioctls. 
* * XXX see the ray_attach section for stuff to move */ sc->sc_d.np_upd_param = 0; bzero(sc->sc_d.np_bss_id, ETHER_ADDR_LEN); sc->sc_d.np_inited = 0; sc->sc_d.np_def_txrate = RAY_MIB_BASIC_RATE_SET_DEFAULT; sc->sc_d.np_encrypt = 0; sc->sc_d.np_net_type = RAY_MIB_NET_TYPE_DEFAULT; bzero(sc->sc_d.np_ssid, IEEE80211_NWID_LEN); strncpy(sc->sc_d.np_ssid, RAY_MIB_SSID_DEFAULT, IEEE80211_NWID_LEN); sc->sc_d.np_priv_start = RAY_MIB_PRIVACY_MUST_START_DEFAULT; sc->sc_d.np_priv_join = RAY_MIB_PRIVACY_CAN_JOIN_DEFAULT; sc->sc_d.np_ap_status = RAY_MIB_AP_STATUS_DEFAULT; sc->sc_d.np_promisc = !!(ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)); sc->framing = SC_FRAMING_WEBGEAR; /* XXX this is a hack whilst I transition the code. The instance * XXX variables above should be set somewhere else. This is needed for * XXX start_join */ bcopy(&sc->sc_d, &com->c_desired, sizeof(struct ray_nw_param)); /* * Download the right firmware defaults */ if (sc->sc_version == RAY_ECFS_BUILD_4) ray_init_download_v4(sc, com); else ray_init_download_v5(sc, com); /* * Kick the card */ ray_ccs_fill(sc, com->c_ccs, RAY_CMD_DOWNLOAD_PARAMS); ray_com_ecf(sc, com); } #define PUT2(p, v) \ do { (p)[0] = ((v >> 8) & 0xff); (p)[1] = (v & 0xff); } while(0) /* * Firmware version 4 defaults - see if_raymib.h for details */ static void ray_init_download_v4(struct ray_softc *sc, struct ray_comq_entry *com) { struct ray_mib_4 ray_mib_4_default; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, ""); RAY_MAP_CM(sc); #define MIB4(m) ray_mib_4_default.##m MIB4(mib_net_type) = com->c_desired.np_net_type; MIB4(mib_ap_status) = com->c_desired.np_ap_status; bcopy(com->c_desired.np_ssid, MIB4(mib_ssid), IEEE80211_NWID_LEN); MIB4(mib_scan_mode) = RAY_MIB_SCAN_MODE_DEFAULT; MIB4(mib_apm_mode) = RAY_MIB_APM_MODE_DEFAULT; bcopy(sc->sc_station_addr, MIB4(mib_mac_addr), ETHER_ADDR_LEN); PUT2(MIB4(mib_frag_thresh), RAY_MIB_FRAG_THRESH_DEFAULT); PUT2(MIB4(mib_dwell_time), RAY_MIB_DWELL_TIME_V4); PUT2(MIB4(mib_beacon_period), 
RAY_MIB_BEACON_PERIOD_V4); MIB4(mib_dtim_interval) = RAY_MIB_DTIM_INTERVAL_DEFAULT; MIB4(mib_max_retry) = RAY_MIB_MAX_RETRY_DEFAULT; MIB4(mib_ack_timo) = RAY_MIB_ACK_TIMO_DEFAULT; MIB4(mib_sifs) = RAY_MIB_SIFS_DEFAULT; MIB4(mib_difs) = RAY_MIB_DIFS_DEFAULT; MIB4(mib_pifs) = RAY_MIB_PIFS_V4; PUT2(MIB4(mib_rts_thresh), RAY_MIB_RTS_THRESH_DEFAULT); PUT2(MIB4(mib_scan_dwell), RAY_MIB_SCAN_DWELL_V4); PUT2(MIB4(mib_scan_max_dwell), RAY_MIB_SCAN_MAX_DWELL_V4); MIB4(mib_assoc_timo) = RAY_MIB_ASSOC_TIMO_DEFAULT; MIB4(mib_adhoc_scan_cycle) = RAY_MIB_ADHOC_SCAN_CYCLE_DEFAULT; MIB4(mib_infra_scan_cycle) = RAY_MIB_INFRA_SCAN_CYCLE_DEFAULT; MIB4(mib_infra_super_scan_cycle) = RAY_MIB_INFRA_SUPER_SCAN_CYCLE_DEFAULT; MIB4(mib_promisc) = com->c_desired.np_promisc; PUT2(MIB4(mib_uniq_word), RAY_MIB_UNIQ_WORD_DEFAULT); MIB4(mib_slot_time) = RAY_MIB_SLOT_TIME_V4; MIB4(mib_roam_low_snr_thresh) = RAY_MIB_ROAM_LOW_SNR_THRESH_DEFAULT; MIB4(mib_low_snr_count) = RAY_MIB_LOW_SNR_COUNT_DEFAULT; MIB4(mib_infra_missed_beacon_count) = RAY_MIB_INFRA_MISSED_BEACON_COUNT_DEFAULT; MIB4(mib_adhoc_missed_beacon_count) = RAY_MIB_ADHOC_MISSED_BEACON_COUNT_DEFAULT; MIB4(mib_country_code) = RAY_MIB_COUNTRY_CODE_DEFAULT; MIB4(mib_hop_seq) = RAY_MIB_HOP_SEQ_DEFAULT; MIB4(mib_hop_seq_len) = RAY_MIB_HOP_SEQ_LEN_V4; MIB4(mib_cw_max) = RAY_MIB_CW_MAX_V4; MIB4(mib_cw_min) = RAY_MIB_CW_MIN_V4; MIB4(mib_noise_filter_gain) = RAY_MIB_NOISE_FILTER_GAIN_DEFAULT; MIB4(mib_noise_limit_offset) = RAY_MIB_NOISE_LIMIT_OFFSET_DEFAULT; MIB4(mib_rssi_thresh_offset) = RAY_MIB_RSSI_THRESH_OFFSET_DEFAULT; MIB4(mib_busy_thresh_offset) = RAY_MIB_BUSY_THRESH_OFFSET_DEFAULT; MIB4(mib_sync_thresh) = RAY_MIB_SYNC_THRESH_DEFAULT; MIB4(mib_test_mode) = RAY_MIB_TEST_MODE_DEFAULT; MIB4(mib_test_min_chan) = RAY_MIB_TEST_MIN_CHAN_DEFAULT; MIB4(mib_test_max_chan) = RAY_MIB_TEST_MAX_CHAN_DEFAULT; #undef MIB4 SRAM_WRITE_REGION(sc, RAY_HOST_TO_ECF_BASE, &ray_mib_4_default, sizeof(ray_mib_4_default)); } /* * Firmware version 5 defaults - see 
if_raymib.h for details */ static void ray_init_download_v5(struct ray_softc *sc, struct ray_comq_entry *com) { struct ray_mib_5 ray_mib_5_default; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, ""); RAY_MAP_CM(sc); #define MIB5(m) ray_mib_5_default.##m MIB5(mib_net_type) = com->c_desired.np_net_type; MIB5(mib_ap_status) = com->c_desired.np_ap_status; bcopy(com->c_desired.np_ssid, MIB5(mib_ssid), IEEE80211_NWID_LEN); MIB5(mib_scan_mode) = RAY_MIB_SCAN_MODE_DEFAULT; MIB5(mib_apm_mode) = RAY_MIB_APM_MODE_DEFAULT; bcopy(sc->sc_station_addr, MIB5(mib_mac_addr), ETHER_ADDR_LEN); PUT2(MIB5(mib_frag_thresh), RAY_MIB_FRAG_THRESH_DEFAULT); PUT2(MIB5(mib_dwell_time), RAY_MIB_DWELL_TIME_V5); PUT2(MIB5(mib_beacon_period), RAY_MIB_BEACON_PERIOD_V5); MIB5(mib_dtim_interval) = RAY_MIB_DTIM_INTERVAL_DEFAULT; MIB5(mib_max_retry) = RAY_MIB_MAX_RETRY_DEFAULT; MIB5(mib_ack_timo) = RAY_MIB_ACK_TIMO_DEFAULT; MIB5(mib_sifs) = RAY_MIB_SIFS_DEFAULT; MIB5(mib_difs) = RAY_MIB_DIFS_DEFAULT; MIB5(mib_pifs) = RAY_MIB_PIFS_V5; PUT2(MIB5(mib_rts_thresh), RAY_MIB_RTS_THRESH_DEFAULT); PUT2(MIB5(mib_scan_dwell), RAY_MIB_SCAN_DWELL_V5); PUT2(MIB5(mib_scan_max_dwell), RAY_MIB_SCAN_MAX_DWELL_V5); MIB5(mib_assoc_timo) = RAY_MIB_ASSOC_TIMO_DEFAULT; MIB5(mib_adhoc_scan_cycle) = RAY_MIB_ADHOC_SCAN_CYCLE_DEFAULT; MIB5(mib_infra_scan_cycle) = RAY_MIB_INFRA_SCAN_CYCLE_DEFAULT; MIB5(mib_infra_super_scan_cycle) = RAY_MIB_INFRA_SUPER_SCAN_CYCLE_DEFAULT; MIB5(mib_promisc) = com->c_desired.np_promisc; PUT2(MIB5(mib_uniq_word), RAY_MIB_UNIQ_WORD_DEFAULT); MIB5(mib_slot_time) = RAY_MIB_SLOT_TIME_V5; MIB5(mib_roam_low_snr_thresh) = RAY_MIB_ROAM_LOW_SNR_THRESH_DEFAULT; MIB5(mib_low_snr_count) = RAY_MIB_LOW_SNR_COUNT_DEFAULT; MIB5(mib_infra_missed_beacon_count) = RAY_MIB_INFRA_MISSED_BEACON_COUNT_DEFAULT; MIB5(mib_adhoc_missed_beacon_count) = RAY_MIB_ADHOC_MISSED_BEACON_COUNT_DEFAULT; MIB5(mib_country_code) = RAY_MIB_COUNTRY_CODE_DEFAULT; MIB5(mib_hop_seq) = RAY_MIB_HOP_SEQ_DEFAULT; MIB5(mib_hop_seq_len) = 
RAY_MIB_HOP_SEQ_LEN_V5; PUT2(MIB5(mib_cw_max), RAY_MIB_CW_MAX_V5); PUT2(MIB5(mib_cw_min), RAY_MIB_CW_MIN_V5); MIB5(mib_noise_filter_gain) = RAY_MIB_NOISE_FILTER_GAIN_DEFAULT; MIB5(mib_noise_limit_offset) = RAY_MIB_NOISE_LIMIT_OFFSET_DEFAULT; MIB5(mib_rssi_thresh_offset) = RAY_MIB_RSSI_THRESH_OFFSET_DEFAULT; MIB5(mib_busy_thresh_offset) = RAY_MIB_BUSY_THRESH_OFFSET_DEFAULT; MIB5(mib_sync_thresh) = RAY_MIB_SYNC_THRESH_DEFAULT; MIB5(mib_test_mode) = RAY_MIB_TEST_MODE_DEFAULT; MIB5(mib_test_min_chan) = RAY_MIB_TEST_MIN_CHAN_DEFAULT; MIB5(mib_test_max_chan) = RAY_MIB_TEST_MAX_CHAN_DEFAULT; MIB5(mib_allow_probe_resp) = RAY_MIB_ALLOW_PROBE_RESP_DEFAULT; MIB5(mib_privacy_must_start) = com->c_desired.np_priv_start; MIB5(mib_privacy_can_join) = com->c_desired.np_priv_join; MIB5(mib_basic_rate_set[0]) = com->c_desired.np_def_txrate; #undef MIB5 SRAM_WRITE_REGION(sc, RAY_HOST_TO_ECF_BASE, &ray_mib_5_default, sizeof(ray_mib_5_default)); } #undef PUT2 /* * Download completion routine */ static void ray_init_download_done(struct ray_softc *sc, size_t ccs) { RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, ""); RAY_COM_CHECK(sc, ccs); ray_com_ecf_done(sc); } /* * Runq entry to empty the multicast filter list */ static void ray_init_mcast(struct ray_softc *sc, struct ray_comq_entry *com) { struct ifnet *ifp = &sc->arpcom.ac_if; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, ""); RAY_MAP_CM(sc); /* * If card is already running we don't need to reset the list */ if ((com->c_flags & RAY_COM_FCHKRUNNING) && (ifp->if_flags & IFF_RUNNING)) { ray_com_runq_done(sc); return; } /* * Kick the card */ ray_ccs_fill(sc, com->c_ccs, RAY_CMD_UPDATE_MCAST); SRAM_WRITE_FIELD_1(sc, com->c_ccs, ray_cmd_update_mcast, c_nmcast, 0); ray_com_ecf(sc, com); } /* * Runq entry to starting or joining a network */ static void ray_init_sj(struct ray_softc *sc, struct ray_comq_entry *com) { struct ifnet *ifp = &sc->arpcom.ac_if; struct ray_net_params np; int update; RAY_DPRINTF(sc, RAY_DBG_SUBR | 
RAY_DBG_STARTJOIN, ""); RAY_MAP_CM(sc); /* * If card already running we don't need to start the n/w. */ if ((com->c_flags & RAY_COM_FCHKRUNNING) && (ifp->if_flags & IFF_RUNNING)) { ray_com_runq_done(sc); return; } sc->sc_havenet = 0; if (sc->sc_d.np_net_type == RAY_MIB_NET_TYPE_ADHOC) ray_ccs_fill(sc, com->c_ccs, RAY_CMD_START_NET); else ray_ccs_fill(sc, com->c_ccs, RAY_CMD_JOIN_NET); update = 0; if (sc->sc_c.np_net_type != sc->sc_d.np_net_type) update++; if (bcmp(sc->sc_c.np_ssid, sc->sc_d.np_ssid, IEEE80211_NWID_LEN)) update++; if (sc->sc_c.np_priv_join != sc->sc_d.np_priv_join) update++; if (sc->sc_c.np_priv_start != sc->sc_d.np_priv_start) update++; RAY_DPRINTF(sc, RAY_DBG_STARTJOIN, "%s updating nw params", update?"is":"not"); if (update) { bzero(&np, sizeof(np)); np.p_net_type = sc->sc_d.np_net_type; bcopy(sc->sc_d.np_ssid, np.p_ssid, IEEE80211_NWID_LEN); np.p_privacy_must_start = sc->sc_d.np_priv_start; np.p_privacy_can_join = sc->sc_d.np_priv_join; SRAM_WRITE_REGION(sc, RAY_HOST_TO_ECF_BASE, &np, sizeof(np)); SRAM_WRITE_FIELD_1(sc, com->c_ccs, ray_cmd_net, c_upd_param, 1); } else SRAM_WRITE_FIELD_1(sc, com->c_ccs, ray_cmd_net, c_upd_param, 0); /* * Kick the card */ ray_com_ecf(sc, com); } /* * Complete start command or intermediate step in assoc command */ static void ray_init_sj_done(struct ray_softc *sc, size_t ccs) { struct ifnet *ifp = &sc->arpcom.ac_if; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, ""); RAY_MAP_CM(sc); RAY_COM_CHECK(sc, ccs); /* * Read back network parameters that the ECF sets */ SRAM_READ_REGION(sc, ccs, &sc->sc_c.p_1, sizeof(struct ray_cmd_net)); /* Adjust values for buggy build 4 */ if (sc->sc_c.np_def_txrate == 0x55) sc->sc_c.np_def_txrate = sc->sc_d.np_def_txrate; if (sc->sc_c.np_encrypt == 0x55) sc->sc_c.np_encrypt = sc->sc_d.np_encrypt; /* * Update our local state if we updated the network parameters * when the START_NET or JOIN_NET was issued. 
*/ if (sc->sc_c.np_upd_param) { RAY_DPRINTF(sc, RAY_DBG_STARTJOIN, "updated parameters"); SRAM_READ_REGION(sc, RAY_HOST_TO_ECF_BASE, &sc->sc_c.p_2, sizeof(struct ray_net_params)); } /* * Hurrah! The network is now active. * * Clearing IFF_OACTIVE will ensure that the system will send us * packets. Just before we return from the interrupt context * we check to see if packets have been queued. */ if (SRAM_READ_FIELD_1(sc, ccs, ray_cmd, c_cmd) == RAY_CMD_START_NET) { sc->sc_havenet = 1; ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; } ray_com_ecf_done(sc); } /* * Runq entry to starting an association with an access point */ static void ray_init_assoc(struct ray_softc *sc, struct ray_comq_entry *com) { struct ifnet *ifp = &sc->arpcom.ac_if; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, ""); /* * Don't do anything if we are not in a managed network */ if (sc->sc_c.np_net_type != RAY_MIB_NET_TYPE_INFRA) { ray_com_runq_done(sc); return; } /* * If card already running we don't need to associate. */ if ((com->c_flags & RAY_COM_FCHKRUNNING) && (ifp->if_flags & IFF_RUNNING)) { ray_com_runq_done(sc); return; } /* * Kick the card */ ray_ccs_fill(sc, com->c_ccs, RAY_CMD_START_ASSOC); ray_com_ecf(sc, com); } /* * Complete association */ static void ray_init_assoc_done(struct ray_softc *sc, size_t ccs) { struct ifnet *ifp = &sc->arpcom.ac_if; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, ""); RAY_COM_CHECK(sc, ccs); /* * Hurrah! The network is now active. * * Clearing IFF_OACTIVE will ensure that the system will send us * packets. Just before we return from the interrupt context * we check to see if packets have been queued. */ sc->sc_havenet = 1; ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; ray_com_ecf_done(sc); } /* * Network stop. * * Inhibit card - if we can't prevent reception then do not worry; * stopping a NIC only guarantees no TX. 
 *
 * The change to the interface flags is done via the runq so that any
 * existing commands can execute normally.
 */
static int
ray_stop_user(struct ray_softc *sc)
{
	struct ray_comq_entry *com[1];
	int error, ncom;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STOP, "");

	/*
	 * Schedule the real stop routine on the command run queue.
	 */
	ncom = 0;
	com[ncom++] = RAY_COM_MALLOC(ray_stop, 0);

	RAY_COM_RUNQ(sc, com, ncom, "raystop", error);

	/* XXX no real error processing from anything yet! */

	RAY_COM_FREE(com, ncom);

	return (error);
}

/*
 * Runq entry for stopping the interface activity
 */
static void
ray_stop(struct ray_softc *sc, struct ray_comq_entry *com)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mbuf *m;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STOP, "");

	/*
	 * Mark as not running and drain output queue
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
	/* Free every mbuf still sitting on the send queue. */
	for (;;) {
		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
		m_freem(m);
	}

	ray_com_runq_done(sc);
}

/*
 * Interface watchdog - just log the timeout; recovery is not attempted.
 */
static void
ray_watchdog(struct ifnet *ifp)
{
	struct ray_softc *sc = ifp->if_softc;

	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");
	RAY_MAP_CM(sc);

	/* Guard against a detached/removed card. */
	if ((sc == NULL) || (sc->gone))
		return;

	RAY_PRINTF(sc, "watchdog timeout");
}

/*
 * Transmit packet handling
 */

/*
 * Send a packet.
 *
 * We make two assumptions here:
 *  1) That the current priority is set to splimp _before_ this code
 *     is called *and* is returned to the appropriate priority after
 *     return
 *  2) That the IFF_OACTIVE flag is checked before this code is called
 *     (i.e. that the output part of the interface is idle)
 *
 * A simple one packet at a time TX routine is used - we don't bother
 * chaining TX buffers. Performance is sufficient to max out the
 * wireless link on a P75.
 *
 * AST J30 Windows 95A (100MHz Pentium) to
 *   Libretto 50CT FreeBSD-3.1 (75MHz Pentium)	167.37kB/s
 *   Nonname box FreeBSD-3.4 (233MHz AMD K6)	161.82kB/s
 *
 * Libretto 50CT FreeBSD-3.1 (75MHz Pentium) to
 *   AST J30 Windows 95A (100MHz Pentium)	167.37kB/s
 *   Nonname box FreeBSD-3.4 (233MHz AMD K6)	161.38kB/s
 *
 * Given that 160kB/s is saturating the 2Mb/s wireless link we
 * are about there.
 *
 * In short I'm happy that the added complexity of chaining TX
 * packets together isn't worth it for my machines.
 */
static void
ray_tx(struct ifnet *ifp)
{
	struct ray_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct ether_header *eh;
	size_t ccs, bufp;
	int pktlen, len;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_TX, "");
	RAY_MAP_CM(sc);

	/*
	 * Some simple checks first - some are overkill
	 */
	if ((sc == NULL) || (sc->gone))
		return;
	if (!(ifp->if_flags & IFF_RUNNING)) {
		RAY_RECERR(sc, "cannot transmit - not running");
		return;
	}
	if (!sc->sc_havenet) {
		RAY_RECERR(sc, "cannot transmit - no network");
		return;
	}
	if (!RAY_ECF_READY(sc)) {
		/* Can't assume that the ECF is busy because of this driver */
		/* Retry later via the tx timeout handler. */
		RAY_RECERR(sc, "cannot transmit - ECF busy");
		sc->tx_timerh = timeout(ray_tx_timo, sc, RAY_TX_TIMEOUT);
		return;
	} else
		untimeout(ray_tx_timo, sc, sc->tx_timerh);

	/*
	 * We find a ccs before we process the mbuf so that we are sure it
	 * is worthwhile processing the packet. All errors in the mbuf
	 * processing are either errors in the mbuf or gross configuration
	 * errors and the packet wouldn't get through anyway.
	 */
	if (ray_ccs_tx(sc, &ccs, &bufp)) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	/*
	 * Get the mbuf and process it - we have to remember to free the
	 * ccs if there are any errors
	 */
	IF_DEQUEUE(&ifp->if_snd, m0);
	if (m0 == NULL) {
		RAY_CCS_FREE(sc, ccs);
		return;
	}
	eh = mtod(m0, struct ether_header *);

	pktlen = m0->m_pkthdr.len;
	if (pktlen > ETHER_MAX_LEN - ETHER_CRC_LEN) {
		RAY_RECERR(sc, "mbuf too long %d", pktlen);
		RAY_CCS_FREE(sc, ccs);
		ifp->if_oerrors++;
		m_freem(m0);
		return;
	}

	/*
	 * Write the header according to network type etc.
	 */
	if (sc->sc_c.np_net_type == RAY_MIB_NET_TYPE_ADHOC)
		bufp = ray_tx_wrhdr(sc, bufp,
		    IEEE80211_FC0_TYPE_DATA,
		    IEEE80211_FC1_DIR_NODS,
		    eh->ether_dhost,
		    eh->ether_shost,
		    sc->sc_c.np_bss_id);
	else
		if (sc->sc_c.np_ap_status == RAY_MIB_AP_STATUS_TERMINAL)
			bufp = ray_tx_wrhdr(sc, bufp,
			    IEEE80211_FC0_TYPE_DATA,
			    IEEE80211_FC1_DIR_TODS,
			    sc->sc_c.np_bss_id,
			    eh->ether_shost,
			    eh->ether_dhost);
		else
			bufp = ray_tx_wrhdr(sc, bufp,
			    IEEE80211_FC0_TYPE_DATA,
			    IEEE80211_FC1_DIR_FROMDS,
			    eh->ether_dhost,
			    sc->sc_c.np_bss_id,
			    eh->ether_shost);

	/*
	 * Translation - capability as described earlier
	 *
	 * Remove/modify/addto the 802.3 and 802.2 headers as needed.
	 *
	 * We've pulled up the mbuf for you.
	 *
	 */
	if (m0->m_len < sizeof(struct ether_header))
		m0 = m_pullup(m0, sizeof(struct ether_header));
	if (m0 == NULL) {
		RAY_RECERR(sc, "could not pullup ether");
		RAY_CCS_FREE(sc, ccs);
		ifp->if_oerrors++;
		return;
	}
	switch (sc->framing) {

	case SC_FRAMING_WEBGEAR:
		/* Nice and easy - nothing! (just add an 802.11 header) */
		break;

	default:
		RAY_RECERR(sc, "unknown framing type %d", sc->framing);
		RAY_CCS_FREE(sc, ccs);
		ifp->if_oerrors++;
		m_freem(m0);
		return;

	}
	if (m0 == NULL) {
		RAY_RECERR(sc, "could not translate packet");
		RAY_CCS_FREE(sc, ccs);
		ifp->if_oerrors++;
		return;
	}

	/*
	 * Copy the mbuf to the buffer in common memory
	 *
	 * We panic and don't bother wrapping as ethernet packets are 1518
	 * bytes, we checked the mbuf earlier, and our TX buffers are 2048
	 * bytes. We don't have 530 bytes of headers etc. so something
	 * must be fubar.
	 */
	pktlen = sizeof(struct ieee80211_frame);
	for (m = m0; m != NULL; m = m->m_next) {
		pktlen += m->m_len;
		if ((len = m->m_len) == 0)
			continue;
		if ((bufp + len) < RAY_TX_END)
			SRAM_WRITE_REGION(sc, bufp, mtod(m, u_int8_t *), len);
		else
			RAY_PANIC(sc, "tx buffer overflow");
		bufp += len;
	}
	RAY_MBUF_DUMP(sc, RAY_DBG_TX, m0, "ray_tx");

	/*
	 * Send it off
	 */
	if (ray_tx_send(sc, ccs, pktlen, eh->ether_dhost))
		ifp->if_oerrors++;
	else
		ifp->if_opackets++;
	m_freem(m0);
}

/*
 * Start timeout routine.
 *
 * Used when card was busy but we needed to send a packet.
 */
static void
ray_tx_timo(void *xsc)
{
	struct ray_softc *sc = (struct ray_softc *)xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	int s;

	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");

	/* Only retry if the interface is idle and packets are queued. */
	if (!(ifp->if_flags & IFF_OACTIVE) && (ifp->if_snd.ifq_head != NULL)) {
		s = splimp();
		ray_tx(ifp);
		splx(s);
	}
}

/*
 * Write an 802.11 header into the Tx buffer space and return the
 * adjusted buffer pointer.
 */
static size_t
ray_tx_wrhdr(struct ray_softc *sc, size_t bufp, u_int8_t type, u_int8_t fc1, u_int8_t *addr1, u_int8_t *addr2, u_int8_t *addr3)
{
	struct ieee80211_frame header;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_TX, "");
	RAY_MAP_CM(sc);

	bzero(&header, sizeof(struct ieee80211_frame));
	header.i_fc[0] = (IEEE80211_FC0_VERSION_0 | type);
	header.i_fc[1] = fc1;
	bcopy(addr1, header.i_addr1, ETHER_ADDR_LEN);
	bcopy(addr2, header.i_addr2, ETHER_ADDR_LEN);
	bcopy(addr3, header.i_addr3, ETHER_ADDR_LEN);

	SRAM_WRITE_REGION(sc, bufp, (u_int8_t *)&header,
	    sizeof(struct ieee80211_frame));

	return (bufp + sizeof(struct ieee80211_frame));
}

/*
 * Fill in a few loose ends and kick the card to send the packet
 *
 * Returns 0 on success, 1 on failure
 */
static int
ray_tx_send(struct ray_softc *sc, size_t ccs, int pktlen, u_int8_t *dst)
{
	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_TX, "");
	RAY_MAP_CM(sc);

	if (!RAY_ECF_READY(sc)) {
		/*
		 * XXX If this can really happen perhaps we need to save
		 * XXX the chain and use it later.
		 */
		RAY_RECERR(sc, "ECF busy, dropping packet");
		RAY_CCS_FREE(sc, ccs);
		return (1);
	}

	SRAM_WRITE_FIELD_2(sc, ccs, ray_cmd_tx, c_len, pktlen);
	SRAM_WRITE_FIELD_1(sc, ccs, ray_cmd_tx, c_antenna,
	    ray_tx_best_antenna(sc, dst));
	SRAM_WRITE_1(sc, RAY_SCB_CCSI, RAY_CCS_INDEX(ccs));
	RAY_ECF_START_CMD(sc);

	return (0);
}

/*
 * Determine best antenna to use from rx level and antenna cache
 */
static u_int8_t
ray_tx_best_antenna(struct ray_softc *sc, u_int8_t *dst)
{
	struct ray_siglev *sl;
	int i;
	u_int8_t antenna;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_TX, "");

	/* Build 4 firmware has no antenna selection. */
	if (sc->sc_version == RAY_ECFS_BUILD_4)
		return (0);

	/* try to find host */
	for (i = 0; i < RAY_NSIGLEVRECS; i++) {
		sl = &sc->sc_siglevs[i];
		if (bcmp(sl->rsl_host, dst, ETHER_ADDR_LEN) == 0)
			goto found;
	}
	/* not found, return default setting */
	return (0);

found:
	/* This is a simple thresholding scheme that takes the mean
	 * of the best antenna history. This is okay but as it is a
	 * filter, it adds a bit of lag in situations where the
	 * best antenna swaps from one side to the other slowly. Don't know
	 * how likely this is given the horrible fading though.
	 */
	antenna = 0;
	for (i = 0; i < RAY_NANTENNA; i++) {
		antenna += sl->rsl_antennas[i];
	}

	return (antenna > (RAY_NANTENNA >> 1));
}

/*
 * Transmit now complete so clear ccs and network flags.
 */
static void
ray_tx_done(struct ray_softc *sc, size_t ccs)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	char *ss[] = RAY_CCS_STATUS_STRINGS;
	u_int8_t status;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_TX, "");
	RAY_MAP_CM(sc);

	status = SRAM_READ_FIELD_1(sc, ccs, ray_cmd, c_status);
	if (status != RAY_CCS_STATUS_COMPLETE) {
		RAY_RECERR(sc, "tx completed but status is %s", ss[status]);
		ifp->if_oerrors++;
	}

	RAY_CCS_FREE(sc, ccs);
	ifp->if_timer = 0;
	if (ifp->if_flags & IFF_OACTIVE)
		ifp->if_flags &= ~IFF_OACTIVE;
}

/*
 * Receiver packet handling
 */

/*
 * Receive a packet from the card
 */
static void
ray_rx(struct ray_softc *sc, size_t rcs)
{
	struct ieee80211_frame *header;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct mbuf *m0;
	size_t pktlen, fraglen, readlen, tmplen;
	size_t bufp, ebufp;
	u_int8_t siglev, antenna;
	u_int first, ni, i;
	u_int8_t *mp;

	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");
	RAY_MAP_CM(sc);

	RAY_DPRINTF(sc, RAY_DBG_CCS, "using rcs 0x%x", rcs);

	m0 = NULL;
	readlen = 0;

	/*
	 * Get first part of packet and the length. Do some sanity checks
	 * and get a mbuf.
	 */
	first = RAY_CCS_INDEX(rcs);
	pktlen = SRAM_READ_FIELD_2(sc, rcs, ray_cmd_rx, c_pktlen);
	siglev = SRAM_READ_FIELD_1(sc, rcs, ray_cmd_rx, c_siglev);
	antenna = SRAM_READ_FIELD_1(sc, rcs, ray_cmd_rx, c_antenna);

	if ((pktlen > MCLBYTES) || (pktlen < sizeof(struct ieee80211_frame))) {
		RAY_RECERR(sc, "packet too big or too small");
		ifp->if_ierrors++;
		goto skip_read;
	}

	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL) {
		RAY_RECERR(sc, "MGETHDR failed");
		ifp->if_ierrors++;
		goto skip_read;
	}
	/* Packet won't fit in a plain header mbuf - attach a cluster. */
	if (pktlen > MHLEN) {
		MCLGET(m0, M_DONTWAIT);
		if (!(m0->m_flags & M_EXT)) {
			RAY_RECERR(sc, "MCLGET failed");
			ifp->if_ierrors++;
			m_freem(m0);
			m0 = NULL;
			goto skip_read;
		}
	}
	m0->m_pkthdr.rcvif = ifp;
	m0->m_pkthdr.len = pktlen;
	m0->m_len = pktlen;
	mp = mtod(m0, u_int8_t *);

	/*
	 * Walk the fragment chain to build the complete packet.
	 *
	 * The use of two index variables removes a race with the
	 * hardware. If one index were used the clearing of the CCS would
	 * happen before reading the next pointer and the hardware can get in.
	 * Not my idea but verbatim from the NetBSD driver.
	 */
	i = ni = first;
	while ((i = ni) && (i != RAY_CCS_LINK_NULL)) {
		rcs = RAY_CCS_ADDRESS(i);
		ni = SRAM_READ_FIELD_1(sc, rcs, ray_cmd_rx, c_nextfrag);
		bufp = SRAM_READ_FIELD_2(sc, rcs, ray_cmd_rx, c_bufp);
		fraglen = SRAM_READ_FIELD_2(sc, rcs, ray_cmd_rx, c_len);
		RAY_DPRINTF(sc, RAY_DBG_RX,
		    "frag index %d len %d bufp 0x%x ni %d",
		    i, fraglen, (int)bufp, ni);

		if (fraglen + readlen > pktlen) {
			RAY_RECERR(sc, "bad length current 0x%x pktlen 0x%x",
			    fraglen + readlen, pktlen);
			ifp->if_ierrors++;
			m_freem(m0);
			m0 = NULL;
			goto skip_read;
		}
		if ((i < RAY_RCS_FIRST) || (i > RAY_RCS_LAST)) {
			RAY_RECERR(sc, "bad rcs index 0x%x", i);
			ifp->if_ierrors++;
			m_freem(m0);
			m0 = NULL;
			goto skip_read;
		}

		/* The receive area is a ring; wrap the copy if needed. */
		ebufp = bufp + fraglen;
		if (ebufp <= RAY_RX_END)
			SRAM_READ_REGION(sc, bufp, mp, fraglen);
		else {
			SRAM_READ_REGION(sc, bufp, mp,
			    (tmplen = RAY_RX_END - bufp));
			SRAM_READ_REGION(sc, RAY_RX_BASE, mp + tmplen,
			    ebufp - RAY_RX_END);
		}
		mp += fraglen;
		readlen += fraglen;
	}

skip_read:

	/*
	 * Walk the chain again to free the rcss.
	 */
	i = ni = first;
	while ((i = ni) && (i != RAY_CCS_LINK_NULL)) {
		rcs = RAY_CCS_ADDRESS(i);
		ni = SRAM_READ_FIELD_1(sc, rcs, ray_cmd_rx, c_nextfrag);
		RAY_CCS_FREE(sc, rcs);
	}

	if (m0 == NULL)
		return;

	/*
	 * Check the 802.11 packet type and hand off to
	 * appropriate functions.
	 */
	header = mtod(m0, struct ieee80211_frame *);
	if ((header->i_fc[0] & IEEE80211_FC0_VERSION_MASK) !=
	    IEEE80211_FC0_VERSION_0) {
		RAY_RECERR(sc, "header not version 0 fc0 0x%x",
		    header->i_fc[0]);
		ifp->if_ierrors++;
		m_freem(m0);
		return;
	}
	switch (header->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {

	case IEEE80211_FC0_TYPE_DATA:
		ray_rx_data(sc, m0, siglev, antenna);
		break;

	case IEEE80211_FC0_TYPE_MGT:
		ray_rx_mgt(sc, m0);
		break;

	case IEEE80211_FC0_TYPE_CTL:
		ray_rx_ctl(sc, m0);
		break;

	default:
		RAY_RECERR(sc, "unknown packet fc0 0x%x", header->i_fc[0]);
		ifp->if_ierrors++;
		m_freem(m0);

	}
}

/*
 * Deal with DATA packet types
 */
static void
ray_rx_data(struct ray_softc *sc, struct mbuf *m0, u_int8_t siglev, u_int8_t antenna)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ieee80211_frame *header = mtod(m0, struct ieee80211_frame *);
	struct ether_header *eh;
	u_int8_t *sa, *da, *ra, *ta;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_MGT, "");

	/*
	 * Check the data packet subtype, some packets have
	 * nothing in them so we will drop them here.
	 */
	switch (header->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {

	case IEEE80211_FC0_SUBTYPE_DATA:
	case IEEE80211_FC0_SUBTYPE_CF_ACK:
	case IEEE80211_FC0_SUBTYPE_CF_POLL:
	case IEEE80211_FC0_SUBTYPE_CF_ACPL:
		RAY_DPRINTF(sc, RAY_DBG_RX, "DATA packet");
		break;

	case IEEE80211_FC0_SUBTYPE_NODATA:
	case IEEE80211_FC0_SUBTYPE_CFACK:
	case IEEE80211_FC0_SUBTYPE_CFPOLL:
	case IEEE80211_FC0_SUBTYPE_CF_ACK_CF_ACK:
		RAY_DPRINTF(sc, RAY_DBG_RX, "NULL packet");
		m_freem(m0);
		return;
		break;

	default:
		RAY_RECERR(sc, "reserved DATA packet subtype 0x%x",
		    header->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK);
		ifp->if_ierrors++;
		m_freem(m0);
		return;
	}

	/*
	 * Obtain the .11 addresses. Packets may come via APs so the
	 * MAC addresses of the source/destination may be different
	 * from the node that actually sent us the packet.
	 *
	 * XXX At present this information is unused, although it is
	 * XXX available for translation routines to use.
	 */
	switch (header->i_fc[1] & IEEE80211_FC1_DIR_MASK) {

	case IEEE80211_FC1_DIR_NODS:
		da = header->i_addr1;
		sa = header->i_addr2;
		ra = ta = NULL;
		RAY_DPRINTF(sc, RAY_DBG_RX, "from %6D to %6D",
		    sa, ":", da, ":");
		break;

	case IEEE80211_FC1_DIR_FROMDS:
		da = header->i_addr1;
		ta = header->i_addr2;
		sa = header->i_addr3;
		ra = NULL;
		RAY_DPRINTF(sc, RAY_DBG_RX, "ap %6D from %6D to %6D",
		    ta, ":", sa, ":", da, ":");
		break;

	case IEEE80211_FC1_DIR_TODS:
		ra = header->i_addr1;
		sa = header->i_addr2;
		da = header->i_addr3;
		ta = NULL;
		RAY_DPRINTF(sc, RAY_DBG_RX, "from %6D to %6D ap %6D",
		    sa, ":", da, ":", ra, ":");
		break;

	case IEEE80211_FC1_DIR_DSTODS:
		ra = header->i_addr1;
		ta = header->i_addr2;
		da = header->i_addr3;
		/*
		 * NOTE(review): the 4th (SA) address of a DS-to-DS frame
		 * follows the fixed header; this pointer arithmetic adds
		 * sizeof(*header) (header+1), which looks intended, but
		 * verify the frame actually carries the 4-address format.
		 */
		sa = (u_int8_t *)header+1;
		RAY_DPRINTF(sc, RAY_DBG_RX, "from %6D to %6D ap %6D to %6D",
		    sa, ":", da, ":", ta, ":", ra, ":");
		break;
	}

	/*
	 * Translation - capability as described earlier
	 *
	 * Each case must remove the 802.11 header and leave an 802.3
	 * header in the mbuf copy addresses as needed.
	 */
	RAY_MBUF_DUMP(sc, RAY_DBG_RX, m0, "DATA packet before framing");
	switch (sc->framing) {

	case SC_FRAMING_WEBGEAR:
		/* Nice and easy - just trim the 802.11 header */
		m_adj(m0, sizeof(struct ieee80211_frame));
		break;

	default:
		RAY_RECERR(sc, "unknown framing type %d", sc->framing);
		ifp->if_ierrors++;
		m_freem(m0);
		return;

	}

	/*
	 * Finally, do a bit of house keeping before sending the packet
	 * up the stack.
	 */
	ifp->if_ipackets++;
	ray_rx_update_cache(sc, header->i_addr2, siglev, antenna);
	eh = mtod(m0, struct ether_header *);
	m_adj(m0, sizeof(struct ether_header));
	ether_input(ifp, eh, m0);
}

/*
 * Deal with MGT packet types
 */
static void
ray_rx_mgt(struct ray_softc *sc, struct mbuf *m0)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ieee80211_frame *header = mtod(m0, struct ieee80211_frame *);

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_MGT, "");

	if ((header->i_fc[1] & IEEE80211_FC1_DIR_MASK) !=
	    IEEE80211_FC1_DIR_NODS) {
		RAY_RECERR(sc, "MGT TODS/FROMDS wrong fc1 0x%x",
		    header->i_fc[1] & IEEE80211_FC1_DIR_MASK);
		ifp->if_ierrors++;
		m_freem(m0);
		return;
	}

	/*
	 * Check the mgt packet subtype, some packets should be
	 * dropped depending on the mode the station is in. See pg
	 * 52(60) of docs
	 *
	 * P - process, J - Junk, E - ECF deals with, I - Illegal
	 * ECF Processes
	 * ADHOC process or junk
	 * INFRA STA process or junk
	 * INFRA AP process or junk
	 *
	 * +PPP	IEEE80211_FC0_SUBTYPE_BEACON
	 * +EEE	IEEE80211_FC0_SUBTYPE_PROBE_REQ
	 * +EEE	IEEE80211_FC0_SUBTYPE_PROBE_RESP
	 *  PPP	IEEE80211_FC0_SUBTYPE_AUTH
	 *  PPP	IEEE80211_FC0_SUBTYPE_DEAUTH
	 *  JJP	IEEE80211_FC0_SUBTYPE_ASSOC_REQ
	 *  JPJ	IEEE80211_FC0_SUBTYPE_ASSOC_RESP
	 *  JPP	IEEE80211_FC0_SUBTYPE_DISASSOC
	 *  JJP	IEEE80211_FC0_SUBTYPE_REASSOC_REQ
	 *  JPJ	IEEE80211_FC0_SUBTYPE_REASSOC_RESP
	 * +EEE	IEEE80211_FC0_SUBTYPE_ATIM
	 */
	RAY_MBUF_DUMP(sc, RAY_DBG_RX, m0, "MGT packet");
	switch (header->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {

	case IEEE80211_FC0_SUBTYPE_BEACON:
		RAY_DPRINTF(sc, RAY_DBG_MGT, "BEACON MGT packet");
		/* XXX furtle anything interesting out */
		/* XXX Note that there are rules governing what beacons to
		   read, see 8802 S7.2.3, S11.1.2.3 */
		break;

	case IEEE80211_FC0_SUBTYPE_AUTH:
		RAY_DPRINTF(sc, RAY_DBG_MGT, "AUTH MGT packet");
		ray_rx_mgt_auth(sc, m0);
		break;

	case IEEE80211_FC0_SUBTYPE_DEAUTH:
		RAY_DPRINTF(sc, RAY_DBG_MGT, "DEAUTH MGT packet");
		/* XXX ray_rx_mgt_deauth(sc, m0); */
		break;

	case IEEE80211_FC0_SUBTYPE_ASSOC_REQ:
	case
	     IEEE80211_FC0_SUBTYPE_REASSOC_REQ:
		RAY_DPRINTF(sc, RAY_DBG_MGT, "(RE)ASSOC_REQ MGT packet");
		if ((sc->sc_d.np_net_type == RAY_MIB_NET_TYPE_INFRA) &&
		    (sc->sc_c.np_ap_status == RAY_MIB_AP_STATUS_AP))
			RAY_PANIC(sc, "can't be an AP yet"); /* XXX_ACTING_AP */
		break;

	case IEEE80211_FC0_SUBTYPE_ASSOC_RESP:
	case IEEE80211_FC0_SUBTYPE_REASSOC_RESP:
		RAY_DPRINTF(sc, RAY_DBG_MGT, "(RE)ASSOC_RESP MGT packet");
		if ((sc->sc_d.np_net_type == RAY_MIB_NET_TYPE_INFRA) &&
		    (sc->sc_c.np_ap_status == RAY_MIB_AP_STATUS_TERMINAL))
			RAY_PANIC(sc, "can't be in INFRA yet"); /* XXX_INFRA */
		break;

	case IEEE80211_FC0_SUBTYPE_DISASSOC:
		RAY_DPRINTF(sc, RAY_DBG_MGT, "DISASSOC MGT packet");
		if (sc->sc_d.np_net_type == RAY_MIB_NET_TYPE_INFRA)
			RAY_PANIC(sc, "can't be in INFRA yet"); /* XXX_INFRA */
		break;

	case IEEE80211_FC0_SUBTYPE_PROBE_REQ:
	case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
	case IEEE80211_FC0_SUBTYPE_ATIM:
		/* These are handled by the ECF and should never reach us. */
		RAY_RECERR(sc, "unexpected MGT packet subtype 0x%0x",
		    header->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK);
		ifp->if_ierrors++;
		break;

	default:
		RAY_RECERR(sc, "reserved MGT packet subtype 0x%x",
		    header->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK);
		ifp->if_ierrors++;
	}

	m_freem(m0);
}

/*
 * Deal with AUTH management packet types
 */
static void
ray_rx_mgt_auth(struct ray_softc *sc, struct mbuf *m0)
{
	struct ieee80211_frame *header = mtod(m0, struct ieee80211_frame *);
	/* Authentication fixed fields follow the 802.11 header. */
	ieee80211_mgt_auth_t auth = (u_int8_t *)(header+1);
	size_t ccs, bufp;
	int pktlen;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_MGT, "");
	RAY_MAP_CM(sc);

	switch (IEEE80211_AUTH_ALGORITHM(auth)) {

	case IEEE80211_AUTH_ALG_OPEN:
		RAY_RECERR(sc, "open system authentication request");
		if (IEEE80211_AUTH_TRANSACTION(auth) == 1) {
			/* XXX see sys/dev/awi/awk.c:awi_{recv|send}_auth */

			/*
			 * Send authentication response if possible. If
			 * we are out of CCSs we don't do anything, the
			 * other end will try again.
			 */
			if (ray_ccs_tx(sc, &ccs, &bufp)) {
				return;
			}
			RAY_DPRINTF(sc, RAY_DBG_MGT, "bufp %x", bufp);
			bufp = ray_tx_wrhdr(sc, bufp,
			    IEEE80211_FC0_TYPE_MGT |
				IEEE80211_FC0_SUBTYPE_AUTH,
			    IEEE80211_FC1_DIR_NODS,
			    header->i_addr2,
			    header->i_addr1,
			    header->i_addr3);
			/*
			 * Zero the six fixed-field bytes then set the
			 * transaction sequence number (byte 2) to 2.
			 */
			for (pktlen = 0; pktlen < 6; pktlen++)
				SRAM_WRITE_1(sc, bufp+pktlen, 0);
			pktlen += sizeof(struct ieee80211_frame);
			SRAM_WRITE_1(sc, bufp+2, 2);
			RAY_DPRINTF(sc, RAY_DBG_MGT, "dump start %x",
			    bufp-pktlen+6);
			RAY_DHEX8(sc, RAY_DBG_MGT, bufp-pktlen+6, pktlen,
			    "AUTH MGT response to Open System request");
			(void)ray_tx_send(sc, ccs, pktlen, header->i_addr2);

		} else if (IEEE80211_AUTH_TRANSACTION(auth) == 2) {
			/*
			 * XXX probably need a lot more than this
			 * XXX like initiating an auth sequence
			 */
			if (IEEE80211_AUTH_STATUS(auth) !=
			    IEEE80211_STATUS_SUCCESS)
				RAY_RECERR(sc,
				    "authentication failed with status %d",
				    IEEE80211_AUTH_STATUS(auth));
		}
		break;

	case IEEE80211_AUTH_ALG_SHARED:
		/* Shared key (WEP) authentication is not implemented. */
		RAY_RECERR(sc, "shared key authentication request");
		break;

	default:
		RAY_RECERR(sc, "reserved authentication subtype 0x%04hx",
		    IEEE80211_AUTH_ALGORITHM(auth));
		break;
	}
}

/*
 * Deal with CTL packet types
 */
static void
ray_rx_ctl(struct ray_softc *sc, struct mbuf *m0)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct ieee80211_frame *header = mtod(m0, struct ieee80211_frame *);

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_CTL, "");

	if ((header->i_fc[1] & IEEE80211_FC1_DIR_MASK) !=
	    IEEE80211_FC1_DIR_NODS) {
		RAY_RECERR(sc, "CTL TODS/FROMDS wrong fc1 0x%x",
		    header->i_fc[1] & IEEE80211_FC1_DIR_MASK);
		ifp->if_ierrors++;
		m_freem(m0);
		return;
	}

	/*
	 * Check the ctl packet subtype, some packets should be
	 * dropped depending on the mode the station is in. The ECF
	 * should deal with everything but the power save poll to an
	 * AP. See pg 52(60) of docs.
	 */
	RAY_MBUF_DUMP(sc, RAY_DBG_CTL, m0, "CTL packet");
	switch (header->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {

	case IEEE80211_FC0_SUBTYPE_PS_POLL:
		RAY_DPRINTF(sc, RAY_DBG_CTL, "PS_POLL CTL packet");
		if ((sc->sc_d.np_net_type == RAY_MIB_NET_TYPE_INFRA) &&
		    (sc->sc_c.np_ap_status == RAY_MIB_AP_STATUS_AP))
			RAY_PANIC(sc, "can't be an AP yet"); /* XXX_ACTING_AP */
		break;

	case IEEE80211_FC0_SUBTYPE_RTS:
	case IEEE80211_FC0_SUBTYPE_CTS:
	case IEEE80211_FC0_SUBTYPE_ACK:
	case IEEE80211_FC0_SUBTYPE_CF_END:
	case IEEE80211_FC0_SUBTYPE_CF_END_ACK:
		/* These are handled by the ECF and should never reach us. */
		RAY_RECERR(sc, "unexpected CTL packet subtype 0x%0x",
		    header->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK);
		ifp->if_ierrors++;
		break;

	default:
		RAY_RECERR(sc, "reserved CTL packet subtype 0x%x",
		    header->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK);
		ifp->if_ierrors++;
	}

	m_freem(m0);
}

/*
 * Update rx level and antenna cache
 */
static void
ray_rx_update_cache(struct ray_softc *sc, u_int8_t *src, u_int8_t siglev, u_int8_t antenna)
{
	struct timeval mint;
	struct ray_siglev *sl;
	int i, mini;

	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");

	/* Try to find host */
	for (i = 0; i < RAY_NSIGLEVRECS; i++) {
		sl = &sc->sc_siglevs[i];
		if (bcmp(sl->rsl_host, src, ETHER_ADDR_LEN) == 0)
			goto found;
	}
	/* Not found, find oldest slot and recycle it for this host */
	mini = 0;
	mint.tv_sec = LONG_MAX;
	mint.tv_usec = 0;
	for (i = 0; i < RAY_NSIGLEVRECS; i++) {
		sl = &sc->sc_siglevs[i];
		if (timevalcmp(&sl->rsl_time, &mint, <)) {
			mini = i;
			mint = sl->rsl_time;
		}
	}
	sl = &sc->sc_siglevs[mini];
	bzero(sl->rsl_siglevs, RAY_NSIGLEV);
	bzero(sl->rsl_antennas, RAY_NANTENNA);
	bcopy(src, sl->rsl_host, ETHER_ADDR_LEN);

found:
	/* Shift the history down one slot and insert the new sample. */
	microtime(&sl->rsl_time);
	bcopy(sl->rsl_siglevs, &sl->rsl_siglevs[1], RAY_NSIGLEV-1);
	sl->rsl_siglevs[0] = siglev;
	if (sc->sc_version != RAY_ECFS_BUILD_4) {
		bcopy(sl->rsl_antennas, &sl->rsl_antennas[1], RAY_NANTENNA-1);
		sl->rsl_antennas[0] = antenna;
	}
}

/*
 * Interrupt handling
 */

/*
 * Process an interrupt
 */
static void
ray_intr(void *xsc)
{
	struct ray_softc *sc = (struct ray_softc *)xsc;
	struct ifnet *ifp = &sc->arpcom.ac_if;
	size_t ccs;
	u_int8_t cmd;
	int ccsi;

	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");
	RAY_MAP_CM(sc);

	if ((sc == NULL) || (sc->gone))
		return;

	/*
	 * Check that the interrupt was for us, if so get the rcs/ccs
	 * and vector on the command contained within it.
	 */
	if (RAY_HCS_INTR(sc)) {
		ccsi = SRAM_READ_1(sc, RAY_SCB_RCSI);
		ccs = RAY_CCS_ADDRESS(ccsi);
		cmd = SRAM_READ_FIELD_1(sc, ccs, ray_cmd, c_cmd);
		if (ccsi <= RAY_CCS_LAST)
			ray_intr_ccs(sc, cmd, ccs);
		else if (ccsi <= RAY_RCS_LAST)
			ray_intr_rcs(sc, cmd, ccs);
		else
			RAY_RECERR(sc, "bad ccs index 0x%x", ccsi);
		RAY_HCS_CLEAR_INTR(sc);
		RAY_DPRINTF(sc, RAY_DBG_RX, "interrupt was handled");
	}

	/* Send any packets lying around and update error counters */
	if (!(ifp->if_flags & IFF_OACTIVE) && (ifp->if_snd.ifq_head != NULL))
		ray_tx(ifp);
	if ((++sc->sc_checkcounters % 32) == 0)
		ray_intr_updt_errcntrs(sc);
}

/*
 * Read the error counters.
 */
static void
ray_intr_updt_errcntrs(struct ray_softc *sc)
{
	size_t csc;

	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");
	RAY_MAP_CM(sc);

	/*
	 * The card implements the following protocol to keep the
	 * values from being changed while read: It checks the `own'
	 * bit and if zero writes the current internal counter value,
	 * it then sets the `own' bit to 1. If the `own' bit was 1 it
	 * increments its internal counter. The user thus reads the
	 * counter if the `own' bit is one and then sets the own bit
	 * to 0.
	 */
	csc = RAY_STATUS_BASE;
	if (SRAM_READ_FIELD_1(sc, csc, ray_csc, csc_mrxo_own)) {
		sc->sc_rxoverflow +=
		    SRAM_READ_FIELD_2(sc, csc, ray_csc, csc_mrx_overflow);
		SRAM_WRITE_FIELD_1(sc, csc, ray_csc, csc_mrxo_own, 0);
	}
	if (SRAM_READ_FIELD_1(sc, csc, ray_csc, csc_mrxc_own)) {
		/*
		 * NOTE(review): this folds csc_mrx_overflow into the
		 * checksum-error counter; it looks like a copy/paste slip
		 * for a csc_mrx_cksum field - verify against the ray_csc
		 * layout in the register definitions.
		 */
		sc->sc_rxcksum +=
		    SRAM_READ_FIELD_2(sc, csc, ray_csc, csc_mrx_overflow);
		SRAM_WRITE_FIELD_1(sc, csc, ray_csc, csc_mrxc_own, 0);
	}
	if (SRAM_READ_FIELD_1(sc, csc, ray_csc, csc_rxhc_own)) {
		sc->sc_rxhcksum +=
		    SRAM_READ_FIELD_2(sc, csc, ray_csc, csc_rx_hcksum);
		SRAM_WRITE_FIELD_1(sc, csc, ray_csc, csc_rxhc_own, 0);
	}
	sc->sc_rxnoise = SRAM_READ_FIELD_1(sc, csc, ray_csc, csc_rx_noise);
}

/*
 * Process CCS command completion
 */
static void
ray_intr_ccs(struct ray_softc *sc, u_int8_t cmd, size_t ccs)
{
	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");

	switch (cmd) {

	case RAY_CMD_DOWNLOAD_PARAMS:
		RAY_DPRINTF(sc, RAY_DBG_COM, "START_PARAMS");
		ray_init_download_done(sc, ccs);
		break;

	case RAY_CMD_UPDATE_PARAMS:
		RAY_DPRINTF(sc, RAY_DBG_COM, "UPDATE_PARAMS");
		ray_upparams_done(sc, ccs);
		break;

	case RAY_CMD_REPORT_PARAMS:
		RAY_DPRINTF(sc, RAY_DBG_COM, "REPORT_PARAMS");
		ray_repparams_done(sc, ccs);
		break;

	case RAY_CMD_UPDATE_MCAST:
		RAY_DPRINTF(sc, RAY_DBG_COM, "UPDATE_MCAST");
		ray_mcast_done(sc, ccs);
		break;

	case RAY_CMD_START_NET:
	case RAY_CMD_JOIN_NET:
		RAY_DPRINTF(sc, RAY_DBG_COM, "START|JOIN_NET");
		ray_init_sj_done(sc, ccs);
		break;

	case RAY_CMD_TX_REQ:
		RAY_DPRINTF(sc, RAY_DBG_COM, "TX_REQ");
		ray_tx_done(sc, ccs);
		break;

	case RAY_CMD_START_ASSOC:
		RAY_DPRINTF(sc, RAY_DBG_COM, "START_ASSOC");
		ray_init_assoc_done(sc, ccs);
		break;

	case RAY_CMD_UPDATE_APM:
		RAY_RECERR(sc, "unexpected UPDATE_APM");
		break;

	case RAY_CMD_TEST_MEM:
		RAY_RECERR(sc, "unexpected TEST_MEM");
		break;

	case RAY_CMD_SHUTDOWN:
		RAY_RECERR(sc, "unexpected SHUTDOWN");
		break;

	case RAY_CMD_DUMP_MEM:
		RAY_RECERR(sc, "unexpected DUMP_MEM");
		break;

	case RAY_CMD_START_TIMER:
		RAY_RECERR(sc, "unexpected START_TIMER");
		break;

	default:
		RAY_RECERR(sc, "unknown command 0x%x", cmd);
break; } } /* * Process ECF command request */ static void ray_intr_rcs(struct ray_softc *sc, u_int8_t cmd, size_t rcs) { RAY_DPRINTF(sc, RAY_DBG_SUBR, ""); switch (cmd) { case RAY_ECMD_RX_DONE: RAY_DPRINTF(sc, RAY_DBG_RX, "RX_DONE"); ray_rx(sc, rcs); break; case RAY_ECMD_REJOIN_DONE: RAY_DPRINTF(sc, RAY_DBG_RX, "REJOIN_DONE"); sc->sc_havenet = 1; /* XXX Should not be here but in function */ break; case RAY_ECMD_ROAM_START: RAY_DPRINTF(sc, RAY_DBG_RX, "ROAM_START"); sc->sc_havenet = 0; /* XXX Should not be here but in function */ break; case RAY_ECMD_JAPAN_CALL_SIGNAL: RAY_RECERR(sc, "unexpected JAPAN_CALL_SIGNAL"); break; default: RAY_RECERR(sc, "unknown command 0x%x", cmd); break; } RAY_CCS_FREE(sc, rcs); } /* * User land entry to multicast list changes */ static int ray_mcast_user(struct ray_softc *sc) { struct ray_comq_entry *com[2]; int error, ncom; RAY_DPRINTF(sc, RAY_DBG_SUBR, ""); /* * Do all checking in the runq to preserve ordering. * * We run promisc to pick up changes to the ALL_MULTI * interface flag. */ ncom = 0; com[ncom++] = RAY_COM_MALLOC(ray_mcast, 0); com[ncom++] = RAY_COM_MALLOC(ray_promisc, 0); RAY_COM_RUNQ(sc, com, ncom, "raymcast", error); /* XXX no real error processing from anything yet! */ RAY_COM_FREE(com, ncom); return (error); } /* * Runq entry to setting the multicast filter list * * MUST always be followed by a call to ray_promisc to pick up changes * to promisc flag */ static void ray_mcast(struct ray_softc *sc, struct ray_comq_entry *com) { struct ifnet *ifp = &sc->arpcom.ac_if; struct ifmultiaddr *ifma; size_t bufp; int count; RAY_DPRINTF(sc, RAY_DBG_SUBR, ""); RAY_MAP_CM(sc); /* * If card is not running we don't need to update this. */ if (!(ifp->if_flags & IFF_RUNNING)) { RAY_DPRINTF(sc, RAY_DBG_IOCTL, "not running"); ray_com_runq_done(sc); return; } /* * The multicast list is only 16 items long so use promiscuous * mode and don't bother updating the multicast list. 
*/ - for (ifma = ifp->if_multiaddrs.lh_first, count = 0; ifma != NULL; - ifma = ifma->ifma_link.le_next, count++) + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) if (count == 0) { ray_com_runq_done(sc); return; } else if (count > 16) { ifp->if_flags |= IFF_ALLMULTI; ray_com_runq_done(sc); return; } else if (ifp->if_flags & IFF_ALLMULTI) ifp->if_flags &= ~IFF_ALLMULTI; /* * Kick the card */ ray_ccs_fill(sc, com->c_ccs, RAY_CMD_UPDATE_MCAST); SRAM_WRITE_FIELD_1(sc, com->c_ccs, ray_cmd_update_mcast, c_nmcast, count); bufp = RAY_HOST_TO_ECF_BASE; - for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; - ifma = ifma->ifma_link.le_next) { + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { SRAM_WRITE_REGION( sc, bufp, LLADDR((struct sockaddr_dl *)ifma->ifma_addr), ETHER_ADDR_LEN ); bufp += ETHER_ADDR_LEN; } ray_com_ecf(sc, com); } /* * Complete the multicast filter list update */ static void ray_mcast_done(struct ray_softc *sc, size_t ccs) { RAY_DPRINTF(sc, RAY_DBG_SUBR, ""); RAY_COM_CHECK(sc, ccs); ray_com_ecf_done(sc); } /* * Runq entry to set/reset promiscuous mode */ static void ray_promisc(struct ray_softc *sc, struct ray_comq_entry *com) { struct ifnet *ifp = &sc->arpcom.ac_if; RAY_DPRINTF(sc, RAY_DBG_SUBR, ""); RAY_MAP_CM(sc); /* * If card not running or we already have the right flags * we don't need to update this */ sc->sc_d.np_promisc = !!(ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)); if (!(ifp->if_flags & IFF_RUNNING) || (sc->sc_c.np_promisc == sc->sc_d.np_promisc)) { ray_com_runq_done(sc); return; } /* * Kick the card */ ray_ccs_fill(sc, com->c_ccs, RAY_CMD_UPDATE_PARAMS); SRAM_WRITE_FIELD_1(sc, com->c_ccs, ray_cmd_update, c_paramid, RAY_MIB_PROMISC); SRAM_WRITE_FIELD_1(sc, com->c_ccs, ray_cmd_update, c_nparam, 1); SRAM_WRITE_1(sc, RAY_HOST_TO_ECF_BASE, sc->sc_d.np_promisc); ray_com_ecf(sc, com); } /* * User land entry to parameter reporting * * As we by pass the runq to report current parameters this function * only provides a snap shot of the 
driver's state. */ static int ray_repparams_user(struct ray_softc *sc, struct ray_param_req *pr) { struct ray_comq_entry *com[1]; int error, ncom; RAY_DPRINTF(sc, RAY_DBG_SUBR, ""); /* * Test for illegal values or immediate responses */ if (pr->r_paramid > RAY_MIB_MAX) return (EINVAL); if ((sc->sc_version == RAY_ECFS_BUILD_4) && !(mib_info[pr->r_paramid][0] & RAY_V4)) return (EINVAL); if ((sc->sc_version == RAY_ECFS_BUILD_5) && !(mib_info[pr->r_paramid][0] & RAY_V5)) return (EINVAL); if (pr->r_paramid > RAY_MIB_LASTUSER) { switch (pr->r_paramid) { case RAY_MIB_VERSION: if (sc->sc_version == RAY_ECFS_BUILD_4) *pr->r_data = RAY_V4; else *pr->r_data = RAY_V5; break; case RAY_MIB_CUR_BSSID: bcopy(sc->sc_c.np_bss_id, pr->r_data, ETHER_ADDR_LEN); break; case RAY_MIB_CUR_INITED: *pr->r_data = sc->sc_c.np_inited; break; case RAY_MIB_CUR_DEF_TXRATE: *pr->r_data = sc->sc_c.np_def_txrate; break; case RAY_MIB_CUR_ENCRYPT: *pr->r_data = sc->sc_c.np_encrypt; break; case RAY_MIB_CUR_NET_TYPE: *pr->r_data = sc->sc_c.np_net_type; break; case RAY_MIB_CUR_SSID: bcopy(sc->sc_c.np_ssid, pr->r_data, IEEE80211_NWID_LEN); break; case RAY_MIB_CUR_PRIV_START: *pr->r_data = sc->sc_c.np_priv_start; break; case RAY_MIB_CUR_PRIV_JOIN: *pr->r_data = sc->sc_c.np_priv_join; break; case RAY_MIB_DES_BSSID: bcopy(sc->sc_d.np_bss_id, pr->r_data, ETHER_ADDR_LEN); break; case RAY_MIB_DES_INITED: *pr->r_data = sc->sc_d.np_inited; break; case RAY_MIB_DES_DEF_TXRATE: *pr->r_data = sc->sc_d.np_def_txrate; break; case RAY_MIB_DES_ENCRYPT: *pr->r_data = sc->sc_d.np_encrypt; break; case RAY_MIB_DES_NET_TYPE: *pr->r_data = sc->sc_d.np_net_type; break; case RAY_MIB_DES_SSID: bcopy(sc->sc_d.np_ssid, pr->r_data, IEEE80211_NWID_LEN); break; case RAY_MIB_DES_PRIV_START: *pr->r_data = sc->sc_d.np_priv_start; break; case RAY_MIB_DES_PRIV_JOIN: *pr->r_data = sc->sc_d.np_priv_join; break; case RAY_MIB_CUR_AP_STATUS: *pr->r_data = sc->sc_c.np_ap_status; break; case RAY_MIB_CUR_PROMISC: *pr->r_data = sc->sc_c.np_promisc; 
break; case RAY_MIB_DES_AP_STATUS: *pr->r_data = sc->sc_d.np_ap_status; break; case RAY_MIB_DES_PROMISC: *pr->r_data = sc->sc_d.np_promisc; break; default: return (EINVAL); break; } pr->r_failcause = 0; if (sc->sc_version == RAY_ECFS_BUILD_4) pr->r_len = mib_info[pr->r_paramid][RAY_MIB_INFO_SIZ4]; else if (sc->sc_version == RAY_ECFS_BUILD_5) pr->r_len = mib_info[pr->r_paramid][RAY_MIB_INFO_SIZ5]; return (0); } pr->r_failcause = 0; ncom = 0; com[ncom++] = RAY_COM_MALLOC(ray_repparams, RAY_COM_FWOK); com[ncom-1]->c_pr = pr; RAY_COM_RUNQ(sc, com, ncom, "rayrparm", error); /* XXX no real error processing from anything yet! */ if (!com[0]->c_retval && pr->r_failcause) error = EINVAL; RAY_COM_FREE(com, ncom); return (error); } /* * Runq entry to read the required parameter * * The card and driver are happy for parameters to be read * whenever the card is plugged in */ static void ray_repparams(struct ray_softc *sc, struct ray_comq_entry *com) { RAY_DPRINTF(sc, RAY_DBG_SUBR, ""); RAY_MAP_CM(sc); /* * Kick the card */ ray_ccs_fill(sc, com->c_ccs, RAY_CMD_REPORT_PARAMS); SRAM_WRITE_FIELD_1(sc, com->c_ccs, ray_cmd_report, c_paramid, com->c_pr->r_paramid); SRAM_WRITE_FIELD_1(sc, com->c_ccs, ray_cmd_report, c_nparam, 1); ray_com_ecf(sc, com); } /* * Complete the parameter reporting */ static void ray_repparams_done(struct ray_softc *sc, size_t ccs) { struct ray_comq_entry *com; RAY_DPRINTF(sc, RAY_DBG_SUBR, ""); RAY_MAP_CM(sc); RAY_COM_CHECK(sc, ccs); com = TAILQ_FIRST(&sc->sc_comq); com->c_pr->r_failcause = SRAM_READ_FIELD_1(sc, ccs, ray_cmd_report, c_failcause); com->c_pr->r_len = SRAM_READ_FIELD_1(sc, ccs, ray_cmd_report, c_len); SRAM_READ_REGION(sc, RAY_ECF_TO_HOST_BASE, com->c_pr->r_data, com->c_pr->r_len); ray_com_ecf_done(sc); } /* * User land entry (and exit) to the error counters */ static int ray_repstats_user(struct ray_softc *sc, struct ray_stats_req *sr) { RAY_DPRINTF(sc, RAY_DBG_SUBR, ""); sr->rxoverflow = sc->sc_rxoverflow; sr->rxcksum = sc->sc_rxcksum; 
sr->rxhcksum = sc->sc_rxhcksum; sr->rxnoise = sc->sc_rxnoise; return (0); } /* * User land entry to parameter update changes * * As a parameter change can cause the network parameters to be * invalid we have to re-start/join. */ static int ray_upparams_user(struct ray_softc *sc, struct ray_param_req *pr) { struct ray_comq_entry *com[3]; int error, ncom, todo; #define RAY_UPP_SJ 0x1 #define RAY_UPP_PARAMS 0x2 RAY_DPRINTF(sc, RAY_DBG_SUBR, ""); /* * Check that the parameter is available based on firmware version */ pr->r_failcause = 0; if (pr->r_paramid > RAY_MIB_LASTUSER) return (EINVAL); if ((sc->sc_version == RAY_ECFS_BUILD_4) && !(mib_info[pr->r_paramid][0] & RAY_V4)) return (EINVAL); if ((sc->sc_version == RAY_ECFS_BUILD_5) && !(mib_info[pr->r_paramid][0] & RAY_V5)) return (EINVAL); /* * Handle certain parameters specially */ todo = 0; switch (pr->r_paramid) { case RAY_MIB_NET_TYPE: /* Updated via START_NET JOIN_NET */ sc->sc_d.np_net_type = *pr->r_data; todo |= RAY_UPP_SJ; break; case RAY_MIB_SSID: /* Updated via START_NET JOIN_NET */ bcopy(pr->r_data, sc->sc_d.np_ssid, IEEE80211_NWID_LEN); todo |= RAY_UPP_SJ; break; case RAY_MIB_PRIVACY_MUST_START:/* Updated via START_NET */ if (sc->sc_c.np_net_type != RAY_MIB_NET_TYPE_ADHOC) return (EINVAL); sc->sc_d.np_priv_start = *pr->r_data; todo |= RAY_UPP_SJ; break; case RAY_MIB_PRIVACY_CAN_JOIN: /* Updated via START_NET JOIN_NET */ sc->sc_d.np_priv_join = *pr->r_data; todo |= RAY_UPP_SJ; break; case RAY_MIB_BASIC_RATE_SET: sc->sc_d.np_def_txrate = *pr->r_data; todo |= RAY_UPP_PARAMS; break; case RAY_MIB_AP_STATUS: /* Unsupported */ case RAY_MIB_MAC_ADDR: /* XXX Need interface up but could be done */ case RAY_MIB_PROMISC: /* BPF */ return (EINVAL); break; default: todo |= RAY_UPP_PARAMS; todo |= RAY_UPP_SJ; break; } /* * Generate the runq entries as needed */ ncom = 0; if (todo & RAY_UPP_PARAMS) { com[ncom++] = RAY_COM_MALLOC(ray_upparams, 0); com[ncom-1]->c_pr = pr; } if (todo & RAY_UPP_SJ) { com[ncom++] = 
RAY_COM_MALLOC(ray_init_sj, 0); com[ncom++] = RAY_COM_MALLOC(ray_init_assoc, 0); } RAY_COM_RUNQ(sc, com, ncom, "rayuparam", error); /* XXX no real error processing from anything yet! */ if (!com[0]->c_retval && pr->r_failcause) error = EINVAL; RAY_COM_FREE(com, ncom); return (error); } /* * Runq entry to update a parameter * * The card and driver are happy for parameters to be updated * whenever the card is plugged in * * XXX the above is a little bit of a lie until _download is sorted out and we * XXX keep local copies of things */ static void ray_upparams(struct ray_softc *sc, struct ray_comq_entry *com) { RAY_DPRINTF(sc, RAY_DBG_SUBR, ""); RAY_MAP_CM(sc); ray_ccs_fill(sc, com->c_ccs, RAY_CMD_UPDATE_PARAMS); SRAM_WRITE_FIELD_1(sc, com->c_ccs, ray_cmd_update, c_paramid, com->c_pr->r_paramid); SRAM_WRITE_FIELD_1(sc, com->c_ccs, ray_cmd_update, c_nparam, 1); SRAM_WRITE_REGION(sc, RAY_HOST_TO_ECF_BASE, com->c_pr->r_data, com->c_pr->r_len); ray_com_ecf(sc, com); } /* * Complete the parameter update, note that promisc finishes up here too */ static void ray_upparams_done(struct ray_softc *sc, size_t ccs) { struct ray_comq_entry *com; RAY_DPRINTF(sc, RAY_DBG_SUBR, ""); RAY_MAP_CM(sc); RAY_COM_CHECK(sc, ccs); com = TAILQ_FIRST(&sc->sc_comq); switch (SRAM_READ_FIELD_1(sc, ccs, ray_cmd_update, c_paramid)) { case RAY_MIB_PROMISC: sc->sc_c.np_promisc = SRAM_READ_1(sc, RAY_HOST_TO_ECF_BASE); RAY_DPRINTF(sc, RAY_DBG_IOCTL, "promisc value %d", sc->sc_c.np_promisc); break; default: com->c_pr->r_failcause = SRAM_READ_FIELD_1(sc, ccs, ray_cmd_update, c_failcause); break; } ray_com_ecf_done(sc); } /* * Command queuing and execution */ /* * Set up a comq entry struct */ static struct ray_comq_entry * ray_com_init(struct ray_comq_entry *com, ray_comqfn_t function, int flags, char *mesg) { com->c_function = function; com->c_flags = flags; com->c_retval = 0; com->c_ccs = NULL; com->c_wakeup = NULL; com->c_pr = NULL; com->c_mesg = mesg; return (com); } /* * Malloc and set up a comq 
entry struct */ static struct ray_comq_entry * ray_com_malloc(ray_comqfn_t function, int flags, char *mesg) { struct ray_comq_entry *com; MALLOC(com, struct ray_comq_entry *, sizeof(struct ray_comq_entry), M_RAYCOM, M_WAITOK); return (ray_com_init(com, function, flags, mesg)); } /* * Add an array of commands to the runq, get some ccs's for them and * then run, waiting on the last command. * * We add the commands to the queue first to preserve ioctl ordering. * * On recoverable errors, this routine removes the entries from the * runq. A caller can requeue the commands (and still preserve its own * processes ioctl ordering) but doesn't have to. When the card is * detached we get out quickly to prevent panics and don't bother * about the runq. */ static int ray_com_runq_add(struct ray_softc *sc, struct ray_comq_entry *com[], int ncom, char *wmesg) { int i, error; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_COM, ""); error = 0; /* * Add the commands to the runq but don't let it run until * the ccs's are allocated successfully */ com[0]->c_flags |= RAY_COM_FWAIT; for (i = 0; i < ncom; i++) { com[i]->c_wakeup = com[ncom-1]; RAY_DPRINTF(sc, RAY_DBG_COM, "adding %p", com[i]); RAY_DCOM(sc, RAY_DBG_DCOM, com[i], "adding"); TAILQ_INSERT_TAIL(&sc->sc_comq, com[i], c_chain); } com[ncom-1]->c_flags |= RAY_COM_FWOK; /* * Allocate ccs's for each command. */ for (i = 0; i < ncom; i++) { error = ray_ccs_alloc(sc, &com[i]->c_ccs, wmesg); if (error == ENXIO) return (ENXIO); else if (error) goto cleanup; } /* * Allow the queue to run and sleep if needed. * * Iff the FDETACHED flag is set in the com entry we waited on * the driver is in a zombie state! The softc structure has been * freed by the generic bus detach methods - eek. We tread very * carefully! 
*/ com[0]->c_flags &= ~RAY_COM_FWAIT; ray_com_runq(sc); if (TAILQ_FIRST(&sc->sc_comq) != NULL) { RAY_DPRINTF(sc, RAY_DBG_COM, "sleeping"); error = tsleep(com[ncom-1], PCATCH | PRIBIO, wmesg, 0); if (com[ncom-1]->c_flags & RAY_COM_FDETACHED) return (ENXIO); RAY_DPRINTF(sc, RAY_DBG_COM, "awakened, tsleep returned 0x%x", error); } else error = 0; cleanup: /* * Only clean the queue on real errors - we don't care about it * when we detach as the queue entries are freed by the callers. */ if (error && (error != ENXIO)) for (i = 0; i < ncom; i++) if (!(com[i]->c_flags & RAY_COM_FCOMPLETED)) { RAY_DPRINTF(sc, RAY_DBG_COM, "removing %p", com[i]); RAY_DCOM(sc, RAY_DBG_DCOM, com[i], "removing"); TAILQ_REMOVE(&sc->sc_comq, com[i], c_chain); ray_ccs_free(sc, com[i]->c_ccs); com[i]->c_ccs = NULL; } return (error); } /* * Run the command at the head of the queue (if not already running) */ static void ray_com_runq(struct ray_softc *sc) { struct ray_comq_entry *com; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_COM, ""); com = TAILQ_FIRST(&sc->sc_comq); if ((com == NULL) || (com->c_flags & RAY_COM_FRUNNING) || (com->c_flags & RAY_COM_FWAIT) || (com->c_flags & RAY_COM_FDETACHED)) return; com->c_flags |= RAY_COM_FRUNNING; RAY_DPRINTF(sc, RAY_DBG_COM, "running %p", com); RAY_DCOM(sc, RAY_DBG_DCOM, com, "running"); com->c_function(sc, com); } /* * Remove run command, free ccs and wakeup caller. * * Minimal checks are done here as we ensure that the com and command * handler were matched up earlier. Must be called at splnet or higher * so that entries on the command queue are correctly removed. * * Remove the com from the comq, and wakeup the caller if it requested * to be woken. This is used for ensuring a sequence of commands * completes. Finally, re-run the queue. 
 */
static void
ray_com_runq_done(struct ray_softc *sc)
{
	struct ray_comq_entry *com;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_COM, "");

	/* The running command is always at the head of the queue. */
	com = TAILQ_FIRST(&sc->sc_comq);	/* XXX shall we check this as below */
	RAY_DPRINTF(sc, RAY_DBG_COM, "removing %p", com);
	RAY_DCOM(sc, RAY_DBG_DCOM, com, "removing");
	TAILQ_REMOVE(&sc->sc_comq, com, c_chain);
	/* Mark completion and release the ccs back to the allocator. */
	com->c_flags &= ~RAY_COM_FRUNNING;
	com->c_flags |= RAY_COM_FCOMPLETED;
	com->c_retval = 0;
	ray_ccs_free(sc, com->c_ccs);
	com->c_ccs = NULL;

	/* Wake the sleeper only if it asked to be woken (sequence end). */
	if (com->c_flags & RAY_COM_FWOK)
		wakeup(com->c_wakeup);

	/* Start the next queued command, if any. */
	ray_com_runq(sc);
	/* XXX what about error on completion then? deal with when i fix
	 * XXX the status checking
	 *
	 * XXX all the runq_done calls from IFF_RUNNING checks in runq
	 * XXX routines should return EIO but shouldn't abort the runq */
}

/*
 * Send a command to the ECF.
 */
static void
ray_com_ecf(struct ray_softc *sc, struct ray_comq_entry *com)
{
	u_int i;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_COM, "");
	RAY_MAP_CM(sc);

	/*
	 * Busy-wait for the ECF to become ready; panic after 50 spins,
	 * warn on the first one.
	 *
	 * XXX we probably want to call a timeout on ourself here...
	 * XXX why isn't this processed like the TX case
	 */
	i = 0;
	while (!RAY_ECF_READY(sc))
		if (++i > 50)
			RAY_PANIC(sc, "spun too long");
		else if (i == 1)
			RAY_RECERR(sc, "spinning");

	RAY_DPRINTF(sc, RAY_DBG_COM, "sending %p", com);
	RAY_DCOM(sc, RAY_DBG_DCOM, com, "sending");
	/* Hand the ccs index to the firmware and kick the command. */
	SRAM_WRITE_1(sc, RAY_SCB_CCSI, RAY_CCS_INDEX(com->c_ccs));
	RAY_ECF_START_CMD(sc);

	/* Some commands complete without an interrupt; poll those. */
	if (RAY_COM_NEEDS_TIMO(
	    SRAM_READ_FIELD_1(sc, com->c_ccs, ray_cmd, c_cmd))) {
		RAY_DPRINTF(sc, RAY_DBG_COM, "adding timeout");
		sc->com_timerh = timeout(ray_com_ecf_timo, sc, RAY_COM_TIMEOUT);
	}
}

/*
 * Deal with commands that require a timeout to test completion.
 *
 * This routine is coded to only expect one outstanding request for the
 * timed out requests at a time, but thats all that can be outstanding
 * per hardware limitations and all that we issue anyway.
 *
 * We don't do any fancy testing of the command currently issued as we
 * know it must be a timeout based one...unless I've got this wrong!
*/ static void ray_com_ecf_timo(void *xsc) { struct ray_softc *sc = (struct ray_softc *)xsc; struct ray_comq_entry *com; u_int8_t cmd; int s; s = splnet(); RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_COM, ""); RAY_MAP_CM(sc); com = TAILQ_FIRST(&sc->sc_comq); cmd = SRAM_READ_FIELD_1(sc, com->c_ccs, ray_cmd, c_cmd); switch (SRAM_READ_FIELD_1(sc, com->c_ccs, ray_cmd, c_status)) { case RAY_CCS_STATUS_COMPLETE: case RAY_CCS_STATUS_FREE: /* Buggy firmware */ ray_intr_ccs(sc, cmd, com->c_ccs); break; case RAY_CCS_STATUS_BUSY: sc->com_timerh = timeout(ray_com_ecf_timo, sc, RAY_COM_TIMEOUT); break; default: /* Replicates NetBSD */ if (sc->sc_ccsinuse[RAY_CCS_INDEX(com->c_ccs)] == 1) { /* give a chance for the interrupt to occur */ sc->sc_ccsinuse[RAY_CCS_INDEX(com->c_ccs)] = 2; sc->com_timerh = timeout(ray_com_ecf_timo, sc, RAY_COM_TIMEOUT); } else ray_intr_ccs(sc, cmd, com->c_ccs); break; } splx(s); } /* * Called when interrupt handler for the command has done all it * needs to. Will be called at splnet. */ static void ray_com_ecf_done(struct ray_softc *sc) { RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_COM, ""); untimeout(ray_com_ecf_timo, sc, sc->com_timerh); ray_com_runq_done(sc); } #if RAY_DEBUG & RAY_DBG_COM /* * Process completed ECF commands that probably came from the command queue * * This routine is called after vectoring the completed ECF command * to the appropriate _done routine. It helps check everything is okay. */ static void ray_com_ecf_check(struct ray_softc *sc, size_t ccs, char *mesg) { struct ray_comq_entry *com; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_COM, "%s", mesg); com = TAILQ_FIRST(&sc->sc_comq); if (com == NULL) RAY_PANIC(sc, "no command queue"); if (com->c_ccs != ccs) RAY_PANIC(sc, "ccs's don't match"); } #endif /* RAY_DEBUG & RAY_DBG_COM */ /* * CCS allocators */ /* * Obtain a ccs for a commmand * * Returns 0 and in `ccsp' the bus offset of the free ccs. 
Will block
 * awaiting free ccs if needed - if the sleep is interrupted
 * EINTR/ERESTART is returned, if the card is ejected we return ENXIO.
 */
static int
ray_ccs_alloc(struct ray_softc *sc, size_t *ccsp, char *wmesg)
{
	size_t ccs;
	u_int i;
	int error;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_CCS, "");
	RAY_MAP_CM(sc);

	/* Scan the command ccs slots until a free one is found, sleeping
	 * between scans when they are all in use. */
	for (;;) {
		for (i = RAY_CCS_CMD_FIRST; i <= RAY_CCS_CMD_LAST; i++) {
			/* we probe here to make the card go */
			(void)SRAM_READ_FIELD_1(sc, RAY_CCS_ADDRESS(i), ray_cmd,
			    c_status);
			if (!sc->sc_ccsinuse[i])
				break;
		}
		if (i > RAY_CCS_CMD_LAST) {
			RAY_DPRINTF(sc, RAY_DBG_CCS, "sleeping");
			/* Woken by ray_ccs_free's wakeup(ray_ccs_alloc). */
			error = tsleep(ray_ccs_alloc, PCATCH | PRIBIO,
			    wmesg, 0);
			/* NOTE(review): sc was already dereferenced above, so
			 * the NULL test can only matter if sc survives the
			 * sleep - confirm intent of this guard. */
			if ((sc == NULL) || (sc->gone))
				return (ENXIO);
			RAY_DPRINTF(sc, RAY_DBG_CCS,
			    "awakened, tsleep returned 0x%x", error);
			if (error)
				return (error);
		} else
			break;
	}
	RAY_DPRINTF(sc, RAY_DBG_CCS, "allocated 0x%02x", i);
	/* Reserve the slot and hand back its bus offset. */
	sc->sc_ccsinuse[i] = 1;
	ccs = RAY_CCS_ADDRESS(i);
	*ccsp = ccs;

	return (0);
}

/*
 * Fill the easy bits in of a pre-allocated CCS
 */
static void
ray_ccs_fill(struct ray_softc *sc, size_t ccs, u_int cmd)
{
	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_CCS, "");
	RAY_MAP_CM(sc);

	if (ccs == NULL)
		RAY_PANIC(sc, "ccs not allocated");

	/* Mark busy, set the command code and terminate the ccs chain. */
	SRAM_WRITE_FIELD_1(sc, ccs, ray_cmd, c_status, RAY_CCS_STATUS_BUSY);
	SRAM_WRITE_FIELD_1(sc, ccs, ray_cmd, c_cmd, cmd);
	SRAM_WRITE_FIELD_1(sc, ccs, ray_cmd, c_link, RAY_CCS_LINK_NULL);
}

/*
 * Free up a ccs allocated via ray_ccs_alloc
 *
 * Return the old status. This routine is only used for ccs allocated via
 * ray_ccs_alloc (not tx, rx or ECF command requests).
*/ static void ray_ccs_free(struct ray_softc *sc, size_t ccs) { RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_CCS, ""); RAY_MAP_CM(sc); #if 1 | (RAY_DEBUG & RAY_DBG_CCS) if (!sc->sc_ccsinuse[RAY_CCS_INDEX(ccs)]) RAY_RECERR(sc, "freeing free ccs 0x%02x", RAY_CCS_INDEX(ccs)); #endif /* RAY_DEBUG & RAY_DBG_CCS */ if (!sc->gone) RAY_CCS_FREE(sc, ccs); sc->sc_ccsinuse[RAY_CCS_INDEX(ccs)] = 0; RAY_DPRINTF(sc, RAY_DBG_CCS, "freed 0x%02x", RAY_CCS_INDEX(ccs)); wakeup(ray_ccs_alloc); } /* * Obtain a ccs and tx buffer to transmit with and fill them in. * * Returns 0 and in `ccsp' the bus offset of the free ccs. Will not block * and if none available and will returns EAGAIN. * * The caller must fill in the length later. * The caller must clear the ccs on errors. */ static int ray_ccs_tx(struct ray_softc *sc, size_t *ccsp, size_t *bufpp) { size_t ccs, bufp; int i; u_int8_t status; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_CCS, ""); RAY_MAP_CM(sc); i = RAY_CCS_TX_FIRST; do { status = SRAM_READ_FIELD_1(sc, RAY_CCS_ADDRESS(i), ray_cmd, c_status); if (status == RAY_CCS_STATUS_FREE) break; i++; } while (i <= RAY_CCS_TX_LAST); if (i > RAY_CCS_TX_LAST) { return (EAGAIN); } RAY_DPRINTF(sc, RAY_DBG_CCS, "allocated 0x%02x", i); /* * Reserve and fill the ccs - must do the length later. * * Even though build 4 and build 5 have different fields all these * are common apart from tx_rate. Neither the NetBSD driver or Linux * driver bother to overwrite this for build 4 cards. * * The start of the buffer must be aligned to a 256 byte boundary * (least significant byte of address = 0x00). 
*/ ccs = RAY_CCS_ADDRESS(i); bufp = RAY_TX_BASE + i * RAY_TX_BUF_SIZE; bufp += sc->sc_tibsize; SRAM_WRITE_FIELD_1(sc, ccs, ray_cmd_tx, c_status, RAY_CCS_STATUS_BUSY); SRAM_WRITE_FIELD_1(sc, ccs, ray_cmd_tx, c_cmd, RAY_CMD_TX_REQ); SRAM_WRITE_FIELD_1(sc, ccs, ray_cmd_tx, c_link, RAY_CCS_LINK_NULL); SRAM_WRITE_FIELD_2(sc, ccs, ray_cmd_tx, c_bufp, bufp); SRAM_WRITE_FIELD_1(sc, ccs, ray_cmd_tx, c_tx_rate, sc->sc_c.np_def_txrate); SRAM_WRITE_FIELD_1(sc, ccs, ray_cmd_tx, c_apm_mode, 0); /* XXX */ bufp += sizeof(struct ray_tx_phy_header); *ccsp = ccs; *bufpp = bufp; return (0); } /* * Routines to obtain resources for the card */ /* * Allocate the attribute memory on the card * * A lot of this is hacking around pccardd brokeness */ static int ray_res_alloc_am(struct ray_softc *sc) { u_long start, count, flags; u_int32_t offset; int error; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_CM, ""); sc->am_rid = RAY_AM_RID; start = bus_get_resource_start(sc->dev, SYS_RES_MEMORY, sc->am_rid); count = bus_get_resource_count(sc->dev, SYS_RES_MEMORY, sc->am_rid); error = CARD_GET_RES_FLAGS(device_get_parent(sc->dev), sc->dev, SYS_RES_MEMORY, sc->am_rid, &flags); if (error) { RAY_PRINTF(sc, "CARD_GET_RES_FLAGS returned 0x%0x", error); return (error); } error = CARD_GET_MEMORY_OFFSET(device_get_parent(sc->dev), sc->dev, sc->am_rid, &offset); if (error) { RAY_PRINTF(sc, "CARD_GET_MEMORY_OFFSET returned 0x%0x", error); return (error); } RAY_DPRINTF(sc, RAY_DBG_CM | RAY_DBG_BOOTPARAM, "attribute start 0x%0lx count 0x%0lx flags 0x%0lx offset 0x%0x", start, count, flags, offset); if (start == 0x0) { RAY_PRINTF(sc, "fixing up AM map"); } if (count != 0x1000) { RAY_PRINTF(sc, "fixing up AM size from 0x%lx to 0x1000", count); count = 0x1000; } sc->am_res = bus_alloc_resource(sc->dev, SYS_RES_MEMORY, &sc->am_rid, start, ~0, count, RF_ACTIVE); if (!sc->am_res) { RAY_PRINTF(sc, "Cannot allocate attribute memory"); return (ENOMEM); } sc->am_bsh = rman_get_bushandle(sc->am_res); sc->am_bst = 
rman_get_bustag(sc->am_res); if (offset != 0) { RAY_PRINTF(sc, "fixing up AM card address from 0x%x to 0x0", offset); error = CARD_SET_MEMORY_OFFSET(device_get_parent(sc->dev), sc->dev, sc->am_rid, 0, NULL); if (error) { RAY_PRINTF(sc, "CARD_SET_MEMORY_OFFSET returned 0x%0x", error); return (error); } } if (!(flags & 0x10) /* XXX MDF_ATTR */) { RAY_PRINTF(sc, "fixing up AM flags from 0x%lx to 0x50", flags); error = CARD_SET_RES_FLAGS(device_get_parent(sc->dev), sc->dev, SYS_RES_MEMORY, sc->am_rid, PCCARD_A_MEM_ATTR); if (error) { RAY_PRINTF(sc, "CARD_SET_RES_FLAGS returned 0x%0x", error); return (error); } } #if RAY_DEBUG & (RAY_DBG_CM | RAY_DBG_BOOTPARAM) CARD_GET_RES_FLAGS(device_get_parent(sc->dev), sc->dev, SYS_RES_MEMORY, sc->am_rid, &flags); CARD_GET_MEMORY_OFFSET(device_get_parent(sc->dev), sc->dev, sc->cm_rid, &offset); RAY_PRINTF(sc, "allocated attribute memory:\n" ". start 0x%0lx count 0x%0lx flags 0x%0lx offset 0x%0x", bus_get_resource_start(sc->dev, SYS_RES_MEMORY, sc->am_rid), bus_get_resource_count(sc->dev, SYS_RES_MEMORY, sc->am_rid), flags, offset); #endif /* RAY_DEBUG & (RAY_DBG_CM | RAY_DBG_BOOTPARAM) */ return (0); } /* * Allocate the common memory on the card * * A lot of this is hacking around pccardd brokeness */ static int ray_res_alloc_cm(struct ray_softc *sc) { u_long start, count, flags; u_int32_t offset; int error; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_CM, ""); sc->cm_rid = RAY_CM_RID; start = bus_get_resource_start(sc->dev, SYS_RES_MEMORY, sc->cm_rid); count = bus_get_resource_count(sc->dev, SYS_RES_MEMORY, sc->cm_rid); error = CARD_GET_RES_FLAGS(device_get_parent(sc->dev), sc->dev, SYS_RES_MEMORY, sc->cm_rid, &flags); if (error) { RAY_PRINTF(sc, "CARD_GET_RES_FLAGS returned 0x%0x", error); return (error); } error = CARD_GET_MEMORY_OFFSET(device_get_parent(sc->dev), sc->dev, sc->cm_rid, &offset); if (error) { RAY_PRINTF(sc, "CARD_GET_MEMORY_OFFSET returned 0x%0x", error); return (error); } RAY_DPRINTF(sc, RAY_DBG_CM | RAY_DBG_BOOTPARAM, 
"memory start 0x%0lx count 0x%0lx flags 0x%0lx offset 0x%0x", start, count, flags, offset); if (start == 0x0) { RAY_PRINTF(sc, "fixing up CM map"); } if (count != 0xc000) { RAY_PRINTF(sc, "fixing up CM size from 0x%lx to 0xc000", count); count = 0xc000; } sc->cm_res = bus_alloc_resource(sc->dev, SYS_RES_MEMORY, &sc->cm_rid, start, ~0, count, RF_ACTIVE); if (!sc->cm_res) { RAY_PRINTF(sc, "Cannot allocate common memory"); return (ENOMEM); } sc->cm_bsh = rman_get_bushandle(sc->cm_res); sc->cm_bst = rman_get_bustag(sc->cm_res); if (offset != 0) { RAY_PRINTF(sc, "fixing up CM card address from 0x%x to 0x0", offset); error = CARD_SET_MEMORY_OFFSET(device_get_parent(sc->dev), sc->dev, sc->cm_rid, 0, NULL); if (error) { RAY_PRINTF(sc, "CARD_SET_MEMORY_OFFSET returned 0x%0x", error); return (error); } } if (flags != 0x40 /* XXX MDF_ACTIVE */) { RAY_PRINTF(sc, "fixing up CM flags from 0x%lx to 0x40", flags); error = CARD_SET_RES_FLAGS(device_get_parent(sc->dev), sc->dev, SYS_RES_MEMORY, sc->cm_rid, 2); if (error) { RAY_PRINTF(sc, "CARD_SET_RES_FLAGS returned 0x%0x", error); return (error); } } #if RAY_DEBUG & (RAY_DBG_CM | RAY_DBG_BOOTPARAM) CARD_GET_RES_FLAGS(device_get_parent(sc->dev), sc->dev, SYS_RES_MEMORY, sc->cm_rid, &flags); CARD_GET_MEMORY_OFFSET(device_get_parent(sc->dev), sc->dev, sc->cm_rid, &offset); RAY_PRINTF(sc, "allocated common memory:\n" ". 
start 0x%0lx count 0x%0lx flags 0x%0lx offset 0x%0x", bus_get_resource_start(sc->dev, SYS_RES_MEMORY, sc->cm_rid), bus_get_resource_count(sc->dev, SYS_RES_MEMORY, sc->cm_rid), flags, offset); #endif /* RAY_DEBUG & (RAY_DBG_CM | RAY_DBG_BOOTPARAM) */ return (0); } /* * Get an irq and attach it to the bus */ static int ray_res_alloc_irq(struct ray_softc *sc) { int error; RAY_DPRINTF(sc, RAY_DBG_SUBR, ""); RAY_DPRINTF(sc,RAY_DBG_CM | RAY_DBG_BOOTPARAM, "irq start 0x%0lx count 0x%0lx", bus_get_resource_start(sc->dev, SYS_RES_IRQ, 0), bus_get_resource_count(sc->dev, SYS_RES_IRQ, 0)); sc->irq_rid = 0; sc->irq_res = bus_alloc_resource(sc->dev, SYS_RES_IRQ, &sc->irq_rid, 0, ~0, 1, RF_ACTIVE); if (!sc->irq_res) { RAY_PRINTF(sc, "Cannot allocate irq"); return (ENOMEM); } if ((error = bus_setup_intr(sc->dev, sc->irq_res, INTR_TYPE_NET, ray_intr, sc, &sc->irq_handle)) != 0) { RAY_PRINTF(sc, "Failed to setup irq"); return (error); } RAY_DPRINTF(sc, RAY_DBG_CM | RAY_DBG_BOOTPARAM, "allocated irq:\n" ". 
start 0x%0lx count 0x%0lx", bus_get_resource_start(sc->dev, SYS_RES_IRQ, sc->irq_rid), bus_get_resource_count(sc->dev, SYS_RES_IRQ, sc->irq_rid)); return (0); } /* * Release all of the card's resources */ static void ray_res_release(struct ray_softc *sc) { if (sc->irq_res != 0) { bus_teardown_intr(sc->dev, sc->irq_res, sc->irq_handle); bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); sc->irq_res = 0; } if (sc->am_res != 0) { bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->am_rid, sc->am_res); sc->am_res = 0; } if (sc->cm_res != 0) { bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->cm_rid, sc->cm_res); sc->cm_res = 0; } } /* * mbuf dump */ #if RAY_DEBUG & RAY_DBG_MBUF static void ray_dump_mbuf(struct ray_softc *sc, struct mbuf *m, char *s) { u_int8_t *d, *ed; u_int i; char p[17]; RAY_PRINTF(sc, "%s", s); i = 0; bzero(p, 17); for (; m; m = m->m_next) { d = mtod(m, u_int8_t *); ed = d + m->m_len; for (; d < ed; i++, d++) { if ((i % 16) == 0) { printf(" %s\n\t", p); } else if ((i % 8) == 0) printf(" "); printf(" %02x", *d); p[i % 16] = ((*d >= 0x20) && (*d < 0x80)) ? *d : '.'; } } if ((i - 1) % 16) printf(" %s\n", p); } #endif /* RAY_DEBUG & RAY_DBG_MBUF */ Index: head/sys/dev/sf/if_sf.c =================================================================== --- head/sys/dev/sf/if_sf.c (revision 71961) +++ head/sys/dev/sf/if_sf.c (revision 71962) @@ -1,1512 +1,1511 @@ /* * Copyright (c) 1997, 1998, 1999 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * Adaptec AIC-6915 "Starfire" PCI fast ethernet driver for FreeBSD. * Programming manual is available from: * ftp.adaptec.com:/pub/BBS/userguides/aic6915_pg.pdf. * * Written by Bill Paul * Department of Electical Engineering * Columbia University, New York City */ /* * The Adaptec AIC-6915 "Starfire" is a 64-bit 10/100 PCI ethernet * controller designed with flexibility and reducing CPU load in mind. * The Starfire offers high and low priority buffer queues, a * producer/consumer index mechanism and several different buffer * queue and completion queue descriptor types. Any one of a number * of different driver designs can be used, depending on system and * OS requirements. 
This driver makes use of type0 transmit frame * descriptors (since BSD fragments packets across an mbuf chain) * and two RX buffer queues prioritized on size (one queue for small * frames that will fit into a single mbuf, another with full size * mbuf clusters for everything else). The producer/consumer indexes * and completion queues are also used. * * One downside to the Starfire has to do with alignment: buffer * queues must be aligned on 256-byte boundaries, and receive buffers * must be aligned on longword boundaries. The receive buffer alignment * causes problems on the Alpha platform, where the packet payload * should be longword aligned. There is no simple way around this. * * For receive filtering, the Starfire offers 16 perfect filter slots * and a 512-bit hash table. * * The Starfire has no internal transceiver, relying instead on an * external MII-based transceiver. Accessing registers on external * PHYs is done through a special register map rather than with the * usual bitbang MDIO method. * * Acesssing the registers on the Starfire is a little tricky. The * Starfire has a 512K internal register space. When programmed for * PCI memory mapped mode, the entire register space can be accessed * directly. However in I/O space mode, only 256 bytes are directly * mapped into PCI I/O space. The other registers can be accessed * indirectly using the SF_INDIRECTIO_ADDR and SF_INDIRECTIO_DATA * registers inside the 256-byte I/O window. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include #include #include #include #include /* "controller miibus0" required. See GENERIC if you get errors here. 
*/ #include "miibus_if.h" #include #include #define SF_USEIOSPACE #include MODULE_DEPEND(sf, miibus, 1, 1, 1); #ifndef lint static const char rcsid[] = "$FreeBSD$"; #endif static struct sf_type sf_devs[] = { { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX" }, { 0, 0, NULL } }; static int sf_probe __P((device_t)); static int sf_attach __P((device_t)); static int sf_detach __P((device_t)); static void sf_intr __P((void *)); static void sf_stats_update __P((void *)); static void sf_rxeof __P((struct sf_softc *)); static void sf_txeof __P((struct sf_softc *)); static int sf_encap __P((struct sf_softc *, struct sf_tx_bufdesc_type0 *, struct mbuf *)); static void sf_start __P((struct ifnet *)); static int sf_ioctl __P((struct ifnet *, u_long, caddr_t)); static void sf_init __P((void *)); static void sf_stop __P((struct sf_softc *)); static void sf_watchdog __P((struct ifnet *)); static void sf_shutdown __P((device_t)); static int sf_ifmedia_upd __P((struct ifnet *)); static void sf_ifmedia_sts __P((struct ifnet *, struct ifmediareq *)); static void sf_reset __P((struct sf_softc *)); static int sf_init_rx_ring __P((struct sf_softc *)); static void sf_init_tx_ring __P((struct sf_softc *)); static int sf_newbuf __P((struct sf_softc *, struct sf_rx_bufdesc_type0 *, struct mbuf *)); static void sf_setmulti __P((struct sf_softc *)); static int sf_setperf __P((struct sf_softc *, int, caddr_t)); static int sf_sethash __P((struct sf_softc *, caddr_t, int)); #ifdef notdef static int sf_setvlan __P((struct sf_softc *, int, u_int32_t)); #endif static u_int8_t sf_read_eeprom __P((struct sf_softc *, int)); static u_int32_t sf_calchash __P((caddr_t)); static int sf_miibus_readreg __P((device_t, int, int)); static int sf_miibus_writereg __P((device_t, int, int, int)); static void sf_miibus_statchg __P((device_t)); static u_int32_t csr_read_4 __P((struct sf_softc *, int)); static void csr_write_4 __P((struct sf_softc *, int, u_int32_t)); #ifdef SF_USEIOSPACE #define 
SF_RES SYS_RES_IOPORT #define SF_RID SF_PCI_LOIO #else #define SF_RES SYS_RES_MEMORY #define SF_RID SF_PCI_LOMEM #endif static device_method_t sf_methods[] = { /* Device interface */ DEVMETHOD(device_probe, sf_probe), DEVMETHOD(device_attach, sf_attach), DEVMETHOD(device_detach, sf_detach), DEVMETHOD(device_shutdown, sf_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, sf_miibus_readreg), DEVMETHOD(miibus_writereg, sf_miibus_writereg), DEVMETHOD(miibus_statchg, sf_miibus_statchg), { 0, 0 } }; static driver_t sf_driver = { "sf", sf_methods, sizeof(struct sf_softc), }; static devclass_t sf_devclass; DRIVER_MODULE(if_sf, pci, sf_driver, sf_devclass, 0, 0); DRIVER_MODULE(miibus, sf, miibus_driver, miibus_devclass, 0, 0); #define SF_SETBIT(sc, reg, x) \ csr_write_4(sc, reg, csr_read_4(sc, reg) | x) #define SF_CLRBIT(sc, reg, x) \ csr_write_4(sc, reg, csr_read_4(sc, reg) & ~x) static u_int32_t csr_read_4(sc, reg) struct sf_softc *sc; int reg; { u_int32_t val; #ifdef SF_USEIOSPACE CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE); val = CSR_READ_4(sc, SF_INDIRECTIO_DATA); #else val = CSR_READ_4(sc, (reg + SF_RMAP_INTREG_BASE)); #endif return(val); } static u_int8_t sf_read_eeprom(sc, reg) struct sf_softc *sc; int reg; { u_int8_t val; val = (csr_read_4(sc, SF_EEADDR_BASE + (reg & 0xFFFFFFFC)) >> (8 * (reg & 3))) & 0xFF; return(val); } static void csr_write_4(sc, reg, val) struct sf_softc *sc; int reg; u_int32_t val; { #ifdef SF_USEIOSPACE CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE); CSR_WRITE_4(sc, SF_INDIRECTIO_DATA, val); #else CSR_WRITE_4(sc, (reg + SF_RMAP_INTREG_BASE), val); #endif return; } static u_int32_t sf_calchash(addr) caddr_t addr; { u_int32_t crc, carry; int i, j; u_int8_t c; /* Compute CRC for the address value. 
*/ crc = 0xFFFFFFFF; /* initial value */ for (i = 0; i < 6; i++) { c = *(addr + i); for (j = 0; j < 8; j++) { carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01); crc <<= 1; c >>= 1; if (carry) crc = (crc ^ 0x04c11db6) | carry; } } /* return the filter bit position */ return(crc >> 23 & 0x1FF); } /* * Copy the address 'mac' into the perfect RX filter entry at * offset 'idx.' The perfect filter only has 16 entries so do * some sanity tests. */ static int sf_setperf(sc, idx, mac) struct sf_softc *sc; int idx; caddr_t mac; { u_int16_t *p; if (idx < 0 || idx > SF_RXFILT_PERFECT_CNT) return(EINVAL); if (mac == NULL) return(EINVAL); p = (u_int16_t *)mac; csr_write_4(sc, SF_RXFILT_PERFECT_BASE + (idx * SF_RXFILT_PERFECT_SKIP), htons(p[2])); csr_write_4(sc, SF_RXFILT_PERFECT_BASE + (idx * SF_RXFILT_PERFECT_SKIP) + 4, htons(p[1])); csr_write_4(sc, SF_RXFILT_PERFECT_BASE + (idx * SF_RXFILT_PERFECT_SKIP) + 8, htons(p[0])); return(0); } /* * Set the bit in the 512-bit hash table that corresponds to the * specified mac address 'mac.' If 'prio' is nonzero, update the * priority hash table instead of the filter hash table. */ static int sf_sethash(sc, mac, prio) struct sf_softc *sc; caddr_t mac; int prio; { u_int32_t h = 0; if (mac == NULL) return(EINVAL); h = sf_calchash(mac); if (prio) { SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_PRIOOFF + (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF))); } else { SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_ADDROFF + (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF))); } return(0); } #ifdef notdef /* * Set a VLAN tag in the receive filter. 
*/ static int sf_setvlan(sc, idx, vlan) struct sf_softc *sc; int idx; u_int32_t vlan; { if (idx < 0 || idx >> SF_RXFILT_HASH_CNT) return(EINVAL); csr_write_4(sc, SF_RXFILT_HASH_BASE + (idx * SF_RXFILT_HASH_SKIP) + SF_RXFILT_HASH_VLANOFF, vlan); return(0); } #endif static int sf_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct sf_softc *sc; int i; u_int32_t val = 0; sc = device_get_softc(dev); for (i = 0; i < SF_TIMEOUT; i++) { val = csr_read_4(sc, SF_PHY_REG(phy, reg)); if (val & SF_MII_DATAVALID) break; } if (i == SF_TIMEOUT) return(0); if ((val & 0x0000FFFF) == 0xFFFF) return(0); return(val & 0x0000FFFF); } static int sf_miibus_writereg(dev, phy, reg, val) device_t dev; int phy, reg, val; { struct sf_softc *sc; int i; int busy; sc = device_get_softc(dev); csr_write_4(sc, SF_PHY_REG(phy, reg), val); for (i = 0; i < SF_TIMEOUT; i++) { busy = csr_read_4(sc, SF_PHY_REG(phy, reg)); if (!(busy & SF_MII_BUSY)) break; } return(0); } static void sf_miibus_statchg(dev) device_t dev; { struct sf_softc *sc; struct mii_data *mii; sc = device_get_softc(dev); mii = device_get_softc(sc->sf_miibus); if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_FULLDUPLEX); csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_FDX); } else { SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_FULLDUPLEX); csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_HDX); } return; } static void sf_setmulti(sc) struct sf_softc *sc; { struct ifnet *ifp; int i; struct ifmultiaddr *ifma; u_int8_t dummy[] = { 0, 0, 0, 0, 0, 0 }; ifp = &sc->arpcom.ac_if; /* First zot all the existing filters. */ for (i = 1; i < SF_RXFILT_PERFECT_CNT; i++) sf_setperf(sc, i, (char *)&dummy); for (i = SF_RXFILT_HASH_BASE; i < (SF_RXFILT_HASH_MAX + 1); i += 4) csr_write_4(sc, i, 0); SF_CLRBIT(sc, SF_RXFILT, SF_RXFILT_ALLMULTI); /* Now program new ones. 
*/ if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { SF_SETBIT(sc, SF_RXFILT, SF_RXFILT_ALLMULTI); } else { i = 1; /* First find the tail of the list. */ - for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; - ifma = ifma->ifma_link.le_next) { + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_link.le_next == NULL) break; } /* Now traverse the list backwards. */ for (; ifma != NULL && ifma != (void *)&ifp->if_multiaddrs; ifma = (struct ifmultiaddr *)ifma->ifma_link.le_prev) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; /* * Program the first 15 multicast groups * into the perfect filter. For all others, * use the hash table. */ if (i < SF_RXFILT_PERFECT_CNT) { sf_setperf(sc, i, LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); i++; continue; } sf_sethash(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 0); } } return; } /* * Set media options. */ static int sf_ifmedia_upd(ifp) struct ifnet *ifp; { struct sf_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->sf_miibus); sc->sf_link = 0; if (mii->mii_instance) { struct mii_softc *miisc; for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; miisc = LIST_NEXT(miisc, mii_list)) mii_phy_reset(miisc); } mii_mediachg(mii); return(0); } /* * Report current media status. 
*/ static void sf_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct sf_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->sf_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; return; } static int sf_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct sf_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int error = 0; SF_LOCK(sc); switch(command) { case SIOCSIFADDR: case SIOCGIFADDR: case SIOCSIFMTU: error = ether_ioctl(ifp, command, data); break; case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->sf_if_flags & IFF_PROMISC)) { SF_SETBIT(sc, SF_RXFILT, SF_RXFILT_PROMISC); } else if (ifp->if_flags & IFF_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->sf_if_flags & IFF_PROMISC) { SF_CLRBIT(sc, SF_RXFILT, SF_RXFILT_PROMISC); } else if (!(ifp->if_flags & IFF_RUNNING)) sf_init(sc); } else { if (ifp->if_flags & IFF_RUNNING) sf_stop(sc); } sc->sf_if_flags = ifp->if_flags; error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: sf_setmulti(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->sf_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; default: error = EINVAL; break; } SF_UNLOCK(sc); return(error); } static void sf_reset(sc) struct sf_softc *sc; { register int i; csr_write_4(sc, SF_GEN_ETH_CTL, 0); SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); DELAY(1000); SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_RESET); for (i = 0; i < SF_TIMEOUT; i++) { DELAY(10); if (!(csr_read_4(sc, SF_PCI_DEVCFG) & SF_PCIDEVCFG_RESET)) break; } if (i == SF_TIMEOUT) printf("sf%d: reset never completed!\n", sc->sf_unit); /* Wait a little while for the chip to get its brains in order. 
*/ DELAY(1000); return; } /* * Probe for an Adaptec AIC-6915 chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. * We also check the subsystem ID so that we can identify exactly which * NIC has been found, if possible. */ static int sf_probe(dev) device_t dev; { struct sf_type *t; t = sf_devs; while(t->sf_name != NULL) { if ((pci_get_vendor(dev) == t->sf_vid) && (pci_get_device(dev) == t->sf_did)) { switch((pci_read_config(dev, SF_PCI_SUBVEN_ID, 4) >> 16) & 0xFFFF) { case AD_SUBSYSID_62011_REV0: case AD_SUBSYSID_62011_REV1: device_set_desc(dev, "Adaptec ANA-62011 10/100BaseTX"); return(0); break; case AD_SUBSYSID_62022: device_set_desc(dev, "Adaptec ANA-62022 10/100BaseTX"); return(0); break; case AD_SUBSYSID_62044_REV0: case AD_SUBSYSID_62044_REV1: device_set_desc(dev, "Adaptec ANA-62044 10/100BaseTX"); return(0); break; case AD_SUBSYSID_62020: device_set_desc(dev, "Adaptec ANA-62020 10/100BaseFX"); return(0); break; case AD_SUBSYSID_69011: device_set_desc(dev, "Adaptec ANA-69011 10/100BaseTX"); return(0); break; default: device_set_desc(dev, t->sf_name); return(0); break; } } t++; } return(ENXIO); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int sf_attach(dev) device_t dev; { int i; u_int32_t command; struct sf_softc *sc; struct ifnet *ifp; int unit, rid, error = 0; sc = device_get_softc(dev); unit = device_get_unit(dev); bzero(sc, sizeof(struct sf_softc)); mtx_init(&sc->sf_mtx, device_get_nameunit(dev), MTX_DEF | MTX_RECURSE); SF_LOCK(sc); /* * Handle power management nonsense. */ command = pci_read_config(dev, SF_PCI_CAPID, 4) & 0x000000FF; if (command == 0x01) { command = pci_read_config(dev, SF_PCI_PWRMGMTCTRL, 4); if (command & SF_PSTATE_MASK) { u_int32_t iobase, membase, irq; /* Save important PCI config data. 
*/ iobase = pci_read_config(dev, SF_PCI_LOIO, 4); membase = pci_read_config(dev, SF_PCI_LOMEM, 4); irq = pci_read_config(dev, SF_PCI_INTLINE, 4); /* Reset the power state. */ printf("sf%d: chip is in D%d power mode " "-- setting to D0\n", unit, command & SF_PSTATE_MASK); command &= 0xFFFFFFFC; pci_write_config(dev, SF_PCI_PWRMGMTCTRL, command, 4); /* Restore PCI config data. */ pci_write_config(dev, SF_PCI_LOIO, iobase, 4); pci_write_config(dev, SF_PCI_LOMEM, membase, 4); pci_write_config(dev, SF_PCI_INTLINE, irq, 4); } } /* * Map control/status registers. */ command = pci_read_config(dev, PCIR_COMMAND, 4); command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN); pci_write_config(dev, PCIR_COMMAND, command, 4); command = pci_read_config(dev, PCIR_COMMAND, 4); #ifdef SF_USEIOSPACE if (!(command & PCIM_CMD_PORTEN)) { printf("sf%d: failed to enable I/O ports!\n", unit); error = ENXIO; goto fail; } #else if (!(command & PCIM_CMD_MEMEN)) { printf("sf%d: failed to enable memory mapping!\n", unit); error = ENXIO; goto fail; } #endif rid = SF_RID; sc->sf_res = bus_alloc_resource(dev, SF_RES, &rid, 0, ~0, 1, RF_ACTIVE); if (sc->sf_res == NULL) { printf ("sf%d: couldn't map ports\n", unit); error = ENXIO; goto fail; } sc->sf_btag = rman_get_bustag(sc->sf_res); sc->sf_bhandle = rman_get_bushandle(sc->sf_res); /* Allocate interrupt */ rid = 0; sc->sf_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE); if (sc->sf_irq == NULL) { printf("sf%d: couldn't map interrupt\n", unit); bus_release_resource(dev, SF_RES, SF_RID, sc->sf_res); error = ENXIO; goto fail; } error = bus_setup_intr(dev, sc->sf_irq, INTR_TYPE_NET, sf_intr, sc, &sc->sf_intrhand); if (error) { bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_res); bus_release_resource(dev, SF_RES, SF_RID, sc->sf_res); printf("sf%d: couldn't set up irq\n", unit); goto fail; } callout_handle_init(&sc->sf_stat_ch); /* Reset the adapter. */ sf_reset(sc); /* * Get station address from the EEPROM. 
*/ for (i = 0; i < ETHER_ADDR_LEN; i++) sc->arpcom.ac_enaddr[i] = sf_read_eeprom(sc, SF_EE_NODEADDR + ETHER_ADDR_LEN - i); /* * An Adaptec chip was detected. Inform the world. */ printf("sf%d: Ethernet address: %6D\n", unit, sc->arpcom.ac_enaddr, ":"); sc->sf_unit = unit; /* Allocate the descriptor queues. */ sc->sf_ldata = contigmalloc(sizeof(struct sf_list_data), M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc->sf_ldata == NULL) { printf("sf%d: no memory for list buffers!\n", unit); bus_teardown_intr(dev, sc->sf_irq, sc->sf_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_irq); bus_release_resource(dev, SF_RES, SF_RID, sc->sf_res); error = ENXIO; goto fail; } bzero(sc->sf_ldata, sizeof(struct sf_list_data)); /* Do MII setup. */ if (mii_phy_probe(dev, &sc->sf_miibus, sf_ifmedia_upd, sf_ifmedia_sts)) { printf("sf%d: MII without any phy!\n", sc->sf_unit); contigfree(sc->sf_ldata,sizeof(struct sf_list_data),M_DEVBUF); bus_teardown_intr(dev, sc->sf_irq, sc->sf_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_irq); bus_release_resource(dev, SF_RES, SF_RID, sc->sf_res); error = ENXIO; goto fail; } ifp = &sc->arpcom.ac_if; ifp->if_softc = sc; ifp->if_unit = unit; ifp->if_name = "sf"; ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = sf_ioctl; ifp->if_output = ether_output; ifp->if_start = sf_start; ifp->if_watchdog = sf_watchdog; ifp->if_init = sf_init; ifp->if_baudrate = 10000000; ifp->if_snd.ifq_maxlen = SF_TX_DLIST_CNT - 1; /* * Call MI attach routine. 
*/ ether_ifattach(ifp, ETHER_BPF_SUPPORTED); SF_UNLOCK(sc); return(0); fail: SF_UNLOCK(sc); mtx_destroy(&sc->sf_mtx); return(error); } static int sf_detach(dev) device_t dev; { struct sf_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); SF_LOCK(sc); ifp = &sc->arpcom.ac_if; ether_ifdetach(ifp, ETHER_BPF_SUPPORTED); sf_stop(sc); bus_generic_detach(dev); device_delete_child(dev, sc->sf_miibus); bus_teardown_intr(dev, sc->sf_irq, sc->sf_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_irq); bus_release_resource(dev, SF_RES, SF_RID, sc->sf_res); contigfree(sc->sf_ldata, sizeof(struct sf_list_data), M_DEVBUF); SF_UNLOCK(sc); mtx_destroy(&sc->sf_mtx); return(0); } static int sf_init_rx_ring(sc) struct sf_softc *sc; { struct sf_list_data *ld; int i; ld = sc->sf_ldata; bzero((char *)ld->sf_rx_dlist_big, sizeof(struct sf_rx_bufdesc_type0) * SF_RX_DLIST_CNT); bzero((char *)ld->sf_rx_clist, sizeof(struct sf_rx_cmpdesc_type3) * SF_RX_CLIST_CNT); for (i = 0; i < SF_RX_DLIST_CNT; i++) { if (sf_newbuf(sc, &ld->sf_rx_dlist_big[i], NULL) == ENOBUFS) return(ENOBUFS); } return(0); } static void sf_init_tx_ring(sc) struct sf_softc *sc; { struct sf_list_data *ld; int i; ld = sc->sf_ldata; bzero((char *)ld->sf_tx_dlist, sizeof(struct sf_tx_bufdesc_type0) * SF_TX_DLIST_CNT); bzero((char *)ld->sf_tx_clist, sizeof(struct sf_tx_cmpdesc_type0) * SF_TX_CLIST_CNT); for (i = 0; i < SF_TX_DLIST_CNT; i++) ld->sf_tx_dlist[i].sf_id = SF_TX_BUFDESC_ID; for (i = 0; i < SF_TX_CLIST_CNT; i++) ld->sf_tx_clist[i].sf_type = SF_TXCMPTYPE_TX; ld->sf_tx_dlist[SF_TX_DLIST_CNT - 1].sf_end = 1; sc->sf_tx_cnt = 0; return; } static int sf_newbuf(sc, c, m) struct sf_softc *sc; struct sf_rx_bufdesc_type0 *c; struct mbuf *m; { struct mbuf *m_new = NULL; if (m == NULL) { MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("sf%d: no memory for rx list -- " "packet dropped!\n", sc->sf_unit); return(ENOBUFS); } MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { printf("sf%d: no 
memory for rx list -- " "packet dropped!\n", sc->sf_unit); m_freem(m_new); return(ENOBUFS); } m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; } else { m_new = m; m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; m_new->m_data = m_new->m_ext.ext_buf; } m_adj(m_new, sizeof(u_int64_t)); c->sf_mbuf = m_new; c->sf_addrlo = SF_RX_HOSTADDR(vtophys(mtod(m_new, caddr_t))); c->sf_valid = 1; return(0); } /* * The starfire is programmed to use 'normal' mode for packet reception, * which means we use the consumer/producer model for both the buffer * descriptor queue and the completion descriptor queue. The only problem * with this is that it involves a lot of register accesses: we have to * read the RX completion consumer and producer indexes and the RX buffer * producer index, plus the RX completion consumer and RX buffer producer * indexes have to be updated. It would have been easier if Adaptec had * put each index in a separate register, especially given that the damn * NIC has a 512K register space. * * In spite of all the lovely features that Adaptec crammed into the 6915, * it is marred by one truly stupid design flaw, which is that receive * buffer addresses must be aligned on a longword boundary. This forces * the packet payload to be unaligned, which is suboptimal on the x86 and * completely unuseable on the Alpha. Our only recourse is to copy received * packets into properly aligned buffers before handing them off. 
 */
/*
 * RX completion handler: drain the RX completion queue, hand good
 * frames to the stack, and recycle buffer descriptors. Indexes are
 * snapshotted once up front and written back once at the end.
 */
static void sf_rxeof(sc)
	struct sf_softc		*sc;
{
	struct ether_header	*eh;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct sf_rx_bufdesc_type0	*desc;
	struct sf_rx_cmpdesc_type3	*cur_rx;
	u_int32_t		rxcons, rxprod;
	int			cmpprodidx, cmpconsidx, bufprodidx;

	ifp = &sc->arpcom.ac_if;

	/* Snapshot completion consumer/producer and buffer producer. */
	rxcons = csr_read_4(sc, SF_CQ_CONSIDX);
	rxprod = csr_read_4(sc, SF_RXDQ_PTR_Q1);
	cmpprodidx = SF_IDX_LO(csr_read_4(sc, SF_CQ_PRODIDX));
	cmpconsidx = SF_IDX_LO(rxcons);
	bufprodidx = SF_IDX_LO(rxprod);

	/* Process completions until we catch up with the producer. */
	while (cmpconsidx != cmpprodidx) {
		struct mbuf		*m0;

		cur_rx = &sc->sf_ldata->sf_rx_clist[cmpconsidx];
		desc = &sc->sf_ldata->sf_rx_dlist_big[cur_rx->sf_endidx];
		m = desc->sf_mbuf;
		SF_INC(cmpconsidx, SF_RX_CLIST_CNT);
		SF_INC(bufprodidx, SF_RX_DLIST_CNT);

		/* Errored frame: count it and recycle the mbuf in place. */
		if (!(cur_rx->sf_status1 & SF_RXSTAT1_OK)) {
			ifp->if_ierrors++;
			sf_newbuf(sc, desc, m);
			continue;
		}

		/*
		 * Copy the frame into a fresh chain so the payload ends
		 * up longword aligned; the chip forces unaligned RX
		 * buffers (see the comment above this function).
		 */
		m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
		    cur_rx->sf_len + ETHER_ALIGN, 0, ifp, NULL);
		sf_newbuf(sc, desc, m);
		if (m0 == NULL) {
			ifp->if_ierrors++;
			continue;
		}
		m_adj(m0, ETHER_ALIGN);
		m = m0;

		eh = mtod(m, struct ether_header *);
		ifp->if_ipackets++;

		/* Remove header from mbuf and pass it on. */
		m_adj(m, sizeof(struct ether_header));
		ether_input(ifp, eh, m);
	}

	/* Tell the chip how far we got. */
	csr_write_4(sc, SF_CQ_CONSIDX,
	    (rxcons & ~SF_CQ_CONSIDX_RXQ1) | cmpconsidx);
	csr_write_4(sc, SF_RXDQ_PTR_Q1,
	    (rxprod & ~SF_RXDQ_PRODIDX) | bufprodidx);

	return;
}

/*
 * Read the transmit status from the completion queue and release
 * mbufs. Note that the buffer descriptor index in the completion
 * descriptor is an offset from the start of the transmit buffer
 * descriptor list in bytes. This is important because the manual
 * gives the impression that it should match the producer/consumer
 * index, which is the offset in 8 byte blocks.
 */
static void sf_txeof(sc)
	struct sf_softc		*sc;
{
	int			txcons, cmpprodidx, cmpconsidx;
	struct sf_tx_cmpdesc_type1 *cur_cmp;
	struct sf_tx_bufdesc_type0 *cur_tx;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;

	txcons = csr_read_4(sc, SF_CQ_CONSIDX);
	cmpprodidx = SF_IDX_HI(csr_read_4(sc, SF_CQ_PRODIDX));
	cmpconsidx = SF_IDX_HI(txcons);

	while (cmpconsidx != cmpprodidx) {
		cur_cmp = &sc->sf_ldata->sf_tx_clist[cmpconsidx];
		/*
		 * sf_index is a byte offset into the TX descriptor
		 * list; >> 7 converts it to a descriptor index
		 * (presumably 128 bytes per descriptor — see the
		 * comment above this function).
		 */
		cur_tx = &sc->sf_ldata->sf_tx_dlist[cur_cmp->sf_index >> 7];
		SF_INC(cmpconsidx, SF_TX_CLIST_CNT);

		if (cur_cmp->sf_txstat & SF_TXSTAT_TX_OK)
			ifp->if_opackets++;
		else
			ifp->if_oerrors++;

		sc->sf_tx_cnt--;
		if (cur_tx->sf_mbuf != NULL) {
			m_freem(cur_tx->sf_mbuf);
			cur_tx->sf_mbuf = NULL;
		}
	}

	/* All reported frames reaped: clear watchdog and unstall output. */
	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Write the TX half of the completion consumer index back. */
	csr_write_4(sc, SF_CQ_CONSIDX,
	    (txcons & ~SF_CQ_CONSIDX_TXQ) |
	    ((cmpconsidx << 16) & 0xFFFF0000));

	return;
}

/*
 * Interrupt handler: claim the interrupt via the shadow ISR, then
 * loop ack'ing and servicing status bits until none remain.
 */
static void sf_intr(arg)
	void			*arg;
{
	struct sf_softc		*sc;
	struct ifnet		*ifp;
	u_int32_t		status;

	sc = arg;
	SF_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	/* Not our interrupt: bail without touching the chip. */
	if (!(csr_read_4(sc, SF_ISR_SHADOW) & SF_ISR_PCIINT_ASSERTED)) {
		SF_UNLOCK(sc);
		return;
	}

	/* Disable interrupts. */
	csr_write_4(sc, SF_IMR, 0x00000000);

	for (;;) {
		/* Reading SF_ISR latches status; write it back to ack. */
		status = csr_read_4(sc, SF_ISR);
		if (status)
			csr_write_4(sc, SF_ISR, status);

		if (!(status & SF_INTRS))
			break;

		if (status & SF_ISR_RXDQ1_DMADONE)
			sf_rxeof(sc);

		if (status & SF_ISR_TX_TXDONE)
			sf_txeof(sc);

		if (status & SF_ISR_ABNORMALINTR) {
			/*
			 * Stats overflow just needs an early stats
			 * harvest; any other abnormal condition gets a
			 * full reinit.
			 */
			if (status & SF_ISR_STATSOFLOW) {
				untimeout(sf_stats_update, sc,
				    sc->sf_stat_ch);
				sf_stats_update(sc);
			} else
				sf_init(sc);
		}
	}

	/* Re-enable interrupts.
*/ csr_write_4(sc, SF_IMR, SF_INTRS); if (ifp->if_snd.ifq_head != NULL) sf_start(ifp); SF_UNLOCK(sc); return; } static void sf_init(xsc) void *xsc; { struct sf_softc *sc; struct ifnet *ifp; struct mii_data *mii; int i; sc = xsc; SF_LOCK(sc); ifp = &sc->arpcom.ac_if; mii = device_get_softc(sc->sf_miibus); sf_stop(sc); sf_reset(sc); /* Init all the receive filter registers */ for (i = SF_RXFILT_PERFECT_BASE; i < (SF_RXFILT_HASH_MAX + 1); i += 4) csr_write_4(sc, i, 0); /* Empty stats counter registers. */ for (i = 0; i < sizeof(struct sf_stats)/sizeof(u_int32_t); i++) csr_write_4(sc, SF_STATS_BASE + (i + sizeof(u_int32_t)), 0); /* Init our MAC address */ csr_write_4(sc, SF_PAR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0])); csr_write_4(sc, SF_PAR1, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4])); sf_setperf(sc, 0, (caddr_t)&sc->arpcom.ac_enaddr); if (sf_init_rx_ring(sc) == ENOBUFS) { printf("sf%d: initialization failed: no " "memory for rx buffers\n", sc->sf_unit); SF_UNLOCK(sc); return; } sf_init_tx_ring(sc); csr_write_4(sc, SF_RXFILT, SF_PERFMODE_NORMAL|SF_HASHMODE_WITHVLAN); /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) { SF_SETBIT(sc, SF_RXFILT, SF_RXFILT_PROMISC); } else { SF_CLRBIT(sc, SF_RXFILT, SF_RXFILT_PROMISC); } if (ifp->if_flags & IFF_BROADCAST) { SF_SETBIT(sc, SF_RXFILT, SF_RXFILT_BROAD); } else { SF_CLRBIT(sc, SF_RXFILT, SF_RXFILT_BROAD); } /* * Load the multicast filter. */ sf_setmulti(sc); /* Init the completion queue indexes */ csr_write_4(sc, SF_CQ_CONSIDX, 0); csr_write_4(sc, SF_CQ_PRODIDX, 0); /* Init the RX completion queue */ csr_write_4(sc, SF_RXCQ_CTL_1, vtophys(sc->sf_ldata->sf_rx_clist) & SF_RXCQ_ADDR); SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQTYPE_3); /* Init RX DMA control. */ SF_SETBIT(sc, SF_RXDMA_CTL, SF_RXDMA_REPORTBADPKTS); /* Init the RX buffer descriptor queue. 
*/ csr_write_4(sc, SF_RXDQ_ADDR_Q1, vtophys(sc->sf_ldata->sf_rx_dlist_big)); csr_write_4(sc, SF_RXDQ_CTL_1, (MCLBYTES << 16) | SF_DESCSPACE_16BYTES); csr_write_4(sc, SF_RXDQ_PTR_Q1, SF_RX_DLIST_CNT - 1); /* Init the TX completion queue */ csr_write_4(sc, SF_TXCQ_CTL, vtophys(sc->sf_ldata->sf_tx_clist) & SF_RXCQ_ADDR); /* Init the TX buffer descriptor queue. */ csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, vtophys(sc->sf_ldata->sf_tx_dlist)); SF_SETBIT(sc, SF_TX_FRAMCTL, SF_TXFRMCTL_CPLAFTERTX); csr_write_4(sc, SF_TXDQ_CTL, SF_TXBUFDESC_TYPE0|SF_TXMINSPACE_128BYTES|SF_TXSKIPLEN_8BYTES); SF_SETBIT(sc, SF_TXDQ_CTL, SF_TXDQCTL_NODMACMP); /* Enable autopadding of short TX frames. */ SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_AUTOPAD); /* Enable interrupts. */ csr_write_4(sc, SF_IMR, SF_INTRS); SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_INTR_ENB); /* Enable the RX and TX engines. */ SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RX_ENB|SF_ETHCTL_RXDMA_ENB); SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TX_ENB|SF_ETHCTL_TXDMA_ENB); /*mii_mediachg(mii);*/ sf_ifmedia_upd(ifp); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; sc->sf_stat_ch = timeout(sf_stats_update, sc, hz); SF_UNLOCK(sc); return; } static int sf_encap(sc, c, m_head) struct sf_softc *sc; struct sf_tx_bufdesc_type0 *c; struct mbuf *m_head; { int frag = 0; struct sf_frag *f = NULL; struct mbuf *m; m = m_head; for (m = m_head, frag = 0; m != NULL; m = m->m_next) { if (m->m_len != 0) { if (frag == SF_MAXFRAGS) break; f = &c->sf_frags[frag]; if (frag == 0) f->sf_pktlen = m_head->m_pkthdr.len; f->sf_fraglen = m->m_len; f->sf_addr = vtophys(mtod(m, vm_offset_t)); frag++; } } if (m != NULL) { struct mbuf *m_new = NULL; MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("sf%d: no memory for tx list", sc->sf_unit); return(1); } if (m_head->m_pkthdr.len > MHLEN) { MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { m_freem(m_new); printf("sf%d: no memory for tx list", sc->sf_unit); return(1); } } 
m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t)); m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len; m_freem(m_head); m_head = m_new; f = &c->sf_frags[0]; f->sf_fraglen = f->sf_pktlen = m_head->m_pkthdr.len; f->sf_addr = vtophys(mtod(m_head, caddr_t)); frag = 1; } c->sf_mbuf = m_head; c->sf_id = SF_TX_BUFDESC_ID; c->sf_fragcnt = frag; c->sf_intr = 1; c->sf_caltcp = 0; c->sf_crcen = 1; return(0); } static void sf_start(ifp) struct ifnet *ifp; { struct sf_softc *sc; struct sf_tx_bufdesc_type0 *cur_tx = NULL; struct mbuf *m_head = NULL; int i, txprod; sc = ifp->if_softc; SF_LOCK(sc); if (!sc->sf_link) { SF_UNLOCK(sc); return; } if (ifp->if_flags & IFF_OACTIVE) { SF_UNLOCK(sc); return; } txprod = csr_read_4(sc, SF_TXDQ_PRODIDX); i = SF_IDX_HI(txprod) >> 4; while(sc->sf_ldata->sf_tx_dlist[i].sf_mbuf == NULL) { if (sc->sf_tx_cnt == (SF_TX_DLIST_CNT - 2)) { ifp->if_flags |= IFF_OACTIVE; cur_tx = NULL; break; } IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; cur_tx = &sc->sf_ldata->sf_tx_dlist[i]; if (sf_encap(sc, cur_tx, m_head)) { IF_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; cur_tx = NULL; break; } /* * If there's a BPF listener, bounce a copy of this frame * to him. 
*/ if (ifp->if_bpf) bpf_mtap(ifp, m_head); SF_INC(i, SF_TX_DLIST_CNT); sc->sf_tx_cnt++; } if (cur_tx == NULL) { SF_UNLOCK(sc); return; } /* Transmit */ csr_write_4(sc, SF_TXDQ_PRODIDX, (txprod & ~SF_TXDQ_PRODIDX_HIPRIO) | ((i << 20) & 0xFFFF0000)); ifp->if_timer = 5; SF_UNLOCK(sc); return; } static void sf_stop(sc) struct sf_softc *sc; { int i; struct ifnet *ifp; SF_LOCK(sc); ifp = &sc->arpcom.ac_if; untimeout(sf_stats_update, sc, sc->sf_stat_ch); csr_write_4(sc, SF_GEN_ETH_CTL, 0); csr_write_4(sc, SF_CQ_CONSIDX, 0); csr_write_4(sc, SF_CQ_PRODIDX, 0); csr_write_4(sc, SF_RXDQ_ADDR_Q1, 0); csr_write_4(sc, SF_RXDQ_CTL_1, 0); csr_write_4(sc, SF_RXDQ_PTR_Q1, 0); csr_write_4(sc, SF_TXCQ_CTL, 0); csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0); csr_write_4(sc, SF_TXDQ_CTL, 0); sf_reset(sc); sc->sf_link = 0; for (i = 0; i < SF_RX_DLIST_CNT; i++) { if (sc->sf_ldata->sf_rx_dlist_big[i].sf_mbuf != NULL) { m_freem(sc->sf_ldata->sf_rx_dlist_big[i].sf_mbuf); sc->sf_ldata->sf_rx_dlist_big[i].sf_mbuf = NULL; } } for (i = 0; i < SF_TX_DLIST_CNT; i++) { if (sc->sf_ldata->sf_tx_dlist[i].sf_mbuf != NULL) { m_freem(sc->sf_ldata->sf_tx_dlist[i].sf_mbuf); sc->sf_ldata->sf_tx_dlist[i].sf_mbuf = NULL; } } ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE); SF_UNLOCK(sc); return; } /* * Note: it is important that this function not be interrupted. We * use a two-stage register access scheme: if we are interrupted in * between setting the indirect address register and reading from the * indirect data register, the contents of the address register could * be changed out from under us. 
*/ static void sf_stats_update(xsc) void *xsc; { struct sf_softc *sc; struct ifnet *ifp; struct mii_data *mii; struct sf_stats stats; u_int32_t *ptr; int i; sc = xsc; SF_LOCK(sc); ifp = &sc->arpcom.ac_if; mii = device_get_softc(sc->sf_miibus); ptr = (u_int32_t *)&stats; for (i = 0; i < sizeof(stats)/sizeof(u_int32_t); i++) ptr[i] = csr_read_4(sc, SF_STATS_BASE + (i + sizeof(u_int32_t))); for (i = 0; i < sizeof(stats)/sizeof(u_int32_t); i++) csr_write_4(sc, SF_STATS_BASE + (i + sizeof(u_int32_t)), 0); ifp->if_collisions += stats.sf_tx_single_colls + stats.sf_tx_multi_colls + stats.sf_tx_excess_colls; mii_tick(mii); if (!sc->sf_link) { mii_pollstat(mii); if (mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) sc->sf_link++; if (ifp->if_snd.ifq_head != NULL) sf_start(ifp); } sc->sf_stat_ch = timeout(sf_stats_update, sc, hz); SF_UNLOCK(sc); return; } static void sf_watchdog(ifp) struct ifnet *ifp; { struct sf_softc *sc; sc = ifp->if_softc; SF_LOCK(sc); ifp->if_oerrors++; printf("sf%d: watchdog timeout\n", sc->sf_unit); sf_stop(sc); sf_reset(sc); sf_init(sc); if (ifp->if_snd.ifq_head != NULL) sf_start(ifp); SF_UNLOCK(sc); return; } static void sf_shutdown(dev) device_t dev; { struct sf_softc *sc; sc = device_get_softc(dev); sf_stop(sc); return; } Index: head/sys/dev/sk/if_sk.c =================================================================== --- head/sys/dev/sk/if_sk.c (revision 71961) +++ head/sys/dev/sk/if_sk.c (revision 71962) @@ -1,2236 +1,2235 @@ /* * Copyright (c) 1997, 1998, 1999, 2000 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports * the SK-984x series adapters, both single port and dual port. * References: * The XaQti XMAC II datasheet, * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf * The SysKonnect GEnesis manual, http://www.syskonnect.com * * Note: XaQti has been aquired by Vitesse, and Vitesse does not have the * XMAC II datasheet online. 
I have put my copy at people.freebsd.org as a * convenience to others until Vitesse corrects this problem: * * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf * * Written by Bill Paul * Department of Electrical Engineering * Columbia University, New York City */ /* * The SysKonnect gigabit ethernet adapters consist of two main * components: the SysKonnect GEnesis controller chip and the XaQti Corp. * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC * components and a PHY while the GEnesis controller provides a PCI * interface with DMA support. Each card may have between 512K and * 2MB of SRAM on board depending on the configuration. * * The SysKonnect GEnesis controller can have either one or two XMAC * chips connected to it, allowing single or dual port NIC configurations. * SysKonnect has the distinction of being the only vendor on the market * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs, * dual DMA queues, packet/MAC/transmit arbiters and direct access to the * XMAC registers. This driver takes advantage of these features to allow * both XMACs to operate as independent interfaces. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include #include #include #include #include #include #include #include #define SK_USEIOSPACE #include #include MODULE_DEPEND(sk, miibus, 1, 1, 1); /* "controller miibus0" required. See GENERIC if you get errors here. 
*/ #include "miibus_if.h" #ifndef lint static const char rcsid[] = "$FreeBSD$"; #endif static struct sk_type sk_devs[] = { { SK_VENDORID, SK_DEVICEID_GE, "SysKonnect Gigabit Ethernet" }, { 0, 0, NULL } }; static int sk_probe __P((device_t)); static int sk_attach __P((device_t)); static int sk_detach __P((device_t)); static int sk_detach_xmac __P((device_t)); static int sk_probe_xmac __P((device_t)); static int sk_attach_xmac __P((device_t)); static void sk_tick __P((void *)); static void sk_intr __P((void *)); static void sk_intr_xmac __P((struct sk_if_softc *)); static void sk_intr_bcom __P((struct sk_if_softc *)); static void sk_rxeof __P((struct sk_if_softc *)); static void sk_txeof __P((struct sk_if_softc *)); static int sk_encap __P((struct sk_if_softc *, struct mbuf *, u_int32_t *)); static void sk_start __P((struct ifnet *)); static int sk_ioctl __P((struct ifnet *, u_long, caddr_t)); static void sk_init __P((void *)); static void sk_init_xmac __P((struct sk_if_softc *)); static void sk_stop __P((struct sk_if_softc *)); static void sk_watchdog __P((struct ifnet *)); static void sk_shutdown __P((device_t)); static int sk_ifmedia_upd __P((struct ifnet *)); static void sk_ifmedia_sts __P((struct ifnet *, struct ifmediareq *)); static void sk_reset __P((struct sk_softc *)); static int sk_newbuf __P((struct sk_if_softc *, struct sk_chain *, struct mbuf *)); static int sk_alloc_jumbo_mem __P((struct sk_if_softc *)); static void *sk_jalloc __P((struct sk_if_softc *)); static void sk_jfree __P((caddr_t, void *)); static int sk_init_rx_ring __P((struct sk_if_softc *)); static void sk_init_tx_ring __P((struct sk_if_softc *)); static u_int32_t sk_win_read_4 __P((struct sk_softc *, int)); static u_int16_t sk_win_read_2 __P((struct sk_softc *, int)); static u_int8_t sk_win_read_1 __P((struct sk_softc *, int)); static void sk_win_write_4 __P((struct sk_softc *, int, u_int32_t)); static void sk_win_write_2 __P((struct sk_softc *, int, u_int32_t)); static void 
sk_win_write_1 __P((struct sk_softc *, int, u_int32_t)); static u_int8_t sk_vpd_readbyte __P((struct sk_softc *, int)); static void sk_vpd_read_res __P((struct sk_softc *, struct vpd_res *, int)); static void sk_vpd_read __P((struct sk_softc *)); static int sk_miibus_readreg __P((device_t, int, int)); static int sk_miibus_writereg __P((device_t, int, int, int)); static void sk_miibus_statchg __P((device_t)); static u_int32_t sk_calchash __P((caddr_t)); static void sk_setfilt __P((struct sk_if_softc *, caddr_t, int)); static void sk_setmulti __P((struct sk_if_softc *)); #ifdef SK_USEIOSPACE #define SK_RES SYS_RES_IOPORT #define SK_RID SK_PCI_LOIO #else #define SK_RES SYS_RES_MEMORY #define SK_RID SK_PCI_LOMEM #endif /* * Note that we have newbus methods for both the GEnesis controller * itself and the XMAC(s). The XMACs are children of the GEnesis, and * the miibus code is a child of the XMACs. We need to do it this way * so that the miibus drivers can access the PHY registers on the * right PHY. It's not quite what I had in mind, but it's the only * design that achieves the desired effect. 
 */

/*
 * newbus method table for the GEnesis controller ("skc") itself.
 * The controller probes/attaches first; its XMAC children hang off it
 * (see the sk_methods table below).
 */
static device_method_t skc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sk_probe),
	DEVMETHOD(device_attach,	sk_attach),
	DEVMETHOD(device_detach,	sk_detach),
	DEVMETHOD(device_shutdown,	sk_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ 0, 0 }
};

static driver_t skc_driver = {
	"skc",
	skc_methods,
	sizeof(struct sk_softc)
};

static devclass_t skc_devclass;

/*
 * newbus method table for each XMAC II child ("sk"); these are what
 * actually become network interfaces.  The MII methods let the miibus
 * children reach the PHY registers through the owning XMAC.
 */
static device_method_t sk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sk_probe_xmac),
	DEVMETHOD(device_attach,	sk_attach_xmac),
	DEVMETHOD(device_detach,	sk_detach_xmac),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	sk_miibus_readreg),
	DEVMETHOD(miibus_writereg,	sk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	sk_miibus_statchg),

	{ 0, 0 }
};

static driver_t sk_driver = {
	"sk",
	sk_methods,
	sizeof(struct sk_if_softc)
};

static devclass_t sk_devclass;

/* skc attaches to pci, sk attaches to skc, miibus attaches to sk. */
DRIVER_MODULE(if_sk, pci, skc_driver, skc_devclass, 0, 0);
DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0);
DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);

/* Read-modify-write helpers for direct CSRs and windowed registers. */
#define SK_SETBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)

#define SK_CLRBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)

#define SK_WIN_SETBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)

#define SK_WIN_CLRBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)

#define SK_WIN_SETBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)

#define SK_WIN_CLRBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)

/*
 * The GEnesis exposes most registers through a banked window: select
 * the bank by writing SK_WIN(reg) to the RAP register, then access the
 * register at its offset within the window.  Each accessor below does
 * the RAP select followed by the access; the two steps must not be
 * reordered or interleaved with another window access.
 */
static u_int32_t sk_win_read_4(sc, reg)
	struct sk_softc		*sc;
	int			reg;
{
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
}

static u_int16_t
sk_win_read_2(sc, reg)
	struct sk_softc		*sc;
	int			reg;
{
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
}

static u_int8_t sk_win_read_1(sc, reg)
	struct sk_softc		*sc;
	int			reg;
{
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
}

static void sk_win_write_4(sc, reg, val)
	struct sk_softc		*sc;
	int			reg;
	u_int32_t		val;
{
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
	return;
}

/*
 * Note: val is declared u_int32_t (matching the prototype) even for the
 * 2- and 1-byte writers; only the low bits reach the bus access.
 */
static void sk_win_write_2(sc, reg, val)
	struct sk_softc		*sc;
	int			reg;
	u_int32_t		val;
{
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), (u_int32_t)val);
	return;
}

static void
sk_win_write_1(sc, reg, val)
	struct sk_softc		*sc;
	int			reg;
	u_int32_t		val;
{
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
	return;
}

/*
 * The VPD EEPROM contains Vital Product Data, as suggested in
 * the PCI 2.1 specification. The VPD data is separated into areas
 * denoted by resource IDs. The SysKonnect VPD contains an ID string
 * resource (the name of the adapter), a read-only area resource
 * containing various key/data fields and a read/write area which
 * can be used to store asset management information or log messages.
 * We read the ID string and read-only into buffers attached to
 * the controller softc structure for later use. At the moment,
 * we only use the ID string during sk_attach().
*/ static u_int8_t sk_vpd_readbyte(sc, addr) struct sk_softc *sc; int addr; { int i; sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr); for (i = 0; i < SK_TIMEOUT; i++) { DELAY(1); if (sk_win_read_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG) break; } if (i == SK_TIMEOUT) return(0); return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA))); } static void sk_vpd_read_res(sc, res, addr) struct sk_softc *sc; struct vpd_res *res; int addr; { int i; u_int8_t *ptr; ptr = (u_int8_t *)res; for (i = 0; i < sizeof(struct vpd_res); i++) ptr[i] = sk_vpd_readbyte(sc, i + addr); return; } static void sk_vpd_read(sc) struct sk_softc *sc; { int pos = 0, i; struct vpd_res res; if (sc->sk_vpd_prodname != NULL) free(sc->sk_vpd_prodname, M_DEVBUF); if (sc->sk_vpd_readonly != NULL) free(sc->sk_vpd_readonly, M_DEVBUF); sc->sk_vpd_prodname = NULL; sc->sk_vpd_readonly = NULL; sk_vpd_read_res(sc, &res, pos); if (res.vr_id != VPD_RES_ID) { printf("skc%d: bad VPD resource id: expected %x got %x\n", sc->sk_unit, VPD_RES_ID, res.vr_id); return; } pos += sizeof(res); sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT); for (i = 0; i < res.vr_len; i++) sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos); sc->sk_vpd_prodname[i] = '\0'; pos += i; sk_vpd_read_res(sc, &res, pos); if (res.vr_id != VPD_RES_READ) { printf("skc%d: bad VPD resource id: expected %x got %x\n", sc->sk_unit, VPD_RES_READ, res.vr_id); return; } pos += sizeof(res); sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT); for (i = 0; i < res.vr_len + 1; i++) sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos); return; } static int sk_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct sk_if_softc *sc_if; int i; sc_if = device_get_softc(dev); if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0) return(0); SK_IF_LOCK(sc_if); SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8)); SK_XM_READ_2(sc_if, XM_PHY_DATA); if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) { for (i = 0; i < SK_TIMEOUT; i++) { 
DELAY(1); if (SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYDATARDY) break; } if (i == SK_TIMEOUT) { printf("sk%d: phy failed to come ready\n", sc_if->sk_unit); return(0); } } DELAY(1); i = SK_XM_READ_2(sc_if, XM_PHY_DATA); SK_IF_UNLOCK(sc_if); return(i); } static int sk_miibus_writereg(dev, phy, reg, val) device_t dev; int phy, reg, val; { struct sk_if_softc *sc_if; int i; sc_if = device_get_softc(dev); SK_IF_LOCK(sc_if); SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8)); for (i = 0; i < SK_TIMEOUT; i++) { if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY)) break; } if (i == SK_TIMEOUT) { printf("sk%d: phy failed to come ready\n", sc_if->sk_unit); return(ETIMEDOUT); } SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val); for (i = 0; i < SK_TIMEOUT; i++) { DELAY(1); if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY)) break; } SK_IF_UNLOCK(sc_if); if (i == SK_TIMEOUT) printf("sk%d: phy write timed out\n", sc_if->sk_unit); return(0); } static void sk_miibus_statchg(dev) device_t dev; { struct sk_if_softc *sc_if; struct mii_data *mii; sc_if = device_get_softc(dev); mii = device_get_softc(sc_if->sk_miibus); SK_IF_LOCK(sc_if); /* * If this is a GMII PHY, manually set the XMAC's * duplex mode accordingly. */ if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) { if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); } else { SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); } } SK_IF_UNLOCK(sc_if); return; } #define SK_POLY 0xEDB88320 #define SK_BITS 6 static u_int32_t sk_calchash(addr) caddr_t addr; { u_int32_t idx, bit, data, crc; /* Compute CRC for the address value. */ crc = 0xFFFFFFFF; /* initial value */ for (idx = 0; idx < 6; idx++) { for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) crc = (crc >> 1) ^ (((crc ^ data) & 1) ? 
SK_POLY : 0); } return (~crc & ((1 << SK_BITS) - 1)); } static void sk_setfilt(sc_if, addr, slot) struct sk_if_softc *sc_if; caddr_t addr; int slot; { int base; base = XM_RXFILT_ENTRY(slot); SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0])); SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2])); SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4])); return; } static void sk_setmulti(sc_if) struct sk_if_softc *sc_if; { struct ifnet *ifp; u_int32_t hashes[2] = { 0, 0 }; int h, i; struct ifmultiaddr *ifma; u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 }; ifp = &sc_if->arpcom.ac_if; /* First, zot all the existing filters. */ for (i = 1; i < XM_RXFILT_MAX; i++) sk_setfilt(sc_if, (caddr_t)&dummy, i); SK_XM_WRITE_4(sc_if, XM_MAR0, 0); SK_XM_WRITE_4(sc_if, XM_MAR2, 0); /* Now program new ones. */ if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { hashes[0] = 0xFFFFFFFF; hashes[1] = 0xFFFFFFFF; } else { i = 1; /* First find the tail of the list. */ - for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; - ifma = ifma->ifma_link.le_next) { + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_link.le_next == NULL) break; } /* Now traverse the list backwards. */ for (; ifma != NULL && ifma != (void *)&ifp->if_multiaddrs; ifma = (struct ifmultiaddr *)ifma->ifma_link.le_prev) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; /* * Program the first XM_RXFILT_MAX multicast groups * into the perfect filter. For all others, * use the hash table. 
*/ if (i < XM_RXFILT_MAX) { sk_setfilt(sc_if, LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i); i++; continue; } h = sk_calchash( LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); } } SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH| XM_MODE_RX_USE_PERFECT); SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]); SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]); return; } static int sk_init_rx_ring(sc_if) struct sk_if_softc *sc_if; { struct sk_chain_data *cd; struct sk_ring_data *rd; int i; cd = &sc_if->sk_cdata; rd = sc_if->sk_rdata; bzero((char *)rd->sk_rx_ring, sizeof(struct sk_rx_desc) * SK_RX_RING_CNT); for (i = 0; i < SK_RX_RING_CNT; i++) { cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i]; if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS) return(ENOBUFS); if (i == (SK_RX_RING_CNT - 1)) { cd->sk_rx_chain[i].sk_next = &cd->sk_rx_chain[0]; rd->sk_rx_ring[i].sk_next = vtophys(&rd->sk_rx_ring[0]); } else { cd->sk_rx_chain[i].sk_next = &cd->sk_rx_chain[i + 1]; rd->sk_rx_ring[i].sk_next = vtophys(&rd->sk_rx_ring[i + 1]); } } sc_if->sk_cdata.sk_rx_prod = 0; sc_if->sk_cdata.sk_rx_cons = 0; return(0); } static void sk_init_tx_ring(sc_if) struct sk_if_softc *sc_if; { struct sk_chain_data *cd; struct sk_ring_data *rd; int i; cd = &sc_if->sk_cdata; rd = sc_if->sk_rdata; bzero((char *)sc_if->sk_rdata->sk_tx_ring, sizeof(struct sk_tx_desc) * SK_TX_RING_CNT); for (i = 0; i < SK_TX_RING_CNT; i++) { cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i]; if (i == (SK_TX_RING_CNT - 1)) { cd->sk_tx_chain[i].sk_next = &cd->sk_tx_chain[0]; rd->sk_tx_ring[i].sk_next = vtophys(&rd->sk_tx_ring[0]); } else { cd->sk_tx_chain[i].sk_next = &cd->sk_tx_chain[i + 1]; rd->sk_tx_ring[i].sk_next = vtophys(&rd->sk_tx_ring[i + 1]); } } sc_if->sk_cdata.sk_tx_prod = 0; sc_if->sk_cdata.sk_tx_cons = 0; sc_if->sk_cdata.sk_tx_cnt = 0; return; } static int sk_newbuf(sc_if, c, m) struct sk_if_softc *sc_if; struct sk_chain *c; struct mbuf *m; { struct 
mbuf *m_new = NULL; struct sk_rx_desc *r; if (m == NULL) { caddr_t *buf = NULL; MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("sk%d: no memory for rx list -- " "packet dropped!\n", sc_if->sk_unit); return(ENOBUFS); } /* Allocate the jumbo buffer */ buf = sk_jalloc(sc_if); if (buf == NULL) { m_freem(m_new); #ifdef SK_VERBOSE printf("sk%d: jumbo allocation failed " "-- packet dropped!\n", sc_if->sk_unit); #endif return(ENOBUFS); } /* Attach the buffer to the mbuf */ MEXTADD(m_new, buf, SK_JLEN, sk_jfree, (struct sk_if_softc *)sc_if, 0, EXT_NET_DRV); m_new->m_data = (void *)buf; m_new->m_pkthdr.len = m_new->m_len = SK_JLEN; } else { /* * We're re-using a previously allocated mbuf; * be sure to re-init pointers and lengths to * default values. */ m_new = m; m_new->m_len = m_new->m_pkthdr.len = SK_JLEN; m_new->m_data = m_new->m_ext.ext_buf; } /* * Adjust alignment so packet payload begins on a * longword boundary. Mandatory for Alpha, useful on * x86 too. */ m_adj(m_new, ETHER_ALIGN); r = c->sk_desc; c->sk_mbuf = m_new; r->sk_data_lo = vtophys(mtod(m_new, caddr_t)); r->sk_ctl = m_new->m_len | SK_RXSTAT; return(0); } /* * Allocate jumbo buffer storage. The SysKonnect adapters support * "jumbograms" (9K frames), although SysKonnect doesn't currently * use them in their drivers. In order for us to use them, we need * large 9K receive buffers, however standard mbuf clusters are only * 2048 bytes in size. Consequently, we need to allocate and manage * our own jumbo buffer pool. Fortunately, this does not require an * excessive amount of additional code. */ static int sk_alloc_jumbo_mem(sc_if) struct sk_if_softc *sc_if; { caddr_t ptr; register int i; struct sk_jpool_entry *entry; /* Grab a big chunk o' storage. 
*/ sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc_if->sk_cdata.sk_jumbo_buf == NULL) { printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit); return(ENOBUFS); } SLIST_INIT(&sc_if->sk_jfree_listhead); SLIST_INIT(&sc_if->sk_jinuse_listhead); /* * Now divide it up into 9K pieces and save the addresses * in an array. */ ptr = sc_if->sk_cdata.sk_jumbo_buf; for (i = 0; i < SK_JSLOTS; i++) { sc_if->sk_cdata.sk_jslots[i] = ptr; ptr += SK_JLEN; entry = malloc(sizeof(struct sk_jpool_entry), M_DEVBUF, M_NOWAIT); if (entry == NULL) { free(sc_if->sk_cdata.sk_jumbo_buf, M_DEVBUF); sc_if->sk_cdata.sk_jumbo_buf = NULL; printf("sk%d: no memory for jumbo " "buffer queue!\n", sc_if->sk_unit); return(ENOBUFS); } entry->slot = i; SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries); } return(0); } /* * Allocate a jumbo buffer. */ static void *sk_jalloc(sc_if) struct sk_if_softc *sc_if; { struct sk_jpool_entry *entry; entry = SLIST_FIRST(&sc_if->sk_jfree_listhead); if (entry == NULL) { #ifdef SK_VERBOSE printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit); #endif return(NULL); } SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries); SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries); return(sc_if->sk_cdata.sk_jslots[entry->slot]); } /* * Release a jumbo buffer. */ static void sk_jfree(buf, args) caddr_t buf; void *args; { struct sk_if_softc *sc_if; int i; struct sk_jpool_entry *entry; /* Extract the softc struct pointer. 
*/ sc_if = (struct sk_if_softc *)args; if (sc_if == NULL) panic("sk_jfree: didn't get softc pointer!"); /* calculate the slot this buffer belongs to */ i = ((vm_offset_t)buf - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN; if ((i < 0) || (i >= SK_JSLOTS)) panic("sk_jfree: asked to free buffer that we don't manage!"); entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead); if (entry == NULL) panic("sk_jfree: buffer not in use!"); entry->slot = i; SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries); SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries); return; } /* * Set media options. */ static int sk_ifmedia_upd(ifp) struct ifnet *ifp; { struct sk_if_softc *sc_if; struct mii_data *mii; sc_if = ifp->if_softc; mii = device_get_softc(sc_if->sk_miibus); sk_init(sc_if); mii_mediachg(mii); return(0); } /* * Report current media status. */ static void sk_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct sk_if_softc *sc_if; struct mii_data *mii; sc_if = ifp->if_softc; mii = device_get_softc(sc_if->sk_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; return; } static int sk_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct sk_if_softc *sc_if = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; int error = 0; struct mii_data *mii; SK_IF_LOCK(sc_if); switch(command) { case SIOCSIFADDR: case SIOCGIFADDR: error = ether_ioctl(ifp, command, data); break; case SIOCSIFMTU: if (ifr->ifr_mtu > SK_JUMBO_MTU) error = EINVAL; else { ifp->if_mtu = ifr->ifr_mtu; sk_init(sc_if); } break; case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc_if->sk_if_flags & IFF_PROMISC)) { SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC); sk_setmulti(sc_if); } else if (ifp->if_flags & IFF_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc_if->sk_if_flags & IFF_PROMISC) { SK_XM_CLRBIT_4(sc_if, 
XM_MODE, XM_MODE_RX_PROMISC); sk_setmulti(sc_if); } else sk_init(sc_if); } else { if (ifp->if_flags & IFF_RUNNING) sk_stop(sc_if); } sc_if->sk_if_flags = ifp->if_flags; error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: sk_setmulti(sc_if); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc_if->sk_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; default: error = EINVAL; break; } SK_IF_UNLOCK(sc_if); return(error); } /* * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int sk_probe(dev) device_t dev; { struct sk_type *t; t = sk_devs; while(t->sk_name != NULL) { if ((pci_get_vendor(dev) == t->sk_vid) && (pci_get_device(dev) == t->sk_did)) { device_set_desc(dev, t->sk_name); return(0); } t++; } return(ENXIO); } /* * Force the GEnesis into reset, then bring it out of reset. */ static void sk_reset(sc) struct sk_softc *sc; { CSR_WRITE_4(sc, SK_CSR, SK_CSR_SW_RESET); CSR_WRITE_4(sc, SK_CSR, SK_CSR_MASTER_RESET); DELAY(1000); CSR_WRITE_4(sc, SK_CSR, SK_CSR_SW_UNRESET); CSR_WRITE_4(sc, SK_CSR, SK_CSR_MASTER_UNRESET); /* Configure packet arbiter */ sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET); sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT); sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT); sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT); sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT); /* Enable RAM interface */ sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET); /* * Configure interrupt moderation. The moderation timer * defers interrupts specified in the interrupt moderation * timer mask based on the timeout specified in the interrupt * moderation timer init register. Each bit in the timer * register represents 18.825ns, so to specify a timeout in * microseconds, we have to multiply by 54. 
*/ sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(200)); sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF| SK_ISR_RX1_EOF|SK_ISR_RX2_EOF); sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START); return; } static int sk_probe_xmac(dev) device_t dev; { /* * Not much to do here. We always know there will be * at least one XMAC present, and if there are two, * sk_attach() will create a second device instance * for us. */ device_set_desc(dev, "XaQti Corp. XMAC II"); return(0); } /* * Each XMAC chip is attached as a separate logical IP interface. * Single port cards will have only one logical interface of course. */ static int sk_attach_xmac(dev) device_t dev; { struct sk_softc *sc; struct sk_if_softc *sc_if; struct ifnet *ifp; int i, port; if (dev == NULL) return(EINVAL); sc_if = device_get_softc(dev); sc = device_get_softc(device_get_parent(dev)); SK_LOCK(sc); port = *(int *)device_get_ivars(dev); free(device_get_ivars(dev), M_DEVBUF); device_set_ivars(dev, NULL); sc_if->sk_dev = dev; bzero((char *)sc_if, sizeof(struct sk_if_softc)); sc_if->sk_dev = dev; sc_if->sk_unit = device_get_unit(dev); sc_if->sk_port = port; sc_if->sk_softc = sc; sc->sk_if[port] = sc_if; if (port == SK_PORT_A) sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0; if (port == SK_PORT_B) sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1; /* * Get station address for this interface. Note that * dual port cards actually come with three station * addresses: one for each port, plus an extra. The * extra one is used by the SysKonnect driver software * as a 'virtual' station address for when both ports * are operating in failover mode. Currently we don't * use this extra address. */ for (i = 0; i < ETHER_ADDR_LEN; i++) sc_if->arpcom.ac_enaddr[i] = sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i); printf("sk%d: Ethernet address: %6D\n", sc_if->sk_unit, sc_if->arpcom.ac_enaddr, ":"); /* * Set up RAM buffer addresses. The NIC will have a certain * amount of SRAM on it, somewhere between 512K and 2MB. 
We * need to divide this up a) between the transmitter and * receiver and b) between the two XMACs, if this is a * dual port NIC. Our algotithm is to divide up the memory * evenly so that everyone gets a fair share. */ if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) { u_int32_t chunk, val; chunk = sc->sk_ramsize / 2; val = sc->sk_rboff / sizeof(u_int64_t); sc_if->sk_rx_ramstart = val; val += (chunk / sizeof(u_int64_t)); sc_if->sk_rx_ramend = val - 1; sc_if->sk_tx_ramstart = val; val += (chunk / sizeof(u_int64_t)); sc_if->sk_tx_ramend = val - 1; } else { u_int32_t chunk, val; chunk = sc->sk_ramsize / 4; val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) / sizeof(u_int64_t); sc_if->sk_rx_ramstart = val; val += (chunk / sizeof(u_int64_t)); sc_if->sk_rx_ramend = val - 1; sc_if->sk_tx_ramstart = val; val += (chunk / sizeof(u_int64_t)); sc_if->sk_tx_ramend = val - 1; } /* Read and save PHY type and set PHY address */ sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF; switch(sc_if->sk_phytype) { case SK_PHYTYPE_XMAC: sc_if->sk_phyaddr = SK_PHYADDR_XMAC; break; case SK_PHYTYPE_BCOM: sc_if->sk_phyaddr = SK_PHYADDR_BCOM; break; default: printf("skc%d: unsupported PHY type: %d\n", sc->sk_unit, sc_if->sk_phytype); SK_UNLOCK(sc); return(ENODEV); } /* Allocate the descriptor queues. */ sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc_if->sk_rdata == NULL) { printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit); sc->sk_if[port] = NULL; SK_UNLOCK(sc); return(ENOMEM); } bzero(sc_if->sk_rdata, sizeof(struct sk_ring_data)); /* Try to allocate memory for jumbo buffers. 
*/ if (sk_alloc_jumbo_mem(sc_if)) { printf("sk%d: jumbo buffer allocation failed\n", sc_if->sk_unit); contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data), M_DEVBUF); sc->sk_if[port] = NULL; SK_UNLOCK(sc); return(ENOMEM); } ifp = &sc_if->arpcom.ac_if; ifp->if_softc = sc_if; ifp->if_unit = sc_if->sk_unit; ifp->if_name = "sk"; ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = sk_ioctl; ifp->if_output = ether_output; ifp->if_start = sk_start; ifp->if_watchdog = sk_watchdog; ifp->if_init = sk_init; ifp->if_baudrate = 1000000000; ifp->if_snd.ifq_maxlen = SK_TX_RING_CNT - 1; /* * Do miibus setup. */ sk_init_xmac(sc_if); if (mii_phy_probe(dev, &sc_if->sk_miibus, sk_ifmedia_upd, sk_ifmedia_sts)) { printf("skc%d: no PHY found!\n", sc_if->sk_unit); contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data), M_DEVBUF); SK_UNLOCK(sc); return(ENXIO); } /* * Call MI attach routine. */ ether_ifattach(ifp, ETHER_BPF_SUPPORTED); callout_handle_init(&sc_if->sk_tick_ch); SK_UNLOCK(sc); return(0); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int sk_attach(dev) device_t dev; { u_int32_t command; struct sk_softc *sc; int unit, error = 0, rid, *port; sc = device_get_softc(dev); unit = device_get_unit(dev); bzero(sc, sizeof(struct sk_softc)); mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_DEF | MTX_RECURSE); SK_LOCK(sc); /* * Handle power management nonsense. */ command = pci_read_config(dev, SK_PCI_CAPID, 4) & 0x000000FF; if (command == 0x01) { command = pci_read_config(dev, SK_PCI_PWRMGMTCTRL, 4); if (command & SK_PSTATE_MASK) { u_int32_t iobase, membase, irq; /* Save important PCI config data. */ iobase = pci_read_config(dev, SK_PCI_LOIO, 4); membase = pci_read_config(dev, SK_PCI_LOMEM, 4); irq = pci_read_config(dev, SK_PCI_INTLINE, 4); /* Reset the power state. 
*/ printf("skc%d: chip is in D%d power mode " "-- setting to D0\n", unit, command & SK_PSTATE_MASK); command &= 0xFFFFFFFC; pci_write_config(dev, SK_PCI_PWRMGMTCTRL, command, 4); /* Restore PCI config data. */ pci_write_config(dev, SK_PCI_LOIO, iobase, 4); pci_write_config(dev, SK_PCI_LOMEM, membase, 4); pci_write_config(dev, SK_PCI_INTLINE, irq, 4); } } /* * Map control/status registers. */ command = pci_read_config(dev, PCIR_COMMAND, 4); command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN); pci_write_config(dev, PCIR_COMMAND, command, 4); command = pci_read_config(dev, PCIR_COMMAND, 4); #ifdef SK_USEIOSPACE if (!(command & PCIM_CMD_PORTEN)) { printf("skc%d: failed to enable I/O ports!\n", unit); error = ENXIO; goto fail; } #else if (!(command & PCIM_CMD_MEMEN)) { printf("skc%d: failed to enable memory mapping!\n", unit); error = ENXIO; goto fail; } #endif rid = SK_RID; sc->sk_res = bus_alloc_resource(dev, SK_RES, &rid, 0, ~0, 1, RF_ACTIVE); if (sc->sk_res == NULL) { printf("sk%d: couldn't map ports/memory\n", unit); error = ENXIO; goto fail; } sc->sk_btag = rman_get_bustag(sc->sk_res); sc->sk_bhandle = rman_get_bushandle(sc->sk_res); /* Allocate interrupt */ rid = 0; sc->sk_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE); if (sc->sk_irq == NULL) { printf("skc%d: couldn't map interrupt\n", unit); bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res); error = ENXIO; goto fail; } error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET, sk_intr, sc, &sc->sk_intrhand); if (error) { printf("skc%d: couldn't set up irq\n", unit); bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq); goto fail; } /* Reset the adapter. */ sk_reset(sc); sc->sk_unit = unit; /* Read and save vital product data from EEPROM. 
*/ sk_vpd_read(sc); /* Read and save RAM size and RAMbuffer offset */ switch(sk_win_read_1(sc, SK_EPROM0)) { case SK_RAMSIZE_512K_64: sc->sk_ramsize = 0x80000; sc->sk_rboff = SK_RBOFF_0; break; case SK_RAMSIZE_1024K_64: sc->sk_ramsize = 0x100000; sc->sk_rboff = SK_RBOFF_80000; break; case SK_RAMSIZE_1024K_128: sc->sk_ramsize = 0x100000; sc->sk_rboff = SK_RBOFF_0; break; case SK_RAMSIZE_2048K_128: sc->sk_ramsize = 0x200000; sc->sk_rboff = SK_RBOFF_0; break; default: printf("skc%d: unknown ram size: %d\n", sc->sk_unit, sk_win_read_1(sc, SK_EPROM0)); bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq); bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res); error = ENXIO; goto fail; break; } /* Read and save physical media type */ switch(sk_win_read_1(sc, SK_PMDTYPE)) { case SK_PMD_1000BASESX: sc->sk_pmd = IFM_1000_SX; break; case SK_PMD_1000BASELX: sc->sk_pmd = IFM_1000_LX; break; case SK_PMD_1000BASECX: sc->sk_pmd = IFM_1000_CX; break; case SK_PMD_1000BASETX: sc->sk_pmd = IFM_1000_TX; break; default: printf("skc%d: unknown media type: 0x%x\n", sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE)); bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq); bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res); error = ENXIO; goto fail; } /* Announce the product name. */ printf("skc%d: %s\n", sc->sk_unit, sc->sk_vpd_prodname); sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1); port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT); *port = SK_PORT_A; device_set_ivars(sc->sk_devs[SK_PORT_A], port); if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) { sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1); port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT); *port = SK_PORT_B; device_set_ivars(sc->sk_devs[SK_PORT_B], port); } /* Turn on the 'driver is loaded' LED. 
*/ CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON); bus_generic_attach(dev); SK_UNLOCK(sc); return(0); fail: SK_UNLOCK(sc); mtx_destroy(&sc->sk_mtx); return(error); } static int sk_detach_xmac(dev) device_t dev; { struct sk_softc *sc; struct sk_if_softc *sc_if; struct ifnet *ifp; sc = device_get_softc(device_get_parent(dev)); sc_if = device_get_softc(dev); SK_IF_LOCK(sc_if); ifp = &sc_if->arpcom.ac_if; sk_stop(sc_if); ether_ifdetach(ifp, ETHER_BPF_SUPPORTED); bus_generic_detach(dev); if (sc_if->sk_miibus != NULL) device_delete_child(dev, sc_if->sk_miibus); contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF); contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data), M_DEVBUF); SK_IF_UNLOCK(sc_if); return(0); } static int sk_detach(dev) device_t dev; { struct sk_softc *sc; sc = device_get_softc(dev); SK_LOCK(sc); bus_generic_detach(dev); if (sc->sk_devs[SK_PORT_A] != NULL) device_delete_child(dev, sc->sk_devs[SK_PORT_A]); if (sc->sk_devs[SK_PORT_B] != NULL) device_delete_child(dev, sc->sk_devs[SK_PORT_B]); bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq); bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res); SK_UNLOCK(sc); mtx_destroy(&sc->sk_mtx); return(0); } static int sk_encap(sc_if, m_head, txidx) struct sk_if_softc *sc_if; struct mbuf *m_head; u_int32_t *txidx; { struct sk_tx_desc *f = NULL; struct mbuf *m; u_int32_t frag, cur, cnt = 0; m = m_head; cur = frag = *txidx; /* * Start packing the mbufs in this chain into * the fragment pointers. Stop when we run out * of fragments or hit the end of the mbuf chain. 
*/ for (m = m_head; m != NULL; m = m->m_next) { if (m->m_len != 0) { if ((SK_TX_RING_CNT - (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2) return(ENOBUFS); f = &sc_if->sk_rdata->sk_tx_ring[frag]; f->sk_data_lo = vtophys(mtod(m, vm_offset_t)); f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT; if (cnt == 0) f->sk_ctl |= SK_TXCTL_FIRSTFRAG; else f->sk_ctl |= SK_TXCTL_OWN; cur = frag; SK_INC(frag, SK_TX_RING_CNT); cnt++; } } if (m != NULL) return(ENOBUFS); sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |= SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR; sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head; sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN; sc_if->sk_cdata.sk_tx_cnt += cnt; *txidx = frag; return(0); } static void sk_start(ifp) struct ifnet *ifp; { struct sk_softc *sc; struct sk_if_softc *sc_if; struct mbuf *m_head = NULL; u_int32_t idx; sc_if = ifp->if_softc; sc = sc_if->sk_softc; SK_IF_LOCK(sc_if); idx = sc_if->sk_cdata.sk_tx_prod; while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) { IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* * Pack the data into the transmit ring. If we * don't have room, set the OACTIVE flag and wait * for the NIC to drain the ring. */ if (sk_encap(sc_if, m_head, &idx)) { IF_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; break; } /* * If there's a BPF listener, bounce a copy of this frame * to him. */ if (ifp->if_bpf) bpf_mtap(ifp, m_head); } /* Transmit */ sc_if->sk_cdata.sk_tx_prod = idx; CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START); /* Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; SK_IF_UNLOCK(sc_if); return; } static void sk_watchdog(ifp) struct ifnet *ifp; { struct sk_if_softc *sc_if; sc_if = ifp->if_softc; printf("sk%d: watchdog timeout\n", sc_if->sk_unit); sk_init(sc_if); return; } static void sk_shutdown(dev) device_t dev; { struct sk_softc *sc; sc = device_get_softc(dev); SK_LOCK(sc); /* Turn off the 'driver is loaded' LED. 
 */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);

	/*
	 * Reset the GEnesis controller. Doing this should also
	 * assert the resets on the attached XMAC(s).
	 */
	sk_reset(sc);

	SK_UNLOCK(sc);
	return;
}

/*
 * RX completion handler: walk the receive ring from the current
 * producer index, handing each completed frame to ether_input() and
 * replenishing the ring with fresh jumbo buffers as we go.  Stops at
 * the first descriptor still owned by the chip.
 */
static void sk_rxeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct ether_header	*eh;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct sk_chain		*cur_rx;
	int			total_len = 0;
	int			i;
	u_int32_t		rxstat;

	ifp = &sc_if->arpcom.ac_if;
	i = sc_if->sk_cdata.sk_rx_prod;
	cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];

	while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) {

		cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
		rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat;
		m = cur_rx->sk_mbuf;
		cur_rx->sk_mbuf = NULL;
		total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
		SK_INC(i, SK_RX_RING_CNT);

		/* Bad frame: count it and recycle the buffer in place. */
		if (rxstat & XM_RXSTAT_ERRFRAME) {
			ifp->if_ierrors++;
			sk_newbuf(sc_if, cur_rx, m);
			continue;
		}

		/*
		 * Try to allocate a new jumbo buffer. If that
		 * fails, copy the packet to mbufs and put the
		 * jumbo buffer back in the ring so it can be
		 * re-used. If allocating mbufs fails, then we
		 * have to drop the packet.
		 */
		if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) {
			struct mbuf		*m0;
			m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
			    total_len + ETHER_ALIGN, 0, ifp, NULL);
			sk_newbuf(sc_if, cur_rx, m);
			if (m0 == NULL) {
				printf("sk%d: no receive buffers "
				    "available -- packet dropped!\n",
				    sc_if->sk_unit);
				ifp->if_ierrors++;
				continue;
			}
			/* Strip the alignment slop copied above. */
			m_adj(m0, ETHER_ALIGN);
			m = m0;
		} else {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);

		/* Remove header from mbuf and pass it on. */
		m_adj(m, sizeof(struct ether_header));
		ether_input(ifp, eh, m);
	}

	/* Remember where to resume on the next interrupt. */
	sc_if->sk_cdata.sk_rx_prod = i;

	return;
}

/*
 * TX completion handler: reclaim descriptors the chip has finished
 * with, free their mbufs, and clear OACTIVE/watchdog state so more
 * traffic can flow.
 */
static void sk_txeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_tx_desc	*cur_tx = NULL;
	struct ifnet		*ifp;
	u_int32_t		idx;

	ifp = &sc_if->arpcom.ac_if;

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	idx = sc_if->sk_cdata.sk_tx_cons;
	while(idx != sc_if->sk_cdata.sk_tx_prod) {
		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
		/* Still owned by the chip: stop reclaiming here. */
		if (cur_tx->sk_ctl & SK_TXCTL_OWN)
			break;
		if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG)
			ifp->if_opackets++;
		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
		}
		sc_if->sk_cdata.sk_tx_cnt--;
		SK_INC(idx, SK_TX_RING_CNT);
		/* Progress was made: disarm the TX watchdog. */
		ifp->if_timer = 0;
	}

	sc_if->sk_cdata.sk_tx_cons = idx;

	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;

	return;
}

/*
 * Periodic link-watch callout (XMAC-internal PHY case): polls the
 * GPIO link_sync pin to detect link restoration, per SysKonnect's
 * recommended procedure.
 */
static void sk_tick(xsc_if)
	void			*xsc_if;
{
	struct sk_if_softc	*sc_if;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	int			i;

	sc_if = xsc_if;
	SK_IF_LOCK(sc_if);
	ifp = &sc_if->arpcom.ac_if;
	mii = device_get_softc(sc_if->sk_miibus);

	if (!(ifp->if_flags & IFF_UP)) {
		SK_IF_UNLOCK(sc_if);
		return;
	}

	/* Broadcom PHYs have their own interrupt-driven handler. */
	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		sk_intr_bcom(sc_if);
		SK_IF_UNLOCK(sc_if);
		return;
	}

	/*
	 * According to SysKonnect, the correct way to verify that
	 * the link has come back up is to poll bit 0 of the GPIO
	 * register three times. This pin has the signal from the
	 * link_sync pin connected to it; if we read the same link
	 * state 3 times in a row, we know the link is up.
	 */
	for (i = 0; i < 3; i++) {
		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
			break;
	}

	/* Link not stable yet: reschedule ourselves and try again. */
	if (i != 3) {
		sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		SK_IF_UNLOCK(sc_if);
		return;
	}

	/* Turn the GP0 interrupt back on.
*/ SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); SK_XM_READ_2(sc_if, XM_ISR); mii_tick(mii); mii_pollstat(mii); untimeout(sk_tick, sc_if, sc_if->sk_tick_ch); SK_IF_UNLOCK(sc_if); return; } static void sk_intr_bcom(sc_if) struct sk_if_softc *sc_if; { struct sk_softc *sc; struct mii_data *mii; struct ifnet *ifp; int status; sc = sc_if->sk_softc; mii = device_get_softc(sc_if->sk_miibus); ifp = &sc_if->arpcom.ac_if; SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); /* * Read the PHY interrupt register to make sure * we clear any pending interrupts. */ status = sk_miibus_readreg(sc_if->sk_dev, SK_PHYADDR_BCOM, BRGPHY_MII_ISR); if (!(ifp->if_flags & IFF_RUNNING)) { sk_init_xmac(sc_if); return; } if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) { int lstat; lstat = sk_miibus_readreg(sc_if->sk_dev, SK_PHYADDR_BCOM, BRGPHY_MII_AUXSTS); if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) { mii_mediachg(mii); /* Turn off the link LED. */ SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF); sc_if->sk_link = 0; } else if (status & BRGPHY_ISR_LNK_CHG) { sk_miibus_writereg(sc_if->sk_dev, SK_PHYADDR_BCOM, BRGPHY_MII_IMR, 0xFF00); mii_tick(mii); sc_if->sk_link = 1; /* Turn on the link LED. */ SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF| SK_LINKLED_BLINK_OFF); mii_pollstat(mii); } else { mii_tick(mii); sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); } } SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); return; } static void sk_intr_xmac(sc_if) struct sk_if_softc *sc_if; { struct sk_softc *sc; u_int16_t status; struct mii_data *mii; sc = sc_if->sk_softc; mii = device_get_softc(sc_if->sk_miibus); status = SK_XM_READ_2(sc_if, XM_ISR); /* * Link has gone down. Start MII tick timeout to * watch for link resync. 
*/ if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) { if (status & XM_ISR_GP0_SET) { SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); } if (status & XM_ISR_AUTONEG_DONE) { sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); } } if (status & XM_IMR_TX_UNDERRUN) SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO); if (status & XM_IMR_RX_OVERRUN) SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO); status = SK_XM_READ_2(sc_if, XM_ISR); return; } static void sk_intr(xsc) void *xsc; { struct sk_softc *sc = xsc; struct sk_if_softc *sc_if0 = NULL, *sc_if1 = NULL; struct ifnet *ifp0 = NULL, *ifp1 = NULL; u_int32_t status; SK_LOCK(sc); sc_if0 = sc->sk_if[SK_PORT_A]; sc_if1 = sc->sk_if[SK_PORT_B]; if (sc_if0 != NULL) ifp0 = &sc_if0->arpcom.ac_if; if (sc_if1 != NULL) ifp1 = &sc_if1->arpcom.ac_if; for (;;) { status = CSR_READ_4(sc, SK_ISSR); if (!(status & sc->sk_intrmask)) break; /* Handle receive interrupts first. */ if (status & SK_ISR_RX1_EOF) { sk_rxeof(sc_if0); CSR_WRITE_4(sc, SK_BMU_RX_CSR0, SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); } if (status & SK_ISR_RX2_EOF) { sk_rxeof(sc_if1); CSR_WRITE_4(sc, SK_BMU_RX_CSR1, SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); } /* Then transmit interrupts. */ if (status & SK_ISR_TX1_S_EOF) { sk_txeof(sc_if0); CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, SK_TXBMU_CLR_IRQ_EOF); } if (status & SK_ISR_TX2_S_EOF) { sk_txeof(sc_if1); CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, SK_TXBMU_CLR_IRQ_EOF); } /* Then MAC interrupts. 
*/ if (status & SK_ISR_MAC1 && ifp0->if_flags & IFF_RUNNING) sk_intr_xmac(sc_if0); if (status & SK_ISR_MAC2 && ifp1->if_flags & IFF_RUNNING) sk_intr_xmac(sc_if1); if (status & SK_ISR_EXTERNAL_REG) { if (ifp0 != NULL) sk_intr_bcom(sc_if0); if (ifp1 != NULL) sk_intr_bcom(sc_if1); } } CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); if (ifp0 != NULL && ifp0->if_snd.ifq_head != NULL) sk_start(ifp0); if (ifp1 != NULL && ifp1->if_snd.ifq_head != NULL) sk_start(ifp1); SK_UNLOCK(sc); return; } static void sk_init_xmac(sc_if) struct sk_if_softc *sc_if; { struct sk_softc *sc; struct ifnet *ifp; struct sk_bcom_hack bhack[] = { { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 }, { 0, 0 } }; sc = sc_if->sk_softc; ifp = &sc_if->arpcom.ac_if; /* Unreset the XMAC. */ SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET); DELAY(1000); /* Reset the XMAC's internal state. */ SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC); /* Save the XMAC II revision */ sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID)); /* * Perform additional initialization for external PHYs, * namely for the 1000baseTX cards that use the XMAC's * GMII mode. */ if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { int i = 0; u_int32_t val; /* Take PHY out of reset. */ val = sk_win_read_4(sc, SK_GPIO); if (sc_if->sk_port == SK_PORT_A) val |= SK_GPIO_DIR0|SK_GPIO_DAT0; else val |= SK_GPIO_DIR2|SK_GPIO_DAT2; sk_win_write_4(sc, SK_GPIO, val); /* Enable GMII mode on the XMAC. */ SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE); sk_miibus_writereg(sc_if->sk_dev, SK_PHYADDR_BCOM, BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET); DELAY(10000); sk_miibus_writereg(sc_if->sk_dev, SK_PHYADDR_BCOM, BRGPHY_MII_IMR, 0xFFF0); /* * Early versions of the BCM5400 apparently have * a bug that requires them to have their reserved * registers initialized to some magic values. 
I don't * know what the numbers do, I'm just the messenger. */ if (sk_miibus_readreg(sc_if->sk_dev, SK_PHYADDR_BCOM, 0x03) == 0x6041) { while(bhack[i].reg) { sk_miibus_writereg(sc_if->sk_dev, SK_PHYADDR_BCOM, bhack[i].reg, bhack[i].val); i++; } } } /* Set station address */ SK_XM_WRITE_2(sc_if, XM_PAR0, *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0])); SK_XM_WRITE_2(sc_if, XM_PAR1, *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2])); SK_XM_WRITE_2(sc_if, XM_PAR2, *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4])); SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION); if (ifp->if_flags & IFF_PROMISC) { SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC); } else { SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC); } if (ifp->if_flags & IFF_BROADCAST) { SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD); } else { SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD); } /* We don't need the FCS appended to the packet. */ SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS); /* We want short frames padded to 60 bytes. */ SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD); /* * Enable the reception of all error frames. This is is * a necessary evil due to the design of the XMAC. The * XMAC's receive FIFO is only 8K in size, however jumbo * frames can be up to 9000 bytes in length. When bad * frame filtering is enabled, the XMAC's RX FIFO operates * in 'store and forward' mode. For this to work, the * entire frame has to fit into the FIFO, but that means * that jumbo frames larger than 8192 bytes will be * truncated. Disabling all bad frame filtering causes * the RX FIFO to operate in streaming mode, in which * case the XMAC will start transfering frames out of the * RX FIFO as soon as the FIFO threshold is reached. 
*/ SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES| XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS| XM_MODE_RX_INRANGELEN); if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK); else SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK); /* * Bump up the transmit threshold. This helps hold off transmit * underruns when we're blasting traffic from both ports at once. */ SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH); /* Set multicast filter */ sk_setmulti(sc_if); /* Clear and enable interrupts */ SK_XM_READ_2(sc_if, XM_ISR); if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS); else SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF); /* Configure MAC arbiter */ switch(sc_if->sk_xmac_rev) { case XM_XMAC_REV_B2: sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2); sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2); sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2); sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2); sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2); sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2); sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2); sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2); sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2); break; case XM_XMAC_REV_C1: sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1); sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1); sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1); sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1); sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1); sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1); sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1); sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1); sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2); break; default: break; } sk_win_write_2(sc, SK_MACARB_CTL, SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF); sc_if->sk_link = 1; return; } /* * Note that to properly initialize any part of the 
GEnesis chip, * you first have to take it out of reset mode. */ static void sk_init(xsc) void *xsc; { struct sk_if_softc *sc_if = xsc; struct sk_softc *sc; struct ifnet *ifp; struct mii_data *mii; SK_IF_LOCK(sc_if); ifp = &sc_if->arpcom.ac_if; sc = sc_if->sk_softc; mii = device_get_softc(sc_if->sk_miibus); /* Cancel pending I/O and free all RX/TX buffers. */ sk_stop(sc_if); /* Configure LINK_SYNC LED */ SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON); SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_ON); /* Configure RX LED */ SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_START); /* Configure TX LED */ SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_TXLEDCTL_COUNTER_START); /* Configure I2C registers */ /* Configure XMAC(s) */ sk_init_xmac(sc_if); mii_mediachg(mii); /* Configure MAC FIFOs */ SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET); SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END); SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON); SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET); SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END); SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON); /* Configure transmit arbiter(s) */ SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON); /* Configure RAMbuffers */ SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET); SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart); SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart); SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart); SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend); SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart); SK_IF_WRITE_4(sc_if, 
1, SK_TXRBS1_END, sc_if->sk_tx_ramend); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON); /* Configure BMUs */ SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE); SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO, vtophys(&sc_if->sk_rdata->sk_rx_ring[0])); SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0); SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE); SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO, vtophys(&sc_if->sk_rdata->sk_tx_ring[0])); SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0); /* Init descriptors */ if (sk_init_rx_ring(sc_if) == ENOBUFS) { printf("sk%d: initialization failed: no " "memory for rx buffers\n", sc_if->sk_unit); sk_stop(sc_if); SK_IF_UNLOCK(sc_if); return; } sk_init_tx_ring(sc_if); /* Configure interrupt handling */ CSR_READ_4(sc, SK_ISSR); if (sc_if->sk_port == SK_PORT_A) sc->sk_intrmask |= SK_INTRS1; else sc->sk_intrmask |= SK_INTRS2; sc->sk_intrmask |= SK_ISR_EXTERNAL_REG; CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); /* Start BMUs. */ SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START); /* Enable XMACs TX and RX state machines */ SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE); SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; SK_IF_UNLOCK(sc_if); return; } static void sk_stop(sc_if) struct sk_if_softc *sc_if; { int i; struct sk_softc *sc; struct ifnet *ifp; SK_IF_LOCK(sc_if); sc = sc_if->sk_softc; ifp = &sc_if->arpcom.ac_if; untimeout(sk_tick, sc_if, sc_if->sk_tick_ch); if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { u_int32_t val; /* Put PHY back into reset. */ val = sk_win_read_4(sc, SK_GPIO); if (sc_if->sk_port == SK_PORT_A) { val |= SK_GPIO_DIR0; val &= ~SK_GPIO_DAT0; } else { val |= SK_GPIO_DIR2; val &= ~SK_GPIO_DAT2; } sk_win_write_4(sc, SK_GPIO, val); } /* Turn off various components of this interface. 
*/ SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC); SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET); SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET); SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE); SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF); SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF); SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF); SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP); SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP); SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF); SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF); /* Disable interrupts */ if (sc_if->sk_port == SK_PORT_A) sc->sk_intrmask &= ~SK_INTRS1; else sc->sk_intrmask &= ~SK_INTRS2; CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); SK_XM_READ_2(sc_if, XM_ISR); SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF); /* Free RX and TX mbufs still in the queues. */ for (i = 0; i < SK_RX_RING_CNT; i++) { if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) { m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf); sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL; } } for (i = 0; i < SK_TX_RING_CNT; i++) { if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) { m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf); sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL; } } ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE); SK_IF_UNLOCK(sc_if); return; } Index: head/sys/dev/snc/dp83932.c =================================================================== --- head/sys/dev/snc/dp83932.c (revision 71961) +++ head/sys/dev/snc/dp83932.c (revision 71962) @@ -1,1222 +1,1221 @@ /* $FreeBSD$ */ /* $NecBSD: dp83932.c,v 1.5 1999/07/29 05:08:44 kmatsuda Exp $ */ /* $NetBSD: if_snc.c,v 1.18 1998/04/25 21:27:40 scottr Exp $ */ /* * Copyright (c) 1997, 1998, 1999 * Kouichi Matsuda. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Kouichi Matsuda for * NetBSD/pc98. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Modified for FreeBSD(98) 4.0 from NetBSD/pc98 1.4.2 by Motomichi Matsuzaki. */ /* * Modified for NetBSD/pc98 1.2G from NetBSD/mac68k 1.2G by Kouichi Matsuda. * Make adapted for NEC PC-9801-83, 84, PC-9801-103, 104, PC-9801N-25 and * PC-9801N-J02, J02R, which uses National Semiconductor DP83934AVQB as * Ethernet Controller and National Semiconductor NS46C46 as * (64 * 16 bits) Microwire Serial EEPROM. 
*/ /* * National Semiconductor DP8393X SONIC Driver * Copyright (c) 1991 Algorithmics Ltd (http://www.algor.co.uk) * You may use, copy, and modify this program so long as you retain the * copyright line. * * This driver has been substantially modified since Algorithmics donated * it. * * Denton Gentry * and also * Yanagisawa Takeshi * did the work to get this running on the Macintosh. */ #include "opt_inet.h" #include #include #include #include #include #include #include #include #if NRND > 0 #include #endif #include #include #include #include #include #include #include #include #include #include hide void sncwatchdog __P((struct ifnet *)); hide void sncinit __P((void *)); hide int sncstop __P((struct snc_softc *sc)); hide int sncioctl __P((struct ifnet *ifp, u_long cmd, caddr_t data)); hide void sncstart __P((struct ifnet *ifp)); hide void sncreset __P((struct snc_softc *sc)); hide void caminitialise __P((struct snc_softc *)); hide void camentry __P((struct snc_softc *, int, u_char *ea)); hide void camprogram __P((struct snc_softc *)); hide void initialise_tda __P((struct snc_softc *)); hide void initialise_rda __P((struct snc_softc *)); hide void initialise_rra __P((struct snc_softc *)); #ifdef SNCDEBUG hide void camdump __P((struct snc_softc *sc)); #endif hide void sonictxint __P((struct snc_softc *)); hide void sonicrxint __P((struct snc_softc *)); hide u_int sonicput __P((struct snc_softc *sc, struct mbuf *m0, int mtd_next)); hide int sonic_read __P((struct snc_softc *, u_int32_t, int)); hide struct mbuf *sonic_get __P((struct snc_softc *, u_int32_t, int)); int snc_enable __P((struct snc_softc *)); void snc_disable __P((struct snc_softc *)); int snc_mediachange __P((struct ifnet *)); void snc_mediastatus __P((struct ifnet *, struct ifmediareq *)); #ifdef NetBSD #if NetBSD <= 199714 struct cfdriver snc_cd = { NULL, "snc", DV_IFNET }; #endif #endif #undef assert #undef _assert #ifdef NDEBUG #define assert(e) ((void)0) #define _assert(e) ((void)0) #else #define 
_assert(e) assert(e) #ifdef __STDC__ #define assert(e) ((e) ? (void)0 : __assert("snc ", __FILE__, __LINE__, #e)) #else /* PCC */ #define assert(e) ((e) ? (void)0 : __assert("snc "__FILE__, __LINE__, "e")) #endif #endif #ifdef SNCDEBUG #define SNC_SHOWTXHDR 0x01 /* show tx ether_header */ #define SNC_SHOWRXHDR 0x02 /* show rx ether_header */ #define SNC_SHOWCAMENT 0x04 /* show CAM entry */ #endif /* SNCDEBUG */ int sncdebug = 0; void sncconfig(sc, media, nmedia, defmedia, myea) struct snc_softc *sc; int *media, nmedia, defmedia; u_int8_t *myea; { struct ifnet *ifp = &sc->sc_if; int i; #ifdef SNCDEBUG if ((sncdebug & SNC_SHOWCAMENT) != 0) { camdump(sc); } #endif device_printf(sc->sc_dev, "address %6D\n", myea, ":"); #ifdef SNCDEBUG device_printf(sc->sc_dev, "buffers: rra=0x%x cda=0x%x rda=0x%x tda=0x%x\n", sc->v_rra[0], sc->v_cda, sc->v_rda, sc->mtda[0].mtd_vtxp); #endif ifp->if_softc = sc; ifp->if_unit = device_get_unit(sc->sc_dev); ifp->if_name = "snc"; ifp->if_ioctl = sncioctl; ifp->if_output = ether_output; ifp->if_start = sncstart; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_watchdog = sncwatchdog; ifp->if_init = sncinit; ifp->if_mtu = ETHERMTU; ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; bcopy(myea, sc->sc_ethercom.ac_enaddr, ETHER_ADDR_LEN); /* Initialize media goo. */ ifmedia_init(&sc->sc_media, 0, snc_mediachange, snc_mediastatus); if (media != NULL) { for (i = 0; i < nmedia; i++) ifmedia_add(&sc->sc_media, media[i], 0, NULL); ifmedia_set(&sc->sc_media, defmedia); } else { ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL); ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL); } ether_ifattach(ifp, ETHER_BPF_SUPPORTED); #if NRND > 0 rnd_attach_source(&sc->rnd_source, device_get_nameunit(sc->sc_dev), RND_TYPE_NET, 0); #endif } void sncshutdown(arg) void *arg; { sncstop((struct snc_softc *)arg); } /* * Media change callback. 
*/ int snc_mediachange(ifp) struct ifnet *ifp; { struct snc_softc *sc = ifp->if_softc; if (sc->sc_mediachange) return ((*sc->sc_mediachange)(sc)); return (EINVAL); } /* * Media status callback. */ void snc_mediastatus(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct snc_softc *sc = ifp->if_softc; if (sc->sc_enabled == 0) { ifmr->ifm_active = IFM_ETHER | IFM_NONE; ifmr->ifm_status = 0; return; } if (sc->sc_mediastatus) (*sc->sc_mediastatus)(sc, ifmr); } hide int sncioctl(ifp, cmd, data) struct ifnet *ifp; u_long cmd; caddr_t data; { struct ifreq *ifr; struct snc_softc *sc = ifp->if_softc; int s = splnet(), err = 0; int temp; switch (cmd) { case SIOCSIFADDR: case SIOCGIFADDR: case SIOCSIFMTU: err = ether_ioctl(ifp, cmd, data); break; case SIOCSIFFLAGS: if ((ifp->if_flags & IFF_UP) == 0 && (ifp->if_flags & IFF_RUNNING) != 0) { /* * If interface is marked down and it is running, * then stop it. */ sncstop(sc); ifp->if_flags &= ~IFF_RUNNING; snc_disable(sc); } else if ((ifp->if_flags & IFF_UP) != 0 && (ifp->if_flags & IFF_RUNNING) == 0) { /* * If interface is marked up and it is stopped, * then start it. */ if ((err = snc_enable(sc)) != 0) break; sncinit(sc); } else if (sc->sc_enabled) { /* * reset the interface to pick up any other changes * in flags */ temp = ifp->if_flags & IFF_UP; sncreset(sc); ifp->if_flags |= temp; sncstart(ifp); } break; case SIOCADDMULTI: case SIOCDELMULTI: if (sc->sc_enabled == 0) { err = EIO; break; } temp = ifp->if_flags & IFF_UP; sncreset(sc); ifp->if_flags |= temp; err = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: ifr = (struct ifreq *) data; err = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); break; default: err = EINVAL; } splx(s); return (err); } /* * Encapsulate a packet of type family for the local net. 
*/ hide void sncstart(ifp) struct ifnet *ifp; { struct snc_softc *sc = ifp->if_softc; struct mbuf *m; int mtd_next; if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) return; outloop: /* Check for room in the xmit buffer. */ if ((mtd_next = (sc->mtd_free + 1)) == NTDA) mtd_next = 0; if (mtd_next == sc->mtd_hw) { ifp->if_flags |= IFF_OACTIVE; return; } IF_DEQUEUE(&ifp->if_snd, m); if (m == 0) return; /* We need the header for m_pkthdr.len. */ if ((m->m_flags & M_PKTHDR) == 0) panic("%s: sncstart: no header mbuf", device_get_nameunit(sc->sc_dev)); /* * If bpf is listening on this interface, let it * see the packet before we commit it to the wire. */ if (ifp->if_bpf) bpf_mtap(ifp, m); /* * If there is nothing in the o/p queue, and there is room in * the Tx ring, then send the packet directly. Otherwise append * it to the o/p queue. */ if ((sonicput(sc, m, mtd_next)) == 0) { IF_PREPEND(&ifp->if_snd, m); return; } sc->mtd_prev = sc->mtd_free; sc->mtd_free = mtd_next; ifp->if_opackets++; /* # of pkts */ /* Jump back for possibly more punishment. */ goto outloop; } /* * reset and restart the SONIC. Called in case of fatal * hardware/software errors. */ hide void sncreset(sc) struct snc_softc *sc; { sncstop(sc); sncinit(sc); } hide void sncinit(xsc) void *xsc; { struct snc_softc *sc = xsc; u_long s_rcr; int s; if (sc->sc_if.if_flags & IFF_RUNNING) /* already running */ return; s = splnet(); NIC_PUT(sc, SNCR_CR, CR_RST); /* DCR only accessable in reset mode! */ /* config it */ NIC_PUT(sc, SNCR_DCR, (sc->sncr_dcr | (sc->bitmode ? 
DCR_DW32 : DCR_DW16))); NIC_PUT(sc, SNCR_DCR2, sc->sncr_dcr2); s_rcr = RCR_BRD | RCR_LBNONE; if (sc->sc_if.if_flags & IFF_PROMISC) s_rcr |= RCR_PRO; if (sc->sc_if.if_flags & IFF_ALLMULTI) s_rcr |= RCR_AMC; NIC_PUT(sc, SNCR_RCR, s_rcr); NIC_PUT(sc, SNCR_IMR, (IMR_PRXEN | IMR_PTXEN | IMR_TXEREN | IMR_LCDEN)); /* clear pending interrupts */ NIC_PUT(sc, SNCR_ISR, ISR_ALL); /* clear tally counters */ NIC_PUT(sc, SNCR_CRCT, -1); NIC_PUT(sc, SNCR_FAET, -1); NIC_PUT(sc, SNCR_MPT, -1); initialise_tda(sc); initialise_rda(sc); initialise_rra(sc); /* enable the chip */ NIC_PUT(sc, SNCR_CR, 0); wbflush(); /* program the CAM */ camprogram(sc); /* get it to read resource descriptors */ NIC_PUT(sc, SNCR_CR, CR_RRRA); wbflush(); while ((NIC_GET(sc, SNCR_CR)) & CR_RRRA) continue; /* enable rx */ NIC_PUT(sc, SNCR_CR, CR_RXEN); wbflush(); /* flag interface as "running" */ sc->sc_if.if_flags |= IFF_RUNNING; sc->sc_if.if_flags &= ~IFF_OACTIVE; splx(s); return; } /* * close down an interface and free its buffers * Called on final close of device, or if sncinit() fails * part way through. */ hide int sncstop(sc) struct snc_softc *sc; { struct mtd *mtd; int s = splnet(); /* stick chip in reset */ NIC_PUT(sc, SNCR_CR, CR_RST); wbflush(); /* free all receive buffers (currently static so nothing to do) */ /* free all pending transmit mbufs */ while (sc->mtd_hw != sc->mtd_free) { mtd = &sc->mtda[sc->mtd_hw]; if (mtd->mtd_mbuf) m_freem(mtd->mtd_mbuf); if (++sc->mtd_hw == NTDA) sc->mtd_hw = 0; } sc->sc_if.if_timer = 0; sc->sc_if.if_flags &= ~(IFF_RUNNING | IFF_UP); splx(s); return (0); } /* * Called if any Tx packets remain unsent after 5 seconds, * In all cases we just reset the chip, and any retransmission * will be handled by higher level protocol timeouts. 
 */
/*
 * TX watchdog: a packet has sat in the transmit ring for 5 seconds.
 * Log whether the SONIC never completed it (timeout) or completed it
 * without telling us (lost interrupt), then reset the chip; higher
 * level protocol timeouts handle retransmission.
 */
hide void
sncwatchdog(ifp)
	struct ifnet *ifp;
{
	struct snc_softc *sc = ifp->if_softc;
	struct mtd *mtd;
	int temp;

	if (sc->mtd_hw != sc->mtd_free) {
		/* something still pending for transmit */
		mtd = &sc->mtda[sc->mtd_hw];
		if (SRO(sc, mtd->mtd_vtxp, TXP_STATUS) == 0)
			log(LOG_ERR, "%s: Tx - timeout\n",
			    device_get_nameunit(sc->sc_dev));
		else
			log(LOG_ERR, "%s: Tx - lost interrupt\n",
			    device_get_nameunit(sc->sc_dev));
		/* sncreset() clears IFF_UP; preserve and restore it. */
		temp = ifp->if_flags & IFF_UP;
		sncreset(sc);
		ifp->if_flags |= temp;
	}
}

/*
 * stuff packet into sonic (at splnet)
 *
 * Copies mbuf chain m0 into the free transmit buffer, fills in its
 * transmit descriptor (TDA) as a single fragment (padding short
 * frames to the ethernet minimum), links it behind the previous
 * descriptor, and kicks the chip.  Returns the byte count written,
 * or 0 if the packet could not be queued.
 */
hide u_int
sonicput(sc, m0, mtd_next)
	struct snc_softc *sc;
	struct mbuf *m0;
	int mtd_next;
{
	struct mtd *mtdp;
	struct mbuf *m;
	u_int32_t buff;
	u_int32_t txp;
	u_int len = 0;
	u_int totlen = 0;

#ifdef whyonearthwouldyoudothis
	if (NIC_GET(sc, SNCR_CR) & CR_TXP)
		return (0);
#endif

	/* grab the replacement mtd */
	mtdp = &sc->mtda[sc->mtd_free];

	buff = mtdp->mtd_vbuf;

	/* this packet goes to mtdnext fill in the TDA */
	mtdp->mtd_mbuf = m0;
	txp = mtdp->mtd_vtxp;

	/* Write to the config word.
	   Every (NTDA/2)+1 packets we set an intr */
	if (sc->mtd_pint == 0) {
		sc->mtd_pint = NTDA/2;
		SWO(sc, txp, TXP_CONFIG, TCR_PINT);
	} else {
		sc->mtd_pint--;
		SWO(sc, txp, TXP_CONFIG, 0);
	}

	/* Copy the whole chain into the contiguous transmit buffer. */
	for (m = m0; m; m = m->m_next) {
		len = m->m_len;
		totlen += len;
		(*sc->sc_copytobuf)(sc, mtod(m, caddr_t), buff, len);
		buff += len;
	}
	if (totlen >= TXBSIZE) {
		panic("%s: sonicput: packet overflow",
		      device_get_nameunit(sc->sc_dev));
	}

	SWO(sc, txp, TXP_FRAGOFF + (0 * TXP_FRAGSIZE) + TXP_FPTRLO,
	    LOWER(mtdp->mtd_vbuf));
	SWO(sc, txp, TXP_FRAGOFF + (0 * TXP_FRAGSIZE) + TXP_FPTRHI,
	    UPPER(mtdp->mtd_vbuf));

	/* Zero-pad runts up to the minimum ethernet frame size. */
	if (totlen < ETHERMIN + sizeof(struct ether_header)) {
		int pad = ETHERMIN + sizeof(struct ether_header) - totlen;
		(*sc->sc_zerobuf)(sc, mtdp->mtd_vbuf + totlen, pad);
		totlen = ETHERMIN + sizeof(struct ether_header);
	}

	SWO(sc, txp, TXP_FRAGOFF + (0 * TXP_FRAGSIZE) + TXP_FSIZE,
	    totlen);
	SWO(sc, txp, TXP_FRAGCNT, 1);
	SWO(sc, txp, TXP_PKTSIZE, totlen);

	/* link onto the next mtd that will be used */
	SWO(sc, txp, TXP_FRAGOFF + (1 * TXP_FRAGSIZE) + TXP_FPTRLO,
	    LOWER(sc->mtda[mtd_next].mtd_vtxp) | EOL);

	/*
	 * The previous txp.tlink currently contains a pointer to
	 * our txp | EOL. Want to clear the EOL, so write our
	 * pointer to the previous txp.
	 */
	SWO(sc, sc->mtda[sc->mtd_prev].mtd_vtxp, sc->mtd_tlinko,
	    LOWER(mtdp->mtd_vtxp));

	/* make sure chip is running */
	wbflush();
	NIC_PUT(sc, SNCR_CR, CR_TXP);
	wbflush();

	sc->sc_if.if_timer = 5;	/* 5 seconds to watch for failing to transmit */

	return (totlen);
}

/*
 * These are called from sonicioctl() when /etc/ifconfig is run to set
 * the address or switch the i/f on.
*/ /* * CAM support */ hide void caminitialise(sc) struct snc_softc *sc; { u_int32_t v_cda = sc->v_cda; int i; int camoffset; for (i = 0; i < MAXCAM; i++) { camoffset = i * CDA_CAMDESC; SWO(sc, v_cda, (camoffset + CDA_CAMEP), i); SWO(sc, v_cda, (camoffset + CDA_CAMAP2), 0); SWO(sc, v_cda, (camoffset + CDA_CAMAP1), 0); SWO(sc, v_cda, (camoffset + CDA_CAMAP0), 0); } SWO(sc, v_cda, CDA_ENABLE, 0); #ifdef SNCDEBUG if ((sncdebug & SNC_SHOWCAMENT) != 0) { camdump(sc); } #endif } hide void camentry(sc, entry, ea) int entry; u_char *ea; struct snc_softc *sc; { u_int32_t v_cda = sc->v_cda; int camoffset = entry * CDA_CAMDESC; SWO(sc, v_cda, camoffset + CDA_CAMEP, entry); SWO(sc, v_cda, camoffset + CDA_CAMAP2, (ea[5] << 8) | ea[4]); SWO(sc, v_cda, camoffset + CDA_CAMAP1, (ea[3] << 8) | ea[2]); SWO(sc, v_cda, camoffset + CDA_CAMAP0, (ea[1] << 8) | ea[0]); SWO(sc, v_cda, CDA_ENABLE, (SRO(sc, v_cda, CDA_ENABLE) | (1 << entry))); } hide void camprogram(sc) struct snc_softc *sc; { struct ifmultiaddr *ifma; struct ifnet *ifp; int timeout; int mcount = 0; caminitialise(sc); ifp = &sc->sc_if; /* Always load our own address first. */ camentry (sc, mcount, sc->sc_ethercom.ac_enaddr); mcount++; /* Assume we won't need allmulti bit. 
*/ ifp->if_flags &= ~IFF_ALLMULTI; /* Loop through multicast addresses */ - for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; - ifma = ifma->ifma_link.le_next) { + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (mcount == MAXCAM) { ifp->if_flags |= IFF_ALLMULTI; break; } /* program the CAM with the specified entry */ camentry(sc, mcount, LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); mcount++; } NIC_PUT(sc, SNCR_CDP, LOWER(sc->v_cda)); NIC_PUT(sc, SNCR_CDC, MAXCAM); NIC_PUT(sc, SNCR_CR, CR_LCAM); wbflush(); timeout = 10000; while ((NIC_GET(sc, SNCR_CR) & CR_LCAM) && timeout--) continue; if (timeout == 0) { /* XXX */ panic("%s: CAM initialisation failed\n", device_get_nameunit(sc->sc_dev)); } timeout = 10000; while (((NIC_GET(sc, SNCR_ISR) & ISR_LCD) == 0) && timeout--) continue; if (NIC_GET(sc, SNCR_ISR) & ISR_LCD) NIC_PUT(sc, SNCR_ISR, ISR_LCD); else device_printf(sc->sc_dev, "CAM initialisation without interrupt\n"); } #ifdef SNCDEBUG hide void camdump(sc) struct snc_softc *sc; { int i; printf("CAM entries:\n"); NIC_PUT(sc, SNCR_CR, CR_RST); wbflush(); for (i = 0; i < 16; i++) { ushort ap2, ap1, ap0; NIC_PUT(sc, SNCR_CEP, i); wbflush(); ap2 = NIC_GET(sc, SNCR_CAP2); ap1 = NIC_GET(sc, SNCR_CAP1); ap0 = NIC_GET(sc, SNCR_CAP0); printf("%d: ap2=0x%x ap1=0x%x ap0=0x%x\n", i, ap2, ap1, ap0); } printf("CAM enable 0x%x\n", NIC_GET(sc, SNCR_CEP)); NIC_PUT(sc, SNCR_CR, 0); wbflush(); } #endif hide void initialise_tda(sc) struct snc_softc *sc; { struct mtd *mtd; int i; for (i = 0; i < NTDA; i++) { mtd = &sc->mtda[i]; mtd->mtd_mbuf = 0; } sc->mtd_hw = 0; sc->mtd_prev = NTDA - 1; sc->mtd_free = 0; sc->mtd_tlinko = TXP_FRAGOFF + 1*TXP_FRAGSIZE + TXP_FPTRLO; sc->mtd_pint = NTDA/2; NIC_PUT(sc, SNCR_UTDA, UPPER(sc->mtda[0].mtd_vtxp)); NIC_PUT(sc, SNCR_CTDA, LOWER(sc->mtda[0].mtd_vtxp)); } hide void initialise_rda(sc) struct snc_softc *sc; { int i; u_int32_t vv_rda = 0; u_int32_t v_rda = 0; /* link the RDA's 
	 * together into a circular list */
	for (i = 0; i < (sc->sc_nrda - 1); i++) {
		v_rda = sc->v_rda + (i * RXPKT_SIZE(sc));
		vv_rda = sc->v_rda + ((i+1) * RXPKT_SIZE(sc));
		SWO(sc, v_rda, RXPKT_RLINK, LOWER(vv_rda));
		SWO(sc, v_rda, RXPKT_INUSE, 1);
	}
	/* Last descriptor links back to the first and carries EOL. */
	v_rda = sc->v_rda + ((sc->sc_nrda - 1) * RXPKT_SIZE(sc));
	SWO(sc, v_rda, RXPKT_RLINK, LOWER(sc->v_rda) | EOL);
	SWO(sc, v_rda, RXPKT_INUSE, 1);

	/* mark end of receive descriptor list */
	sc->sc_rdamark = sc->sc_nrda - 1;

	sc->sc_rxmark = 0;

	NIC_PUT(sc, SNCR_URDA, UPPER(sc->v_rda));
	NIC_PUT(sc, SNCR_CRDA, LOWER(sc->v_rda));
	wbflush();
}

/*
 * initialise_rra: program the receive resource area registers and hand
 * NRBA receive buffers (half-page word counts) to the chip.
 */
hide void
initialise_rra(sc)
	struct snc_softc *sc;
{
	int	i;
	u_int	v;
	int	bitmode = sc->bitmode;

	/* End-of-buffer word count differs between 16- and 32-bit mode. */
	if (bitmode)
		NIC_PUT(sc, SNCR_EOBC, RBASIZE(sc) / 2 - 2);
	else
		NIC_PUT(sc, SNCR_EOBC, RBASIZE(sc) / 2 - 1);

	NIC_PUT(sc, SNCR_URRA, UPPER(sc->v_rra[0]));
	NIC_PUT(sc, SNCR_RSA, LOWER(sc->v_rra[0]));
	/* rea must point just past the end of the rra space */
	NIC_PUT(sc, SNCR_REA, LOWER(sc->v_rea));
	NIC_PUT(sc, SNCR_RRP, LOWER(sc->v_rra[0]));
	NIC_PUT(sc, SNCR_RSC, 0);

	/* fill up SOME of the rra with buffers */
	for (i = 0; i < NRBA; i++) {
		v = SONIC_GETDMA(sc->rbuf[i]);
		SWO(sc, sc->v_rra[i], RXRSRC_PTRHI, UPPER(v));
		SWO(sc, sc->v_rra[i], RXRSRC_PTRLO, LOWER(v));
		SWO(sc, sc->v_rra[i], RXRSRC_WCHI, UPPER(NBPG/2));
		SWO(sc, sc->v_rra[i], RXRSRC_WCLO, LOWER(NBPG/2));
	}
	sc->sc_rramark = NRBA;
	NIC_PUT(sc, SNCR_RWP, LOWER(sc->v_rra[sc->sc_rramark]));
	wbflush();
}

/*
 * sncintr: interrupt service routine.  Loops while any ISR condition is
 * asserted, acknowledges what it is about to service, and dispatches to
 * the tx/rx handlers; error conditions are logged.
 */
void
sncintr(arg)
	void	*arg;
{
	struct snc_softc *sc = (struct snc_softc *)arg;
	int	isr;

	if (sc->sc_enabled == 0)
		return;

	while ((isr = (NIC_GET(sc, SNCR_ISR) & ISR_ALL)) != 0) {
		/* scrub the interrupts that we are going to service */
		NIC_PUT(sc, SNCR_ISR, isr);
		wbflush();

		if (isr & (ISR_BR | ISR_LCD | ISR_TC))
			device_printf(sc->sc_dev,
				      "unexpected interrupt status 0x%x\n",
				      isr);

		if (isr & (ISR_TXDN | ISR_TXER | ISR_PINT))
			sonictxint(sc);

		if (isr & ISR_PKTRX)
			sonicrxint(sc);

		if (isr & (ISR_HBL | ISR_RDE | ISR_RBE | ISR_RBAE | ISR_RFO)) {
			if (isr & ISR_HBL)
				/*
				 * The repeater is not providing a heartbeat.
				 * In itself this isn't harmful, lots of the
				 * cheap repeater hubs don't supply a heartbeat.
				 * So ignore the lack of heartbeat.  Its only
				 * if we can't detect a carrier that we have a
				 * problem.
				 */
				;
			if (isr & ISR_RDE)
				device_printf(sc->sc_dev,
					"receive descriptors exhausted\n");
			if (isr & ISR_RBE)
				device_printf(sc->sc_dev,
					"receive buffers exhausted\n");
			if (isr & ISR_RBAE)
				device_printf(sc->sc_dev,
					"receive buffer area exhausted\n");
			if (isr & ISR_RFO)
				device_printf(sc->sc_dev,
					"receive FIFO overrun\n");
		}
		if (isr & (ISR_CRC | ISR_FAE | ISR_MP)) {
#ifdef notdef
			if (isr & ISR_CRC)
				sc->sc_crctally++;
			if (isr & ISR_FAE)
				sc->sc_faetally++;
			if (isr & ISR_MP)
				sc->sc_mptally++;
#endif
		}
		/* Kick the transmitter in case packets queued meanwhile. */
		sncstart(&sc->sc_if);

#if NRND > 0
		if (isr)
			rnd_add_uint32(&sc->rnd_source, isr);
#endif
	}
	return;
}

/*
 * Transmit interrupt routine
 */
/*
 * sonictxint: reap completed transmit descriptors between mtd_hw and
 * mtd_free, free their mbufs, account collisions/errors, and restart
 * the transmitter if a descriptor failed mid-list.
 */
hide void
sonictxint(sc)
	struct snc_softc *sc;
{
	struct mtd	*mtd;
	u_int32_t	txp;
	unsigned short	txp_status;
	int		mtd_hw;
	struct ifnet	*ifp = &sc->sc_if;

	mtd_hw = sc->mtd_hw;

	if (mtd_hw == sc->mtd_free)
		return;

	while (mtd_hw != sc->mtd_free) {
		mtd = &sc->mtda[mtd_hw];

		txp = mtd->mtd_vtxp;

		if (SRO(sc, txp, TXP_STATUS) == 0) {
			break; /* it hasn't really gone yet */
		}

#ifdef SNCDEBUG
		if ((sncdebug & SNC_SHOWTXHDR) != 0) {
			struct ether_header eh;

			(*sc->sc_copyfrombuf)(sc, &eh, mtd->mtd_vbuf,
			    sizeof(eh));
			device_printf(sc->sc_dev,
			    "xmit status=0x%x len=%d type=0x%x from %6D",
			    SRO(sc, txp, TXP_STATUS),
			    SRO(sc, txp, TXP_PKTSIZE),
			    htons(eh.ether_type),
			    eh.ether_shost, ":");
			printf(" (to %6D)\n", eh.ether_dhost, ":");
		}
#endif /* SNCDEBUG */

		ifp->if_flags &= ~IFF_OACTIVE;

		if (mtd->mtd_mbuf != 0) {
			m_freem(mtd->mtd_mbuf);
			mtd->mtd_mbuf = 0;
		}
		if (++mtd_hw == NTDA) mtd_hw = 0;

		txp_status = SRO(sc, txp, TXP_STATUS);

		/* Excessive collisions count as 16; else NC field >> 12. */
		ifp->if_collisions += (txp_status & TCR_EXC) ? 16 :
			((txp_status & TCR_NC) >> 12);

		if ((txp_status & TCR_PTX) == 0) {
			ifp->if_oerrors++;
			device_printf(sc->sc_dev, "Tx packet status=0x%x\n",
				      txp_status);

			/* XXX - DG This looks bogus */
			if (mtd_hw != sc->mtd_free) {
				printf("resubmitting remaining packets\n");
				mtd = &sc->mtda[mtd_hw];
				NIC_PUT(sc, SNCR_CTDA, LOWER(mtd->mtd_vtxp));
				NIC_PUT(sc, SNCR_CR, CR_TXP);
				wbflush();
				break;
			}
		}
	}

	sc->mtd_hw = mtd_hw;
	return;
}

/*
 * Receive interrupt routine
 */
/*
 * sonicrxint: walk the receive descriptor ring while descriptors are
 * marked consumed, pass each good packet to sonic_read(), then recycle
 * the buffer and descriptor back to the chip.
 */
hide void
sonicrxint(sc)
	struct snc_softc *sc;
{
	u_int32_t rda;
	int orra;
	int len;
	int rramark;
	int rdamark;
	u_int16_t rxpkt_ptr;

	rda = sc->v_rda + (sc->sc_rxmark * RXPKT_SIZE(sc));

	while (SRO(sc, rda, RXPKT_INUSE) == 0) {
		u_int status = SRO(sc, rda, RXPKT_STATUS);

		orra = RBASEQ(SRO(sc, rda, RXPKT_SEQNO)) & RRAMASK;
		rxpkt_ptr = SRO(sc, rda, RXPKT_PTRLO);
		/*
		 * Do not trunc ether_header length.
		 * Our sonic_read() and sonic_get() require it.
		 */
		len = SRO(sc, rda, RXPKT_BYTEC) - FCSSIZE;
		if (status & RCR_PRX) {
			/* XXX: Does PGOFSET require? */
			u_int32_t pkt =
			    sc->rbuf[orra & RBAMASK] + (rxpkt_ptr & PGOFSET);
			if (sonic_read(sc, pkt, len))
				sc->sc_if.if_ipackets++;
			else
				sc->sc_if.if_ierrors++;
		} else
			sc->sc_if.if_ierrors++;

		/*
		 * give receive buffer area back to chip.
		 *
		 * If this was the last packet in the RRA, give the RRA to
		 * the chip again.
		 * If sonic read didnt copy it out then we would have to
		 * wait !!
		 * (dont bother add it back in again straight away)
		 *
		 * Really, we're doing v_rra[rramark] = v_rra[orra] but
		 * we have to use the macros because SONIC might be in
*/ if (status & RCR_LPKT) { u_int32_t tmp1, tmp2; rramark = sc->sc_rramark; tmp1 = sc->v_rra[rramark]; tmp2 = sc->v_rra[orra]; SWO(sc, tmp1, RXRSRC_PTRLO, SRO(sc, tmp2, RXRSRC_PTRLO)); SWO(sc, tmp1, RXRSRC_PTRHI, SRO(sc, tmp2, RXRSRC_PTRHI)); SWO(sc, tmp1, RXRSRC_WCLO, SRO(sc, tmp2, RXRSRC_WCLO)); SWO(sc, tmp1, RXRSRC_WCHI, SRO(sc, tmp2, RXRSRC_WCHI)); /* zap old rra for fun */ SWO(sc, tmp2, RXRSRC_WCHI, 0); SWO(sc, tmp2, RXRSRC_WCLO, 0); sc->sc_rramark = (++rramark) & RRAMASK; NIC_PUT(sc, SNCR_RWP, LOWER(sc->v_rra[rramark])); wbflush(); } /* * give receive descriptor back to chip simple * list is circular */ rdamark = sc->sc_rdamark; SWO(sc, rda, RXPKT_INUSE, 1); SWO(sc, rda, RXPKT_RLINK, SRO(sc, rda, RXPKT_RLINK) | EOL); SWO(sc, (sc->v_rda + (rdamark * RXPKT_SIZE(sc))), RXPKT_RLINK, SRO(sc, (sc->v_rda + (rdamark * RXPKT_SIZE(sc))), RXPKT_RLINK) & ~EOL); sc->sc_rdamark = sc->sc_rxmark; if (++sc->sc_rxmark >= sc->sc_nrda) sc->sc_rxmark = 0; rda = sc->v_rda + (sc->sc_rxmark * RXPKT_SIZE(sc)); } } /* * sonic_read -- pull packet off interface and forward to * appropriate protocol handler */ hide int sonic_read(sc, pkt, len) struct snc_softc *sc; u_int32_t pkt; int len; { struct ifnet *ifp = &sc->sc_if; struct ether_header *et; struct mbuf *m; if (len <= sizeof(struct ether_header) || len > ETHERMTU + sizeof(struct ether_header)) { device_printf(sc->sc_dev, "invalid packet length %d bytes\n", len); return (0); } /* Pull packet off interface. */ m = sonic_get(sc, pkt, len); if (m == 0) { return (0); } /* We assume that the header fit entirely in one mbuf. */ et = mtod(m, struct ether_header *); #ifdef SNCDEBUG if ((sncdebug & SNC_SHOWRXHDR) != 0) { device_printf(sc->sc_dev, "rcvd 0x%x len=%d type=0x%x from %6D", pkt, len, htons(et->ether_type), et->ether_shost, ":"); printf(" (to %6D)\n", et->ether_dhost, ":"); } #endif /* SNCDEBUG */ /* Pass the packet up, with the ether header sort-of removed. 
*/ m_adj(m, sizeof(struct ether_header)); ether_input(ifp, et, m); return (1); } /* * munge the received packet into an mbuf chain */ hide struct mbuf * sonic_get(sc, pkt, datalen) struct snc_softc *sc; u_int32_t pkt; int datalen; { struct mbuf *m, *top, **mp; int len; /* * Do not trunc ether_header length. * Our sonic_read() and sonic_get() require it. */ MGETHDR(m, M_DONTWAIT, MT_DATA); if (m == 0) return (0); m->m_pkthdr.rcvif = &sc->sc_if; m->m_pkthdr.len = datalen; len = MHLEN; top = 0; mp = ⊤ while (datalen > 0) { if (top) { MGET(m, M_DONTWAIT, MT_DATA); if (m == 0) { m_freem(top); return (0); } len = MLEN; } if (datalen >= MINCLSIZE) { MCLGET(m, M_DONTWAIT); if ((m->m_flags & M_EXT) == 0) { if (top) m_freem(top); return (0); } len = MCLBYTES; } #if 0 /* XXX: Require? */ if (!top) { register int pad = ALIGN(sizeof(struct ether_header)) - sizeof(struct ether_header); m->m_data += pad; len -= pad; } #endif m->m_len = len = min(datalen, len); (*sc->sc_copyfrombuf)(sc, mtod(m, caddr_t), pkt, len); pkt += len; datalen -= len; *mp = m; mp = &m->m_next; } return (top); } /* * Enable power on the interface. */ int snc_enable(sc) struct snc_softc *sc; { #ifdef SNCDEBUG device_printf(sc->sc_dev, "snc_enable()\n"); #endif /* SNCDEBUG */ if (sc->sc_enabled == 0 && sc->sc_enable != NULL) { if ((*sc->sc_enable)(sc) != 0) { device_printf(sc->sc_dev, "device enable failed\n"); return (EIO); } } sc->sc_enabled = 1; return (0); } /* * Disable power on the interface. */ void snc_disable(sc) struct snc_softc *sc; { #ifdef SNCDEBUG device_printf(sc->sc_dev, "snc_disable()\n"); #endif /* SNCDEBUG */ if (sc->sc_enabled != 0 && sc->sc_disable != NULL) { (*sc->sc_disable)(sc); sc->sc_enabled = 0; } } Index: head/sys/dev/ti/if_ti.c =================================================================== --- head/sys/dev/ti/if_ti.c (revision 71961) +++ head/sys/dev/ti/if_ti.c (revision 71962) @@ -1,2506 +1,2505 @@ /* * Copyright (c) 1997, 1998, 1999 * Bill Paul . All rights reserved. 
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Alteon Networks Tigon PCI gigabit ethernet driver for FreeBSD.
 * Manuals, sample driver and firmware source kits are available
 * from http://www.alteon.com/support/openkits.
 *
 * Written by Bill Paul
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The Alteon Networks Tigon chip contains an embedded R4000 CPU,
 * gigabit MAC, dual DMA channels and a PCI interface unit.  NICs
 * using the Tigon may have anywhere from 512K to 2MB of SRAM.  The
 * Tigon supports hardware IP, TCP and UCP checksumming, multicast
 * filtering and jumbo (9014 byte) frames.  The hardware is largely
 * controlled by firmware, which must be loaded into the NIC during
 * initialization.
 *
 * The Tigon 2 contains 2 R4000 CPUs and requires a newer firmware
 * revision, which supports new features such as extended commands,
 * extended jumbo receive ring desciptors and a mini receive ring.
 *
 * Alteon Networks is to be commended for releasing such a vast amount
 * of development material for the Tigon NIC without requiring an NDA
 * (although they really should have done it a long time ago). With
 * any luck, the other vendors will finally wise up and follow Alteon's
 * stellar example.
 *
 * The firmware for the Tigon 1 and 2 NICs is compiled directly into
 * this driver by #including it as a C header file. This bloats the
 * driver somewhat, but it's the easiest method considering that the
 * driver code and firmware code need to be kept in sync. The source
 * for the firmware is not provided with the FreeBSD distribution since
 * compiling it requires a GNU toolchain targeted for mips-sgi-irix5.3.
 *
 * The following people deserve special thanks:
 * - Terry Murphy of 3Com, for providing a 3c985 Tigon 1 board
 *   for testing
 * - Raymond Lee of Netgear, for providing a pair of Netgear
 *   GA620 Tigon 2 boards for testing
 * - Ulf Zimmermann, for bringing the GA260 to my attention and
 *   convincing me to write this driver.
 * - Andrew Gallatin for providing FreeBSD/Alpha support.
 */

#include "vlan.h"

/*
 * NOTE(review): the header names inside the angle brackets of the
 * following #include directives were lost when this text was extracted;
 * restore them from the original if_ti.c before compiling.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#if NVLAN > 0
#include
#include
#endif

#include
#include
#include
#include			/* for vtophys */
#include			/* for vtophys */
#include
#include
#include
#include
#include

#include
#include

#include
#include
#include

/* Checksum offload capabilities advertised for this hardware. */
#define TI_CSUM_FEATURES	(CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_IP_FRAGS)

#if !defined(lint)
static const char rcsid[] =
  "$FreeBSD$";
#endif

/*
 * Various supported device vendors/types and their names.
 */

static struct ti_type ti_devs[] = {
	{ ALT_VENDORID,	ALT_DEVICEID_ACENIC,
		"Alteon AceNIC 1000baseSX Gigabit Ethernet" },
	{ ALT_VENDORID,	ALT_DEVICEID_ACENIC_COPPER,
		"Alteon AceNIC 1000baseT Gigabit Ethernet" },
	{ TC_VENDORID,	TC_DEVICEID_3C985,
		"3Com 3c985-SX Gigabit Ethernet" },
	{ NG_VENDORID, NG_DEVICEID_GA620,
		"Netgear GA620 1000baseSX Gigabit Ethernet" },
	{ NG_VENDORID, NG_DEVICEID_GA620T,
		"Netgear GA620 1000baseT Gigabit Ethernet" },
	{ SGI_VENDORID, SGI_DEVICEID_TIGON,
		"Silicon Graphics Gigabit Ethernet" },
	{ DEC_VENDORID, DEC_DEVICEID_FARALLON_PN9000SX,
		"Farallon PN9000SX Gigabit Ethernet" },
	{ 0, 0, NULL }
};

/* Forward declarations for all local functions (old-style __P protos). */
static int ti_probe		__P((device_t));
static int ti_attach		__P((device_t));
static int ti_detach		__P((device_t));
static void ti_txeof		__P((struct ti_softc *));
static void ti_rxeof		__P((struct ti_softc *));

static void ti_stats_update	__P((struct ti_softc *));
static int ti_encap		__P((struct ti_softc *, struct mbuf *,
					u_int32_t *));

static void ti_intr		__P((void *));
static void ti_start		__P((struct ifnet *));
static int ti_ioctl		__P((struct ifnet *, u_long, caddr_t));
static void ti_init		__P((void *));
static void ti_init2		__P((struct ti_softc *));
static void ti_stop		__P((struct ti_softc *));
static void ti_watchdog		__P((struct ifnet *));
static void ti_shutdown		__P((device_t));
static int ti_ifmedia_upd	__P((struct ifnet *));
static void ti_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));

static u_int32_t ti_eeprom_putbyte	__P((struct ti_softc *, int));
static u_int8_t	ti_eeprom_getbyte	__P((struct ti_softc *,
						int, u_int8_t *));
static int ti_read_eeprom	__P((struct ti_softc *, caddr_t, int, int));

static void ti_add_mcast	__P((struct ti_softc *, struct ether_addr *));
static void ti_del_mcast	__P((struct ti_softc *, struct ether_addr *));
static void ti_setmulti		__P((struct ti_softc *));

static void ti_mem		__P((struct ti_softc *, u_int32_t,
					u_int32_t, caddr_t));
static void ti_loadfw		__P((struct ti_softc *));
static void ti_cmd		__P((struct ti_softc *,
					struct ti_cmd_desc *));
static void ti_cmd_ext		__P((struct ti_softc *, struct ti_cmd_desc *,
					caddr_t, int));
static void ti_handle_events	__P((struct ti_softc *));
static int ti_alloc_jumbo_mem	__P((struct ti_softc *));
static void *ti_jalloc		__P((struct ti_softc *));
static void ti_jfree		__P((caddr_t, void *));
static int ti_newbuf_std	__P((struct ti_softc *, int, struct mbuf *));
static int ti_newbuf_mini	__P((struct ti_softc *, int, struct mbuf *));
static int ti_newbuf_jumbo	__P((struct ti_softc *, int, struct mbuf *));
static int ti_init_rx_ring_std	__P((struct ti_softc *));
static void ti_free_rx_ring_std	__P((struct ti_softc *));
static int ti_init_rx_ring_jumbo	__P((struct ti_softc *));
static void ti_free_rx_ring_jumbo	__P((struct ti_softc *));
static int ti_init_rx_ring_mini	__P((struct ti_softc *));
static void ti_free_rx_ring_mini	__P((struct ti_softc *));
static void ti_free_tx_ring	__P((struct ti_softc *));
static int ti_init_tx_ring	__P((struct ti_softc *));

static int ti_64bitslot_war	__P((struct ti_softc *));
static int ti_chipinit		__P((struct ti_softc *));
static int ti_gibinit		__P((struct ti_softc *));

static device_method_t ti_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ti_probe),
	DEVMETHOD(device_attach,	ti_attach),
	DEVMETHOD(device_detach,	ti_detach),
	DEVMETHOD(device_shutdown,	ti_shutdown),
	{ 0, 0 }
};

static driver_t ti_driver = {
	"ti",
	ti_methods,
	sizeof(struct ti_softc)
};

static devclass_t ti_devclass;

DRIVER_MODULE(if_ti, pci, ti_driver, ti_devclass, 0, 0);

/*
 * Send an instruction or address to the EEPROM, check for ACK.
 */
static u_int32_t ti_eeprom_putbyte(sc, byte)
	struct ti_softc		*sc;
	int			byte;
{
	register int		i, ack = 0;

	/*
	 * Make sure we're in TX mode.
	 */
	TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);

	/*
	 * Feed in each bit and stobe the clock.
	 */
	for (i = 0x80; i; i >>= 1) {
		if (byte & i) {
			TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT);
		} else {
			TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT);
		}
		DELAY(1);
		TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
		DELAY(1);
		TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
	}

	/*
	 * Turn off TX mode.
	 */
	TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);

	/*
	 * Check for ack.
	 */
	TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
	ack = CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN;
	TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);

	return(ack);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.'
 * We have to send two address bytes since the EEPROM can hold
 * more than 256 bytes of data.
 *
 * Returns 0 on success, 1 if any putbyte step failed to ACK.
 */
static u_int8_t ti_eeprom_getbyte(sc, addr, dest)
	struct ti_softc		*sc;
	int			addr;
	u_int8_t		*dest;
{
	register int		i;
	u_int8_t		byte = 0;

	EEPROM_START;

	/*
	 * Send write control code to EEPROM.
	 */
	if (ti_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) {
		printf("ti%d: failed to send write command, status: %x\n",
		    sc->ti_unit, CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
		return(1);
	}

	/*
	 * Send first byte of address of byte we want to read.
	 */
	if (ti_eeprom_putbyte(sc, (addr >> 8) & 0xFF)) {
		printf("ti%d: failed to send address, status: %x\n",
		    sc->ti_unit, CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
		return(1);
	}
	/*
	 * Send second byte address of byte we want to read.
	 */
	if (ti_eeprom_putbyte(sc, addr & 0xFF)) {
		printf("ti%d: failed to send address, status: %x\n",
		    sc->ti_unit, CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
		return(1);
	}

	EEPROM_STOP;
	EEPROM_START;
	/*
	 * Send read control code to EEPROM.
	 */
	if (ti_eeprom_putbyte(sc, EEPROM_CTL_READ)) {
		printf("ti%d: failed to send read command, status: %x\n",
		    sc->ti_unit, CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
		return(1);
	}

	/*
	 * Start reading bits from EEPROM.
	 */
	TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);
	for (i = 0x80; i; i >>= 1) {
		TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
		DELAY(1);
		if (CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN)
			byte |= i;
		TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
		DELAY(1);
	}

	EEPROM_STOP;

	/*
	 * No ACK generated for read, so just return byte.
	 */
	*dest = byte;

	return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 *
 * Returns 0 on success, 1 on the first byte that fails to read.
 */
static int ti_read_eeprom(sc, dest, off, cnt)
	struct ti_softc		*sc;
	caddr_t			dest;
	int			off;
	int			cnt;
{
	int			err = 0, i;
	u_int8_t		byte = 0;

	for (i = 0; i < cnt; i++) {
		err = ti_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return(err ? 1 : 0);
}

/*
 * NIC memory access function. Can be used to either clear a section
 * of NIC local memory or (if buf is non-NULL) copy data into it.
 * Access goes through the shared-memory window, one window-sized
 * (TI_WINLEN) segment at a time.
 */
static void ti_mem(sc, addr, len, buf)
	struct ti_softc		*sc;
	u_int32_t		addr, len;
	caddr_t			buf;
{
	int			segptr, segsize, cnt;
	caddr_t			ti_winbase, ptr;

	segptr = addr;
	cnt = len;
	ti_winbase = (caddr_t)(sc->ti_vhandle + TI_WINDOW);
	ptr = buf;

	while(cnt) {
		if (cnt < TI_WINLEN)
			segsize = cnt;
		else
			/* Clamp to the end of the current window. */
			segsize = TI_WINLEN - (segptr % TI_WINLEN);
		CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
		if (buf == NULL)
			bzero((char *)ti_winbase + (segptr &
			    (TI_WINLEN - 1)), segsize);
		else {
			bcopy((char *)ptr, (char *)ti_winbase +
			    (segptr & (TI_WINLEN - 1)), segsize);
			ptr += segsize;
		}
		segptr += segsize;
		cnt -= segsize;
	}

	return;
}

/*
 * Load firmware image into the NIC. Check that the firmware revision
 * is acceptable and see if we want the firmware for the Tigon 1 or
 * Tigon 2.
 */
static void ti_loadfw(sc)
	struct ti_softc		*sc;
{
	switch(sc->ti_hwrev) {
	case TI_HWREV_TIGON:
		/* Refuse to load firmware whose version doesn't match. */
		if (tigonFwReleaseMajor != TI_FIRMWARE_MAJOR ||
		    tigonFwReleaseMinor != TI_FIRMWARE_MINOR ||
		    tigonFwReleaseFix != TI_FIRMWARE_FIX) {
			printf("ti%d: firmware revision mismatch; want "
			    "%d.%d.%d, got %d.%d.%d\n", sc->ti_unit,
			    TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR,
			    TI_FIRMWARE_FIX, tigonFwReleaseMajor,
			    tigonFwReleaseMinor, tigonFwReleaseFix);
			return;
		}
		/* Copy each firmware section into NIC memory, zero BSS. */
		ti_mem(sc, tigonFwTextAddr, tigonFwTextLen,
		    (caddr_t)tigonFwText);
		ti_mem(sc, tigonFwDataAddr, tigonFwDataLen,
		    (caddr_t)tigonFwData);
		ti_mem(sc, tigonFwRodataAddr, tigonFwRodataLen,
		    (caddr_t)tigonFwRodata);
		ti_mem(sc, tigonFwBssAddr, tigonFwBssLen, NULL);
		ti_mem(sc, tigonFwSbssAddr, tigonFwSbssLen, NULL);
		CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigonFwStartAddr);
		break;
	case TI_HWREV_TIGON_II:
		if (tigon2FwReleaseMajor != TI_FIRMWARE_MAJOR ||
		    tigon2FwReleaseMinor != TI_FIRMWARE_MINOR ||
		    tigon2FwReleaseFix != TI_FIRMWARE_FIX) {
			printf("ti%d: firmware revision mismatch; want "
			    "%d.%d.%d, got %d.%d.%d\n", sc->ti_unit,
			    TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR,
			    TI_FIRMWARE_FIX, tigon2FwReleaseMajor,
			    tigon2FwReleaseMinor, tigon2FwReleaseFix);
			return;
		}
		ti_mem(sc, tigon2FwTextAddr, tigon2FwTextLen,
		    (caddr_t)tigon2FwText);
		ti_mem(sc, tigon2FwDataAddr, tigon2FwDataLen,
		    (caddr_t)tigon2FwData);
		ti_mem(sc, tigon2FwRodataAddr, tigon2FwRodataLen,
		    (caddr_t)tigon2FwRodata);
		ti_mem(sc, tigon2FwBssAddr, tigon2FwBssLen, NULL);
		ti_mem(sc, tigon2FwSbssAddr, tigon2FwSbssLen, NULL);
		CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigon2FwStartAddr);
		break;
	default:
		printf("ti%d: can't load firmware: unknown hardware rev\n",
		    sc->ti_unit);
		break;
	}

	return;
}

/*
 * Send the NIC a command via the command ring.
 */
static void ti_cmd(sc, cmd)
	struct ti_softc		*sc;
	struct ti_cmd_desc	*cmd;
{
	u_int32_t		index;

	if (sc->ti_rdata->ti_cmd_ring == NULL)
		return;

	/* Write the command at the saved producer slot, then advance it. */
	index = sc->ti_cmd_saved_prodidx;
	CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(u_int32_t *)(cmd));
	TI_INC(index, TI_CMD_RING_CNT);
	CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index);
	sc->ti_cmd_saved_prodidx = index;

	return;
}

/*
 * Send the NIC an extended command. The 'len' parameter specifies the
 * number of command slots to include after the initial command.
 */
static void ti_cmd_ext(sc, cmd, arg, len)
	struct ti_softc		*sc;
	struct ti_cmd_desc	*cmd;
	caddr_t			arg;
	int			len;
{
	u_int32_t		index;
	register int		i;

	if (sc->ti_rdata->ti_cmd_ring == NULL)
		return;

	index = sc->ti_cmd_saved_prodidx;
	CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(u_int32_t *)(cmd));
	TI_INC(index, TI_CMD_RING_CNT);
	/* Follow the command with 'len' 32-bit argument slots. */
	for (i = 0; i < len; i++) {
		CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4),
		    *(u_int32_t *)(&arg[i * 4]));
		TI_INC(index, TI_CMD_RING_CNT);
	}
	CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index);
	sc->ti_cmd_saved_prodidx = index;

	return;
}

/*
 * Handle events that have triggered interrupts.
 */
static void ti_handle_events(sc)
	struct ti_softc		*sc;
{
	struct ti_event_desc	*e;

	if (sc->ti_rdata->ti_event_ring == NULL)
		return;

	/* Drain the event ring until consumer catches up with producer. */
	while (sc->ti_ev_saved_considx != sc->ti_ev_prodidx.ti_idx) {
		e = &sc->ti_rdata->ti_event_ring[sc->ti_ev_saved_considx];
		switch(e->ti_event) {
		case TI_EV_LINKSTAT_CHANGED:
			sc->ti_linkstat = e->ti_code;
			if (e->ti_code == TI_EV_CODE_LINK_UP)
				printf("ti%d: 10/100 link up\n", sc->ti_unit);
			else if (e->ti_code == TI_EV_CODE_GIG_LINK_UP)
				printf("ti%d: gigabit link up\n", sc->ti_unit);
			else if (e->ti_code == TI_EV_CODE_LINK_DOWN)
				printf("ti%d: link down\n", sc->ti_unit);
			break;
		case TI_EV_ERROR:
			if (e->ti_code == TI_EV_CODE_ERR_INVAL_CMD)
				printf("ti%d: invalid command\n", sc->ti_unit);
			else if (e->ti_code == TI_EV_CODE_ERR_UNIMP_CMD)
				printf("ti%d: unknown command\n", sc->ti_unit);
			else if (e->ti_code == TI_EV_CODE_ERR_BADCFG)
				printf("ti%d: bad config data\n",
				    sc->ti_unit);
			break;
		case TI_EV_FIRMWARE_UP:
			/* Firmware finished booting: run stage-2 init. */
			ti_init2(sc);
			break;
		case TI_EV_STATS_UPDATED:
			ti_stats_update(sc);
			break;
		case TI_EV_RESET_JUMBO_RING:
		case TI_EV_MCAST_UPDATED:
			/* Who cares. */
			break;
		default:
			printf("ti%d: unknown event: %d\n",
			    sc->ti_unit, e->ti_event);
			break;
		}
		/* Advance the consumer index. */
		TI_INC(sc->ti_ev_saved_considx, TI_EVENT_RING_CNT);
		CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, sc->ti_ev_saved_considx);
	}

	return;
}

/*
 * Memory management for the jumbo receive ring is a pain in the
 * butt. We need to allocate at least 9018 bytes of space per frame,
 * _and_ it has to be contiguous (unless you use the extended
 * jumbo descriptor format). Using malloc() all the time won't
 * work: malloc() allocates memory in powers of two, which means we
 * would end up wasting a considerable amount of space by allocating
 * 9K chunks. We don't have a jumbo mbuf cluster pool. Thus, we have
 * to do our own memory management.
 *
 * The driver needs to allocate a contiguous chunk of memory at boot
 * time. We then chop this up ourselves into 9K pieces and use them
 * as external mbuf storage.
 *
 * One issue here is how much memory to allocate. The jumbo ring has
 * 256 slots in it, but at 9K per slot than can consume over 2MB of
 * RAM. This is a bit much, especially considering we also need
 * RAM for the standard ring and mini ring (on the Tigon 2). To
 * save space, we only actually allocate enough memory for 64 slots
 * by default, which works out to between 500 and 600K. This can
 * be tuned by changing a #define in if_tireg.h.
 */

static int ti_alloc_jumbo_mem(sc)
	struct ti_softc		*sc;
{
	caddr_t			ptr;
	register int		i;
	struct ti_jpool_entry   *entry;

	/* Grab a big chunk o' storage. */
	sc->ti_cdata.ti_jumbo_buf = contigmalloc(TI_JMEM, M_DEVBUF,
		M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->ti_cdata.ti_jumbo_buf == NULL) {
		printf("ti%d: no memory for jumbo buffers!\n", sc->ti_unit);
		return(ENOBUFS);
	}

	SLIST_INIT(&sc->ti_jfree_listhead);
	SLIST_INIT(&sc->ti_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->ti_cdata.ti_jumbo_buf;
	for (i = 0; i < TI_JSLOTS; i++) {
		sc->ti_cdata.ti_jslots[i] = ptr;
		ptr += TI_JLEN;
		entry = malloc(sizeof(struct ti_jpool_entry),
			       M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			/* Unwind the contiguous allocation on failure. */
			contigfree(sc->ti_cdata.ti_jumbo_buf, TI_JMEM,
			    M_DEVBUF);
			sc->ti_cdata.ti_jumbo_buf = NULL;
			printf("ti%d: no memory for jumbo "
			    "buffer queue!\n", sc->ti_unit);
			return(ENOBUFS);
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc->ti_jfree_listhead,
		    entry, jpool_entries);
	}

	return(0);
}

/*
 * Allocate a jumbo buffer.
 *
 * Pops a slot off the free list, moves its bookkeeping entry to the
 * in-use list, and returns the slot's buffer address (NULL if empty).
 */
static void *ti_jalloc(sc)
	struct ti_softc		*sc;
{
	struct ti_jpool_entry   *entry;

	entry = SLIST_FIRST(&sc->ti_jfree_listhead);

	if (entry == NULL) {
		printf("ti%d: no free jumbo buffers\n", sc->ti_unit);
		return(NULL);
	}

	SLIST_REMOVE_HEAD(&sc->ti_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->ti_jinuse_listhead, entry, jpool_entries);
	return(sc->ti_cdata.ti_jslots[entry->slot]);
}

/*
 * Release a jumbo buffer.
 */
static void ti_jfree(buf, args)
	caddr_t			buf;
	void			*args;
{
	struct ti_softc		*sc;
	int			i;
	struct ti_jpool_entry   *entry;

	/* Extract the softc struct pointer. */
	sc = (struct ti_softc *)args;

	if (sc == NULL)
		panic("ti_jfree: didn't get softc pointer!");

	/* calculate the slot this buffer belongs to */
	i = ((vm_offset_t)buf
	     - (vm_offset_t)sc->ti_cdata.ti_jumbo_buf) / TI_JLEN;

	if ((i < 0) || (i >= TI_JSLOTS))
		panic("ti_jfree: asked to free buffer that we don't manage!");

	entry = SLIST_FIRST(&sc->ti_jinuse_listhead);
	if (entry == NULL)
		panic("ti_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc->ti_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->ti_jfree_listhead, entry, jpool_entries);

	return;
}


/*
 * Intialize a standard receive ring descriptor.
 */
static int ti_newbuf_std(sc, i, m)
	struct ti_softc		*sc;
	int			i;
	struct mbuf		*m;
{
	struct mbuf		*m_new = NULL;
	struct ti_rx_desc	*r;

	if (m == NULL) {
		/* Allocate a fresh mbuf + cluster. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("ti%d: mbuf allocation failed "
			    "-- packet dropped!\n", sc->ti_unit);
			return(ENOBUFS);
		}

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			printf("ti%d: cluster allocation failed "
			    "-- packet dropped!\n", sc->ti_unit);
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		/* Recycle the caller's mbuf, resetting its data area. */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	m_adj(m_new, ETHER_ALIGN);
	sc->ti_cdata.ti_rx_std_chain[i] = m_new;
	r = &sc->ti_rdata->ti_rx_std_ring[i];
	TI_HOSTADDR(r->ti_addr) = vtophys(mtod(m_new, caddr_t));
	r->ti_type = TI_BDTYPE_RECV_BD;
	r->ti_flags = 0;
	if (sc->arpcom.ac_if.if_hwassist)
		r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
	r->ti_len = m_new->m_len;
	r->ti_idx = i;

	return(0);
}

/*
 * Initialize a mini receive ring descriptor. This only applies to
 * the Tigon 2.
 */
static int
ti_newbuf_mini(sc, i, m)
	struct ti_softc		*sc;
	int			i;
	struct mbuf		*m;
{
	struct mbuf		*m_new = NULL;
	struct ti_rx_desc	*r;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("ti%d: mbuf allocation failed "
			    "-- packet dropped!\n", sc->ti_unit);
			return(ENOBUFS);
		}
		/* Mini ring frames fit in the mbuf header itself. */
		m_new->m_len = m_new->m_pkthdr.len = MHLEN;
	} else {
		m_new = m;
		m_new->m_data = m_new->m_pktdat;
		m_new->m_len = m_new->m_pkthdr.len = MHLEN;
	}

	m_adj(m_new, ETHER_ALIGN);
	r = &sc->ti_rdata->ti_rx_mini_ring[i];
	sc->ti_cdata.ti_rx_mini_chain[i] = m_new;
	TI_HOSTADDR(r->ti_addr) = vtophys(mtod(m_new, caddr_t));
	r->ti_type = TI_BDTYPE_RECV_BD;
	r->ti_flags = TI_BDFLAG_MINI_RING;
	if (sc->arpcom.ac_if.if_hwassist)
		r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
	r->ti_len = m_new->m_len;
	r->ti_idx = i;

	return(0);
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
ti_newbuf_jumbo(sc, i, m)
	struct ti_softc		*sc;
	int			i;
	struct mbuf		*m;
{
	struct mbuf		*m_new = NULL;
	struct ti_rx_desc	*r;

	if (m == NULL) {
		caddr_t			*buf = NULL;

		/* Allocate the mbuf. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("ti%d: mbuf allocation failed "
			    "-- packet dropped!\n", sc->ti_unit);
			return(ENOBUFS);
		}

		/* Allocate the jumbo buffer */
		buf = ti_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			printf("ti%d: jumbo allocation failed "
			    "-- packet dropped!\n", sc->ti_unit);
			return(ENOBUFS);
		}

		/*
		 * Attach the buffer to the mbuf; ti_jfree() returns it
		 * to the pool when the mbuf is released.
		 */
		m_new->m_data = (void *) buf;
		m_new->m_len = m_new->m_pkthdr.len = TI_JUMBO_FRAMELEN;
		MEXTADD(m_new, buf, TI_JUMBO_FRAMELEN, ti_jfree,
		    (struct ti_softc *)sc, 0, EXT_NET_DRV);
	} else {
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = TI_JUMBO_FRAMELEN;
	}

	m_adj(m_new, ETHER_ALIGN);
	/* Set up the descriptor. */
	r = &sc->ti_rdata->ti_rx_jumbo_ring[i];
	sc->ti_cdata.ti_rx_jumbo_chain[i] = m_new;
	TI_HOSTADDR(r->ti_addr) = vtophys(mtod(m_new, caddr_t));
	r->ti_type = TI_BDTYPE_RECV_JUMBO_BD;
	r->ti_flags = TI_BDFLAG_JUMBO_RING;
	if (sc->arpcom.ac_if.if_hwassist)
		r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
	r->ti_len = m_new->m_len;
	r->ti_idx = i;

	return(0);
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB or memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
ti_init_rx_ring_std(sc)
	struct ti_softc		*sc;
{
	register int		i;
	struct ti_cmd_desc	cmd;	/* consumed by TI_UPDATE_STDPROD() */

	for (i = 0; i < TI_SSLOTS; i++) {
		if (ti_newbuf_std(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	};

	TI_UPDATE_STDPROD(sc, i - 1);
	sc->ti_std = i - 1;

	return(0);
}

/*
 * Free all mbufs attached to the standard RX ring and clear the
 * descriptors.
 */
static void
ti_free_rx_ring_std(sc)
	struct ti_softc		*sc;
{
	register int		i;

	for (i = 0; i < TI_STD_RX_RING_CNT; i++) {
		if (sc->ti_cdata.ti_rx_std_chain[i] != NULL) {
			m_freem(sc->ti_cdata.ti_rx_std_chain[i]);
			sc->ti_cdata.ti_rx_std_chain[i] = NULL;
		}
		bzero((char *)&sc->ti_rdata->ti_rx_std_ring[i],
		    sizeof(struct ti_rx_desc));
	}

	return;
}

/* Fill the jumbo RX ring and tell the NIC about the producer index. */
static int
ti_init_rx_ring_jumbo(sc)
	struct ti_softc		*sc;
{
	register int		i;
	struct ti_cmd_desc	cmd;	/* consumed by TI_UPDATE_JUMBOPROD() */

	for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
		if (ti_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	};

	TI_UPDATE_JUMBOPROD(sc, i - 1);
	sc->ti_jumbo = i - 1;

	return(0);
}

/*
 * Free all mbufs attached to the jumbo RX ring and clear the
 * descriptors.
 */
static void
ti_free_rx_ring_jumbo(sc)
	struct ti_softc		*sc;
{
	register int		i;

	for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
		if (sc->ti_cdata.ti_rx_jumbo_chain[i] != NULL) {
			m_freem(sc->ti_cdata.ti_rx_jumbo_chain[i]);
			sc->ti_cdata.ti_rx_jumbo_chain[i] = NULL;
		}
		bzero((char *)&sc->ti_rdata->ti_rx_jumbo_ring[i],
		    sizeof(struct ti_rx_desc));
	}

	return;
}

/* Fill the mini RX ring (Tigon 2 only) up to TI_MSLOTS entries. */
static int
ti_init_rx_ring_mini(sc)
	struct ti_softc		*sc;
{
	register int		i;

	for (i = 0; i < TI_MSLOTS; i++) {
		if (ti_newbuf_mini(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	};

	TI_UPDATE_MINIPROD(sc, i - 1);
	sc->ti_mini = i - 1;

	return(0);
}

/*
 * Free all mbufs attached to the mini RX ring and clear the
 * descriptors.
 */
static void
ti_free_rx_ring_mini(sc)
	struct ti_softc		*sc;
{
	register int		i;

	for (i = 0; i < TI_MINI_RX_RING_CNT; i++) {
		if (sc->ti_cdata.ti_rx_mini_chain[i] != NULL) {
			m_freem(sc->ti_cdata.ti_rx_mini_chain[i]);
			sc->ti_cdata.ti_rx_mini_chain[i] = NULL;
		}
		bzero((char *)&sc->ti_rdata->ti_rx_mini_ring[i],
		    sizeof(struct ti_rx_desc));
	}

	return;
}

/* Release any mbufs still queued on the TX ring and clear it. */
static void
ti_free_tx_ring(sc)
	struct ti_softc		*sc;
{
	register int		i;

	if (sc->ti_rdata->ti_tx_ring == NULL)
		return;

	for (i = 0; i < TI_TX_RING_CNT; i++) {
		if (sc->ti_cdata.ti_tx_chain[i] != NULL) {
			m_freem(sc->ti_cdata.ti_tx_chain[i]);
			sc->ti_cdata.ti_tx_chain[i] = NULL;
		}
		bzero((char *)&sc->ti_rdata->ti_tx_ring[i],
		    sizeof(struct ti_tx_desc));
	}

	return;
}

/* Reset TX bookkeeping and the send producer index mailbox. */
static int
ti_init_tx_ring(sc)
	struct ti_softc		*sc;
{
	sc->ti_txcnt = 0;
	sc->ti_tx_saved_considx = 0;
	CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, 0);
	return(0);
}

/*
 * The Tigon 2 firmware has a new way to add/delete multicast addresses,
 * but we have to support the old way too so that Tigon 1 cards will
 * work.
*/ void ti_add_mcast(sc, addr) struct ti_softc *sc; struct ether_addr *addr; { struct ti_cmd_desc cmd; u_int16_t *m; u_int32_t ext[2] = {0, 0}; m = (u_int16_t *)&addr->octet[0]; switch(sc->ti_hwrev) { case TI_HWREV_TIGON: CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0])); CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2])); TI_DO_CMD(TI_CMD_ADD_MCAST_ADDR, 0, 0); break; case TI_HWREV_TIGON_II: ext[0] = htons(m[0]); ext[1] = (htons(m[1]) << 16) | htons(m[2]); TI_DO_CMD_EXT(TI_CMD_EXT_ADD_MCAST, 0, 0, (caddr_t)&ext, 2); break; default: printf("ti%d: unknown hwrev\n", sc->ti_unit); break; } return; } void ti_del_mcast(sc, addr) struct ti_softc *sc; struct ether_addr *addr; { struct ti_cmd_desc cmd; u_int16_t *m; u_int32_t ext[2] = {0, 0}; m = (u_int16_t *)&addr->octet[0]; switch(sc->ti_hwrev) { case TI_HWREV_TIGON: CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0])); CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2])); TI_DO_CMD(TI_CMD_DEL_MCAST_ADDR, 0, 0); break; case TI_HWREV_TIGON_II: ext[0] = htons(m[0]); ext[1] = (htons(m[1]) << 16) | htons(m[2]); TI_DO_CMD_EXT(TI_CMD_EXT_DEL_MCAST, 0, 0, (caddr_t)&ext, 2); break; default: printf("ti%d: unknown hwrev\n", sc->ti_unit); break; } return; } /* * Configure the Tigon's multicast address filter. * * The actual multicast table management is a bit of a pain, thanks to * slight brain damage on the part of both Alteon and us. With our * multicast code, we are only alerted when the multicast address table * changes and at that point we only have the current list of addresses: * we only know the current state, not the previous state, so we don't * actually know what addresses were removed or added. The firmware has * state, but we can't get our grubby mits on it, and there is no 'delete * all multicast addresses' command. Hence, we have to maintain our own * state so we know what addresses have been programmed into the NIC at * any given time. 
*/ static void ti_setmulti(sc) struct ti_softc *sc; { struct ifnet *ifp; struct ifmultiaddr *ifma; struct ti_cmd_desc cmd; struct ti_mc_entry *mc; u_int32_t intrs; ifp = &sc->arpcom.ac_if; if (ifp->if_flags & IFF_ALLMULTI) { TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_ENB, 0); return; } else { TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_DIS, 0); } /* Disable interrupts. */ intrs = CSR_READ_4(sc, TI_MB_HOSTINTR); CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); /* First, zot all the existing filters. */ while (sc->ti_mc_listhead.slh_first != NULL) { mc = sc->ti_mc_listhead.slh_first; ti_del_mcast(sc, &mc->mc_addr); SLIST_REMOVE_HEAD(&sc->ti_mc_listhead, mc_entries); free(mc, M_DEVBUF); } /* Now program new ones. */ - for (ifma = ifp->if_multiaddrs.lh_first; - ifma != NULL; ifma = ifma->ifma_link.le_next) { + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; mc = malloc(sizeof(struct ti_mc_entry), M_DEVBUF, M_NOWAIT); bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), (char *)&mc->mc_addr, ETHER_ADDR_LEN); SLIST_INSERT_HEAD(&sc->ti_mc_listhead, mc, mc_entries); ti_add_mcast(sc, &mc->mc_addr); } /* Re-enable interrupts. */ CSR_WRITE_4(sc, TI_MB_HOSTINTR, intrs); return; } /* * Check to see if the BIOS has configured us for a 64 bit slot when * we aren't actually in one. If we detect this condition, we can work * around it on the Tigon 2 by setting a bit in the PCI state register, * but for the Tigon 1 we must give up and abort the interface attach. */ static int ti_64bitslot_war(sc) struct ti_softc *sc; { if (!(CSR_READ_4(sc, TI_PCI_STATE) & TI_PCISTATE_32BIT_BUS)) { CSR_WRITE_4(sc, 0x600, 0); CSR_WRITE_4(sc, 0x604, 0); CSR_WRITE_4(sc, 0x600, 0x5555AAAA); if (CSR_READ_4(sc, 0x604) == 0x5555AAAA) { if (sc->ti_hwrev == TI_HWREV_TIGON) return(EINVAL); else { TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_32BIT_BUS); return(0); } } } return(0); } /* * Do endian, PCI and DMA initialization. 
 Also check the on-board ROM
 * self-test results.
 */
static int
ti_chipinit(sc)
	struct ti_softc		*sc;
{
	u_int32_t		cacheline;
	u_int32_t		pci_writemax = 0;

	/* Initialize link to down state. */
	sc->ti_linkstat = TI_EV_CODE_LINK_DOWN;

	sc->arpcom.ac_if.if_hwassist = TI_CSUM_FEATURES;

	/* Set endianness before we access any non-PCI registers. */
#if BYTE_ORDER == BIG_ENDIAN
	CSR_WRITE_4(sc, TI_MISC_HOST_CTL,
	    TI_MHC_BIGENDIAN_INIT | (TI_MHC_BIGENDIAN_INIT << 24));
#else
	CSR_WRITE_4(sc, TI_MISC_HOST_CTL,
	    TI_MHC_LITTLEENDIAN_INIT | (TI_MHC_LITTLEENDIAN_INIT << 24));
#endif

	/* Check the ROM failed bit to see if self-tests passed. */
	if (CSR_READ_4(sc, TI_CPU_STATE) & TI_CPUSTATE_ROMFAIL) {
		printf("ti%d: board self-diagnostics failed!\n", sc->ti_unit);
		return(ENODEV);
	}

	/* Halt the CPU. */
	TI_SETBIT(sc, TI_CPU_STATE, TI_CPUSTATE_HALT);

	/* Figure out the hardware revision. */
	switch(CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_CHIP_REV_MASK) {
	case TI_REV_TIGON_I:
		sc->ti_hwrev = TI_HWREV_TIGON;
		break;
	case TI_REV_TIGON_II:
		sc->ti_hwrev = TI_HWREV_TIGON_II;
		break;
	default:
		printf("ti%d: unsupported chip revision\n", sc->ti_unit);
		return(ENODEV);
	}

	/* Do special setup for Tigon 2. */
	if (sc->ti_hwrev == TI_HWREV_TIGON_II) {
		TI_SETBIT(sc, TI_CPU_CTL_B, TI_CPUSTATE_HALT);
		TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_SRAM_BANK_256K);
		TI_SETBIT(sc, TI_MISC_CONF, TI_MCR_SRAM_SYNCHRONOUS);
	}

	/* Set up the PCI state register. */
	CSR_WRITE_4(sc, TI_PCI_STATE, TI_PCI_READ_CMD|TI_PCI_WRITE_CMD);
	if (sc->ti_hwrev == TI_HWREV_TIGON_II) {
		TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_USE_MEM_RD_MULT);
	}

	/* Clear the read/write max DMA parameters. */
	TI_CLRBIT(sc, TI_PCI_STATE, (TI_PCISTATE_WRITE_MAXDMA|
	    TI_PCISTATE_READ_MAXDMA));

	/* Get cache line size. */
	cacheline = CSR_READ_4(sc, TI_PCI_BIST) & 0xFF;

	/*
	 * If the system has set enabled the PCI memory write
	 * and invalidate command in the command register, set
	 * the write max parameter accordingly. This is necessary
	 * to use MWI with the Tigon 2.
	 */
	if (CSR_READ_4(sc, TI_PCI_CMDSTAT) & PCIM_CMD_MWIEN) {
		switch(cacheline) {
		case 1:
		case 4:
		case 8:
		case 16:
		case 32:
		case 64:
			break;
		default:
		/* Disable PCI memory write and invalidate. */
			if (bootverbose)
				printf("ti%d: cache line size %d not "
				    "supported; disabling PCI MWI\n",
				    sc->ti_unit, cacheline);
			CSR_WRITE_4(sc, TI_PCI_CMDSTAT, CSR_READ_4(sc,
			    TI_PCI_CMDSTAT) & ~PCIM_CMD_MWIEN);
			break;
		}
	}

#ifdef __brokenalpha__
	/*
	 * From the Alteon sample driver:
	 * Must insure that we do not cross an 8K (bytes) boundary
	 * for DMA reads.  Our highest limit is 1K bytes.  This is a
	 * restriction on some ALPHA platforms with early revision
	 * 21174 PCI chipsets, such as the AlphaPC 164lx
	 */
	TI_SETBIT(sc, TI_PCI_STATE, pci_writemax|TI_PCI_READMAX_1024);
#else
	TI_SETBIT(sc, TI_PCI_STATE, pci_writemax);
#endif

	/* This sets the min dma param all the way up (0xff). */
	TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_MINDMA);

	/* Configure DMA variables. */
#if BYTE_ORDER == BIG_ENDIAN
	CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_BD |
	    TI_OPMODE_BYTESWAP_DATA | TI_OPMODE_WORDSWAP_BD |
	    TI_OPMODE_WARN_ENB | TI_OPMODE_FATAL_ENB |
	    TI_OPMODE_DONT_FRAG_JUMBO);
#else
	CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_DATA|
	    TI_OPMODE_WORDSWAP_BD|TI_OPMODE_DONT_FRAG_JUMBO|
	    TI_OPMODE_WARN_ENB|TI_OPMODE_FATAL_ENB);
#endif

	/*
	 * Only allow 1 DMA channel to be active at a time.
	 * I don't think this is a good idea, but without it
	 * the firmware racks up lots of nicDmaReadRingFull
	 * errors.  This is not compatible with hardware checksums.
	 */
	if (sc->arpcom.ac_if.if_hwassist == 0)
		TI_SETBIT(sc, TI_GCR_OPMODE, TI_OPMODE_1_DMA_ACTIVE);

	/* Recommended settings from Tigon manual. */
	CSR_WRITE_4(sc, TI_GCR_DMA_WRITECFG, TI_DMA_STATE_THRESH_8W);
	CSR_WRITE_4(sc, TI_GCR_DMA_READCFG, TI_DMA_STATE_THRESH_8W);

	if (ti_64bitslot_war(sc)) {
		printf("ti%d: bios thinks we're in a 64 bit slot, "
		    "but we aren't", sc->ti_unit);
		return(EINVAL);
	}

	return(0);
}

/*
 * Initialize the general information block and firmware, and
 * start the CPU(s) running.
 */
static int
ti_gibinit(sc)
	struct ti_softc		*sc;
{
	struct ti_rcb		*rcb;
	int			i;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;

	/* Disable interrupts for now. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);

	/* Tell the chip where to find the general information block. */
	CSR_WRITE_4(sc, TI_GCR_GENINFO_HI, 0);
	CSR_WRITE_4(sc, TI_GCR_GENINFO_LO, vtophys(&sc->ti_rdata->ti_info));

	/* Load the firmware into SRAM. */
	ti_loadfw(sc);

	/* Set up the contents of the general info and ring control blocks. */

	/* Set up the event ring and producer pointer. */
	rcb = &sc->ti_rdata->ti_info.ti_ev_rcb;

	TI_HOSTADDR(rcb->ti_hostaddr) = vtophys(&sc->ti_rdata->ti_event_ring);
	rcb->ti_flags = 0;
	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_ev_prodidx_ptr) =
	    vtophys(&sc->ti_ev_prodidx);
	sc->ti_ev_prodidx.ti_idx = 0;
	CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, 0);
	sc->ti_ev_saved_considx = 0;

	/* Set up the command ring and producer mailbox. */
	rcb = &sc->ti_rdata->ti_info.ti_cmd_rcb;

	sc->ti_rdata->ti_cmd_ring =
	    (struct ti_cmd_desc *)(sc->ti_vhandle + TI_GCR_CMDRING);
	TI_HOSTADDR(rcb->ti_hostaddr) = TI_GCR_NIC_ADDR(TI_GCR_CMDRING);
	rcb->ti_flags = 0;
	rcb->ti_max_len = 0;
	for (i = 0; i < TI_CMD_RING_CNT; i++) {
		CSR_WRITE_4(sc, TI_GCR_CMDRING + (i * 4), 0);
	}
	CSR_WRITE_4(sc, TI_GCR_CMDCONS_IDX, 0);
	CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, 0);
	sc->ti_cmd_saved_prodidx = 0;

	/*
	 * Assign the address of the stats refresh buffer.
	 * We re-use the current stats buffer for this to
	 * conserve memory.
	 */
	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_refresh_stats_ptr) =
	    vtophys(&sc->ti_rdata->ti_info.ti_stats);

	/* Set up the standard receive ring. */
	rcb = &sc->ti_rdata->ti_info.ti_std_rx_rcb;
	TI_HOSTADDR(rcb->ti_hostaddr) = vtophys(&sc->ti_rdata->ti_rx_std_ring);
	rcb->ti_max_len = TI_FRAMELEN;
	rcb->ti_flags = 0;
	if (sc->arpcom.ac_if.if_hwassist)
		rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
		     TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
#if NVLAN > 0
	rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
#endif

	/* Set up the jumbo receive ring. */
	rcb = &sc->ti_rdata->ti_info.ti_jumbo_rx_rcb;
	TI_HOSTADDR(rcb->ti_hostaddr) =
	    vtophys(&sc->ti_rdata->ti_rx_jumbo_ring);
	rcb->ti_max_len = TI_JUMBO_FRAMELEN;
	rcb->ti_flags = 0;
	if (sc->arpcom.ac_if.if_hwassist)
		rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
		     TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
#if NVLAN > 0
	rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
#endif

	/*
	 * Set up the mini ring. Only activated on the
	 * Tigon 2 but the slot in the config block is
	 * still there on the Tigon 1.
	 */
	rcb = &sc->ti_rdata->ti_info.ti_mini_rx_rcb;
	TI_HOSTADDR(rcb->ti_hostaddr) =
	    vtophys(&sc->ti_rdata->ti_rx_mini_ring);
	rcb->ti_max_len = MHLEN - ETHER_ALIGN;
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		rcb->ti_flags = TI_RCB_FLAG_RING_DISABLED;
	else
		rcb->ti_flags = 0;
	if (sc->arpcom.ac_if.if_hwassist)
		rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
		     TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
#if NVLAN > 0
	rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
#endif

	/*
	 * Set up the receive return ring.
	 */
	rcb = &sc->ti_rdata->ti_info.ti_return_rcb;
	TI_HOSTADDR(rcb->ti_hostaddr) =
	    vtophys(&sc->ti_rdata->ti_rx_return_ring);
	rcb->ti_flags = 0;
	rcb->ti_max_len = TI_RETURN_RING_CNT;
	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_return_prodidx_ptr) =
	    vtophys(&sc->ti_return_prodidx);

	/*
	 * Set up the tx ring. Note: for the Tigon 2, we have the option
	 * of putting the transmit ring in the host's address space and
	 * letting the chip DMA it instead of leaving the ring in the NIC's
	 * memory and accessing it through the shared memory region. We
	 * do this for the Tigon 2, but it doesn't work on the Tigon 1,
	 * so we have to revert to the shared memory scheme if we detect
	 * a Tigon 1 chip.
	 */
	CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE);
	if (sc->ti_hwrev == TI_HWREV_TIGON) {
		sc->ti_rdata->ti_tx_ring_nic =
		    (struct ti_tx_desc *)(sc->ti_vhandle + TI_WINDOW);
	}
	bzero((char *)sc->ti_rdata->ti_tx_ring,
	    TI_TX_RING_CNT * sizeof(struct ti_tx_desc));
	rcb = &sc->ti_rdata->ti_info.ti_tx_rcb;
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		rcb->ti_flags = 0;
	else
		rcb->ti_flags = TI_RCB_FLAG_HOST_RING;
#if NVLAN > 0
	rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
#endif
	if (sc->arpcom.ac_if.if_hwassist)
		rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
		     TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
	rcb->ti_max_len = TI_TX_RING_CNT;
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		TI_HOSTADDR(rcb->ti_hostaddr) = TI_TX_RING_BASE;
	else
		TI_HOSTADDR(rcb->ti_hostaddr) =
		    vtophys(&sc->ti_rdata->ti_tx_ring);
	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_tx_considx_ptr) =
	    vtophys(&sc->ti_tx_considx);

	/* Set up tuneables */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS,
		    (sc->ti_rx_coal_ticks / 10));
	else
		CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, sc->ti_rx_coal_ticks);
	CSR_WRITE_4(sc, TI_GCR_TX_COAL_TICKS, sc->ti_tx_coal_ticks);
	CSR_WRITE_4(sc, TI_GCR_STAT_TICKS, sc->ti_stat_ticks);
	CSR_WRITE_4(sc, TI_GCR_RX_MAX_COAL_BD, sc->ti_rx_max_coal_bds);
	CSR_WRITE_4(sc, TI_GCR_TX_MAX_COAL_BD, sc->ti_tx_max_coal_bds);
	CSR_WRITE_4(sc, TI_GCR_TX_BUFFER_RATIO, sc->ti_tx_buf_ratio);

	/* Turn interrupts on. */
	CSR_WRITE_4(sc, TI_GCR_MASK_INTRS, 0);
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);

	/* Start CPU. */
	TI_CLRBIT(sc, TI_CPU_STATE, (TI_CPUSTATE_HALT|TI_CPUSTATE_STEP));

	return(0);
}

/*
 * Probe for a Tigon chip. Check the PCI vendor and device IDs
 * against our list and return its name if we find a match.
 */
static int
ti_probe(dev)
	device_t		dev;
{
	struct ti_type		*t;

	t = ti_devs;

	/* Walk the NULL-terminated device table. */
	while(t->ti_name != NULL) {
		if ((pci_get_vendor(dev) == t->ti_vid) &&
		    (pci_get_device(dev) == t->ti_did)) {
			device_set_desc(dev, t->ti_name);
			return(0);
		}
		t++;
	}

	return(ENXIO);
}

/*
 * Attach routine: map registers and the interrupt, reset and probe the
 * chip, read the station address from the EEPROM, allocate ring and
 * jumbo-buffer memory, and register the interface with the network
 * stack.  Each failure path hand-unwinds the resources acquired so far.
 */
static int
ti_attach(dev)
	device_t		dev;
{
	u_int32_t		command;
	struct ifnet		*ifp;
	struct ti_softc		*sc;
	int			unit, error = 0, rid;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	bzero(sc, sizeof(struct ti_softc));

	mtx_init(&sc->ti_mtx, device_get_nameunit(dev), MTX_DEF | MTX_RECURSE);
	TI_LOCK(sc);

	/*
	 * Map control/status registers.
	 */
	command = pci_read_config(dev, PCIR_COMMAND, 4);
	command |= (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
	pci_write_config(dev, PCIR_COMMAND, command, 4);
	command = pci_read_config(dev, PCIR_COMMAND, 4);

	if (!(command & PCIM_CMD_MEMEN)) {
		printf("ti%d: failed to enable memory mapping!\n", unit);
		error = ENXIO;
		goto fail;
	}

	rid = TI_PCI_LOMEM;
	sc->ti_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
	    0, ~0, 1, RF_ACTIVE|PCI_RF_DENSE);

	if (sc->ti_res == NULL) {
		printf ("ti%d: couldn't map memory\n", unit);
		error = ENXIO;
		goto fail;
	}

	sc->ti_btag = rman_get_bustag(sc->ti_res);
	sc->ti_bhandle = rman_get_bushandle(sc->ti_res);
	sc->ti_vhandle = (vm_offset_t)rman_get_virtual(sc->ti_res);

	/* Allocate interrupt */
	rid = 0;
	sc->ti_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->ti_irq == NULL) {
		printf("ti%d: couldn't map interrupt\n", unit);
		error = ENXIO;
		goto fail;
	}

	error = bus_setup_intr(dev, sc->ti_irq, INTR_TYPE_NET,
	   ti_intr, sc, &sc->ti_intrhand);

	if (error) {
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ti_irq);
		bus_release_resource(dev, SYS_RES_MEMORY,
		    TI_PCI_LOMEM, sc->ti_res);
		printf("ti%d: couldn't set up irq\n", unit);
		goto fail;
	}

	sc->ti_unit = unit;

	if (ti_chipinit(sc)) {
		printf("ti%d: chip initialization failed\n", sc->ti_unit);
		bus_teardown_intr(dev, sc->ti_irq, sc->ti_intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ti_irq);
		bus_release_resource(dev, SYS_RES_MEMORY,
		    TI_PCI_LOMEM, sc->ti_res);
		error = ENXIO;
		goto fail;
	}

	/* Zero out the NIC's on-board SRAM. */
	ti_mem(sc, 0x2000, 0x100000 - 0x2000, NULL);

	/* Init again -- zeroing memory may have clobbered some registers. */
	if (ti_chipinit(sc)) {
		printf("ti%d: chip initialization failed\n", sc->ti_unit);
		bus_teardown_intr(dev, sc->ti_irq, sc->ti_intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ti_irq);
		bus_release_resource(dev, SYS_RES_MEMORY,
		    TI_PCI_LOMEM, sc->ti_res);
		error = ENXIO;
		goto fail;
	}

	/*
	 * Get station address from the EEPROM. Note: the manual states
	 * that the MAC address is at offset 0x8c, however the data is
	 * stored as two longwords (since that's how it's loaded into
	 * the NIC). This means the MAC address is actually preceeded
	 * by two zero bytes. We need to skip over those.
	 */
	if (ti_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
				TI_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
		printf("ti%d: failed to read station address\n", unit);
		bus_teardown_intr(dev, sc->ti_irq, sc->ti_intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ti_irq);
		bus_release_resource(dev, SYS_RES_MEMORY,
		    TI_PCI_LOMEM, sc->ti_res);
		error = ENXIO;
		goto fail;
	}

	/*
	 * A Tigon chip was detected. Inform the world.
	 */
	printf("ti%d: Ethernet address: %6D\n", unit,
				sc->arpcom.ac_enaddr, ":");

	/* Allocate the general information block and ring buffers. */
	sc->ti_rdata = contigmalloc(sizeof(struct ti_ring_data), M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->ti_rdata == NULL) {
		bus_teardown_intr(dev, sc->ti_irq, sc->ti_intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ti_irq);
		bus_release_resource(dev, SYS_RES_MEMORY,
		    TI_PCI_LOMEM, sc->ti_res);
		error = ENXIO;
		printf("ti%d: no memory for list buffers!\n", sc->ti_unit);
		goto fail;
	}

	bzero(sc->ti_rdata, sizeof(struct ti_ring_data));

	/* Try to allocate memory for jumbo buffers. */
	if (ti_alloc_jumbo_mem(sc)) {
		printf("ti%d: jumbo buffer allocation failed\n", sc->ti_unit);
		bus_teardown_intr(dev, sc->ti_irq, sc->ti_intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ti_irq);
		bus_release_resource(dev, SYS_RES_MEMORY,
		    TI_PCI_LOMEM, sc->ti_res);
		contigfree(sc->ti_rdata, sizeof(struct ti_ring_data),
		    M_DEVBUF);
		error = ENXIO;
		goto fail;
	}

	/*
	 * We really need a better way to tell a 1000baseTX card
	 * from a 1000baseSX one, since in theory there could be
	 * OEMed 1000baseTX cards from lame vendors who aren't
	 * clever enough to change the PCI ID. For the moment
	 * though, the AceNIC is the only copper card available.
	 */
	if (pci_get_vendor(dev) == ALT_VENDORID &&
	    pci_get_device(dev) == ALT_DEVICEID_ACENIC_COPPER)
		sc->ti_copper = 1;
	/* Ok, it's not the only copper card available. */
	if (pci_get_vendor(dev) == NG_VENDORID &&
	    pci_get_device(dev) == NG_DEVICEID_GA620T)
		sc->ti_copper = 1;

	/* Set default tuneable values. */
	sc->ti_stat_ticks = 2 * TI_TICKS_PER_SEC;
	sc->ti_rx_coal_ticks = TI_TICKS_PER_SEC / 5000;
	sc->ti_tx_coal_ticks = TI_TICKS_PER_SEC / 500;
	sc->ti_rx_max_coal_bds = 64;
	sc->ti_tx_max_coal_bds = 128;
	sc->ti_tx_buf_ratio = 21;

	/* Set up ifnet structure */
	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_unit = sc->ti_unit;
	ifp->if_name = "ti";
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ti_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = ti_start;
	ifp->if_watchdog = ti_watchdog;
	ifp->if_init = ti_init;
	ifp->if_mtu = ETHERMTU;
	ifp->if_snd.ifq_maxlen = TI_TX_RING_CNT - 1;

	/* Set up ifmedia support. */
	ifmedia_init(&sc->ifmedia, IFM_IMASK, ti_ifmedia_upd, ti_ifmedia_sts);
	if (sc->ti_copper) {
		/*
		 * Copper cards allow manual 10/100 mode selection,
		 * but not manual 1000baseTX mode selection. Why?
		 * Because currently there's no way to specify the
		 * master/slave setting through the firmware interface,
		 * so Alteon decided to just bag it and handle it
		 * via autonegotiation.
		 */
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_add(&sc->ifmedia,
		    IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->ifmedia,
		    IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_TX, 0, NULL);
		ifmedia_add(&sc->ifmedia,
		    IFM_ETHER|IFM_1000_TX|IFM_FDX, 0, NULL);
	} else {
		/* Fiber cards don't support 10/100 modes. */
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
		ifmedia_add(&sc->ifmedia,
		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
	}
	ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_AUTO);

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
	TI_UNLOCK(sc);
	return(0);

fail:
	TI_UNLOCK(sc);
	mtx_destroy(&sc->ti_mtx);
	return(error);
}

/* Detach: stop the chip, release all resources acquired in attach. */
static int
ti_detach(dev)
	device_t		dev;
{
	struct ti_softc		*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	TI_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
	ti_stop(sc);

	bus_teardown_intr(dev, sc->ti_irq, sc->ti_intrhand);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ti_irq);
	bus_release_resource(dev, SYS_RES_MEMORY, TI_PCI_LOMEM, sc->ti_res);

	contigfree(sc->ti_cdata.ti_jumbo_buf, TI_JMEM, M_DEVBUF);
	contigfree(sc->ti_rdata, sizeof(struct ti_ring_data), M_DEVBUF);
	ifmedia_removeall(&sc->ifmedia);

	TI_UNLOCK(sc);
	mtx_destroy(&sc->ti_mtx);

	return(0);
}

/*
 * Frame reception handling. This is called if there's a frame
 * on the receive return list.
* * Note: we have to be able to handle three possibilities here: * 1) the frame is from the mini receive ring (can only happen) * on Tigon 2 boards) * 2) the frame is from the jumbo recieve ring * 3) the frame is from the standard receive ring */ static void ti_rxeof(sc) struct ti_softc *sc; { struct ifnet *ifp; struct ti_cmd_desc cmd; ifp = &sc->arpcom.ac_if; while(sc->ti_rx_saved_considx != sc->ti_return_prodidx.ti_idx) { struct ti_rx_desc *cur_rx; u_int32_t rxidx; struct ether_header *eh; struct mbuf *m = NULL; #if NVLAN > 0 u_int16_t vlan_tag = 0; int have_tag = 0; #endif cur_rx = &sc->ti_rdata->ti_rx_return_ring[sc->ti_rx_saved_considx]; rxidx = cur_rx->ti_idx; TI_INC(sc->ti_rx_saved_considx, TI_RETURN_RING_CNT); #if NVLAN > 0 if (cur_rx->ti_flags & TI_BDFLAG_VLAN_TAG) { have_tag = 1; vlan_tag = cur_rx->ti_vlan_tag; } #endif if (cur_rx->ti_flags & TI_BDFLAG_JUMBO_RING) { TI_INC(sc->ti_jumbo, TI_JUMBO_RX_RING_CNT); m = sc->ti_cdata.ti_rx_jumbo_chain[rxidx]; sc->ti_cdata.ti_rx_jumbo_chain[rxidx] = NULL; if (cur_rx->ti_flags & TI_BDFLAG_ERROR) { ifp->if_ierrors++; ti_newbuf_jumbo(sc, sc->ti_jumbo, m); continue; } if (ti_newbuf_jumbo(sc, sc->ti_jumbo, NULL) == ENOBUFS) { ifp->if_ierrors++; ti_newbuf_jumbo(sc, sc->ti_jumbo, m); continue; } } else if (cur_rx->ti_flags & TI_BDFLAG_MINI_RING) { TI_INC(sc->ti_mini, TI_MINI_RX_RING_CNT); m = sc->ti_cdata.ti_rx_mini_chain[rxidx]; sc->ti_cdata.ti_rx_mini_chain[rxidx] = NULL; if (cur_rx->ti_flags & TI_BDFLAG_ERROR) { ifp->if_ierrors++; ti_newbuf_mini(sc, sc->ti_mini, m); continue; } if (ti_newbuf_mini(sc, sc->ti_mini, NULL) == ENOBUFS) { ifp->if_ierrors++; ti_newbuf_mini(sc, sc->ti_mini, m); continue; } } else { TI_INC(sc->ti_std, TI_STD_RX_RING_CNT); m = sc->ti_cdata.ti_rx_std_chain[rxidx]; sc->ti_cdata.ti_rx_std_chain[rxidx] = NULL; if (cur_rx->ti_flags & TI_BDFLAG_ERROR) { ifp->if_ierrors++; ti_newbuf_std(sc, sc->ti_std, m); continue; } if (ti_newbuf_std(sc, sc->ti_std, NULL) == ENOBUFS) { ifp->if_ierrors++; 
ti_newbuf_std(sc, sc->ti_std, m); continue; } } m->m_pkthdr.len = m->m_len = cur_rx->ti_len; ifp->if_ipackets++; eh = mtod(m, struct ether_header *); m->m_pkthdr.rcvif = ifp; /* Remove header from mbuf and pass it on. */ m_adj(m, sizeof(struct ether_header)); if (ifp->if_hwassist) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_DATA_VALID; if ((cur_rx->ti_ip_cksum ^ 0xffff) == 0) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; m->m_pkthdr.csum_data = cur_rx->ti_tcp_udp_cksum; } #if NVLAN > 0 /* * If we received a packet with a vlan tag, pass it * to vlan_input() instead of ether_input(). */ if (have_tag) { vlan_input_tag(eh, m, vlan_tag); have_tag = vlan_tag = 0; continue; } #endif ether_input(ifp, eh, m); } /* Only necessary on the Tigon 1. */ if (sc->ti_hwrev == TI_HWREV_TIGON) CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX, sc->ti_rx_saved_considx); TI_UPDATE_STDPROD(sc, sc->ti_std); TI_UPDATE_MINIPROD(sc, sc->ti_mini); TI_UPDATE_JUMBOPROD(sc, sc->ti_jumbo); return; } static void ti_txeof(sc) struct ti_softc *sc; { struct ti_tx_desc *cur_tx = NULL; struct ifnet *ifp; ifp = &sc->arpcom.ac_if; /* * Go through our tx ring and free mbufs for those * frames that have been sent. 
*/ while (sc->ti_tx_saved_considx != sc->ti_tx_considx.ti_idx) { u_int32_t idx = 0; idx = sc->ti_tx_saved_considx; if (sc->ti_hwrev == TI_HWREV_TIGON) { if (idx > 383) CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE + 6144); else if (idx > 255) CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE + 4096); else if (idx > 127) CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE + 2048); else CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE); cur_tx = &sc->ti_rdata->ti_tx_ring_nic[idx % 128]; } else cur_tx = &sc->ti_rdata->ti_tx_ring[idx]; if (cur_tx->ti_flags & TI_BDFLAG_END) ifp->if_opackets++; if (sc->ti_cdata.ti_tx_chain[idx] != NULL) { m_freem(sc->ti_cdata.ti_tx_chain[idx]); sc->ti_cdata.ti_tx_chain[idx] = NULL; } sc->ti_txcnt--; TI_INC(sc->ti_tx_saved_considx, TI_TX_RING_CNT); ifp->if_timer = 0; } if (cur_tx != NULL) ifp->if_flags &= ~IFF_OACTIVE; return; } static void ti_intr(xsc) void *xsc; { struct ti_softc *sc; struct ifnet *ifp; sc = xsc; TI_LOCK(sc); ifp = &sc->arpcom.ac_if; #ifdef notdef /* Avoid this for now -- checking this register is expensive. */ /* Make sure this is really our interrupt. */ if (!(CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_INTSTATE)) { TI_UNLOCK(sc); return; } #endif /* Ack interrupt and stop others from occuring. */ CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); if (ifp->if_flags & IFF_RUNNING) { /* Check RX return ring producer/consumer */ ti_rxeof(sc); /* Check TX ring producer/consumer */ ti_txeof(sc); } ti_handle_events(sc); /* Re-enable interrupts. 
*/ CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0); if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL) ti_start(ifp); TI_UNLOCK(sc); return; } static void ti_stats_update(sc) struct ti_softc *sc; { struct ifnet *ifp; ifp = &sc->arpcom.ac_if; ifp->if_collisions += (sc->ti_rdata->ti_info.ti_stats.dot3StatsSingleCollisionFrames + sc->ti_rdata->ti_info.ti_stats.dot3StatsMultipleCollisionFrames + sc->ti_rdata->ti_info.ti_stats.dot3StatsExcessiveCollisions + sc->ti_rdata->ti_info.ti_stats.dot3StatsLateCollisions) - ifp->if_collisions; return; } /* * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data * pointers to descriptors. */ static int ti_encap(sc, m_head, txidx) struct ti_softc *sc; struct mbuf *m_head; u_int32_t *txidx; { struct ti_tx_desc *f = NULL; struct mbuf *m; u_int32_t frag, cur, cnt = 0; u_int16_t csum_flags = 0; #if NVLAN > 0 struct ifvlan *ifv = NULL; if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) && m_head->m_pkthdr.rcvif != NULL && m_head->m_pkthdr.rcvif->if_type == IFT_8021_VLAN) ifv = m_head->m_pkthdr.rcvif->if_softc; #endif m = m_head; cur = frag = *txidx; if (m_head->m_pkthdr.csum_flags) { if (m_head->m_pkthdr.csum_flags & CSUM_IP) csum_flags |= TI_BDFLAG_IP_CKSUM; if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) csum_flags |= TI_BDFLAG_TCP_UDP_CKSUM; if (m_head->m_flags & M_LASTFRAG) csum_flags |= TI_BDFLAG_IP_FRAG_END; else if (m_head->m_flags & M_FRAG) csum_flags |= TI_BDFLAG_IP_FRAG; } /* * Start packing the mbufs in this chain into * the fragment pointers. Stop when we run out * of fragments or hit the end of the mbuf chain. 
 */
	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			/*
			 * The Tigon 1 apparently exposes the TX ring only
			 * through a shared-memory window; select the window
			 * page containing descriptor 'frag' before writing
			 * it (note the % 128 indexing below).
			 */
			if (sc->ti_hwrev == TI_HWREV_TIGON) {
				if (frag > 383)
					CSR_WRITE_4(sc, TI_WINBASE,
					    TI_TX_RING_BASE + 6144);
				else if (frag > 255)
					CSR_WRITE_4(sc, TI_WINBASE,
					    TI_TX_RING_BASE + 4096);
				else if (frag > 127)
					CSR_WRITE_4(sc, TI_WINBASE,
					    TI_TX_RING_BASE + 2048);
				else
					CSR_WRITE_4(sc, TI_WINBASE,
					    TI_TX_RING_BASE);
				f = &sc->ti_rdata->ti_tx_ring_nic[frag % 128];
			} else
				f = &sc->ti_rdata->ti_tx_ring[frag];
			/* Descriptor still owned by an earlier packet. */
			if (sc->ti_cdata.ti_tx_chain[frag] != NULL)
				break;
			TI_HOSTADDR(f->ti_addr) = vtophys(mtod(m, vm_offset_t));
			f->ti_len = m->m_len;
			f->ti_flags = csum_flags;
#if NVLAN > 0
			if (ifv != NULL) {
				f->ti_flags |= TI_BDFLAG_VLAN_TAG;
				f->ti_vlan_tag = ifv->ifv_tag;
			} else {
				f->ti_vlan_tag = 0;
			}
#endif
			/*
			 * Sanity check: avoid coming within 16 descriptors
			 * of the end of the ring.
			 */
			if ((TI_TX_RING_CNT - (sc->ti_txcnt + cnt)) < 16)
				return(ENOBUFS);
			cur = frag;
			TI_INC(frag, TI_TX_RING_CNT);
			cnt++;
		}
	}

	/* Ran out of descriptors before consuming the whole chain. */
	if (m != NULL)
		return(ENOBUFS);

	if (frag == sc->ti_tx_saved_considx)
		return(ENOBUFS);

	/* Flag the last fragment so the NIC knows where the packet ends. */
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		sc->ti_rdata->ti_tx_ring_nic[cur % 128].ti_flags |=
		    TI_BDFLAG_END;
	else
		sc->ti_rdata->ti_tx_ring[cur].ti_flags |= TI_BDFLAG_END;
	/* Remember the head mbuf on the last descriptor for later freeing. */
	sc->ti_cdata.ti_tx_chain[cur] = m_head;
	sc->ti_txcnt += cnt;

	*txidx = frag;

	return(0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 */
static void
ti_start(ifp)
	struct ifnet *ifp;
{
	struct ti_softc *sc;
	struct mbuf *m_head = NULL;
	u_int32_t prodidx = 0;

	sc = ifp->if_softc;

	TI_LOCK(sc);

	prodidx = CSR_READ_4(sc, TI_MB_SENDPROD_IDX);

	/* Queue packets until the slot at the producer index is occupied. */
	while(sc->ti_cdata.ti_tx_chain[prodidx] == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * XXX
		 * safety overkill. If this is a fragmented packet chain
		 * with delayed TCP/UDP checksums, then only encapsulate
		 * it if we have enough descriptors to handle the entire
		 * chain at once.
		 * (paranoia -- may not actually be needed)
		 */
		if (m_head->m_flags & M_FIRSTFRAG &&
		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
			if ((TI_TX_RING_CNT - sc->ti_txcnt) <
			    m_head->m_pkthdr.csum_data + 16) {
				IF_PREPEND(&ifp->if_snd, m_head);
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
		}

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (ti_encap(sc, m_head, &prodidx)) {
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp, m_head);
	}

	/* Transmit */
	CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, prodidx);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
	TI_UNLOCK(sc);

	return;
}

/*
 * First half of interface (re)initialization: stop the chip and
 * reload the gen info block, ring control blocks and firmware.
 * The remaining setup is performed by ti_init2() (presumably once
 * the firmware is up -- confirm against the event handler).
 */
static void
ti_init(xsc)
	void *xsc;
{
	struct ti_softc *sc = xsc;

	/* Cancel pending I/O and flush buffers. */
	ti_stop(sc);

	TI_LOCK(sc);
	/* Init the gen info block, ring control blocks and firmware. */
	if (ti_gibinit(sc)) {
		printf("ti%d: initialization failure\n", sc->ti_unit);
		TI_UNLOCK(sc);
		return;
	}

	TI_UNLOCK(sc);

	return;
}

/*
 * Second half of initialization: program MTU, MAC address, RX filter
 * and rings via firmware commands, then bring the link up.
 */
static void
ti_init2(sc)
	struct ti_softc *sc;
{
	struct ti_cmd_desc cmd;
	struct ifnet *ifp;
	u_int16_t *m;
	struct ifmedia *ifm;
	int tmp;

	ifp = &sc->arpcom.ac_if;

	/* Specify MTU and interface index. */
	CSR_WRITE_4(sc, TI_GCR_IFINDEX, ifp->if_unit);
	CSR_WRITE_4(sc, TI_GCR_IFMTU, ifp->if_mtu +
	    ETHER_HDR_LEN + ETHER_CRC_LEN);
	TI_DO_CMD(TI_CMD_UPDATE_GENCOM, 0, 0);

	/* Load our MAC address. */
	m = (u_int16_t *)&sc->arpcom.ac_enaddr[0];
	CSR_WRITE_4(sc, TI_GCR_PAR0, htons(m[0]));
	CSR_WRITE_4(sc, TI_GCR_PAR1, (htons(m[1]) << 16) | htons(m[2]));
	TI_DO_CMD(TI_CMD_SET_MAC_ADDR, 0, 0);

	/* Enable or disable promiscuous mode as needed. */
	if (ifp->if_flags & IFF_PROMISC) {
		TI_DO_CMD(TI_CMD_SET_PROMISC_MODE,
		    TI_CMD_CODE_PROMISC_ENB, 0);
	} else {
		TI_DO_CMD(TI_CMD_SET_PROMISC_MODE,
		    TI_CMD_CODE_PROMISC_DIS, 0);
	}

	/* Program multicast filter. */
	ti_setmulti(sc);

	/*
	 * If this is a Tigon 1, we should tell the
	 * firmware to use software packet filtering.
	 */
	if (sc->ti_hwrev == TI_HWREV_TIGON) {
		TI_DO_CMD(TI_CMD_FDR_FILTERING, TI_CMD_CODE_FILT_ENB, 0);
	}

	/* Init RX ring. */
	ti_init_rx_ring_std(sc);

	/* Init jumbo RX ring. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		ti_init_rx_ring_jumbo(sc);

	/*
	 * If this is a Tigon 2, we can also configure the
	 * mini ring.
	 */
	if (sc->ti_hwrev == TI_HWREV_TIGON_II)
		ti_init_rx_ring_mini(sc);

	CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX, 0);
	sc->ti_rx_saved_considx = 0;

	/* Init TX ring. */
	ti_init_tx_ring(sc);

	/* Tell firmware we're alive. */
	TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_UP, 0);

	/* Enable host interrupts. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Make sure to set media properly. We have to do this
	 * here since we have to issue commands in order to set
	 * the link negotiation and we can't issue commands until
	 * the firmware is running.
	 */
	ifm = &sc->ifmedia;
	tmp = ifm->ifm_media;
	ifm->ifm_media = ifm->ifm_cur->ifm_media;
	ti_ifmedia_upd(ifp);
	ifm->ifm_media = tmp;

	return;
}

/*
 * Set media options.
 */
static int
ti_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct ti_softc *sc;
	struct ifmedia *ifm;
	struct ti_cmd_desc cmd;

	sc = ifp->if_softc;
	ifm = &sc->ifmedia;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return(EINVAL);

	switch(IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/* Autonegotiate everything: gigabit and 10/100 link. */
		CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB|
		    TI_GLNK_FULL_DUPLEX|TI_GLNK_RX_FLOWCTL_Y|
		    TI_GLNK_AUTONEGENB|TI_GLNK_ENB);
		CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_100MB|TI_LNK_10MB|
		    TI_LNK_FULL_DUPLEX|TI_LNK_HALF_DUPLEX|
		    TI_LNK_AUTONEGENB|TI_LNK_ENB);
		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
		    TI_CMD_CODE_NEGOTIATE_BOTH, 0);
		break;
	case IFM_1000_SX:
	case IFM_1000_TX:
		/* Force gigabit; disable the 10/100 link register. */
		CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB|
		    TI_GLNK_RX_FLOWCTL_Y|TI_GLNK_ENB);
		CSR_WRITE_4(sc, TI_GCR_LINK, 0);
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
			TI_SETBIT(sc, TI_GCR_GLINK, TI_GLNK_FULL_DUPLEX);
		}
		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
		    TI_CMD_CODE_NEGOTIATE_GIGABIT, 0);
		break;
	case IFM_100_FX:
	case IFM_10_FL:
	case IFM_100_TX:
	case IFM_10_T:
		/* Force a fixed 10/100 speed and duplex. */
		CSR_WRITE_4(sc, TI_GCR_GLINK, 0);
		CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_ENB|TI_LNK_PREF);
		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_FX ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_100MB);
		} else {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_10MB);
		}
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_FULL_DUPLEX);
		} else {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_HALF_DUPLEX);
		}
		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
		    TI_CMD_CODE_NEGOTIATE_10_100, 0);
		break;
	}

	return(0);
}

/*
 * Report current media status.
 */
static void
ti_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct ti_softc *sc;
	u_int32_t media = 0;

	sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* No link: report only the (in)valid status. */
	if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP) {
		/* Gigabit link: copper vs. fiber from the softc flag. */
		media = CSR_READ_4(sc, TI_GCR_GLINK_STAT);
		if (sc->ti_copper)
			ifmr->ifm_active |= IFM_1000_TX;
		else
			ifmr->ifm_active |= IFM_1000_SX;
		if (media & TI_GLNK_FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	} else if (sc->ti_linkstat == TI_EV_CODE_LINK_UP) {
		/* 10/100 link: decode speed and duplex bits. */
		media = CSR_READ_4(sc, TI_GCR_LINK_STAT);
		if (sc->ti_copper) {
			if (media & TI_LNK_100MB)
				ifmr->ifm_active |= IFM_100_TX;
			if (media & TI_LNK_10MB)
				ifmr->ifm_active |= IFM_10_T;
		} else {
			if (media & TI_LNK_100MB)
				ifmr->ifm_active |= IFM_100_FX;
			if (media & TI_LNK_10MB)
				ifmr->ifm_active |= IFM_10_FL;
		}
		if (media & TI_LNK_FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		if (media & TI_LNK_HALF_DUPLEX)
			ifmr->ifm_active |= IFM_HDX;
	}

	return;
}

/*
 * Handle socket ioctls: addresses, MTU, flags, multicast and media.
 */
static int
ti_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct ti_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	int error = 0;
	struct ti_cmd_desc cmd;

	TI_LOCK(sc);

	switch(command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		/*
		 * XXX(review): only the upper bound is checked; there is
		 * no minimum-MTU validation here.
		 */
		if (ifr->ifr_mtu > TI_JUMBO_MTU)
			error = EINVAL;
		else {
			ifp->if_mtu = ifr->ifr_mtu;
			ti_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->ti_if_flags & IFF_PROMISC)) {
				TI_DO_CMD(TI_CMD_SET_PROMISC_MODE,
				    TI_CMD_CODE_PROMISC_ENB, 0);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->ti_if_flags & IFF_PROMISC) {
				TI_DO_CMD(TI_CMD_SET_PROMISC_MODE,
				    TI_CMD_CODE_PROMISC_DIS, 0);
			} else
				ti_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				ti_stop(sc);
			}
		}
		/* Remember the flags so the next call can detect changes. */
		sc->ti_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			ti_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	TI_UNLOCK(sc);

	return(error);
}

/*
 * TX watchdog: the chip stopped responding within if_timer seconds;
 * reset and reinitialize it.
 */
static void
ti_watchdog(ifp)
	struct ifnet *ifp;
{
	struct ti_softc *sc;

	sc = ifp->if_softc;
	TI_LOCK(sc);

	printf("ti%d: watchdog timeout -- resetting\n", sc->ti_unit);
	ti_stop(sc);
	ti_init(sc);

	ifp->if_oerrors++;
	TI_UNLOCK(sc);

	return;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
ti_stop(sc)
	struct ti_softc *sc;
{
	struct ifnet *ifp;
	struct ti_cmd_desc cmd;

	TI_LOCK(sc);

	ifp = &sc->arpcom.ac_if;

	/* Disable host interrupts. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);
	/*
	 * Tell firmware we're shutting down.
	 */
	TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_DOWN, 0);

	/* Halt and reinitialize. */
	ti_chipinit(sc);
	/* Clear the NIC's local memory above the 8K window. */
	ti_mem(sc, 0x2000, 0x100000 - 0x2000, NULL);
	ti_chipinit(sc);

	/* Free the RX lists. */
	ti_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	ti_free_rx_ring_jumbo(sc);

	/* Free mini RX list. */
	ti_free_rx_ring_mini(sc);

	/* Free TX buffers. */
	ti_free_tx_ring(sc);

	/* Invalidate saved producer/consumer indexes. */
	sc->ti_ev_prodidx.ti_idx = 0;
	sc->ti_return_prodidx.ti_idx = 0;
	sc->ti_tx_considx.ti_idx = 0;
	sc->ti_tx_saved_considx = TI_TXCONS_UNSET;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	TI_UNLOCK(sc);

	return;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
ti_shutdown(dev)
	device_t dev;
{
	struct ti_softc *sc;

	sc = device_get_softc(dev);
	TI_LOCK(sc);
	/* Halt the chip; no need to free rings at shutdown time. */
	ti_chipinit(sc);
	TI_UNLOCK(sc);

	return;
}

/* NOTE(review): the following lines are unified-diff residue from the
 * original commit (r71961 -> r71962); they are not C source. */
Index: head/sys/dev/usb/if_aue.c
===================================================================
--- head/sys/dev/usb/if_aue.c	(revision 71961)
+++ head/sys/dev/usb/if_aue.c	(revision 71962)
@@ -1,1571 +1,1570 @@
/*
 * Copyright (c) 1997, 1998, 1999, 2000
 *	Bill Paul <wpaul@ee.columbia.edu>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * ADMtek AN986 Pegasus USB to ethernet driver. Datasheet is available
 * from http://www.admtek.com.tw.
 *
 * Written by Bill Paul
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The Pegasus chip uses four USB "endpoints" to provide 10/100 ethernet
 * support: the control endpoint for reading/writing registers, burst
 * read endpoint for packet reception, burst write for packet transmission
 * and one for "interrupts." The chip uses the same RX filter scheme
 * as the other ADMtek ethernet parts: one perfect filter entry for the
 * the station address and a 64-bit multicast hash table. The chip supports
 * both MII and HomePNA attachments.
 *
 * Since the maximum data transfer speed of USB is supposed to be 12Mbps,
 * you're never really going to get 100Mbps speeds from this device. I
 * think the idea is to allow the device to connect to 10 or 100Mbps
 * networks, not necessarily to provide 100Mbps performance. Also, since
 * the controller uses an external PHY chip, it's possible that board
 * designers might simply choose a 10Mbps PHY.
 *
 * Registers are accessed using usbd_do_request(). Packet transfers are
 * done using usbd_transfer() and friends.
 */

/* NOTE(review): the <header> names on the following #include lines were
 * lost during extraction; restore them from if_aue.c revision 71962. */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

MODULE_DEPEND(aue, miibus, 1, 1, 1);

/* "controller miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#ifndef lint
static const char rcsid[] =
  "$FreeBSD$";
#endif

/*
 * Various supported device vendors/products.
 */
Static struct aue_type aue_devs[] = {
	{ USB_VENDOR_ADMTEK, USB_PRODUCT_ADMTEK_PEGASUS },
	{ USB_VENDOR_BILLIONTON, USB_PRODUCT_BILLIONTON_USB100 },
	{ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_LUATX1 },
	{ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DSB650 },
	{ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DSB650TX },
	{ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DSB650TX_PNA },
	{ USB_VENDOR_SMC, USB_PRODUCT_SMC_2202USB },
	{ USB_VENDOR_LINKSYS, USB_PRODUCT_LINKSYS_USB100TX },
	{ USB_VENDOR_LINKSYS, USB_PRODUCT_LINKSYS_USB10TA },
	{ USB_VENDOR_COREGA, USB_PRODUCT_COREGA_FETHER_USB_TX },
	{ USB_VENDOR_KINGSTON, USB_PRODUCT_KINGSTON_KNU101TX, },
	{ 0, 0 }
};

Static struct usb_qdat aue_qdat;

Static int aue_match		__P((device_t));
Static int aue_attach		__P((device_t));
Static int aue_detach		__P((device_t));
Static int aue_tx_list_init	__P((struct aue_softc *));
Static int aue_rx_list_init	__P((struct aue_softc *));
Static int aue_newbuf		__P((struct aue_softc *,
				    struct aue_chain *, struct mbuf *));
Static int aue_encap		__P((struct aue_softc *, struct mbuf *, int));
#ifdef AUE_INTR_PIPE
Static void aue_intr		__P((usbd_xfer_handle,
				    usbd_private_handle, usbd_status));
#endif
Static void aue_rxeof		__P((usbd_xfer_handle,
				    usbd_private_handle, usbd_status));
Static void aue_txeof		__P((usbd_xfer_handle,
				    usbd_private_handle, usbd_status));
Static void aue_tick		__P((void *));
Static void aue_rxstart		__P((struct ifnet *));
Static void aue_start		__P((struct ifnet *));
Static int aue_ioctl		__P((struct ifnet *, u_long, caddr_t));
Static void aue_init		__P((void *));
Static void aue_stop		__P((struct aue_softc *));
Static void aue_watchdog	__P((struct ifnet *));
Static void aue_shutdown	__P((device_t));
Static int aue_ifmedia_upd	__P((struct ifnet *));
Static void aue_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));

Static void aue_eeprom_getword	__P((struct aue_softc *, int, u_int16_t *));
Static void aue_read_eeprom	__P((struct aue_softc *, caddr_t, int,
				    int, int));
Static int aue_miibus_readreg	__P((device_t, int, int));
Static int aue_miibus_writereg	__P((device_t, int, int, int));
Static void aue_miibus_statchg	__P((device_t));

Static void aue_setmulti	__P((struct aue_softc *));
Static u_int32_t aue_crc	__P((caddr_t));
Static void aue_reset		__P((struct aue_softc *));

Static int csr_read_1		__P((struct aue_softc *, int));
Static int csr_write_1		__P((struct aue_softc *, int, int));
Static int csr_read_2		__P((struct aue_softc *, int));
Static int csr_write_2		__P((struct aue_softc *, int, int));

Static device_method_t aue_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		aue_match),
	DEVMETHOD(device_attach,	aue_attach),
	DEVMETHOD(device_detach,	aue_detach),
	DEVMETHOD(device_shutdown,	aue_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	aue_miibus_readreg),
	DEVMETHOD(miibus_writereg,	aue_miibus_writereg),
	DEVMETHOD(miibus_statchg,	aue_miibus_statchg),

	{ 0, 0 }
};

Static driver_t aue_driver = {
	"aue",
	aue_methods,
	sizeof(struct aue_softc)
};

Static devclass_t aue_devclass;

DRIVER_MODULE(if_aue, uhub, aue_driver, aue_devclass, usbd_driver_load, 0);
DRIVER_MODULE(miibus, aue, miibus_driver, miibus_devclass, 0, 0);

#define AUE_SETBIT(sc, reg, x)				\
	csr_write_1(sc, reg, csr_read_1(sc, reg) | (x))

#define AUE_CLRBIT(sc, reg, x)				\
	csr_write_1(sc, reg, csr_read_1(sc, reg) & ~(x))

/*
 * Read one byte from a Pegasus register via a vendor control request.
 * Returns 0 if the device is gone or the request fails.
 */
Static int
csr_read_1(sc, reg)
	struct aue_softc *sc;
	int reg;
{
	usb_device_request_t req;
	usbd_status err;
	u_int8_t val = 0;

	if (sc->aue_gone)
		return(0);

	AUE_LOCK(sc);

	req.bmRequestType = UT_READ_VENDOR_DEVICE;
	req.bRequest = AUE_UR_READREG;
	USETW(req.wValue, 0);
	USETW(req.wIndex, reg);
	USETW(req.wLength, 1);

	err = usbd_do_request_flags(sc->aue_udev,
	    &req, &val, USBD_NO_TSLEEP, NULL);

	AUE_UNLOCK(sc);

	if (err)
		return(0);

	return(val);
}

/*
 * Read a 16-bit Pegasus register.  Returns 0 if the device is gone
 * or the request fails.
 */
Static int
csr_read_2(sc, reg)
	struct aue_softc *sc;
	int reg;
{
	usb_device_request_t req;
	usbd_status err;
	u_int16_t val = 0;

	if (sc->aue_gone)
		return(0);

	AUE_LOCK(sc);

	req.bmRequestType = UT_READ_VENDOR_DEVICE;
	req.bRequest = AUE_UR_READREG;
	USETW(req.wValue, 0);
	USETW(req.wIndex, reg);
	USETW(req.wLength, 2);

	err = usbd_do_request_flags(sc->aue_udev,
	    &req, &val, USBD_NO_TSLEEP, NULL);

	AUE_UNLOCK(sc);

	if (err)
		return(0);

	return(val);
}

/*
 * Write one byte to a Pegasus register.  Returns -1 on transfer error,
 * 0 otherwise (including when the device is already gone).
 */
Static int
csr_write_1(sc, reg, val)
	struct aue_softc *sc;
	int reg, val;
{
	usb_device_request_t req;
	usbd_status err;

	if (sc->aue_gone)
		return(0);

	AUE_LOCK(sc);

	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
	req.bRequest = AUE_UR_WRITEREG;
	USETW(req.wValue, val);
	USETW(req.wIndex, reg);
	USETW(req.wLength, 1);

	err = usbd_do_request_flags(sc->aue_udev,
	    &req, &val, USBD_NO_TSLEEP, NULL);

	AUE_UNLOCK(sc);

	if (err)
		return(-1);

	return(0);
}

/*
 * Write a 16-bit Pegasus register.  Returns -1 on transfer error,
 * 0 otherwise (including when the device is already gone).
 */
Static int
csr_write_2(sc, reg, val)
	struct aue_softc *sc;
	int reg, val;
{
	usb_device_request_t req;
	usbd_status err;

	if (sc->aue_gone)
		return(0);

	AUE_LOCK(sc);

	req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
	req.bRequest = AUE_UR_WRITEREG;
	USETW(req.wValue, val);
	USETW(req.wIndex, reg);
	USETW(req.wLength, 2);

	err = usbd_do_request_flags(sc->aue_udev,
	    &req, &val, USBD_NO_TSLEEP, NULL);

	AUE_UNLOCK(sc);

	if (err)
		return(-1);

	return(0);
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
*/ Static void aue_eeprom_getword(sc, addr, dest) struct aue_softc *sc; int addr; u_int16_t *dest; { register int i; u_int16_t word = 0; csr_write_1(sc, AUE_EE_REG, addr); csr_write_1(sc, AUE_EE_CTL, AUE_EECTL_READ); for (i = 0; i < AUE_TIMEOUT; i++) { if (csr_read_1(sc, AUE_EE_CTL) & AUE_EECTL_DONE) break; } if (i == AUE_TIMEOUT) { printf("aue%d: EEPROM read timed out\n", sc->aue_unit); } word = csr_read_2(sc, AUE_EE_DATA); *dest = word; return; } /* * Read a sequence of words from the EEPROM. */ Static void aue_read_eeprom(sc, dest, off, cnt, swap) struct aue_softc *sc; caddr_t dest; int off; int cnt; int swap; { int i; u_int16_t word = 0, *ptr; for (i = 0; i < cnt; i++) { aue_eeprom_getword(sc, off + i, &word); ptr = (u_int16_t *)(dest + (i * 2)); if (swap) *ptr = ntohs(word); else *ptr = word; } return; } Static int aue_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct aue_softc *sc; int i; u_int16_t val = 0; sc = device_get_softc(dev); /* * The Am79C901 HomePNA PHY actually contains * two transceivers: a 1Mbps HomePNA PHY and a * 10Mbps full/half duplex ethernet PHY with * NWAY autoneg. However in the ADMtek adapter, * only the 1Mbps PHY is actually connected to * anything, so we ignore the 10Mbps one. It * happens to be configured for MII address 3, * so we filter that out. 
*/ if (sc->aue_info->aue_vid == USB_VENDOR_ADMTEK && sc->aue_info->aue_did == USB_PRODUCT_ADMTEK_PEGASUS) { if (phy == 3) return(0); #ifdef notdef if (phy != 1) return(0); #endif } csr_write_1(sc, AUE_PHY_ADDR, phy); csr_write_1(sc, AUE_PHY_CTL, reg|AUE_PHYCTL_READ); for (i = 0; i < AUE_TIMEOUT; i++) { if (csr_read_1(sc, AUE_PHY_CTL) & AUE_PHYCTL_DONE) break; } if (i == AUE_TIMEOUT) { printf("aue%d: MII read timed out\n", sc->aue_unit); } val = csr_read_2(sc, AUE_PHY_DATA); return(val); } Static int aue_miibus_writereg(dev, phy, reg, data) device_t dev; int phy, reg, data; { struct aue_softc *sc; int i; if (phy == 3) return(0); sc = device_get_softc(dev); csr_write_2(sc, AUE_PHY_DATA, data); csr_write_1(sc, AUE_PHY_ADDR, phy); csr_write_1(sc, AUE_PHY_CTL, reg|AUE_PHYCTL_WRITE); for (i = 0; i < AUE_TIMEOUT; i++) { if (csr_read_1(sc, AUE_PHY_CTL) & AUE_PHYCTL_DONE) break; } if (i == AUE_TIMEOUT) { printf("aue%d: MII read timed out\n", sc->aue_unit); } return(0); } Static void aue_miibus_statchg(dev) device_t dev; { struct aue_softc *sc; struct mii_data *mii; sc = device_get_softc(dev); mii = device_get_softc(sc->aue_miibus); AUE_CLRBIT(sc, AUE_CTL0, AUE_CTL0_RX_ENB|AUE_CTL0_TX_ENB); if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) { AUE_SETBIT(sc, AUE_CTL1, AUE_CTL1_SPEEDSEL); } else { AUE_CLRBIT(sc, AUE_CTL1, AUE_CTL1_SPEEDSEL); } if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { AUE_SETBIT(sc, AUE_CTL1, AUE_CTL1_DUPLEX); } else { AUE_CLRBIT(sc, AUE_CTL1, AUE_CTL1_DUPLEX); } AUE_SETBIT(sc, AUE_CTL0, AUE_CTL0_RX_ENB|AUE_CTL0_TX_ENB); /* * Set the LED modes on the LinkSys adapter. * This turns on the 'dual link LED' bin in the auxmode * register of the Broadcom PHY. 
*/ if ((sc->aue_info->aue_vid == USB_VENDOR_LINKSYS && sc->aue_info->aue_did == USB_PRODUCT_LINKSYS_USB100TX) || (sc->aue_info->aue_vid == USB_VENDOR_LINKSYS && sc->aue_info->aue_did == USB_PRODUCT_LINKSYS_USB10TA) || (sc->aue_info->aue_vid == USB_VENDOR_DLINK && sc->aue_info->aue_did == USB_PRODUCT_DLINK_DSB650TX) || (sc->aue_info->aue_vid == USB_VENDOR_DLINK && sc->aue_info->aue_did == USB_PRODUCT_DLINK_DSB650)) { u_int16_t auxmode; auxmode = aue_miibus_readreg(dev, 0, 0x1b); aue_miibus_writereg(dev, 0, 0x1b, auxmode | 0x04); } return; } #define AUE_POLY 0xEDB88320 #define AUE_BITS 6 Static u_int32_t aue_crc(addr) caddr_t addr; { u_int32_t idx, bit, data, crc; /* Compute CRC for the address value. */ crc = 0xFFFFFFFF; /* initial value */ for (idx = 0; idx < 6; idx++) { for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) crc = (crc >> 1) ^ (((crc ^ data) & 1) ? AUE_POLY : 0); } return (crc & ((1 << AUE_BITS) - 1)); } Static void aue_setmulti(sc) struct aue_softc *sc; { struct ifnet *ifp; struct ifmultiaddr *ifma; u_int32_t h = 0, i; ifp = &sc->arpcom.ac_if; if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { AUE_SETBIT(sc, AUE_CTL0, AUE_CTL0_ALLMULTI); return; } AUE_CLRBIT(sc, AUE_CTL0, AUE_CTL0_ALLMULTI); /* first, zot all the existing hash bits */ for (i = 0; i < 8; i++) csr_write_1(sc, AUE_MAR0 + i, 0); /* now program new ones */ - for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; - ifma = ifma->ifma_link.le_next) { + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = aue_crc(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); AUE_SETBIT(sc, AUE_MAR + (h >> 3), 1 << (h & 0x7)); } return; } Static void aue_reset(sc) struct aue_softc *sc; { register int i; AUE_SETBIT(sc, AUE_CTL1, AUE_CTL1_RESETMAC); for (i = 0; i < AUE_TIMEOUT; i++) { if (!(csr_read_1(sc, AUE_CTL1) & AUE_CTL1_RESETMAC)) break; } if (i == AUE_TIMEOUT) printf("aue%d: reset failed\n", sc->aue_unit); /* * The PHY(s) 
attached to the Pegasus chip may be held * in reset until we flip on the GPIO outputs. Make sure * to set the GPIO pins high so that the PHY(s) will * be enabled. * * Note: We force all of the GPIO pins low first, *then* * enable the ones we want. */ csr_write_1(sc, AUE_GPIO0, AUE_GPIO_OUT0|AUE_GPIO_SEL0); csr_write_1(sc, AUE_GPIO0, AUE_GPIO_OUT0|AUE_GPIO_SEL0|AUE_GPIO_SEL1); /* Grrr. LinkSys has to be different from everyone else. */ if ((sc->aue_info->aue_vid == USB_VENDOR_LINKSYS && sc->aue_info->aue_did == USB_PRODUCT_LINKSYS_USB100TX) || (sc->aue_info->aue_vid == USB_VENDOR_LINKSYS && sc->aue_info->aue_did == USB_PRODUCT_LINKSYS_USB10TA) || (sc->aue_info->aue_vid == USB_VENDOR_DLINK && sc->aue_info->aue_did == USB_PRODUCT_DLINK_DSB650TX) || (sc->aue_info->aue_vid == USB_VENDOR_DLINK && sc->aue_info->aue_did == USB_PRODUCT_DLINK_DSB650)) { csr_write_1(sc, AUE_GPIO0, AUE_GPIO_SEL0|AUE_GPIO_SEL1); csr_write_1(sc, AUE_GPIO0, AUE_GPIO_SEL0|AUE_GPIO_SEL1| AUE_GPIO_OUT0); } /* Wait a little while for the chip to get its brains in order. */ DELAY(10000); return; } /* * Probe for a Pegasus chip. */ USB_MATCH(aue) { USB_MATCH_START(aue, uaa); struct aue_type *t; if (!uaa->iface) return(UMATCH_NONE); t = aue_devs; while(t->aue_vid) { if (uaa->vendor == t->aue_vid && uaa->product == t->aue_did) { return(UMATCH_VENDOR_PRODUCT); } t++; } return(UMATCH_NONE); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. 
 */
USB_ATTACH(aue)
{
	USB_ATTACH_START(aue, sc, uaa);
	char			devinfo[1024];
	u_char			eaddr[ETHER_ADDR_LEN];
	struct ifnet		*ifp;
	usb_interface_descriptor_t	*id;
	usb_endpoint_descriptor_t	*ed;
	int			i;
	struct aue_type		*t;

	bzero(sc, sizeof(struct aue_softc));
	sc->aue_iface = uaa->iface;
	sc->aue_udev = uaa->device;
	sc->aue_unit = device_get_unit(self);

	if (usbd_set_config_no(sc->aue_udev, AUE_CONFIG_NO, 0)) {
		printf("aue%d: getting interface handle failed\n",
		    sc->aue_unit);
		USB_ATTACH_ERROR_RETURN;
	}

	/* Record which table entry matched; quirks key off aue_info later. */
	t = aue_devs;
	while(t->aue_vid) {
		if (uaa->vendor == t->aue_vid &&
		    uaa->product == t->aue_did) {
			sc->aue_info = t;
			break;
		}
		t++;
	}

	id = usbd_get_interface_descriptor(uaa->iface);

	usbd_devinfo(uaa->device, 0, devinfo);
	device_set_desc_copy(self, devinfo);
	printf("%s: %s\n", USBDEVNAME(self), devinfo);

	/* Find endpoints. */
	for (i = 0; i < id->bNumEndpoints; i++) {
		ed = usbd_interface2endpoint_descriptor(uaa->iface, i);
		if (!ed) {
			printf("aue%d: couldn't get ep %d\n",
			    sc->aue_unit, i);
			USB_ATTACH_ERROR_RETURN;
		}
		if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
		    (ed->bmAttributes & UE_XFERTYPE) == UE_BULK) {
			sc->aue_ed[AUE_ENDPT_RX] = ed->bEndpointAddress;
		} else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT &&
		    (ed->bmAttributes & UE_XFERTYPE) == UE_BULK) {
			sc->aue_ed[AUE_ENDPT_TX] = ed->bEndpointAddress;
		} else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
		    (ed->bmAttributes & UE_XFERTYPE) == UE_INTERRUPT) {
			sc->aue_ed[AUE_ENDPT_INTR] = ed->bEndpointAddress;
		}
	}

	mtx_init(&sc->aue_mtx, device_get_nameunit(self),
	    MTX_DEF | MTX_RECURSE);
	AUE_LOCK(sc);

	/* Reset the adapter. */
	aue_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	aue_read_eeprom(sc, (caddr_t)&eaddr, 0, 3, 0);

	/*
	 * A Pegasus chip was detected. Inform the world.
	 */
	printf("aue%d: Ethernet address: %6D\n", sc->aue_unit, eaddr, ":");

	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_unit = sc->aue_unit;
	ifp->if_name = "aue";
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = aue_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = aue_start;
	ifp->if_watchdog = aue_watchdog;
	ifp->if_init = aue_init;
	ifp->if_baudrate = 10000000;
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;

	/*
	 * Do MII setup.
	 * NOTE: Doing this causes child devices to be attached to us,
	 * which we would normally disconnect at in the detach routine
	 * using device_delete_child(). However the USB code is set up
	 * such that when this driver is removed, all children devices
	 * are removed as well. In effect, the USB code ends up detaching
	 * all of our children for us, so we don't have to do is ourselves
	 * in aue_detach(). It's important to point this out since if
	 * we *do* try to detach the child devices ourselves, we will
	 * end up getting the children deleted twice, which will crash
	 * the system.
	 */
	if (mii_phy_probe(self, &sc->aue_miibus,
	    aue_ifmedia_upd, aue_ifmedia_sts)) {
		printf("aue%d: MII without any PHY!\n", sc->aue_unit);
		AUE_UNLOCK(sc);
		mtx_destroy(&sc->aue_mtx);
		USB_ATTACH_ERROR_RETURN;
	}

	aue_qdat.ifp = ifp;
	aue_qdat.if_rxstart = aue_rxstart;

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
	callout_handle_init(&sc->aue_stat_ch);
	usb_register_netisr();
	sc->aue_gone = 0;

	AUE_UNLOCK(sc);
	USB_ATTACH_SUCCESS_RETURN;
}

/*
 * Detach: mark the device gone, cancel the stat timeout, detach
 * from the network stack and abort any in-flight USB transfers.
 */
Static int
aue_detach(dev)
	device_t dev;
{
	struct aue_softc	*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	AUE_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	sc->aue_gone = 1;
	untimeout(aue_tick, sc, sc->aue_stat_ch);
	ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);

	if (sc->aue_ep[AUE_ENDPT_TX] != NULL)
		usbd_abort_pipe(sc->aue_ep[AUE_ENDPT_TX]);
	if (sc->aue_ep[AUE_ENDPT_RX] != NULL)
		usbd_abort_pipe(sc->aue_ep[AUE_ENDPT_RX]);
#ifdef AUE_INTR_PIPE
	if (sc->aue_ep[AUE_ENDPT_INTR] != NULL)
		usbd_abort_pipe(sc->aue_ep[AUE_ENDPT_INTR]);
#endif

	AUE_UNLOCK(sc);
	mtx_destroy(&sc->aue_mtx);

	return(0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
Static int
aue_newbuf(sc, c, m)
	struct aue_softc	*sc;
	struct aue_chain	*c;
	struct mbuf		*m;
{
	struct mbuf		*m_new = NULL;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("aue%d: no memory for rx list "
			    "-- packet dropped!\n", sc->aue_unit);
			return(ENOBUFS);
		}

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			printf("aue%d: no memory for rx list "
			    "-- packet dropped!\n", sc->aue_unit);
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		/* Recycle the caller's mbuf; reset its length and data ptr. */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/* Leave room so the IP header ends up longword-aligned. */
	m_adj(m_new, ETHER_ALIGN);
	c->aue_mbuf = m_new;

	return(0);
}

/* Set up the RX descriptor chain: attach mbufs and USB transfers. */
Static int
aue_rx_list_init(sc)
	struct aue_softc *sc;
{
	struct aue_cdata	*cd;
	struct aue_chain	*c;
	int			i;

	cd = &sc->aue_cdata;
	for (i = 0; i < AUE_RX_LIST_CNT; i++) {
		c = &cd->aue_rx_chain[i];
		c->aue_sc = sc;
		c->aue_idx = i;
		if (aue_newbuf(sc, c, NULL) == ENOBUFS)
			return(ENOBUFS);
		if (c->aue_xfer == NULL) {
			c->aue_xfer = usbd_alloc_xfer(sc->aue_udev);
			if (c->aue_xfer == NULL)
				return(ENOBUFS);
		}
	}

	return(0);
}

/* Set up the TX descriptor chain: allocate transfers and bounce buffers. */
Static int
aue_tx_list_init(sc)
	struct aue_softc *sc;
{
	struct aue_cdata	*cd;
	struct
aue_chain *c; int i; cd = &sc->aue_cdata; for (i = 0; i < AUE_TX_LIST_CNT; i++) { c = &cd->aue_tx_chain[i]; c->aue_sc = sc; c->aue_idx = i; c->aue_mbuf = NULL; if (c->aue_xfer == NULL) { c->aue_xfer = usbd_alloc_xfer(sc->aue_udev); if (c->aue_xfer == NULL) return(ENOBUFS); } c->aue_buf = malloc(AUE_BUFSZ, M_USBDEV, M_NOWAIT); if (c->aue_buf == NULL) return(ENOBUFS); } return(0); } #ifdef AUE_INTR_PIPE Static void aue_intr(xfer, priv, status) usbd_xfer_handle xfer; usbd_private_handle priv; usbd_status status; { struct aue_softc *sc; struct ifnet *ifp; struct aue_intrpkt *p; sc = priv; AUE_LOCK(sc); ifp = &sc->arpcom.ac_if; if (!(ifp->if_flags & IFF_RUNNING)) { AUE_UNLOCK(sc); return; } if (status != USBD_NORMAL_COMPLETION) { if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) { AUE_UNLOCK(sc); return; } printf("aue%d: usb error on intr: %s\n", sc->aue_unit, usbd_errstr(status)); if (status == USBD_STALLED) usbd_clear_endpoint_stall(sc->aue_ep[AUE_ENDPT_RX]); AUE_UNLOCK(sc); return; } usbd_get_xfer_status(xfer, NULL, (void **)&p, NULL, NULL); if (p->aue_txstat0) ifp->if_oerrors++; if (p->aue_txstat0 & (AUE_TXSTAT0_LATECOLL & AUE_TXSTAT0_EXCESSCOLL)) ifp->if_collisions++; AUE_UNLOCK(sc); return; } #endif Static void aue_rxstart(ifp) struct ifnet *ifp; { struct aue_softc *sc; struct aue_chain *c; sc = ifp->if_softc; AUE_LOCK(sc); c = &sc->aue_cdata.aue_rx_chain[sc->aue_cdata.aue_rx_prod]; if (aue_newbuf(sc, c, NULL) == ENOBUFS) { ifp->if_ierrors++; AUE_UNLOCK(sc); return; } /* Setup new transfer. */ usbd_setup_xfer(c->aue_xfer, sc->aue_ep[AUE_ENDPT_RX], c, mtod(c->aue_mbuf, char *), AUE_BUFSZ, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, aue_rxeof); usbd_transfer(c->aue_xfer); AUE_UNLOCK(sc); return; } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. 
*/ Static void aue_rxeof(xfer, priv, status) usbd_xfer_handle xfer; usbd_private_handle priv; usbd_status status; { struct aue_softc *sc; struct aue_chain *c; struct mbuf *m; struct ifnet *ifp; int total_len = 0; struct aue_rxpkt r; c = priv; sc = c->aue_sc; if (sc->aue_gone) return; AUE_LOCK(sc); ifp = &sc->arpcom.ac_if; if (!(ifp->if_flags & IFF_RUNNING)) { AUE_UNLOCK(sc); return; } if (status != USBD_NORMAL_COMPLETION) { if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) { AUE_UNLOCK(sc); return; } printf("aue%d: usb error on rx: %s\n", sc->aue_unit, usbd_errstr(status)); if (status == USBD_STALLED) usbd_clear_endpoint_stall(sc->aue_ep[AUE_ENDPT_RX]); goto done; } usbd_get_xfer_status(xfer, NULL, NULL, &total_len, NULL); if (total_len <= 4 + ETHER_CRC_LEN) { ifp->if_ierrors++; goto done; } m = c->aue_mbuf; bcopy(mtod(m, char *) + total_len - 4, (char *)&r, sizeof(r)); /* Turn off all the non-error bits in the rx status word. */ r.aue_rxstat &= AUE_RXSTAT_MASK; if (r.aue_rxstat) { ifp->if_ierrors++; goto done; } /* No errors; receive the packet. */ total_len -= (4 + ETHER_CRC_LEN); ifp->if_ipackets++; m->m_pkthdr.rcvif = (struct ifnet *)&aue_qdat; m->m_pkthdr.len = m->m_len = total_len; /* Put the packet on the special USB input queue. */ usb_ether_input(m); AUE_UNLOCK(sc); return; done: /* Setup new transfer. */ usbd_setup_xfer(xfer, sc->aue_ep[AUE_ENDPT_RX], c, mtod(c->aue_mbuf, char *), AUE_BUFSZ, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, aue_rxeof); usbd_transfer(xfer); AUE_UNLOCK(sc); return; } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. 
*/ Static void aue_txeof(xfer, priv, status) usbd_xfer_handle xfer; usbd_private_handle priv; usbd_status status; { struct aue_softc *sc; struct aue_chain *c; struct ifnet *ifp; usbd_status err; c = priv; sc = c->aue_sc; AUE_LOCK(sc); ifp = &sc->arpcom.ac_if; if (status != USBD_NORMAL_COMPLETION) { if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) { AUE_UNLOCK(sc); return; } printf("aue%d: usb error on tx: %s\n", sc->aue_unit, usbd_errstr(status)); if (status == USBD_STALLED) usbd_clear_endpoint_stall(sc->aue_ep[AUE_ENDPT_TX]); AUE_UNLOCK(sc); return; } ifp->if_timer = 0; ifp->if_flags &= ~IFF_OACTIVE; usbd_get_xfer_status(c->aue_xfer, NULL, NULL, NULL, &err); if (c->aue_mbuf != NULL) { c->aue_mbuf->m_pkthdr.rcvif = ifp; usb_tx_done(c->aue_mbuf); c->aue_mbuf = NULL; } if (err) ifp->if_oerrors++; else ifp->if_opackets++; AUE_UNLOCK(sc); return; } Static void aue_tick(xsc) void *xsc; { struct aue_softc *sc; struct ifnet *ifp; struct mii_data *mii; sc = xsc; if (sc == NULL) return; AUE_LOCK(sc); ifp = &sc->arpcom.ac_if; mii = device_get_softc(sc->aue_miibus); if (mii == NULL) { AUE_UNLOCK(sc); return; } mii_tick(mii); if (!sc->aue_link) { mii_pollstat(mii); if (mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) sc->aue_link++; if (ifp->if_snd.ifq_head != NULL) aue_start(ifp); } sc->aue_stat_ch = timeout(aue_tick, sc, hz); AUE_UNLOCK(sc); return; } Static int aue_encap(sc, m, idx) struct aue_softc *sc; struct mbuf *m; int idx; { int total_len; struct aue_chain *c; usbd_status err; c = &sc->aue_cdata.aue_tx_chain[idx]; /* * Copy the mbuf data into a contiguous buffer, leaving two * bytes at the beginning to hold the frame length. 
*/ m_copydata(m, 0, m->m_pkthdr.len, c->aue_buf + 2); c->aue_mbuf = m; total_len = m->m_pkthdr.len + 2; /* * The ADMtek documentation says that the packet length is * supposed to be specified in the first two bytes of the * transfer, however it actually seems to ignore this info * and base the frame size on the bulk transfer length. */ c->aue_buf[0] = (u_int8_t)m->m_pkthdr.len; c->aue_buf[1] = (u_int8_t)(m->m_pkthdr.len >> 8); usbd_setup_xfer(c->aue_xfer, sc->aue_ep[AUE_ENDPT_TX], c, c->aue_buf, total_len, USBD_FORCE_SHORT_XFER, 10000, aue_txeof); /* Transmit */ err = usbd_transfer(c->aue_xfer); if (err != USBD_IN_PROGRESS) { aue_stop(sc); return(EIO); } sc->aue_cdata.aue_tx_cnt++; return(0); } Static void aue_start(ifp) struct ifnet *ifp; { struct aue_softc *sc; struct mbuf *m_head = NULL; sc = ifp->if_softc; AUE_LOCK(sc); if (!sc->aue_link) { AUE_UNLOCK(sc); return; } if (ifp->if_flags & IFF_OACTIVE) { AUE_UNLOCK(sc); return; } IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) { AUE_UNLOCK(sc); return; } if (aue_encap(sc, m_head, 0)) { IF_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; AUE_UNLOCK(sc); return; } /* * If there's a BPF listener, bounce a copy of this frame * to him. */ if (ifp->if_bpf) bpf_mtap(ifp, m_head); ifp->if_flags |= IFF_OACTIVE; /* * Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; AUE_UNLOCK(sc); return; } Static void aue_init(xsc) void *xsc; { struct aue_softc *sc = xsc; struct ifnet *ifp = &sc->arpcom.ac_if; struct mii_data *mii; struct aue_chain *c; usbd_status err; int i; AUE_LOCK(sc); if (ifp->if_flags & IFF_RUNNING) { AUE_UNLOCK(sc); return; } /* * Cancel pending I/O and free all RX/TX buffers. */ aue_reset(sc); mii = device_get_softc(sc->aue_miibus); /* Set MAC address */ for (i = 0; i < ETHER_ADDR_LEN; i++) csr_write_1(sc, AUE_PAR0 + i, sc->arpcom.ac_enaddr[i]); /* If we want promiscuous mode, set the allframes bit. 
*/ if (ifp->if_flags & IFF_PROMISC) { AUE_SETBIT(sc, AUE_CTL2, AUE_CTL2_RX_PROMISC); } else { AUE_CLRBIT(sc, AUE_CTL2, AUE_CTL2_RX_PROMISC); } /* Init TX ring. */ if (aue_tx_list_init(sc) == ENOBUFS) { printf("aue%d: tx list init failed\n", sc->aue_unit); AUE_UNLOCK(sc); return; } /* Init RX ring. */ if (aue_rx_list_init(sc) == ENOBUFS) { printf("aue%d: rx list init failed\n", sc->aue_unit); AUE_UNLOCK(sc); return; } #ifdef AUE_INTR_PIPE sc->aue_cdata.aue_ibuf = malloc(AUE_INTR_PKTLEN, M_USBDEV, M_NOWAIT); #endif /* Load the multicast filter. */ aue_setmulti(sc); /* Enable RX and TX */ csr_write_1(sc, AUE_CTL0, AUE_CTL0_RXSTAT_APPEND|AUE_CTL0_RX_ENB); AUE_SETBIT(sc, AUE_CTL0, AUE_CTL0_TX_ENB); AUE_SETBIT(sc, AUE_CTL2, AUE_CTL2_EP3_CLR); mii_mediachg(mii); /* Open RX and TX pipes. */ err = usbd_open_pipe(sc->aue_iface, sc->aue_ed[AUE_ENDPT_RX], USBD_EXCLUSIVE_USE, &sc->aue_ep[AUE_ENDPT_RX]); if (err) { printf("aue%d: open rx pipe failed: %s\n", sc->aue_unit, usbd_errstr(err)); AUE_UNLOCK(sc); return; } err = usbd_open_pipe(sc->aue_iface, sc->aue_ed[AUE_ENDPT_TX], USBD_EXCLUSIVE_USE, &sc->aue_ep[AUE_ENDPT_TX]); if (err) { printf("aue%d: open tx pipe failed: %s\n", sc->aue_unit, usbd_errstr(err)); AUE_UNLOCK(sc); return; } #ifdef AUE_INTR_PIPE err = usbd_open_pipe_intr(sc->aue_iface, sc->aue_ed[AUE_ENDPT_INTR], USBD_SHORT_XFER_OK, &sc->aue_ep[AUE_ENDPT_INTR], sc, sc->aue_cdata.aue_ibuf, AUE_INTR_PKTLEN, aue_intr, AUE_INTR_INTERVAL); if (err) { printf("aue%d: open intr pipe failed: %s\n", sc->aue_unit, usbd_errstr(err)); AUE_UNLOCK(sc); return; } #endif /* Start up the receive pipe. 
*/ for (i = 0; i < AUE_RX_LIST_CNT; i++) { c = &sc->aue_cdata.aue_rx_chain[i]; usbd_setup_xfer(c->aue_xfer, sc->aue_ep[AUE_ENDPT_RX], c, mtod(c->aue_mbuf, char *), AUE_BUFSZ, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, aue_rxeof); usbd_transfer(c->aue_xfer); } ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; sc->aue_stat_ch = timeout(aue_tick, sc, hz); AUE_UNLOCK(sc); return; } /* * Set media options. */ Static int aue_ifmedia_upd(ifp) struct ifnet *ifp; { struct aue_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->aue_miibus); sc->aue_link = 0; if (mii->mii_instance) { struct mii_softc *miisc; for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; miisc = LIST_NEXT(miisc, mii_list)) mii_phy_reset(miisc); } mii_mediachg(mii); return(0); } /* * Report current media status. */ Static void aue_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct aue_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->aue_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; return; } Static int aue_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct aue_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int error = 0; AUE_LOCK(sc); switch(command) { case SIOCSIFADDR: case SIOCGIFADDR: case SIOCSIFMTU: error = ether_ioctl(ifp, command, data); break; case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->aue_if_flags & IFF_PROMISC)) { AUE_SETBIT(sc, AUE_CTL2, AUE_CTL2_RX_PROMISC); } else if (ifp->if_flags & IFF_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->aue_if_flags & IFF_PROMISC) { AUE_CLRBIT(sc, AUE_CTL2, AUE_CTL2_RX_PROMISC); } else if (!(ifp->if_flags & IFF_RUNNING)) aue_init(sc); } else { if (ifp->if_flags & IFF_RUNNING) aue_stop(sc); } sc->aue_if_flags = ifp->if_flags; error = 0; break; case 
SIOCADDMULTI: case SIOCDELMULTI: aue_setmulti(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->aue_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; default: error = EINVAL; break; } AUE_UNLOCK(sc); return(error); } Static void aue_watchdog(ifp) struct ifnet *ifp; { struct aue_softc *sc; struct aue_chain *c; usbd_status stat; sc = ifp->if_softc; AUE_LOCK(sc); ifp->if_oerrors++; printf("aue%d: watchdog timeout\n", sc->aue_unit); c = &sc->aue_cdata.aue_tx_chain[0]; usbd_get_xfer_status(c->aue_xfer, NULL, NULL, NULL, &stat); aue_txeof(c->aue_xfer, c, stat); if (ifp->if_snd.ifq_head != NULL) aue_start(ifp); AUE_UNLOCK(sc); return; } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ Static void aue_stop(sc) struct aue_softc *sc; { usbd_status err; struct ifnet *ifp; int i; AUE_LOCK(sc); ifp = &sc->arpcom.ac_if; ifp->if_timer = 0; csr_write_1(sc, AUE_CTL0, 0); csr_write_1(sc, AUE_CTL1, 0); aue_reset(sc); untimeout(aue_tick, sc, sc->aue_stat_ch); /* Stop transfers. 
*/ if (sc->aue_ep[AUE_ENDPT_RX] != NULL) { err = usbd_abort_pipe(sc->aue_ep[AUE_ENDPT_RX]); if (err) { printf("aue%d: abort rx pipe failed: %s\n", sc->aue_unit, usbd_errstr(err)); } err = usbd_close_pipe(sc->aue_ep[AUE_ENDPT_RX]); if (err) { printf("aue%d: close rx pipe failed: %s\n", sc->aue_unit, usbd_errstr(err)); } sc->aue_ep[AUE_ENDPT_RX] = NULL; } if (sc->aue_ep[AUE_ENDPT_TX] != NULL) { err = usbd_abort_pipe(sc->aue_ep[AUE_ENDPT_TX]); if (err) { printf("aue%d: abort tx pipe failed: %s\n", sc->aue_unit, usbd_errstr(err)); } err = usbd_close_pipe(sc->aue_ep[AUE_ENDPT_TX]); if (err) { printf("aue%d: close tx pipe failed: %s\n", sc->aue_unit, usbd_errstr(err)); } sc->aue_ep[AUE_ENDPT_TX] = NULL; } #ifdef AUE_INTR_PIPE if (sc->aue_ep[AUE_ENDPT_INTR] != NULL) { err = usbd_abort_pipe(sc->aue_ep[AUE_ENDPT_INTR]); if (err) { printf("aue%d: abort intr pipe failed: %s\n", sc->aue_unit, usbd_errstr(err)); } err = usbd_close_pipe(sc->aue_ep[AUE_ENDPT_INTR]); if (err) { printf("aue%d: close intr pipe failed: %s\n", sc->aue_unit, usbd_errstr(err)); } sc->aue_ep[AUE_ENDPT_INTR] = NULL; } #endif /* Free RX resources. */ for (i = 0; i < AUE_RX_LIST_CNT; i++) { if (sc->aue_cdata.aue_rx_chain[i].aue_buf != NULL) { free(sc->aue_cdata.aue_rx_chain[i].aue_buf, M_USBDEV); sc->aue_cdata.aue_rx_chain[i].aue_buf = NULL; } if (sc->aue_cdata.aue_rx_chain[i].aue_mbuf != NULL) { m_freem(sc->aue_cdata.aue_rx_chain[i].aue_mbuf); sc->aue_cdata.aue_rx_chain[i].aue_mbuf = NULL; } if (sc->aue_cdata.aue_rx_chain[i].aue_xfer != NULL) { usbd_free_xfer(sc->aue_cdata.aue_rx_chain[i].aue_xfer); sc->aue_cdata.aue_rx_chain[i].aue_xfer = NULL; } } /* Free TX resources. 
*/ for (i = 0; i < AUE_TX_LIST_CNT; i++) { if (sc->aue_cdata.aue_tx_chain[i].aue_buf != NULL) { free(sc->aue_cdata.aue_tx_chain[i].aue_buf, M_USBDEV); sc->aue_cdata.aue_tx_chain[i].aue_buf = NULL; } if (sc->aue_cdata.aue_tx_chain[i].aue_mbuf != NULL) { m_freem(sc->aue_cdata.aue_tx_chain[i].aue_mbuf); sc->aue_cdata.aue_tx_chain[i].aue_mbuf = NULL; } if (sc->aue_cdata.aue_tx_chain[i].aue_xfer != NULL) { usbd_free_xfer(sc->aue_cdata.aue_tx_chain[i].aue_xfer); sc->aue_cdata.aue_tx_chain[i].aue_xfer = NULL; } } #ifdef AUE_INTR_PIPE free(sc->aue_cdata.aue_ibuf, M_USBDEV); sc->aue_cdata.aue_ibuf = NULL; #endif sc->aue_link = 0; ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); AUE_UNLOCK(sc); return; } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ Static void aue_shutdown(dev) device_t dev; { struct aue_softc *sc; sc = device_get_softc(dev); sc->aue_gone++; AUE_LOCK(sc); aue_reset(sc); aue_stop(sc); AUE_UNLOCK(sc); return; } Index: head/sys/dev/usb/if_cue.c =================================================================== --- head/sys/dev/usb/if_cue.c (revision 71961) +++ head/sys/dev/usb/if_cue.c (revision 71962) @@ -1,1232 +1,1231 @@ /* * Copyright (c) 1997, 1998, 1999, 2000 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. 
Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * CATC USB-EL1210A USB to ethernet driver. Used in the CATC Netmate * adapters and others. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The CATC USB-EL1210A provides USB ethernet support at 10Mbps. The * RX filter uses a 512-bit multicast hash table, single perfect entry * for the station address, and promiscuous mode. Unlike the ADMtek * and KLSI chips, the CATC ASIC supports read and write combining * mode where multiple packets can be transfered using a single bulk * transaction, which helps performance a great deal. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef lint static const char rcsid[] = "$FreeBSD$"; #endif /* * Various supported device vendors/products. 
*/ Static struct cue_type cue_devs[] = { { USB_VENDOR_CATC, USB_PRODUCT_CATC_NETMATE }, { USB_VENDOR_CATC, USB_PRODUCT_CATC_NETMATE2 }, { USB_VENDOR_SMARTBRIDGES, USB_PRODUCT_SMARTBRIDGES_SMARTLINK }, { 0, 0 } }; Static struct usb_qdat cue_qdat; Static int cue_match __P((device_t)); Static int cue_attach __P((device_t)); Static int cue_detach __P((device_t)); Static int cue_tx_list_init __P((struct cue_softc *)); Static int cue_rx_list_init __P((struct cue_softc *)); Static int cue_newbuf __P((struct cue_softc *, struct cue_chain *, struct mbuf *)); Static int cue_encap __P((struct cue_softc *, struct mbuf *, int)); Static void cue_rxeof __P((usbd_xfer_handle, usbd_private_handle, usbd_status)); Static void cue_txeof __P((usbd_xfer_handle, usbd_private_handle, usbd_status)); Static void cue_tick __P((void *)); Static void cue_rxstart __P((struct ifnet *)); Static void cue_start __P((struct ifnet *)); Static int cue_ioctl __P((struct ifnet *, u_long, caddr_t)); Static void cue_init __P((void *)); Static void cue_stop __P((struct cue_softc *)); Static void cue_watchdog __P((struct ifnet *)); Static void cue_shutdown __P((device_t)); Static void cue_setmulti __P((struct cue_softc *)); Static u_int32_t cue_crc __P((caddr_t)); Static void cue_reset __P((struct cue_softc *)); Static int csr_read_1 __P((struct cue_softc *, int)); Static int csr_write_1 __P((struct cue_softc *, int, int)); Static int csr_read_2 __P((struct cue_softc *, int)); #ifdef notdef Static int csr_write_2 __P((struct cue_softc *, int, int)); #endif Static int cue_mem __P((struct cue_softc *, int, int, void *, int)); Static int cue_getmac __P((struct cue_softc *, void *)); Static device_method_t cue_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cue_match), DEVMETHOD(device_attach, cue_attach), DEVMETHOD(device_detach, cue_detach), DEVMETHOD(device_shutdown, cue_shutdown), { 0, 0 } }; Static driver_t cue_driver = { "cue", cue_methods, sizeof(struct cue_softc) }; Static devclass_t 
cue_devclass; DRIVER_MODULE(if_cue, uhub, cue_driver, cue_devclass, usbd_driver_load, 0); #define CUE_SETBIT(sc, reg, x) \ csr_write_1(sc, reg, csr_read_1(sc, reg) | (x)) #define CUE_CLRBIT(sc, reg, x) \ csr_write_1(sc, reg, csr_read_1(sc, reg) & ~(x)) Static int csr_read_1(sc, reg) struct cue_softc *sc; int reg; { usb_device_request_t req; usbd_status err; u_int8_t val = 0; if (sc->cue_gone) return(0); CUE_LOCK(sc); req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = CUE_CMD_READREG; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, 1); err = usbd_do_request_flags(sc->cue_udev, &req, &val, USBD_NO_TSLEEP, NULL); CUE_UNLOCK(sc); if (err) return(0); return(val); } Static int csr_read_2(sc, reg) struct cue_softc *sc; int reg; { usb_device_request_t req; usbd_status err; u_int16_t val = 0; if (sc->cue_gone) return(0); CUE_LOCK(sc); req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = CUE_CMD_READREG; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, 2); err = usbd_do_request_flags(sc->cue_udev, &req, &val, USBD_NO_TSLEEP, NULL); CUE_UNLOCK(sc); if (err) return(0); return(val); } Static int csr_write_1(sc, reg, val) struct cue_softc *sc; int reg, val; { usb_device_request_t req; usbd_status err; if (sc->cue_gone) return(0); CUE_LOCK(sc); req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = CUE_CMD_WRITEREG; USETW(req.wValue, val); USETW(req.wIndex, reg); USETW(req.wLength, 0); err = usbd_do_request_flags(sc->cue_udev, &req, &val, USBD_NO_TSLEEP, NULL); CUE_UNLOCK(sc); if (err) return(-1); return(0); } #ifdef notdef Static int csr_write_2(sc, reg, val) struct cue_softc *sc; int reg, val; { usb_device_request_t req; usbd_status err; if (sc->cue_gone) return(0); CUE_LOCK(sc); req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = CUE_CMD_WRITEREG; USETW(req.wValue, val); USETW(req.wIndex, reg); USETW(req.wLength, 0); err = usbd_do_request_flags(sc->cue_udev, &req, &val, USBD_NO_TSLEEP, NULL); CUE_UNLOCK(sc); if (err) 
return(-1); return(0); } #endif Static int cue_mem(sc, cmd, addr, buf, len) struct cue_softc *sc; int cmd; int addr; void *buf; int len; { usb_device_request_t req; usbd_status err; if (sc->cue_gone) return(0); CUE_LOCK(sc); if (cmd == CUE_CMD_READSRAM) req.bmRequestType = UT_READ_VENDOR_DEVICE; else req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = cmd; USETW(req.wValue, 0); USETW(req.wIndex, addr); USETW(req.wLength, len); err = usbd_do_request_flags(sc->cue_udev, &req, &buf, USBD_NO_TSLEEP, NULL); CUE_UNLOCK(sc); if (err) return(-1); return(0); } Static int cue_getmac(sc, buf) struct cue_softc *sc; void *buf; { usb_device_request_t req; usbd_status err; if (sc->cue_gone) return(0); CUE_LOCK(sc); req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = CUE_CMD_GET_MACADDR; USETW(req.wValue, 0); USETW(req.wIndex, 0); USETW(req.wLength, ETHER_ADDR_LEN); err = usbd_do_request_flags(sc->cue_udev, &req, buf, USBD_NO_TSLEEP, NULL); CUE_UNLOCK(sc); if (err) { printf("cue%d: read MAC address failed\n", sc->cue_unit); return(-1); } return(0); } #define CUE_POLY 0xEDB88320 #define CUE_BITS 9 Static u_int32_t cue_crc(addr) caddr_t addr; { u_int32_t idx, bit, data, crc; /* Compute CRC for the address value. */ crc = 0xFFFFFFFF; /* initial value */ for (idx = 0; idx < 6; idx++) { for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) crc = (crc >> 1) ^ (((crc ^ data) & 1) ? 
CUE_POLY : 0); } return (crc & ((1 << CUE_BITS) - 1)); } Static void cue_setmulti(sc) struct cue_softc *sc; { struct ifnet *ifp; struct ifmultiaddr *ifma; u_int32_t h = 0, i; ifp = &sc->arpcom.ac_if; if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { for (i = 0; i < CUE_MCAST_TABLE_LEN; i++) sc->cue_mctab[i] = 0xFF; cue_mem(sc, CUE_CMD_WRITESRAM, CUE_MCAST_TABLE_ADDR, &sc->cue_mctab, CUE_MCAST_TABLE_LEN); return; } /* first, zot all the existing hash bits */ for (i = 0; i < CUE_MCAST_TABLE_LEN; i++) sc->cue_mctab[i] = 0; /* now program new ones */ - for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; - ifma = ifma->ifma_link.le_next) { + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = cue_crc(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); sc->cue_mctab[h >> 3] |= 1 << (h & 0x7); } /* * Also include the broadcast address in the filter * so we can receive broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) { h = cue_crc(etherbroadcastaddr); sc->cue_mctab[h >> 3] |= 1 << (h & 0x7); } cue_mem(sc, CUE_CMD_WRITESRAM, CUE_MCAST_TABLE_ADDR, &sc->cue_mctab, CUE_MCAST_TABLE_LEN); return; } Static void cue_reset(sc) struct cue_softc *sc; { usb_device_request_t req; usbd_status err; if (sc->cue_gone) return; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = CUE_CMD_RESET; USETW(req.wValue, 0); USETW(req.wIndex, 0); USETW(req.wLength, 0); err = usbd_do_request_flags(sc->cue_udev, &req, NULL, USBD_NO_TSLEEP, NULL); if (err) printf("cue%d: reset failed\n", sc->cue_unit); /* Wait a little while for the chip to get its brains in order. */ DELAY(1000); return; } /* * Probe for a Pegasus chip. */ USB_MATCH(cue) { USB_MATCH_START(cue, uaa); struct cue_type *t; if (!uaa->iface) return(UMATCH_NONE); t = cue_devs; while(t->cue_vid) { if (uaa->vendor == t->cue_vid && uaa->product == t->cue_did) { return(UMATCH_VENDOR_PRODUCT); } t++; } return(UMATCH_NONE); } /* * Attach the interface. 
Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ USB_ATTACH(cue) { USB_ATTACH_START(cue, sc, uaa); char devinfo[1024]; u_char eaddr[ETHER_ADDR_LEN]; struct ifnet *ifp; usb_interface_descriptor_t *id; usb_endpoint_descriptor_t *ed; int i; bzero(sc, sizeof(struct cue_softc)); sc->cue_iface = uaa->iface; sc->cue_udev = uaa->device; sc->cue_unit = device_get_unit(self); if (usbd_set_config_no(sc->cue_udev, CUE_CONFIG_NO, 0)) { printf("cue%d: getting interface handle failed\n", sc->cue_unit); USB_ATTACH_ERROR_RETURN; } id = usbd_get_interface_descriptor(uaa->iface); usbd_devinfo(uaa->device, 0, devinfo); device_set_desc_copy(self, devinfo); printf("%s: %s\n", USBDEVNAME(self), devinfo); /* Find endpoints. */ for (i = 0; i < id->bNumEndpoints; i++) { ed = usbd_interface2endpoint_descriptor(uaa->iface, i); if (!ed) { printf("cue%d: couldn't get ep %d\n", sc->cue_unit, i); USB_ATTACH_ERROR_RETURN; } if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && (ed->bmAttributes & UE_XFERTYPE) == UE_BULK) { sc->cue_ed[CUE_ENDPT_RX] = ed->bEndpointAddress; } else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT && (ed->bmAttributes & UE_XFERTYPE) == UE_BULK) { sc->cue_ed[CUE_ENDPT_TX] = ed->bEndpointAddress; } else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && (ed->bmAttributes & UE_XFERTYPE) == UE_INTERRUPT) { sc->cue_ed[CUE_ENDPT_INTR] = ed->bEndpointAddress; } } mtx_init(&sc->cue_mtx, device_get_nameunit(self), MTX_DEF | MTX_RECURSE); CUE_LOCK(sc); #ifdef notdef /* Reset the adapter. */ cue_reset(sc); #endif /* * Get station address. */ cue_getmac(sc, &eaddr); /* * A CATC chip was detected. Inform the world. 
*/ printf("cue%d: Ethernet address: %6D\n", sc->cue_unit, eaddr, ":"); bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); ifp = &sc->arpcom.ac_if; ifp->if_softc = sc; ifp->if_unit = sc->cue_unit; ifp->if_name = "cue"; ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = cue_ioctl; ifp->if_output = ether_output; ifp->if_start = cue_start; ifp->if_watchdog = cue_watchdog; ifp->if_init = cue_init; ifp->if_baudrate = 10000000; ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; cue_qdat.ifp = ifp; cue_qdat.if_rxstart = cue_rxstart; /* * Call MI attach routine. */ ether_ifattach(ifp, ETHER_BPF_SUPPORTED); callout_handle_init(&sc->cue_stat_ch); usb_register_netisr(); sc->cue_gone = 0; CUE_UNLOCK(sc); USB_ATTACH_SUCCESS_RETURN; } Static int cue_detach(dev) device_t dev; { struct cue_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); CUE_LOCK(sc); ifp = &sc->arpcom.ac_if; sc->cue_gone = 1; untimeout(cue_tick, sc, sc->cue_stat_ch); ether_ifdetach(ifp, ETHER_BPF_SUPPORTED); if (sc->cue_ep[CUE_ENDPT_TX] != NULL) usbd_abort_pipe(sc->cue_ep[CUE_ENDPT_TX]); if (sc->cue_ep[CUE_ENDPT_RX] != NULL) usbd_abort_pipe(sc->cue_ep[CUE_ENDPT_RX]); if (sc->cue_ep[CUE_ENDPT_INTR] != NULL) usbd_abort_pipe(sc->cue_ep[CUE_ENDPT_INTR]); CUE_UNLOCK(sc); mtx_destroy(&sc->cue_mtx); return(0); } /* * Initialize an RX descriptor and attach an MBUF cluster. 
*/
Static int
cue_newbuf(sc, c, m)
	struct cue_softc	*sc;
	struct cue_chain	*c;	/* rx slot to (re)arm */
	struct mbuf		*m;	/* mbuf to recycle, or NULL to allocate */
{
	struct mbuf		*m_new = NULL;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("cue%d: no memory for rx list "
			    "-- packet dropped!\n", sc->cue_unit);
			return(ENOBUFS);
		}

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			printf("cue%d: no memory for rx list "
			    "-- packet dropped!\n", sc->cue_unit);
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		/* Recycle the caller's mbuf: reset length and data pointer. */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/* Align the payload so the IP header lands on a longword boundary. */
	m_adj(m_new, ETHER_ALIGN);
	c->cue_mbuf = m_new;

	return(0);
}

/* Allocate receive chain slots and their USB transfer handles. */
Static int
cue_rx_list_init(sc)
	struct cue_softc	*sc;
{
	struct cue_cdata	*cd;
	struct cue_chain	*c;
	int			i;

	cd = &sc->cue_cdata;
	for (i = 0; i < CUE_RX_LIST_CNT; i++) {
		c = &cd->cue_rx_chain[i];
		c->cue_sc = sc;
		c->cue_idx = i;
		if (cue_newbuf(sc, c, NULL) == ENOBUFS)
			return(ENOBUFS);
		if (c->cue_xfer == NULL) {
			c->cue_xfer = usbd_alloc_xfer(sc->cue_udev);
			if (c->cue_xfer == NULL)
				return(ENOBUFS);
		}
	}

	return(0);
}

/* Allocate transmit chain slots, transfer handles and staging buffers. */
Static int
cue_tx_list_init(sc)
	struct cue_softc	*sc;
{
	struct cue_cdata	*cd;
	struct cue_chain	*c;
	int			i;

	cd = &sc->cue_cdata;
	for (i = 0; i < CUE_TX_LIST_CNT; i++) {
		c = &cd->cue_tx_chain[i];
		c->cue_sc = sc;
		c->cue_idx = i;
		c->cue_mbuf = NULL;
		if (c->cue_xfer == NULL) {
			c->cue_xfer = usbd_alloc_xfer(sc->cue_udev);
			if (c->cue_xfer == NULL)
				return(ENOBUFS);
		}
		c->cue_buf = malloc(CUE_BUFSZ, M_USBDEV, M_NOWAIT);
		if (c->cue_buf == NULL)
			return(ENOBUFS);
	}

	return(0);
}

/* Re-arm the current producer rx slot with a fresh mbuf and restart it. */
Static void
cue_rxstart(ifp)
	struct ifnet		*ifp;
{
	struct cue_softc	*sc;
	struct cue_chain	*c;

	sc = ifp->if_softc;
	CUE_LOCK(sc);
	c = &sc->cue_cdata.cue_rx_chain[sc->cue_cdata.cue_rx_prod];

	if (cue_newbuf(sc, c, NULL) == ENOBUFS) {
		ifp->if_ierrors++;
		CUE_UNLOCK(sc);
		return;
	}

	/* Setup new transfer. */
	usbd_setup_xfer(c->cue_xfer, sc->cue_ep[CUE_ENDPT_RX],
	    c, mtod(c->cue_mbuf, char *), CUE_BUFSZ, USBD_SHORT_XFER_OK,
	    USBD_NO_TIMEOUT, cue_rxeof);
	usbd_transfer(c->cue_xfer);
	CUE_UNLOCK(sc);

	return;
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
Static void
cue_rxeof(xfer, priv, status)
	usbd_xfer_handle	xfer;
	usbd_private_handle	priv;	/* really a struct cue_chain * */
	usbd_status		status;
{
	struct cue_softc	*sc;
	struct cue_chain	*c;
	struct mbuf		*m;
	struct ifnet		*ifp;
	int			total_len = 0;
	u_int16_t		len;

	c = priv;
	sc = c->cue_sc;
	CUE_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	/* Interface has been stopped; ignore the completion. */
	if (!(ifp->if_flags & IFF_RUNNING)) {
		CUE_UNLOCK(sc);
		return;
	}

	if (status != USBD_NORMAL_COMPLETION) {
		/* Pipe shutdown/cancel is not an error; just bail. */
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) {
			CUE_UNLOCK(sc);
			return;
		}
		printf("cue%d: usb error on rx: %s\n", sc->cue_unit,
		    usbd_errstr(status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall(sc->cue_ep[CUE_ENDPT_RX]);
		goto done;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &total_len, NULL);

	m = c->cue_mbuf;
	/* The chip embeds a 16-bit frame length at the start of the data. */
	len = *mtod(m, u_int16_t *);

	/* No errors; receive the packet. */
	total_len = len;

	if (len < sizeof(struct ether_header)) {
		ifp->if_ierrors++;
		goto done;
	}

	ifp->if_ipackets++;
	/* Strip the length word before handing the frame up. */
	m_adj(m, sizeof(u_int16_t));
	/* Fake rcvif so usb_ether_input() can find the real ifp later. */
	m->m_pkthdr.rcvif = (struct ifnet *)&cue_qdat;
	m->m_pkthdr.len = m->m_len = total_len;

	/* Put the packet on the special USB input queue. */
	usb_ether_input(m);
	CUE_UNLOCK(sc);

	return;
done:
	/* Setup new transfer: re-arm this rx slot even on error. */
	usbd_setup_xfer(c->cue_xfer, sc->cue_ep[CUE_ENDPT_RX],
	    c, mtod(c->cue_mbuf, char *), CUE_BUFSZ, USBD_SHORT_XFER_OK,
	    USBD_NO_TIMEOUT, cue_rxeof);
	usbd_transfer(c->cue_xfer);
	CUE_UNLOCK(sc);

	return;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
*/
Static void
cue_txeof(xfer, priv, status)
	usbd_xfer_handle	xfer;
	usbd_private_handle	priv;	/* really a struct cue_chain * */
	usbd_status		status;
{
	struct cue_softc	*sc;
	struct cue_chain	*c;
	struct ifnet		*ifp;
	usbd_status		err;

	c = priv;
	sc = c->cue_sc;
	CUE_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	if (status != USBD_NORMAL_COMPLETION) {
		/* Pipe shutdown/cancel is not an error; just bail. */
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) {
			CUE_UNLOCK(sc);
			return;
		}
		printf("cue%d: usb error on tx: %s\n", sc->cue_unit,
		    usbd_errstr(status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall(sc->cue_ep[CUE_ENDPT_TX]);
		CUE_UNLOCK(sc);
		return;
	}

	/* Transfer finished: clear the watchdog and unblock the queue. */
	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;
	usbd_get_xfer_status(c->cue_xfer, NULL, NULL, NULL, &err);

	if (c->cue_mbuf != NULL) {
		c->cue_mbuf->m_pkthdr.rcvif = ifp;
		/* Defer the mbuf free/BPF tap to the USB netisr. */
		usb_tx_done(c->cue_mbuf);
		c->cue_mbuf = NULL;
	}

	if (err)
		ifp->if_oerrors++;
	else
		ifp->if_opackets++;

	CUE_UNLOCK(sc);

	return;
}

/* Once-a-second timer: fold the chip's collision/error counters in. */
Static void
cue_tick(xsc)
	void			*xsc;
{
	struct cue_softc	*sc;
	struct ifnet		*ifp;

	sc = xsc;

	if (sc == NULL)
		return;

	CUE_LOCK(sc);

	ifp = &sc->arpcom.ac_if;

	ifp->if_collisions += csr_read_2(sc, CUE_TX_SINGLECOLL);
	ifp->if_collisions += csr_read_2(sc, CUE_TX_MULTICOLL);
	ifp->if_collisions += csr_read_2(sc, CUE_TX_EXCESSCOLL);

	if (csr_read_2(sc, CUE_RX_FRAMEERR))
		ifp->if_ierrors++;

	/* Reschedule ourselves. */
	sc->cue_stat_ch = timeout(cue_tick, sc, hz);

	CUE_UNLOCK(sc);

	return;
}

/* Stage an mbuf into tx slot 'idx' and start the bulk-out transfer. */
Static int
cue_encap(sc, m, idx)
	struct cue_softc	*sc;
	struct mbuf		*m;
	int			idx;
{
	int			total_len;
	struct cue_chain	*c;
	usbd_status		err;

	c = &sc->cue_cdata.cue_tx_chain[idx];

	/*
	 * Copy the mbuf data into a contiguous buffer, leaving two
	 * bytes at the beginning to hold the frame length.
	 */
	m_copydata(m, 0, m->m_pkthdr.len, c->cue_buf + 2);
	c->cue_mbuf = m;

	total_len = m->m_pkthdr.len + 2;

	/* The first two bytes are the frame length */
	c->cue_buf[0] = (u_int8_t)m->m_pkthdr.len;
	c->cue_buf[1] = (u_int8_t)(m->m_pkthdr.len >> 8);

	usbd_setup_xfer(c->cue_xfer, sc->cue_ep[CUE_ENDPT_TX],
	    c, c->cue_buf, total_len, 0, 10000, cue_txeof);

	/* Transmit */
	err = usbd_transfer(c->cue_xfer);
	if (err != USBD_IN_PROGRESS) {
		cue_stop(sc);
		return(EIO);
	}

	sc->cue_cdata.cue_tx_cnt++;

	return(0);
}

/* if_start entry point: dequeue one frame and hand it to cue_encap(). */
Static void
cue_start(ifp)
	struct ifnet		*ifp;
{
	struct cue_softc	*sc;
	struct mbuf		*m_head = NULL;

	sc = ifp->if_softc;
	CUE_LOCK(sc);

	if (ifp->if_flags & IFF_OACTIVE) {
		CUE_UNLOCK(sc);
		return;
	}

	IF_DEQUEUE(&ifp->if_snd, m_head);
	if (m_head == NULL) {
		CUE_UNLOCK(sc);
		return;
	}

	if (cue_encap(sc, m_head, 0)) {
		/* Transfer failed to start: requeue and stall the queue. */
		IF_PREPEND(&ifp->if_snd, m_head);
		ifp->if_flags |= IFF_OACTIVE;
		CUE_UNLOCK(sc);
		return;
	}

	/*
	 * If there's a BPF listener, bounce a copy of this frame
	 * to him.
	 */
	if (ifp->if_bpf)
		bpf_mtap(ifp, m_head);

	ifp->if_flags |= IFF_OACTIVE;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	CUE_UNLOCK(sc);

	return;
}

/* Bring the interface up: program the chip and arm the receive pipe. */
Static void
cue_init(xsc)
	void			*xsc;
{
	struct cue_softc	*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct cue_chain	*c;
	usbd_status		err;
	int			i;

	if (ifp->if_flags & IFF_RUNNING)
		return;

	CUE_LOCK(sc);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
#ifdef foo
	cue_reset(sc);
#endif

	/* Set MAC address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		csr_write_1(sc, CUE_PAR0 - i, sc->arpcom.ac_enaddr[i]);

	/* Enable RX logic. */
	csr_write_1(sc, CUE_ETHCTL, CUE_ETHCTL_RX_ON|CUE_ETHCTL_MCAST_ON);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		CUE_SETBIT(sc, CUE_ETHCTL, CUE_ETHCTL_PROMISC);
	} else {
		CUE_CLRBIT(sc, CUE_ETHCTL, CUE_ETHCTL_PROMISC);
	}

	/* Init TX ring. */
	if (cue_tx_list_init(sc) == ENOBUFS) {
		printf("cue%d: tx list init failed\n", sc->cue_unit);
		CUE_UNLOCK(sc);
		return;
	}

	/* Init RX ring. */
	if (cue_rx_list_init(sc) == ENOBUFS) {
		printf("cue%d: rx list init failed\n", sc->cue_unit);
		CUE_UNLOCK(sc);
		return;
	}

	/* Load the multicast filter. */
	cue_setmulti(sc);

	/*
	 * Set the number of RX and TX buffers that we want
	 * to reserve inside the ASIC.
	 */
	csr_write_1(sc, CUE_RX_BUFPKTS, CUE_RX_FRAMES);
	csr_write_1(sc, CUE_TX_BUFPKTS, CUE_TX_FRAMES);

	/* Set advanced operation modes. */
	csr_write_1(sc, CUE_ADVANCED_OPMODES,
	    CUE_AOP_EMBED_RXLEN|0x01); /* 1 wait state */

	/* Program the LED operation. */
	csr_write_1(sc, CUE_LEDCTL, CUE_LEDCTL_FOLLOW_LINK);

	/* Open RX and TX pipes. */
	err = usbd_open_pipe(sc->cue_iface, sc->cue_ed[CUE_ENDPT_RX],
	    USBD_EXCLUSIVE_USE, &sc->cue_ep[CUE_ENDPT_RX]);
	if (err) {
		printf("cue%d: open rx pipe failed: %s\n",
		    sc->cue_unit, usbd_errstr(err));
		CUE_UNLOCK(sc);
		return;
	}
	err = usbd_open_pipe(sc->cue_iface, sc->cue_ed[CUE_ENDPT_TX],
	    USBD_EXCLUSIVE_USE, &sc->cue_ep[CUE_ENDPT_TX]);
	if (err) {
		printf("cue%d: open tx pipe failed: %s\n",
		    sc->cue_unit, usbd_errstr(err));
		CUE_UNLOCK(sc);
		return;
	}

	/* Start up the receive pipe. */
	for (i = 0; i < CUE_RX_LIST_CNT; i++) {
		c = &sc->cue_cdata.cue_rx_chain[i];
		usbd_setup_xfer(c->cue_xfer, sc->cue_ep[CUE_ENDPT_RX],
		    c, mtod(c->cue_mbuf, char *), CUE_BUFSZ,
		    USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, cue_rxeof);
		usbd_transfer(c->cue_xfer);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	CUE_UNLOCK(sc);

	sc->cue_stat_ch = timeout(cue_tick, sc, hz);

	return;
}

/* Interface ioctl handler; unknown commands return EINVAL. */
Static int
cue_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct cue_softc	*sc = ifp->if_softc;
	int			error = 0;

	CUE_LOCK(sc);

	switch(command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
	case SIOCSIFMTU:
		error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * Just toggle the promisc bit if the interface is
			 * already running; otherwise do a full init.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->cue_if_flags & IFF_PROMISC)) {
				CUE_SETBIT(sc, CUE_ETHCTL, CUE_ETHCTL_PROMISC);
				cue_setmulti(sc);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->cue_if_flags & IFF_PROMISC) {
				CUE_CLRBIT(sc, CUE_ETHCTL, CUE_ETHCTL_PROMISC);
				cue_setmulti(sc);
			} else if (!(ifp->if_flags & IFF_RUNNING))
				cue_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				cue_stop(sc);
		}
		sc->cue_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		cue_setmulti(sc);
		error = 0;
		break;
	default:
		error = EINVAL;
		break;
	}

	CUE_UNLOCK(sc);

	return(error);
}

/* Transmit watchdog: fake a tx completion on slot 0 and kick the queue. */
Static void
cue_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct cue_softc	*sc;
	struct cue_chain	*c;
	usbd_status		stat;

	sc = ifp->if_softc;
	CUE_LOCK(sc);

	ifp->if_oerrors++;
	printf("cue%d: watchdog timeout\n", sc->cue_unit);

	c = &sc->cue_cdata.cue_tx_chain[0];
	usbd_get_xfer_status(c->cue_xfer, NULL, NULL, NULL, &stat);
	cue_txeof(c->cue_xfer, c, stat);

	if (ifp->if_snd.ifq_head != NULL)
		cue_start(ifp);
	CUE_UNLOCK(sc);

	return;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
*/ Static void cue_stop(sc) struct cue_softc *sc; { usbd_status err; struct ifnet *ifp; int i; CUE_LOCK(sc); ifp = &sc->arpcom.ac_if; ifp->if_timer = 0; csr_write_1(sc, CUE_ETHCTL, 0); cue_reset(sc); untimeout(cue_tick, sc, sc->cue_stat_ch); /* Stop transfers. */ if (sc->cue_ep[CUE_ENDPT_RX] != NULL) { err = usbd_abort_pipe(sc->cue_ep[CUE_ENDPT_RX]); if (err) { printf("cue%d: abort rx pipe failed: %s\n", sc->cue_unit, usbd_errstr(err)); } err = usbd_close_pipe(sc->cue_ep[CUE_ENDPT_RX]); if (err) { printf("cue%d: close rx pipe failed: %s\n", sc->cue_unit, usbd_errstr(err)); } sc->cue_ep[CUE_ENDPT_RX] = NULL; } if (sc->cue_ep[CUE_ENDPT_TX] != NULL) { err = usbd_abort_pipe(sc->cue_ep[CUE_ENDPT_TX]); if (err) { printf("cue%d: abort tx pipe failed: %s\n", sc->cue_unit, usbd_errstr(err)); } err = usbd_close_pipe(sc->cue_ep[CUE_ENDPT_TX]); if (err) { printf("cue%d: close tx pipe failed: %s\n", sc->cue_unit, usbd_errstr(err)); } sc->cue_ep[CUE_ENDPT_TX] = NULL; } if (sc->cue_ep[CUE_ENDPT_INTR] != NULL) { err = usbd_abort_pipe(sc->cue_ep[CUE_ENDPT_INTR]); if (err) { printf("cue%d: abort intr pipe failed: %s\n", sc->cue_unit, usbd_errstr(err)); } err = usbd_close_pipe(sc->cue_ep[CUE_ENDPT_INTR]); if (err) { printf("cue%d: close intr pipe failed: %s\n", sc->cue_unit, usbd_errstr(err)); } sc->cue_ep[CUE_ENDPT_INTR] = NULL; } /* Free RX resources. */ for (i = 0; i < CUE_RX_LIST_CNT; i++) { if (sc->cue_cdata.cue_rx_chain[i].cue_buf != NULL) { free(sc->cue_cdata.cue_rx_chain[i].cue_buf, M_USBDEV); sc->cue_cdata.cue_rx_chain[i].cue_buf = NULL; } if (sc->cue_cdata.cue_rx_chain[i].cue_mbuf != NULL) { m_freem(sc->cue_cdata.cue_rx_chain[i].cue_mbuf); sc->cue_cdata.cue_rx_chain[i].cue_mbuf = NULL; } if (sc->cue_cdata.cue_rx_chain[i].cue_xfer != NULL) { usbd_free_xfer(sc->cue_cdata.cue_rx_chain[i].cue_xfer); sc->cue_cdata.cue_rx_chain[i].cue_xfer = NULL; } } /* Free TX resources. 
*/ for (i = 0; i < CUE_TX_LIST_CNT; i++) { if (sc->cue_cdata.cue_tx_chain[i].cue_buf != NULL) { free(sc->cue_cdata.cue_tx_chain[i].cue_buf, M_USBDEV); sc->cue_cdata.cue_tx_chain[i].cue_buf = NULL; } if (sc->cue_cdata.cue_tx_chain[i].cue_mbuf != NULL) { m_freem(sc->cue_cdata.cue_tx_chain[i].cue_mbuf); sc->cue_cdata.cue_tx_chain[i].cue_mbuf = NULL; } if (sc->cue_cdata.cue_tx_chain[i].cue_xfer != NULL) { usbd_free_xfer(sc->cue_cdata.cue_tx_chain[i].cue_xfer); sc->cue_cdata.cue_tx_chain[i].cue_xfer = NULL; } } ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); CUE_UNLOCK(sc); return; } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ Static void cue_shutdown(dev) device_t dev; { struct cue_softc *sc; sc = device_get_softc(dev); CUE_LOCK(sc); cue_reset(sc); cue_stop(sc); CUE_UNLOCK(sc); return; } Index: head/sys/dev/usb/if_kue.c =================================================================== --- head/sys/dev/usb/if_kue.c (revision 71961) +++ head/sys/dev/usb/if_kue.c (revision 71962) @@ -1,1148 +1,1147 @@ /* * Copyright (c) 1997, 1998, 1999, 2000 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. 
Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * Kawasaki LSI KL5KUSB101B USB to ethernet adapter driver. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The KLSI USB to ethernet adapter chip contains an USB serial interface, * ethernet MAC and embedded microcontroller (called the QT Engine). * The chip must have firmware loaded into it before it will operate. * Packets are passed between the chip and host via bulk transfers. * There is an interrupt endpoint mentioned in the software spec, however * it's currently unused. This device is 10Mbps half-duplex only, hence * there is no media selection logic. The MAC supports a 128 entry * multicast filter, though the exact size of the filter can depend * on the firmware. Curiously, while the software spec describes various * ethernet statistics counters, my sample adapter and firmware combination * claims not to support any statistics counters at all. 
* * Note that once we load the firmware in the device, we have to be * careful not to load it again: if you restart your computer but * leave the adapter attached to the USB controller, it may remain * powered on and retain its firmware. In this case, we don't need * to load the firmware a second time. * * Special thanks to Rob Furr for providing an ADS Technologies * adapter for development and testing. No monkeys were harmed during * the development of this driver. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef lint static const char rcsid[] = "$FreeBSD$"; #endif /* * Various supported device vendors/products. */ Static struct kue_type kue_devs[] = { { USB_VENDOR_AOX, USB_PRODUCT_AOX_USB101 }, { USB_VENDOR_KLSI, USB_PRODUCT_AOX_USB101 }, { USB_VENDOR_ADS, USB_PRODUCT_ADS_UBS10BT }, { USB_VENDOR_ATEN, USB_PRODUCT_ATEN_UC10T }, { USB_VENDOR_NETGEAR, USB_PRODUCT_NETGEAR_EA101 }, { USB_VENDOR_PERACOM, USB_PRODUCT_PERACOM_ENET }, { USB_VENDOR_PERACOM, USB_PRODUCT_PERACOM_ENET2 }, { USB_VENDOR_ENTREGA, USB_PRODUCT_ENTREGA_E45 }, { USB_VENDOR_3COM, USB_PRODUCT_3COM_3C19250 }, { USB_VENDOR_COREGA, USB_PRODUCT_COREGA_ETHER_USB_T }, { USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DSB650C }, { USB_VENDOR_SMC, USB_PRODUCT_SMC_2102USB }, { USB_VENDOR_LINKSYS, USB_PRODUCT_LINKSYS_USB10T }, { USB_VENDOR_KLSI, USB_PRODUCT_KLSI_DUH3E10BT }, { USB_VENDOR_PERACOM, USB_PRODUCT_PERACOM_ENET3 }, { 0, 0 } }; Static struct usb_qdat kue_qdat; Static int kue_match __P((device_t)); Static int kue_attach __P((device_t)); Static int kue_detach __P((device_t)); Static void kue_shutdown __P((device_t)); Static int kue_tx_list_init __P((struct kue_softc *)); Static int kue_rx_list_init __P((struct kue_softc *)); Static int kue_newbuf __P((struct kue_softc *, struct kue_chain *, struct mbuf *)); Static int kue_encap __P((struct kue_softc *, 
struct mbuf *, int)); Static void kue_rxeof __P((usbd_xfer_handle, usbd_private_handle, usbd_status)); Static void kue_txeof __P((usbd_xfer_handle, usbd_private_handle, usbd_status)); Static void kue_start __P((struct ifnet *)); Static void kue_rxstart __P((struct ifnet *)); Static int kue_ioctl __P((struct ifnet *, u_long, caddr_t)); Static void kue_init __P((void *)); Static void kue_stop __P((struct kue_softc *)); Static void kue_watchdog __P((struct ifnet *)); Static void kue_setmulti __P((struct kue_softc *)); Static void kue_reset __P((struct kue_softc *)); Static usbd_status kue_do_request __P((usbd_device_handle, usb_device_request_t *, void *)); Static usbd_status kue_ctl __P((struct kue_softc *, int, u_int8_t, u_int16_t, char *, int)); Static usbd_status kue_setword __P((struct kue_softc *, u_int8_t, u_int16_t)); Static int kue_load_fw __P((struct kue_softc *)); Static device_method_t kue_methods[] = { /* Device interface */ DEVMETHOD(device_probe, kue_match), DEVMETHOD(device_attach, kue_attach), DEVMETHOD(device_detach, kue_detach), DEVMETHOD(device_shutdown, kue_shutdown), { 0, 0 } }; Static driver_t kue_driver = { "kue", kue_methods, sizeof(struct kue_softc) }; Static devclass_t kue_devclass; DRIVER_MODULE(if_kue, uhub, kue_driver, kue_devclass, usbd_driver_load, 0); /* * We have a custom do_request function which is almost like the * regular do_request function, except it has a much longer timeout. * Why? Because we need to make requests over the control endpoint * to download the firmware to the device, which can take longer * than the default timeout. 
*/ Static usbd_status kue_do_request(dev, req, data) usbd_device_handle dev; usb_device_request_t *req; void *data; { usbd_xfer_handle xfer; usbd_status err; xfer = usbd_alloc_xfer(dev); usbd_setup_default_xfer(xfer, dev, 0, 500000, req, data, UGETW(req->wLength), USBD_SHORT_XFER_OK|USBD_NO_TSLEEP, 0); err = usbd_sync_transfer(xfer); usbd_free_xfer(xfer); return(err); } Static usbd_status kue_setword(sc, breq, word) struct kue_softc *sc; u_int8_t breq; u_int16_t word; { usbd_device_handle dev; usb_device_request_t req; usbd_status err; if (sc->kue_gone) return(USBD_NORMAL_COMPLETION); dev = sc->kue_udev; KUE_LOCK(sc); req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = breq; USETW(req.wValue, word); USETW(req.wIndex, 0); USETW(req.wLength, 0); err = kue_do_request(dev, &req, NULL); KUE_UNLOCK(sc); return(err); } Static usbd_status kue_ctl(sc, rw, breq, val, data, len) struct kue_softc *sc; int rw; u_int8_t breq; u_int16_t val; char *data; int len; { usbd_device_handle dev; usb_device_request_t req; usbd_status err; dev = sc->kue_udev; if (sc->kue_gone) return(USBD_NORMAL_COMPLETION); KUE_LOCK(sc); if (rw == KUE_CTL_WRITE) req.bmRequestType = UT_WRITE_VENDOR_DEVICE; else req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = breq; USETW(req.wValue, val); USETW(req.wIndex, 0); USETW(req.wLength, len); err = kue_do_request(dev, &req, data); KUE_UNLOCK(sc); return(err); } Static int kue_load_fw(sc) struct kue_softc *sc; { usbd_status err; usb_device_descriptor_t *dd; int hwrev; dd = &sc->kue_udev->ddesc; hwrev = UGETW(dd->bcdDevice); /* * First, check if we even need to load the firmware. * If the device was still attached when the system was * rebooted, it may already have firmware loaded in it. * If this is the case, we don't need to do it again. * And in fact, if we try to load it again, we'll hang, * so we have to avoid this condition if we don't want * to look stupid. * * We can test this quickly by checking the bcdRevision * code. 
The NIC will return a different revision code if * it's probed while the firmware is still loaded and * running. */ if (hwrev == 0x0202) return(0); /* Load code segment */ err = kue_ctl(sc, KUE_CTL_WRITE, KUE_CMD_SEND_SCAN, 0, kue_code_seg, sizeof(kue_code_seg)); if (err) { printf("kue%d: failed to load code segment: %s\n", sc->kue_unit, usbd_errstr(err)); return(ENXIO); } /* Load fixup segment */ err = kue_ctl(sc, KUE_CTL_WRITE, KUE_CMD_SEND_SCAN, 0, kue_fix_seg, sizeof(kue_fix_seg)); if (err) { printf("kue%d: failed to load fixup segment: %s\n", sc->kue_unit, usbd_errstr(err)); return(ENXIO); } /* Send trigger command. */ err = kue_ctl(sc, KUE_CTL_WRITE, KUE_CMD_SEND_SCAN, 0, kue_trig_seg, sizeof(kue_trig_seg)); if (err) { printf("kue%d: failed to load trigger segment: %s\n", sc->kue_unit, usbd_errstr(err)); return(ENXIO); } return(0); } Static void kue_setmulti(sc) struct kue_softc *sc; { struct ifnet *ifp; struct ifmultiaddr *ifma; int i = 0; ifp = &sc->arpcom.ac_if; if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { sc->kue_rxfilt |= KUE_RXFILT_ALLMULTI; sc->kue_rxfilt &= ~KUE_RXFILT_MULTICAST; kue_setword(sc, KUE_CMD_SET_PKT_FILTER, sc->kue_rxfilt); return; } sc->kue_rxfilt &= ~KUE_RXFILT_ALLMULTI; - for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; - ifma = ifma->ifma_link.le_next) { + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; /* * If there are too many addresses for the * internal filter, switch over to allmulti mode. 
*/ if (i == KUE_MCFILTCNT(sc)) break; bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), KUE_MCFILT(sc, i), ETHER_ADDR_LEN); i++; } if (i == KUE_MCFILTCNT(sc)) sc->kue_rxfilt |= KUE_RXFILT_ALLMULTI; else { sc->kue_rxfilt |= KUE_RXFILT_MULTICAST; kue_ctl(sc, KUE_CTL_WRITE, KUE_CMD_SET_MCAST_FILTERS, i, sc->kue_mcfilters, i * ETHER_ADDR_LEN); } kue_setword(sc, KUE_CMD_SET_PKT_FILTER, sc->kue_rxfilt); return; } /* * Issue a SET_CONFIGURATION command to reset the MAC. This should be * done after the firmware is loaded into the adapter in order to * bring it into proper operation. */ Static void kue_reset(sc) struct kue_softc *sc; { if (usbd_set_config_no(sc->kue_udev, KUE_CONFIG_NO, 0)) { printf("kue%d: getting interface handle failed\n", sc->kue_unit); } /* Wait a little while for the chip to get its brains in order. */ DELAY(1000); return; } /* * Probe for a KLSI chip. */ USB_MATCH(kue) { USB_MATCH_START(kue, uaa); struct kue_type *t; if (!uaa->iface) return(UMATCH_NONE); t = kue_devs; while(t->kue_vid) { if (uaa->vendor == t->kue_vid && uaa->product == t->kue_did) { return(UMATCH_VENDOR_PRODUCT); } t++; } return(UMATCH_NONE); } /* * Attach the interface. Allocate softc structures, do * setup and ethernet/BPF attach. */ USB_ATTACH(kue) { USB_ATTACH_START(kue, sc, uaa); char devinfo[1024]; struct ifnet *ifp; usbd_status err; usb_interface_descriptor_t *id; usb_endpoint_descriptor_t *ed; int i; bzero(sc, sizeof(struct kue_softc)); sc->kue_iface = uaa->iface; sc->kue_udev = uaa->device; sc->kue_unit = device_get_unit(self); id = usbd_get_interface_descriptor(uaa->iface); usbd_devinfo(uaa->device, 0, devinfo); device_set_desc_copy(self, devinfo); printf("%s: %s\n", USBDEVNAME(self), devinfo); /* Find endpoints. 
*/ for (i = 0; i < id->bNumEndpoints; i++) { ed = usbd_interface2endpoint_descriptor(uaa->iface, i); if (!ed) { printf("kue%d: couldn't get ep %d\n", sc->kue_unit, i); USB_ATTACH_ERROR_RETURN; } if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && (ed->bmAttributes & UE_XFERTYPE) == UE_BULK) { sc->kue_ed[KUE_ENDPT_RX] = ed->bEndpointAddress; } else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT && (ed->bmAttributes & UE_XFERTYPE) == UE_BULK) { sc->kue_ed[KUE_ENDPT_TX] = ed->bEndpointAddress; } else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && (ed->bmAttributes & UE_XFERTYPE) == UE_INTERRUPT) { sc->kue_ed[KUE_ENDPT_INTR] = ed->bEndpointAddress; } } mtx_init(&sc->kue_mtx, device_get_nameunit(self), MTX_DEF | MTX_RECURSE); KUE_LOCK(sc); /* Load the firmware into the NIC. */ if (kue_load_fw(sc)) { KUE_UNLOCK(sc); mtx_destroy(&sc->kue_mtx); USB_ATTACH_ERROR_RETURN; } /* Reset the adapter. */ kue_reset(sc); /* Read ethernet descriptor */ err = kue_ctl(sc, KUE_CTL_READ, KUE_CMD_GET_ETHER_DESCRIPTOR, 0, (char *)&sc->kue_desc, sizeof(sc->kue_desc)); sc->kue_mcfilters = malloc(KUE_MCFILTCNT(sc) * ETHER_ADDR_LEN, M_USBDEV, M_NOWAIT); /* * A KLSI chip was detected. Inform the world. */ printf("kue%d: Ethernet address: %6D\n", sc->kue_unit, sc->kue_desc.kue_macaddr, ":"); bcopy(sc->kue_desc.kue_macaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); ifp = &sc->arpcom.ac_if; ifp->if_softc = sc; ifp->if_unit = sc->kue_unit; ifp->if_name = "kue"; ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = kue_ioctl; ifp->if_output = ether_output; ifp->if_start = kue_start; ifp->if_watchdog = kue_watchdog; ifp->if_init = kue_init; ifp->if_baudrate = 10000000; ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; kue_qdat.ifp = ifp; kue_qdat.if_rxstart = kue_rxstart; /* * Call MI attach routine. 
*/ ether_ifattach(ifp, ETHER_BPF_SUPPORTED); usb_register_netisr(); sc->kue_gone = 0; KUE_UNLOCK(sc); USB_ATTACH_SUCCESS_RETURN; } Static int kue_detach(dev) device_t dev; { struct kue_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); KUE_LOCK(sc); ifp = &sc->arpcom.ac_if; sc->kue_gone = 1; if (ifp != NULL) ether_ifdetach(ifp, ETHER_BPF_SUPPORTED); if (sc->kue_ep[KUE_ENDPT_TX] != NULL) usbd_abort_pipe(sc->kue_ep[KUE_ENDPT_TX]); if (sc->kue_ep[KUE_ENDPT_RX] != NULL) usbd_abort_pipe(sc->kue_ep[KUE_ENDPT_RX]); if (sc->kue_ep[KUE_ENDPT_INTR] != NULL) usbd_abort_pipe(sc->kue_ep[KUE_ENDPT_INTR]); if (sc->kue_mcfilters != NULL) free(sc->kue_mcfilters, M_USBDEV); KUE_UNLOCK(sc); mtx_destroy(&sc->kue_mtx); return(0); } /* * Initialize an RX descriptor and attach an MBUF cluster. */ Static int kue_newbuf(sc, c, m) struct kue_softc *sc; struct kue_chain *c; struct mbuf *m; { struct mbuf *m_new = NULL; if (m == NULL) { MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("kue%d: no memory for rx list " "-- packet dropped!\n", sc->kue_unit); return(ENOBUFS); } MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { printf("kue%d: no memory for rx list " "-- packet dropped!\n", sc->kue_unit); m_freem(m_new); return(ENOBUFS); } m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; } else { m_new = m; m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; m_new->m_data = m_new->m_ext.ext_buf; } c->kue_mbuf = m_new; return(0); } Static int kue_rx_list_init(sc) struct kue_softc *sc; { struct kue_cdata *cd; struct kue_chain *c; int i; cd = &sc->kue_cdata; for (i = 0; i < KUE_RX_LIST_CNT; i++) { c = &cd->kue_rx_chain[i]; c->kue_sc = sc; c->kue_idx = i; if (kue_newbuf(sc, c, NULL) == ENOBUFS) return(ENOBUFS); if (c->kue_xfer == NULL) { c->kue_xfer = usbd_alloc_xfer(sc->kue_udev); if (c->kue_xfer == NULL) return(ENOBUFS); } } return(0); } Static int kue_tx_list_init(sc) struct kue_softc *sc; { struct kue_cdata *cd; struct kue_chain *c; int i; cd = &sc->kue_cdata; for (i = 0; i 
< KUE_TX_LIST_CNT; i++) { c = &cd->kue_tx_chain[i]; c->kue_sc = sc; c->kue_idx = i; c->kue_mbuf = NULL; if (c->kue_xfer == NULL) { c->kue_xfer = usbd_alloc_xfer(sc->kue_udev); if (c->kue_xfer == NULL) return(ENOBUFS); } c->kue_buf = malloc(KUE_BUFSZ, M_USBDEV, M_NOWAIT); if (c->kue_buf == NULL) return(ENOBUFS); } return(0); } Static void kue_rxstart(ifp) struct ifnet *ifp; { struct kue_softc *sc; struct kue_chain *c; sc = ifp->if_softc; KUE_LOCK(sc); c = &sc->kue_cdata.kue_rx_chain[sc->kue_cdata.kue_rx_prod]; if (kue_newbuf(sc, c, NULL) == ENOBUFS) { ifp->if_ierrors++; return; } /* Setup new transfer. */ usbd_setup_xfer(c->kue_xfer, sc->kue_ep[KUE_ENDPT_RX], c, mtod(c->kue_mbuf, char *), KUE_BUFSZ, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, kue_rxeof); usbd_transfer(c->kue_xfer); KUE_UNLOCK(sc); return; } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. */ Static void kue_rxeof(xfer, priv, status) usbd_xfer_handle xfer; usbd_private_handle priv; usbd_status status; { struct kue_softc *sc; struct kue_chain *c; struct mbuf *m; struct ifnet *ifp; int total_len = 0; u_int16_t len; c = priv; sc = c->kue_sc; KUE_LOCK(sc); ifp = &sc->arpcom.ac_if; if (!(ifp->if_flags & IFF_RUNNING)) { KUE_UNLOCK(sc); return; } if (status != USBD_NORMAL_COMPLETION) { if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) { KUE_UNLOCK(sc); return; } printf("kue%d: usb error on rx: %s\n", sc->kue_unit, usbd_errstr(status)); if (status == USBD_STALLED) usbd_clear_endpoint_stall(sc->kue_ep[KUE_ENDPT_RX]); goto done; } usbd_get_xfer_status(xfer, NULL, NULL, &total_len, NULL); m = c->kue_mbuf; if (total_len <= 1) goto done; len = *mtod(m, u_int16_t *); m_adj(m, sizeof(u_int16_t)); /* No errors; receive the packet. 
*/ total_len = len; if (len < sizeof(struct ether_header)) { ifp->if_ierrors++; goto done; } ifp->if_ipackets++; m->m_pkthdr.rcvif = (struct ifnet *)&kue_qdat; m->m_pkthdr.len = m->m_len = total_len; /* Put the packet on the special USB input queue. */ usb_ether_input(m); KUE_UNLOCK(sc); return; done: /* Setup new transfer. */ usbd_setup_xfer(c->kue_xfer, sc->kue_ep[KUE_ENDPT_RX], c, mtod(c->kue_mbuf, char *), KUE_BUFSZ, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, kue_rxeof); usbd_transfer(c->kue_xfer); KUE_UNLOCK(sc); return; } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. */ Static void kue_txeof(xfer, priv, status) usbd_xfer_handle xfer; usbd_private_handle priv; usbd_status status; { struct kue_softc *sc; struct kue_chain *c; struct ifnet *ifp; usbd_status err; c = priv; sc = c->kue_sc; KUE_LOCK(sc); ifp = &sc->arpcom.ac_if; ifp->if_timer = 0; ifp->if_flags &= ~IFF_OACTIVE; if (status != USBD_NORMAL_COMPLETION) { if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) { KUE_UNLOCK(sc); return; } printf("kue%d: usb error on tx: %s\n", sc->kue_unit, usbd_errstr(status)); if (status == USBD_STALLED) usbd_clear_endpoint_stall(sc->kue_ep[KUE_ENDPT_TX]); KUE_UNLOCK(sc); return; } usbd_get_xfer_status(c->kue_xfer, NULL, NULL, NULL, &err); if (c->kue_mbuf != NULL) { c->kue_mbuf->m_pkthdr.rcvif = ifp; usb_tx_done(c->kue_mbuf); c->kue_mbuf = NULL; } if (err) ifp->if_oerrors++; else ifp->if_opackets++; KUE_UNLOCK(sc); return; } Static int kue_encap(sc, m, idx) struct kue_softc *sc; struct mbuf *m; int idx; { int total_len; struct kue_chain *c; usbd_status err; c = &sc->kue_cdata.kue_tx_chain[idx]; /* * Copy the mbuf data into a contiguous buffer, leaving two * bytes at the beginning to hold the frame length. */ m_copydata(m, 0, m->m_pkthdr.len, c->kue_buf + 2); c->kue_mbuf = m; total_len = m->m_pkthdr.len + 2; total_len += 64 - (total_len % 64); /* Frame length is specified in the first 2 bytes of the buffer. 
*/ c->kue_buf[0] = (u_int8_t)m->m_pkthdr.len; c->kue_buf[1] = (u_int8_t)(m->m_pkthdr.len >> 8); usbd_setup_xfer(c->kue_xfer, sc->kue_ep[KUE_ENDPT_TX], c, c->kue_buf, total_len, 0, 10000, kue_txeof); /* Transmit */ err = usbd_transfer(c->kue_xfer); if (err != USBD_IN_PROGRESS) { kue_stop(sc); return(EIO); } sc->kue_cdata.kue_tx_cnt++; return(0); } Static void kue_start(ifp) struct ifnet *ifp; { struct kue_softc *sc; struct mbuf *m_head = NULL; sc = ifp->if_softc; KUE_LOCK(sc); if (ifp->if_flags & IFF_OACTIVE) { KUE_UNLOCK(sc); return; } IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) { KUE_UNLOCK(sc); return; } if (kue_encap(sc, m_head, 0)) { IF_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; KUE_UNLOCK(sc); return; } /* * If there's a BPF listener, bounce a copy of this frame * to him. */ if (ifp->if_bpf) bpf_mtap(ifp, m_head); ifp->if_flags |= IFF_OACTIVE; /* * Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; KUE_UNLOCK(sc); return; } Static void kue_init(xsc) void *xsc; { struct kue_softc *sc = xsc; struct ifnet *ifp = &sc->arpcom.ac_if; struct kue_chain *c; usbd_status err; int i; KUE_LOCK(sc); if (ifp->if_flags & IFF_RUNNING) { KUE_UNLOCK(sc); return; } /* Set MAC address */ kue_ctl(sc, KUE_CTL_WRITE, KUE_CMD_SET_MAC, 0, sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); sc->kue_rxfilt = KUE_RXFILT_UNICAST|KUE_RXFILT_BROADCAST; /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) sc->kue_rxfilt |= KUE_RXFILT_PROMISC; kue_setword(sc, KUE_CMD_SET_PKT_FILTER, sc->kue_rxfilt); /* I'm not sure how to tune these. */ #ifdef notdef /* * Leave this one alone for now; setting it * wrong causes lockups on some machines/controllers. */ kue_setword(sc, KUE_CMD_SET_SOFS, 1); #endif kue_setword(sc, KUE_CMD_SET_URB_SIZE, 64); /* Init TX ring. */ if (kue_tx_list_init(sc) == ENOBUFS) { printf("kue%d: tx list init failed\n", sc->kue_unit); KUE_UNLOCK(sc); return; } /* Init RX ring. 
*/ if (kue_rx_list_init(sc) == ENOBUFS) { printf("kue%d: rx list init failed\n", sc->kue_unit); KUE_UNLOCK(sc); return; } /* Load the multicast filter. */ kue_setmulti(sc); /* Open RX and TX pipes. */ err = usbd_open_pipe(sc->kue_iface, sc->kue_ed[KUE_ENDPT_RX], USBD_EXCLUSIVE_USE, &sc->kue_ep[KUE_ENDPT_RX]); if (err) { printf("kue%d: open rx pipe failed: %s\n", sc->kue_unit, usbd_errstr(err)); KUE_UNLOCK(sc); return; } err = usbd_open_pipe(sc->kue_iface, sc->kue_ed[KUE_ENDPT_TX], USBD_EXCLUSIVE_USE, &sc->kue_ep[KUE_ENDPT_TX]); if (err) { printf("kue%d: open tx pipe failed: %s\n", sc->kue_unit, usbd_errstr(err)); KUE_UNLOCK(sc); return; } /* Start up the receive pipe. */ for (i = 0; i < KUE_RX_LIST_CNT; i++) { c = &sc->kue_cdata.kue_rx_chain[i]; usbd_setup_xfer(c->kue_xfer, sc->kue_ep[KUE_ENDPT_RX], c, mtod(c->kue_mbuf, char *), KUE_BUFSZ, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, kue_rxeof); usbd_transfer(c->kue_xfer); } ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; KUE_UNLOCK(sc); return; } Static int kue_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct kue_softc *sc = ifp->if_softc; int error = 0; KUE_LOCK(sc); switch(command) { case SIOCSIFADDR: case SIOCGIFADDR: case SIOCSIFMTU: error = ether_ioctl(ifp, command, data); break; case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->kue_if_flags & IFF_PROMISC)) { sc->kue_rxfilt |= KUE_RXFILT_PROMISC; kue_setword(sc, KUE_CMD_SET_PKT_FILTER, sc->kue_rxfilt); } else if (ifp->if_flags & IFF_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->kue_if_flags & IFF_PROMISC) { sc->kue_rxfilt &= ~KUE_RXFILT_PROMISC; kue_setword(sc, KUE_CMD_SET_PKT_FILTER, sc->kue_rxfilt); } else if (!(ifp->if_flags & IFF_RUNNING)) kue_init(sc); } else { if (ifp->if_flags & IFF_RUNNING) kue_stop(sc); } sc->kue_if_flags = ifp->if_flags; error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: kue_setmulti(sc); error = 0; break; default: 
error = EINVAL; break; } KUE_UNLOCK(sc); return(error); } Static void kue_watchdog(ifp) struct ifnet *ifp; { struct kue_softc *sc; struct kue_chain *c; usbd_status stat; sc = ifp->if_softc; KUE_LOCK(sc); ifp->if_oerrors++; printf("kue%d: watchdog timeout\n", sc->kue_unit); c = &sc->kue_cdata.kue_tx_chain[0]; usbd_get_xfer_status(c->kue_xfer, NULL, NULL, NULL, &stat); kue_txeof(c->kue_xfer, c, stat); if (ifp->if_snd.ifq_head != NULL) kue_start(ifp); KUE_UNLOCK(sc); return; } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ Static void kue_stop(sc) struct kue_softc *sc; { usbd_status err; struct ifnet *ifp; int i; KUE_LOCK(sc); ifp = &sc->arpcom.ac_if; ifp->if_timer = 0; /* Stop transfers. */ if (sc->kue_ep[KUE_ENDPT_RX] != NULL) { err = usbd_abort_pipe(sc->kue_ep[KUE_ENDPT_RX]); if (err) { printf("kue%d: abort rx pipe failed: %s\n", sc->kue_unit, usbd_errstr(err)); } err = usbd_close_pipe(sc->kue_ep[KUE_ENDPT_RX]); if (err) { printf("kue%d: close rx pipe failed: %s\n", sc->kue_unit, usbd_errstr(err)); } sc->kue_ep[KUE_ENDPT_RX] = NULL; } if (sc->kue_ep[KUE_ENDPT_TX] != NULL) { err = usbd_abort_pipe(sc->kue_ep[KUE_ENDPT_TX]); if (err) { printf("kue%d: abort tx pipe failed: %s\n", sc->kue_unit, usbd_errstr(err)); } err = usbd_close_pipe(sc->kue_ep[KUE_ENDPT_TX]); if (err) { printf("kue%d: close tx pipe failed: %s\n", sc->kue_unit, usbd_errstr(err)); } sc->kue_ep[KUE_ENDPT_TX] = NULL; } if (sc->kue_ep[KUE_ENDPT_INTR] != NULL) { err = usbd_abort_pipe(sc->kue_ep[KUE_ENDPT_INTR]); if (err) { printf("kue%d: abort intr pipe failed: %s\n", sc->kue_unit, usbd_errstr(err)); } err = usbd_close_pipe(sc->kue_ep[KUE_ENDPT_INTR]); if (err) { printf("kue%d: close intr pipe failed: %s\n", sc->kue_unit, usbd_errstr(err)); } sc->kue_ep[KUE_ENDPT_INTR] = NULL; } /* Free RX resources. 
*/ for (i = 0; i < KUE_RX_LIST_CNT; i++) { if (sc->kue_cdata.kue_rx_chain[i].kue_buf != NULL) { free(sc->kue_cdata.kue_rx_chain[i].kue_buf, M_USBDEV); sc->kue_cdata.kue_rx_chain[i].kue_buf = NULL; } if (sc->kue_cdata.kue_rx_chain[i].kue_mbuf != NULL) { m_freem(sc->kue_cdata.kue_rx_chain[i].kue_mbuf); sc->kue_cdata.kue_rx_chain[i].kue_mbuf = NULL; } if (sc->kue_cdata.kue_rx_chain[i].kue_xfer != NULL) { usbd_free_xfer(sc->kue_cdata.kue_rx_chain[i].kue_xfer); sc->kue_cdata.kue_rx_chain[i].kue_xfer = NULL; } } /* Free TX resources. */ for (i = 0; i < KUE_TX_LIST_CNT; i++) { if (sc->kue_cdata.kue_tx_chain[i].kue_buf != NULL) { free(sc->kue_cdata.kue_tx_chain[i].kue_buf, M_USBDEV); sc->kue_cdata.kue_tx_chain[i].kue_buf = NULL; } if (sc->kue_cdata.kue_tx_chain[i].kue_mbuf != NULL) { m_freem(sc->kue_cdata.kue_tx_chain[i].kue_mbuf); sc->kue_cdata.kue_tx_chain[i].kue_mbuf = NULL; } if (sc->kue_cdata.kue_tx_chain[i].kue_xfer != NULL) { usbd_free_xfer(sc->kue_cdata.kue_tx_chain[i].kue_xfer); sc->kue_cdata.kue_tx_chain[i].kue_xfer = NULL; } } ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); KUE_UNLOCK(sc); return; } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ Static void kue_shutdown(dev) device_t dev; { struct kue_softc *sc; sc = device_get_softc(dev); kue_stop(sc); return; } Index: head/sys/dev/vr/if_vr.c =================================================================== --- head/sys/dev/vr/if_vr.c (revision 71961) +++ head/sys/dev/vr/if_vr.c (revision 71962) @@ -1,1649 +1,1648 @@ /* * Copyright (c) 1997, 1998 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * VIA Rhine fast ethernet PCI NIC driver * * Supports various network adapters based on the VIA Rhine * and Rhine II PCI controllers, including the D-Link DFE530TX. * Datasheets are available at http://www.via.com.tw. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The VIA Rhine controllers are similar in some respects to the * the DEC tulip chips, except less complicated. The controller * uses an MII bus and an external physical layer interface. The * receiver has a one entry perfect filter and a 64-bit hash table * multicast filter. 
Transmit and receive descriptors are similar * to the tulip. * * The Rhine has a serious flaw in its transmit DMA mechanism: * transmit buffers must be longword aligned. Unfortunately, * FreeBSD doesn't guarantee that mbufs will be filled in starting * at longword boundaries, so we have to do a buffer copy before * transmission. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include #include #include #include #include #include #include #define VR_USEIOSPACE #include MODULE_DEPEND(vr, miibus, 1, 1, 1); /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" #ifndef lint static const char rcsid[] = "$FreeBSD$"; #endif /* * Various supported device vendors/types and their names. */ static struct vr_type vr_devs[] = { { VIA_VENDORID, VIA_DEVICEID_RHINE, "VIA VT3043 Rhine I 10/100BaseTX" }, { VIA_VENDORID, VIA_DEVICEID_RHINE_II, "VIA VT86C100A Rhine II 10/100BaseTX" }, { VIA_VENDORID, VIA_DEVICEID_RHINE_II_2, "VIA VT6102 Rhine II 10/100BaseTX" }, { DELTA_VENDORID, DELTA_DEVICEID_RHINE_II, "Delta Electronics Rhine II 10/100BaseTX" }, { ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II, "Addtron Technology Rhine II 10/100BaseTX" }, { 0, 0, NULL } }; static int vr_probe __P((device_t)); static int vr_attach __P((device_t)); static int vr_detach __P((device_t)); static int vr_newbuf __P((struct vr_softc *, struct vr_chain_onefrag *, struct mbuf *)); static int vr_encap __P((struct vr_softc *, struct vr_chain *, struct mbuf * )); static void vr_rxeof __P((struct vr_softc *)); static void vr_rxeoc __P((struct vr_softc *)); static void vr_txeof __P((struct vr_softc *)); static void vr_txeoc __P((struct vr_softc *)); static void vr_tick __P((void *)); static void vr_intr __P((void *)); static void vr_start __P((struct ifnet *)); static int vr_ioctl __P((struct ifnet *, u_long, caddr_t)); 
static void vr_init __P((void *)); static void vr_stop __P((struct vr_softc *)); static void vr_watchdog __P((struct ifnet *)); static void vr_shutdown __P((device_t)); static int vr_ifmedia_upd __P((struct ifnet *)); static void vr_ifmedia_sts __P((struct ifnet *, struct ifmediareq *)); static void vr_mii_sync __P((struct vr_softc *)); static void vr_mii_send __P((struct vr_softc *, u_int32_t, int)); static int vr_mii_readreg __P((struct vr_softc *, struct vr_mii_frame *)); static int vr_mii_writereg __P((struct vr_softc *, struct vr_mii_frame *)); static int vr_miibus_readreg __P((device_t, int, int)); static int vr_miibus_writereg __P((device_t, int, int, int)); static void vr_miibus_statchg __P((device_t)); static void vr_setcfg __P((struct vr_softc *, int)); static u_int8_t vr_calchash __P((u_int8_t *)); static void vr_setmulti __P((struct vr_softc *)); static void vr_reset __P((struct vr_softc *)); static int vr_list_rx_init __P((struct vr_softc *)); static int vr_list_tx_init __P((struct vr_softc *)); #ifdef VR_USEIOSPACE #define VR_RES SYS_RES_IOPORT #define VR_RID VR_PCI_LOIO #else #define VR_RES SYS_RES_MEMORY #define VR_RID VR_PCI_LOMEM #endif static device_method_t vr_methods[] = { /* Device interface */ DEVMETHOD(device_probe, vr_probe), DEVMETHOD(device_attach, vr_attach), DEVMETHOD(device_detach, vr_detach), DEVMETHOD(device_shutdown, vr_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, vr_miibus_readreg), DEVMETHOD(miibus_writereg, vr_miibus_writereg), DEVMETHOD(miibus_statchg, vr_miibus_statchg), { 0, 0 } }; static driver_t vr_driver = { "vr", vr_methods, sizeof(struct vr_softc) }; static devclass_t vr_devclass; DRIVER_MODULE(if_vr, pci, vr_driver, vr_devclass, 0, 0); DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0); #define VR_SETBIT(sc, reg, x) \ CSR_WRITE_1(sc, reg, \ CSR_READ_1(sc, reg) | x) 
#define VR_CLRBIT(sc, reg, x) \ CSR_WRITE_1(sc, reg, \ CSR_READ_1(sc, reg) & ~x) #define VR_SETBIT16(sc, reg, x) \ CSR_WRITE_2(sc, reg, \ CSR_READ_2(sc, reg) | x) #define VR_CLRBIT16(sc, reg, x) \ CSR_WRITE_2(sc, reg, \ CSR_READ_2(sc, reg) & ~x) #define VR_SETBIT32(sc, reg, x) \ CSR_WRITE_4(sc, reg, \ CSR_READ_4(sc, reg) | x) #define VR_CLRBIT32(sc, reg, x) \ CSR_WRITE_4(sc, reg, \ CSR_READ_4(sc, reg) & ~x) #define SIO_SET(x) \ CSR_WRITE_1(sc, VR_MIICMD, \ CSR_READ_1(sc, VR_MIICMD) | x) #define SIO_CLR(x) \ CSR_WRITE_1(sc, VR_MIICMD, \ CSR_READ_1(sc, VR_MIICMD) & ~x) /* * Sync the PHYs by setting data bit and strobing the clock 32 times. */ static void vr_mii_sync(sc) struct vr_softc *sc; { register int i; SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN); for (i = 0; i < 32; i++) { SIO_SET(VR_MIICMD_CLK); DELAY(1); SIO_CLR(VR_MIICMD_CLK); DELAY(1); } return; } /* * Clock a series of bits through the MII. */ static void vr_mii_send(sc, bits, cnt) struct vr_softc *sc; u_int32_t bits; int cnt; { int i; SIO_CLR(VR_MIICMD_CLK); for (i = (0x1 << (cnt - 1)); i; i >>= 1) { if (bits & i) { SIO_SET(VR_MIICMD_DATAIN); } else { SIO_CLR(VR_MIICMD_DATAIN); } DELAY(1); SIO_CLR(VR_MIICMD_CLK); DELAY(1); SIO_SET(VR_MIICMD_CLK); } } /* * Read an PHY register through the MII. */ static int vr_mii_readreg(sc, frame) struct vr_softc *sc; struct vr_mii_frame *frame; { int i, ack; VR_LOCK(sc); /* * Set up frame for RX. */ frame->mii_stdelim = VR_MII_STARTDELIM; frame->mii_opcode = VR_MII_READOP; frame->mii_turnaround = 0; frame->mii_data = 0; CSR_WRITE_1(sc, VR_MIICMD, 0); VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM); /* * Turn on data xmit. */ SIO_SET(VR_MIICMD_DIR); vr_mii_sync(sc); /* * Send command/address info. */ vr_mii_send(sc, frame->mii_stdelim, 2); vr_mii_send(sc, frame->mii_opcode, 2); vr_mii_send(sc, frame->mii_phyaddr, 5); vr_mii_send(sc, frame->mii_regaddr, 5); /* Idle bit */ SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN)); DELAY(1); SIO_SET(VR_MIICMD_CLK); DELAY(1); /* Turn off xmit. 
*/ SIO_CLR(VR_MIICMD_DIR); /* Check for ack */ SIO_CLR(VR_MIICMD_CLK); DELAY(1); SIO_SET(VR_MIICMD_CLK); DELAY(1); ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT; /* * Now try reading data bits. If the ack failed, we still * need to clock through 16 cycles to keep the PHY(s) in sync. */ if (ack) { for(i = 0; i < 16; i++) { SIO_CLR(VR_MIICMD_CLK); DELAY(1); SIO_SET(VR_MIICMD_CLK); DELAY(1); } goto fail; } for (i = 0x8000; i; i >>= 1) { SIO_CLR(VR_MIICMD_CLK); DELAY(1); if (!ack) { if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT) frame->mii_data |= i; DELAY(1); } SIO_SET(VR_MIICMD_CLK); DELAY(1); } fail: SIO_CLR(VR_MIICMD_CLK); DELAY(1); SIO_SET(VR_MIICMD_CLK); DELAY(1); VR_UNLOCK(sc); if (ack) return(1); return(0); } /* * Write to a PHY register through the MII. */ static int vr_mii_writereg(sc, frame) struct vr_softc *sc; struct vr_mii_frame *frame; { VR_LOCK(sc); CSR_WRITE_1(sc, VR_MIICMD, 0); VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM); /* * Set up frame for TX. */ frame->mii_stdelim = VR_MII_STARTDELIM; frame->mii_opcode = VR_MII_WRITEOP; frame->mii_turnaround = VR_MII_TURNAROUND; /* * Turn on data output. */ SIO_SET(VR_MIICMD_DIR); vr_mii_sync(sc); vr_mii_send(sc, frame->mii_stdelim, 2); vr_mii_send(sc, frame->mii_opcode, 2); vr_mii_send(sc, frame->mii_phyaddr, 5); vr_mii_send(sc, frame->mii_regaddr, 5); vr_mii_send(sc, frame->mii_turnaround, 2); vr_mii_send(sc, frame->mii_data, 16); /* Idle bit. */ SIO_SET(VR_MIICMD_CLK); DELAY(1); SIO_CLR(VR_MIICMD_CLK); DELAY(1); /* * Turn off xmit. 
*/ SIO_CLR(VR_MIICMD_DIR); VR_UNLOCK(sc); return(0); } static int vr_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct vr_softc *sc; struct vr_mii_frame frame; sc = device_get_softc(dev); bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; vr_mii_readreg(sc, &frame); return(frame.mii_data); } static int vr_miibus_writereg(dev, phy, reg, data) device_t dev; u_int16_t phy, reg, data; { struct vr_softc *sc; struct vr_mii_frame frame; sc = device_get_softc(dev); bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; frame.mii_data = data; vr_mii_writereg(sc, &frame); return(0); } static void vr_miibus_statchg(dev) device_t dev; { struct vr_softc *sc; struct mii_data *mii; sc = device_get_softc(dev); VR_LOCK(sc); mii = device_get_softc(sc->vr_miibus); vr_setcfg(sc, mii->mii_media_active); VR_UNLOCK(sc); return; } /* * Calculate CRC of a multicast group address, return the lower 6 bits. */ static u_int8_t vr_calchash(addr) u_int8_t *addr; { u_int32_t crc, carry; int i, j; u_int8_t c; /* Compute CRC for the address value. */ crc = 0xFFFFFFFF; /* initial value */ for (i = 0; i < 6; i++) { c = *(addr + i); for (j = 0; j < 8; j++) { carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01); crc <<= 1; c >>= 1; if (carry) crc = (crc ^ 0x04c11db6) | carry; } } /* return the filter bit position */ return((crc >> 26) & 0x0000003F); } /* * Program the 64-bit multicast hash filter. 
*/ static void vr_setmulti(sc) struct vr_softc *sc; { struct ifnet *ifp; int h = 0; u_int32_t hashes[2] = { 0, 0 }; struct ifmultiaddr *ifma; u_int8_t rxfilt; int mcnt = 0; ifp = &sc->arpcom.ac_if; rxfilt = CSR_READ_1(sc, VR_RXCFG); if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { rxfilt |= VR_RXCFG_RX_MULTI; CSR_WRITE_1(sc, VR_RXCFG, rxfilt); CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF); CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF); return; } /* first, zot all the existing hash bits */ CSR_WRITE_4(sc, VR_MAR0, 0); CSR_WRITE_4(sc, VR_MAR1, 0); /* now program new ones */ - for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; - ifma = ifma->ifma_link.le_next) { + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = vr_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); mcnt++; } if (mcnt) rxfilt |= VR_RXCFG_RX_MULTI; else rxfilt &= ~VR_RXCFG_RX_MULTI; CSR_WRITE_4(sc, VR_MAR0, hashes[0]); CSR_WRITE_4(sc, VR_MAR1, hashes[1]); CSR_WRITE_1(sc, VR_RXCFG, rxfilt); return; } /* * In order to fiddle with the * 'full-duplex' and '100Mbps' bits in the netconfig register, we * first have to put the transmit and/or receive logic in the idle state. 
*/ static void vr_setcfg(sc, media) struct vr_softc *sc; int media; { int restart = 0; if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) { restart = 1; VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON)); } if ((media & IFM_GMASK) == IFM_FDX) VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX); else VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX); if (restart) VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON); return; } static void vr_reset(sc) struct vr_softc *sc; { register int i; VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET); for (i = 0; i < VR_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET)) break; } if (i == VR_TIMEOUT) printf("vr%d: reset never completed!\n", sc->vr_unit); /* Wait a little while for the chip to get its brains in order. */ DELAY(1000); return; } /* * Probe for a VIA Rhine chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int vr_probe(dev) device_t dev; { struct vr_type *t; t = vr_devs; while(t->vr_name != NULL) { if ((pci_get_vendor(dev) == t->vr_vid) && (pci_get_device(dev) == t->vr_did)) { device_set_desc(dev, t->vr_name); return(0); } t++; } return(ENXIO); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int vr_attach(dev) device_t dev; { int i; u_char eaddr[ETHER_ADDR_LEN]; u_int32_t command; struct vr_softc *sc; struct ifnet *ifp; int unit, error = 0, rid; sc = device_get_softc(dev); unit = device_get_unit(dev); bzero(sc, sizeof(struct vr_softc *)); mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_DEF | MTX_RECURSE); VR_LOCK(sc); /* * Handle power management nonsense. */ command = pci_read_config(dev, VR_PCI_CAPID, 4) & 0x000000FF; if (command == 0x01) { command = pci_read_config(dev, VR_PCI_PWRMGMTCTRL, 4); if (command & VR_PSTATE_MASK) { u_int32_t iobase, membase, irq; /* Save important PCI config data. 
*/ iobase = pci_read_config(dev, VR_PCI_LOIO, 4); membase = pci_read_config(dev, VR_PCI_LOMEM, 4); irq = pci_read_config(dev, VR_PCI_INTLINE, 4); /* Reset the power state. */ printf("vr%d: chip is in D%d power mode " "-- setting to D0\n", unit, command & VR_PSTATE_MASK); command &= 0xFFFFFFFC; pci_write_config(dev, VR_PCI_PWRMGMTCTRL, command, 4); /* Restore PCI config data. */ pci_write_config(dev, VR_PCI_LOIO, iobase, 4); pci_write_config(dev, VR_PCI_LOMEM, membase, 4); pci_write_config(dev, VR_PCI_INTLINE, irq, 4); } } /* * Map control/status registers. */ command = pci_read_config(dev, PCIR_COMMAND, 4); command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN); pci_write_config(dev, PCIR_COMMAND, command, 4); command = pci_read_config(dev, PCIR_COMMAND, 4); #ifdef VR_USEIOSPACE if (!(command & PCIM_CMD_PORTEN)) { printf("vr%d: failed to enable I/O ports!\n", unit); free(sc, M_DEVBUF); goto fail; } #else if (!(command & PCIM_CMD_MEMEN)) { printf("vr%d: failed to enable memory mapping!\n", unit); goto fail; } #endif rid = VR_RID; sc->vr_res = bus_alloc_resource(dev, VR_RES, &rid, 0, ~0, 1, RF_ACTIVE); if (sc->vr_res == NULL) { printf("vr%d: couldn't map ports/memory\n", unit); error = ENXIO; goto fail; } sc->vr_btag = rman_get_bustag(sc->vr_res); sc->vr_bhandle = rman_get_bushandle(sc->vr_res); /* Allocate interrupt */ rid = 0; sc->vr_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE); if (sc->vr_irq == NULL) { printf("vr%d: couldn't map interrupt\n", unit); bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res); error = ENXIO; goto fail; } error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET, vr_intr, sc, &sc->vr_intrhand); if (error) { bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq); bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res); printf("vr%d: couldn't set up irq\n", unit); goto fail; } /* Reset the adapter. */ vr_reset(sc); /* * Get station address. 
The way the Rhine chips work, * you're not allowed to directly access the EEPROM once * they've been programmed a special way. Consequently, * we need to read the node address from the PAR0 and PAR1 * registers. */ VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD); DELAY(200); for (i = 0; i < ETHER_ADDR_LEN; i++) eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i); /* * A Rhine chip was detected. Inform the world. */ printf("vr%d: Ethernet address: %6D\n", unit, eaddr, ":"); sc->vr_unit = unit; bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); sc->vr_ldata = contigmalloc(sizeof(struct vr_list_data), M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc->vr_ldata == NULL) { printf("vr%d: no memory for list buffers!\n", unit); bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq); bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res); error = ENXIO; goto fail; } bzero(sc->vr_ldata, sizeof(struct vr_list_data)); ifp = &sc->arpcom.ac_if; ifp->if_softc = sc; ifp->if_unit = unit; ifp->if_name = "vr"; ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = vr_ioctl; ifp->if_output = ether_output; ifp->if_start = vr_start; ifp->if_watchdog = vr_watchdog; ifp->if_init = vr_init; ifp->if_baudrate = 10000000; ifp->if_snd.ifq_maxlen = VR_TX_LIST_CNT - 1; /* * Do MII setup. */ if (mii_phy_probe(dev, &sc->vr_miibus, vr_ifmedia_upd, vr_ifmedia_sts)) { printf("vr%d: MII without any phy!\n", sc->vr_unit); bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq); bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res); contigfree(sc->vr_ldata, sizeof(struct vr_list_data), M_DEVBUF); error = ENXIO; goto fail; } callout_handle_init(&sc->vr_stat_ch); /* * Call MI attach routine. 
*/ ether_ifattach(ifp, ETHER_BPF_SUPPORTED); VR_UNLOCK(sc); return(0); fail: VR_UNLOCK(sc); mtx_destroy(&sc->vr_mtx); return(error); } static int vr_detach(dev) device_t dev; { struct vr_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); VR_LOCK(sc); ifp = &sc->arpcom.ac_if; vr_stop(sc); ether_ifdetach(ifp, ETHER_BPF_SUPPORTED); bus_generic_detach(dev); device_delete_child(dev, sc->vr_miibus); bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq); bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res); contigfree(sc->vr_ldata, sizeof(struct vr_list_data), M_DEVBUF); VR_UNLOCK(sc); mtx_destroy(&sc->vr_mtx); return(0); } /* * Initialize the transmit descriptors. */ static int vr_list_tx_init(sc) struct vr_softc *sc; { struct vr_chain_data *cd; struct vr_list_data *ld; int i; cd = &sc->vr_cdata; ld = sc->vr_ldata; for (i = 0; i < VR_TX_LIST_CNT; i++) { cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i]; if (i == (VR_TX_LIST_CNT - 1)) cd->vr_tx_chain[i].vr_nextdesc = &cd->vr_tx_chain[0]; else cd->vr_tx_chain[i].vr_nextdesc = &cd->vr_tx_chain[i + 1]; } cd->vr_tx_free = &cd->vr_tx_chain[0]; cd->vr_tx_tail = cd->vr_tx_head = NULL; return(0); } /* * Initialize the RX descriptors and allocate mbufs for them. Note that * we arrange the descriptors in a closed ring, so that the last descriptor * points back to the first. 
*/ static int vr_list_rx_init(sc) struct vr_softc *sc; { struct vr_chain_data *cd; struct vr_list_data *ld; int i; cd = &sc->vr_cdata; ld = sc->vr_ldata; for (i = 0; i < VR_RX_LIST_CNT; i++) { cd->vr_rx_chain[i].vr_ptr = (struct vr_desc *)&ld->vr_rx_list[i]; if (vr_newbuf(sc, &cd->vr_rx_chain[i], NULL) == ENOBUFS) return(ENOBUFS); if (i == (VR_RX_LIST_CNT - 1)) { cd->vr_rx_chain[i].vr_nextdesc = &cd->vr_rx_chain[0]; ld->vr_rx_list[i].vr_next = vtophys(&ld->vr_rx_list[0]); } else { cd->vr_rx_chain[i].vr_nextdesc = &cd->vr_rx_chain[i + 1]; ld->vr_rx_list[i].vr_next = vtophys(&ld->vr_rx_list[i + 1]); } } cd->vr_rx_head = &cd->vr_rx_chain[0]; return(0); } /* * Initialize an RX descriptor and attach an MBUF cluster. * Note: the length fields are only 11 bits wide, which means the * largest size we can specify is 2047. This is important because * MCLBYTES is 2048, so we have to subtract one otherwise we'll * overflow the field and make a mess. */ static int vr_newbuf(sc, c, m) struct vr_softc *sc; struct vr_chain_onefrag *c; struct mbuf *m; { struct mbuf *m_new = NULL; if (m == NULL) { MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("vr%d: no memory for rx list " "-- packet dropped!\n", sc->vr_unit); return(ENOBUFS); } MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { printf("vr%d: no memory for rx list " "-- packet dropped!\n", sc->vr_unit); m_freem(m_new); return(ENOBUFS); } m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; } else { m_new = m; m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; m_new->m_data = m_new->m_ext.ext_buf; } m_adj(m_new, sizeof(u_int64_t)); c->vr_mbuf = m_new; c->vr_ptr->vr_status = VR_RXSTAT; c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t)); c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN; return(0); } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. 
*/ static void vr_rxeof(sc) struct vr_softc *sc; { struct ether_header *eh; struct mbuf *m; struct ifnet *ifp; struct vr_chain_onefrag *cur_rx; int total_len = 0; u_int32_t rxstat; ifp = &sc->arpcom.ac_if; while(!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) & VR_RXSTAT_OWN)) { struct mbuf *m0 = NULL; cur_rx = sc->vr_cdata.vr_rx_head; sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc; m = cur_rx->vr_mbuf; /* * If an error occurs, update stats, clear the * status word and leave the mbuf cluster in place: * it should simply get re-used next time this descriptor * comes up in the ring. */ if (rxstat & VR_RXSTAT_RXERR) { ifp->if_ierrors++; printf("vr%d: rx error: ", sc->vr_unit); switch(rxstat & 0x000000FF) { case VR_RXSTAT_CRCERR: printf("crc error\n"); break; case VR_RXSTAT_FRAMEALIGNERR: printf("frame alignment error\n"); break; case VR_RXSTAT_FIFOOFLOW: printf("FIFO overflow\n"); break; case VR_RXSTAT_GIANT: printf("received giant packet\n"); break; case VR_RXSTAT_RUNT: printf("received runt packet\n"); break; case VR_RXSTAT_BUSERR: printf("system bus error\n"); break; case VR_RXSTAT_BUFFERR: printf("rx buffer error\n"); break; default: printf("unknown rx error\n"); break; } vr_newbuf(sc, cur_rx, m); continue; } /* No errors; receive the packet. */ total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status); /* * XXX The VIA Rhine chip includes the CRC with every * received frame, and there's no way to turn this * behavior off (at least, I can't find anything in * the manual that explains how to do it) so we have * to trim off the CRC manually. */ total_len -= ETHER_CRC_LEN; m0 = m_devget(mtod(m, char *) - ETHER_ALIGN, total_len + ETHER_ALIGN, 0, ifp, NULL); vr_newbuf(sc, cur_rx, m); if (m0 == NULL) { ifp->if_ierrors++; continue; } m_adj(m0, ETHER_ALIGN); m = m0; ifp->if_ipackets++; eh = mtod(m, struct ether_header *); /* Remove header from mbuf and pass it on. 
*/ m_adj(m, sizeof(struct ether_header)); ether_input(ifp, eh, m); } return; } void vr_rxeoc(sc) struct vr_softc *sc; { vr_rxeof(sc); VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON); CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr)); VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON); VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO); return; } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. */ static void vr_txeof(sc) struct vr_softc *sc; { struct vr_chain *cur_tx; struct ifnet *ifp; ifp = &sc->arpcom.ac_if; /* Clear the timeout timer. */ ifp->if_timer = 0; /* Sanity check. */ if (sc->vr_cdata.vr_tx_head == NULL) return; /* * Go through our tx list and free mbufs for those * frames that have been transmitted. */ while(sc->vr_cdata.vr_tx_head->vr_mbuf != NULL) { u_int32_t txstat; cur_tx = sc->vr_cdata.vr_tx_head; txstat = cur_tx->vr_ptr->vr_status; if (txstat & VR_TXSTAT_OWN) break; if (txstat & VR_TXSTAT_ERRSUM) { ifp->if_oerrors++; if (txstat & VR_TXSTAT_DEFER) ifp->if_collisions++; if (txstat & VR_TXSTAT_LATECOLL) ifp->if_collisions++; } ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3; ifp->if_opackets++; if (cur_tx->vr_mbuf != NULL) { m_freem(cur_tx->vr_mbuf); cur_tx->vr_mbuf = NULL; } if (sc->vr_cdata.vr_tx_head == sc->vr_cdata.vr_tx_tail) { sc->vr_cdata.vr_tx_head = NULL; sc->vr_cdata.vr_tx_tail = NULL; break; } sc->vr_cdata.vr_tx_head = cur_tx->vr_nextdesc; } return; } /* * TX 'end of channel' interrupt handler. 
*/ static void vr_txeoc(sc) struct vr_softc *sc; { struct ifnet *ifp; ifp = &sc->arpcom.ac_if; ifp->if_timer = 0; if (sc->vr_cdata.vr_tx_head == NULL) { ifp->if_flags &= ~IFF_OACTIVE; sc->vr_cdata.vr_tx_tail = NULL; } return; } static void vr_tick(xsc) void *xsc; { struct vr_softc *sc; struct mii_data *mii; sc = xsc; VR_LOCK(sc); mii = device_get_softc(sc->vr_miibus); mii_tick(mii); sc->vr_stat_ch = timeout(vr_tick, sc, hz); VR_UNLOCK(sc); return; } static void vr_intr(arg) void *arg; { struct vr_softc *sc; struct ifnet *ifp; u_int16_t status; sc = arg; VR_LOCK(sc); ifp = &sc->arpcom.ac_if; /* Supress unwanted interrupts. */ if (!(ifp->if_flags & IFF_UP)) { vr_stop(sc); VR_UNLOCK(sc); return; } /* Disable interrupts. */ CSR_WRITE_2(sc, VR_IMR, 0x0000); for (;;) { status = CSR_READ_2(sc, VR_ISR); if (status) CSR_WRITE_2(sc, VR_ISR, status); if ((status & VR_INTRS) == 0) break; if (status & VR_ISR_RX_OK) vr_rxeof(sc); if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) || (status & VR_ISR_RX_NOBUF) || (status & VR_ISR_RX_OFLOW) || (status & VR_ISR_RX_DROPPED)) { vr_rxeof(sc); vr_rxeoc(sc); } if (status & VR_ISR_TX_OK) { vr_txeof(sc); vr_txeoc(sc); } if ((status & VR_ISR_TX_UNDERRUN)||(status & VR_ISR_TX_ABRT)){ ifp->if_oerrors++; vr_txeof(sc); if (sc->vr_cdata.vr_tx_head != NULL) { VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON); VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO); } } if (status & VR_ISR_BUSERR) { vr_reset(sc); vr_init(sc); } } /* Re-enable interrupts. */ CSR_WRITE_2(sc, VR_IMR, VR_INTRS); if (ifp->if_snd.ifq_head != NULL) { vr_start(ifp); } VR_UNLOCK(sc); return; } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. 
*/ static int vr_encap(sc, c, m_head) struct vr_softc *sc; struct vr_chain *c; struct mbuf *m_head; { int frag = 0; struct vr_desc *f = NULL; int total_len; struct mbuf *m; m = m_head; total_len = 0; /* * The VIA Rhine wants packet buffers to be longword * aligned, but very often our mbufs aren't. Rather than * waste time trying to decide when to copy and when not * to copy, just do it all the time. */ if (m != NULL) { struct mbuf *m_new = NULL; MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("vr%d: no memory for tx list\n", sc->vr_unit); return(1); } if (m_head->m_pkthdr.len > MHLEN) { MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { m_freem(m_new); printf("vr%d: no memory for tx list\n", sc->vr_unit); return(1); } } m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t)); m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len; m_freem(m_head); m_head = m_new; /* * The Rhine chip doesn't auto-pad, so we have to make * sure to pad short frames out to the minimum frame length * ourselves. */ if (m_head->m_len < VR_MIN_FRAMELEN) { m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len; m_new->m_len = m_new->m_pkthdr.len; } f = c->vr_ptr; f->vr_data = vtophys(mtod(m_new, caddr_t)); f->vr_ctl = total_len = m_new->m_len; f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG; f->vr_status = 0; frag = 1; } c->vr_mbuf = m_head; c->vr_ptr->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT; c->vr_ptr->vr_next = vtophys(c->vr_nextdesc->vr_ptr); return(0); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit lists. We also save a * copy of the pointers since the transmit list fragment pointers are * physical addresses. 
*/ static void vr_start(ifp) struct ifnet *ifp; { struct vr_softc *sc; struct mbuf *m_head = NULL; struct vr_chain *cur_tx = NULL, *start_tx; sc = ifp->if_softc; VR_LOCK(sc); if (ifp->if_flags & IFF_OACTIVE) { VR_UNLOCK(sc); return; } /* * Check for an available queue slot. If there are none, * punt. */ if (sc->vr_cdata.vr_tx_free->vr_mbuf != NULL) { ifp->if_flags |= IFF_OACTIVE; return; } start_tx = sc->vr_cdata.vr_tx_free; while(sc->vr_cdata.vr_tx_free->vr_mbuf == NULL) { IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* Pick a descriptor off the free list. */ cur_tx = sc->vr_cdata.vr_tx_free; sc->vr_cdata.vr_tx_free = cur_tx->vr_nextdesc; /* Pack the data into the descriptor. */ if (vr_encap(sc, cur_tx, m_head)) { IF_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; cur_tx = NULL; break; } if (cur_tx != start_tx) VR_TXOWN(cur_tx) = VR_TXSTAT_OWN; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ if (ifp->if_bpf) bpf_mtap(ifp, cur_tx->vr_mbuf); VR_TXOWN(cur_tx) = VR_TXSTAT_OWN; VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/VR_CMD_TX_GO); } /* * If there are no frames queued, bail. */ if (cur_tx == NULL) { VR_UNLOCK(sc); return; } sc->vr_cdata.vr_tx_tail = cur_tx; if (sc->vr_cdata.vr_tx_head == NULL) sc->vr_cdata.vr_tx_head = start_tx; /* * Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; VR_UNLOCK(sc); return; } static void vr_init(xsc) void *xsc; { struct vr_softc *sc = xsc; struct ifnet *ifp = &sc->arpcom.ac_if; struct mii_data *mii; VR_LOCK(sc); mii = device_get_softc(sc->vr_miibus); /* * Cancel pending I/O and free all RX/TX buffers. */ vr_stop(sc); vr_reset(sc); VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH); VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD); VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH); VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD); /* Init circular RX list. 
*/ if (vr_list_rx_init(sc) == ENOBUFS) { printf("vr%d: initialization failed: no " "memory for rx buffers\n", sc->vr_unit); vr_stop(sc); VR_UNLOCK(sc); return; } /* * Init tx descriptors. */ vr_list_tx_init(sc); /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC); else VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC); /* Set capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD); else VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD); /* * Program the multicast filter, if necessary. */ vr_setmulti(sc); /* * Load the address of the RX list. */ CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr)); /* Enable receiver and transmitter. */ CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START| VR_CMD_TX_ON|VR_CMD_RX_ON| VR_CMD_RX_GO); CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0])); /* * Enable interrupts. */ CSR_WRITE_2(sc, VR_ISR, 0xFFFF); CSR_WRITE_2(sc, VR_IMR, VR_INTRS); mii_mediachg(mii); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; sc->vr_stat_ch = timeout(vr_tick, sc, hz); VR_UNLOCK(sc); return; } /* * Set media options. */ static int vr_ifmedia_upd(ifp) struct ifnet *ifp; { struct vr_softc *sc; sc = ifp->if_softc; if (ifp->if_flags & IFF_UP) vr_init(sc); return(0); } /* * Report current media status. 
*/ static void vr_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct vr_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->vr_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; return; } static int vr_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct vr_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int error = 0; VR_LOCK(sc); switch(command) { case SIOCSIFADDR: case SIOCGIFADDR: case SIOCSIFMTU: error = ether_ioctl(ifp, command, data); break; case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { vr_init(sc); } else { if (ifp->if_flags & IFF_RUNNING) vr_stop(sc); } error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: vr_setmulti(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->vr_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; default: error = EINVAL; break; } VR_UNLOCK(sc); return(error); } static void vr_watchdog(ifp) struct ifnet *ifp; { struct vr_softc *sc; sc = ifp->if_softc; VR_LOCK(sc); ifp->if_oerrors++; printf("vr%d: watchdog timeout\n", sc->vr_unit); vr_stop(sc); vr_reset(sc); vr_init(sc); if (ifp->if_snd.ifq_head != NULL) vr_start(ifp); VR_UNLOCK(sc); return; } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void vr_stop(sc) struct vr_softc *sc; { register int i; struct ifnet *ifp; VR_LOCK(sc); ifp = &sc->arpcom.ac_if; ifp->if_timer = 0; untimeout(vr_tick, sc, sc->vr_stat_ch); VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP); VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON)); CSR_WRITE_2(sc, VR_IMR, 0x0000); CSR_WRITE_4(sc, VR_TXADDR, 0x00000000); CSR_WRITE_4(sc, VR_RXADDR, 0x00000000); /* * Free data in the RX lists. 
*/ for (i = 0; i < VR_RX_LIST_CNT; i++) { if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) { m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf); sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL; } } bzero((char *)&sc->vr_ldata->vr_rx_list, sizeof(sc->vr_ldata->vr_rx_list)); /* * Free the TX list buffers. */ for (i = 0; i < VR_TX_LIST_CNT; i++) { if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) { m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf); sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL; } } bzero((char *)&sc->vr_ldata->vr_tx_list, sizeof(sc->vr_ldata->vr_tx_list)); ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); VR_UNLOCK(sc); return; } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static void vr_shutdown(dev) device_t dev; { struct vr_softc *sc; sc = device_get_softc(dev); vr_stop(sc); return; } Index: head/sys/dev/wi/if_wi.c =================================================================== --- head/sys/dev/wi/if_wi.c (revision 71961) +++ head/sys/dev/wi/if_wi.c (revision 71962) @@ -1,1799 +1,1798 @@ /* * Copyright (c) 1997, 1998, 1999 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. 
Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Lucent WaveLAN/IEEE 802.11 PCMCIA driver for FreeBSD.
 *
 * Written by Bill Paul
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The WaveLAN/IEEE adapter is the second generation of the WaveLAN
 * from Lucent. Unlike the older cards, the new ones are programmed
 * entirely via a firmware-driven controller called the Hermes.
 * Unfortunately, Lucent will not release the Hermes programming manual
 * without an NDA (if at all). What they do release is an API library
 * called the HCF (Hardware Control Functions) which is supposed to
 * do the device-specific operations of a device driver for you. The
 * publicly available version of the HCF library (the 'HCF Light') is
 * a) extremely gross, b) lacks certain features, particularly support
 * for 802.11 frames, and c) is contaminated by the GNU Public License.
 *
 * This driver does not use the HCF or HCF Light at all.
Instead, it * programs the Hermes controller directly, using information gleaned * from the HCF Light code and corresponding documentation. * * This driver supports both the PCMCIA and ISA versions of the * WaveLAN/IEEE cards. Note however that the ISA card isn't really * anything of the sort: it's actually a PCMCIA bridge adapter * that fits into an ISA slot, into which a PCMCIA WaveLAN card is * inserted. Consequently, you need to use the pccard support for * both the ISA and PCMCIA adapters. */ #define WI_HERMES_AUTOINC_WAR /* Work around data write autoinc bug. */ #define WI_HERMES_STATS_WAR /* Work around stats counter bug. */ #define WICACHE /* turn on signal strength cache code */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "card_if.h" #if !defined(lint) static const char rcsid[] = "$FreeBSD$"; #endif #ifdef foo static u_int8_t wi_mcast_addr[6] = { 0x01, 0x60, 0x1D, 0x00, 0x01, 0x00 }; #endif static void wi_intr __P((void *)); static void wi_reset __P((struct wi_softc *)); static int wi_ioctl __P((struct ifnet *, u_long, caddr_t)); static void wi_init __P((void *)); static void wi_start __P((struct ifnet *)); static void wi_stop __P((struct wi_softc *)); static void wi_watchdog __P((struct ifnet *)); static void wi_rxeof __P((struct wi_softc *)); static void wi_txeof __P((struct wi_softc *, int)); static void wi_update_stats __P((struct wi_softc *)); static void wi_setmulti __P((struct wi_softc *)); static int wi_cmd __P((struct wi_softc *, int, int)); static int wi_read_record __P((struct wi_softc *, struct wi_ltv_gen *)); static int wi_write_record __P((struct wi_softc *, struct wi_ltv_gen *)); static int wi_read_data __P((struct wi_softc *, int, int, caddr_t, int)); static int wi_write_data __P((struct wi_softc *, 
int, int, caddr_t, int)); static int wi_seek __P((struct wi_softc *, int, int, int)); static int wi_alloc_nicmem __P((struct wi_softc *, int, int *)); static void wi_inquire __P((void *)); static void wi_setdef __P((struct wi_softc *, struct wi_req *)); static int wi_mgmt_xmit __P((struct wi_softc *, caddr_t, int)); #ifdef WICACHE static void wi_cache_store __P((struct wi_softc *, struct ether_header *, struct mbuf *, unsigned short)); #endif static int wi_pccard_match __P((device_t)); static int wi_pccard_probe __P((device_t)); static int wi_pccard_attach __P((device_t)); static int wi_pccard_detach __P((device_t)); static void wi_shutdown __P((device_t)); static int wi_alloc __P((device_t)); static void wi_free __P((device_t)); static device_method_t wi_pccard_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pccard_compat_probe), DEVMETHOD(device_attach, pccard_compat_attach), DEVMETHOD(device_detach, wi_pccard_detach), DEVMETHOD(device_shutdown, wi_shutdown), /* Card interface */ DEVMETHOD(card_compat_match, wi_pccard_match), DEVMETHOD(card_compat_probe, wi_pccard_probe), DEVMETHOD(card_compat_attach, wi_pccard_attach), { 0, 0 } }; static driver_t wi_pccard_driver = { "wi", wi_pccard_methods, sizeof(struct wi_softc) }; static devclass_t wi_pccard_devclass; DRIVER_MODULE(if_wi, pccard, wi_pccard_driver, wi_pccard_devclass, 0, 0); static const struct pccard_product wi_pccard_products[] = { { PCCARD_STR_LUCENT_WAVELAN_IEEE, PCCARD_VENDOR_LUCENT, PCCARD_PRODUCT_LUCENT_WAVELAN_IEEE, 0, PCCARD_CIS_LUCENT_WAVELAN_IEEE }, }; static int wi_pccard_match(dev) device_t dev; { const struct pccard_product *pp; if ((pp = pccard_product_lookup(dev, wi_pccard_products, sizeof(wi_pccard_products[0]), NULL)) != NULL) { device_set_desc(dev, pp->pp_name); return 0; } return ENXIO; } static int wi_pccard_probe(dev) device_t dev; { struct wi_softc *sc; int error; sc = device_get_softc(dev); sc->wi_gone = 0; error = wi_alloc(dev); if (error) return (error); 
/*
 * Detach the card: stop the interface, unhook it from the network
 * stack, tear down the interrupt handler and bus resources, and mark
 * the softc gone so that any late callers bail out.  Returns ENODEV
 * if the device was already detached, 0 otherwise.
 */
static int
wi_pccard_detach(dev)
	device_t		dev;
{
	struct wi_softc		*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	WI_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	/* Guard against a double detach. */
	if (sc->wi_gone) {
		device_printf(dev, "already unloaded\n");
		WI_UNLOCK(sc);
		return(ENODEV);
	}

	wi_stop(sc);

	/* Unhook from the network stack before releasing resources. */
	ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
	bus_teardown_intr(dev, sc->irq, sc->wi_intrhand);
	wi_free(dev);
	/* wi_ioctl()/wi_start() etc. check this flag and refuse service. */
	sc->wi_gone = 1;

	device_printf(dev, "unload\n");
	WI_UNLOCK(sc);
	mtx_destroy(&sc->wi_mtx);

	return(0);
}
*/ mac.wi_type = WI_RID_MAC_NODE; mac.wi_len = 4; wi_read_record(sc, (struct wi_ltv_gen *)&mac); bcopy((char *)&mac.wi_mac_addr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); device_printf(dev, "Ethernet address: %6D\n", sc->arpcom.ac_enaddr, ":"); ifp->if_softc = sc; ifp->if_unit = sc->wi_unit; ifp->if_name = "wi"; ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = wi_ioctl; ifp->if_output = ether_output; ifp->if_start = wi_start; ifp->if_watchdog = wi_watchdog; ifp->if_init = wi_init; ifp->if_baudrate = 10000000; ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; bzero(sc->wi_node_name, sizeof(sc->wi_node_name)); bcopy(WI_DEFAULT_NODENAME, sc->wi_node_name, sizeof(WI_DEFAULT_NODENAME) - 1); bzero(sc->wi_net_name, sizeof(sc->wi_net_name)); bcopy(WI_DEFAULT_NETNAME, sc->wi_net_name, sizeof(WI_DEFAULT_NETNAME) - 1); bzero(sc->wi_ibss_name, sizeof(sc->wi_ibss_name)); bcopy(WI_DEFAULT_IBSS, sc->wi_ibss_name, sizeof(WI_DEFAULT_IBSS) - 1); sc->wi_portnum = WI_DEFAULT_PORT; sc->wi_ptype = WI_PORTTYPE_ADHOC; sc->wi_ap_density = WI_DEFAULT_AP_DENSITY; sc->wi_rts_thresh = WI_DEFAULT_RTS_THRESH; sc->wi_tx_rate = WI_DEFAULT_TX_RATE; sc->wi_max_data_len = WI_DEFAULT_DATALEN; sc->wi_create_ibss = WI_DEFAULT_CREATE_IBSS; sc->wi_pm_enabled = WI_DEFAULT_PM_ENABLED; sc->wi_max_sleep = WI_DEFAULT_MAX_SLEEP; /* * Read the default channel from the NIC. This may vary * depending on the country where the NIC was purchased, so * we can't hard-code a default and expect it to work for * everyone. */ gen.wi_type = WI_RID_OWN_CHNL; gen.wi_len = 2; wi_read_record(sc, &gen); sc->wi_channel = gen.wi_val; /* * Find out if we support WEP on this card. */ gen.wi_type = WI_RID_WEP_AVAIL; gen.wi_len = 2; wi_read_record(sc, &gen); sc->wi_has_wep = gen.wi_val; if (bootverbose) { device_printf(sc->dev, __FUNCTION__ ":wi_has_wep = %d\n", sc->wi_has_wep); } bzero((char *)&sc->wi_stats, sizeof(sc->wi_stats)); wi_init(sc); wi_stop(sc); /* * Call MI attach routine. 
*/ ether_ifattach(ifp, ETHER_BPF_SUPPORTED); callout_handle_init(&sc->wi_stat_ch); WI_UNLOCK(sc); return(0); } static void wi_rxeof(sc) struct wi_softc *sc; { struct ifnet *ifp; struct ether_header *eh; struct wi_frame rx_frame; struct mbuf *m; int id; ifp = &sc->arpcom.ac_if; id = CSR_READ_2(sc, WI_RX_FID); /* First read in the frame header */ if (wi_read_data(sc, id, 0, (caddr_t)&rx_frame, sizeof(rx_frame))) { ifp->if_ierrors++; return; } if (rx_frame.wi_status & WI_STAT_ERRSTAT) { ifp->if_ierrors++; return; } MGETHDR(m, M_DONTWAIT, MT_DATA); if (m == NULL) { ifp->if_ierrors++; return; } MCLGET(m, M_DONTWAIT); if (!(m->m_flags & M_EXT)) { m_freem(m); ifp->if_ierrors++; return; } eh = mtod(m, struct ether_header *); m->m_pkthdr.rcvif = ifp; if (rx_frame.wi_status == WI_STAT_1042 || rx_frame.wi_status == WI_STAT_TUNNEL || rx_frame.wi_status == WI_STAT_WMP_MSG) { if((rx_frame.wi_dat_len + WI_SNAPHDR_LEN) > MCLBYTES) { device_printf(sc->dev, "oversized packet received " "(wi_dat_len=%d, wi_status=0x%x)\n", rx_frame.wi_dat_len, rx_frame.wi_status); m_freem(m); ifp->if_ierrors++; return; } m->m_pkthdr.len = m->m_len = rx_frame.wi_dat_len + WI_SNAPHDR_LEN; bcopy((char *)&rx_frame.wi_addr1, (char *)&eh->ether_dhost, ETHER_ADDR_LEN); if (sc->wi_ptype == WI_PORTTYPE_ADHOC) { bcopy((char *)&rx_frame.wi_addr2, (char *)&eh->ether_shost, ETHER_ADDR_LEN); } else { bcopy((char *)&rx_frame.wi_addr3, (char *)&eh->ether_shost, ETHER_ADDR_LEN); } bcopy((char *)&rx_frame.wi_type, (char *)&eh->ether_type, sizeof(u_int16_t)); if (wi_read_data(sc, id, WI_802_11_OFFSET, mtod(m, caddr_t) + sizeof(struct ether_header), m->m_len + 2)) { m_freem(m); ifp->if_ierrors++; return; } } else { if((rx_frame.wi_dat_len + sizeof(struct ether_header)) > MCLBYTES) { device_printf(sc->dev, "oversized packet received " "(wi_dat_len=%d, wi_status=0x%x)\n", rx_frame.wi_dat_len, rx_frame.wi_status); m_freem(m); ifp->if_ierrors++; return; } m->m_pkthdr.len = m->m_len = rx_frame.wi_dat_len + sizeof(struct 
/*
 * Harvest the statistics counters that the NIC delivers in response
 * to the WI_CMD_INQUIRE/WI_INFO_COUNTERS request issued by
 * wi_inquire(), and accumulate them into the softc's running totals.
 */
void
wi_update_stats(sc)
	struct wi_softc		*sc;
{
	struct wi_ltv_gen	gen;
	u_int16_t		id;
	struct ifnet		*ifp;
	u_int32_t		*ptr;
	int			i;
	u_int16_t		t;

	ifp = &sc->arpcom.ac_if;

	id = CSR_READ_2(sc, WI_INFO_FID);

	/* Read the LTV header (length and record type) first. */
	wi_read_data(sc, id, 0, (char *)&gen, 4);

	/*
	 * Ignore anything that is not a counters record or whose
	 * length would overflow the wi_stats buffer.
	 */
	if (gen.wi_type != WI_INFO_COUNTERS ||
	    gen.wi_len > (sizeof(sc->wi_stats) / 4) + 1)
		return;

	ptr = (u_int32_t *)&sc->wi_stats;

	for (i = 0; i < gen.wi_len - 1; i++) {
		t = CSR_READ_2(sc, WI_DATA1);
#ifdef WI_HERMES_STATS_WAR
		/*
		 * Work around the Hermes stats counter bug (see the
		 * WI_HERMES_STATS_WAR define at the top of the file):
		 * implausibly large readings are complemented.
		 */
		if (t > 0xF000)
			t = ~t & 0xFFFF;
#endif
		ptr[i] += t;
	}

	/* Fold all TX retry counters into the collision count. */
	ifp->if_collisions = sc->wi_stats.wi_tx_single_retries +
	    sc->wi_stats.wi_tx_multi_retries +
	    sc->wi_stats.wi_tx_retry_limit;

	return;
}
/*
 * Issue a command to the Hermes controller and busy-wait for its
 * completion.  Returns 0 on success, ETIMEDOUT if the controller
 * never becomes idle or never signals command completion, and EIO
 * if the controller reports a command error in its status register.
 */
static int
wi_cmd(sc, cmd, val)
	struct wi_softc		*sc;
	int			cmd;
	int			val;
{
	int			i, s = 0;

	/* wait for the busy bit to clear */
	for (i = 0; i < WI_TIMEOUT; i++) {
		if (!(CSR_READ_2(sc, WI_COMMAND) & WI_CMD_BUSY)) {
			break;
		}
		DELAY(10*1000); /* 10 m sec */
	}
	if (i == WI_TIMEOUT) {
		return(ETIMEDOUT);
	}

	/* Load the parameter registers, then kick off the command. */
	CSR_WRITE_2(sc, WI_PARAM0, val);
	CSR_WRITE_2(sc, WI_PARAM1, 0);
	CSR_WRITE_2(sc, WI_PARAM2, 0);
	CSR_WRITE_2(sc, WI_COMMAND, cmd);

	for (i = 0; i < WI_TIMEOUT; i++) {
		/*
		 * Wait for 'command complete' bit to be
		 * set in the event status register.
		 */
		s = CSR_READ_2(sc, WI_EVENT_STAT) & WI_EV_CMD;
		if (s) {
			/* Ack the event and read result code. */
			s = CSR_READ_2(sc, WI_STATUS);
			CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_CMD);
#ifdef foo
			if ((s & WI_CMD_CODE_MASK) != (cmd & WI_CMD_CODE_MASK))
				return(EIO);
#endif
			if (s & WI_STAT_CMD_RESULT)
				return(EIO);
			break;
		}
	}

	if (i == WI_TIMEOUT)
		return(ETIMEDOUT);

	return(0);
}
/*
 * Write an LTV record to the NIC.  Same as read, except we inject
 * data instead of reading it.  On Prism II chips several record IDs
 * and values differ from the Lucent firmware's, so those records are
 * first translated into the scratch record p2ltv.
 */
static int
wi_write_record(sc, ltv)
	struct wi_softc		*sc;
	struct wi_ltv_gen	*ltv;
{
	u_int16_t		*ptr;
	int			i;
	struct wi_ltv_gen	p2ltv;

	if (sc->wi_prism2) {
		switch (ltv->wi_type) {
		case WI_RID_TX_RATE:
			p2ltv.wi_type = WI_RID_TX_RATE;
			p2ltv.wi_len = 2;
			/* Translate the rate code to the Prism II value. */
			switch (ltv->wi_val) {
			case 1: p2ltv.wi_val = 1; break;
			case 2: p2ltv.wi_val = 2; break;
			case 3: p2ltv.wi_val = 15; break;
			case 5: p2ltv.wi_val = 4; break;
			case 6: p2ltv.wi_val = 3; break;
			case 7: p2ltv.wi_val = 7; break;
			case 11: p2ltv.wi_val = 8; break;
			default: return EINVAL;
			}
			ltv = &p2ltv;
			break;
		case WI_RID_ENCRYPTION:
			p2ltv.wi_type = WI_RID_P2_ENCRYPTION;
			p2ltv.wi_len = 2;
			if (ltv->wi_val)
				p2ltv.wi_val = 0x03;
			else
				p2ltv.wi_val = 0x90;
			ltv = &p2ltv;
			break;
		case WI_RID_TX_CRYPT_KEY:
			p2ltv.wi_type = WI_RID_P2_TX_CRYPT_KEY;
			p2ltv.wi_len = 2;
			p2ltv.wi_val = ltv->wi_val;
			ltv = &p2ltv;
			break;
		case WI_RID_DEFLT_CRYPT_KEYS:
		    {
			/*
			 * Prism II keeps the four WEP keys in separate
			 * records, so write them out one at a time
			 * via recursive calls.
			 */
			int error;
			struct wi_ltv_str	ws;
			struct wi_ltv_keys	*wk = (struct wi_ltv_keys *)ltv;
			for (i = 0; i < 4; i++) {
				ws.wi_len = 4;
				ws.wi_type = WI_RID_P2_CRYPT_KEY0 + i;
				memcpy(ws.wi_str,
				    &wk->wi_keys[i].wi_keydat, 5);
				ws.wi_str[5] = '\0';
				error = wi_write_record(sc,
				    (struct wi_ltv_gen *)&ws);
				if (error)
					return error;
			}
			return 0;
		    }
		}
	}

	/* Position the buffer access path at the record. */
	if (wi_seek(sc, ltv->wi_type, 0, WI_BAP1))
		return(EIO);

	/* Write the length/type header, then the payload words. */
	CSR_WRITE_2(sc, WI_DATA1, ltv->wi_len);
	CSR_WRITE_2(sc, WI_DATA1, ltv->wi_type);

	ptr = &ltv->wi_val;
	for (i = 0; i < ltv->wi_len - 1; i++)
		CSR_WRITE_2(sc, WI_DATA1, ptr[i]);

	if (wi_cmd(sc, WI_CMD_ACCESS|WI_ACCESS_WRITE, ltv->wi_type))
		return(EIO);

	return(0);
}
*/ static int wi_write_data(sc, id, off, buf, len) struct wi_softc *sc; int id, off; caddr_t buf; int len; { int i; u_int16_t *ptr; #ifdef WI_HERMES_AUTOINC_WAR again: #endif if (wi_seek(sc, id, off, WI_BAP0)) return(EIO); ptr = (u_int16_t *)buf; for (i = 0; i < (len / 2); i++) CSR_WRITE_2(sc, WI_DATA0, ptr[i]); #ifdef WI_HERMES_AUTOINC_WAR CSR_WRITE_2(sc, WI_DATA0, 0x1234); CSR_WRITE_2(sc, WI_DATA0, 0x5678); if (wi_seek(sc, id, off + len, WI_BAP0)) return(EIO); if (CSR_READ_2(sc, WI_DATA0) != 0x1234 || CSR_READ_2(sc, WI_DATA0) != 0x5678) goto again; #endif return(0); } /* * Allocate a region of memory inside the NIC and zero * it out. */ static int wi_alloc_nicmem(sc, len, id) struct wi_softc *sc; int len; int *id; { int i; if (wi_cmd(sc, WI_CMD_ALLOC_MEM, len)) { device_printf(sc->dev, "failed to allocate %d bytes on NIC\n", len); return(ENOMEM); } for (i = 0; i < WI_TIMEOUT; i++) { if (CSR_READ_2(sc, WI_EVENT_STAT) & WI_EV_ALLOC) break; } if (i == WI_TIMEOUT) return(ETIMEDOUT); CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_ALLOC); *id = CSR_READ_2(sc, WI_ALLOC_FID); if (wi_seek(sc, *id, 0, WI_BAP0)) return(EIO); for (i = 0; i < len / 2; i++) CSR_WRITE_2(sc, WI_DATA0, 0); return(0); } static void wi_setmulti(sc) struct wi_softc *sc; { struct ifnet *ifp; int i = 0; struct ifmultiaddr *ifma; struct wi_ltv_mcast mcast; ifp = &sc->arpcom.ac_if; bzero((char *)&mcast, sizeof(mcast)); mcast.wi_type = WI_RID_MCAST; mcast.wi_len = (3 * 16) + 1; if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { wi_write_record(sc, (struct wi_ltv_gen *)&mcast); return; } - for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; - ifma = ifma->ifma_link.le_next) { + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (i < 16) { bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), (char *)&mcast.wi_mcast[i], ETHER_ADDR_LEN); i++; } else { bzero((char *)&mcast, sizeof(mcast)); break; } } mcast.wi_len = (i * 3) + 1; 
wi_write_record(sc, (struct wi_ltv_gen *)&mcast); return; } static void wi_setdef(sc, wreq) struct wi_softc *sc; struct wi_req *wreq; { struct sockaddr_dl *sdl; struct ifaddr *ifa; struct ifnet *ifp; ifp = &sc->arpcom.ac_if; switch(wreq->wi_type) { case WI_RID_MAC_NODE: ifa = ifnet_addrs[ifp->if_index - 1]; sdl = (struct sockaddr_dl *)ifa->ifa_addr; bcopy((char *)&wreq->wi_val, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); bcopy((char *)&wreq->wi_val, LLADDR(sdl), ETHER_ADDR_LEN); break; case WI_RID_PORTTYPE: sc->wi_ptype = wreq->wi_val[0]; break; case WI_RID_TX_RATE: sc->wi_tx_rate = wreq->wi_val[0]; break; case WI_RID_MAX_DATALEN: sc->wi_max_data_len = wreq->wi_val[0]; break; case WI_RID_RTS_THRESH: sc->wi_rts_thresh = wreq->wi_val[0]; break; case WI_RID_SYSTEM_SCALE: sc->wi_ap_density = wreq->wi_val[0]; break; case WI_RID_CREATE_IBSS: sc->wi_create_ibss = wreq->wi_val[0]; break; case WI_RID_OWN_CHNL: sc->wi_channel = wreq->wi_val[0]; break; case WI_RID_NODENAME: bzero(sc->wi_node_name, sizeof(sc->wi_node_name)); bcopy((char *)&wreq->wi_val[1], sc->wi_node_name, 30); break; case WI_RID_DESIRED_SSID: bzero(sc->wi_net_name, sizeof(sc->wi_net_name)); bcopy((char *)&wreq->wi_val[1], sc->wi_net_name, 30); break; case WI_RID_OWN_SSID: bzero(sc->wi_ibss_name, sizeof(sc->wi_ibss_name)); bcopy((char *)&wreq->wi_val[1], sc->wi_ibss_name, 30); break; case WI_RID_PM_ENABLED: sc->wi_pm_enabled = wreq->wi_val[0]; break; case WI_RID_MAX_SLEEP: sc->wi_max_sleep = wreq->wi_val[0]; break; case WI_RID_ENCRYPTION: sc->wi_use_wep = wreq->wi_val[0]; break; case WI_RID_TX_CRYPT_KEY: sc->wi_tx_key = wreq->wi_val[0]; break; case WI_RID_DEFLT_CRYPT_KEYS: bcopy((char *)wreq, (char *)&sc->wi_keys, sizeof(struct wi_ltv_keys)); break; default: break; } /* Reinitialize WaveLAN. 
*/ wi_init(sc); return; } static int wi_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { int error = 0; struct wi_softc *sc; struct wi_req wreq; struct ifreq *ifr; struct proc *p = curproc; sc = ifp->if_softc; WI_LOCK(sc); ifr = (struct ifreq *)data; if (sc->wi_gone) { error = ENODEV; goto out; } switch(command) { case SIOCSIFADDR: case SIOCGIFADDR: case SIOCSIFMTU: error = ether_ioctl(ifp, command, data); break; case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->wi_if_flags & IFF_PROMISC)) { WI_SETVAL(WI_RID_PROMISC, 1); } else if (ifp->if_flags & IFF_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->wi_if_flags & IFF_PROMISC) { WI_SETVAL(WI_RID_PROMISC, 0); } else wi_init(sc); } else { if (ifp->if_flags & IFF_RUNNING) { wi_stop(sc); } } sc->wi_if_flags = ifp->if_flags; error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: wi_setmulti(sc); error = 0; break; case SIOCGWAVELAN: error = copyin(ifr->ifr_data, &wreq, sizeof(wreq)); if (error) break; /* Don't show WEP keys to non-root users. 
/*
 * Initialize and (re)start the interface: reset the NIC, push the
 * configuration held in the softc into the firmware's records,
 * allocate the two on-NIC transmit buffers (data and management),
 * enable interrupts, and arm the periodic statistics timer.
 * Acquires WI_LOCK for the duration.
 */
static void
wi_init(xsc)
	void			*xsc;
{
	struct wi_softc		*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct wi_ltv_macaddr	mac;
	int			id = 0;

	WI_LOCK(sc);

	/* Bail if the device has been detached. */
	if (sc->wi_gone) {
		WI_UNLOCK(sc);
		return;
	}

	if (ifp->if_flags & IFF_RUNNING)
		wi_stop(sc);

	wi_reset(sc);

	/* Program max data length. */
	WI_SETVAL(WI_RID_MAX_DATALEN, sc->wi_max_data_len);

	/* Enable/disable IBSS creation. */
	WI_SETVAL(WI_RID_CREATE_IBSS, sc->wi_create_ibss);

	/* Set the port type. */
	WI_SETVAL(WI_RID_PORTTYPE, sc->wi_ptype);

	/* Program the RTS/CTS threshold. */
	WI_SETVAL(WI_RID_RTS_THRESH, sc->wi_rts_thresh);

	/* Program the TX rate. */
	WI_SETVAL(WI_RID_TX_RATE, sc->wi_tx_rate);

	/* Access point density. */
	WI_SETVAL(WI_RID_SYSTEM_SCALE, sc->wi_ap_density);

	/* Power management enabled. */
	WI_SETVAL(WI_RID_PM_ENABLED, sc->wi_pm_enabled);

	/* Power management max sleep. */
	WI_SETVAL(WI_RID_MAX_SLEEP, sc->wi_max_sleep);

	/* Specify the IBSS name. */
	WI_SETSTR(WI_RID_OWN_SSID, sc->wi_ibss_name);

	/* Specify the network name. */
	WI_SETSTR(WI_RID_DESIRED_SSID, sc->wi_net_name);

	/* Specify the frequency to use. */
	WI_SETVAL(WI_RID_OWN_CHNL, sc->wi_channel);

	/* Program the nodename. */
	WI_SETSTR(WI_RID_NODENAME, sc->wi_node_name);

	/* Set our MAC address. */
	mac.wi_len = 4;
	mac.wi_type = WI_RID_MAC_NODE;
	bcopy((char *)&sc->arpcom.ac_enaddr,
	    (char *)&mac.wi_mac_addr, ETHER_ADDR_LEN);
	wi_write_record(sc, (struct wi_ltv_gen *)&mac);

	/* Configure WEP (only if the card supports it). */
	if (sc->wi_has_wep) {
		WI_SETVAL(WI_RID_ENCRYPTION, sc->wi_use_wep);
		WI_SETVAL(WI_RID_TX_CRYPT_KEY, sc->wi_tx_key);
		sc->wi_keys.wi_len = (sizeof(struct wi_ltv_keys) / 2) + 1;
		sc->wi_keys.wi_type = WI_RID_DEFLT_CRYPT_KEYS;
		wi_write_record(sc, (struct wi_ltv_gen *)&sc->wi_keys);
	}

	/* Initialize promisc mode. */
	if (ifp->if_flags & IFF_PROMISC) {
		WI_SETVAL(WI_RID_PROMISC, 1);
	} else {
		WI_SETVAL(WI_RID_PROMISC, 0);
	}

	/* Set multicast filter. */
	wi_setmulti(sc);

	/* Enable desired port. */
	wi_cmd(sc, WI_CMD_ENABLE|sc->wi_portnum, 0);

	/* Allocate the data and management TX buffers on the NIC. */
	if (wi_alloc_nicmem(sc, 1518 + sizeof(struct wi_frame) + 8, &id))
		device_printf(sc->dev, "tx buffer allocation failed\n");
	sc->wi_tx_data_id = id;

	if (wi_alloc_nicmem(sc, 1518 + sizeof(struct wi_frame) + 8, &id))
		device_printf(sc->dev, "mgmt. buffer allocation failed\n");
	sc->wi_tx_mgmt_id = id;

	/* enable interrupts */
	CSR_WRITE_2(sc, WI_INT_EN, WI_INTRS);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Re-arm the periodic statistics inquiry. */
	sc->wi_stat_ch = timeout(wi_inquire, sc, hz * 60);

	WI_UNLOCK(sc);

	return;
}
WI_802_3_OFFSET, (caddr_t)&sc->wi_txbuf, m0->m_pkthdr.len + 2); } /* * If there's a BPF listner, bounce a copy of * this frame to him. */ if (ifp->if_bpf) bpf_mtap(ifp, m0); m_freem(m0); if (wi_cmd(sc, WI_CMD_TX|WI_RECLAIM, id)) device_printf(sc->dev, "xmit failed\n"); ifp->if_flags |= IFF_OACTIVE; /* * Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; WI_UNLOCK(sc); return; } static int wi_mgmt_xmit(sc, data, len) struct wi_softc *sc; caddr_t data; int len; { struct wi_frame tx_frame; int id; struct wi_80211_hdr *hdr; caddr_t dptr; if (sc->wi_gone) return(ENODEV); hdr = (struct wi_80211_hdr *)data; dptr = data + sizeof(struct wi_80211_hdr); bzero((char *)&tx_frame, sizeof(tx_frame)); id = sc->wi_tx_mgmt_id; bcopy((char *)hdr, (char *)&tx_frame.wi_frame_ctl, sizeof(struct wi_80211_hdr)); tx_frame.wi_dat_len = len - WI_SNAPHDR_LEN; tx_frame.wi_len = htons(len - WI_SNAPHDR_LEN); wi_write_data(sc, id, 0, (caddr_t)&tx_frame, sizeof(struct wi_frame)); wi_write_data(sc, id, WI_802_11_OFFSET_RAW, dptr, (len - sizeof(struct wi_80211_hdr)) + 2); if (wi_cmd(sc, WI_CMD_TX|WI_RECLAIM, id)) { device_printf(sc->dev, "xmit failed\n"); return(EIO); } return(0); } static void wi_stop(sc) struct wi_softc *sc; { struct ifnet *ifp; WI_LOCK(sc); if (sc->wi_gone) { WI_UNLOCK(sc); return; } ifp = &sc->arpcom.ac_if; /* * If the card is gone and the memory port isn't mapped, we will * (hopefully) get 0xffff back from the status read, which is not * a valid status value. 
*/ if (CSR_READ_2(sc, WI_STATUS) != 0xffff) { CSR_WRITE_2(sc, WI_INT_EN, 0); wi_cmd(sc, WI_CMD_DISABLE|sc->wi_portnum, 0); } untimeout(wi_inquire, sc, sc->wi_stat_ch); ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE); WI_UNLOCK(sc); return; } static void wi_watchdog(ifp) struct ifnet *ifp; { struct wi_softc *sc; sc = ifp->if_softc; device_printf(sc->dev,"device timeout\n"); wi_init(sc); ifp->if_oerrors++; return; } static int wi_alloc(dev) device_t dev; { struct wi_softc *sc = device_get_softc(dev); int rid; rid = 0; sc->iobase = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0, (1 << 6), rman_make_alignment_flags(1 << 6) | RF_ACTIVE); if (!sc->iobase) { device_printf(dev, "No I/O space?!\n"); return (ENXIO); } rid = 0; sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_ACTIVE); if (!sc->irq) { device_printf(dev, "No irq?!\n"); return (ENXIO); } sc->dev = dev; sc->wi_unit = device_get_unit(dev); sc->wi_io_addr = rman_get_start(sc->iobase); sc->wi_btag = rman_get_bustag(sc->iobase); sc->wi_bhandle = rman_get_bushandle(sc->iobase); return (0); } static void wi_free(dev) device_t dev; { struct wi_softc *sc = device_get_softc(dev); if (sc->iobase != NULL) bus_release_resource(dev, SYS_RES_IOPORT, 0, sc->iobase); if (sc->irq != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq); return; } static void wi_shutdown(dev) device_t dev; { struct wi_softc *sc; sc = device_get_softc(dev); wi_stop(sc); return; } #ifdef WICACHE /* wavelan signal strength cache code. * store signal/noise/quality on per MAC src basis in * a small fixed cache. The cache wraps if > MAX slots * used. The cache may be zeroed out to start over. * Two simple filters exist to reduce computation: * 1. ip only (literally 0x800) which may be used * to ignore some packets. It defaults to ip only. * it could be used to focus on broadcast, non-IP 802.11 beacons. * 2. multicast/broadcast only. 
This may be used to * ignore unicast packets and only cache signal strength * for multicast/broadcast packets (beacons); e.g., Mobile-IP * beacons and not unicast traffic. * * The cache stores (MAC src(index), IP src (major clue), signal, * quality, noise) * * No apologies for storing IP src here. It's easy and saves much * trouble elsewhere. The cache is assumed to be INET dependent, * although it need not be. */ #ifdef documentation int wi_sigitems; /* number of cached entries */ struct wi_sigcache wi_sigcache[MAXWICACHE]; /* array of cache entries */ int wi_nextitem; /* index/# of entries */ #endif /* control variables for cache filtering. Basic idea is * to reduce cost (e.g., to only Mobile-IP agent beacons * which are broadcast or multicast). Still you might * want to measure signal strength with unicast ping packets * on a pt. to pt. ant. setup. */ /* set true if you want to limit cache items to broadcast/mcast * only packets (not unicast). Useful for mobile-ip beacons which * are broadcast/multicast at network layer. Default is all packets * so ping/unicast will work say with pt. to pt. antennae setup. */ static int wi_cache_mcastonly = 0; SYSCTL_INT(_machdep, OID_AUTO, wi_cache_mcastonly, CTLFLAG_RW, &wi_cache_mcastonly, 0, ""); /* set true if you want to limit cache items to IP packets only */ static int wi_cache_iponly = 1; SYSCTL_INT(_machdep, OID_AUTO, wi_cache_iponly, CTLFLAG_RW, &wi_cache_iponly, 0, ""); /* * Original comments: * ----------------- * wi_cache_store, per rx packet store signal * strength in MAC (src) indexed cache. * * follows linux driver in how signal strength is computed. * In ad hoc mode, we use the rx_quality field. * signal and noise are trimmed to fit in the range from 47..138. * rx_quality field MSB is signal strength. * rx_quality field LSB is noise. * "quality" is (signal - noise) as is log value. * note: quality CAN be negative. * * In BSS mode, we use the RID for communication quality. * TBD: BSS mode is currently untested. 
* * Bill's comments: * --------------- * Actually, we use the rx_quality field all the time for both "ad-hoc" * and BSS modes. Why? Because reading an RID is really, really expensive: * there's a bunch of PIO operations that have to be done to read a record * from the NIC, and reading the comms quality RID each time a packet is * received can really hurt performance. We don't have to do this anyway: * the comms quality field only reflects the values in the rx_quality field * anyway. The comms quality RID is only meaningful in infrastructure mode, * but the values it contains are updated based on the rx_quality from * frames received from the access point. * * Also, according to Lucent, the signal strength and noise level values * can be converted to dBms by subtracting 149, so I've modified the code * to do that instead of the scaling it did originally. */ static void wi_cache_store (struct wi_softc *sc, struct ether_header *eh, struct mbuf *m, unsigned short rx_quality) { struct ip *ip = 0; int i; static int cache_slot = 0; /* use this cache entry */ static int wrapindex = 0; /* next "free" cache entry */ int sig, noise; int sawip=0; /* filters: * 1. ip only * 2. configurable filter to throw out unicast packets, * keep multicast only. */ if ((ntohs(eh->ether_type) == 0x800)) { sawip = 1; } /* filter for ip packets only */ if (wi_cache_iponly && !sawip) { return; } /* filter for broadcast/multicast only */ if (wi_cache_mcastonly && ((eh->ether_dhost[0] & 1) == 0)) { return; } #ifdef SIGDEBUG printf("wi%d: q value %x (MSB=0x%x, LSB=0x%x) \n", sc->wi_unit, rx_quality & 0xffff, rx_quality >> 8, rx_quality & 0xff); #endif /* find the ip header. we want to store the ip_src * address. */ if (sawip) { ip = mtod(m, struct ip *); } /* do a linear search for a matching MAC address * in the cache table * . MAC address is 6 bytes, * . var w_nextitem holds total number of entries already cached */ for(i = 0; i < sc->wi_nextitem; i++) { if (! 
bcmp(eh->ether_shost , sc->wi_sigcache[i].macsrc, 6 )) { /* Match!, * so we already have this entry, * update the data */ break; } } /* did we find a matching mac address? * if yes, then overwrite a previously existing cache entry */ if (i < sc->wi_nextitem ) { cache_slot = i; } /* else, have a new address entry,so * add this new entry, * if table full, then we need to replace LRU entry */ else { /* check for space in cache table * note: wi_nextitem also holds number of entries * added in the cache table */ if ( sc->wi_nextitem < MAXWICACHE ) { cache_slot = sc->wi_nextitem; sc->wi_nextitem++; sc->wi_sigitems = sc->wi_nextitem; } /* no space found, so simply wrap with wrap index * and "zap" the next entry */ else { if (wrapindex == MAXWICACHE) { wrapindex = 0; } cache_slot = wrapindex++; } } /* invariant: cache_slot now points at some slot * in cache. */ if (cache_slot < 0 || cache_slot >= MAXWICACHE) { log(LOG_ERR, "wi_cache_store, bad index: %d of " "[0..%d], gross cache error\n", cache_slot, MAXWICACHE); return; } /* store items in cache * .ip source address * .mac src * .signal, etc. */ if (sawip) { sc->wi_sigcache[cache_slot].ipsrc = ip->ip_src.s_addr; } bcopy( eh->ether_shost, sc->wi_sigcache[cache_slot].macsrc, 6); sig = (rx_quality >> 8) & 0xFF; noise = rx_quality & 0xFF; sc->wi_sigcache[cache_slot].signal = sig - 149; sc->wi_sigcache[cache_slot].noise = noise - 149; sc->wi_sigcache[cache_slot].quality = sig - noise; return; } #endif Index: head/sys/pci/if_dc.c =================================================================== --- head/sys/pci/if_dc.c (revision 71961) +++ head/sys/pci/if_dc.c (revision 71962) @@ -1,3371 +1,3367 @@ /* * Copyright (c) 1997, 1998, 1999 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * DEC "tulip" clone ethernet driver. 
Supports the DEC/Intel 21143 * series chips and several workalikes including the following: * * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com) * Macronix/Lite-On 82c115 PNIC II (www.macronix.com) * Lite-On 82c168/82c169 PNIC (www.litecom.com) * ASIX Electronics AX88140A (www.asix.com.tw) * ASIX Electronics AX88141 (www.asix.com.tw) * ADMtek AL981 (www.admtek.com.tw) * ADMtek AN985 (www.admtek.com.tw) * Davicom DM9100, DM9102, DM9102A (www.davicom8.com) * Accton EN1217 (www.accton.com) * Xircom X3201 (www.xircom.com) * Abocom FE2500 * * Datasheets for the 21143 are available at developer.intel.com. * Datasheets for the clone parts can be found at their respective sites. * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.) * The PNIC II is essentially a Macronix 98715A chip; the only difference * worth noting is that its multicast hash table is only 128 bits wide * instead of 512. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The Intel 21143 is the successor to the DEC 21140. It is basically * the same as the 21140 but with a few new features. The 21143 supports * three kinds of media attachments: * * o MII port, for 10Mbps and 100Mbps support and NWAY * autonegotiation provided by an external PHY. * o SYM port, for symbol mode 100Mbps support. * o 10baseT port. * o AUI/BNC port. * * The 100Mbps SYM port and 10baseT port can be used together in * combination with the internal NWAY support to create a 10/100 * autosensing configuration. * * Note that not all tulip workalikes are handled in this driver: we only * deal with those which are relatively well behaved. The Winbond is * handled separately due to its different register offsets and the * special handling needed for its various bugs. The PNIC is handled * here, but I'm not thrilled about it. 
* * All of the workalike chips use some form of MII transceiver support * with the exception of the Macronix chips, which also have a SYM port. * The ASIX AX88140A is also documented to have a SYM port, but all * the cards I've seen use an MII transceiver, probably because the * AX88140A doesn't support internal NWAY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include #include #include #include #include #include #include #define DC_USEIOSPACE #ifdef __alpha__ #define SRM_MEDIA #endif #include MODULE_DEPEND(dc, miibus, 1, 1, 1); /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" #ifndef lint static const char rcsid[] = "$FreeBSD$"; #endif /* * Various supported device vendors/types and their names. */ static struct dc_type dc_devs[] = { { DC_VENDORID_DEC, DC_DEVICEID_21143, "Intel 21143 10/100BaseTX" }, { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9100, "Davicom DM9100 10/100BaseTX" }, { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102, "Davicom DM9102 10/100BaseTX" }, { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102, "Davicom DM9102A 10/100BaseTX" }, { DC_VENDORID_ADMTEK, DC_DEVICEID_AL981, "ADMtek AL981 10/100BaseTX" }, { DC_VENDORID_ADMTEK, DC_DEVICEID_AN985, "ADMtek AN985 10/100BaseTX" }, { DC_VENDORID_ASIX, DC_DEVICEID_AX88140A, "ASIX AX88140A 10/100BaseTX" }, { DC_VENDORID_ASIX, DC_DEVICEID_AX88140A, "ASIX AX88141 10/100BaseTX" }, { DC_VENDORID_MX, DC_DEVICEID_98713, "Macronix 98713 10/100BaseTX" }, { DC_VENDORID_MX, DC_DEVICEID_98713, "Macronix 98713A 10/100BaseTX" }, { DC_VENDORID_CP, DC_DEVICEID_98713_CP, "Compex RL100-TX 10/100BaseTX" }, { DC_VENDORID_CP, DC_DEVICEID_98713_CP, "Compex RL100-TX 10/100BaseTX" }, { DC_VENDORID_MX, DC_DEVICEID_987x5, "Macronix 98715/98715A 10/100BaseTX" }, { DC_VENDORID_MX, DC_DEVICEID_987x5, "Macronix 98715AEC-C 10/100BaseTX" }, { DC_VENDORID_MX, 
DC_DEVICEID_987x5, "Macronix 98725 10/100BaseTX" }, { DC_VENDORID_MX, DC_DEVICEID_98727, "Macronix 98727/98732 10/100BaseTX" }, { DC_VENDORID_LO, DC_DEVICEID_82C115, "LC82C115 PNIC II 10/100BaseTX" }, { DC_VENDORID_LO, DC_DEVICEID_82C168, "82c168 PNIC 10/100BaseTX" }, { DC_VENDORID_LO, DC_DEVICEID_82C168, "82c169 PNIC 10/100BaseTX" }, { DC_VENDORID_ACCTON, DC_DEVICEID_EN1217, "Accton EN1217 10/100BaseTX" }, { DC_VENDORID_ACCTON, DC_DEVICEID_EN2242, "Accton EN2242 MiniPCI 10/100BaseTX" }, { DC_VENDORID_XIRCOM, DC_DEVICEID_X3201, "Xircom X3201 10/100BaseTX" }, { DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500, "Abocom FE2500 10/100BaseTX" }, { 0, 0, NULL } }; static int dc_probe __P((device_t)); static int dc_attach __P((device_t)); static int dc_detach __P((device_t)); static void dc_acpi __P((device_t)); static struct dc_type *dc_devtype __P((device_t)); static int dc_newbuf __P((struct dc_softc *, int, struct mbuf *)); static int dc_encap __P((struct dc_softc *, struct mbuf *, u_int32_t *)); static int dc_coal __P((struct dc_softc *, struct mbuf **)); static void dc_pnic_rx_bug_war __P((struct dc_softc *, int)); static int dc_rx_resync __P((struct dc_softc *)); static void dc_rxeof __P((struct dc_softc *)); static void dc_txeof __P((struct dc_softc *)); static void dc_tick __P((void *)); static void dc_intr __P((void *)); static void dc_start __P((struct ifnet *)); static int dc_ioctl __P((struct ifnet *, u_long, caddr_t)); static void dc_init __P((void *)); static void dc_stop __P((struct dc_softc *)); static void dc_watchdog __P((struct ifnet *)); static void dc_shutdown __P((device_t)); static int dc_ifmedia_upd __P((struct ifnet *)); static void dc_ifmedia_sts __P((struct ifnet *, struct ifmediareq *)); static void dc_delay __P((struct dc_softc *)); static void dc_eeprom_idle __P((struct dc_softc *)); static void dc_eeprom_putbyte __P((struct dc_softc *, int)); static void dc_eeprom_getword __P((struct dc_softc *, int, u_int16_t *)); static void dc_eeprom_getword_pnic 
__P((struct dc_softc *, int, u_int16_t *)); static void dc_eeprom_getword_xircom __P((struct dc_softc *, int, u_int16_t *)); static void dc_read_eeprom __P((struct dc_softc *, caddr_t, int, int, int)); static void dc_mii_writebit __P((struct dc_softc *, int)); static int dc_mii_readbit __P((struct dc_softc *)); static void dc_mii_sync __P((struct dc_softc *)); static void dc_mii_send __P((struct dc_softc *, u_int32_t, int)); static int dc_mii_readreg __P((struct dc_softc *, struct dc_mii_frame *)); static int dc_mii_writereg __P((struct dc_softc *, struct dc_mii_frame *)); static int dc_miibus_readreg __P((device_t, int, int)); static int dc_miibus_writereg __P((device_t, int, int, int)); static void dc_miibus_statchg __P((device_t)); static void dc_miibus_mediainit __P((device_t)); static void dc_setcfg __P((struct dc_softc *, int)); static u_int32_t dc_crc_le __P((struct dc_softc *, caddr_t)); static u_int32_t dc_crc_be __P((caddr_t)); static void dc_setfilt_21143 __P((struct dc_softc *)); static void dc_setfilt_asix __P((struct dc_softc *)); static void dc_setfilt_admtek __P((struct dc_softc *)); static void dc_setfilt_xircom __P((struct dc_softc *)); static void dc_setfilt __P((struct dc_softc *)); static void dc_reset __P((struct dc_softc *)); static int dc_list_rx_init __P((struct dc_softc *)); static int dc_list_tx_init __P((struct dc_softc *)); static void dc_parse_21143_srom __P((struct dc_softc *)); static void dc_decode_leaf_sia __P((struct dc_softc *, struct dc_eblock_sia *)); static void dc_decode_leaf_mii __P((struct dc_softc *, struct dc_eblock_mii *)); static void dc_decode_leaf_sym __P((struct dc_softc *, struct dc_eblock_sym *)); static void dc_apply_fixup __P((struct dc_softc *, int)); #ifdef DC_USEIOSPACE #define DC_RES SYS_RES_IOPORT #define DC_RID DC_PCI_CFBIO #else #define DC_RES SYS_RES_MEMORY #define DC_RID DC_PCI_CFBMA #endif static device_method_t dc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, dc_probe), 
DEVMETHOD(device_attach, dc_attach), DEVMETHOD(device_detach, dc_detach), DEVMETHOD(device_shutdown, dc_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, dc_miibus_readreg), DEVMETHOD(miibus_writereg, dc_miibus_writereg), DEVMETHOD(miibus_statchg, dc_miibus_statchg), DEVMETHOD(miibus_mediainit, dc_miibus_mediainit), { 0, 0 } }; static driver_t dc_driver = { "dc", dc_methods, sizeof(struct dc_softc) }; static devclass_t dc_devclass; DRIVER_MODULE(if_dc, cardbus, dc_driver, dc_devclass, 0, 0); DRIVER_MODULE(if_dc, pci, dc_driver, dc_devclass, 0, 0); DRIVER_MODULE(miibus, dc, miibus_driver, miibus_devclass, 0, 0); #define DC_SETBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x)) #define DC_CLRBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x)) #define SIO_SET(x) DC_SETBIT(sc, DC_SIO, (x)) #define SIO_CLR(x) DC_CLRBIT(sc, DC_SIO, (x)) #define IS_MPSAFE 0 static void dc_delay(sc) struct dc_softc *sc; { int idx; for (idx = (300 / 33) + 1; idx > 0; idx--) CSR_READ_4(sc, DC_BUSCTL); } static void dc_eeprom_idle(sc) struct dc_softc *sc; { register int i; CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); dc_delay(sc); DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); dc_delay(sc); for (i = 0; i < 25; i++) { DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); } DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS); dc_delay(sc); CSR_WRITE_4(sc, DC_SIO, 0x00000000); return; } /* * Send a read command and address to the EEPROM, check for ACK. */ static void dc_eeprom_putbyte(sc, addr) struct dc_softc *sc; int addr; { register int d, i; /* * The AN985 has a 93C66 EEPROM on it instead of * a 93C46. 
It uses a different bit sequence for * specifying the "read" opcode. */ if (DC_IS_CENTAUR(sc)) d = addr | (DC_EECMD_READ << 2); else d = addr | DC_EECMD_READ; /* * Feed in each bit and strobe the clock. */ for (i = 0x400; i; i >>= 1) { if (d & i) { SIO_SET(DC_SIO_EE_DATAIN); } else { SIO_CLR(DC_SIO_EE_DATAIN); } dc_delay(sc); SIO_SET(DC_SIO_EE_CLK); dc_delay(sc); SIO_CLR(DC_SIO_EE_CLK); dc_delay(sc); } return; } /* * Read a word of data stored in the EEPROM at address 'addr.' * The PNIC 82c168/82c169 has its own non-standard way to read * the EEPROM. */ static void dc_eeprom_getword_pnic(sc, addr, dest) struct dc_softc *sc; int addr; u_int16_t *dest; { register int i; u_int32_t r; CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ|addr); for (i = 0; i < DC_TIMEOUT; i++) { DELAY(1); r = CSR_READ_4(sc, DC_SIO); if (!(r & DC_PN_SIOCTL_BUSY)) { *dest = (u_int16_t)(r & 0xFFFF); return; } } return; } /* * Read a word of data stored in the EEPROM at address 'addr.' * The Xircom X3201 has its own non-standard way to read * the EEPROM, too. */ static void dc_eeprom_getword_xircom(sc, addr, dest) struct dc_softc *sc; int addr; u_int16_t *dest; { SIO_SET(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ); addr *= 2; CSR_WRITE_4(sc, DC_ROM, addr | 0x160); *dest = (u_int16_t)CSR_READ_4(sc, DC_SIO)&0xff; addr += 1; CSR_WRITE_4(sc, DC_ROM, addr | 0x160); *dest |= ((u_int16_t)CSR_READ_4(sc, DC_SIO)&0xff) << 8; SIO_CLR(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ); return; } /* * Read a word of data stored in the EEPROM at address 'addr.' */ static void dc_eeprom_getword(sc, addr, dest) struct dc_softc *sc; int addr; u_int16_t *dest; { register int i; u_int16_t word = 0; /* Force EEPROM to idle state. */ dc_eeprom_idle(sc); /* Enter EEPROM access mode. */ CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); dc_delay(sc); DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); dc_delay(sc); /* * Send address of word we want to read. 
*/
	dc_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM: clock 16 bits in MSB-first,
	 * sampling DC_SIO_EE_DATAOUT on each rising edge.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(DC_SIO_EE_CLK);
		dc_delay(sc);
		if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)
			word |= i;
		dc_delay(sc);
		SIO_CLR(DC_SIO_EE_CLK);
		dc_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	dc_eeprom_idle(sc);

	*dest = word;

	return;
}

/*
 * Read a sequence of 'cnt' 16-bit words from the EEPROM starting at
 * word offset 'off' into 'dest'.  If 'swap' is set each word is
 * byte-swapped via ntohs() before being stored.
 */
static void dc_read_eeprom(sc, dest, off, cnt, swap)
	struct dc_softc		*sc;
	caddr_t			dest;
	int			off;
	int			cnt;
	int			swap;
{
	int			i;
	u_int16_t		word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		/* Dispatch to the chip-specific word-read routine. */
		if (DC_IS_PNIC(sc))
			dc_eeprom_getword_pnic(sc, off + i, &word);
		else if (DC_IS_XIRCOM(sc))
			dc_eeprom_getword_xircom(sc, off + i, &word);
		else
			dc_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return;
}

/*
 * The following two routines are taken from the Macronix 98713
 * Application Notes pp.19-21.
 */

/*
 * Write a bit to the MII bus: present the data bit (if any) on the
 * SIO register, then strobe the MII clock once to latch it.
 */
static void dc_mii_writebit(sc, bit)
	struct dc_softc		*sc;
	int			bit;
{
	if (bit)
		CSR_WRITE_4(sc, DC_SIO,
		    DC_SIO_ROMCTL_WRITE|DC_SIO_MII_DATAOUT);
	else
		CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);

	DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);

	return;
}

/*
 * Read a bit from the MII bus: switch the data pin to input, clock
 * once, then sample DC_SIO_MII_DATAIN.  Returns 1 or 0.
 */
static int dc_mii_readbit(sc)
	struct dc_softc		*sc;
{
	CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_READ|DC_SIO_MII_DIR);
	CSR_READ_4(sc, DC_SIO);	/* flush the direction change */
	DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
	if (CSR_READ_4(sc, DC_SIO) & DC_SIO_MII_DATAIN)
		return(1);

	return(0);
}

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
static void dc_mii_sync(sc)
	struct dc_softc		*sc;
{
	register int		i;

	CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);

	for (i = 0; i < 32; i++)
		dc_mii_writebit(sc, 1);

	return;
}

/*
 * Clock a series of bits through the MII.
*/
static void dc_mii_send(sc, bits, cnt)
	struct dc_softc		*sc;
	u_int32_t		bits;
	int			cnt;
{
	int			i;

	/* Shift the low 'cnt' bits of 'bits' out MSB-first. */
	for (i = (0x1 << (cnt - 1)); i; i >>= 1)
		dc_mii_writebit(sc, bits & i);
}

/*
 * Read an PHY register through the MII.  Bit-bangs a clause-22 read
 * frame; returns 0 on success with the result in frame->mii_data, or
 * 1 if the PHY failed to ack.
 */
static int dc_mii_readreg(sc, frame)
	struct dc_softc		*sc;
	struct dc_mii_frame	*frame;
{
	int			i, ack;

	DC_LOCK(sc);

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = DC_MII_STARTDELIM;
	frame->mii_opcode = DC_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Sync the PHYs.
	 */
	dc_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	dc_mii_send(sc, frame->mii_stdelim, 2);
	dc_mii_send(sc, frame->mii_opcode, 2);
	dc_mii_send(sc, frame->mii_phyaddr, 5);
	dc_mii_send(sc, frame->mii_regaddr, 5);

#ifdef notdef
	/* Idle bit */
	dc_mii_writebit(sc, 1);
	dc_mii_writebit(sc, 0);
#endif

	/* Check for ack */
	ack = dc_mii_readbit(sc);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			dc_mii_readbit(sc);
		}
		goto fail;
	}

	/* Clock in the 16 data bits, MSB-first. */
	for (i = 0x8000; i; i >>= 1) {
		if (!ack) {
			if (dc_mii_readbit(sc))
				frame->mii_data |= i;
		}
	}

fail:

	/* Trailing idle bits to return the bus to a known state. */
	dc_mii_writebit(sc, 0);
	dc_mii_writebit(sc, 0);

	DC_UNLOCK(sc);

	if (ack)
		return(1);
	return(0);
}

/*
 * Write to a PHY register through the MII.  Bit-bangs a clause-22
 * write frame; always returns 0.
 */
static int dc_mii_writereg(sc, frame)
	struct dc_softc		*sc;
	struct dc_mii_frame	*frame;
{
	DC_LOCK(sc);

	/*
	 * Set up frame for TX.
	 */
	frame->mii_stdelim = DC_MII_STARTDELIM;
	frame->mii_opcode = DC_MII_WRITEOP;
	frame->mii_turnaround = DC_MII_TURNAROUND;

	/*
	 * Sync the PHYs.
	 */
	dc_mii_sync(sc);

	dc_mii_send(sc, frame->mii_stdelim, 2);
	dc_mii_send(sc, frame->mii_opcode, 2);
	dc_mii_send(sc, frame->mii_phyaddr, 5);
	dc_mii_send(sc, frame->mii_regaddr, 5);
	dc_mii_send(sc, frame->mii_turnaround, 2);
	dc_mii_send(sc, frame->mii_data, 16);

	/* Idle bit.
*/ dc_mii_writebit(sc, 0); dc_mii_writebit(sc, 0); DC_UNLOCK(sc); return(0); } static int dc_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct dc_mii_frame frame; struct dc_softc *sc; int i, rval, phy_reg = 0; sc = device_get_softc(dev); bzero((char *)&frame, sizeof(frame)); /* * Note: both the AL981 and AN985 have internal PHYs, * however the AL981 provides direct access to the PHY * registers while the AN985 uses a serial MII interface. * The AN985's MII interface is also buggy in that you * can read from any MII address (0 to 31), but only address 1 * behaves normally. To deal with both cases, we pretend * that the PHY is at MII address 1. */ if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR) return(0); if (sc->dc_pmode != DC_PMODE_MII) { if (phy == (MII_NPHY - 1)) { switch(reg) { case MII_BMSR: /* * Fake something to make the probe * code think there's a PHY here. */ return(BMSR_MEDIAMASK); break; case MII_PHYIDR1: if (DC_IS_PNIC(sc)) return(DC_VENDORID_LO); return(DC_VENDORID_DEC); break; case MII_PHYIDR2: if (DC_IS_PNIC(sc)) return(DC_DEVICEID_82C168); return(DC_DEVICEID_21143); break; default: return(0); break; } } else return(0); } if (DC_IS_PNIC(sc)) { CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ | (phy << 23) | (reg << 18)); for (i = 0; i < DC_TIMEOUT; i++) { DELAY(1); rval = CSR_READ_4(sc, DC_PN_MII); if (!(rval & DC_PN_MII_BUSY)) { rval &= 0xFFFF; return(rval == 0xFFFF ? 
0 : rval); } } return(0); } if (DC_IS_COMET(sc)) { switch(reg) { case MII_BMCR: phy_reg = DC_AL_BMCR; break; case MII_BMSR: phy_reg = DC_AL_BMSR; break; case MII_PHYIDR1: phy_reg = DC_AL_VENID; break; case MII_PHYIDR2: phy_reg = DC_AL_DEVID; break; case MII_ANAR: phy_reg = DC_AL_ANAR; break; case MII_ANLPAR: phy_reg = DC_AL_LPAR; break; case MII_ANER: phy_reg = DC_AL_ANER; break; default: printf("dc%d: phy_read: bad phy register %x\n", sc->dc_unit, reg); return(0); break; } rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF; if (rval == 0xFFFF) return(0); return(rval); } frame.mii_phyaddr = phy; frame.mii_regaddr = reg; if (sc->dc_type == DC_TYPE_98713) { phy_reg = CSR_READ_4(sc, DC_NETCFG); CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL); } dc_mii_readreg(sc, &frame); if (sc->dc_type == DC_TYPE_98713) CSR_WRITE_4(sc, DC_NETCFG, phy_reg); return(frame.mii_data); } static int dc_miibus_writereg(dev, phy, reg, data) device_t dev; int phy, reg, data; { struct dc_softc *sc; struct dc_mii_frame frame; int i, phy_reg = 0; sc = device_get_softc(dev); bzero((char *)&frame, sizeof(frame)); if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR) return(0); if (DC_IS_PNIC(sc)) { CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE | (phy << 23) | (reg << 10) | data); for (i = 0; i < DC_TIMEOUT; i++) { if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY)) break; } return(0); } if (DC_IS_COMET(sc)) { switch(reg) { case MII_BMCR: phy_reg = DC_AL_BMCR; break; case MII_BMSR: phy_reg = DC_AL_BMSR; break; case MII_PHYIDR1: phy_reg = DC_AL_VENID; break; case MII_PHYIDR2: phy_reg = DC_AL_DEVID; break; case MII_ANAR: phy_reg = DC_AL_ANAR; break; case MII_ANLPAR: phy_reg = DC_AL_LPAR; break; case MII_ANER: phy_reg = DC_AL_ANER; break; default: printf("dc%d: phy_write: bad phy register %x\n", sc->dc_unit, reg); return(0); break; } CSR_WRITE_4(sc, phy_reg, data); return(0); } frame.mii_phyaddr = phy; frame.mii_regaddr = reg; frame.mii_data = data; if (sc->dc_type == DC_TYPE_98713) { phy_reg = 
CSR_READ_4(sc, DC_NETCFG); CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL); } dc_mii_writereg(sc, &frame); if (sc->dc_type == DC_TYPE_98713) CSR_WRITE_4(sc, DC_NETCFG, phy_reg); return(0); } static void dc_miibus_statchg(dev) device_t dev; { struct dc_softc *sc; struct mii_data *mii; struct ifmedia *ifm; sc = device_get_softc(dev); if (DC_IS_ADMTEK(sc)) return; mii = device_get_softc(sc->dc_miibus); ifm = &mii->mii_media; if (DC_IS_DAVICOM(sc) && IFM_SUBTYPE(ifm->ifm_media) == IFM_homePNA) { dc_setcfg(sc, ifm->ifm_media); sc->dc_if_media = ifm->ifm_media; } else { dc_setcfg(sc, mii->mii_media_active); sc->dc_if_media = mii->mii_media_active; } return; } /* * Special support for DM9102A cards with HomePNA PHYs. Note: * with the Davicom DM9102A/DM9801 eval board that I have, it seems * to be impossible to talk to the management interface of the DM9801 * PHY (its MDIO pin is not connected to anything). Consequently, * the driver has to just 'know' about the additional mode and deal * with it itself. *sigh* */ static void dc_miibus_mediainit(dev) device_t dev; { struct dc_softc *sc; struct mii_data *mii; struct ifmedia *ifm; int rev; rev = pci_read_config(dev, DC_PCI_CFRV, 4) & 0xFF; sc = device_get_softc(dev); mii = device_get_softc(sc->dc_miibus); ifm = &mii->mii_media; if (DC_IS_DAVICOM(sc) && rev >= DC_REVISION_DM9102A) ifmedia_add(ifm, IFM_ETHER|IFM_homePNA, 0, NULL); return; } #define DC_POLY 0xEDB88320 #define DC_BITS_512 9 #define DC_BITS_128 7 #define DC_BITS_64 6 static u_int32_t dc_crc_le(sc, addr) struct dc_softc *sc; caddr_t addr; { u_int32_t idx, bit, data, crc; /* Compute CRC for the address value. */ crc = 0xFFFFFFFF; /* initial value */ for (idx = 0; idx < 6; idx++) { for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) crc = (crc >> 1) ^ (((crc ^ data) & 1) ? DC_POLY : 0); } /* * The hash table on the PNIC II and the MX98715AEC-C/D/E * chips is only 128 bits wide. 
*/
	if (sc->dc_flags & DC_128BIT_HASH)
		return (crc & ((1 << DC_BITS_128) - 1));

	/* The hash table on the MX98715BEC is only 64 bits wide. */
	if (sc->dc_flags & DC_64BIT_HASH)
		return (crc & ((1 << DC_BITS_64) - 1));

	/* Xircom's hash filtering table is different (read: weird) */
	/* Xircom uses the LEAST significant bits */
	if (DC_IS_XIRCOM(sc)) {
		if ((crc & 0x180) == 0x180)
			return (crc & 0x0F) + (crc & 0x70)*3 + (14 << 4);
		else
			return (crc & 0x1F) + ((crc>>1) & 0xF0)*3 + (12 << 4);
	}

	/* Default: 512-bit hash table (21143 and most clones). */
	return (crc & ((1 << DC_BITS_512) - 1));
}

/*
 * Calculate CRC of a multicast group address, return the lower 6 bits.
 * Bitwise big-endian CRC-32 (polynomial 0x04c11db6 as coded below)
 * over the 6-byte Ethernet address; the top 6 bits of the result
 * select the filter bit position.
 */
static u_int32_t dc_crc_be(addr)
	caddr_t			addr;
{
	u_int32_t		crc, carry;
	int			i, j;
	u_int8_t		c;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF; /* initial value */

	for (i = 0; i < 6; i++) {
		c = *(addr + i);
		/* Feed each byte in LSB-first, one bit at a time. */
		for (j = 0; j < 8; j++) {
			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
			crc <<= 1;
			c >>= 1;
			if (carry)
				crc = (crc ^ 0x04c11db6) | carry;
		}
	}

	/* return the filter bit position */
	return((crc >> 26) & 0x0000003F);
}

/*
 * 21143-style RX filter setup routine. Filter programming is done by
 * downloading a special setup frame into the TX engine. 21143, Macronix,
 * PNIC, PNIC II and Davicom chips are programmed this way.
 *
 * We always program the chip using 'hash perfect' mode, i.e. one perfect
 * address (our node address) and a 512-bit hash filter for multicast
 * frames. We also sneak the broadcast address into the hash filter since
 * we need that too.
*/ void dc_setfilt_21143(sc) struct dc_softc *sc; { struct dc_desc *sframe; u_int32_t h, *sp; struct ifmultiaddr *ifma; struct ifnet *ifp; int i; ifp = &sc->arpcom.ac_if; i = sc->dc_cdata.dc_tx_prod; DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT); sc->dc_cdata.dc_tx_cnt++; sframe = &sc->dc_ldata->dc_tx_list[i]; sp = (u_int32_t *)&sc->dc_cdata.dc_sbuf; bzero((char *)sp, DC_SFRAME_LEN); sframe->dc_data = vtophys(&sc->dc_cdata.dc_sbuf); sframe->dc_ctl = DC_SFRAME_LEN | DC_TXCTL_SETUP | DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT; sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)&sc->dc_cdata.dc_sbuf; /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); else DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); if (ifp->if_flags & IFF_ALLMULTI) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); else DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); - for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; - ifma = ifma->ifma_link.le_next) { + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = dc_crc_le(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); sp[h >> 4] |= 1 << (h & 0xF); } if (ifp->if_flags & IFF_BROADCAST) { h = dc_crc_le(sc, (caddr_t)ðerbroadcastaddr); sp[h >> 4] |= 1 << (h & 0xF); } /* Set our MAC address */ sp[39] = ((u_int16_t *)sc->arpcom.ac_enaddr)[0]; sp[40] = ((u_int16_t *)sc->arpcom.ac_enaddr)[1]; sp[41] = ((u_int16_t *)sc->arpcom.ac_enaddr)[2]; sframe->dc_status = DC_TXSTAT_OWN; CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); /* * The PNIC takes an exceedingly long time to process its * setup frame; wait 10ms after posting the setup frame * before proceeding, just so it has time to swallow its * medicine. 
*/ DELAY(10000); ifp->if_timer = 5; return; } void dc_setfilt_admtek(sc) struct dc_softc *sc; { struct ifnet *ifp; int h = 0; u_int32_t hashes[2] = { 0, 0 }; struct ifmultiaddr *ifma; ifp = &sc->arpcom.ac_if; /* Init our MAC address */ CSR_WRITE_4(sc, DC_AL_PAR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0])); CSR_WRITE_4(sc, DC_AL_PAR1, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4])); /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); else DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); if (ifp->if_flags & IFF_ALLMULTI) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); else DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); /* first, zot all the existing hash bits */ CSR_WRITE_4(sc, DC_AL_MAR0, 0); CSR_WRITE_4(sc, DC_AL_MAR1, 0); /* * If we're already in promisc or allmulti mode, we * don't have to bother programming the multicast filter. */ if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) return; /* now program new ones */ - for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; - ifma = ifma->ifma_link.le_next) { + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = dc_crc_be(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); } CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0]); CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1]); return; } void dc_setfilt_asix(sc) struct dc_softc *sc; { struct ifnet *ifp; int h = 0; u_int32_t hashes[2] = { 0, 0 }; struct ifmultiaddr *ifma; ifp = &sc->arpcom.ac_if; /* Init our MAC address */ CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0); CSR_WRITE_4(sc, DC_AX_FILTDATA, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0])); CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1); CSR_WRITE_4(sc, DC_AX_FILTDATA, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4])); /* If we want promiscuous mode, set the allframes bit. 
*/ if (ifp->if_flags & IFF_PROMISC) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); else DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); if (ifp->if_flags & IFF_ALLMULTI) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); else DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); /* * The ASIX chip has a special bit to enable reception * of broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD); else DC_CLRBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD); /* first, zot all the existing hash bits */ CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0); CSR_WRITE_4(sc, DC_AX_FILTDATA, 0); CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1); CSR_WRITE_4(sc, DC_AX_FILTDATA, 0); /* * If we're already in promisc or allmulti mode, we * don't have to bother programming the multicast filter. */ if (ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) return; /* now program new ones */ - for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; - ifma = ifma->ifma_link.le_next) { + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = dc_crc_be(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); } CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0); CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]); CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1); CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]); return; } void dc_setfilt_xircom(sc) struct dc_softc *sc; { struct dc_desc *sframe; u_int32_t h, *sp; struct ifmultiaddr *ifma; struct ifnet *ifp; int i; ifp = &sc->arpcom.ac_if; DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON)); i = sc->dc_cdata.dc_tx_prod; DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT); sc->dc_cdata.dc_tx_cnt++; sframe = &sc->dc_ldata->dc_tx_list[i]; sp = (u_int32_t *)&sc->dc_cdata.dc_sbuf; bzero((char *)sp, DC_SFRAME_LEN); sframe->dc_data = vtophys(&sc->dc_cdata.dc_sbuf); sframe->dc_ctl = DC_SFRAME_LEN | DC_TXCTL_SETUP | 
DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT; sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)&sc->dc_cdata.dc_sbuf; /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); else DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC); if (ifp->if_flags & IFF_ALLMULTI) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); else DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI); - for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; - ifma = ifma->ifma_link.le_next) { + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = dc_crc_le(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); sp[h >> 4] |= 1 << (h & 0xF); } if (ifp->if_flags & IFF_BROADCAST) { h = dc_crc_le(sc, (caddr_t)ðerbroadcastaddr); sp[h >> 4] |= 1 << (h & 0xF); } /* Set our MAC address */ sp[0] = ((u_int16_t *)sc->arpcom.ac_enaddr)[0]; sp[1] = ((u_int16_t *)sc->arpcom.ac_enaddr)[1]; sp[2] = ((u_int16_t *)sc->arpcom.ac_enaddr)[2]; DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON); ifp->if_flags |= IFF_RUNNING; sframe->dc_status = DC_TXSTAT_OWN; CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); /* * wait some time... */ DELAY(1000); ifp->if_timer = 5; return; } static void dc_setfilt(sc) struct dc_softc *sc; { if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) || DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc)) dc_setfilt_21143(sc); if (DC_IS_ASIX(sc)) dc_setfilt_asix(sc); if (DC_IS_ADMTEK(sc)) dc_setfilt_admtek(sc); if (DC_IS_XIRCOM(sc)) dc_setfilt_xircom(sc); return; } /* * In order to fiddle with the * 'full-duplex' and '100Mbps' bits in the netconfig register, we * first have to put the transmit and/or receive logic in the idle state. 
 */

/*
 * Set the speed/duplex/port configuration for the given media word.
 * TX/RX are forced idle first (if running) and restarted at the end;
 * the per-chip quirks (PNIC GPIO, Intel SIA fixups, Davicom HomePNA)
 * are handled inline.
 */
static void dc_setcfg(sc, media)
	struct dc_softc		*sc;
	int			media;
{
	int			i, restart = 0;
	u_int32_t		isr;

	if (IFM_SUBTYPE(media) == IFM_NONE)
		return;

	/* Stop TX/RX and wait for both engines to go idle. */
	if (CSR_READ_4(sc, DC_NETCFG) & (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON)) {
		restart = 1;
		DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON|DC_NETCFG_RX_ON));

		for (i = 0; i < DC_TIMEOUT; i++) {
			DELAY(10);
			isr = CSR_READ_4(sc, DC_ISR);
			if (isr & DC_ISR_TX_IDLE ||
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED)
				break;
		}

		if (i == DC_TIMEOUT)
			printf("dc%d: failed to force tx and "
			    "rx to idle state\n", sc->dc_unit);
	}

	if (IFM_SUBTYPE(media) == IFM_100_TX) {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			int	watchdogreg;

			if (DC_IS_INTEL(sc)) {
			/* there's a write enable bit here that reads as 1 */
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
				    DC_NETCFG_SCRAMBLER));
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			/* SYM (symbol) mode 100Mbps path. */
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_100_TX|IFM_FDX : IFM_100_TX);
		}
	}

	if (IFM_SUBTYPE(media) == IFM_10_T) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			int	watchdogreg;

			/* there's a write enable bit here that reads as 1 */
			if (DC_IS_INTEL(sc)) {
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG, DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS|
			    DC_NETCFG_PORTSEL|DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_SPEEDSEL);
			}
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc)) {
				/* Reprogram the SIA for 10baseT. */
				DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
				if ((media & IFM_GMASK) == IFM_FDX)
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D);
				else
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F);
				DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL,
				    DC_TCTL_AUTONEGENBL);
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_10_T|IFM_FDX : IFM_10_T);
				DELAY(20000);
			}
		}
	}

	/*
	 * If this is a Davicom DM9102A card with a DM9801 HomePNA
	 * PHY and we want HomePNA mode, set the portsel bit to turn
	 * on the external MII port.
	 */
	if (DC_IS_DAVICOM(sc)) {
		if (IFM_SUBTYPE(media) == IFM_homePNA) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			sc->dc_link = 1;
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
		}
	}

	if ((media & IFM_GMASK) == IFM_FDX) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	} else {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	}

	if (restart)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON|DC_NETCFG_RX_ON);

	return;
}

/*
 * Software-reset the chip and clear the IMR/BUSCTL/NETCFG registers.
 */
static void dc_reset(sc)
	struct dc_softc		*sc;
{
	register int		i;

	DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);

	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET))
			break;
	}

	/*
	 * These chips never clear the reset bit by themselves; clear it
	 * manually and zero i so the timeout warning below is suppressed.
	 */
	if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_XIRCOM(sc) ||
	    DC_IS_INTEL(sc)) {
		DELAY(10000);
		DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);
		i = 0;
	}

	if (i == DC_TIMEOUT)
		printf("dc%d: reset never completed!\n", sc->dc_unit);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000);
	CSR_WRITE_4(sc, DC_NETCFG, 0x00000000);

	/*
	 * Bring the SIA out of reset. In some cases, it looks
	 * like failing to unreset the SIA soon enough gets it
	 * into a state where it will never come out of reset
	 * until we reset the whole chip again.
	 */
	if (DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
		CSR_WRITE_4(sc, DC_10BTCTRL, 0);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}

	return;
}

/*
 * Match the PCI vendor/device ID against the dc_devs table. The t++
 * steps rely on the table ordering: the entry following each base part
 * is presumably its newer revision (confirm against dc_devs) — a high
 * enough PCI revision selects that next entry instead.
 */
static struct dc_type *dc_devtype(dev)
	device_t		dev;
{
	struct dc_type		*t;
	u_int32_t		rev;

	t = dc_devs;

	while(t->dc_name != NULL) {
		if ((pci_get_vendor(dev) == t->dc_vid) &&
		    (pci_get_device(dev) == t->dc_did)) {
			/* Check the PCI revision */
			rev = pci_read_config(dev, DC_PCI_CFRV, 4) & 0xFF;
			if (t->dc_did == DC_DEVICEID_98713 &&
			    rev >= DC_REVISION_98713A)
				t++;
			if (t->dc_did == DC_DEVICEID_98713_CP &&
			    rev >= DC_REVISION_98713A)
				t++;
			if (t->dc_did == DC_DEVICEID_987x5 &&
			    rev >= DC_REVISION_98715AEC_C)
				t++;
			if (t->dc_did == DC_DEVICEID_987x5 &&
			    rev >= DC_REVISION_98725)
				t++;
			if (t->dc_did == DC_DEVICEID_AX88140A &&
			    rev >= DC_REVISION_88141)
				t++;
			if (t->dc_did == DC_DEVICEID_82C168 &&
			    rev >= DC_REVISION_82C169)
				t++;
			if (t->dc_did == DC_DEVICEID_DM9102 &&
			    rev >= DC_REVISION_DM9102A)
				t++;
			return(t);
		}
		t++;
	}

	return(NULL);
}

/*
 * Probe for a 21143 or clone chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 * We do a little bit of extra work to identify the exact type of
 * chip. The MX98713 and MX98713A have the same PCI vendor/device ID,
 * but different revision IDs. The same is true for 98715/98715A
 * chips and the 98725, as well as the ASIX and ADMtek chips. In some
 * cases, the exact chip revision affects driver behavior.
 */
static int dc_probe(dev)
	device_t		dev;
{
	struct dc_type		*t;

	t = dc_devtype(dev);

	if (t != NULL) {
		device_set_desc(dev, t->dc_name);
		return(0);
	}

	return(ENXIO);
}

/*
 * If the device is in a low-power state, force it back to D0,
 * preserving the BARs and interrupt line across the transition
 * (setting the power state can clear PCI config data).
 */
static void dc_acpi(dev)
	device_t		dev;
{
	int			unit;

	unit = device_get_unit(dev);

	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		u_int32_t		iobase, membase, irq;

		/* Save important PCI config data. */
		iobase = pci_read_config(dev, DC_PCI_CFBIO, 4);
		membase = pci_read_config(dev, DC_PCI_CFBMA, 4);
		irq = pci_read_config(dev, DC_PCI_CFIT, 4);

		/* Reset the power state. */
		printf("dc%d: chip is in D%d power mode "
		    "-- setting to D0\n", unit,
		    pci_get_powerstate(dev));
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, DC_PCI_CFBIO, iobase, 4);
		pci_write_config(dev, DC_PCI_CFBMA, membase, 4);
		pci_write_config(dev, DC_PCI_CFIT, irq, 4);
	}

	return;
}

/*
 * Replay the GPIO reset and general-purpose sequences recorded for
 * the given media type (from the SROM media info list) by writing
 * them to the watchdog/GPIO register.
 */
static void dc_apply_fixup(sc, media)
	struct dc_softc		*sc;
	int			media;
{
	struct dc_mediainfo	*m;
	u_int8_t		*p;
	int			i;
	u_int32_t		reg;

	m = sc->dc_mi;

	while (m != NULL) {
		if (m->dc_media == media)
			break;
		m = m->dc_next;
	}

	if (m == NULL)
		return;

	for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) {
		reg = (p[0] | (p[1] << 8)) << 16;
		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
	}

	for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) {
		reg = (p[0] | (p[1] << 8)) << 16;
		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
	}

	return;
}

/*
 * Decode an SROM SIA media leaf into a dc_mediainfo entry and
 * prepend it to sc->dc_mi; switches the port mode to SIA.
 */
static void dc_decode_leaf_sia(sc, l)
	struct dc_softc		*sc;
	struct dc_eblock_sia	*l;
{
	struct dc_mediainfo	*m;

	/* NOTE(review): M_NOWAIT malloc is not NULL-checked before bzero. */
	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT);
	bzero(m, sizeof(struct dc_mediainfo));
	if (l->dc_sia_code == DC_SIA_CODE_10BT)
		m->dc_media = IFM_10_T;

	if (l->dc_sia_code == DC_SIA_CODE_10BT_FDX)
		m->dc_media = IFM_10_T|IFM_FDX;

	if (l->dc_sia_code == DC_SIA_CODE_10B2)
		m->dc_media = IFM_10_2;

	if (l->dc_sia_code == DC_SIA_CODE_10B5)
		m->dc_media = IFM_10_5;

	m->dc_gp_len = 2;
	m->dc_gp_ptr = (u_int8_t *)&l->dc_sia_gpio_ctl;

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	sc->dc_pmode = DC_PMODE_SIA;

	return;
}

/*
 * Decode an SROM SYM (symbol mode 100Mbps) media leaf into a
 * dc_mediainfo entry; switches the port mode to SYM.
 */
static void dc_decode_leaf_sym(sc, l)
	struct dc_softc		*sc;
	struct dc_eblock_sym	*l;
{
	struct dc_mediainfo	*m;

	/* NOTE(review): M_NOWAIT malloc is not NULL-checked before bzero. */
	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT);
	bzero(m, sizeof(struct dc_mediainfo));
	if (l->dc_sym_code == DC_SYM_CODE_100BT)
		m->dc_media = IFM_100_TX;

	if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX)
		m->dc_media = IFM_100_TX|IFM_FDX;

	m->dc_gp_len = 2;
	m->dc_gp_ptr = (u_int8_t *)&l->dc_sym_gpio_ctl;

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	sc->dc_pmode = DC_PMODE_SYM;

	return;
}

static void
dc_decode_leaf_mii(sc, l)
	struct dc_softc		*sc;
	struct dc_eblock_mii	*l;
{
	u_int8_t		*p;
	struct dc_mediainfo	*m;

	/* NOTE(review): M_NOWAIT malloc is not NULL-checked before bzero. */
	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF, M_NOWAIT);
	bzero(m, sizeof(struct dc_mediainfo));
	/* We abuse IFM_AUTO to represent MII. */
	m->dc_media = IFM_AUTO;
	m->dc_gp_len = l->dc_gpr_len;

	/*
	 * The GP sequence follows the fixed header; the reset sequence
	 * follows the GP sequence, preceded by its one-byte length.
	 */
	p = (u_int8_t *)l;
	p += sizeof(struct dc_eblock_mii);
	m->dc_gp_ptr = p;
	p += 2 * l->dc_gpr_len;
	m->dc_reset_len = *p;
	p++;
	m->dc_reset_ptr = p;

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	sc->dc_pmode = DC_PMODE_MII;

	return;
}

/*
 * Walk the extended media blocks in the 21143 SROM leaf and build
 * the sc->dc_mi media info list from the MII/SIA/SYM entries.
 */
static void dc_parse_21143_srom(sc)
	struct dc_softc		*sc;
{
	struct dc_leaf_hdr	*lhdr;
	struct dc_eblock_hdr	*hdr;
	int			i, loff;
	char			*ptr;

	loff = sc->dc_srom[27];
	lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]);

	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		switch(hdr->dc_type) {
		case DC_EBLOCK_MII:
			dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr);
			break;
		case DC_EBLOCK_SIA:
			dc_decode_leaf_sia(sc, (struct dc_eblock_sia *)hdr);
			break;
		case DC_EBLOCK_SYM:
			dc_decode_leaf_sym(sc, (struct dc_eblock_sym *)hdr);
			break;
		default:
			/* Don't care. Yet. */
			break;
		}
		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}

	return;
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int dc_attach(dev)
	device_t		dev;
{
	int			tmp = 0;
	u_char			eaddr[ETHER_ADDR_LEN];
	u_int32_t		command;
	struct dc_softc		*sc;
	struct ifnet		*ifp;
	u_int32_t		revision;
	int			unit, error = 0, rid, mac_offset;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	bzero(sc, sizeof(struct dc_softc));

	mtx_init(&sc->dc_mtx, device_get_nameunit(dev), MTX_DEF | MTX_RECURSE);
	DC_LOCK(sc);

	/*
	 * Handle power management nonsense.
	 */
	dc_acpi(dev);

	/*
	 * Map control/status registers.
	 */
	command = pci_read_config(dev, PCIR_COMMAND, 4);
	command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
	pci_write_config(dev, PCIR_COMMAND, command, 4);
	command = pci_read_config(dev, PCIR_COMMAND, 4);

#ifdef DC_USEIOSPACE
	if (!(command & PCIM_CMD_PORTEN)) {
		printf("dc%d: failed to enable I/O ports!\n", unit);
		error = ENXIO;
		goto fail;
	}
#else
	if (!(command & PCIM_CMD_MEMEN)) {
		printf("dc%d: failed to enable memory mapping!\n", unit);
		error = ENXIO;
		goto fail;
	}
#endif

	rid = DC_RID;
	sc->dc_res = bus_alloc_resource(dev, DC_RES, &rid,
	    0, ~0, 1, RF_ACTIVE);

	if (sc->dc_res == NULL) {
		printf("dc%d: couldn't map ports/memory\n", unit);
		error = ENXIO;
		goto fail;
	}

	sc->dc_btag = rman_get_bustag(sc->dc_res);
	sc->dc_bhandle = rman_get_bushandle(sc->dc_res);

	/* Allocate interrupt */
	rid = 0;
	sc->dc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->dc_irq == NULL) {
		printf("dc%d: couldn't map interrupt\n", unit);
		bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res);
		error = ENXIO;
		goto fail;
	}

	error = bus_setup_intr(dev, sc->dc_irq, INTR_TYPE_NET |
	    (IS_MPSAFE ? INTR_MPSAFE : 0),
	    dc_intr, sc, &sc->dc_intrhand);

	if (error) {
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->dc_irq);
		bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res);
		printf("dc%d: couldn't set up irq\n", unit);
		goto fail;
	}

	/* Need this info to decide on a chip type. */
	sc->dc_info = dc_devtype(dev);
	revision = pci_read_config(dev, DC_PCI_CFRV, 4) & 0x000000FF;

	/* Per-chip capability flags and quirks. */
	switch(sc->dc_info->dc_did) {
	case DC_DEVICEID_21143:
		sc->dc_type = DC_TYPE_21143;
		sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		/* Save EEPROM contents so we can parse them later. */
		dc_read_eeprom(sc, (caddr_t)&sc->dc_srom, 0, 512, 0);
		break;
	case DC_DEVICEID_DM9100:
	case DC_DEVICEID_DM9102:
		sc->dc_type = DC_TYPE_DM9102;
		sc->dc_flags |= DC_TX_COALESCE|DC_TX_INTR_ALWAYS;
		sc->dc_flags |= DC_REDUCED_MII_POLL|DC_TX_STORENFWD;
		sc->dc_pmode = DC_PMODE_MII;
		/* Increase the latency timer value. */
		command = pci_read_config(dev, DC_PCI_CFLT, 4);
		command &= 0xFFFF00FF;
		command |= 0x00008000;
		pci_write_config(dev, DC_PCI_CFLT, command, 4);
		break;
	case DC_DEVICEID_AL981:
		sc->dc_type = DC_TYPE_AL981;
		sc->dc_flags |= DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_TX_ADMTEK_WAR;
		sc->dc_pmode = DC_PMODE_MII;
		break;
	case DC_DEVICEID_AN985:
	case DC_DEVICEID_FE2500:
	case DC_DEVICEID_EN2242:
		sc->dc_type = DC_TYPE_AN985;
		sc->dc_flags |= DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_TX_ADMTEK_WAR;
		sc->dc_pmode = DC_PMODE_MII;
		break;
	case DC_DEVICEID_98713:
	case DC_DEVICEID_98713_CP:
		if (revision < DC_REVISION_98713A) {
			sc->dc_type = DC_TYPE_98713;
		}
		if (revision >= DC_REVISION_98713A) {
			sc->dc_type = DC_TYPE_98713A;
			sc->dc_flags |= DC_21143_NWAY;
		}
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR;
		break;
	case DC_DEVICEID_987x5:
	case DC_DEVICEID_EN1217:
		/*
		 * Macronix MX98715AEC-C/D/E parts have only a
		 * 128-bit hash table. We need to deal with these
		 * in the same manner as the PNIC II so that we
		 * get the right number of bits out of the
		 * CRC routine.
		 */
		if (revision >= DC_REVISION_98715AEC_C &&
		    revision < DC_REVISION_98725)
			sc->dc_flags |= DC_128BIT_HASH;
		sc->dc_type = DC_TYPE_987x5;
		sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_REDUCED_MII_POLL|DC_21143_NWAY;
		break;
	case DC_DEVICEID_98727:
		sc->dc_type = DC_TYPE_987x5;
		sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR;
		sc->dc_flags |= DC_REDUCED_MII_POLL|DC_21143_NWAY;
		break;
	case DC_DEVICEID_82C115:
		sc->dc_type = DC_TYPE_PNICII;
		sc->dc_flags |= DC_TX_POLL|DC_TX_USE_TX_INTR|DC_128BIT_HASH;
		sc->dc_flags |= DC_REDUCED_MII_POLL|DC_21143_NWAY;
		break;
	case DC_DEVICEID_82C168:
		sc->dc_type = DC_TYPE_PNIC;
		sc->dc_flags |= DC_TX_STORENFWD|DC_TX_INTR_ALWAYS;
		sc->dc_flags |= DC_PNIC_RX_BUG_WAR;
		/* Staging buffer for the PNIC RX bug workaround. */
		sc->dc_pnic_rx_buf = malloc(DC_RXLEN * 5, M_DEVBUF, M_NOWAIT);
		if (revision < DC_REVISION_82C169)
			sc->dc_pmode = DC_PMODE_SYM;
		break;
	case DC_DEVICEID_AX88140A:
		sc->dc_type = DC_TYPE_ASIX;
		sc->dc_flags |= DC_TX_USE_TX_INTR|DC_TX_INTR_FIRSTFRAG;
		sc->dc_flags |= DC_REDUCED_MII_POLL;
		sc->dc_pmode = DC_PMODE_MII;
		break;
	case DC_DEVICEID_X3201:
		sc->dc_type = DC_TYPE_XIRCOM;
		sc->dc_flags |= DC_TX_INTR_ALWAYS | DC_TX_COALESCE;
		/*
		 * We don't actually need to coalesce, but we're doing
		 * it to obtain a double word aligned buffer.
		 */
		break;
	default:
		printf("dc%d: unknown device: %x\n", sc->dc_unit,
		    sc->dc_info->dc_did);
		break;
	}

	/* Save the cache line size. */
	if (DC_IS_DAVICOM(sc))
		sc->dc_cachesize = 0;
	else
		sc->dc_cachesize = pci_read_config(dev,
		    DC_PCI_CFLT, 4) & 0xFF;

	/* Reset the adapter. */
	dc_reset(sc);

	/* Take 21143 out of snooze mode */
	if (DC_IS_INTEL(sc) || DC_IS_XIRCOM(sc)) {
		command = pci_read_config(dev, DC_PCI_CFDD, 4);
		command &= ~(DC_CFDD_SNOOZE_MODE|DC_CFDD_SLEEP_MODE);
		pci_write_config(dev, DC_PCI_CFDD, command, 4);
	}

	/*
	 * Try to learn something about the supported media.
	 * We know that ASIX and ADMtek and Davicom devices
	 * will *always* be using MII media, so that's a no-brainer.
	 * The tricky ones are the Macronix/PNIC II and the
	 * Intel 21143.
	 */
	if (DC_IS_INTEL(sc))
		dc_parse_21143_srom(sc);
	else if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) {
		if (sc->dc_type == DC_TYPE_98713)
			sc->dc_pmode = DC_PMODE_MII;
		else
			sc->dc_pmode = DC_PMODE_SYM;
	} else if (!sc->dc_pmode)
		sc->dc_pmode = DC_PMODE_MII;

	/*
	 * Get station address from the EEPROM.
	 */
	switch(sc->dc_type) {
	case DC_TYPE_98713:
	case DC_TYPE_98713A:
	case DC_TYPE_987x5:
	case DC_TYPE_PNICII:
		dc_read_eeprom(sc, (caddr_t)&mac_offset,
		    (DC_EE_NODEADDR_OFFSET / 2), 1, 0);
		dc_read_eeprom(sc, (caddr_t)&eaddr, (mac_offset / 2), 3, 0);
		break;
	case DC_TYPE_PNIC:
		dc_read_eeprom(sc, (caddr_t)&eaddr, 0, 3, 1);
		break;
	case DC_TYPE_DM9102:
	case DC_TYPE_21143:
	case DC_TYPE_ASIX:
		dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0);
		break;
	case DC_TYPE_AL981:
	case DC_TYPE_AN985:
		dc_read_eeprom(sc, (caddr_t)&eaddr, DC_AL_EE_NODEADDR, 3, 0);
		break;
	case DC_TYPE_XIRCOM:
		dc_read_eeprom(sc, (caddr_t)&eaddr, 3, 3, 0);
		break;
	default:
		dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0);
		break;
	}

	/*
	 * A 21143 or clone chip was detected. Inform the world.
	 */
	printf("dc%d: Ethernet address: %6D\n", unit, eaddr, ":");

	sc->dc_unit = unit;
	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

	/* Descriptor lists must be physically contiguous for DMA. */
	sc->dc_ldata = contigmalloc(sizeof(struct dc_list_data), M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->dc_ldata == NULL) {
		printf("dc%d: no memory for list buffers!\n", unit);
		bus_teardown_intr(dev, sc->dc_irq, sc->dc_intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->dc_irq);
		bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res);
		error = ENXIO;
		goto fail;
	}

	bzero(sc->dc_ldata, sizeof(struct dc_list_data));

	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_unit = unit;
	ifp->if_name = "dc";
	/* XXX: bleah, MTU gets overwritten in ether_ifattach() */
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dc_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = dc_start;
	ifp->if_watchdog = dc_watchdog;
	ifp->if_init = dc_init;
	ifp->if_baudrate = 10000000;
	ifp->if_snd.ifq_maxlen = DC_TX_LIST_CNT - 1;
	ifp->if_mpsafe = IS_MPSAFE;

	/*
	 * Do MII setup. If this is a 21143, check for a PHY on the
	 * MII bus after applying any necessary fixups to twiddle the
	 * GPIO bits. If we don't end up finding a PHY, restore the
	 * old selection (SIA only or SIA/SYM) and attach the dcphy
	 * driver instead.
	 */
	if (DC_IS_INTEL(sc)) {
		dc_apply_fixup(sc, IFM_AUTO);
		tmp = sc->dc_pmode;
		sc->dc_pmode = DC_PMODE_MII;
	}

	error = mii_phy_probe(dev, &sc->dc_miibus,
	    dc_ifmedia_upd, dc_ifmedia_sts);

	if (error && DC_IS_INTEL(sc)) {
		sc->dc_pmode = tmp;
		if (sc->dc_pmode != DC_PMODE_SIA)
			sc->dc_pmode = DC_PMODE_SYM;
		sc->dc_flags |= DC_21143_NWAY;
		mii_phy_probe(dev, &sc->dc_miibus,
		    dc_ifmedia_upd, dc_ifmedia_sts);
		/*
		 * For non-MII cards, we need to have the 21143
		 * drive the LEDs. Except there are some systems
		 * like the NEC VersaPro NoteBook PC which have no
		 * LEDs, and twiddling these bits has adverse effects
		 * on them. (I.e. you suddenly can't get a link.)
		 */
		if (pci_read_config(dev, DC_PCI_CSID, 4) != 0x80281033)
			sc->dc_flags |= DC_TULIP_LEDS;
		error = 0;
	}

	if (error) {
		printf("dc%d: MII without any PHY!\n", sc->dc_unit);
		bus_teardown_intr(dev, sc->dc_irq, sc->dc_intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->dc_irq);
		bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res);
		error = ENXIO;
		goto fail;
	}

	if (DC_IS_XIRCOM(sc)) {
		/*
		 * setup General Purpose Port mode and data so the tulip
		 * can talk to the MII.
		 */
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
	callout_init(&sc->dc_stat_ch, IS_MPSAFE);

#ifdef SRM_MEDIA
	sc->dc_srm_media = 0;

	/* Remember the SRM console media setting */
	if (DC_IS_INTEL(sc)) {
		command = pci_read_config(dev, DC_PCI_CFDD, 4);
		command &= ~(DC_CFDD_SNOOZE_MODE|DC_CFDD_SLEEP_MODE);
		switch ((command >> 8) & 0xff) {
		case 3:
			sc->dc_srm_media = IFM_10_T;
			break;
		case 4:
			sc->dc_srm_media = IFM_10_T | IFM_FDX;
			break;
		case 5:
			sc->dc_srm_media = IFM_100_TX;
			break;
		case 6:
			sc->dc_srm_media = IFM_100_TX | IFM_FDX;
			break;
		}
		if (sc->dc_srm_media)
			sc->dc_srm_media |= IFM_ACTIVE | IFM_ETHER;
	}
#endif

	DC_UNLOCK(sc);
	return(0);

fail:
	DC_UNLOCK(sc);
	mtx_destroy(&sc->dc_mtx);
	return(error);
}

/*
 * Detach the interface: stop the chip, tear down the interrupt and
 * bus resources, and free descriptor memory, the PNIC workaround
 * buffer and the media info list.
 */
static int dc_detach(dev)
	device_t		dev;
{
	struct dc_softc		*sc;
	struct ifnet		*ifp;
	struct dc_mediainfo	*m;

	sc = device_get_softc(dev);

	DC_LOCK(sc);

	ifp = &sc->arpcom.ac_if;

	dc_stop(sc);
	ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);

	bus_generic_detach(dev);
	device_delete_child(dev, sc->dc_miibus);

	bus_teardown_intr(dev, sc->dc_irq, sc->dc_intrhand);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->dc_irq);
	bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res);

	contigfree(sc->dc_ldata, sizeof(struct dc_list_data), M_DEVBUF);
	if (sc->dc_pnic_rx_buf != NULL)
		free(sc->dc_pnic_rx_buf, M_DEVBUF);

	while(sc->dc_mi != NULL) {
		m = sc->dc_mi->dc_next;
		free(sc->dc_mi, M_DEVBUF);
		sc->dc_mi = m;
	}

	DC_UNLOCK(sc);
	mtx_destroy(&sc->dc_mtx);

	return(0);
}

/*
 * Initialize the transmit descriptors.
 */
static int dc_list_tx_init(sc)
	struct dc_softc		*sc;
{
	struct dc_chain_data	*cd;
	struct dc_list_data	*ld;
	int			i;

	cd = &sc->dc_cdata;
	ld = sc->dc_ldata;
	for (i = 0; i < DC_TX_LIST_CNT; i++) {
		/* Link descriptors into a closed ring. */
		if (i == (DC_TX_LIST_CNT - 1)) {
			ld->dc_tx_list[i].dc_next =
			    vtophys(&ld->dc_tx_list[0]);
		} else {
			ld->dc_tx_list[i].dc_next =
			    vtophys(&ld->dc_tx_list[i + 1]);
		}
		cd->dc_tx_chain[i] = NULL;
		ld->dc_tx_list[i].dc_data = 0;
		ld->dc_tx_list[i].dc_ctl = 0;
	}

	cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0;

	return(0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int dc_list_rx_init(sc)
	struct dc_softc		*sc;
{
	struct dc_chain_data	*cd;
	struct dc_list_data	*ld;
	int			i;

	cd = &sc->dc_cdata;
	ld = sc->dc_ldata;

	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		if (dc_newbuf(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
		if (i == (DC_RX_LIST_CNT - 1)) {
			ld->dc_rx_list[i].dc_next =
			    vtophys(&ld->dc_rx_list[0]);
		} else {
			ld->dc_rx_list[i].dc_next =
			    vtophys(&ld->dc_rx_list[i + 1]);
		}
	}

	cd->dc_rx_prod = 0;

	return(0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
*/ static int dc_newbuf(sc, i, m) struct dc_softc *sc; int i; struct mbuf *m; { struct mbuf *m_new = NULL; struct dc_desc *c; c = &sc->dc_ldata->dc_rx_list[i]; if (m == NULL) { MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("dc%d: no memory for rx list " "-- packet dropped!\n", sc->dc_unit); return(ENOBUFS); } MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { printf("dc%d: no memory for rx list " "-- packet dropped!\n", sc->dc_unit); m_freem(m_new); return(ENOBUFS); } m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; } else { m_new = m; m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; m_new->m_data = m_new->m_ext.ext_buf; } m_adj(m_new, sizeof(u_int64_t)); /* * If this is a PNIC chip, zero the buffer. This is part * of the workaround for the receive bug in the 82c168 and * 82c169 chips. */ if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) bzero((char *)mtod(m_new, char *), m_new->m_len); sc->dc_cdata.dc_rx_chain[i] = m_new; c->dc_data = vtophys(mtod(m_new, caddr_t)); c->dc_ctl = DC_RXCTL_RLINK | DC_RXLEN; c->dc_status = DC_RXSTAT_OWN; return(0); } /* * Grrrrr. * The PNIC chip has a terrible bug in it that manifests itself during * periods of heavy activity. The exact mode of failure if difficult to * pinpoint: sometimes it only happens in promiscuous mode, sometimes it * will happen on slow machines. The bug is that sometimes instead of * uploading one complete frame during reception, it uploads what looks * like the entire contents of its FIFO memory. The frame we want is at * the end of the whole mess, but we never know exactly how much data has * been uploaded, so salvaging the frame is hard. * * There is only one way to do it reliably, and it's disgusting. * Here's what we know: * * - We know there will always be somewhere between one and three extra * descriptors uploaded. * * - We know the desired received frame will always be at the end of the * total data upload. 
* * - We know the size of the desired received frame because it will be * provided in the length field of the status word in the last descriptor. * * Here's what we do: * * - When we allocate buffers for the receive ring, we bzero() them. * This means that we know that the buffer contents should be all * zeros, except for data uploaded by the chip. * * - We also force the PNIC chip to upload frames that include the * ethernet CRC at the end. * * - We gather all of the bogus frame data into a single buffer. * * - We then position a pointer at the end of this buffer and scan * backwards until we encounter the first non-zero byte of data. * This is the end of the received frame. We know we will encounter * some data at the end of the frame because the CRC will always be * there, so even if the sender transmits a packet of all zeros, * we won't be fooled. * * - We know the size of the actual received frame, so we subtract * that value from the current pointer location. This brings us * to the start of the actual received packet. * * - We copy this into an mbuf and pass it on, along with the actual * frame length. * * The performance hit is tremendous, but it beats dropping frames all * the time. */ #define DC_WHOLEFRAME (DC_RXSTAT_FIRSTFRAG|DC_RXSTAT_LASTFRAG) static void dc_pnic_rx_bug_war(sc, idx) struct dc_softc *sc; int idx; { struct dc_desc *cur_rx; struct dc_desc *c = NULL; struct mbuf *m = NULL; unsigned char *ptr; int i, total_len; u_int32_t rxstat = 0; i = sc->dc_pnic_rx_bug_save; cur_rx = &sc->dc_ldata->dc_rx_list[idx]; ptr = sc->dc_pnic_rx_buf; bzero(ptr, sizeof(DC_RXLEN * 5)); /* Copy all the bytes from the bogus buffers. */ while (1) { c = &sc->dc_ldata->dc_rx_list[i]; rxstat = c->dc_status; m = sc->dc_cdata.dc_rx_chain[i]; bcopy(mtod(m, char *), ptr, DC_RXLEN); ptr += DC_RXLEN; /* If this is the last buffer, break out. 
*/ if (i == idx || rxstat & DC_RXSTAT_LASTFRAG) break; dc_newbuf(sc, i, m); DC_INC(i, DC_RX_LIST_CNT); } /* Find the length of the actual receive frame. */ total_len = DC_RXBYTES(rxstat); /* Scan backwards until we hit a non-zero byte. */ while(*ptr == 0x00) ptr--; /* Round off. */ if ((uintptr_t)(ptr) & 0x3) ptr -= 1; /* Now find the start of the frame. */ ptr -= total_len; if (ptr < sc->dc_pnic_rx_buf) ptr = sc->dc_pnic_rx_buf; /* * Now copy the salvaged frame to the last mbuf and fake up * the status word to make it look like a successful * frame reception. */ dc_newbuf(sc, i, m); bcopy(ptr, mtod(m, char *), total_len); cur_rx->dc_status = rxstat | DC_RXSTAT_FIRSTFRAG; return; } /* * This routine searches the RX ring for dirty descriptors in the * event that the rxeof routine falls out of sync with the chip's * current descriptor pointer. This may happen sometimes as a result * of a "no RX buffer available" condition that happens when the chip * consumes all of the RX buffers before the driver has a chance to * process the RX ring. This routine may need to be called more than * once to bring the driver back in sync with the chip, however we * should still be getting RX DONE interrupts to drive the search * for new packets in the RX ring, so we should catch up eventually. */ static int dc_rx_resync(sc) struct dc_softc *sc; { int i, pos; struct dc_desc *cur_rx; pos = sc->dc_cdata.dc_rx_prod; for (i = 0; i < DC_RX_LIST_CNT; i++) { cur_rx = &sc->dc_ldata->dc_rx_list[pos]; if (!(cur_rx->dc_status & DC_RXSTAT_OWN)) break; DC_INC(pos, DC_RX_LIST_CNT); } /* If the ring really is empty, then just return. */ if (i == DC_RX_LIST_CNT) return(0); /* We've fallen behing the chip: catch it. */ sc->dc_cdata.dc_rx_prod = pos; return(EAGAIN); } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. 
 */
static void dc_rxeof(sc)
	struct dc_softc		*sc;
{
	struct ether_header	*eh;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct dc_desc		*cur_rx;
	int			i, total_len = 0;
	u_int32_t		rxstat;

	ifp = &sc->arpcom.ac_if;
	i = sc->dc_cdata.dc_rx_prod;

	/* Walk the ring while descriptors are owned by the host. */
	while(!(sc->dc_ldata->dc_rx_list[i].dc_status & DC_RXSTAT_OWN)) {
		struct mbuf		*m0 = NULL;

		cur_rx = &sc->dc_ldata->dc_rx_list[i];
		rxstat = cur_rx->dc_status;
		m = sc->dc_cdata.dc_rx_chain[i];
		total_len = DC_RXBYTES(rxstat);

		/*
		 * PNIC RX bug workaround: accumulate fragmented uploads
		 * and let dc_pnic_rx_bug_war() salvage the real frame.
		 */
		if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) {
			if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) {
				if (rxstat & DC_RXSTAT_FIRSTFRAG)
					sc->dc_pnic_rx_bug_save = i;
				if ((rxstat & DC_RXSTAT_LASTFRAG) == 0) {
					DC_INC(i, DC_RX_LIST_CNT);
					continue;
				}
				dc_pnic_rx_bug_war(sc, i);
				rxstat = cur_rx->dc_status;
				total_len = DC_RXBYTES(rxstat);
			}
		}

		sc->dc_cdata.dc_rx_chain[i] = NULL;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & DC_RXSTAT_RXERR) {
			ifp->if_ierrors++;
			if (rxstat & DC_RXSTAT_COLLSEEN)
				ifp->if_collisions++;
			dc_newbuf(sc, i, m);
			if (rxstat & DC_RXSTAT_CRCERR) {
				DC_INC(i, DC_RX_LIST_CNT);
				continue;
			} else {
				/* Non-CRC errors: reinitialize the chip. */
				dc_init(sc);
				return;
			}
		}

		/* No errors; receive the packet. */
		total_len -= ETHER_CRC_LEN;

		/* Copy out, then refill the descriptor with a fresh mbuf. */
		m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
		    total_len + ETHER_ALIGN, 0, ifp, NULL);
		dc_newbuf(sc, i, m);
		DC_INC(i, DC_RX_LIST_CNT);
		if (m0 == NULL) {
			ifp->if_ierrors++;
			continue;
		}
		m_adj(m0, ETHER_ALIGN);
		m = m0;

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);

		/* Remove header from mbuf and pass it on. */
		m_adj(m, sizeof(struct ether_header));
		ether_input(ifp, eh, m);
	}

	sc->dc_cdata.dc_rx_prod = i;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void dc_txeof(sc)
	struct dc_softc		*sc;
{
	struct dc_desc		*cur_tx = NULL;
	struct ifnet		*ifp;
	int			idx;

	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->dc_cdata.dc_tx_cons;
	while(idx != sc->dc_cdata.dc_tx_prod) {
		u_int32_t		txstat;

		cur_tx = &sc->dc_ldata->dc_tx_list[idx];
		txstat = cur_tx->dc_status;

		if (txstat & DC_TXSTAT_OWN)
			break;

		/* Non-final fragments and setup frames carry no stats. */
		if (!(cur_tx->dc_ctl & DC_TXCTL_LASTFRAG) ||
		    cur_tx->dc_ctl & DC_TXCTL_SETUP) {
			sc->dc_cdata.dc_tx_cnt--;
			if (cur_tx->dc_ctl & DC_TXCTL_SETUP) {
				/*
				 * Yes, the PNIC is so brain damaged
				 * that it will sometimes generate a TX
				 * underrun error while DMAing the RX
				 * filter setup frame. If we detect this,
				 * we have to send the setup frame again,
				 * or else the filter won't be programmed
				 * correctly.
				 */
				if (DC_IS_PNIC(sc)) {
					if (txstat & DC_TXSTAT_ERRSUM)
						dc_setfilt(sc);
				}
				sc->dc_cdata.dc_tx_chain[idx] = NULL;
			}

			DC_INC(idx, DC_TX_LIST_CNT);
			continue;
		}

		if (DC_IS_XIRCOM(sc)) {
			/*
			 * XXX: Why does my Xircom taunt me so?
			 * For some reason it likes setting the CARRLOST flag
			 * even when the carrier is there. wtf?!?
*/ if (/*sc->dc_type == DC_TYPE_21143 &&*/ sc->dc_pmode == DC_PMODE_MII && ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM| DC_TXSTAT_NOCARRIER))) txstat &= ~DC_TXSTAT_ERRSUM; } else { if (/*sc->dc_type == DC_TYPE_21143 &&*/ sc->dc_pmode == DC_PMODE_MII && ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM| DC_TXSTAT_NOCARRIER|DC_TXSTAT_CARRLOST))) txstat &= ~DC_TXSTAT_ERRSUM; } if (txstat & DC_TXSTAT_ERRSUM) { ifp->if_oerrors++; if (txstat & DC_TXSTAT_EXCESSCOLL) ifp->if_collisions++; if (txstat & DC_TXSTAT_LATECOLL) ifp->if_collisions++; if (!(txstat & DC_TXSTAT_UNDERRUN)) { dc_init(sc); return; } } ifp->if_collisions += (txstat & DC_TXSTAT_COLLCNT) >> 3; ifp->if_opackets++; if (sc->dc_cdata.dc_tx_chain[idx] != NULL) { m_freem(sc->dc_cdata.dc_tx_chain[idx]); sc->dc_cdata.dc_tx_chain[idx] = NULL; } sc->dc_cdata.dc_tx_cnt--; DC_INC(idx, DC_TX_LIST_CNT); } sc->dc_cdata.dc_tx_cons = idx; if (cur_tx != NULL) ifp->if_flags &= ~IFF_OACTIVE; return; } static void dc_tick(xsc) void *xsc; { struct dc_softc *sc; struct mii_data *mii; struct ifnet *ifp; u_int32_t r; sc = xsc; DC_LOCK(sc); ifp = &sc->arpcom.ac_if; mii = device_get_softc(sc->dc_miibus); if (sc->dc_flags & DC_REDUCED_MII_POLL) { if (sc->dc_flags & DC_21143_NWAY) { r = CSR_READ_4(sc, DC_10BTSTAT); if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX && (r & DC_TSTAT_LS100)) { sc->dc_link = 0; mii_mediachg(mii); } if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T && (r & DC_TSTAT_LS10)) { sc->dc_link = 0; mii_mediachg(mii); } if (sc->dc_link == 0) mii_tick(mii); } else { r = CSR_READ_4(sc, DC_ISR); if ((r & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT && sc->dc_cdata.dc_tx_cnt == 0) mii_tick(mii); if (!(mii->mii_media_status & IFM_ACTIVE)) sc->dc_link = 0; } } else mii_tick(mii); /* * When the init routine completes, we expect to be able to send * packets right away, and in fact the network code will send a * gratuitous ARP the moment the init routine marks the interface * as running. 
However, even though the MAC may have been initialized, * there may be a delay of a few seconds before the PHY completes * autonegotiation and the link is brought up. Any transmissions * made during that delay will be lost. Dealing with this is tricky: * we can't just pause in the init routine while waiting for the * PHY to come ready since that would bring the whole system to * a screeching halt for several seconds. * * What we do here is prevent the TX start routine from sending * any packets until a link has been established. After the * interface has been initialized, the tick routine will poll * the state of the PHY until the IFM_ACTIVE flag is set. Until * that time, packets will stay in the send queue, and once the * link comes up, they will be flushed out to the wire. */ if (!sc->dc_link) { mii_pollstat(mii); if (mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { sc->dc_link++; if (ifp->if_snd.ifq_head != NULL) dc_start(ifp); } } if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link) callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc); else callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc); DC_UNLOCK(sc); return; } static void dc_intr(arg) void *arg; { struct dc_softc *sc; struct ifnet *ifp; u_int32_t status; sc = arg; DC_LOCK(sc); ifp = &sc->arpcom.ac_if; /* Supress unwanted interrupts */ if (!(ifp->if_flags & IFF_UP)) { if (CSR_READ_4(sc, DC_ISR) & DC_INTRS) dc_stop(sc); DC_UNLOCK(sc); return; } /* Disable interrupts. 
*/ CSR_WRITE_4(sc, DC_IMR, 0x00000000); while(((status = CSR_READ_4(sc, DC_ISR)) & DC_INTRS) && status != 0xFFFFFFFF) { CSR_WRITE_4(sc, DC_ISR, status); if (status & DC_ISR_RX_OK) { int curpkts; curpkts = ifp->if_ipackets; dc_rxeof(sc); if (curpkts == ifp->if_ipackets) { while(dc_rx_resync(sc)) dc_rxeof(sc); } } if (status & (DC_ISR_TX_OK|DC_ISR_TX_NOBUF)) dc_txeof(sc); if (status & DC_ISR_TX_IDLE) { dc_txeof(sc); if (sc->dc_cdata.dc_tx_cnt) { DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); } } if (status & DC_ISR_TX_UNDERRUN) { u_int32_t cfg; printf("dc%d: TX underrun -- ", sc->dc_unit); if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) dc_init(sc); cfg = CSR_READ_4(sc, DC_NETCFG); cfg &= ~DC_NETCFG_TX_THRESH; if (sc->dc_txthresh == DC_TXTHRESH_160BYTES) { printf("using store and forward mode\n"); DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); } else if (sc->dc_flags & DC_TX_STORENFWD) { printf("resetting\n"); } else { sc->dc_txthresh += 0x4000; printf("increasing TX threshold\n"); CSR_WRITE_4(sc, DC_NETCFG, cfg); DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh); DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); } } if ((status & DC_ISR_RX_WATDOGTIMEO) || (status & DC_ISR_RX_NOBUF)) { int curpkts; curpkts = ifp->if_ipackets; dc_rxeof(sc); if (curpkts == ifp->if_ipackets) { while(dc_rx_resync(sc)) dc_rxeof(sc); } } if (status & DC_ISR_BUS_ERR) { dc_reset(sc); dc_init(sc); } } /* Re-enable interrupts. */ CSR_WRITE_4(sc, DC_IMR, DC_INTRS); if (ifp->if_snd.ifq_head != NULL) dc_start(ifp); DC_UNLOCK(sc); return; } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. */ static int dc_encap(sc, m_head, txidx) struct dc_softc *sc; struct mbuf *m_head; u_int32_t *txidx; { struct dc_desc *f = NULL; struct mbuf *m; int frag, cur, cnt = 0; /* * Start packing the mbufs in this chain into * the fragment pointers. Stop when we run out * of fragments or hit the end of the mbuf chain. 
*/ m = m_head; cur = frag = *txidx; for (m = m_head; m != NULL; m = m->m_next) { if (m->m_len != 0) { if (sc->dc_flags & DC_TX_ADMTEK_WAR) { if (*txidx != sc->dc_cdata.dc_tx_prod && frag == (DC_TX_LIST_CNT - 1)) return(ENOBUFS); } if ((DC_TX_LIST_CNT - (sc->dc_cdata.dc_tx_cnt + cnt)) < 5) return(ENOBUFS); f = &sc->dc_ldata->dc_tx_list[frag]; f->dc_ctl = DC_TXCTL_TLINK | m->m_len; if (cnt == 0) { f->dc_status = 0; f->dc_ctl |= DC_TXCTL_FIRSTFRAG; } else f->dc_status = DC_TXSTAT_OWN; f->dc_data = vtophys(mtod(m, vm_offset_t)); cur = frag; DC_INC(frag, DC_TX_LIST_CNT); cnt++; } } if (m != NULL) return(ENOBUFS); sc->dc_cdata.dc_tx_cnt += cnt; sc->dc_cdata.dc_tx_chain[cur] = m_head; sc->dc_ldata->dc_tx_list[cur].dc_ctl |= DC_TXCTL_LASTFRAG; if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG) sc->dc_ldata->dc_tx_list[*txidx].dc_ctl |= DC_TXCTL_FINT; if (sc->dc_flags & DC_TX_INTR_ALWAYS) sc->dc_ldata->dc_tx_list[cur].dc_ctl |= DC_TXCTL_FINT; if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64) sc->dc_ldata->dc_tx_list[cur].dc_ctl |= DC_TXCTL_FINT; sc->dc_ldata->dc_tx_list[*txidx].dc_status = DC_TXSTAT_OWN; *txidx = frag; return(0); } /* * Coalesce an mbuf chain into a single mbuf cluster buffer. * Needed for some really badly behaved chips that just can't * do scatter/gather correctly. */ static int dc_coal(sc, m_head) struct dc_softc *sc; struct mbuf **m_head; { struct mbuf *m_new, *m; m = *m_head; MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("dc%d: no memory for tx list", sc->dc_unit); return(ENOBUFS); } if (m->m_pkthdr.len > MHLEN) { MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { m_freem(m_new); printf("dc%d: no memory for tx list", sc->dc_unit); return(ENOBUFS); } } m_copydata(m, 0, m->m_pkthdr.len, mtod(m_new, caddr_t)); m_new->m_pkthdr.len = m_new->m_len = m->m_pkthdr.len; m_freem(m); *m_head = m_new; return(0); } /* * Main transmit routine. 
To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit lists. We also save a * copy of the pointers since the transmit list fragment pointers are * physical addresses. */ static void dc_start(ifp) struct ifnet *ifp; { struct dc_softc *sc; struct mbuf *m_head = NULL; int idx; sc = ifp->if_softc; DC_LOCK(sc); if (!sc->dc_link) { DC_UNLOCK(sc); return; } if (ifp->if_flags & IFF_OACTIVE) { DC_UNLOCK(sc); return; } idx = sc->dc_cdata.dc_tx_prod; while(sc->dc_cdata.dc_tx_chain[idx] == NULL) { IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; if (sc->dc_flags & DC_TX_COALESCE) { if (dc_coal(sc, &m_head)) { IF_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; break; } } if (dc_encap(sc, m_head, &idx)) { IF_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; break; } /* * If there's a BPF listener, bounce a copy of this frame * to him. */ if (ifp->if_bpf) bpf_mtap(ifp, m_head); if (sc->dc_flags & DC_TX_ONE) { ifp->if_flags |= IFF_OACTIVE; break; } } /* Transmit */ sc->dc_cdata.dc_tx_prod = idx; if (!(sc->dc_flags & DC_TX_POLL)) CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF); /* * Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; DC_UNLOCK(sc); return; } static void dc_init(xsc) void *xsc; { struct dc_softc *sc = xsc; struct ifnet *ifp = &sc->arpcom.ac_if; struct mii_data *mii; DC_LOCK(sc); mii = device_get_softc(sc->dc_miibus); /* * Cancel pending I/O and free all RX/TX buffers. */ dc_stop(sc); dc_reset(sc); /* * Set cache alignment and burst length. 
*/ if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc)) CSR_WRITE_4(sc, DC_BUSCTL, 0); else CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME|DC_BUSCTL_MRLE); if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) { DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA); } else { DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG); } if (sc->dc_flags & DC_TX_POLL) DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1); switch(sc->dc_cachesize) { case 32: DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG); break; case 16: DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG); break; case 8: DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG); break; case 0: default: DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE); break; } if (sc->dc_flags & DC_TX_STORENFWD) DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); else { if (sc->dc_txthresh == DC_TXTHRESH_160BYTES) { DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); } else { DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD); DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh); } } DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC); DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF); if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) { /* * The app notes for the 98713 and 98715A say that * in order to have the chips operate properly, a magic * number must be written to CSR16. Macronix does not * document the meaning of these bits so there's no way * to know exactly what they do. The 98713 has a magic * number all its own; the rest all use a different one. */ DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000); if (sc->dc_type == DC_TYPE_98713) DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713); else DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715); } if (DC_IS_XIRCOM(sc)) { /* * setup General Purpose Port mode and data so the tulip * can talk to the MII. 
*/ CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN | DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); DELAY(10); CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN | DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); DELAY(10); } DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH); DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_72BYTES); /* Init circular RX list. */ if (dc_list_rx_init(sc) == ENOBUFS) { printf("dc%d: initialization failed: no " "memory for rx buffers\n", sc->dc_unit); dc_stop(sc); DC_UNLOCK(sc); return; } /* * Init tx descriptors. */ dc_list_tx_init(sc); /* * Load the address of the RX list. */ CSR_WRITE_4(sc, DC_RXADDR, vtophys(&sc->dc_ldata->dc_rx_list[0])); CSR_WRITE_4(sc, DC_TXADDR, vtophys(&sc->dc_ldata->dc_tx_list[0])); /* * Enable interrupts. */ CSR_WRITE_4(sc, DC_IMR, DC_INTRS); CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF); /* Enable transmitter. */ DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON); /* * If this is an Intel 21143 and we're not using the * MII port, program the LED control pins so we get * link and activity indications. */ if (sc->dc_flags & DC_TULIP_LEDS) { CSR_WRITE_4(sc, DC_WATCHDOG, DC_WDOG_CTLWREN|DC_WDOG_LINK|DC_WDOG_ACTIVITY); CSR_WRITE_4(sc, DC_WATCHDOG, 0); } /* * Load the RX/multicast filter. We do this sort of late * because the filter programming scheme on the 21143 and * some clones requires DMAing a setup frame via the TX * engine, and we need the transmitter enabled for that. */ dc_setfilt(sc); /* Enable receiver. */ DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON); CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF); mii_mediachg(mii); dc_setcfg(sc, sc->dc_if_media); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; /* Don't start the ticker if this is a homePNA link. 
*/ if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_homePNA) sc->dc_link = 1; else { if (sc->dc_flags & DC_21143_NWAY) callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc); else callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc); } #ifdef SRM_MEDIA if(sc->dc_srm_media) { struct ifreq ifr; ifr.ifr_media = sc->dc_srm_media; ifmedia_ioctl(ifp, &ifr, &mii->mii_media, SIOCSIFMEDIA); sc->dc_srm_media = 0; } #endif DC_UNLOCK(sc); return; } /* * Set media options. */ static int dc_ifmedia_upd(ifp) struct ifnet *ifp; { struct dc_softc *sc; struct mii_data *mii; struct ifmedia *ifm; sc = ifp->if_softc; mii = device_get_softc(sc->dc_miibus); mii_mediachg(mii); ifm = &mii->mii_media; if (DC_IS_DAVICOM(sc) && IFM_SUBTYPE(ifm->ifm_media) == IFM_homePNA) dc_setcfg(sc, ifm->ifm_media); else sc->dc_link = 0; return(0); } /* * Report current media status. */ static void dc_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct dc_softc *sc; struct mii_data *mii; struct ifmedia *ifm; sc = ifp->if_softc; mii = device_get_softc(sc->dc_miibus); mii_pollstat(mii); ifm = &mii->mii_media; if (DC_IS_DAVICOM(sc)) { if (IFM_SUBTYPE(ifm->ifm_media) == IFM_homePNA) { ifmr->ifm_active = ifm->ifm_media; ifmr->ifm_status = 0; return; } } ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; return; } static int dc_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct dc_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int error = 0; DC_LOCK(sc); switch(command) { case SIOCSIFADDR: case SIOCGIFADDR: case SIOCSIFMTU: error = ether_ioctl(ifp, command, data); break; case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->dc_if_flags & IFF_PROMISC)) { dc_setfilt(sc); } else if (ifp->if_flags & IFF_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->dc_if_flags & IFF_PROMISC) { dc_setfilt(sc); } else if (!(ifp->if_flags & 
IFF_RUNNING)) { sc->dc_txthresh = 0; dc_init(sc); } } else { if (ifp->if_flags & IFF_RUNNING) dc_stop(sc); } sc->dc_if_flags = ifp->if_flags; error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: dc_setfilt(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->dc_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); #ifdef SRM_MEDIA if (sc->dc_srm_media) sc->dc_srm_media = 0; #endif break; default: error = EINVAL; break; } DC_UNLOCK(sc); return(error); } static void dc_watchdog(ifp) struct ifnet *ifp; { struct dc_softc *sc; sc = ifp->if_softc; DC_LOCK(sc); ifp->if_oerrors++; printf("dc%d: watchdog timeout\n", sc->dc_unit); dc_stop(sc); dc_reset(sc); dc_init(sc); if (ifp->if_snd.ifq_head != NULL) dc_start(ifp); DC_UNLOCK(sc); return; } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void dc_stop(sc) struct dc_softc *sc; { register int i; struct ifnet *ifp; DC_LOCK(sc); ifp = &sc->arpcom.ac_if; ifp->if_timer = 0; callout_stop(&sc->dc_stat_ch); DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON|DC_NETCFG_TX_ON)); CSR_WRITE_4(sc, DC_IMR, 0x00000000); CSR_WRITE_4(sc, DC_TXADDR, 0x00000000); CSR_WRITE_4(sc, DC_RXADDR, 0x00000000); sc->dc_link = 0; /* * Free data in the RX lists. */ for (i = 0; i < DC_RX_LIST_CNT; i++) { if (sc->dc_cdata.dc_rx_chain[i] != NULL) { m_freem(sc->dc_cdata.dc_rx_chain[i]); sc->dc_cdata.dc_rx_chain[i] = NULL; } } bzero((char *)&sc->dc_ldata->dc_rx_list, sizeof(sc->dc_ldata->dc_rx_list)); /* * Free the TX list buffers. 
*/ for (i = 0; i < DC_TX_LIST_CNT; i++) { if (sc->dc_cdata.dc_tx_chain[i] != NULL) { if (sc->dc_ldata->dc_tx_list[i].dc_ctl & DC_TXCTL_SETUP) { sc->dc_cdata.dc_tx_chain[i] = NULL; continue; } m_freem(sc->dc_cdata.dc_tx_chain[i]); sc->dc_cdata.dc_tx_chain[i] = NULL; } } bzero((char *)&sc->dc_ldata->dc_tx_list, sizeof(sc->dc_ldata->dc_tx_list)); ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); DC_UNLOCK(sc); return; } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static void dc_shutdown(dev) device_t dev; { struct dc_softc *sc; sc = device_get_softc(dev); dc_stop(sc); return; } Index: head/sys/pci/if_fxp.c =================================================================== --- head/sys/pci/if_fxp.c (revision 71961) +++ head/sys/pci/if_fxp.c (revision 71962) @@ -1,1951 +1,1950 @@ /* * Copyright (c) 1995, David Greenman * All rights reserved. * * Modifications to support media selection: * Copyright (c) 1997 Jason R. Thorpe. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Intel EtherExpress Pro/100B PCI Fast Ethernet driver */ #include #include #include #include #include #include #include #include #include #include #ifdef NS #include #include #endif #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include /* for PCIM_CMD_xxx */ #include #include #ifdef __alpha__ /* XXX */ /* XXX XXX NEED REAL DMA MAPPING SUPPORT XXX XXX */ #undef vtophys #define vtophys(va) alpha_XXX_dmamap((vm_offset_t)(va)) #endif /* __alpha__ */ /* * NOTE! On the Alpha, we have an alignment constraint. The * card DMAs the packet immediately following the RFA. However, * the first thing in the packet is a 14-byte Ethernet header. * This means that the packet is misaligned. To compensate, * we actually offset the RFA 2 bytes into the cluster. This * alignes the packet after the Ethernet header at a 32-bit * boundary. HOWEVER! This means that the RFA is misaligned! */ #define RFA_ALIGNMENT_FUDGE 2 /* * Inline function to copy a 16-bit aligned 32-bit quantity. */ static __inline void fxp_lwcopy __P((volatile u_int32_t *, volatile u_int32_t *)); static __inline void fxp_lwcopy(src, dst) volatile u_int32_t *src, *dst; { #ifdef __i386__ *dst = *src; #else volatile u_int16_t *a = (volatile u_int16_t *)src; volatile u_int16_t *b = (volatile u_int16_t *)dst; b[0] = a[0]; b[1] = a[1]; #endif } /* * Template for default configuration parameters. 
* See struct fxp_cb_config for the bit definitions. */ static u_char fxp_cb_config_template[] = { 0x0, 0x0, /* cb_status */ 0x80, 0x2, /* cb_command */ 0xff, 0xff, 0xff, 0xff, /* link_addr */ 0x16, /* 0 */ 0x8, /* 1 */ 0x0, /* 2 */ 0x0, /* 3 */ 0x0, /* 4 */ 0x80, /* 5 */ 0xb2, /* 6 */ 0x3, /* 7 */ 0x1, /* 8 */ 0x0, /* 9 */ 0x26, /* 10 */ 0x0, /* 11 */ 0x60, /* 12 */ 0x0, /* 13 */ 0xf2, /* 14 */ 0x48, /* 15 */ 0x0, /* 16 */ 0x40, /* 17 */ 0xf3, /* 18 */ 0x0, /* 19 */ 0x3f, /* 20 */ 0x5 /* 21 */ }; /* Supported media types. */ struct fxp_supported_media { const int fsm_phy; /* PHY type */ const int *fsm_media; /* the media array */ const int fsm_nmedia; /* the number of supported media */ const int fsm_defmedia; /* default media for this PHY */ }; static const int fxp_media_standard[] = { IFM_ETHER|IFM_10_T, IFM_ETHER|IFM_10_T|IFM_FDX, IFM_ETHER|IFM_100_TX, IFM_ETHER|IFM_100_TX|IFM_FDX, IFM_ETHER|IFM_AUTO, }; #define FXP_MEDIA_STANDARD_DEFMEDIA (IFM_ETHER|IFM_AUTO) static const int fxp_media_default[] = { IFM_ETHER|IFM_MANUAL, /* XXX IFM_AUTO ? 
*/ }; #define FXP_MEDIA_DEFAULT_DEFMEDIA (IFM_ETHER|IFM_MANUAL) static const struct fxp_supported_media fxp_media[] = { { FXP_PHY_DP83840, fxp_media_standard, sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]), FXP_MEDIA_STANDARD_DEFMEDIA }, { FXP_PHY_DP83840A, fxp_media_standard, sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]), FXP_MEDIA_STANDARD_DEFMEDIA }, { FXP_PHY_82553A, fxp_media_standard, sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]), FXP_MEDIA_STANDARD_DEFMEDIA }, { FXP_PHY_82553C, fxp_media_standard, sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]), FXP_MEDIA_STANDARD_DEFMEDIA }, { FXP_PHY_82555, fxp_media_standard, sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]), FXP_MEDIA_STANDARD_DEFMEDIA }, { FXP_PHY_82555B, fxp_media_standard, sizeof(fxp_media_standard) / sizeof(fxp_media_standard[0]), FXP_MEDIA_STANDARD_DEFMEDIA }, { FXP_PHY_80C24, fxp_media_default, sizeof(fxp_media_default) / sizeof(fxp_media_default[0]), FXP_MEDIA_DEFAULT_DEFMEDIA }, }; #define NFXPMEDIA (sizeof(fxp_media) / sizeof(fxp_media[0])) static int fxp_mediachange __P((struct ifnet *)); static void fxp_mediastatus __P((struct ifnet *, struct ifmediareq *)); static void fxp_set_media __P((struct fxp_softc *, int)); static __inline void fxp_scb_wait __P((struct fxp_softc *)); static __inline void fxp_dma_wait __P((volatile u_int16_t *, struct fxp_softc *sc)); static void fxp_intr __P((void *)); static void fxp_start __P((struct ifnet *)); static int fxp_ioctl __P((struct ifnet *, u_long, caddr_t)); static void fxp_init __P((void *)); static void fxp_stop __P((struct fxp_softc *)); static void fxp_watchdog __P((struct ifnet *)); static int fxp_add_rfabuf __P((struct fxp_softc *, struct mbuf *)); static int fxp_mdi_read __P((struct fxp_softc *, int, int)); static void fxp_mdi_write __P((struct fxp_softc *, int, int, int)); static void fxp_autosize_eeprom __P((struct fxp_softc *)); static void fxp_read_eeprom __P((struct fxp_softc *, 
u_int16_t *, int, int)); static int fxp_attach_common __P((struct fxp_softc *, u_int8_t *)); static void fxp_stats_update __P((void *)); static void fxp_mc_setup __P((struct fxp_softc *)); /* * Set initial transmit threshold at 64 (512 bytes). This is * increased by 64 (512 bytes) at a time, to maximum of 192 * (1536 bytes), if an underrun occurs. */ static int tx_threshold = 64; /* * Number of transmit control blocks. This determines the number * of transmit buffers that can be chained in the CB list. * This must be a power of two. */ #define FXP_NTXCB 128 /* * Number of completed TX commands at which point an interrupt * will be generated to garbage collect the attached buffers. * Must be at least one less than FXP_NTXCB, and should be * enough less so that the transmitter doesn't becomes idle * during the buffer rundown (which would reduce performance). */ #define FXP_CXINT_THRESH 120 /* * TxCB list index mask. This is used to do list wrap-around. */ #define FXP_TXCB_MASK (FXP_NTXCB - 1) /* * Number of receive frame area buffers. These are large so chose * wisely. */ #define FXP_NRFABUFS 64 /* * Maximum number of seconds that the receiver can be idle before we * assume it's dead and attempt to reset it by reprogramming the * multicast filter. This is part of a work-around for a bug in the * NIC. See fxp_stats_update(). */ #define FXP_MAX_RX_IDLE 15 /* * Wait for the previous command to be accepted (but not necessarily * completed). */ static __inline void fxp_scb_wait(sc) struct fxp_softc *sc; { int i = 10000; while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i) DELAY(2); if (i == 0) printf("fxp%d: SCB timeout\n", FXP_UNIT(sc)); } static __inline void fxp_dma_wait(status, sc) volatile u_int16_t *status; struct fxp_softc *sc; { int i = 10000; while (!(*status & FXP_CB_STATUS_C) && --i) DELAY(2); if (i == 0) printf("fxp%d: DMA timeout\n", FXP_UNIT(sc)); } /* * Return identification string if this is device is ours. 
*/ static int fxp_probe(device_t dev) { if (pci_get_vendor(dev) == FXP_VENDORID_INTEL) { switch (pci_get_device(dev)) { case FXP_DEVICEID_i82557: device_set_desc(dev, "Intel Pro 10/100B/100+ Ethernet"); return 0; case FXP_DEVICEID_i82559: device_set_desc(dev, "Intel InBusiness 10/100 Ethernet"); return 0; case FXP_DEVICEID_i82559ER: device_set_desc(dev, "Intel Embedded 10/100 Ethernet"); return 0; case FXP_DEVICEID_i82562: device_set_desc(dev, "Intel PLC 10/100 Ethernet"); return 0; default: break; } } return ENXIO; } static int fxp_attach(device_t dev) { int error = 0; struct fxp_softc *sc = device_get_softc(dev); struct ifnet *ifp; u_int32_t val; int rid, m1, m2, ebitmap; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_DEF | MTX_RECURSE); callout_handle_init(&sc->stat_ch); FXP_LOCK(sc); /* * Enable bus mastering. Enable memory space too, in case * BIOS/Prom forgot about it. */ val = pci_read_config(dev, PCIR_COMMAND, 2); val |= (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN); pci_write_config(dev, PCIR_COMMAND, val, 2); val = pci_read_config(dev, PCIR_COMMAND, 2); if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { u_int32_t iobase, membase, irq; /* Save important PCI config data. */ iobase = pci_read_config(dev, FXP_PCI_IOBA, 4); membase = pci_read_config(dev, FXP_PCI_MMBA, 4); irq = pci_read_config(dev, PCIR_INTLINE, 4); /* Reset the power state. */ device_printf(dev, "chip is in D%d power mode " "-- setting to D0\n", pci_get_powerstate(dev)); pci_set_powerstate(dev, PCI_POWERSTATE_D0); /* Restore PCI config data. */ pci_write_config(dev, FXP_PCI_IOBA, iobase, 4); pci_write_config(dev, FXP_PCI_MMBA, membase, 4); pci_write_config(dev, PCIR_INTLINE, irq, 4); } /* * Figure out which we should try first - memory mapping or i/o mapping? * We default to memory mapping. Then we accept an override from the * command line. Then we check to see which one is enabled. 
*/ m1 = PCIM_CMD_MEMEN; m2 = PCIM_CMD_PORTEN; ebitmap = 0; if (getenv_int("fxp_iomap", &ebitmap)) { if (ebitmap & (1 << device_get_unit(dev))) { m1 = PCIM_CMD_PORTEN; m2 = PCIM_CMD_MEMEN; } } if (val & m1) { sc->rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; sc->rgd = (m1 == PCIM_CMD_MEMEN)? FXP_PCI_MMBA : FXP_PCI_IOBA; sc->mem = bus_alloc_resource(dev, sc->rtp, &sc->rgd, 0, ~0, 1, RF_ACTIVE); } if (sc->mem == NULL && (val & m2)) { sc->rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; sc->rgd = (m2 == PCIM_CMD_MEMEN)? FXP_PCI_MMBA : FXP_PCI_IOBA; sc->mem = bus_alloc_resource(dev, sc->rtp, &sc->rgd, 0, ~0, 1, RF_ACTIVE); } if (!sc->mem) { device_printf(dev, "could not map device registers\n"); error = ENXIO; goto fail; } if (bootverbose) { device_printf(dev, "using %s space register mapping\n", sc->rtp == SYS_RES_MEMORY? "memory" : "I/O"); } sc->sc_st = rman_get_bustag(sc->mem); sc->sc_sh = rman_get_bushandle(sc->mem); /* * Allocate our interrupt. */ rid = 0; sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE); if (sc->irq == NULL) { device_printf(dev, "could not map interrupt\n"); error = ENXIO; goto fail; } error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET, fxp_intr, sc, &sc->ih); if (error) { device_printf(dev, "could not setup irq\n"); goto fail; } /* Do generic parts of attach. */ if (fxp_attach_common(sc, sc->arpcom.ac_enaddr)) { /* Failed! */ bus_teardown_intr(dev, sc->irq, sc->ih); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq); bus_release_resource(dev, sc->rtp, sc->rgd, sc->mem); error = ENXIO; goto fail; } device_printf(dev, "Ethernet address %6D%s\n", sc->arpcom.ac_enaddr, ":", sc->phy_10Mbps_only ? 
", 10Mbps" : ""); ifp = &sc->arpcom.ac_if; ifp->if_unit = device_get_unit(dev); ifp->if_name = "fxp"; ifp->if_output = ether_output; ifp->if_baudrate = 100000000; ifp->if_init = fxp_init; ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = fxp_ioctl; ifp->if_start = fxp_start; ifp->if_watchdog = fxp_watchdog; /* * Attach the interface. */ ether_ifattach(ifp, ETHER_BPF_SUPPORTED); /* * Let the system queue as many packets as we have available * TX descriptors. */ ifp->if_snd.ifq_maxlen = FXP_NTXCB - 1; FXP_UNLOCK(sc); return 0; fail: FXP_UNLOCK(sc); mtx_destroy(&sc->sc_mtx); return error; } /* * Detach interface. */ static int fxp_detach(device_t dev) { struct fxp_softc *sc = device_get_softc(dev); FXP_LOCK(sc); /* * Close down routes etc. */ ether_ifdetach(&sc->arpcom.ac_if, ETHER_BPF_SUPPORTED); /* * Stop DMA and drop transmit queue. */ fxp_stop(sc); /* * Deallocate resources. */ bus_teardown_intr(dev, sc->irq, sc->ih); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq); bus_release_resource(dev, sc->rtp, sc->rgd, sc->mem); /* * Free all the receive buffers. */ if (sc->rfa_headm != NULL) m_freem(sc->rfa_headm); /* * Free all media structures. */ ifmedia_removeall(&sc->sc_media); /* * Free anciliary structures. */ free(sc->cbl_base, M_DEVBUF); free(sc->fxp_stats, M_DEVBUF); free(sc->mcsp, M_DEVBUF); FXP_UNLOCK(sc); mtx_destroy(&sc->sc_mtx); return 0; } /* * Device shutdown routine. Called at system shutdown after sync. The * main purpose of this routine is to shut off receiver DMA so that * kernel memory doesn't get clobbered during warmboot. */ static int fxp_shutdown(device_t dev) { /* * Make sure that DMA is disabled prior to reboot. Not doing * do could allow DMA to corrupt kernel memory during the * reboot before the driver initializes. */ fxp_stop((struct fxp_softc *) device_get_softc(dev)); return 0; } /* * Device suspend routine. 
Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
fxp_suspend(device_t dev)
{
	struct fxp_softc *sc = device_get_softc(dev);
	int i;

	FXP_LOCK(sc);

	fxp_stop(sc);

	/* Snapshot config-space registers the BIOS may clobber across S3. */
	for (i=0; i<5; i++)
		sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i*4, 4);
	sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
	sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
	sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
	/* fxp_intr() checks this flag and ignores interrupts while set. */
	sc->suspended = 1;

	FXP_UNLOCK(sc);

	return 0;
}

/*
 * Device resume routine. Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
fxp_resume(device_t dev)
{
	struct fxp_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->sc_if;
	u_int16_t pci_command;
	int i;

	FXP_LOCK(sc);

	/* better way to do this? */
	for (i=0; i<5; i++)
		pci_write_config(dev, PCIR_MAPS + i*4, sc->saved_maps[i], 4);
	pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
	pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
	pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
	pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);

	/* reenable busmastering */
	pci_command = pci_read_config(dev, PCIR_COMMAND, 2);
	pci_command |= (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
	pci_write_config(dev, PCIR_COMMAND, pci_command, 2);

	/* Put the chip back into a known state before re-init. */
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
	DELAY(10);

	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		fxp_init(sc);

	sc->suspended = 0;

	FXP_UNLOCK(sc);
	return 0;
}

static device_method_t fxp_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		fxp_probe),
	DEVMETHOD(device_attach,	fxp_attach),
	DEVMETHOD(device_detach,	fxp_detach),
	DEVMETHOD(device_shutdown,	fxp_shutdown),
	DEVMETHOD(device_suspend,	fxp_suspend),
	DEVMETHOD(device_resume,	fxp_resume),
	{ 0, 0 }
};

static driver_t fxp_driver = {
	"fxp",
	fxp_methods,
	sizeof(struct fxp_softc),
};

static devclass_t fxp_devclass;

DRIVER_MODULE(if_fxp, pci, fxp_driver, fxp_devclass, 0, 0);
DRIVER_MODULE(if_fxp, cardbus, fxp_driver, fxp_devclass, 0, 0);

/*
 * Do generic parts of attach: allocate descriptor/stat memory, size the
 * EEPROM, read the PHY info and MAC address, and register media choices.
 * Returns 0 on success or ENOMEM after freeing anything it allocated.
 */
static int
fxp_attach_common(sc, enaddr)
	struct fxp_softc *sc;
	u_int8_t *enaddr;
{
	u_int16_t data;
	int i, nmedia, defmedia;
	const int *media;

	/*
	 * Reset to a stable state.
	 */
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
	DELAY(10);

	sc->cbl_base = malloc(sizeof(struct fxp_cb_tx) * FXP_NTXCB,
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->cbl_base == NULL)
		goto fail;

	sc->fxp_stats = malloc(sizeof(struct fxp_stats), M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (sc->fxp_stats == NULL)
		goto fail;

	sc->mcsp = malloc(sizeof(struct fxp_cb_mcs), M_DEVBUF, M_NOWAIT);
	if (sc->mcsp == NULL)
		goto fail;

	/*
	 * Pre-allocate our receive buffers.
	 */
	for (i = 0; i < FXP_NRFABUFS; i++) {
		if (fxp_add_rfabuf(sc, NULL) != 0) {
			goto fail;
		}
	}

	/*
	 * Find out how large of an SEEPROM we have.
	 */
	fxp_autosize_eeprom(sc);

	/*
	 * Get info about the primary PHY.
	 * EEPROM word 6: PHY address in the low byte, device type in
	 * bits 8-13, and a 10Mbps-only flag in the top bit.
	 */
	fxp_read_eeprom(sc, (u_int16_t *)&data, 6, 1);
	sc->phy_primary_addr = data & 0xff;
	sc->phy_primary_device = (data >> 8) & 0x3f;
	sc->phy_10Mbps_only = data >> 15;

	/*
	 * Read MAC address.
	 */
	fxp_read_eeprom(sc, (u_int16_t *)enaddr, 0, 3);

	/*
	 * Initialize the media structures.
	 */
	media = fxp_media_default;
	nmedia = sizeof(fxp_media_default) / sizeof(fxp_media_default[0]);
	defmedia = FXP_MEDIA_DEFAULT_DEFMEDIA;
	for (i = 0; i < NFXPMEDIA; i++) {
		if (sc->phy_primary_device == fxp_media[i].fsm_phy) {
			media = fxp_media[i].fsm_media;
			nmedia = fxp_media[i].fsm_nmedia;
			defmedia = fxp_media[i].fsm_defmedia;
		}
	}

	ifmedia_init(&sc->sc_media, 0, fxp_mediachange, fxp_mediastatus);
	for (i = 0; i < nmedia; i++) {
		/* Don't advertise 100Mbps media on a 10Mbps-only part. */
		if (IFM_SUBTYPE(media[i]) == IFM_100_TX && sc->phy_10Mbps_only)
			continue;
		ifmedia_add(&sc->sc_media, media[i], 0, NULL);
	}
	ifmedia_set(&sc->sc_media, defmedia);

	return (0);

 fail:
	printf("fxp%d: Failed to malloc memory\n", FXP_UNIT(sc));
	if (sc->cbl_base)
		free(sc->cbl_base, M_DEVBUF);
	if (sc->fxp_stats)
		free(sc->fxp_stats, M_DEVBUF);
	if (sc->mcsp)
		free(sc->mcsp, M_DEVBUF);
	/* frees entire chain */
	if (sc->rfa_headm)
		m_freem(sc->rfa_headm);

	return (ENOMEM);
}

/*
 * From NetBSD:
 *
 * Figure out EEPROM size.
 *
 * 559's can have either 64-word or 256-word EEPROMs, the 558
 * datasheet only talks about 64-word EEPROMs, and the 557 datasheet
 * talks about the existence of 16 to 256 word EEPROMs.
 *
 * The only known sizes are 64 and 256, where the 256 version is used
 * by CardBus cards to store CIS information.
 *
 * The address is shifted in msb-to-lsb, and after the last
 * address-bit the EEPROM is supposed to output a `dummy zero' bit,
 * after which follows the actual data. We try to detect this zero, by
 * probing the data-out bit in the EEPROM control register just after
 * having shifted in a bit. If the bit is zero, we assume we've
 * shifted enough address bits. The data-out should be tri-state,
 * before this, which should translate to a logical one.
 *
 * Other ways to do this would be to try to read a register with known
 * contents with a varying number of address bits, but no such
 * register seem to be available. The high bits of register 10 are 01
 * on the 558 and 559, but apparently not on the 557.
 *
 * The Linux driver computes a checksum on the EEPROM data, but the
 * value of this checksum is not very well documented.
 */
static void
fxp_autosize_eeprom(sc)
	struct fxp_softc *sc;
{
	u_int16_t reg;
	int x;

	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	/*
	 * Shift in read opcode.
	 */
	for (x = 3; x > 0; x--) {
		if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) {
			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
		} else {
			reg = FXP_EEPROM_EECS;
		}
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
		    reg | FXP_EEPROM_EESK);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
	}
	/*
	 * Shift in address.
	 * Wait for the dummy zero following a correct address shift.
	 */
	for (x = 1; x <= 8; x++) {
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
		    FXP_EEPROM_EECS | FXP_EEPROM_EESK);
		DELAY(1);
		if ((CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) &
		    FXP_EEPROM_EEDO) == 0)
			break;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
		DELAY(1);
	}
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/* x is the number of address bits shifted before the dummy zero. */
	sc->eeprom_size = x;
}

/*
 * Read from the serial EEPROM. Basically, you manually shift in
 * the read opcode (one bit at a time) and then shift in the address,
 * and then you shift out the data (all of this one bit at a time).
 * The word size is 16 bits, so you have to provide the address for
 * every 16 bits of data.
 */
static void
fxp_read_eeprom(sc, data, offset, words)
	struct fxp_softc *sc;
	u_short *data;
	int offset;
	int words;
{
	u_int16_t reg;
	int i, x;

	for (i = 0; i < words; i++) {
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
		/*
		 * Shift in read opcode.
		 */
		for (x = 3; x > 0; x--) {
			if (FXP_EEPROM_OPC_READ & (1 << (x - 1))) {
				reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
			} else {
				reg = FXP_EEPROM_EECS;
			}
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(1);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(1);
		}
		/*
		 * Shift in address.
		 */
		for (x = sc->eeprom_size; x > 0; x--) {
			if ((i + offset) & (1 << (x - 1))) {
				reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
			} else {
				reg = FXP_EEPROM_EECS;
			}
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(1);
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(1);
		}
		reg = FXP_EEPROM_EECS;
		data[i] = 0;
		/*
		 * Shift out data.
		 */
		for (x = 16; x > 0; x--) {
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL,
			    reg | FXP_EEPROM_EESK);
			DELAY(1);
			if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) &
			    FXP_EEPROM_EEDO)
				data[i] |= (1 << (x - 1));
			CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
			DELAY(1);
		}
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
		DELAY(1);
	}
}

/*
 * Start packet transmission on the interface.
 */
static void
fxp_start(ifp)
	struct ifnet *ifp;
{
	struct fxp_softc *sc = ifp->if_softc;
	struct fxp_cb_tx *txp;

	FXP_LOCK(sc);
	/*
	 * See if we need to suspend xmit until the multicast filter
	 * has been reprogrammed (which can only be done at the head
	 * of the command chain).
	 */
	if (sc->need_mcsetup) {
		FXP_UNLOCK(sc);
		return;
	}

	txp = NULL;

	/*
	 * We're finished if there is nothing more to add to the list or if
	 * we're all filled up with buffers to transmit.
	 * NOTE: One TxCB is reserved to guarantee that fxp_mc_setup() can add
	 * a NOP command when needed.
	 */
	while (ifp->if_snd.ifq_head != NULL && sc->tx_queued < FXP_NTXCB - 1) {
		struct mbuf *m, *mb_head;
		int segment;

		/*
		 * Grab a packet to transmit.
		 */
		IF_DEQUEUE(&ifp->if_snd, mb_head);

		/*
		 * Get pointer to next available tx desc.
		 */
		txp = sc->cbl_last->next;

		/*
		 * Go through each of the mbufs in the chain and initialize
		 * the transmit buffer descriptors with the physical address
		 * and size of the mbuf.
		 */
tbdinit:
		for (m = mb_head, segment = 0; m != NULL; m = m->m_next) {
			if (m->m_len != 0) {
				if (segment == FXP_NTXSEG)
					break;
				txp->tbd[segment].tb_addr =
				    vtophys(mtod(m, vm_offset_t));
				txp->tbd[segment].tb_size = m->m_len;
				segment++;
			}
		}
		if (m != NULL) {
			struct mbuf *mn;

			/*
			 * We ran out of segments. We have to recopy this mbuf
			 * chain first. Bail out if we can't get the new buffers.
			 */
			MGETHDR(mn, M_DONTWAIT, MT_DATA);
			if (mn == NULL) {
				m_freem(mb_head);
				break;
			}
			if (mb_head->m_pkthdr.len > MHLEN) {
				MCLGET(mn, M_DONTWAIT);
				if ((mn->m_flags & M_EXT) == 0) {
					m_freem(mn);
					m_freem(mb_head);
					break;
				}
			}
			m_copydata(mb_head, 0, mb_head->m_pkthdr.len,
			    mtod(mn, caddr_t));
			mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len;
			m_freem(mb_head);
			mb_head = mn;
			/* Retry descriptor fill with the compacted chain. */
			goto tbdinit;
		}

		txp->tbd_number = segment;
		txp->mb_head = mb_head;
		txp->cb_status = 0;
		if (sc->tx_queued != FXP_CXINT_THRESH - 1) {
			txp->cb_command =
			    FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF |
			    FXP_CB_COMMAND_S;
		} else {
			txp->cb_command =
			    FXP_CB_COMMAND_XMIT | FXP_CB_COMMAND_SF |
			    FXP_CB_COMMAND_S | FXP_CB_COMMAND_I;
			/*
			 * Set a 5 second timer just in case we don't hear from the
			 * card again.
			 */
			ifp->if_timer = 5;
		}
		txp->tx_threshold = tx_threshold;

		/*
		 * Advance the end of list forward.
		 */
#ifdef __alpha__
		/*
		 * On platforms which can't access memory in 16-bit
		 * granularities, we must prevent the card from DMA'ing
		 * up the status while we update the command field.
		 * This could cause us to overwrite the completion status.
		 */
		atomic_clear_short(&sc->cbl_last->cb_command,
		    FXP_CB_COMMAND_S);
#else
		sc->cbl_last->cb_command &= ~FXP_CB_COMMAND_S;
#endif /*__alpha__*/
		sc->cbl_last = txp;

		/*
		 * Advance the beginning of the list forward if there are
		 * no other packets queued (when nothing is queued, cbl_first
		 * sits on the last TxCB that was sent out).
		 */
		if (sc->tx_queued == 0)
			sc->cbl_first = txp;

		sc->tx_queued++;

		/*
		 * Pass packet to bpf if there is a listener.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp, mb_head);
	}

	/*
	 * We're finished. If we added to the list, issue a RESUME to get DMA
	 * going again if suspended.
	 */
	if (txp != NULL) {
		fxp_scb_wait(sc);
		CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_RESUME);
	}
	FXP_UNLOCK(sc);
}

/*
 * Process interface interrupts.
 */
static void
fxp_intr(arg)
	void *arg;
{
	struct fxp_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_if;
	u_int8_t statack;

	FXP_LOCK(sc);

	if (sc->suspended) {
		FXP_UNLOCK(sc);
		return;
	}

	while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) {
		/*
		 * First ACK all the interrupts in this pass.
		 */
		CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);

		/*
		 * Free any finished transmit mbuf chains.
		 *
		 * Handle the CNA event like a CXTNO event. It used to
		 * be that this event (control unit not ready) was not
		 * encountered, but it is now with the SMPng modifications.
		 * The exact sequence of events that occur when the interface
		 * is brought up are different now, and if this event
		 * goes unhandled, the configuration/rxfilter setup sequence
		 * can stall for several seconds. The result is that no
		 * packets go out onto the wire for about 5 to 10 seconds
		 * after the interface is ifconfig'ed for the first time.
		 */
		if (statack & (FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA)) {
			struct fxp_cb_tx *txp;

			for (txp = sc->cbl_first; sc->tx_queued &&
			    (txp->cb_status & FXP_CB_STATUS_C) != 0;
			    txp = txp->next) {
				if (txp->mb_head != NULL) {
					m_freem(txp->mb_head);
					txp->mb_head = NULL;
				}
				sc->tx_queued--;
			}
			sc->cbl_first = txp;
			ifp->if_timer = 0;
			if (sc->tx_queued == 0) {
				/* Queue drained; run deferred mcast setup. */
				if (sc->need_mcsetup)
					fxp_mc_setup(sc);
			}
			/*
			 * Try to start more packets transmitting.
			 */
			if (ifp->if_snd.ifq_head != NULL)
				fxp_start(ifp);
		}
		/*
		 * Process receiver interrupts. If a no-resource (RNR)
		 * condition exists, get whatever packets we can and
		 * re-start the receiver.
		 */
		if (statack & (FXP_SCB_STATACK_FR | FXP_SCB_STATACK_RNR)) {
			struct mbuf *m;
			struct fxp_rfa *rfa;
rcvloop:
			m = sc->rfa_headm;
			rfa = (struct fxp_rfa *)(m->m_ext.ext_buf +
			    RFA_ALIGNMENT_FUDGE);

			if (rfa->rfa_status & FXP_RFA_STATUS_C) {
				/*
				 * Remove first packet from the chain.
				 */
				sc->rfa_headm = m->m_next;
				m->m_next = NULL;

				/*
				 * Add a new buffer to the receive chain.
				 * If this fails, the old buffer is recycled
				 * instead.
				 */
				if (fxp_add_rfabuf(sc, m) == 0) {
					struct ether_header *eh;
					int total_len;

					total_len = rfa->actual_size &
					    (MCLBYTES - 1);
					if (total_len <
					    sizeof(struct ether_header)) {
						m_freem(m);
						goto rcvloop;
					}
					m->m_pkthdr.rcvif = ifp;
					m->m_pkthdr.len = m->m_len = total_len;
					eh = mtod(m, struct ether_header *);
					m->m_data +=
					    sizeof(struct ether_header);
					m->m_len -=
					    sizeof(struct ether_header);
					m->m_pkthdr.len = m->m_len;
					ether_input(ifp, eh, m);
				}
				goto rcvloop;
			}
			if (statack & FXP_SCB_STATACK_RNR) {
				fxp_scb_wait(sc);
				CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
				    vtophys(sc->rfa_headm->m_ext.ext_buf) +
				    RFA_ALIGNMENT_FUDGE);
				CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND,
				    FXP_SCB_COMMAND_RU_START);
			}
		}
	}
	FXP_UNLOCK(sc);
}

/*
 * Update packet in/out/collision statistics. The i82557 doesn't
 * allow you to access these counters without doing a fairly
 * expensive DMA to get _all_ of the statistics it maintains, so
 * we do this operation here only once per second. The statistics
 * counters in the kernel are updated from the previous dump-stats
 * DMA and then a new dump-stats DMA is started. The on-chip
 * counters are zeroed when the DMA completes. If we can't start
 * the DMA immediately, we don't wait - we just prepare to read
 * them again next time.
 */
static void
fxp_stats_update(arg)
	void *arg;
{
	struct fxp_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_if;
	struct fxp_stats *sp = sc->fxp_stats;
	struct fxp_cb_tx *txp;

	/*
	 * NOTE(review): the stats block *sp is read before FXP_LOCK is
	 * taken below — confirm the dump-stats DMA cannot update it
	 * concurrently here.
	 */
	ifp->if_opackets += sp->tx_good;
	ifp->if_collisions += sp->tx_total_collisions;
	if (sp->rx_good) {
		ifp->if_ipackets += sp->rx_good;
		sc->rx_idle_secs = 0;
	} else {
		/*
		 * Receiver's been idle for another second.
		 */
		sc->rx_idle_secs++;
	}
	ifp->if_ierrors +=
	    sp->rx_crc_errors +
	    sp->rx_alignment_errors +
	    sp->rx_rnr_errors +
	    sp->rx_overrun_errors;
	/*
	 * If any transmit underruns occurred, bump up the transmit
	 * threshold by another 512 bytes (64 * 8).
	 */
	if (sp->tx_underruns) {
		ifp->if_oerrors += sp->tx_underruns;
		if (tx_threshold < 192)
			tx_threshold += 64;
	}
	FXP_LOCK(sc);
	/*
	 * Release any xmit buffers that have completed DMA. This isn't
	 * strictly necessary to do here, but it's advantageous for mbufs
	 * with external storage to be released in a timely manner rather
	 * than being deferred for a potentially long time. This limits
	 * the delay to a maximum of one second.
	 */
	for (txp = sc->cbl_first; sc->tx_queued &&
	    (txp->cb_status & FXP_CB_STATUS_C) != 0;
	    txp = txp->next) {
		if (txp->mb_head != NULL) {
			m_freem(txp->mb_head);
			txp->mb_head = NULL;
		}
		sc->tx_queued--;
	}
	sc->cbl_first = txp;
	/*
	 * If we haven't received any packets in FXP_MAC_RX_IDLE seconds,
	 * then assume the receiver has locked up and attempt to clear
	 * the condition by reprogramming the multicast filter. This is
	 * a work-around for a bug in the 82557 where the receiver locks
	 * up if it gets certain types of garbage in the synchronization
	 * bits prior to the packet header. This bug is supposed to only
	 * occur in 10Mbps mode, but has been seen to occur in 100Mbps
	 * mode as well (perhaps due to a 10/100 speed transition).
	 */
	if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) {
		sc->rx_idle_secs = 0;
		fxp_mc_setup(sc);
	}
	/*
	 * If there is no pending command, start another stats
	 * dump. Otherwise punt for now.
	 */
	if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) {
		/*
		 * Start another stats dump.
		 */
		CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND,
		    FXP_SCB_COMMAND_CU_DUMPRESET);
	} else {
		/*
		 * A previous command is still waiting to be accepted.
		 * Just zero our copy of the stats and wait for the
		 * next timer event to update them.
		 */
		sp->tx_good = 0;
		sp->tx_underruns = 0;
		sp->tx_total_collisions = 0;

		sp->rx_good = 0;
		sp->rx_crc_errors = 0;
		sp->rx_alignment_errors = 0;
		sp->rx_rnr_errors = 0;
		sp->rx_overrun_errors = 0;
	}
	FXP_UNLOCK(sc);
	/*
	 * Schedule another timeout one second from now.
	 */
	sc->stat_ch = timeout(fxp_stats_update, sc, hz);
}

/*
 * Stop the interface. Cancels the statistics updater and resets
 * the interface.
 */
static void
fxp_stop(sc)
	struct fxp_softc *sc;
{
	struct ifnet *ifp = &sc->sc_if;
	struct fxp_cb_tx *txp;
	int i;

	FXP_LOCK(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	/*
	 * Cancel stats updater.
	 */
	untimeout(fxp_stats_update, sc, sc->stat_ch);

	/*
	 * Issue software reset
	 */
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
	DELAY(10);

	/*
	 * Release any xmit buffers.
	 */
	txp = sc->cbl_base;
	if (txp != NULL) {
		for (i = 0; i < FXP_NTXCB; i++) {
			if (txp[i].mb_head != NULL) {
				m_freem(txp[i].mb_head);
				txp[i].mb_head = NULL;
			}
		}
	}
	sc->tx_queued = 0;

	/*
	 * Free all the receive buffers then reallocate/reinitialize
	 */
	if (sc->rfa_headm != NULL)
		m_freem(sc->rfa_headm);
	sc->rfa_headm = NULL;
	sc->rfa_tailm = NULL;
	for (i = 0; i < FXP_NRFABUFS; i++) {
		if (fxp_add_rfabuf(sc, NULL) != 0) {
			/*
			 * This "can't happen" - we're at splimp()
			 * and we just freed all the buffers we need
			 * above.
			 */
			panic("fxp_stop: no buffers!");
		}
	}

	FXP_UNLOCK(sc);
}

/*
 * Watchdog/transmission transmit timeout handler. Called when a
 * transmission is started on the interface, but no interrupt is
 * received before the timeout. This usually indicates that the
 * card has wedged for some reason.
 */
static void
fxp_watchdog(ifp)
	struct ifnet *ifp;
{
	struct fxp_softc *sc = ifp->if_softc;

	printf("fxp%d: device timeout\n", FXP_UNIT(sc));
	ifp->if_oerrors++;

	/* Recover by fully reinitializing the hardware. */
	fxp_init(sc);
}

/*
 * Initialize the hardware: stop pending I/O, program the configuration
 * and station-address command blocks, rebuild the TxCB ring, start the
 * receiver, set media, and kick off the stats updater.
 */
static void
fxp_init(xsc)
	void *xsc;
{
	struct fxp_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_if;
	struct fxp_cb_config *cbp;
	struct fxp_cb_ias *cb_ias;
	struct fxp_cb_tx *txp;
	int i, prm;

	FXP_LOCK(sc);
	/*
	 * Cancel any pending I/O
	 */
	fxp_stop(sc);

	prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;

	/*
	 * Initialize base of CBL and RFA memory. Loading with zero
	 * sets it up for regular linear addressing.
	 */
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_BASE);

	fxp_scb_wait(sc);
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_RU_BASE);

	/*
	 * Initialize base of dump-stats buffer.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(sc->fxp_stats));
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_DUMP_ADR);

	/*
	 * We temporarily use memory that contains the TxCB list to
	 * construct the config CB. The TxCB list memory is rebuilt
	 * later.
	 */
	cbp = (struct fxp_cb_config *) sc->cbl_base;

	/*
	 * This bcopy is kind of disgusting, but there are a bunch of must be
	 * zero and must be one bits in this structure and this is the easiest
	 * way to initialize them all to proper values.
	 */
	bcopy(fxp_cb_config_template,
	    (void *)(uintptr_t)(volatile void *)&cbp->cb_status,
	    sizeof(fxp_cb_config_template));

	cbp->cb_status =	0;
	cbp->cb_command =	FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL;
	cbp->link_addr =	-1;	/* (no) next command */
	cbp->byte_count =	22;	/* (22) bytes to config */
	cbp->rx_fifo_limit =	8;	/* rx fifo threshold (32 bytes) */
	cbp->tx_fifo_limit =	0;	/* tx fifo threshold (0 bytes) */
	cbp->adaptive_ifs =	0;	/* (no) adaptive interframe spacing */
	cbp->rx_dma_bytecount =	0;	/* (no) rx DMA max */
	cbp->tx_dma_bytecount =	0;	/* (no) tx DMA max */
	cbp->dma_bce =		0;	/* (disable) dma max counters */
	cbp->late_scb =		0;	/* (don't) defer SCB update */
	cbp->tno_int =		0;	/* (disable) tx not okay interrupt */
	cbp->ci_int =		1;	/* interrupt on CU idle */
	cbp->save_bf =		prm;	/* save bad frames */
	cbp->disc_short_rx =	!prm;	/* discard short packets */
	cbp->underrun_retry =	1;	/* retry mode (1) on DMA underrun */
	cbp->mediatype =	!sc->phy_10Mbps_only; /* interface mode */
	cbp->nsai =		1;	/* (don't) disable source addr insert */
	cbp->preamble_length =	2;	/* (7 byte) preamble */
	cbp->loopback =		0;	/* (don't) loopback */
	cbp->linear_priority =	0;	/* (normal CSMA/CD operation) */
	cbp->linear_pri_mode =	0;	/* (wait after xmit only) */
	cbp->interfrm_spacing =	6;	/* (96 bits of) interframe spacing */
	cbp->promiscuous =	prm;	/* promiscuous mode */
	cbp->bcast_disable =	0;	/* (don't) disable broadcasts */
	cbp->crscdt =		0;	/* (CRS only) */
	cbp->stripping =	!prm;	/* truncate rx packet to byte count */
	cbp->padding =		1;	/* (do) pad short tx packets */
	cbp->rcv_crc_xfer =	0;	/* (don't) xfer CRC to host */
	cbp->force_fdx =	0;	/* (don't) force full duplex */
	cbp->fdx_pin_en =	1;	/* (enable) FDX# pin */
	cbp->multi_ia =		0;	/* (don't) accept multiple IAs */
	cbp->mc_all =		sc->all_mcasts;/* accept all multicasts */

	/*
	 * Start the config command/DMA.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(&cbp->cb_status));
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete. */
	fxp_dma_wait(&cbp->cb_status, sc);

	/*
	 * Now initialize the station address. Temporarily use the TxCB
	 * memory area like we did above for the config CB.
	 */
	cb_ias = (struct fxp_cb_ias *) sc->cbl_base;
	cb_ias->cb_status = 0;
	cb_ias->cb_command = FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL;
	cb_ias->link_addr = -1;
	bcopy(sc->arpcom.ac_enaddr,
	    (void *)(uintptr_t)(volatile void *)cb_ias->macaddr,
	    sizeof(sc->arpcom.ac_enaddr));

	/*
	 * Start the IAS (Individual Address Setup) command/DMA.
	 * NOTE(review): SCB_GENERAL is not reloaded here; it still holds
	 * the physical address of sc->cbl_base from the config command,
	 * which is the same memory the IAS CB occupies — confirm this is
	 * why the CSR_WRITE_4 is omitted.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);
	/* ...and wait for it to complete. */
	fxp_dma_wait(&cb_ias->cb_status, sc);

	/*
	 * Initialize transmit control block (TxCB) list.
	 */
	txp = sc->cbl_base;
	bzero(txp, sizeof(struct fxp_cb_tx) * FXP_NTXCB);
	for (i = 0; i < FXP_NTXCB; i++) {
		txp[i].cb_status = FXP_CB_STATUS_C | FXP_CB_STATUS_OK;
		txp[i].cb_command = FXP_CB_COMMAND_NOP;
		txp[i].link_addr =
		    vtophys(&txp[(i + 1) & FXP_TXCB_MASK].cb_status);
		txp[i].tbd_array_addr = vtophys(&txp[i].tbd[0]);
		txp[i].next = &txp[(i + 1) & FXP_TXCB_MASK];
	}
	/*
	 * Set the suspend flag on the first TxCB and start the control
	 * unit. It will execute the NOP and then suspend.
	 */
	txp->cb_command = FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S;
	sc->cbl_first = sc->cbl_last = txp;
	sc->tx_queued = 1;

	fxp_scb_wait(sc);
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);

	/*
	 * Initialize receiver buffer area - RFA.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
	    vtophys(sc->rfa_headm->m_ext.ext_buf) + RFA_ALIGNMENT_FUDGE);
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_RU_START);

	/*
	 * Set current media.
	 */
	fxp_set_media(sc, sc->sc_media.ifm_cur->ifm_media);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	FXP_UNLOCK(sc);

	/*
	 * Start stats updater.
	 */
	sc->stat_ch = timeout(fxp_stats_update, sc, hz);
}

/*
 * Program the PHY for the requested media, dispatching on the primary
 * PHY device type read from the EEPROM at attach time.
 */
static void
fxp_set_media(sc, media)
	struct fxp_softc *sc;
	int media;
{
	switch (sc->phy_primary_device) {
	case FXP_PHY_DP83840:
	case FXP_PHY_DP83840A:
		fxp_mdi_write(sc, sc->phy_primary_addr, FXP_DP83840_PCR,
		    fxp_mdi_read(sc, sc->phy_primary_addr, FXP_DP83840_PCR) |
		    FXP_DP83840_PCR_LED4_MODE |	/* LED4 always indicates duplex */
		    FXP_DP83840_PCR_F_CONNECT |	/* force link disconnect bypass */
		    FXP_DP83840_PCR_BIT10);	/* XXX I have no idea */
		/* fall through */
	case FXP_PHY_82553A:
	case FXP_PHY_82553C: /* untested */
	case FXP_PHY_82555:
	case FXP_PHY_82555B:
		if (IFM_SUBTYPE(media) != IFM_AUTO) {
			int flags;

			flags = (IFM_SUBTYPE(media) == IFM_100_TX) ?
			    FXP_PHY_BMCR_SPEED_100M : 0;
			flags |= (media & IFM_FDX) ?
			    FXP_PHY_BMCR_FULLDUPLEX : 0;
			fxp_mdi_write(sc, sc->phy_primary_addr,
			    FXP_PHY_BMCR,
			    (fxp_mdi_read(sc, sc->phy_primary_addr,
			    FXP_PHY_BMCR) &
			    ~(FXP_PHY_BMCR_AUTOEN | FXP_PHY_BMCR_SPEED_100M |
			    FXP_PHY_BMCR_FULLDUPLEX)) | flags);
		} else {
			fxp_mdi_write(sc, sc->phy_primary_addr,
			    FXP_PHY_BMCR,
			    (fxp_mdi_read(sc, sc->phy_primary_addr,
			    FXP_PHY_BMCR) | FXP_PHY_BMCR_AUTOEN));
		}
		break;
	/*
	 * The Seeq 80c24 doesn't have a PHY programming interface, so do
	 * nothing.
	 */
	case FXP_PHY_80C24:
		break;
	default:
		printf("fxp%d: warning: unsupported PHY, type = %d, addr = %d\n",
		     FXP_UNIT(sc), sc->phy_primary_device,
		     sc->phy_primary_addr);
	}
}

/*
 * Change media according to request.
 */
int
fxp_mediachange(ifp)
	struct ifnet *ifp;
{
	struct fxp_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	fxp_set_media(sc, ifm->ifm_media);
	return (0);
}

/*
 * Notify the world which media we're using.
 */
void
fxp_mediastatus(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct fxp_softc *sc = ifp->if_softc;
	int flags, stsflags;

	switch (sc->phy_primary_device) {
	case FXP_PHY_82555:
	case FXP_PHY_82555B:
	case FXP_PHY_DP83840:
	case FXP_PHY_DP83840A:
		ifmr->ifm_status = IFM_AVALID; /* IFM_ACTIVE will be valid */
		ifmr->ifm_active = IFM_ETHER;
		/*
		 * the following is not an error.
		 * You need to read this register twice to get current
		 * status. This is correct documented behaviour, the
		 * first read gets latched values.
		 */
		stsflags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_STS);
		stsflags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_STS);
		if (stsflags & FXP_PHY_STS_LINK_STS)
			ifmr->ifm_status |= IFM_ACTIVE;

		/*
		 * If we are in auto mode, then try report the result.
		 */
		flags = fxp_mdi_read(sc, sc->phy_primary_addr, FXP_PHY_BMCR);
		if (flags & FXP_PHY_BMCR_AUTOEN) {
			ifmr->ifm_active |= IFM_AUTO; /* XXX presently 0 */
			if (stsflags & FXP_PHY_STS_AUTO_DONE) {
				/*
				 * Intel and National parts report
				 * differently on what they found.
				 */
				if ((sc->phy_primary_device == FXP_PHY_82555)
				|| (sc->phy_primary_device == FXP_PHY_82555B)) {
					flags = fxp_mdi_read(sc,
						sc->phy_primary_addr,
						FXP_PHY_USC);

					if (flags & FXP_PHY_USC_SPEED)
						ifmr->ifm_active |= IFM_100_TX;
					else
						ifmr->ifm_active |= IFM_10_T;

					if (flags & FXP_PHY_USC_DUPLEX)
						ifmr->ifm_active |= IFM_FDX;
				} else { /* it's National. only know speed */
					flags = fxp_mdi_read(sc,
						sc->phy_primary_addr,
						FXP_DP83840_PAR);

					if (flags & FXP_DP83840_PAR_SPEED_10)
						ifmr->ifm_active |= IFM_10_T;
					else
						ifmr->ifm_active |= IFM_100_TX;
				}
			}
		} else { /* in manual mode.. just report what we were set to */
			if (flags & FXP_PHY_BMCR_SPEED_100M)
				ifmr->ifm_active |= IFM_100_TX;
			else
				ifmr->ifm_active |= IFM_10_T;

			if (flags & FXP_PHY_BMCR_FULLDUPLEX)
				ifmr->ifm_active |= IFM_FDX;
		}
		break;

	case FXP_PHY_80C24:
	default:
		ifmr->ifm_active = IFM_ETHER|IFM_MANUAL; /* XXX IFM_AUTO ? */
	}
}

/*
 * Add a buffer to the end of the RFA buffer list.
 * Return 0 if successful, 1 for failure. A failure results in
 * adding the 'oldm' (if non-NULL) on to the end of the list -
 * tossing out its old contents and recycling it.
 * The RFA struct is stuck at the beginning of mbuf cluster and the
 * data pointer is fixed up to point just past it.
 */
static int
fxp_add_rfabuf(sc, oldm)
	struct fxp_softc *sc;
	struct mbuf *oldm;
{
	u_int32_t v;
	struct mbuf *m;
	struct fxp_rfa *rfa, *p_rfa;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m != NULL) {
		MCLGET(m, M_DONTWAIT);
		if ((m->m_flags & M_EXT) == 0) {
			m_freem(m);
			if (oldm == NULL)
				return 1;
			m = oldm;
			m->m_data = m->m_ext.ext_buf;
		}
	} else {
		if (oldm == NULL)
			return 1;
		m = oldm;
		m->m_data = m->m_ext.ext_buf;
	}

	/*
	 * Move the data pointer up so that the incoming data packet
	 * will be 32-bit aligned.
	 */
	m->m_data += RFA_ALIGNMENT_FUDGE;

	/*
	 * Get a pointer to the base of the mbuf cluster and move
	 * data start past it.
	 */
	rfa = mtod(m, struct fxp_rfa *);
	m->m_data += sizeof(struct fxp_rfa);
	rfa->size = (u_int16_t)(MCLBYTES - sizeof(struct fxp_rfa) -
	    RFA_ALIGNMENT_FUDGE);

	/*
	 * Initialize the rest of the RFA. Note that since the RFA
	 * is misaligned, we cannot store values directly. Instead,
	 * we use an optimized, inline copy.
	 */
	rfa->rfa_status = 0;
	rfa->rfa_control = FXP_RFA_CONTROL_EL;
	rfa->actual_size = 0;

	v = -1;
	fxp_lwcopy(&v, (volatile u_int32_t *) rfa->link_addr);
	fxp_lwcopy(&v, (volatile u_int32_t *) rfa->rbd_addr);

	/*
	 * If there are other buffers already on the list, attach this
	 * one to the end by fixing up the tail to point to this one.
	 */
	if (sc->rfa_headm != NULL) {
		p_rfa = (struct fxp_rfa *) (sc->rfa_tailm->m_ext.ext_buf +
		    RFA_ALIGNMENT_FUDGE);
		sc->rfa_tailm->m_next = m;
		v = vtophys(rfa);
		fxp_lwcopy(&v, (volatile u_int32_t *) p_rfa->link_addr);
		p_rfa->rfa_control = 0;
	} else {
		sc->rfa_headm = m;
	}
	sc->rfa_tailm = m;

	/* m == oldm means we recycled the old buffer, i.e. failure (1). */
	return (m == oldm);
}

static volatile int
fxp_mdi_read(sc, phy, reg)
	struct fxp_softc *sc;
	int phy;
	int reg;
{
	int count = 10000;
	int value;

	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
	    (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21));

	while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0
	    && count--)
		DELAY(10);

	if (count <= 0)
		printf("fxp%d: fxp_mdi_read: timed out\n", FXP_UNIT(sc));

	return (value & 0xffff);
}

static void
fxp_mdi_write(sc, phy, reg, value)
	struct fxp_softc *sc;
	int phy;
	int reg;
	int value;
{
	int count = 10000;

	CSR_WRITE_4(sc, FXP_CSR_MDICONTROL,
	    (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) |
	    (value & 0xffff));

	while((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 &&
	    count--)
		DELAY(10);

	if (count <= 0)
		printf("fxp%d: fxp_mdi_write: timed out\n", FXP_UNIT(sc));
}

static int
fxp_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct fxp_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int error = 0;

	FXP_LOCK(sc);

	switch (command) {

	case SIOCSIFADDR:
	case SIOCGIFADDR:
	case SIOCSIFMTU:
		error = ether_ioctl(ifp, command, data);
		break;

	case SIOCSIFFLAGS:
		sc->all_mcasts = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0;

		/*
		 * If interface is marked up and not running, then start it.
		 * If it is marked down and running, stop it.
		 * XXX If it's up then re-initialize it. This is so flags
		 * such as IFF_PROMISC are handled.
		 */
		if (ifp->if_flags & IFF_UP) {
			fxp_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				fxp_stop(sc);
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		sc->all_mcasts = (ifp->if_flags & IFF_ALLMULTI) ? 1 : 0;
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		if (!sc->all_mcasts)
			fxp_mc_setup(sc);
		/*
		 * fxp_mc_setup() can turn on sc->all_mcasts, so check it
		 * again rather than else {}.
		 */
		if (sc->all_mcasts)
			fxp_init(sc);
		error = 0;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command);
		break;

	default:
		error = EINVAL;
	}
	FXP_UNLOCK(sc);
	return (error);
}

/*
 * Program the multicast filter.
 *
 * We have an artificial restriction that the multicast setup command
 * must be the first command in the chain, so we take steps to ensure
 * this. By requiring this, it allows us to keep up the performance of
 * the pre-initialized command ring (esp. link pointers) by not actually
 * inserting the mcsetup command in the ring - i.e. its link pointer
 * points to the TxCB ring, but the mcsetup descriptor itself is not part
 * of it. We then can do 'CU_START' on the mcsetup descriptor and have it
 * lead into the regular TxCB ring when it completes.
 *
 * This function must be called at splimp.
 */
static void
fxp_mc_setup(sc)
	struct fxp_softc *sc;
{
	struct fxp_cb_mcs *mcsp = sc->mcsp;
	struct ifnet *ifp = &sc->sc_if;
	struct ifmultiaddr *ifma;
	int nmcasts;
	int count;

	/*
	 * If there are queued commands, we must wait until they are all
	 * completed. If we are already waiting, then add a NOP command
	 * with interrupt option so that we're notified when all commands
	 * have been completed - fxp_start() ensures that no additional
	 * TX commands will be added when need_mcsetup is true.
	 */
	if (sc->tx_queued) {
		struct fxp_cb_tx *txp;

		/*
		 * need_mcsetup will be true if we are already waiting for the
		 * NOP command to be completed (see below). In this case, bail.
		 */
		if (sc->need_mcsetup)
			return;
		sc->need_mcsetup = 1;

		/*
		 * Add a NOP command with interrupt so that we are notified when all
		 * TX commands have been processed.
		 */
		txp = sc->cbl_last->next;
		txp->mb_head = NULL;
		txp->cb_status = 0;
		txp->cb_command = FXP_CB_COMMAND_NOP |
		    FXP_CB_COMMAND_S | FXP_CB_COMMAND_I;
		/*
		 * Advance the end of list forward.
		 */
		sc->cbl_last->cb_command &= ~FXP_CB_COMMAND_S;
		sc->cbl_last = txp;
		sc->tx_queued++;
		/*
		 * Issue a resume in case the CU has just suspended.
		 */
		fxp_scb_wait(sc);
		CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_RESUME);
		/*
		 * Set a 5 second timer just in case we don't hear from the
		 * card again.
		 */
		ifp->if_timer = 5;

		return;
	}
	sc->need_mcsetup = 0;

	/*
	 * Initialize multicast setup descriptor.
	 */
	mcsp->next = sc->cbl_base;
	mcsp->mb_head = NULL;
	mcsp->cb_status = 0;
	mcsp->cb_command = FXP_CB_COMMAND_MCAS |
	    FXP_CB_COMMAND_S | FXP_CB_COMMAND_I;
	mcsp->link_addr = vtophys(&sc->cbl_base->cb_status);

	nmcasts = 0;
	if (!sc->all_mcasts) {
-		for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
-		    ifma = ifma->ifma_link.le_next) {
+		LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			/* Fall back to all_mcasts if the table overflows. */
			if (nmcasts >= MAXMCADDR) {
				sc->all_mcasts = 1;
				nmcasts = 0;
				break;
			}
			bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
			    (void *)(uintptr_t)(volatile void *)
				&sc->mcsp->mc_addr[nmcasts][0], 6);
			nmcasts++;
		}
	}
	mcsp->mc_cnt = nmcasts * 6;
	sc->cbl_first = sc->cbl_last = (struct fxp_cb_tx *) mcsp;
	sc->tx_queued = 1;

	/*
	 * Wait until command unit is not active. This should never
	 * be the case when nothing is queued, but make sure anyway.
	 */
	count = 100;
	while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) ==
	    FXP_SCB_CUS_ACTIVE && --count)
		DELAY(10);
	if (count == 0) {
		printf("fxp%d: command queue timeout\n", FXP_UNIT(sc));
		return;
	}

	/*
	 * Start the multicast setup command.
	 */
	fxp_scb_wait(sc);
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, vtophys(&mcsp->cb_status));
	CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_SCB_COMMAND_CU_START);

	ifp->if_timer = 2;
	return;
}
Index: head/sys/pci/if_pcn.c
===================================================================
--- head/sys/pci/if_pcn.c	(revision 71961)
+++ head/sys/pci/if_pcn.c	(revision 71962)
@@ -1,1437 +1,1436 @@
/*
 * Copyright (c) 2000 Berkeley Software Design, Inc.
* Copyright (c) 1997, 1998, 1999, 2000 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * AMD Am79c972 fast ethernet PCI NIC driver. Datatheets are available * from http://www.amd.com. * * Written by Bill Paul */ /* * The AMD PCnet/PCI controllers are more advanced and functional * versions of the venerable 7990 LANCE. 
The PCnet/PCI chips retain * backwards compatibility with the LANCE and thus can be made * to work with older LANCE drivers. This is in fact how the * PCnet/PCI chips were supported in FreeBSD originally. The trouble * is that the PCnet/PCI devices offer several performance enhancements * which can't be exploited in LANCE compatibility mode. Chief among * these enhancements is the ability to perform PCI DMA operations * using 32-bit addressing (which eliminates the need for ISA * bounce-buffering), and special receive buffer alignment (which * allows the receive handler to pass packets to the upper protocol * layers without copying on both the x86 and alpha platforms). */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include #include #include #include #include #include #include #define PCN_USEIOSPACE #include MODULE_DEPEND(pcn, miibus, 1, 1, 1); /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" #ifndef lint static const char rcsid[] = "$FreeBSD$"; #endif /* * Various supported device vendors/types and their names. 
*/ static struct pcn_type pcn_devs[] = { { PCN_VENDORID, PCN_DEVICEID_PCNET, "AMD PCnet/PCI 10/100BaseTX" }, { PCN_VENDORID, PCN_DEVICEID_HOME, "AMD PCnet/Home HomePNA" }, { 0, 0, NULL } }; static u_int32_t pcn_csr_read __P((struct pcn_softc *, int)); static u_int16_t pcn_csr_read16 __P((struct pcn_softc *, int)); static u_int16_t pcn_bcr_read16 __P((struct pcn_softc *, int)); static void pcn_csr_write __P((struct pcn_softc *, int, int)); static u_int32_t pcn_bcr_read __P((struct pcn_softc *, int)); static void pcn_bcr_write __P((struct pcn_softc *, int, int)); static int pcn_probe __P((device_t)); static int pcn_attach __P((device_t)); static int pcn_detach __P((device_t)); static int pcn_newbuf __P((struct pcn_softc *, int, struct mbuf *)); static int pcn_encap __P((struct pcn_softc *, struct mbuf *, u_int32_t *)); static void pcn_rxeof __P((struct pcn_softc *)); static void pcn_txeof __P((struct pcn_softc *)); static void pcn_intr __P((void *)); static void pcn_tick __P((void *)); static void pcn_start __P((struct ifnet *)); static int pcn_ioctl __P((struct ifnet *, u_long, caddr_t)); static void pcn_init __P((void *)); static void pcn_stop __P((struct pcn_softc *)); static void pcn_watchdog __P((struct ifnet *)); static void pcn_shutdown __P((device_t)); static int pcn_ifmedia_upd __P((struct ifnet *)); static void pcn_ifmedia_sts __P((struct ifnet *, struct ifmediareq *)); static int pcn_miibus_readreg __P((device_t, int, int)); static int pcn_miibus_writereg __P((device_t, int, int, int)); static void pcn_miibus_statchg __P((device_t)); static void pcn_setfilt __P((struct ifnet *)); static void pcn_setmulti __P((struct pcn_softc *)); static u_int32_t pcn_crc __P((caddr_t)); static void pcn_reset __P((struct pcn_softc *)); static int pcn_list_rx_init __P((struct pcn_softc *)); static int pcn_list_tx_init __P((struct pcn_softc *)); #ifdef PCN_USEIOSPACE #define PCN_RES SYS_RES_IOPORT #define PCN_RID PCN_PCI_LOIO #else #define PCN_RES SYS_RES_MEMORY #define 
PCN_RID PCN_PCI_LOMEM #endif static device_method_t pcn_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pcn_probe), DEVMETHOD(device_attach, pcn_attach), DEVMETHOD(device_detach, pcn_detach), DEVMETHOD(device_shutdown, pcn_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, pcn_miibus_readreg), DEVMETHOD(miibus_writereg, pcn_miibus_writereg), DEVMETHOD(miibus_statchg, pcn_miibus_statchg), { 0, 0 } }; static driver_t pcn_driver = { "pcn", pcn_methods, sizeof(struct pcn_softc) }; static devclass_t pcn_devclass; DRIVER_MODULE(if_pcn, pci, pcn_driver, pcn_devclass, 0, 0); DRIVER_MODULE(miibus, pcn, miibus_driver, miibus_devclass, 0, 0); #define PCN_CSR_SETBIT(sc, reg, x) \ pcn_csr_write(sc, reg, pcn_csr_read(sc, reg) | (x)) #define PCN_CSR_CLRBIT(sc, reg, x) \ pcn_csr_write(sc, reg, pcn_csr_read(sc, reg) & ~(x)) #define PCN_BCR_SETBIT(sc, reg, x) \ pcn_bcr_write(sc, reg, pcn_bcr_read(sc, reg) | (x)) #define PCN_BCR_CLRBIT(sc, reg, x) \ pcn_bcr_write(sc, reg, pcn_bcr_read(sc, reg) & ~(x)) static u_int32_t pcn_csr_read(sc, reg) struct pcn_softc *sc; int reg; { CSR_WRITE_4(sc, PCN_IO32_RAP, reg); return(CSR_READ_4(sc, PCN_IO32_RDP)); } static u_int16_t pcn_csr_read16(sc, reg) struct pcn_softc *sc; int reg; { CSR_WRITE_2(sc, PCN_IO16_RAP, reg); return(CSR_READ_2(sc, PCN_IO16_RDP)); } static void pcn_csr_write(sc, reg, val) struct pcn_softc *sc; int reg; { CSR_WRITE_4(sc, PCN_IO32_RAP, reg); CSR_WRITE_4(sc, PCN_IO32_RDP, val); return; } static u_int32_t pcn_bcr_read(sc, reg) struct pcn_softc *sc; int reg; { CSR_WRITE_4(sc, PCN_IO32_RAP, reg); return(CSR_READ_4(sc, PCN_IO32_BDP)); } static u_int16_t pcn_bcr_read16(sc, reg) struct pcn_softc *sc; int reg; { CSR_WRITE_2(sc, PCN_IO16_RAP, reg); return(CSR_READ_2(sc, PCN_IO16_BDP)); } static void pcn_bcr_write(sc, reg, val) struct pcn_softc *sc; int reg; { CSR_WRITE_4(sc, PCN_IO32_RAP, 
reg); CSR_WRITE_4(sc, PCN_IO32_BDP, val); return; } static int pcn_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct pcn_softc *sc; int val; sc = device_get_softc(dev); if (sc->pcn_phyaddr && phy > sc->pcn_phyaddr) return(0); pcn_bcr_write(sc, PCN_BCR_MIIADDR, reg | (phy << 5)); val = pcn_bcr_read(sc, PCN_BCR_MIIDATA) & 0xFFFF; if (val == 0xFFFF) return(0); sc->pcn_phyaddr = phy; return(val); } static int pcn_miibus_writereg(dev, phy, reg, data) device_t dev; int phy, reg, data; { struct pcn_softc *sc; sc = device_get_softc(dev); pcn_bcr_write(sc, PCN_BCR_MIIADDR, reg | (phy << 5)); pcn_bcr_write(sc, PCN_BCR_MIIDATA, data); return(0); } static void pcn_miibus_statchg(dev) device_t dev; { struct pcn_softc *sc; struct mii_data *mii; sc = device_get_softc(dev); mii = device_get_softc(sc->pcn_miibus); if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { PCN_BCR_SETBIT(sc, PCN_BCR_DUPLEX, PCN_DUPLEX_FDEN); } else { PCN_BCR_CLRBIT(sc, PCN_BCR_DUPLEX, PCN_DUPLEX_FDEN); } return; } #define DC_POLY 0xEDB88320 static u_int32_t pcn_crc(addr) caddr_t addr; { u_int32_t idx, bit, data, crc; /* Compute CRC for the address value. */ crc = 0xFFFFFFFF; /* initial value */ for (idx = 0; idx < 6; idx++) { for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) crc = (crc >> 1) ^ (((crc ^ data) & 1) ? 
DC_POLY : 0); } return ((crc >> 26) & 0x3F); } static void pcn_setmulti(sc) struct pcn_softc *sc; { struct ifnet *ifp; struct ifmultiaddr *ifma; u_int32_t h, i; u_int16_t hashes[4] = { 0, 0, 0, 0 }; ifp = &sc->arpcom.ac_if; PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND); if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { for (i = 0; i < 4; i++) pcn_csr_write(sc, PCN_CSR_MAR0 + i, 0xFFFF); PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND); return; } /* first, zot all the existing hash bits */ for (i = 0; i < 4; i++) pcn_csr_write(sc, PCN_CSR_MAR0 + i, 0); /* now program new ones */ - for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; - ifma = ifma->ifma_link.le_next) { + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = pcn_crc(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); hashes[h >> 4] |= 1 << (h & 0xF); } for (i = 0; i < 4; i++) pcn_csr_write(sc, PCN_CSR_MAR0 + i, hashes[i]); PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND); return; } static void pcn_reset(sc) struct pcn_softc *sc; { /* * Issue a reset by reading from the RESET register. * Note that we don't know if the chip is operating in * 16-bit or 32-bit mode at this point, so we attempt * to reset the chip both ways. If one fails, the other * will succeed. */ CSR_READ_2(sc, PCN_IO16_RESET); CSR_READ_4(sc, PCN_IO32_RESET); /* Wait a little while for the chip to get its brains in order. */ DELAY(1000); /* Select 32-bit (DWIO) mode */ CSR_WRITE_4(sc, PCN_IO32_RDP, 0); /* Select software style 3. */ pcn_bcr_write(sc, PCN_BCR_SSTYLE, PCN_SWSTYLE_PCNETPCI_BURST); return; } /* * Probe for an AMD chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. 
*/ static int pcn_probe(dev) device_t dev; { struct pcn_type *t; struct pcn_softc *sc; int rid; u_int32_t chip_id; t = pcn_devs; sc = device_get_softc(dev); while(t->pcn_name != NULL) { if ((pci_get_vendor(dev) == t->pcn_vid) && (pci_get_device(dev) == t->pcn_did)) { /* * Temporarily map the I/O space * so we can read the chip ID register. */ rid = PCN_RID; sc->pcn_res = bus_alloc_resource(dev, PCN_RES, &rid, 0, ~0, 1, RF_ACTIVE); if (sc->pcn_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); return(ENXIO); } sc->pcn_btag = rman_get_bustag(sc->pcn_res); sc->pcn_bhandle = rman_get_bushandle(sc->pcn_res); mtx_init(&sc->pcn_mtx, device_get_nameunit(dev), MTX_DEF); PCN_LOCK(sc); /* * Note: we can *NOT* put the chip into * 32-bit mode yet. The lnc driver will only * work in 16-bit mode, and once the chip * goes into 32-bit mode, the only way to * get it out again is with a hardware reset. * So if pcn_probe() is called before the * lnc driver's probe routine, the chip will * be locked into 32-bit operation and the lnc * driver will be unable to attach to it. * Note II: if the chip happens to already * be in 32-bit mode, we still need to check * the chip ID, but first we have to detect * 32-bit mode using only 16-bit operations. * The safest way to do this is to read the * PCI subsystem ID from BCR23/24 and compare * that with the value read from PCI config * space. */ chip_id = pcn_bcr_read16(sc, PCN_BCR_PCISUBSYSID); chip_id <<= 16; chip_id |= pcn_bcr_read16(sc, PCN_BCR_PCISUBVENID); if (chip_id == pci_read_config(dev, PCIR_SUBVEND_0, 4)) { /* We're in 16-bit mode. */ chip_id = pcn_csr_read16(sc, PCN_CSR_CHIPID1); chip_id <<= 16; chip_id |= pcn_csr_read16(sc, PCN_CSR_CHIPID0); } else { /* We're in 32-bit mode. 
*/ chip_id = pcn_csr_read(sc, PCN_CSR_CHIPID1); chip_id <<= 16; chip_id |= pcn_csr_read(sc, PCN_CSR_CHIPID0); } bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res); PCN_UNLOCK(sc); mtx_destroy(&sc->pcn_mtx); chip_id >>= 12; sc->pcn_type = chip_id & PART_MASK; switch(sc->pcn_type) { case Am79C971: case Am79C972: case Am79C973: case Am79C975: case Am79C976: case Am79C978: break; default: return(ENXIO); break; } device_set_desc(dev, t->pcn_name); return(0); } t++; } return(ENXIO); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int pcn_attach(dev) device_t dev; { u_int32_t eaddr[2]; u_int32_t command; struct pcn_softc *sc; struct ifnet *ifp; int unit, error = 0, rid; sc = device_get_softc(dev); unit = device_get_unit(dev); /* Initialize our mutex. */ mtx_init(&sc->pcn_mtx, device_get_nameunit(dev), MTX_DEF | MTX_RECURSE); PCN_LOCK(sc); /* * Handle power management nonsense. */ command = pci_read_config(dev, PCN_PCI_CAPID, 4) & 0x000000FF; if (command == 0x01) { command = pci_read_config(dev, PCN_PCI_PWRMGMTCTRL, 4); if (command & PCN_PSTATE_MASK) { u_int32_t iobase, membase, irq; /* Save important PCI config data. */ iobase = pci_read_config(dev, PCN_PCI_LOIO, 4); membase = pci_read_config(dev, PCN_PCI_LOMEM, 4); irq = pci_read_config(dev, PCN_PCI_INTLINE, 4); /* Reset the power state. */ printf("pcn%d: chip is in D%d power mode " "-- setting to D0\n", unit, command & PCN_PSTATE_MASK); command &= 0xFFFFFFFC; pci_write_config(dev, PCN_PCI_PWRMGMTCTRL, command, 4); /* Restore PCI config data. */ pci_write_config(dev, PCN_PCI_LOIO, iobase, 4); pci_write_config(dev, PCN_PCI_LOMEM, membase, 4); pci_write_config(dev, PCN_PCI_INTLINE, irq, 4); } } /* * Map control/status registers. 
*/ command = pci_read_config(dev, PCIR_COMMAND, 4); command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN); pci_write_config(dev, PCIR_COMMAND, command, 4); command = pci_read_config(dev, PCIR_COMMAND, 4); #ifdef PCN_USEIOSPACE if (!(command & PCIM_CMD_PORTEN)) { printf("pcn%d: failed to enable I/O ports!\n", unit); error = ENXIO;; goto fail; } #else if (!(command & PCIM_CMD_MEMEN)) { printf("pcn%d: failed to enable memory mapping!\n", unit); error = ENXIO;; goto fail; } #endif rid = PCN_RID; sc->pcn_res = bus_alloc_resource(dev, PCN_RES, &rid, 0, ~0, 1, RF_ACTIVE); if (sc->pcn_res == NULL) { printf("pcn%d: couldn't map ports/memory\n", unit); error = ENXIO; goto fail; } sc->pcn_btag = rman_get_bustag(sc->pcn_res); sc->pcn_bhandle = rman_get_bushandle(sc->pcn_res); /* Allocate interrupt */ rid = 0; sc->pcn_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE); if (sc->pcn_irq == NULL) { printf("pcn%d: couldn't map interrupt\n", unit); bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res); error = ENXIO; goto fail; } error = bus_setup_intr(dev, sc->pcn_irq, INTR_TYPE_NET, pcn_intr, sc, &sc->pcn_intrhand); if (error) { bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pcn_res); bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res); printf("pcn%d: couldn't set up irq\n", unit); goto fail; } /* Reset the adapter. */ pcn_reset(sc); /* * Get station address from the EEPROM. */ eaddr[0] = CSR_READ_4(sc, PCN_IO32_APROM00); eaddr[1] = CSR_READ_4(sc, PCN_IO32_APROM01); bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); /* * An AMD chip was detected. Inform the world. 
*/ printf("pcn%d: Ethernet address: %6D\n", unit, sc->arpcom.ac_enaddr, ":"); sc->pcn_unit = unit; callout_handle_init(&sc->pcn_stat_ch); sc->pcn_ldata = contigmalloc(sizeof(struct pcn_list_data), M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc->pcn_ldata == NULL) { printf("pcn%d: no memory for list buffers!\n", unit); bus_teardown_intr(dev, sc->pcn_irq, sc->pcn_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pcn_irq); bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res); error = ENXIO; goto fail; } bzero(sc->pcn_ldata, sizeof(struct pcn_list_data)); ifp = &sc->arpcom.ac_if; ifp->if_softc = sc; ifp->if_unit = unit; ifp->if_name = "pcn"; ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = pcn_ioctl; ifp->if_output = ether_output; ifp->if_start = pcn_start; ifp->if_watchdog = pcn_watchdog; ifp->if_init = pcn_init; ifp->if_baudrate = 10000000; ifp->if_snd.ifq_maxlen = PCN_TX_LIST_CNT - 1; /* * Do MII setup. */ if (mii_phy_probe(dev, &sc->pcn_miibus, pcn_ifmedia_upd, pcn_ifmedia_sts)) { printf("pcn%d: MII without any PHY!\n", sc->pcn_unit); bus_teardown_intr(dev, sc->pcn_irq, sc->pcn_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pcn_irq); bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res); error = ENXIO; goto fail; } /* * Call MI attach routine. 
*/ ether_ifattach(ifp, ETHER_BPF_SUPPORTED); callout_handle_init(&sc->pcn_stat_ch); PCN_UNLOCK(sc); return(0); fail: PCN_UNLOCK(sc); mtx_destroy(&sc->pcn_mtx); return(error); } static int pcn_detach(dev) device_t dev; { struct pcn_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = &sc->arpcom.ac_if; PCN_LOCK(sc); pcn_reset(sc); pcn_stop(sc); ether_ifdetach(ifp, ETHER_BPF_SUPPORTED); if (sc->pcn_miibus != NULL) { bus_generic_detach(dev); device_delete_child(dev, sc->pcn_miibus); } bus_teardown_intr(dev, sc->pcn_irq, sc->pcn_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pcn_irq); bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res); contigfree(sc->pcn_ldata, sizeof(struct pcn_list_data), M_DEVBUF); PCN_UNLOCK(sc); mtx_destroy(&sc->pcn_mtx); return(0); } /* * Initialize the transmit descriptors. */ static int pcn_list_tx_init(sc) struct pcn_softc *sc; { struct pcn_list_data *ld; struct pcn_ring_data *cd; int i; cd = &sc->pcn_cdata; ld = sc->pcn_ldata; for (i = 0; i < PCN_TX_LIST_CNT; i++) { cd->pcn_tx_chain[i] = NULL; ld->pcn_tx_list[i].pcn_tbaddr = 0; ld->pcn_tx_list[i].pcn_txctl = 0; ld->pcn_tx_list[i].pcn_txstat = 0; } cd->pcn_tx_prod = cd->pcn_tx_cons = cd->pcn_tx_cnt = 0; return(0); } /* * Initialize the RX descriptors and allocate mbufs for them. */ static int pcn_list_rx_init(sc) struct pcn_softc *sc; { struct pcn_list_data *ld; struct pcn_ring_data *cd; int i; ld = sc->pcn_ldata; cd = &sc->pcn_cdata; for (i = 0; i < PCN_RX_LIST_CNT; i++) { if (pcn_newbuf(sc, i, NULL) == ENOBUFS) return(ENOBUFS); } cd->pcn_rx_prod = 0; return(0); } /* * Initialize an RX descriptor and attach an MBUF cluster. 
*/ static int pcn_newbuf(sc, idx, m) struct pcn_softc *sc; int idx; struct mbuf *m; { struct mbuf *m_new = NULL; struct pcn_rx_desc *c; c = &sc->pcn_ldata->pcn_rx_list[idx]; if (m == NULL) { MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("pcn%d: no memory for rx list " "-- packet dropped!\n", sc->pcn_unit); return(ENOBUFS); } MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { printf("pcn%d: no memory for rx list " "-- packet dropped!\n", sc->pcn_unit); m_freem(m_new); return(ENOBUFS); } m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; } else { m_new = m; m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; m_new->m_data = m_new->m_ext.ext_buf; } m_adj(m_new, ETHER_ALIGN); sc->pcn_cdata.pcn_rx_chain[idx] = m_new; c->pcn_rbaddr = vtophys(mtod(m_new, caddr_t)); c->pcn_bufsz = (~(PCN_RXLEN) + 1) & PCN_RXLEN_BUFSZ; c->pcn_bufsz |= PCN_RXLEN_MBO; c->pcn_rxstat = PCN_RXSTAT_STP|PCN_RXSTAT_ENP|PCN_RXSTAT_OWN; return(0); } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. */ static void pcn_rxeof(sc) struct pcn_softc *sc; { struct ether_header *eh; struct mbuf *m; struct ifnet *ifp; struct pcn_rx_desc *cur_rx; int i; ifp = &sc->arpcom.ac_if; i = sc->pcn_cdata.pcn_rx_prod; while(PCN_OWN_RXDESC(&sc->pcn_ldata->pcn_rx_list[i])) { cur_rx = &sc->pcn_ldata->pcn_rx_list[i]; m = sc->pcn_cdata.pcn_rx_chain[i]; sc->pcn_cdata.pcn_rx_chain[i] = NULL; /* * If an error occurs, update stats, clear the * status word and leave the mbuf cluster in place: * it should simply get re-used next time this descriptor * comes up in the ring. */ if (cur_rx->pcn_rxstat & PCN_RXSTAT_ERR) { ifp->if_ierrors++; pcn_newbuf(sc, i, m); PCN_INC(i, PCN_RX_LIST_CNT); continue; } if (pcn_newbuf(sc, i, NULL)) { /* Ran out of mbufs; recycle this one. */ pcn_newbuf(sc, i, m); ifp->if_ierrors++; PCN_INC(i, PCN_RX_LIST_CNT); continue; } PCN_INC(i, PCN_RX_LIST_CNT); /* No errors; receive the packet. 
*/ ifp->if_ipackets++; eh = mtod(m, struct ether_header *); m->m_len = m->m_pkthdr.len = cur_rx->pcn_rxlen - ETHER_CRC_LEN; m->m_pkthdr.rcvif = ifp; /* Remove header from mbuf and pass it on. */ m_adj(m, sizeof(struct ether_header)); ether_input(ifp, eh, m); } sc->pcn_cdata.pcn_rx_prod = i; return; } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. */ static void pcn_txeof(sc) struct pcn_softc *sc; { struct pcn_tx_desc *cur_tx = NULL; struct ifnet *ifp; u_int32_t idx; ifp = &sc->arpcom.ac_if; /* Clear the timeout timer. */ ifp->if_timer = 0; /* * Go through our tx list and free mbufs for those * frames that have been transmitted. */ idx = sc->pcn_cdata.pcn_tx_cons; while (idx != sc->pcn_cdata.pcn_tx_prod) { cur_tx = &sc->pcn_ldata->pcn_tx_list[idx]; if (!PCN_OWN_TXDESC(cur_tx)) break; if (!(cur_tx->pcn_txctl & PCN_TXCTL_ENP)) { sc->pcn_cdata.pcn_tx_cnt--; PCN_INC(idx, PCN_TX_LIST_CNT); continue; } if (cur_tx->pcn_txctl & PCN_TXCTL_ERR) { ifp->if_oerrors++; if (cur_tx->pcn_txstat & PCN_TXSTAT_EXDEF) ifp->if_collisions++; if (cur_tx->pcn_txstat & PCN_TXSTAT_RTRY) ifp->if_collisions++; } ifp->if_collisions += cur_tx->pcn_txstat & PCN_TXSTAT_TRC; ifp->if_opackets++; if (sc->pcn_cdata.pcn_tx_chain[idx] != NULL) { m_freem(sc->pcn_cdata.pcn_tx_chain[idx]); sc->pcn_cdata.pcn_tx_chain[idx] = NULL; } sc->pcn_cdata.pcn_tx_cnt--; PCN_INC(idx, PCN_TX_LIST_CNT); ifp->if_timer = 0; } sc->pcn_cdata.pcn_tx_cons = idx; if (cur_tx != NULL) ifp->if_flags &= ~IFF_OACTIVE; return; } static void pcn_tick(xsc) void *xsc; { struct pcn_softc *sc; struct mii_data *mii; struct ifnet *ifp; sc = xsc; ifp = &sc->arpcom.ac_if; PCN_LOCK(sc); mii = device_get_softc(sc->pcn_miibus); mii_tick(mii); if (sc->pcn_link & !(mii->mii_media_status & IFM_ACTIVE)) sc->pcn_link = 0; if (!sc->pcn_link) { mii_pollstat(mii); if (mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) sc->pcn_link++; if (ifp->if_snd.ifq_head != NULL) 
pcn_start(ifp); } sc->pcn_stat_ch = timeout(pcn_tick, sc, hz); PCN_UNLOCK(sc); return; } static void pcn_intr(arg) void *arg; { struct pcn_softc *sc; struct ifnet *ifp; u_int32_t status; sc = arg; ifp = &sc->arpcom.ac_if; /* Supress unwanted interrupts */ if (!(ifp->if_flags & IFF_UP)) { pcn_stop(sc); return; } CSR_WRITE_4(sc, PCN_IO32_RAP, PCN_CSR_CSR); while ((status = CSR_READ_4(sc, PCN_IO32_RDP)) & PCN_CSR_INTR) { CSR_WRITE_4(sc, PCN_IO32_RDP, status); if (status & PCN_CSR_RINT) pcn_rxeof(sc); if (status & PCN_CSR_TINT) pcn_txeof(sc); if (status & PCN_CSR_ERR) { pcn_init(sc); break; } } if (ifp->if_snd.ifq_head != NULL) pcn_start(ifp); return; } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. */ static int pcn_encap(sc, m_head, txidx) struct pcn_softc *sc; struct mbuf *m_head; u_int32_t *txidx; { struct pcn_tx_desc *f = NULL; struct mbuf *m; int frag, cur, cnt = 0; /* * Start packing the mbufs in this chain into * the fragment pointers. Stop when we run out * of fragments or hit the end of the mbuf chain. */ m = m_head; cur = frag = *txidx; for (m = m_head; m != NULL; m = m->m_next) { if (m->m_len != 0) { if ((PCN_TX_LIST_CNT - (sc->pcn_cdata.pcn_tx_cnt + cnt)) < 2) return(ENOBUFS); f = &sc->pcn_ldata->pcn_tx_list[frag]; f->pcn_txctl = (~(m->m_len) + 1) & PCN_TXCTL_BUFSZ; f->pcn_txctl |= PCN_TXCTL_MBO; f->pcn_tbaddr = vtophys(mtod(m, vm_offset_t)); if (cnt == 0) f->pcn_txctl |= PCN_TXCTL_STP; else f->pcn_txctl |= PCN_TXCTL_OWN; cur = frag; PCN_INC(frag, PCN_TX_LIST_CNT); cnt++; } } if (m != NULL) return(ENOBUFS); sc->pcn_cdata.pcn_tx_chain[cur] = m_head; sc->pcn_ldata->pcn_tx_list[cur].pcn_txctl |= PCN_TXCTL_ENP|PCN_TXCTL_ADD_FCS|PCN_TXCTL_MORE_LTINT; sc->pcn_ldata->pcn_tx_list[*txidx].pcn_txctl |= PCN_TXCTL_OWN; sc->pcn_cdata.pcn_tx_cnt += cnt; *txidx = frag; return(0); } /* * Main transmit routine. 
To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit lists. We also save a * copy of the pointers since the transmit list fragment pointers are * physical addresses. */ static void pcn_start(ifp) struct ifnet *ifp; { struct pcn_softc *sc; struct mbuf *m_head = NULL; u_int32_t idx; sc = ifp->if_softc; PCN_LOCK(sc); if (!sc->pcn_link) { PCN_UNLOCK(sc); return; } idx = sc->pcn_cdata.pcn_tx_prod; if (ifp->if_flags & IFF_OACTIVE) { PCN_UNLOCK(sc); return; } while(sc->pcn_cdata.pcn_tx_chain[idx] == NULL) { IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; if (pcn_encap(sc, m_head, &idx)) { IF_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; break; } /* * If there's a BPF listener, bounce a copy of this frame * to him. */ if (ifp->if_bpf) bpf_mtap(ifp, m_head); } /* Transmit */ sc->pcn_cdata.pcn_tx_prod = idx; pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_TX|PCN_CSR_INTEN); /* * Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; PCN_UNLOCK(sc); return; } static void pcn_setfilt(ifp) struct ifnet *ifp; { struct pcn_softc *sc; sc = ifp->if_softc; /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) { PCN_CSR_SETBIT(sc, PCN_CSR_MODE, PCN_MODE_PROMISC); } else { PCN_CSR_CLRBIT(sc, PCN_CSR_MODE, PCN_MODE_PROMISC); } /* Set the capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) { PCN_CSR_CLRBIT(sc, PCN_CSR_MODE, PCN_MODE_RXNOBROAD); } else { PCN_CSR_SETBIT(sc, PCN_CSR_MODE, PCN_MODE_RXNOBROAD); } return; } static void pcn_init(xsc) void *xsc; { struct pcn_softc *sc = xsc; struct ifnet *ifp = &sc->arpcom.ac_if; struct mii_data *mii = NULL; PCN_LOCK(sc); /* * Cancel pending I/O and free all RX/TX buffers. 
*/ pcn_stop(sc); pcn_reset(sc); mii = device_get_softc(sc->pcn_miibus); /* Set MAC address */ pcn_csr_write(sc, PCN_CSR_PAR0, ((u_int16_t *)sc->arpcom.ac_enaddr)[0]); pcn_csr_write(sc, PCN_CSR_PAR1, ((u_int16_t *)sc->arpcom.ac_enaddr)[1]); pcn_csr_write(sc, PCN_CSR_PAR2, ((u_int16_t *)sc->arpcom.ac_enaddr)[2]); /* Init circular RX list. */ if (pcn_list_rx_init(sc) == ENOBUFS) { printf("pcn%d: initialization failed: no " "memory for rx buffers\n", sc->pcn_unit); pcn_stop(sc); PCN_UNLOCK(sc); return; } /* * Init tx descriptors. */ pcn_list_tx_init(sc); /* Set up the mode register. */ pcn_csr_write(sc, PCN_CSR_MODE, PCN_PORT_MII); /* Set up RX filter. */ pcn_setfilt(ifp); /* * Load the multicast filter. */ pcn_setmulti(sc); /* * Load the addresses of the RX and TX lists. */ pcn_csr_write(sc, PCN_CSR_RXADDR0, vtophys(&sc->pcn_ldata->pcn_rx_list[0]) & 0xFFFF); pcn_csr_write(sc, PCN_CSR_RXADDR1, (vtophys(&sc->pcn_ldata->pcn_rx_list[0]) >> 16) & 0xFFFF); pcn_csr_write(sc, PCN_CSR_TXADDR0, vtophys(&sc->pcn_ldata->pcn_tx_list[0]) & 0xFFFF); pcn_csr_write(sc, PCN_CSR_TXADDR1, (vtophys(&sc->pcn_ldata->pcn_tx_list[0]) >> 16) & 0xFFFF); /* Set the RX and TX ring sizes. */ pcn_csr_write(sc, PCN_CSR_RXRINGLEN, (~PCN_RX_LIST_CNT) + 1); pcn_csr_write(sc, PCN_CSR_TXRINGLEN, (~PCN_TX_LIST_CNT) + 1); /* We're not using the initialization block. */ pcn_csr_write(sc, PCN_CSR_IAB1, 0); /* Enable fast suspend mode. */ PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL2, PCN_EXTCTL2_FASTSPNDE); /* * Enable burst read and write. Also set the no underflow * bit. This will avoid transmit underruns in certain * conditions while still providing decent performance. */ PCN_BCR_SETBIT(sc, PCN_BCR_BUSCTL, PCN_BUSCTL_NOUFLOW| PCN_BUSCTL_BREAD|PCN_BUSCTL_BWRITE); /* Enable graceful recovery from underflow. */ PCN_CSR_SETBIT(sc, PCN_CSR_IMR, PCN_IMR_DXSUFLO); /* Enable auto-padding of short TX frames. */ PCN_CSR_SETBIT(sc, PCN_CSR_TFEAT, PCN_TFEAT_PAD_TX); /* Disable MII autoneg (we handle this ourselves). 
*/ PCN_BCR_CLRBIT(sc, PCN_BCR_MIICTL, PCN_MIICTL_DANAS); if (sc->pcn_type == Am79C978) pcn_bcr_write(sc, PCN_BCR_PHYSEL, PCN_PHYSEL_PCNET|PCN_PHY_HOMEPNA); /* Enable interrupts and start the controller running. */ pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_INTEN|PCN_CSR_START); mii_mediachg(mii); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; sc->pcn_stat_ch = timeout(pcn_tick, sc, hz); PCN_UNLOCK(sc); return; } /* * Set media options. */ static int pcn_ifmedia_upd(ifp) struct ifnet *ifp; { struct pcn_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->pcn_miibus); sc->pcn_link = 0; if (mii->mii_instance) { struct mii_softc *miisc; for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; miisc = LIST_NEXT(miisc, mii_list)) mii_phy_reset(miisc); } mii_mediachg(mii); return(0); } /* * Report current media status. */ static void pcn_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct pcn_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->pcn_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; return; } static int pcn_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct pcn_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii = NULL; int error = 0; PCN_LOCK(sc); switch(command) { case SIOCSIFADDR: case SIOCGIFADDR: case SIOCSIFMTU: error = ether_ioctl(ifp, command, data); break; case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->pcn_if_flags & IFF_PROMISC)) { PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND); pcn_setfilt(ifp); PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND); pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_INTEN|PCN_CSR_START); } else if (ifp->if_flags & IFF_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->pcn_if_flags & IFF_PROMISC) { PCN_CSR_SETBIT(sc, 
PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND); pcn_setfilt(ifp); PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND); pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_INTEN|PCN_CSR_START); } else if (!(ifp->if_flags & IFF_RUNNING)) pcn_init(sc); } else { if (ifp->if_flags & IFF_RUNNING) pcn_stop(sc); } sc->pcn_if_flags = ifp->if_flags; error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: pcn_setmulti(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->pcn_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; default: error = EINVAL; break; } PCN_UNLOCK(sc); return(error); } static void pcn_watchdog(ifp) struct ifnet *ifp; { struct pcn_softc *sc; sc = ifp->if_softc; PCN_LOCK(sc); ifp->if_oerrors++; printf("pcn%d: watchdog timeout\n", sc->pcn_unit); pcn_stop(sc); pcn_reset(sc); pcn_init(sc); if (ifp->if_snd.ifq_head != NULL) pcn_start(ifp); PCN_UNLOCK(sc); return; } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void pcn_stop(sc) struct pcn_softc *sc; { register int i; struct ifnet *ifp; ifp = &sc->arpcom.ac_if; PCN_LOCK(sc); ifp->if_timer = 0; untimeout(pcn_tick, sc, sc->pcn_stat_ch); PCN_CSR_SETBIT(sc, PCN_CSR_CSR, PCN_CSR_STOP); sc->pcn_link = 0; /* * Free data in the RX lists. */ for (i = 0; i < PCN_RX_LIST_CNT; i++) { if (sc->pcn_cdata.pcn_rx_chain[i] != NULL) { m_freem(sc->pcn_cdata.pcn_rx_chain[i]); sc->pcn_cdata.pcn_rx_chain[i] = NULL; } } bzero((char *)&sc->pcn_ldata->pcn_rx_list, sizeof(sc->pcn_ldata->pcn_rx_list)); /* * Free the TX list buffers. */ for (i = 0; i < PCN_TX_LIST_CNT; i++) { if (sc->pcn_cdata.pcn_tx_chain[i] != NULL) { m_freem(sc->pcn_cdata.pcn_tx_chain[i]); sc->pcn_cdata.pcn_tx_chain[i] = NULL; } } bzero((char *)&sc->pcn_ldata->pcn_tx_list, sizeof(sc->pcn_ldata->pcn_tx_list)); ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); PCN_UNLOCK(sc); return; } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. 
*/
static void pcn_shutdown(dev)
	device_t		dev;
{
	struct pcn_softc	*sc;

	sc = device_get_softc(dev);

	PCN_LOCK(sc);
	/*
	 * NOTE(review): reset is issued before stop here (the reverse of
	 * the watchdog path); this mirrors the original ordering --
	 * confirm against the PCnet documentation before reordering.
	 */
	pcn_reset(sc);
	pcn_stop(sc);
	PCN_UNLOCK(sc);

	return;
}
Index: head/sys/pci/if_rl.c
===================================================================
--- head/sys/pci/if_rl.c	(revision 71961)
+++ head/sys/pci/if_rl.c	(revision 71962)
@@ -1,1685 +1,1684 @@
/*
 * Copyright (c) 1997, 1998
 * Bill Paul . All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 * This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * RealTek 8129/8139 PCI NIC driver * * Supports several extremely cheap PCI 10/100 adapters based on * the RealTek chipset. Datasheets can be obtained from * www.realtek.com.tw. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is * probably the worst PCI ethernet controller ever made, with the possible * exception of the FEAST chip made by SMC. The 8139 supports bus-master * DMA, but it has a terrible interface that nullifies any performance * gains that bus-master DMA usually offers. * * For transmission, the chip offers a series of four TX descriptor * registers. Each transmit frame must be in a contiguous buffer, aligned * on a longword (32-bit) boundary. This means we almost always have to * do mbuf copies in order to transmit a frame, except in the unlikely * case where a) the packet fits into a single mbuf, and b) the packet * is 32-bit aligned within the mbuf's data area. The presence of only * four descriptor registers means that we can never have more than four * packets queued for transmission at any one time. * * Reception is not much better. The driver has to allocate a single large * buffer area (up to 64K in size) into which the chip will DMA received * frames. 
Because we don't know where within this region received packets * will begin or end, we have no choice but to copy data from the buffer * area into mbufs in order to pass the packets up to the higher protocol * levels. * * It's impossible given this rotten design to really achieve decent * performance at 100Mbps, unless you happen to have a 400Mhz PII or * some equally overmuscled CPU to drive it. * * On the bright side, the 8139 does have a built-in PHY, although * rather than using an MDIO serial interface like most other NICs, the * PHY registers are directly accessible through the 8139's register * space. The 8139 supports autonegotiation, as well as a 64-bit multicast * filter. * * The 8129 chip is an older version of the 8139 that uses an external PHY * chip. The 8129 has a serial MDIO interface for accessing the MII where * the 8139 lets you directly access the on-board PHY registers. We need * to select which interface to use depending on the chip type. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(rl, miibus, 1, 1, 1); /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* * Default to using PIO access for this driver. On SMP systems, * there appear to be problems with memory mapped mode: it looks like * doing too many memory mapped access back to back in rapid succession * can hang the bus. I'm inclined to blame this on crummy design/construction * on the part of RealTek. Memory mapped mode does appear to work on * uniprocessor systems though. */ #define RL_USEIOSPACE #include #ifndef lint static const char rcsid[] = "$FreeBSD$"; #endif /* * Various supported device vendors/types and their names. 
*/ static struct rl_type rl_devs[] = { { RT_VENDORID, RT_DEVICEID_8129, "RealTek 8129 10/100BaseTX" }, { RT_VENDORID, RT_DEVICEID_8139, "RealTek 8139 10/100BaseTX" }, { RT_VENDORID, RT_DEVICEID_8138, "RealTek 8139 10/100BaseTX CardBus" }, { ACCTON_VENDORID, ACCTON_DEVICEID_5030, "Accton MPX 5030/5038 10/100BaseTX" }, { DELTA_VENDORID, DELTA_DEVICEID_8139, "Delta Electronics 8139 10/100BaseTX" }, { ADDTRON_VENDORID, ADDTRON_DEVICEID_8139, "Addtron Technolgy 8139 10/100BaseTX" }, { 0, 0, NULL } }; static int rl_probe __P((device_t)); static int rl_attach __P((device_t)); static int rl_detach __P((device_t)); static int rl_encap __P((struct rl_softc *, struct mbuf * )); static void rl_rxeof __P((struct rl_softc *)); static void rl_txeof __P((struct rl_softc *)); static void rl_intr __P((void *)); static void rl_tick __P((void *)); static void rl_start __P((struct ifnet *)); static int rl_ioctl __P((struct ifnet *, u_long, caddr_t)); static void rl_init __P((void *)); static void rl_stop __P((struct rl_softc *)); static void rl_watchdog __P((struct ifnet *)); static void rl_shutdown __P((device_t)); static int rl_ifmedia_upd __P((struct ifnet *)); static void rl_ifmedia_sts __P((struct ifnet *, struct ifmediareq *)); static void rl_eeprom_putbyte __P((struct rl_softc *, int)); static void rl_eeprom_getword __P((struct rl_softc *, int, u_int16_t *)); static void rl_read_eeprom __P((struct rl_softc *, caddr_t, int, int, int)); static void rl_mii_sync __P((struct rl_softc *)); static void rl_mii_send __P((struct rl_softc *, u_int32_t, int)); static int rl_mii_readreg __P((struct rl_softc *, struct rl_mii_frame *)); static int rl_mii_writereg __P((struct rl_softc *, struct rl_mii_frame *)); static int rl_miibus_readreg __P((device_t, int, int)); static int rl_miibus_writereg __P((device_t, int, int, int)); static void rl_miibus_statchg __P((device_t)); static u_int8_t rl_calchash __P((caddr_t)); static void rl_setmulti __P((struct rl_softc *)); static void rl_reset 
__P((struct rl_softc *)); static int rl_list_tx_init __P((struct rl_softc *)); #ifdef RL_USEIOSPACE #define RL_RES SYS_RES_IOPORT #define RL_RID RL_PCI_LOIO #else #define RL_RES SYS_RES_MEMORY #define RL_RID RL_PCI_LOMEM #endif static device_method_t rl_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rl_probe), DEVMETHOD(device_attach, rl_attach), DEVMETHOD(device_detach, rl_detach), DEVMETHOD(device_shutdown, rl_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, rl_miibus_readreg), DEVMETHOD(miibus_writereg, rl_miibus_writereg), DEVMETHOD(miibus_statchg, rl_miibus_statchg), { 0, 0 } }; static driver_t rl_driver = { "rl", rl_methods, sizeof(struct rl_softc) }; static devclass_t rl_devclass; DRIVER_MODULE(if_rl, pci, rl_driver, rl_devclass, 0, 0); DRIVER_MODULE(if_rl, cardbus, rl_driver, rl_devclass, 0, 0); DRIVER_MODULE(miibus, rl, miibus_driver, miibus_devclass, 0, 0); #define EE_SET(x) \ CSR_WRITE_1(sc, RL_EECMD, \ CSR_READ_1(sc, RL_EECMD) | x) #define EE_CLR(x) \ CSR_WRITE_1(sc, RL_EECMD, \ CSR_READ_1(sc, RL_EECMD) & ~x) /* * Send a read command and address to the EEPROM, check for ACK. */ static void rl_eeprom_putbyte(sc, addr) struct rl_softc *sc; int addr; { register int d, i; d = addr | sc->rl_eecmd_read; /* * Feed in each bit and strobe the clock. */ for (i = 0x400; i; i >>= 1) { if (d & i) { EE_SET(RL_EE_DATAIN); } else { EE_CLR(RL_EE_DATAIN); } DELAY(100); EE_SET(RL_EE_CLK); DELAY(150); EE_CLR(RL_EE_CLK); DELAY(100); } return; } /* * Read a word of data stored in the EEPROM at address 'addr.' */ static void rl_eeprom_getword(sc, addr, dest) struct rl_softc *sc; int addr; u_int16_t *dest; { register int i; u_int16_t word = 0; /* Enter EEPROM access mode. */ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL); /* * Send address of word we want to read. 
*/ rl_eeprom_putbyte(sc, addr); CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL); /* * Start reading bits from EEPROM. */ for (i = 0x8000; i; i >>= 1) { EE_SET(RL_EE_CLK); DELAY(100); if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT) word |= i; EE_CLR(RL_EE_CLK); DELAY(100); } /* Turn off EEPROM access mode. */ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); *dest = word; return; } /* * Read a sequence of words from the EEPROM. */ static void rl_read_eeprom(sc, dest, off, cnt, swap) struct rl_softc *sc; caddr_t dest; int off; int cnt; int swap; { int i; u_int16_t word = 0, *ptr; for (i = 0; i < cnt; i++) { rl_eeprom_getword(sc, off + i, &word); ptr = (u_int16_t *)(dest + (i * 2)); if (swap) *ptr = ntohs(word); else *ptr = word; } return; } /* * MII access routines are provided for the 8129, which * doesn't have a built-in PHY. For the 8139, we fake things * up by diverting rl_phy_readreg()/rl_phy_writereg() to the * direct access PHY registers. */ #define MII_SET(x) \ CSR_WRITE_1(sc, RL_MII, \ CSR_READ_1(sc, RL_MII) | x) #define MII_CLR(x) \ CSR_WRITE_1(sc, RL_MII, \ CSR_READ_1(sc, RL_MII) & ~x) /* * Sync the PHYs by setting data bit and strobing the clock 32 times. */ static void rl_mii_sync(sc) struct rl_softc *sc; { register int i; MII_SET(RL_MII_DIR|RL_MII_DATAOUT); for (i = 0; i < 32; i++) { MII_SET(RL_MII_CLK); DELAY(1); MII_CLR(RL_MII_CLK); DELAY(1); } return; } /* * Clock a series of bits through the MII. */ static void rl_mii_send(sc, bits, cnt) struct rl_softc *sc; u_int32_t bits; int cnt; { int i; MII_CLR(RL_MII_CLK); for (i = (0x1 << (cnt - 1)); i; i >>= 1) { if (bits & i) { MII_SET(RL_MII_DATAOUT); } else { MII_CLR(RL_MII_DATAOUT); } DELAY(1); MII_CLR(RL_MII_CLK); DELAY(1); MII_SET(RL_MII_CLK); } } /* * Read an PHY register through the MII. */ static int rl_mii_readreg(sc, frame) struct rl_softc *sc; struct rl_mii_frame *frame; { int i, ack; RL_LOCK(sc); /* * Set up frame for RX. 
*/ frame->mii_stdelim = RL_MII_STARTDELIM; frame->mii_opcode = RL_MII_READOP; frame->mii_turnaround = 0; frame->mii_data = 0; CSR_WRITE_2(sc, RL_MII, 0); /* * Turn on data xmit. */ MII_SET(RL_MII_DIR); rl_mii_sync(sc); /* * Send command/address info. */ rl_mii_send(sc, frame->mii_stdelim, 2); rl_mii_send(sc, frame->mii_opcode, 2); rl_mii_send(sc, frame->mii_phyaddr, 5); rl_mii_send(sc, frame->mii_regaddr, 5); /* Idle bit */ MII_CLR((RL_MII_CLK|RL_MII_DATAOUT)); DELAY(1); MII_SET(RL_MII_CLK); DELAY(1); /* Turn off xmit. */ MII_CLR(RL_MII_DIR); /* Check for ack */ MII_CLR(RL_MII_CLK); DELAY(1); MII_SET(RL_MII_CLK); DELAY(1); ack = CSR_READ_2(sc, RL_MII) & RL_MII_DATAIN; /* * Now try reading data bits. If the ack failed, we still * need to clock through 16 cycles to keep the PHY(s) in sync. */ if (ack) { for(i = 0; i < 16; i++) { MII_CLR(RL_MII_CLK); DELAY(1); MII_SET(RL_MII_CLK); DELAY(1); } goto fail; } for (i = 0x8000; i; i >>= 1) { MII_CLR(RL_MII_CLK); DELAY(1); if (!ack) { if (CSR_READ_2(sc, RL_MII) & RL_MII_DATAIN) frame->mii_data |= i; DELAY(1); } MII_SET(RL_MII_CLK); DELAY(1); } fail: MII_CLR(RL_MII_CLK); DELAY(1); MII_SET(RL_MII_CLK); DELAY(1); RL_UNLOCK(sc); if (ack) return(1); return(0); } /* * Write to a PHY register through the MII. */ static int rl_mii_writereg(sc, frame) struct rl_softc *sc; struct rl_mii_frame *frame; { RL_LOCK(sc); /* * Set up frame for TX. */ frame->mii_stdelim = RL_MII_STARTDELIM; frame->mii_opcode = RL_MII_WRITEOP; frame->mii_turnaround = RL_MII_TURNAROUND; /* * Turn on data output. */ MII_SET(RL_MII_DIR); rl_mii_sync(sc); rl_mii_send(sc, frame->mii_stdelim, 2); rl_mii_send(sc, frame->mii_opcode, 2); rl_mii_send(sc, frame->mii_phyaddr, 5); rl_mii_send(sc, frame->mii_regaddr, 5); rl_mii_send(sc, frame->mii_turnaround, 2); rl_mii_send(sc, frame->mii_data, 16); /* Idle bit. */ MII_SET(RL_MII_CLK); DELAY(1); MII_CLR(RL_MII_CLK); DELAY(1); /* * Turn off xmit. 
*/ MII_CLR(RL_MII_DIR); RL_UNLOCK(sc); return(0); } static int rl_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct rl_softc *sc; struct rl_mii_frame frame; u_int16_t rval = 0; u_int16_t rl8139_reg = 0; sc = device_get_softc(dev); RL_LOCK(sc); if (sc->rl_type == RL_8139) { /* Pretend the internal PHY is only at address 0 */ if (phy) { RL_UNLOCK(sc); return(0); } switch(reg) { case MII_BMCR: rl8139_reg = RL_BMCR; break; case MII_BMSR: rl8139_reg = RL_BMSR; break; case MII_ANAR: rl8139_reg = RL_ANAR; break; case MII_ANER: rl8139_reg = RL_ANER; break; case MII_ANLPAR: rl8139_reg = RL_LPAR; break; case MII_PHYIDR1: case MII_PHYIDR2: RL_UNLOCK(sc); return(0); break; default: printf("rl%d: bad phy register\n", sc->rl_unit); RL_UNLOCK(sc); return(0); } rval = CSR_READ_2(sc, rl8139_reg); RL_UNLOCK(sc); return(rval); } bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; rl_mii_readreg(sc, &frame); RL_UNLOCK(sc); return(frame.mii_data); } static int rl_miibus_writereg(dev, phy, reg, data) device_t dev; int phy, reg, data; { struct rl_softc *sc; struct rl_mii_frame frame; u_int16_t rl8139_reg = 0; sc = device_get_softc(dev); RL_LOCK(sc); if (sc->rl_type == RL_8139) { /* Pretend the internal PHY is only at address 0 */ if (phy) { RL_UNLOCK(sc); return(0); } switch(reg) { case MII_BMCR: rl8139_reg = RL_BMCR; break; case MII_BMSR: rl8139_reg = RL_BMSR; break; case MII_ANAR: rl8139_reg = RL_ANAR; break; case MII_ANER: rl8139_reg = RL_ANER; break; case MII_ANLPAR: rl8139_reg = RL_LPAR; break; case MII_PHYIDR1: case MII_PHYIDR2: RL_UNLOCK(sc); return(0); break; default: printf("rl%d: bad phy register\n", sc->rl_unit); RL_UNLOCK(sc); return(0); } CSR_WRITE_2(sc, rl8139_reg, data); RL_UNLOCK(sc); return(0); } bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; frame.mii_data = data; rl_mii_writereg(sc, &frame); RL_UNLOCK(sc); return(0); } static void rl_miibus_statchg(dev) device_t dev; { return; 
}

/*
 * Calculate CRC of a multicast group address, return the upper 6 bits.
 */
static u_int8_t rl_calchash(addr)
	caddr_t			addr;
{
	u_int32_t		crc, carry;
	int			i, j;
	u_int8_t		c;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF; /* initial value */

	/*
	 * Bitwise CRC over the 6-byte ethernet address, LSB first.
	 * NOTE(review): the polynomial 0x04c11db6 combined with
	 * "| carry" differs from the canonical CRC-32 (0x04c11db7 with
	 * "^ carry").  This is the historical driver formula and
	 * presumably produces the bit positions the RealTek hash
	 * hardware expects -- verify against the datasheet before
	 * "fixing" it.
	 */
	for (i = 0; i < 6; i++) {
		c = *(addr + i);
		for (j = 0; j < 8; j++) {
			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
			crc <<= 1;
			c >>= 1;
			if (carry)
				crc = (crc ^ 0x04c11db6) | carry;
		}
	}

	/* return the filter bit position */
	return(crc >> 26);
}

/*
 * Program the 64-bit multicast hash filter.
 *
 * IFF_ALLMULTI/IFF_PROMISC short-circuit to "accept all multicast" by
 * writing all-ones into both MAR registers.  Otherwise each AF_LINK
 * multicast group address is hashed into one of the 64 filter bits.
 */
static void rl_setmulti(sc)
	struct rl_softc		*sc;
{
	struct ifnet		*ifp;
	int			h = 0;
	u_int32_t		hashes[2] = { 0, 0 };
	struct ifmultiaddr	*ifma;
	u_int32_t		rxfilt;
	int			mcnt = 0;

	ifp = &sc->arpcom.ac_if;

	rxfilt = CSR_READ_4(sc, RL_RXCFG);

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= RL_RXCFG_RX_MULTI;
		CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
		CSR_WRITE_4(sc, RL_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, RL_MAR4, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, RL_MAR0, 0);
	CSR_WRITE_4(sc, RL_MAR4, 0);

	/* now program new ones */
-	for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
-	    ifma = ifma->ifma_link.le_next) {
+	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = rl_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;
	}

	/* Only enable multicast RX if at least one group was programmed. */
	if (mcnt)
		rxfilt |= RL_RXCFG_RX_MULTI;
	else
		rxfilt &= ~RL_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
	CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, RL_MAR4, hashes[1]);

	return;
}

/*
 * Issue a software reset and poll (up to RL_TIMEOUT iterations, 10us
 * apart) until the chip clears the reset bit; warn if it never does.
 */
static void rl_reset(sc)
	struct rl_softc		*sc;
{
	register int		i;

	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);

	for (i = 0; i < RL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
			break;
	}
	if (i == RL_TIMEOUT)
		printf("rl%d: reset never completed!\n", sc->rl_unit);

	return;
}

/*
 * Probe for a RealTek 8129/8139
chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int rl_probe(dev) device_t dev; { struct rl_type *t; t = rl_devs; while(t->rl_name != NULL) { if ((pci_get_vendor(dev) == t->rl_vid) && (pci_get_device(dev) == t->rl_did)) { device_set_desc(dev, t->rl_name); return(0); } t++; } return(ENXIO); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int rl_attach(dev) device_t dev; { u_char eaddr[ETHER_ADDR_LEN]; u_int32_t command; struct rl_softc *sc; struct ifnet *ifp; u_int16_t rl_did = 0; int unit, error = 0, rid; sc = device_get_softc(dev); unit = device_get_unit(dev); bzero(sc, sizeof(struct rl_softc)); mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_DEF | MTX_RECURSE); RL_LOCK(sc); /* * Handle power management nonsense. */ if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { u_int32_t iobase, membase, irq; /* Save important PCI config data. */ iobase = pci_read_config(dev, RL_PCI_LOIO, 4); membase = pci_read_config(dev, RL_PCI_LOMEM, 4); irq = pci_read_config(dev, RL_PCI_INTLINE, 4); /* Reset the power state. */ printf("rl%d: chip is is in D%d power mode " "-- setting to D0\n", unit, pci_get_powerstate(dev)); pci_set_powerstate(dev, PCI_POWERSTATE_D0); /* Restore PCI config data. */ pci_write_config(dev, RL_PCI_LOIO, iobase, 4); pci_write_config(dev, RL_PCI_LOMEM, membase, 4); pci_write_config(dev, RL_PCI_INTLINE, irq, 4); } /* * Map control/status registers. 
*/ command = pci_read_config(dev, PCIR_COMMAND, 4); command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN); pci_write_config(dev, PCIR_COMMAND, command, 4); command = pci_read_config(dev, PCIR_COMMAND, 4); #ifdef RL_USEIOSPACE if (!(command & PCIM_CMD_PORTEN)) { printf("rl%d: failed to enable I/O ports!\n", unit); error = ENXIO; goto fail; } #else if (!(command & PCIM_CMD_MEMEN)) { printf("rl%d: failed to enable memory mapping!\n", unit); error = ENXIO; goto fail; } #endif rid = RL_RID; sc->rl_res = bus_alloc_resource(dev, RL_RES, &rid, 0, ~0, 1, RF_ACTIVE); if (sc->rl_res == NULL) { printf ("rl%d: couldn't map ports/memory\n", unit); error = ENXIO; goto fail; } /* Detect the Realtek 8139B. For some reason, this chip is very * unstable when left to autoselect the media * The best workaround is to set the device to the required * media type or to set it to the 10 Meg speed. */ if ((rman_get_end(sc->rl_res)-rman_get_start(sc->rl_res))==0xff) { printf("rl%d: Realtek 8139B detected. Warning, this may be unstable in autoselect mode\n", unit); } sc->rl_btag = rman_get_bustag(sc->rl_res); sc->rl_bhandle = rman_get_bushandle(sc->rl_res); rid = 0; sc->rl_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE); if (sc->rl_irq == NULL) { printf("rl%d: couldn't map interrupt\n", unit); bus_release_resource(dev, RL_RES, RL_RID, sc->rl_res); error = ENXIO; goto fail; } error = bus_setup_intr(dev, sc->rl_irq, INTR_TYPE_NET, rl_intr, sc, &sc->rl_intrhand); if (error) { bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq); bus_release_resource(dev, RL_RES, RL_RID, sc->rl_res); printf("rl%d: couldn't set up irq\n", unit); goto fail; } callout_handle_init(&sc->rl_stat_ch); /* Reset the adapter. */ rl_reset(sc); sc->rl_eecmd_read = RL_EECMD_READ_6BIT; rl_read_eeprom(sc, (caddr_t)&rl_did, 0, 1, 0); if (rl_did != 0x8129) sc->rl_eecmd_read = RL_EECMD_READ_8BIT; /* * Get station address from the EEPROM. 
*/ rl_read_eeprom(sc, (caddr_t)&eaddr, RL_EE_EADDR, 3, 0); /* * A RealTek chip was detected. Inform the world. */ printf("rl%d: Ethernet address: %6D\n", unit, eaddr, ":"); sc->rl_unit = unit; bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); /* * Now read the exact device type from the EEPROM to find * out if it's an 8129 or 8139. */ rl_read_eeprom(sc, (caddr_t)&rl_did, RL_EE_PCI_DID, 1, 0); if (rl_did == RT_DEVICEID_8139 || rl_did == ACCTON_DEVICEID_5030 || rl_did == DELTA_DEVICEID_8139 || rl_did == ADDTRON_DEVICEID_8139 || rl_did == RT_DEVICEID_8138) sc->rl_type = RL_8139; else if (rl_did == RT_DEVICEID_8129) sc->rl_type = RL_8129; else { printf("rl%d: unknown device ID: %x\n", unit, rl_did); bus_teardown_intr(dev, sc->rl_irq, sc->rl_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq); bus_release_resource(dev, RL_RES, RL_RID, sc->rl_res); error = ENXIO; goto fail; } sc->rl_cdata.rl_rx_buf = contigmalloc(RL_RXBUFLEN + 1518, M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc->rl_cdata.rl_rx_buf == NULL) { printf("rl%d: no memory for list buffers!\n", unit); bus_teardown_intr(dev, sc->rl_irq, sc->rl_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq); bus_release_resource(dev, RL_RES, RL_RID, sc->rl_res); error = ENXIO; goto fail; } /* Leave a few bytes before the start of the RX ring buffer. 
*/ sc->rl_cdata.rl_rx_buf_ptr = sc->rl_cdata.rl_rx_buf; sc->rl_cdata.rl_rx_buf += sizeof(u_int64_t); /* Do MII setup */ if (mii_phy_probe(dev, &sc->rl_miibus, rl_ifmedia_upd, rl_ifmedia_sts)) { printf("rl%d: MII without any phy!\n", sc->rl_unit); bus_teardown_intr(dev, sc->rl_irq, sc->rl_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq); bus_release_resource(dev, RL_RES, RL_RID, sc->rl_res); free(sc->rl_cdata.rl_rx_buf, M_DEVBUF); error = ENXIO; goto fail; } ifp = &sc->arpcom.ac_if; ifp->if_softc = sc; ifp->if_unit = unit; ifp->if_name = "rl"; ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = rl_ioctl; ifp->if_output = ether_output; ifp->if_start = rl_start; ifp->if_watchdog = rl_watchdog; ifp->if_init = rl_init; ifp->if_baudrate = 10000000; ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; /* * Call MI attach routine. */ ether_ifattach(ifp, ETHER_BPF_SUPPORTED); RL_UNLOCK(sc); return(0); fail: RL_UNLOCK(sc); mtx_destroy(&sc->rl_mtx); return(error); } static int rl_detach(dev) device_t dev; { struct rl_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); RL_LOCK(sc); ifp = &sc->arpcom.ac_if; ether_ifdetach(ifp, ETHER_BPF_SUPPORTED); rl_stop(sc); bus_generic_detach(dev); device_delete_child(dev, sc->rl_miibus); bus_teardown_intr(dev, sc->rl_irq, sc->rl_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq); bus_release_resource(dev, RL_RES, RL_RID, sc->rl_res); contigfree(sc->rl_cdata.rl_rx_buf, RL_RXBUFLEN + 32, M_DEVBUF); RL_UNLOCK(sc); mtx_destroy(&sc->rl_mtx); return(0); } /* * Initialize the transmit descriptors. 
*/
static int rl_list_tx_init(sc)
	struct rl_softc		*sc;
{
	struct rl_chain_data	*cd;
	int			i;

	cd = &sc->rl_cdata;
	/* Clear the software mbuf slots and the four TX address registers. */
	for (i = 0; i < RL_TX_LIST_CNT; i++) {
		cd->rl_tx_chain[i] = NULL;
		CSR_WRITE_4(sc,
		    RL_TXADDR0 + (i * sizeof(u_int32_t)), 0x0000000);
	}

	/* Producer (cur_tx) and consumer (last_tx) both start at slot 0. */
	sc->rl_cdata.cur_tx = 0;
	sc->rl_cdata.last_tx = 0;

	return(0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 *
 * You know there's something wrong with a PCI bus-master chip design
 * when you have to use m_devget().
 *
 * The receive operation is badly documented in the datasheet, so I'll
 * attempt to document it here. The driver provides a buffer area and
 * places its base address in the RX buffer start address register.
 * The chip then begins copying frames into the RX buffer. Each frame
 * is preceeded by a 32-bit RX status word which specifies the length
 * of the frame and certain other status bits. Each frame (starting with
 * the status word) is also 32-bit aligned. The frame length is in the
 * first 16 bits of the status word; the lower 15 bits correspond with
 * the 'rx status register' mentioned in the datasheet.
 *
 * Note: to make the Alpha happy, the frame payload needs to be aligned
 * on a 32-bit boundary. To achieve this, we cheat a bit by copying from
 * the ring buffer starting at an address two bytes before the actual
 * data location. We can then shave off the first two bytes using m_adj().
 * The reason we do this is because m_devget() doesn't let us specify an
 * offset into the mbuf storage space, so we have to artificially create
 * one. The ring is allocated in such a way that there are a few unused
 * bytes of space preceecing it so that it will be safe for us to do the
 * 2-byte backstep even if reading from the ring at offset 0.
*/ static void rl_rxeof(sc) struct rl_softc *sc; { struct ether_header *eh; struct mbuf *m; struct ifnet *ifp; int total_len = 0; u_int32_t rxstat; caddr_t rxbufpos; int wrap = 0; u_int16_t cur_rx; u_int16_t limit; u_int16_t rx_bytes = 0, max_bytes; ifp = &sc->arpcom.ac_if; cur_rx = (CSR_READ_2(sc, RL_CURRXADDR) + 16) % RL_RXBUFLEN; /* Do not try to read past this point. */ limit = CSR_READ_2(sc, RL_CURRXBUF) % RL_RXBUFLEN; if (limit < cur_rx) max_bytes = (RL_RXBUFLEN - cur_rx) + limit; else max_bytes = limit - cur_rx; while((CSR_READ_1(sc, RL_COMMAND) & RL_CMD_EMPTY_RXBUF) == 0) { rxbufpos = sc->rl_cdata.rl_rx_buf + cur_rx; rxstat = *(u_int32_t *)rxbufpos; /* * Here's a totally undocumented fact for you. When the * RealTek chip is in the process of copying a packet into * RAM for you, the length will be 0xfff0. If you spot a * packet header with this value, you need to stop. The * datasheet makes absolutely no mention of this and * RealTek should be shot for this. */ if ((u_int16_t)(rxstat >> 16) == RL_RXSTAT_UNFINISHED) break; if (!(rxstat & RL_RXSTAT_RXOK)) { ifp->if_ierrors++; rl_init(sc); return; } /* No errors; receive the packet. */ total_len = rxstat >> 16; rx_bytes += total_len + 4; /* * XXX The RealTek chip includes the CRC with every * received frame, and there's no way to turn this * behavior off (at least, I can't find anything in * the manual that explains how to do it) so we have * to trim off the CRC manually. */ total_len -= ETHER_CRC_LEN; /* * Avoid trying to read more bytes than we know * the chip has prepared for us. */ if (rx_bytes > max_bytes) break; rxbufpos = sc->rl_cdata.rl_rx_buf + ((cur_rx + sizeof(u_int32_t)) % RL_RXBUFLEN); if (rxbufpos == (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN)) rxbufpos = sc->rl_cdata.rl_rx_buf; wrap = (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN) - rxbufpos; if (total_len > wrap) { /* * Fool m_devget() into thinking we want to copy * the whole buffer so we don't end up fragmenting * the data. 
*/ m = m_devget(rxbufpos - RL_ETHER_ALIGN, total_len + RL_ETHER_ALIGN, 0, ifp, NULL); if (m == NULL) { ifp->if_ierrors++; printf("rl%d: out of mbufs, tried to " "copy %d bytes\n", sc->rl_unit, wrap); } else { m_adj(m, RL_ETHER_ALIGN); m_copyback(m, wrap, total_len - wrap, sc->rl_cdata.rl_rx_buf); } cur_rx = (total_len - wrap + ETHER_CRC_LEN); } else { m = m_devget(rxbufpos - RL_ETHER_ALIGN, total_len + RL_ETHER_ALIGN, 0, ifp, NULL); if (m == NULL) { ifp->if_ierrors++; printf("rl%d: out of mbufs, tried to " "copy %d bytes\n", sc->rl_unit, total_len); } else m_adj(m, RL_ETHER_ALIGN); cur_rx += total_len + 4 + ETHER_CRC_LEN; } /* * Round up to 32-bit boundary. */ cur_rx = (cur_rx + 3) & ~3; CSR_WRITE_2(sc, RL_CURRXADDR, cur_rx - 16); if (m == NULL) continue; eh = mtod(m, struct ether_header *); ifp->if_ipackets++; /* Remove header from mbuf and pass it on. */ m_adj(m, sizeof(struct ether_header)); ether_input(ifp, eh, m); } return; } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. */ static void rl_txeof(sc) struct rl_softc *sc; { struct ifnet *ifp; u_int32_t txstat; ifp = &sc->arpcom.ac_if; /* Clear the timeout timer. */ ifp->if_timer = 0; /* * Go through our tx list and free mbufs for those * frames that have been uploaded. */ do { txstat = CSR_READ_4(sc, RL_LAST_TXSTAT(sc)); if (!(txstat & (RL_TXSTAT_TX_OK| RL_TXSTAT_TX_UNDERRUN|RL_TXSTAT_TXABRT))) break; ifp->if_collisions += (txstat & RL_TXSTAT_COLLCNT) >> 24; if (RL_LAST_TXMBUF(sc) != NULL) { m_freem(RL_LAST_TXMBUF(sc)); RL_LAST_TXMBUF(sc) = NULL; } if (txstat & RL_TXSTAT_TX_OK) ifp->if_opackets++; else { int oldthresh; ifp->if_oerrors++; if ((txstat & RL_TXSTAT_TXABRT) || (txstat & RL_TXSTAT_OUTOFWIN)) CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); oldthresh = sc->rl_txthresh; /* error recovery */ rl_reset(sc); rl_init(sc); /* * If there was a transmit underrun, * bump the TX threshold. 
*/ if (txstat & RL_TXSTAT_TX_UNDERRUN) sc->rl_txthresh = oldthresh + 32; return; } RL_INC(sc->rl_cdata.last_tx); ifp->if_flags &= ~IFF_OACTIVE; } while (sc->rl_cdata.last_tx != sc->rl_cdata.cur_tx); return; } static void rl_tick(xsc) void *xsc; { struct rl_softc *sc; struct mii_data *mii; sc = xsc; RL_LOCK(sc); mii = device_get_softc(sc->rl_miibus); mii_tick(mii); sc->rl_stat_ch = timeout(rl_tick, sc, hz); RL_UNLOCK(sc); return; } static void rl_intr(arg) void *arg; { struct rl_softc *sc; struct ifnet *ifp; u_int16_t status; sc = arg; RL_LOCK(sc); ifp = &sc->arpcom.ac_if; /* Disable interrupts. */ CSR_WRITE_2(sc, RL_IMR, 0x0000); for (;;) { status = CSR_READ_2(sc, RL_ISR); if (status) CSR_WRITE_2(sc, RL_ISR, status); if ((status & RL_INTRS) == 0) break; if (status & RL_ISR_RX_OK) rl_rxeof(sc); if (status & RL_ISR_RX_ERR) rl_rxeof(sc); if ((status & RL_ISR_TX_OK) || (status & RL_ISR_TX_ERR)) rl_txeof(sc); if (status & RL_ISR_SYSTEM_ERR) { rl_reset(sc); rl_init(sc); } } /* Re-enable interrupts. */ CSR_WRITE_2(sc, RL_IMR, RL_INTRS); if (ifp->if_snd.ifq_head != NULL) rl_start(ifp); RL_UNLOCK(sc); return; } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. */ static int rl_encap(sc, m_head) struct rl_softc *sc; struct mbuf *m_head; { struct mbuf *m_new = NULL; /* * The RealTek is brain damaged and wants longword-aligned * TX buffers, plus we can only have one fragment buffer * per packet. We have to copy pretty much all the time. 
*/ MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("rl%d: no memory for tx list", sc->rl_unit); return(1); } if (m_head->m_pkthdr.len > MHLEN) { MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { m_freem(m_new); printf("rl%d: no memory for tx list", sc->rl_unit); return(1); } } m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t)); m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len; m_freem(m_head); m_head = m_new; /* Pad frames to at least 60 bytes. */ if (m_head->m_pkthdr.len < RL_MIN_FRAMELEN) { /* * Make security concious people happy: zero out the * bytes in the pad area, since we don't know what * this mbuf cluster buffer's previous user might * have left in it. */ bzero(mtod(m_head, char *) + m_head->m_pkthdr.len, RL_MIN_FRAMELEN - m_head->m_pkthdr.len); m_head->m_pkthdr.len += (RL_MIN_FRAMELEN - m_head->m_pkthdr.len); m_head->m_len = m_head->m_pkthdr.len; } RL_CUR_TXMBUF(sc) = m_head; return(0); } /* * Main transmit routine. */ static void rl_start(ifp) struct ifnet *ifp; { struct rl_softc *sc; struct mbuf *m_head = NULL; sc = ifp->if_softc; RL_LOCK(sc); while(RL_CUR_TXMBUF(sc) == NULL) { IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; if (rl_encap(sc, m_head)) { IF_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; break; } /* * If there's a BPF listener, bounce a copy of this frame * to him. */ if (ifp->if_bpf) bpf_mtap(ifp, RL_CUR_TXMBUF(sc)); /* * Transmit the frame. */ CSR_WRITE_4(sc, RL_CUR_TXADDR(sc), vtophys(mtod(RL_CUR_TXMBUF(sc), caddr_t))); CSR_WRITE_4(sc, RL_CUR_TXSTAT(sc), RL_TXTHRESH(sc->rl_txthresh) | RL_CUR_TXMBUF(sc)->m_pkthdr.len); RL_INC(sc->rl_cdata.cur_tx); } /* * We broke out of the loop because all our TX slots are * full. Mark the NIC as busy until it drains some of the * packets from the queue. */ if (RL_CUR_TXMBUF(sc) != NULL) ifp->if_flags |= IFF_OACTIVE; /* * Set a timeout in case the chip goes out to lunch. 
*/
	ifp->if_timer = 5;

	RL_UNLOCK(sc);

	return;
}

/*
 * Bring the interface up: program the station address, set up the
 * RX buffer and TX descriptors, program the RX filter, enable
 * interrupts and start the periodic tick.
 */
static void rl_init(xsc)
	void			*xsc;
{
	struct rl_softc		*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct mii_data		*mii;
	int			i;
	u_int32_t		rxcfg = 0;

	RL_LOCK(sc);
	mii = device_get_softc(sc->rl_miibus);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	rl_stop(sc);

	/* Init our MAC address */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, RL_IDR0 + i, sc->arpcom.ac_enaddr[i]);
	}

	/* Init the RX buffer pointer register. */
	CSR_WRITE_4(sc, RL_RXADDR, vtophys(sc->rl_cdata.rl_rx_buf));

	/* Init TX descriptors. */
	rl_list_tx_init(sc);

	/*
	 * Enable transmit and receive.
	 */
	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);

	/*
	 * Set the initial TX and RX configuration.
	 */
	CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG);
	CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG);

	/* Set the individual bit to receive frames for this host only. */
	rxcfg = CSR_READ_4(sc, RL_RXCFG);
	rxcfg |= RL_RXCFG_RX_INDIV;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		rxcfg |= RL_RXCFG_RX_ALLPHYS;
		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
	} else {
		rxcfg &= ~RL_RXCFG_RX_ALLPHYS;
		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
	}

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		rxcfg |= RL_RXCFG_RX_BROAD;
		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
	} else {
		rxcfg &= ~RL_RXCFG_RX_BROAD;
		CSR_WRITE_4(sc, RL_RXCFG, rxcfg);
	}

	/*
	 * Program the multicast filter, if necessary.
	 */
	rl_setmulti(sc);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_2(sc, RL_IMR, RL_INTRS);

	/* Set initial TX threshold */
	sc->rl_txthresh = RL_TX_THRESH_INIT;

	/* Start RX/TX process. */
	CSR_WRITE_4(sc, RL_MISSEDPKT, 0);

	/* Enable receiver and transmitter.
*/
	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB);

	mii_mediachg(mii);

	CSR_WRITE_1(sc, RL_CFG1, RL_CFG1_DRVLOAD|RL_CFG1_FULLDUPLEX);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start the one-second MII tick. */
	sc->rl_stat_ch = timeout(rl_tick, sc, hz);

	RL_UNLOCK(sc);

	return;
}

/*
 * Set media options.
 */
static int rl_ifmedia_upd(ifp)
	struct ifnet		*ifp;
{
	struct rl_softc		*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->rl_miibus);

	mii_mediachg(mii);

	return(0);
}

/*
 * Report current media status.
 */
static void rl_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct rl_softc		*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->rl_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

/*
 * Handle socket ioctls: address/MTU changes, interface flag
 * changes, multicast list updates and media selection.
 */
static int rl_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct rl_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int			error = 0;

	RL_LOCK(sc);

	switch(command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
	case SIOCSIFMTU:
		error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			rl_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				rl_stop(sc);
		}
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		rl_setmulti(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->rl_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	RL_UNLOCK(sc);

	return(error);
}

/*
 * TX watchdog: the timeout armed in rl_start() expired before the
 * chip finished transmitting, so reap completed descriptors and
 * reinitialize the hardware.
 */
static void rl_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct rl_softc		*sc;

	sc = ifp->if_softc;
	RL_LOCK(sc);
	printf("rl%d: watchdog timeout\n", sc->rl_unit);
	ifp->if_oerrors++;

	rl_txeof(sc);
	rl_rxeof(sc);
	rl_init(sc);

	RL_UNLOCK(sc);

	return;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
*/
static void rl_stop(sc)
	struct rl_softc		*sc;
{
	register int		i;
	struct ifnet		*ifp;

	RL_LOCK(sc);
	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;

	/* Cancel the periodic tick, then quiesce the chip. */
	untimeout(rl_tick, sc, sc->rl_stat_ch);

	CSR_WRITE_1(sc, RL_COMMAND, 0x00);
	CSR_WRITE_2(sc, RL_IMR, 0x0000);

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < RL_TX_LIST_CNT; i++) {
		if (sc->rl_cdata.rl_tx_chain[i] != NULL) {
			m_freem(sc->rl_cdata.rl_tx_chain[i]);
			sc->rl_cdata.rl_tx_chain[i] = NULL;
			CSR_WRITE_4(sc, RL_TXADDR0 + i, 0x0000000);
		}
	}

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	RL_UNLOCK(sc);

	return;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void rl_shutdown(dev)
	device_t		dev;
{
	struct rl_softc		*sc;

	sc = device_get_softc(dev);

	rl_stop(sc);

	return;
}
Index: head/sys/pci/if_sf.c
===================================================================
--- head/sys/pci/if_sf.c (revision 71961)
+++ head/sys/pci/if_sf.c (revision 71962)
@@ -1,1512 +1,1511 @@
/*
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul .  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
* * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * Adaptec AIC-6915 "Starfire" PCI fast ethernet driver for FreeBSD. * Programming manual is available from: * ftp.adaptec.com:/pub/BBS/userguides/aic6915_pg.pdf. * * Written by Bill Paul * Department of Electical Engineering * Columbia University, New York City */ /* * The Adaptec AIC-6915 "Starfire" is a 64-bit 10/100 PCI ethernet * controller designed with flexibility and reducing CPU load in mind. * The Starfire offers high and low priority buffer queues, a * producer/consumer index mechanism and several different buffer * queue and completion queue descriptor types. Any one of a number * of different driver designs can be used, depending on system and * OS requirements. This driver makes use of type0 transmit frame * descriptors (since BSD fragments packets across an mbuf chain) * and two RX buffer queues prioritized on size (one queue for small * frames that will fit into a single mbuf, another with full size * mbuf clusters for everything else). The producer/consumer indexes * and completion queues are also used. 
* * One downside to the Starfire has to do with alignment: buffer * queues must be aligned on 256-byte boundaries, and receive buffers * must be aligned on longword boundaries. The receive buffer alignment * causes problems on the Alpha platform, where the packet payload * should be longword aligned. There is no simple way around this. * * For receive filtering, the Starfire offers 16 perfect filter slots * and a 512-bit hash table. * * The Starfire has no internal transceiver, relying instead on an * external MII-based transceiver. Accessing registers on external * PHYs is done through a special register map rather than with the * usual bitbang MDIO method. * * Acesssing the registers on the Starfire is a little tricky. The * Starfire has a 512K internal register space. When programmed for * PCI memory mapped mode, the entire register space can be accessed * directly. However in I/O space mode, only 256 bytes are directly * mapped into PCI I/O space. The other registers can be accessed * indirectly using the SF_INDIRECTIO_ADDR and SF_INDIRECTIO_DATA * registers inside the 256-byte I/O window. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include #include #include #include #include /* "controller miibus0" required. See GENERIC if you get errors here. 
*/ #include "miibus_if.h" #include #include #define SF_USEIOSPACE #include MODULE_DEPEND(sf, miibus, 1, 1, 1); #ifndef lint static const char rcsid[] = "$FreeBSD$"; #endif static struct sf_type sf_devs[] = { { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX" }, { 0, 0, NULL } }; static int sf_probe __P((device_t)); static int sf_attach __P((device_t)); static int sf_detach __P((device_t)); static void sf_intr __P((void *)); static void sf_stats_update __P((void *)); static void sf_rxeof __P((struct sf_softc *)); static void sf_txeof __P((struct sf_softc *)); static int sf_encap __P((struct sf_softc *, struct sf_tx_bufdesc_type0 *, struct mbuf *)); static void sf_start __P((struct ifnet *)); static int sf_ioctl __P((struct ifnet *, u_long, caddr_t)); static void sf_init __P((void *)); static void sf_stop __P((struct sf_softc *)); static void sf_watchdog __P((struct ifnet *)); static void sf_shutdown __P((device_t)); static int sf_ifmedia_upd __P((struct ifnet *)); static void sf_ifmedia_sts __P((struct ifnet *, struct ifmediareq *)); static void sf_reset __P((struct sf_softc *)); static int sf_init_rx_ring __P((struct sf_softc *)); static void sf_init_tx_ring __P((struct sf_softc *)); static int sf_newbuf __P((struct sf_softc *, struct sf_rx_bufdesc_type0 *, struct mbuf *)); static void sf_setmulti __P((struct sf_softc *)); static int sf_setperf __P((struct sf_softc *, int, caddr_t)); static int sf_sethash __P((struct sf_softc *, caddr_t, int)); #ifdef notdef static int sf_setvlan __P((struct sf_softc *, int, u_int32_t)); #endif static u_int8_t sf_read_eeprom __P((struct sf_softc *, int)); static u_int32_t sf_calchash __P((caddr_t)); static int sf_miibus_readreg __P((device_t, int, int)); static int sf_miibus_writereg __P((device_t, int, int, int)); static void sf_miibus_statchg __P((device_t)); static u_int32_t csr_read_4 __P((struct sf_softc *, int)); static void csr_write_4 __P((struct sf_softc *, int, u_int32_t)); #ifdef SF_USEIOSPACE #define 
SF_RES SYS_RES_IOPORT #define SF_RID SF_PCI_LOIO #else #define SF_RES SYS_RES_MEMORY #define SF_RID SF_PCI_LOMEM #endif static device_method_t sf_methods[] = { /* Device interface */ DEVMETHOD(device_probe, sf_probe), DEVMETHOD(device_attach, sf_attach), DEVMETHOD(device_detach, sf_detach), DEVMETHOD(device_shutdown, sf_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, sf_miibus_readreg), DEVMETHOD(miibus_writereg, sf_miibus_writereg), DEVMETHOD(miibus_statchg, sf_miibus_statchg), { 0, 0 } }; static driver_t sf_driver = { "sf", sf_methods, sizeof(struct sf_softc), }; static devclass_t sf_devclass; DRIVER_MODULE(if_sf, pci, sf_driver, sf_devclass, 0, 0); DRIVER_MODULE(miibus, sf, miibus_driver, miibus_devclass, 0, 0); #define SF_SETBIT(sc, reg, x) \ csr_write_4(sc, reg, csr_read_4(sc, reg) | x) #define SF_CLRBIT(sc, reg, x) \ csr_write_4(sc, reg, csr_read_4(sc, reg) & ~x) static u_int32_t csr_read_4(sc, reg) struct sf_softc *sc; int reg; { u_int32_t val; #ifdef SF_USEIOSPACE CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE); val = CSR_READ_4(sc, SF_INDIRECTIO_DATA); #else val = CSR_READ_4(sc, (reg + SF_RMAP_INTREG_BASE)); #endif return(val); } static u_int8_t sf_read_eeprom(sc, reg) struct sf_softc *sc; int reg; { u_int8_t val; val = (csr_read_4(sc, SF_EEADDR_BASE + (reg & 0xFFFFFFFC)) >> (8 * (reg & 3))) & 0xFF; return(val); } static void csr_write_4(sc, reg, val) struct sf_softc *sc; int reg; u_int32_t val; { #ifdef SF_USEIOSPACE CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE); CSR_WRITE_4(sc, SF_INDIRECTIO_DATA, val); #else CSR_WRITE_4(sc, (reg + SF_RMAP_INTREG_BASE), val); #endif return; } static u_int32_t sf_calchash(addr) caddr_t addr; { u_int32_t crc, carry; int i, j; u_int8_t c; /* Compute CRC for the address value. 
*/ crc = 0xFFFFFFFF; /* initial value */ for (i = 0; i < 6; i++) { c = *(addr + i); for (j = 0; j < 8; j++) { carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01); crc <<= 1; c >>= 1; if (carry) crc = (crc ^ 0x04c11db6) | carry; } } /* return the filter bit position */ return(crc >> 23 & 0x1FF); } /* * Copy the address 'mac' into the perfect RX filter entry at * offset 'idx.' The perfect filter only has 16 entries so do * some sanity tests. */ static int sf_setperf(sc, idx, mac) struct sf_softc *sc; int idx; caddr_t mac; { u_int16_t *p; if (idx < 0 || idx > SF_RXFILT_PERFECT_CNT) return(EINVAL); if (mac == NULL) return(EINVAL); p = (u_int16_t *)mac; csr_write_4(sc, SF_RXFILT_PERFECT_BASE + (idx * SF_RXFILT_PERFECT_SKIP), htons(p[2])); csr_write_4(sc, SF_RXFILT_PERFECT_BASE + (idx * SF_RXFILT_PERFECT_SKIP) + 4, htons(p[1])); csr_write_4(sc, SF_RXFILT_PERFECT_BASE + (idx * SF_RXFILT_PERFECT_SKIP) + 8, htons(p[0])); return(0); } /* * Set the bit in the 512-bit hash table that corresponds to the * specified mac address 'mac.' If 'prio' is nonzero, update the * priority hash table instead of the filter hash table. */ static int sf_sethash(sc, mac, prio) struct sf_softc *sc; caddr_t mac; int prio; { u_int32_t h = 0; if (mac == NULL) return(EINVAL); h = sf_calchash(mac); if (prio) { SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_PRIOOFF + (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF))); } else { SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_ADDROFF + (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF))); } return(0); } #ifdef notdef /* * Set a VLAN tag in the receive filter. 
*/ static int sf_setvlan(sc, idx, vlan) struct sf_softc *sc; int idx; u_int32_t vlan; { if (idx < 0 || idx >> SF_RXFILT_HASH_CNT) return(EINVAL); csr_write_4(sc, SF_RXFILT_HASH_BASE + (idx * SF_RXFILT_HASH_SKIP) + SF_RXFILT_HASH_VLANOFF, vlan); return(0); } #endif static int sf_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct sf_softc *sc; int i; u_int32_t val = 0; sc = device_get_softc(dev); for (i = 0; i < SF_TIMEOUT; i++) { val = csr_read_4(sc, SF_PHY_REG(phy, reg)); if (val & SF_MII_DATAVALID) break; } if (i == SF_TIMEOUT) return(0); if ((val & 0x0000FFFF) == 0xFFFF) return(0); return(val & 0x0000FFFF); } static int sf_miibus_writereg(dev, phy, reg, val) device_t dev; int phy, reg, val; { struct sf_softc *sc; int i; int busy; sc = device_get_softc(dev); csr_write_4(sc, SF_PHY_REG(phy, reg), val); for (i = 0; i < SF_TIMEOUT; i++) { busy = csr_read_4(sc, SF_PHY_REG(phy, reg)); if (!(busy & SF_MII_BUSY)) break; } return(0); } static void sf_miibus_statchg(dev) device_t dev; { struct sf_softc *sc; struct mii_data *mii; sc = device_get_softc(dev); mii = device_get_softc(sc->sf_miibus); if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_FULLDUPLEX); csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_FDX); } else { SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_FULLDUPLEX); csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_HDX); } return; } static void sf_setmulti(sc) struct sf_softc *sc; { struct ifnet *ifp; int i; struct ifmultiaddr *ifma; u_int8_t dummy[] = { 0, 0, 0, 0, 0, 0 }; ifp = &sc->arpcom.ac_if; /* First zot all the existing filters. */ for (i = 1; i < SF_RXFILT_PERFECT_CNT; i++) sf_setperf(sc, i, (char *)&dummy); for (i = SF_RXFILT_HASH_BASE; i < (SF_RXFILT_HASH_MAX + 1); i += 4) csr_write_4(sc, i, 0); SF_CLRBIT(sc, SF_RXFILT, SF_RXFILT_ALLMULTI); /* Now program new ones. 
*/ if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { SF_SETBIT(sc, SF_RXFILT, SF_RXFILT_ALLMULTI); } else { i = 1; /* First find the tail of the list. */ - for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; - ifma = ifma->ifma_link.le_next) { + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_link.le_next == NULL) break; } /* Now traverse the list backwards. */ for (; ifma != NULL && ifma != (void *)&ifp->if_multiaddrs; ifma = (struct ifmultiaddr *)ifma->ifma_link.le_prev) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; /* * Program the first 15 multicast groups * into the perfect filter. For all others, * use the hash table. */ if (i < SF_RXFILT_PERFECT_CNT) { sf_setperf(sc, i, LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); i++; continue; } sf_sethash(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 0); } } return; } /* * Set media options. */ static int sf_ifmedia_upd(ifp) struct ifnet *ifp; { struct sf_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->sf_miibus); sc->sf_link = 0; if (mii->mii_instance) { struct mii_softc *miisc; for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; miisc = LIST_NEXT(miisc, mii_list)) mii_phy_reset(miisc); } mii_mediachg(mii); return(0); } /* * Report current media status. 
*/
static void sf_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct sf_softc		*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->sf_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

/*
 * Handle socket ioctls.  Promiscuous mode transitions are applied
 * by toggling the RX filter bit directly while the interface is
 * running, avoiding a full reinit.
 */
static int sf_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct sf_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int			error = 0;

	SF_LOCK(sc);

	switch(command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
	case SIOCSIFMTU:
		error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->sf_if_flags & IFF_PROMISC)) {
				/* Promiscuous mode just switched on. */
				SF_SETBIT(sc, SF_RXFILT, SF_RXFILT_PROMISC);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->sf_if_flags & IFF_PROMISC) {
				/* Promiscuous mode just switched off. */
				SF_CLRBIT(sc, SF_RXFILT, SF_RXFILT_PROMISC);
			} else if (!(ifp->if_flags & IFF_RUNNING))
				sf_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				sf_stop(sc);
		}
		/* Remember the flags so the next call can diff them. */
		sc->sf_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		sf_setmulti(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->sf_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	SF_UNLOCK(sc);

	return(error);
}

/*
 * Reset the MAC, then assert the PCI soft reset and poll until
 * it self-clears.
 */
static void sf_reset(sc)
	struct sf_softc		*sc;
{
	register int		i;

	csr_write_4(sc, SF_GEN_ETH_CTL, 0);
	SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);
	DELAY(1000);
	SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET);

	SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_RESET);
	for (i = 0; i < SF_TIMEOUT; i++) {
		DELAY(10);
		if (!(csr_read_4(sc, SF_PCI_DEVCFG) & SF_PCIDEVCFG_RESET))
			break;
	}

	if (i == SF_TIMEOUT)
		printf("sf%d: reset never completed!\n", sc->sf_unit);

	/* Wait a little while for the chip to get its brains in order.
*/ DELAY(1000); return; } /* * Probe for an Adaptec AIC-6915 chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. * We also check the subsystem ID so that we can identify exactly which * NIC has been found, if possible. */ static int sf_probe(dev) device_t dev; { struct sf_type *t; t = sf_devs; while(t->sf_name != NULL) { if ((pci_get_vendor(dev) == t->sf_vid) && (pci_get_device(dev) == t->sf_did)) { switch((pci_read_config(dev, SF_PCI_SUBVEN_ID, 4) >> 16) & 0xFFFF) { case AD_SUBSYSID_62011_REV0: case AD_SUBSYSID_62011_REV1: device_set_desc(dev, "Adaptec ANA-62011 10/100BaseTX"); return(0); break; case AD_SUBSYSID_62022: device_set_desc(dev, "Adaptec ANA-62022 10/100BaseTX"); return(0); break; case AD_SUBSYSID_62044_REV0: case AD_SUBSYSID_62044_REV1: device_set_desc(dev, "Adaptec ANA-62044 10/100BaseTX"); return(0); break; case AD_SUBSYSID_62020: device_set_desc(dev, "Adaptec ANA-62020 10/100BaseFX"); return(0); break; case AD_SUBSYSID_69011: device_set_desc(dev, "Adaptec ANA-69011 10/100BaseTX"); return(0); break; default: device_set_desc(dev, t->sf_name); return(0); break; } } t++; } return(ENXIO); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int sf_attach(dev) device_t dev; { int i; u_int32_t command; struct sf_softc *sc; struct ifnet *ifp; int unit, rid, error = 0; sc = device_get_softc(dev); unit = device_get_unit(dev); bzero(sc, sizeof(struct sf_softc)); mtx_init(&sc->sf_mtx, device_get_nameunit(dev), MTX_DEF | MTX_RECURSE); SF_LOCK(sc); /* * Handle power management nonsense. */ command = pci_read_config(dev, SF_PCI_CAPID, 4) & 0x000000FF; if (command == 0x01) { command = pci_read_config(dev, SF_PCI_PWRMGMTCTRL, 4); if (command & SF_PSTATE_MASK) { u_int32_t iobase, membase, irq; /* Save important PCI config data. 
*/ iobase = pci_read_config(dev, SF_PCI_LOIO, 4); membase = pci_read_config(dev, SF_PCI_LOMEM, 4); irq = pci_read_config(dev, SF_PCI_INTLINE, 4); /* Reset the power state. */ printf("sf%d: chip is in D%d power mode " "-- setting to D0\n", unit, command & SF_PSTATE_MASK); command &= 0xFFFFFFFC; pci_write_config(dev, SF_PCI_PWRMGMTCTRL, command, 4); /* Restore PCI config data. */ pci_write_config(dev, SF_PCI_LOIO, iobase, 4); pci_write_config(dev, SF_PCI_LOMEM, membase, 4); pci_write_config(dev, SF_PCI_INTLINE, irq, 4); } } /* * Map control/status registers. */ command = pci_read_config(dev, PCIR_COMMAND, 4); command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN); pci_write_config(dev, PCIR_COMMAND, command, 4); command = pci_read_config(dev, PCIR_COMMAND, 4); #ifdef SF_USEIOSPACE if (!(command & PCIM_CMD_PORTEN)) { printf("sf%d: failed to enable I/O ports!\n", unit); error = ENXIO; goto fail; } #else if (!(command & PCIM_CMD_MEMEN)) { printf("sf%d: failed to enable memory mapping!\n", unit); error = ENXIO; goto fail; } #endif rid = SF_RID; sc->sf_res = bus_alloc_resource(dev, SF_RES, &rid, 0, ~0, 1, RF_ACTIVE); if (sc->sf_res == NULL) { printf ("sf%d: couldn't map ports\n", unit); error = ENXIO; goto fail; } sc->sf_btag = rman_get_bustag(sc->sf_res); sc->sf_bhandle = rman_get_bushandle(sc->sf_res); /* Allocate interrupt */ rid = 0; sc->sf_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE); if (sc->sf_irq == NULL) { printf("sf%d: couldn't map interrupt\n", unit); bus_release_resource(dev, SF_RES, SF_RID, sc->sf_res); error = ENXIO; goto fail; } error = bus_setup_intr(dev, sc->sf_irq, INTR_TYPE_NET, sf_intr, sc, &sc->sf_intrhand); if (error) { bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_res); bus_release_resource(dev, SF_RES, SF_RID, sc->sf_res); printf("sf%d: couldn't set up irq\n", unit); goto fail; } callout_handle_init(&sc->sf_stat_ch); /* Reset the adapter. */ sf_reset(sc); /* * Get station address from the EEPROM. 
*/ for (i = 0; i < ETHER_ADDR_LEN; i++) sc->arpcom.ac_enaddr[i] = sf_read_eeprom(sc, SF_EE_NODEADDR + ETHER_ADDR_LEN - i); /* * An Adaptec chip was detected. Inform the world. */ printf("sf%d: Ethernet address: %6D\n", unit, sc->arpcom.ac_enaddr, ":"); sc->sf_unit = unit; /* Allocate the descriptor queues. */ sc->sf_ldata = contigmalloc(sizeof(struct sf_list_data), M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc->sf_ldata == NULL) { printf("sf%d: no memory for list buffers!\n", unit); bus_teardown_intr(dev, sc->sf_irq, sc->sf_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_irq); bus_release_resource(dev, SF_RES, SF_RID, sc->sf_res); error = ENXIO; goto fail; } bzero(sc->sf_ldata, sizeof(struct sf_list_data)); /* Do MII setup. */ if (mii_phy_probe(dev, &sc->sf_miibus, sf_ifmedia_upd, sf_ifmedia_sts)) { printf("sf%d: MII without any phy!\n", sc->sf_unit); contigfree(sc->sf_ldata,sizeof(struct sf_list_data),M_DEVBUF); bus_teardown_intr(dev, sc->sf_irq, sc->sf_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_irq); bus_release_resource(dev, SF_RES, SF_RID, sc->sf_res); error = ENXIO; goto fail; } ifp = &sc->arpcom.ac_if; ifp->if_softc = sc; ifp->if_unit = unit; ifp->if_name = "sf"; ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = sf_ioctl; ifp->if_output = ether_output; ifp->if_start = sf_start; ifp->if_watchdog = sf_watchdog; ifp->if_init = sf_init; ifp->if_baudrate = 10000000; ifp->if_snd.ifq_maxlen = SF_TX_DLIST_CNT - 1; /* * Call MI attach routine. 
*/ ether_ifattach(ifp, ETHER_BPF_SUPPORTED); SF_UNLOCK(sc); return(0); fail: SF_UNLOCK(sc); mtx_destroy(&sc->sf_mtx); return(error); } static int sf_detach(dev) device_t dev; { struct sf_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); SF_LOCK(sc); ifp = &sc->arpcom.ac_if; ether_ifdetach(ifp, ETHER_BPF_SUPPORTED); sf_stop(sc); bus_generic_detach(dev); device_delete_child(dev, sc->sf_miibus); bus_teardown_intr(dev, sc->sf_irq, sc->sf_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_irq); bus_release_resource(dev, SF_RES, SF_RID, sc->sf_res); contigfree(sc->sf_ldata, sizeof(struct sf_list_data), M_DEVBUF); SF_UNLOCK(sc); mtx_destroy(&sc->sf_mtx); return(0); } static int sf_init_rx_ring(sc) struct sf_softc *sc; { struct sf_list_data *ld; int i; ld = sc->sf_ldata; bzero((char *)ld->sf_rx_dlist_big, sizeof(struct sf_rx_bufdesc_type0) * SF_RX_DLIST_CNT); bzero((char *)ld->sf_rx_clist, sizeof(struct sf_rx_cmpdesc_type3) * SF_RX_CLIST_CNT); for (i = 0; i < SF_RX_DLIST_CNT; i++) { if (sf_newbuf(sc, &ld->sf_rx_dlist_big[i], NULL) == ENOBUFS) return(ENOBUFS); } return(0); } static void sf_init_tx_ring(sc) struct sf_softc *sc; { struct sf_list_data *ld; int i; ld = sc->sf_ldata; bzero((char *)ld->sf_tx_dlist, sizeof(struct sf_tx_bufdesc_type0) * SF_TX_DLIST_CNT); bzero((char *)ld->sf_tx_clist, sizeof(struct sf_tx_cmpdesc_type0) * SF_TX_CLIST_CNT); for (i = 0; i < SF_TX_DLIST_CNT; i++) ld->sf_tx_dlist[i].sf_id = SF_TX_BUFDESC_ID; for (i = 0; i < SF_TX_CLIST_CNT; i++) ld->sf_tx_clist[i].sf_type = SF_TXCMPTYPE_TX; ld->sf_tx_dlist[SF_TX_DLIST_CNT - 1].sf_end = 1; sc->sf_tx_cnt = 0; return; } static int sf_newbuf(sc, c, m) struct sf_softc *sc; struct sf_rx_bufdesc_type0 *c; struct mbuf *m; { struct mbuf *m_new = NULL; if (m == NULL) { MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("sf%d: no memory for rx list -- " "packet dropped!\n", sc->sf_unit); return(ENOBUFS); } MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { printf("sf%d: no 
memory for rx list -- " "packet dropped!\n", sc->sf_unit); m_freem(m_new); return(ENOBUFS); } m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; } else { m_new = m; m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; m_new->m_data = m_new->m_ext.ext_buf; } m_adj(m_new, sizeof(u_int64_t)); c->sf_mbuf = m_new; c->sf_addrlo = SF_RX_HOSTADDR(vtophys(mtod(m_new, caddr_t))); c->sf_valid = 1; return(0); } /* * The starfire is programmed to use 'normal' mode for packet reception, * which means we use the consumer/producer model for both the buffer * descriptor queue and the completion descriptor queue. The only problem * with this is that it involves a lot of register accesses: we have to * read the RX completion consumer and producer indexes and the RX buffer * producer index, plus the RX completion consumer and RX buffer producer * indexes have to be updated. It would have been easier if Adaptec had * put each index in a separate register, especially given that the damn * NIC has a 512K register space. * * In spite of all the lovely features that Adaptec crammed into the 6915, * it is marred by one truly stupid design flaw, which is that receive * buffer addresses must be aligned on a longword boundary. This forces * the packet payload to be unaligned, which is suboptimal on the x86 and * completely unuseable on the Alpha. Our only recourse is to copy received * packets into properly aligned buffers before handing them off. 
*/
static void sf_rxeof(sc)
	struct sf_softc		*sc;
{
	struct ether_header	*eh;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct sf_rx_bufdesc_type0	*desc;
	struct sf_rx_cmpdesc_type3	*cur_rx;
	u_int32_t		rxcons, rxprod;
	int			cmpprodidx, cmpconsidx, bufprodidx;

	ifp = &sc->arpcom.ac_if;

	rxcons = csr_read_4(sc, SF_CQ_CONSIDX);
	rxprod = csr_read_4(sc, SF_RXDQ_PTR_Q1);
	cmpprodidx = SF_IDX_LO(csr_read_4(sc, SF_CQ_PRODIDX));
	cmpconsidx = SF_IDX_LO(rxcons);
	bufprodidx = SF_IDX_LO(rxprod);

	/* Consume completion entries until we catch up with the chip. */
	while (cmpconsidx != cmpprodidx) {
		struct mbuf		*m0;

		cur_rx = &sc->sf_ldata->sf_rx_clist[cmpconsidx];
		desc = &sc->sf_ldata->sf_rx_dlist_big[cur_rx->sf_endidx];
		m = desc->sf_mbuf;
		SF_INC(cmpconsidx, SF_RX_CLIST_CNT);
		SF_INC(bufprodidx, SF_RX_DLIST_CNT);

		if (!(cur_rx->sf_status1 & SF_RXSTAT1_OK)) {
			/* Bad frame: count it and recycle the buffer. */
			ifp->if_ierrors++;
			sf_newbuf(sc, desc, m);
			continue;
		}

		/*
		 * Copy the packet into a fresh mbuf chain so the
		 * payload ends up properly aligned, then recycle the
		 * original receive buffer.
		 */
		m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
		    cur_rx->sf_len + ETHER_ALIGN, 0, ifp, NULL);
		sf_newbuf(sc, desc, m);
		if (m0 == NULL) {
			ifp->if_ierrors++;
			continue;
		}
		m_adj(m0, ETHER_ALIGN);
		m = m0;

		eh = mtod(m, struct ether_header *);
		ifp->if_ipackets++;

		/* Remove header from mbuf and pass it on. */
		m_adj(m, sizeof(struct ether_header));
		ether_input(ifp, eh, m);
	}

	/* Tell the chip how far we got. */
	csr_write_4(sc, SF_CQ_CONSIDX,
	    (rxcons & ~SF_CQ_CONSIDX_RXQ1) | cmpconsidx);
	csr_write_4(sc, SF_RXDQ_PTR_Q1,
	    (rxprod & ~SF_RXDQ_PRODIDX) | bufprodidx);

	return;
}

/*
 * Read the transmit status from the completion queue and release
 * mbufs. Note that the buffer descriptor index in the completion
 * descriptor is an offset from the start of the transmit buffer
 * descriptor list in bytes. This is important because the manual
 * gives the impression that it should match the producer/consumer
 * index, which is the offset in 8 byte blocks.
*/
static void sf_txeof(sc)
	struct sf_softc		*sc;
{
	int			txcons, cmpprodidx, cmpconsidx;
	struct sf_tx_cmpdesc_type1 *cur_cmp;
	struct sf_tx_bufdesc_type0 *cur_tx;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;

	txcons = csr_read_4(sc, SF_CQ_CONSIDX);
	cmpprodidx = SF_IDX_HI(csr_read_4(sc, SF_CQ_PRODIDX));
	cmpconsidx = SF_IDX_HI(txcons);

	while (cmpconsidx != cmpprodidx) {
		cur_cmp = &sc->sf_ldata->sf_tx_clist[cmpconsidx];
		/*
		 * sf_index is a byte offset (see the comment above),
		 * hence the >> 7 to convert to a descriptor index.
		 */
		cur_tx = &sc->sf_ldata->sf_tx_dlist[cur_cmp->sf_index >> 7];
		SF_INC(cmpconsidx, SF_TX_CLIST_CNT);

		if (cur_cmp->sf_txstat & SF_TXSTAT_TX_OK)
			ifp->if_opackets++;
		else
			ifp->if_oerrors++;

		sc->sf_tx_cnt--;
		if (cur_tx->sf_mbuf != NULL) {
			m_freem(cur_tx->sf_mbuf);
			cur_tx->sf_mbuf = NULL;
		}
	}

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Advance our half of the completion queue consumer index. */
	csr_write_4(sc, SF_CQ_CONSIDX,
	    (txcons & ~SF_CQ_CONSIDX_TXQ) |
	    ((cmpconsidx << 16) & 0xFFFF0000));

	return;
}

/*
 * Interrupt service routine.  Bail early if the shadow ISR shows
 * the interrupt is not ours (shared line); otherwise mask, loop
 * acknowledging and servicing pending sources, then unmask.
 */
static void sf_intr(arg)
	void			*arg;
{
	struct sf_softc		*sc;
	struct ifnet		*ifp;
	u_int32_t		status;

	sc = arg;
	SF_LOCK(sc);

	ifp = &sc->arpcom.ac_if;

	if (!(csr_read_4(sc, SF_ISR_SHADOW) & SF_ISR_PCIINT_ASSERTED)) {
		SF_UNLOCK(sc);
		return;
	}

	/* Disable interrupts. */
	csr_write_4(sc, SF_IMR, 0x00000000);

	for (;;) {
		status = csr_read_4(sc, SF_ISR);
		/* Ack pending sources before servicing them. */
		if (status)
			csr_write_4(sc, SF_ISR, status);

		if (!(status & SF_INTRS))
			break;

		if (status & SF_ISR_RXDQ1_DMADONE)
			sf_rxeof(sc);

		if (status & SF_ISR_TX_TXDONE)
			sf_txeof(sc);

		if (status & SF_ISR_ABNORMALINTR) {
			if (status & SF_ISR_STATSOFLOW) {
				/* Harvest counters before they wrap. */
				untimeout(sf_stats_update, sc,
				    sc->sf_stat_ch);
				sf_stats_update(sc);
			} else
				sf_init(sc);
		}
	}

	/* Re-enable interrupts.
*/ csr_write_4(sc, SF_IMR, SF_INTRS); if (ifp->if_snd.ifq_head != NULL) sf_start(ifp); SF_UNLOCK(sc); return; } static void sf_init(xsc) void *xsc; { struct sf_softc *sc; struct ifnet *ifp; struct mii_data *mii; int i; sc = xsc; SF_LOCK(sc); ifp = &sc->arpcom.ac_if; mii = device_get_softc(sc->sf_miibus); sf_stop(sc); sf_reset(sc); /* Init all the receive filter registers */ for (i = SF_RXFILT_PERFECT_BASE; i < (SF_RXFILT_HASH_MAX + 1); i += 4) csr_write_4(sc, i, 0); /* Empty stats counter registers. */ for (i = 0; i < sizeof(struct sf_stats)/sizeof(u_int32_t); i++) csr_write_4(sc, SF_STATS_BASE + (i + sizeof(u_int32_t)), 0); /* Init our MAC address */ csr_write_4(sc, SF_PAR0, *(u_int32_t *)(&sc->arpcom.ac_enaddr[0])); csr_write_4(sc, SF_PAR1, *(u_int32_t *)(&sc->arpcom.ac_enaddr[4])); sf_setperf(sc, 0, (caddr_t)&sc->arpcom.ac_enaddr); if (sf_init_rx_ring(sc) == ENOBUFS) { printf("sf%d: initialization failed: no " "memory for rx buffers\n", sc->sf_unit); SF_UNLOCK(sc); return; } sf_init_tx_ring(sc); csr_write_4(sc, SF_RXFILT, SF_PERFMODE_NORMAL|SF_HASHMODE_WITHVLAN); /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) { SF_SETBIT(sc, SF_RXFILT, SF_RXFILT_PROMISC); } else { SF_CLRBIT(sc, SF_RXFILT, SF_RXFILT_PROMISC); } if (ifp->if_flags & IFF_BROADCAST) { SF_SETBIT(sc, SF_RXFILT, SF_RXFILT_BROAD); } else { SF_CLRBIT(sc, SF_RXFILT, SF_RXFILT_BROAD); } /* * Load the multicast filter. */ sf_setmulti(sc); /* Init the completion queue indexes */ csr_write_4(sc, SF_CQ_CONSIDX, 0); csr_write_4(sc, SF_CQ_PRODIDX, 0); /* Init the RX completion queue */ csr_write_4(sc, SF_RXCQ_CTL_1, vtophys(sc->sf_ldata->sf_rx_clist) & SF_RXCQ_ADDR); SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQTYPE_3); /* Init RX DMA control. */ SF_SETBIT(sc, SF_RXDMA_CTL, SF_RXDMA_REPORTBADPKTS); /* Init the RX buffer descriptor queue. 
*/ csr_write_4(sc, SF_RXDQ_ADDR_Q1, vtophys(sc->sf_ldata->sf_rx_dlist_big)); csr_write_4(sc, SF_RXDQ_CTL_1, (MCLBYTES << 16) | SF_DESCSPACE_16BYTES); csr_write_4(sc, SF_RXDQ_PTR_Q1, SF_RX_DLIST_CNT - 1); /* Init the TX completion queue */ csr_write_4(sc, SF_TXCQ_CTL, vtophys(sc->sf_ldata->sf_tx_clist) & SF_RXCQ_ADDR); /* Init the TX buffer descriptor queue. */ csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, vtophys(sc->sf_ldata->sf_tx_dlist)); SF_SETBIT(sc, SF_TX_FRAMCTL, SF_TXFRMCTL_CPLAFTERTX); csr_write_4(sc, SF_TXDQ_CTL, SF_TXBUFDESC_TYPE0|SF_TXMINSPACE_128BYTES|SF_TXSKIPLEN_8BYTES); SF_SETBIT(sc, SF_TXDQ_CTL, SF_TXDQCTL_NODMACMP); /* Enable autopadding of short TX frames. */ SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_AUTOPAD); /* Enable interrupts. */ csr_write_4(sc, SF_IMR, SF_INTRS); SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_INTR_ENB); /* Enable the RX and TX engines. */ SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RX_ENB|SF_ETHCTL_RXDMA_ENB); SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TX_ENB|SF_ETHCTL_TXDMA_ENB); /*mii_mediachg(mii);*/ sf_ifmedia_upd(ifp); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; sc->sf_stat_ch = timeout(sf_stats_update, sc, hz); SF_UNLOCK(sc); return; } static int sf_encap(sc, c, m_head) struct sf_softc *sc; struct sf_tx_bufdesc_type0 *c; struct mbuf *m_head; { int frag = 0; struct sf_frag *f = NULL; struct mbuf *m; m = m_head; for (m = m_head, frag = 0; m != NULL; m = m->m_next) { if (m->m_len != 0) { if (frag == SF_MAXFRAGS) break; f = &c->sf_frags[frag]; if (frag == 0) f->sf_pktlen = m_head->m_pkthdr.len; f->sf_fraglen = m->m_len; f->sf_addr = vtophys(mtod(m, vm_offset_t)); frag++; } } if (m != NULL) { struct mbuf *m_new = NULL; MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("sf%d: no memory for tx list", sc->sf_unit); return(1); } if (m_head->m_pkthdr.len > MHLEN) { MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { m_freem(m_new); printf("sf%d: no memory for tx list", sc->sf_unit); return(1); } } 
m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t)); m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len; m_freem(m_head); m_head = m_new; f = &c->sf_frags[0]; f->sf_fraglen = f->sf_pktlen = m_head->m_pkthdr.len; f->sf_addr = vtophys(mtod(m_head, caddr_t)); frag = 1; } c->sf_mbuf = m_head; c->sf_id = SF_TX_BUFDESC_ID; c->sf_fragcnt = frag; c->sf_intr = 1; c->sf_caltcp = 0; c->sf_crcen = 1; return(0); } static void sf_start(ifp) struct ifnet *ifp; { struct sf_softc *sc; struct sf_tx_bufdesc_type0 *cur_tx = NULL; struct mbuf *m_head = NULL; int i, txprod; sc = ifp->if_softc; SF_LOCK(sc); if (!sc->sf_link) { SF_UNLOCK(sc); return; } if (ifp->if_flags & IFF_OACTIVE) { SF_UNLOCK(sc); return; } txprod = csr_read_4(sc, SF_TXDQ_PRODIDX); i = SF_IDX_HI(txprod) >> 4; while(sc->sf_ldata->sf_tx_dlist[i].sf_mbuf == NULL) { if (sc->sf_tx_cnt == (SF_TX_DLIST_CNT - 2)) { ifp->if_flags |= IFF_OACTIVE; cur_tx = NULL; break; } IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; cur_tx = &sc->sf_ldata->sf_tx_dlist[i]; if (sf_encap(sc, cur_tx, m_head)) { IF_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; cur_tx = NULL; break; } /* * If there's a BPF listener, bounce a copy of this frame * to him. 
*/ if (ifp->if_bpf) bpf_mtap(ifp, m_head); SF_INC(i, SF_TX_DLIST_CNT); sc->sf_tx_cnt++; } if (cur_tx == NULL) { SF_UNLOCK(sc); return; } /* Transmit */ csr_write_4(sc, SF_TXDQ_PRODIDX, (txprod & ~SF_TXDQ_PRODIDX_HIPRIO) | ((i << 20) & 0xFFFF0000)); ifp->if_timer = 5; SF_UNLOCK(sc); return; } static void sf_stop(sc) struct sf_softc *sc; { int i; struct ifnet *ifp; SF_LOCK(sc); ifp = &sc->arpcom.ac_if; untimeout(sf_stats_update, sc, sc->sf_stat_ch); csr_write_4(sc, SF_GEN_ETH_CTL, 0); csr_write_4(sc, SF_CQ_CONSIDX, 0); csr_write_4(sc, SF_CQ_PRODIDX, 0); csr_write_4(sc, SF_RXDQ_ADDR_Q1, 0); csr_write_4(sc, SF_RXDQ_CTL_1, 0); csr_write_4(sc, SF_RXDQ_PTR_Q1, 0); csr_write_4(sc, SF_TXCQ_CTL, 0); csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0); csr_write_4(sc, SF_TXDQ_CTL, 0); sf_reset(sc); sc->sf_link = 0; for (i = 0; i < SF_RX_DLIST_CNT; i++) { if (sc->sf_ldata->sf_rx_dlist_big[i].sf_mbuf != NULL) { m_freem(sc->sf_ldata->sf_rx_dlist_big[i].sf_mbuf); sc->sf_ldata->sf_rx_dlist_big[i].sf_mbuf = NULL; } } for (i = 0; i < SF_TX_DLIST_CNT; i++) { if (sc->sf_ldata->sf_tx_dlist[i].sf_mbuf != NULL) { m_freem(sc->sf_ldata->sf_tx_dlist[i].sf_mbuf); sc->sf_ldata->sf_tx_dlist[i].sf_mbuf = NULL; } } ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE); SF_UNLOCK(sc); return; } /* * Note: it is important that this function not be interrupted. We * use a two-stage register access scheme: if we are interrupted in * between setting the indirect address register and reading from the * indirect data register, the contents of the address register could * be changed out from under us. 
*/ static void sf_stats_update(xsc) void *xsc; { struct sf_softc *sc; struct ifnet *ifp; struct mii_data *mii; struct sf_stats stats; u_int32_t *ptr; int i; sc = xsc; SF_LOCK(sc); ifp = &sc->arpcom.ac_if; mii = device_get_softc(sc->sf_miibus); ptr = (u_int32_t *)&stats; for (i = 0; i < sizeof(stats)/sizeof(u_int32_t); i++) ptr[i] = csr_read_4(sc, SF_STATS_BASE + (i + sizeof(u_int32_t))); for (i = 0; i < sizeof(stats)/sizeof(u_int32_t); i++) csr_write_4(sc, SF_STATS_BASE + (i + sizeof(u_int32_t)), 0); ifp->if_collisions += stats.sf_tx_single_colls + stats.sf_tx_multi_colls + stats.sf_tx_excess_colls; mii_tick(mii); if (!sc->sf_link) { mii_pollstat(mii); if (mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) sc->sf_link++; if (ifp->if_snd.ifq_head != NULL) sf_start(ifp); } sc->sf_stat_ch = timeout(sf_stats_update, sc, hz); SF_UNLOCK(sc); return; } static void sf_watchdog(ifp) struct ifnet *ifp; { struct sf_softc *sc; sc = ifp->if_softc; SF_LOCK(sc); ifp->if_oerrors++; printf("sf%d: watchdog timeout\n", sc->sf_unit); sf_stop(sc); sf_reset(sc); sf_init(sc); if (ifp->if_snd.ifq_head != NULL) sf_start(ifp); SF_UNLOCK(sc); return; } static void sf_shutdown(dev) device_t dev; { struct sf_softc *sc; sc = device_get_softc(dev); sf_stop(sc); return; } Index: head/sys/pci/if_sis.c =================================================================== --- head/sys/pci/if_sis.c (revision 71961) +++ head/sys/pci/if_sis.c (revision 71962) @@ -1,1679 +1,1677 @@ /* * Copyright (c) 1997, 1998, 1999 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * SiS 900/SiS 7016 fast ethernet PCI NIC driver. Datasheets are * available from http://www.sis.com.tw. * * This driver also supports the NatSemi DP83815. Datasheets are * available from http://www.national.com. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The SiS 900 is a fairly simple chip. It uses bus master DMA with * simple TX and RX descriptors of 3 longwords in size. The receiver * has a single perfect filter entry for the station address and a * 128-bit multicast hash table. 
The SiS 900 has a built-in MII-based * transceiver while the 7016 requires an external transceiver chip. * Both chips offer the standard bit-bang MII interface as well as * an enchanced PHY interface which simplifies accessing MII registers. * * The only downside to this chipset is that RX descriptors must be * longword aligned. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include #include #include #include #include #include #include #define SIS_USEIOSPACE #include MODULE_DEPEND(sis, miibus, 1, 1, 1); /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" #ifndef lint static const char rcsid[] = "$FreeBSD$"; #endif /* * Various supported device vendors/types and their names. */ static struct sis_type sis_devs[] = { { SIS_VENDORID, SIS_DEVICEID_900, "SiS 900 10/100BaseTX" }, { SIS_VENDORID, SIS_DEVICEID_7016, "SiS 7016 10/100BaseTX" }, { NS_VENDORID, NS_DEVICEID_DP83815, "NatSemi DP83815 10/100BaseTX" }, { 0, 0, NULL } }; static int sis_probe __P((device_t)); static int sis_attach __P((device_t)); static int sis_detach __P((device_t)); static int sis_newbuf __P((struct sis_softc *, struct sis_desc *, struct mbuf *)); static int sis_encap __P((struct sis_softc *, struct mbuf *, u_int32_t *)); static void sis_rxeof __P((struct sis_softc *)); static void sis_rxeoc __P((struct sis_softc *)); static void sis_txeof __P((struct sis_softc *)); static void sis_intr __P((void *)); static void sis_tick __P((void *)); static void sis_start __P((struct ifnet *)); static int sis_ioctl __P((struct ifnet *, u_long, caddr_t)); static void sis_init __P((void *)); static void sis_stop __P((struct sis_softc *)); static void sis_watchdog __P((struct ifnet *)); static void sis_shutdown __P((device_t)); static int sis_ifmedia_upd __P((struct ifnet *)); static void sis_ifmedia_sts __P((struct 
ifnet *, struct ifmediareq *)); static u_int16_t sis_reverse __P((u_int16_t)); static void sis_delay __P((struct sis_softc *)); static void sis_eeprom_idle __P((struct sis_softc *)); static void sis_eeprom_putbyte __P((struct sis_softc *, int)); static void sis_eeprom_getword __P((struct sis_softc *, int, u_int16_t *)); static void sis_read_eeprom __P((struct sis_softc *, caddr_t, int, int, int)); static int sis_miibus_readreg __P((device_t, int, int)); static int sis_miibus_writereg __P((device_t, int, int, int)); static void sis_miibus_statchg __P((device_t)); static void sis_setmulti_sis __P((struct sis_softc *)); static void sis_setmulti_ns __P((struct sis_softc *)); static u_int32_t sis_crc __P((struct sis_softc *, caddr_t)); static void sis_reset __P((struct sis_softc *)); static int sis_list_rx_init __P((struct sis_softc *)); static int sis_list_tx_init __P((struct sis_softc *)); #ifdef SIS_USEIOSPACE #define SIS_RES SYS_RES_IOPORT #define SIS_RID SIS_PCI_LOIO #else #define SIS_RES SYS_RES_MEMORY #define SIS_RID SIS_PCI_LOMEM #endif static device_method_t sis_methods[] = { /* Device interface */ DEVMETHOD(device_probe, sis_probe), DEVMETHOD(device_attach, sis_attach), DEVMETHOD(device_detach, sis_detach), DEVMETHOD(device_shutdown, sis_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, sis_miibus_readreg), DEVMETHOD(miibus_writereg, sis_miibus_writereg), DEVMETHOD(miibus_statchg, sis_miibus_statchg), { 0, 0 } }; static driver_t sis_driver = { "sis", sis_methods, sizeof(struct sis_softc) }; static devclass_t sis_devclass; DRIVER_MODULE(if_sis, pci, sis_driver, sis_devclass, 0, 0); DRIVER_MODULE(miibus, sis, miibus_driver, miibus_devclass, 0, 0); #define SIS_SETBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, \ CSR_READ_4(sc, reg) | (x)) #define SIS_CLRBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, \ CSR_READ_4(sc, reg) & ~(x)) #define SIO_SET(x) 
\ CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) | x) #define SIO_CLR(x) \ CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) & ~x) /* * Routine to reverse the bits in a word. Stolen almost * verbatim from /usr/games/fortune. */ static u_int16_t sis_reverse(n) u_int16_t n; { n = ((n >> 1) & 0x5555) | ((n << 1) & 0xaaaa); n = ((n >> 2) & 0x3333) | ((n << 2) & 0xcccc); n = ((n >> 4) & 0x0f0f) | ((n << 4) & 0xf0f0); n = ((n >> 8) & 0x00ff) | ((n << 8) & 0xff00); return(n); } static void sis_delay(sc) struct sis_softc *sc; { int idx; for (idx = (300 / 33) + 1; idx > 0; idx--) CSR_READ_4(sc, SIS_CSR); return; } static void sis_eeprom_idle(sc) struct sis_softc *sc; { register int i; SIO_SET(SIS_EECTL_CSEL); sis_delay(sc); SIO_SET(SIS_EECTL_CLK); sis_delay(sc); for (i = 0; i < 25; i++) { SIO_CLR(SIS_EECTL_CLK); sis_delay(sc); SIO_SET(SIS_EECTL_CLK); sis_delay(sc); } SIO_CLR(SIS_EECTL_CLK); sis_delay(sc); SIO_CLR(SIS_EECTL_CSEL); sis_delay(sc); CSR_WRITE_4(sc, SIS_EECTL, 0x00000000); return; } /* * Send a read command and address to the EEPROM, check for ACK. */ static void sis_eeprom_putbyte(sc, addr) struct sis_softc *sc; int addr; { register int d, i; d = addr | SIS_EECMD_READ; /* * Feed in each bit and stobe the clock. */ for (i = 0x400; i; i >>= 1) { if (d & i) { SIO_SET(SIS_EECTL_DIN); } else { SIO_CLR(SIS_EECTL_DIN); } sis_delay(sc); SIO_SET(SIS_EECTL_CLK); sis_delay(sc); SIO_CLR(SIS_EECTL_CLK); sis_delay(sc); } return; } /* * Read a word of data stored in the EEPROM at address 'addr.' */ static void sis_eeprom_getword(sc, addr, dest) struct sis_softc *sc; int addr; u_int16_t *dest; { register int i; u_int16_t word = 0; /* Force EEPROM to idle state. */ sis_eeprom_idle(sc); /* Enter EEPROM access mode. */ sis_delay(sc); SIO_CLR(SIS_EECTL_CLK); sis_delay(sc); SIO_SET(SIS_EECTL_CSEL); sis_delay(sc); /* * Send address of word we want to read. */ sis_eeprom_putbyte(sc, addr); /* * Start reading bits from EEPROM. 
*/ for (i = 0x8000; i; i >>= 1) { SIO_SET(SIS_EECTL_CLK); sis_delay(sc); if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECTL_DOUT) word |= i; sis_delay(sc); SIO_CLR(SIS_EECTL_CLK); sis_delay(sc); } /* Turn off EEPROM access mode. */ sis_eeprom_idle(sc); *dest = word; return; } /* * Read a sequence of words from the EEPROM. */ static void sis_read_eeprom(sc, dest, off, cnt, swap) struct sis_softc *sc; caddr_t dest; int off; int cnt; int swap; { int i; u_int16_t word = 0, *ptr; for (i = 0; i < cnt; i++) { sis_eeprom_getword(sc, off + i, &word); ptr = (u_int16_t *)(dest + (i * 2)); if (swap) *ptr = ntohs(word); else *ptr = word; } return; } static int sis_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct sis_softc *sc; int i, val = 0; sc = device_get_softc(dev); if (sc->sis_type == SIS_TYPE_83815) { if (phy != 0) return(0); /* * The NatSemi chip can take a while after * a reset to come ready, during which the BMSR * returns a value of 0. This is *never* supposed * to happen: some of the BMSR bits are meant to * be hardwired in the on position, and this can * confuse the miibus code a bit during the probe * and attach phase. So we make an effort to check * for this condition and wait for it to clear. 
*/ if (!CSR_READ_4(sc, NS_BMSR)) DELAY(1000); val = CSR_READ_4(sc, NS_BMCR + (reg * 4)); return(val); } if (sc->sis_type == SIS_TYPE_900 && phy != 0) return(0); CSR_WRITE_4(sc, SIS_PHYCTL, (phy << 11) | (reg << 6) | SIS_PHYOP_READ); SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS); for (i = 0; i < SIS_TIMEOUT; i++) { if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS)) break; } if (i == SIS_TIMEOUT) { printf("sis%d: PHY failed to come ready\n", sc->sis_unit); return(0); } val = (CSR_READ_4(sc, SIS_PHYCTL) >> 16) & 0xFFFF; if (val == 0xFFFF) return(0); return(val); } static int sis_miibus_writereg(dev, phy, reg, data) device_t dev; int phy, reg, data; { struct sis_softc *sc; int i; sc = device_get_softc(dev); if (sc->sis_type == SIS_TYPE_83815) { if (phy != 0) return(0); CSR_WRITE_4(sc, NS_BMCR + (reg * 4), data); return(0); } if (sc->sis_type == SIS_TYPE_900 && phy != 0) return(0); CSR_WRITE_4(sc, SIS_PHYCTL, (data << 16) | (phy << 11) | (reg << 6) | SIS_PHYOP_WRITE); SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS); for (i = 0; i < SIS_TIMEOUT; i++) { if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS)) break; } if (i == SIS_TIMEOUT) printf("sis%d: PHY failed to come ready\n", sc->sis_unit); return(0); } static void sis_miibus_statchg(dev) device_t dev; { struct sis_softc *sc; sc = device_get_softc(dev); sis_init(sc); return; } static u_int32_t sis_crc(sc, addr) struct sis_softc *sc; caddr_t addr; { u_int32_t crc, carry; int i, j; u_int8_t c; /* Compute CRC for the address value. */ crc = 0xFFFFFFFF; /* initial value */ for (i = 0; i < 6; i++) { c = *(addr + i); for (j = 0; j < 8; j++) { carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01); crc <<= 1; c >>= 1; if (carry) crc = (crc ^ 0x04c11db6) | carry; } } /* * return the filter bit position * * The NatSemi chip has a 512-bit filter, which is * different than the SiS, so we special-case it. 
*/ if (sc->sis_type == SIS_TYPE_83815) return((crc >> 23) & 0x1FF); return((crc >> 25) & 0x0000007F); } static void sis_setmulti_ns(sc) struct sis_softc *sc; { struct ifnet *ifp; struct ifmultiaddr *ifma; u_int32_t h = 0, i, filtsave; int bit, index; ifp = &sc->arpcom.ac_if; if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { SIS_CLRBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH); SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI); return; } /* * We have to explicitly enable the multicast hash table * on the NatSemi chip if we want to use it, which we do. */ SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH); SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI); filtsave = CSR_READ_4(sc, SIS_RXFILT_CTL); /* first, zot all the existing hash bits */ for (i = 0; i < 32; i++) { CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + (i*2)); CSR_WRITE_4(sc, SIS_RXFILT_DATA, 0); } - for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; - ifma = ifma->ifma_link.le_next) { + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = sis_crc(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); index = h >> 3; bit = h & 0x1F; CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + index); if (bit > 0xF) bit -= 0x10; SIS_SETBIT(sc, SIS_RXFILT_DATA, (1 << bit)); } CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave); return; } static void sis_setmulti_sis(sc) struct sis_softc *sc; { struct ifnet *ifp; struct ifmultiaddr *ifma; u_int32_t h = 0, i, filtsave; ifp = &sc->arpcom.ac_if; if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI); return; } SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI); filtsave = CSR_READ_4(sc, SIS_RXFILT_CTL); /* first, zot all the existing hash bits */ for (i = 0; i < 8; i++) { CSR_WRITE_4(sc, SIS_RXFILT_CTL, (4 + ((i * 16) >> 4)) << 16); CSR_WRITE_4(sc, SIS_RXFILT_DATA, 0); } /* now program new ones */ - for (ifma = 
ifp->if_multiaddrs.lh_first; ifma != NULL; - ifma = ifma->ifma_link.le_next) { + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = sis_crc(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); CSR_WRITE_4(sc, SIS_RXFILT_CTL, (4 + (h >> 4)) << 16); SIS_SETBIT(sc, SIS_RXFILT_DATA, (1 << (h & 0xF))); } CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave); return; } static void sis_reset(sc) struct sis_softc *sc; { register int i; SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RESET); for (i = 0; i < SIS_TIMEOUT; i++) { if (!(CSR_READ_4(sc, SIS_CSR) & SIS_CSR_RESET)) break; } if (i == SIS_TIMEOUT) printf("sis%d: reset never completed\n", sc->sis_unit); /* Wait a little while for the chip to get its brains in order. */ DELAY(1000); return; } /* * Probe for an SiS chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int sis_probe(dev) device_t dev; { struct sis_type *t; t = sis_devs; while(t->sis_name != NULL) { if ((pci_get_vendor(dev) == t->sis_vid) && (pci_get_device(dev) == t->sis_did)) { device_set_desc(dev, t->sis_name); return(0); } t++; } return(ENXIO); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int sis_attach(dev) device_t dev; { u_char eaddr[ETHER_ADDR_LEN]; u_int32_t command; struct sis_softc *sc; struct ifnet *ifp; int unit, error = 0, rid; sc = device_get_softc(dev); unit = device_get_unit(dev); bzero(sc, sizeof(struct sis_softc)); mtx_init(&sc->sis_mtx, device_get_nameunit(dev), MTX_DEF | MTX_RECURSE); SIS_LOCK(sc); if (pci_get_device(dev) == SIS_DEVICEID_900) sc->sis_type = SIS_TYPE_900; if (pci_get_device(dev) == SIS_DEVICEID_7016) sc->sis_type = SIS_TYPE_7016; if (pci_get_vendor(dev) == NS_VENDORID) sc->sis_type = SIS_TYPE_83815; /* * Handle power management nonsense. 
*/ command = pci_read_config(dev, SIS_PCI_CAPID, 4) & 0x000000FF; if (command == 0x01) { command = pci_read_config(dev, SIS_PCI_PWRMGMTCTRL, 4); if (command & SIS_PSTATE_MASK) { u_int32_t iobase, membase, irq; /* Save important PCI config data. */ iobase = pci_read_config(dev, SIS_PCI_LOIO, 4); membase = pci_read_config(dev, SIS_PCI_LOMEM, 4); irq = pci_read_config(dev, SIS_PCI_INTLINE, 4); /* Reset the power state. */ printf("sis%d: chip is in D%d power mode " "-- setting to D0\n", unit, command & SIS_PSTATE_MASK); command &= 0xFFFFFFFC; pci_write_config(dev, SIS_PCI_PWRMGMTCTRL, command, 4); /* Restore PCI config data. */ pci_write_config(dev, SIS_PCI_LOIO, iobase, 4); pci_write_config(dev, SIS_PCI_LOMEM, membase, 4); pci_write_config(dev, SIS_PCI_INTLINE, irq, 4); } } /* * Map control/status registers. */ command = pci_read_config(dev, PCIR_COMMAND, 4); command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN); pci_write_config(dev, PCIR_COMMAND, command, 4); command = pci_read_config(dev, PCIR_COMMAND, 4); #ifdef SIS_USEIOSPACE if (!(command & PCIM_CMD_PORTEN)) { printf("sis%d: failed to enable I/O ports!\n", unit); error = ENXIO;; goto fail; } #else if (!(command & PCIM_CMD_MEMEN)) { printf("sis%d: failed to enable memory mapping!\n", unit); error = ENXIO;; goto fail; } #endif rid = SIS_RID; sc->sis_res = bus_alloc_resource(dev, SIS_RES, &rid, 0, ~0, 1, RF_ACTIVE); if (sc->sis_res == NULL) { printf("sis%d: couldn't map ports/memory\n", unit); error = ENXIO; goto fail; } sc->sis_btag = rman_get_bustag(sc->sis_res); sc->sis_bhandle = rman_get_bushandle(sc->sis_res); /* Allocate interrupt */ rid = 0; sc->sis_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE); if (sc->sis_irq == NULL) { printf("sis%d: couldn't map interrupt\n", unit); bus_release_resource(dev, SIS_RES, SIS_RID, sc->sis_res); error = ENXIO; goto fail; } error = bus_setup_intr(dev, sc->sis_irq, INTR_TYPE_NET, sis_intr, sc, &sc->sis_intrhand); if (error) { 
bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sis_irq); bus_release_resource(dev, SIS_RES, SIS_RID, sc->sis_res); printf("sis%d: couldn't set up irq\n", unit); goto fail; } /* Reset the adapter. */ sis_reset(sc); /* * Get station address from the EEPROM. */ switch (pci_get_vendor(dev)) { case NS_VENDORID: /* * Reading the MAC address out of the EEPROM on * the NatSemi chip takes a bit more work than * you'd expect. The address spans 4 16-bit words, * with the first word containing only a single bit. * You have to shift everything over one bit to * get it aligned properly. Also, the bits are * stored backwards (the LSB is really the MSB, * and so on) so you have to reverse them in order * to get the MAC address into the form we want. * Why? Who the hell knows. */ { u_int16_t tmp[4]; sis_read_eeprom(sc, (caddr_t)&tmp, NS_EE_NODEADDR, 4, 0); /* Shift everything over one bit. */ tmp[3] = tmp[3] >> 1; tmp[3] |= tmp[2] << 15; tmp[2] = tmp[2] >> 1; tmp[2] |= tmp[1] << 15; tmp[1] = tmp[1] >> 1; tmp[1] |= tmp[0] << 15; /* Now reverse all the bits. */ tmp[3] = sis_reverse(tmp[3]); tmp[2] = sis_reverse(tmp[2]); tmp[1] = sis_reverse(tmp[1]); bcopy((char *)&tmp[1], eaddr, ETHER_ADDR_LEN); } break; case SIS_VENDORID: default: sis_read_eeprom(sc, (caddr_t)&eaddr, SIS_EE_NODEADDR, 3, 0); break; } /* * A SiS chip was detected. Inform the world. 
*/ printf("sis%d: Ethernet address: %6D\n", unit, eaddr, ":"); sc->sis_unit = unit; callout_handle_init(&sc->sis_stat_ch); bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); sc->sis_ldata = contigmalloc(sizeof(struct sis_list_data), M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc->sis_ldata == NULL) { printf("sis%d: no memory for list buffers!\n", unit); bus_teardown_intr(dev, sc->sis_irq, sc->sis_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sis_irq); bus_release_resource(dev, SIS_RES, SIS_RID, sc->sis_res); error = ENXIO; goto fail; } bzero(sc->sis_ldata, sizeof(struct sis_list_data)); ifp = &sc->arpcom.ac_if; ifp->if_softc = sc; ifp->if_unit = unit; ifp->if_name = "sis"; ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = sis_ioctl; ifp->if_output = ether_output; ifp->if_start = sis_start; ifp->if_watchdog = sis_watchdog; ifp->if_init = sis_init; ifp->if_baudrate = 10000000; ifp->if_snd.ifq_maxlen = SIS_TX_LIST_CNT - 1; /* * Do MII setup. */ if (mii_phy_probe(dev, &sc->sis_miibus, sis_ifmedia_upd, sis_ifmedia_sts)) { printf("sis%d: MII without any PHY!\n", sc->sis_unit); bus_teardown_intr(dev, sc->sis_irq, sc->sis_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sis_irq); bus_release_resource(dev, SIS_RES, SIS_RID, sc->sis_res); error = ENXIO; goto fail; } /* * Call MI attach routine. 
*/ ether_ifattach(ifp, ETHER_BPF_SUPPORTED); callout_handle_init(&sc->sis_stat_ch); SIS_UNLOCK(sc); return(0); fail: SIS_UNLOCK(sc); mtx_destroy(&sc->sis_mtx); return(error); } static int sis_detach(dev) device_t dev; { struct sis_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); SIS_LOCK(sc); ifp = &sc->arpcom.ac_if; sis_reset(sc); sis_stop(sc); ether_ifdetach(ifp, ETHER_BPF_SUPPORTED); bus_generic_detach(dev); device_delete_child(dev, sc->sis_miibus); bus_teardown_intr(dev, sc->sis_irq, sc->sis_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sis_irq); bus_release_resource(dev, SIS_RES, SIS_RID, sc->sis_res); contigfree(sc->sis_ldata, sizeof(struct sis_list_data), M_DEVBUF); SIS_UNLOCK(sc); mtx_destroy(&sc->sis_mtx); return(0); } /* * Initialize the transmit descriptors. */ static int sis_list_tx_init(sc) struct sis_softc *sc; { struct sis_list_data *ld; struct sis_ring_data *cd; int i; cd = &sc->sis_cdata; ld = sc->sis_ldata; for (i = 0; i < SIS_TX_LIST_CNT; i++) { if (i == (SIS_TX_LIST_CNT - 1)) { ld->sis_tx_list[i].sis_nextdesc = &ld->sis_tx_list[0]; ld->sis_tx_list[i].sis_next = vtophys(&ld->sis_tx_list[0]); } else { ld->sis_tx_list[i].sis_nextdesc = &ld->sis_tx_list[i + 1]; ld->sis_tx_list[i].sis_next = vtophys(&ld->sis_tx_list[i + 1]); } ld->sis_tx_list[i].sis_mbuf = NULL; ld->sis_tx_list[i].sis_ptr = 0; ld->sis_tx_list[i].sis_ctl = 0; } cd->sis_tx_prod = cd->sis_tx_cons = cd->sis_tx_cnt = 0; return(0); } /* * Initialize the RX descriptors and allocate mbufs for them. Note that * we arrange the descriptors in a closed ring, so that the last descriptor * points back to the first. 
*/ static int sis_list_rx_init(sc) struct sis_softc *sc; { struct sis_list_data *ld; struct sis_ring_data *cd; int i; ld = sc->sis_ldata; cd = &sc->sis_cdata; for (i = 0; i < SIS_RX_LIST_CNT; i++) { if (sis_newbuf(sc, &ld->sis_rx_list[i], NULL) == ENOBUFS) return(ENOBUFS); if (i == (SIS_RX_LIST_CNT - 1)) { ld->sis_rx_list[i].sis_nextdesc = &ld->sis_rx_list[0]; ld->sis_rx_list[i].sis_next = vtophys(&ld->sis_rx_list[0]); } else { ld->sis_rx_list[i].sis_nextdesc = &ld->sis_rx_list[i + 1]; ld->sis_rx_list[i].sis_next = vtophys(&ld->sis_rx_list[i + 1]); } } cd->sis_rx_prod = 0; return(0); } /* * Initialize an RX descriptor and attach an MBUF cluster. */ static int sis_newbuf(sc, c, m) struct sis_softc *sc; struct sis_desc *c; struct mbuf *m; { struct mbuf *m_new = NULL; if (m == NULL) { MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("sis%d: no memory for rx list " "-- packet dropped!\n", sc->sis_unit); return(ENOBUFS); } MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { printf("sis%d: no memory for rx list " "-- packet dropped!\n", sc->sis_unit); m_freem(m_new); return(ENOBUFS); } m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; } else { m_new = m; m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; m_new->m_data = m_new->m_ext.ext_buf; } m_adj(m_new, sizeof(u_int64_t)); c->sis_mbuf = m_new; c->sis_ptr = vtophys(mtod(m_new, caddr_t)); c->sis_ctl = SIS_RXLEN; return(0); } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. 
 */
/*
 * RX completion handler: walk the RX descriptor ring from the current
 * producer index, handing each completed frame to ether_input() and
 * recycling the descriptor's mbuf cluster via sis_newbuf().
 */
static void
sis_rxeof(sc)
	struct sis_softc	*sc;
{
	struct ether_header	*eh;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct sis_desc		*cur_rx;
	int			i, total_len = 0;
	u_int32_t		rxstat;

	ifp = &sc->arpcom.ac_if;
	i = sc->sis_cdata.sis_rx_prod;

	/* Process descriptors until we reach one still owned by the chip. */
	while(SIS_OWNDESC(&sc->sis_ldata->sis_rx_list[i])) {
		struct mbuf		*m0 = NULL;

		cur_rx = &sc->sis_ldata->sis_rx_list[i];
		rxstat = cur_rx->sis_rxstat;
		m = cur_rx->sis_mbuf;
		cur_rx->sis_mbuf = NULL;
		total_len = SIS_RXBYTES(cur_rx);
		SIS_INC(i, SIS_RX_LIST_CNT);

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (!(rxstat & SIS_CMDSTS_PKT_OK)) {
			ifp->if_ierrors++;
			if (rxstat & SIS_RXSTAT_COLL)
				ifp->if_collisions++;
			sis_newbuf(sc, cur_rx, m);
			continue;
		}

		/* No errors; receive the packet. */
		/*
		 * Copy the frame into a fresh mbuf chain (m_devget) so the
		 * original cluster can go straight back into the ring; the
		 * extra ETHER_ALIGN slop keeps the payload longword-aligned
		 * after the m_adj() below.
		 */
		m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
		    total_len + ETHER_ALIGN, 0, ifp, NULL);
		sis_newbuf(sc, cur_rx, m);
		if (m0 == NULL) {
			ifp->if_ierrors++;
			continue;
		}
		m_adj(m0, ETHER_ALIGN);
		m = m0;

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);

		/* Remove header from mbuf and pass it on. */
		m_adj(m, sizeof(struct ether_header));
		ether_input(ifp, eh, m);
	}

	sc->sis_cdata.sis_rx_prod = i;

	return;
}

/*
 * RX error/overflow recovery: drain whatever completed frames remain,
 * then reinitialize the chip (called from sis_intr() on RX_ERR/RX_OFLOW).
 */
void
sis_rxeoc(sc)
	struct sis_softc	*sc;
{
	sis_rxeof(sc);
	sis_init(sc);

	return;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
sis_txeof(sc)
	struct sis_softc	*sc;
{
	struct sis_desc		*cur_tx = NULL;
	struct ifnet		*ifp;
	u_int32_t		idx;

	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->sis_cdata.sis_tx_cons;
	while (idx != sc->sis_cdata.sis_tx_prod) {
		cur_tx = &sc->sis_ldata->sis_tx_list[idx];

		/* Still owned by the chip: stop here. */
		if (SIS_OWNDESC(cur_tx))
			break;

		/*
		 * Middle fragment of a multi-descriptor frame: just
		 * release the slot; stats are counted on the last one.
		 */
		if (cur_tx->sis_ctl & SIS_CMDSTS_MORE) {
			sc->sis_cdata.sis_tx_cnt--;
			SIS_INC(idx, SIS_TX_LIST_CNT);
			continue;
		}

		if (!(cur_tx->sis_ctl & SIS_CMDSTS_PKT_OK)) {
			ifp->if_oerrors++;
			if (cur_tx->sis_txstat & SIS_TXSTAT_EXCESSCOLLS)
				ifp->if_collisions++;
			if (cur_tx->sis_txstat & SIS_TXSTAT_OUTOFWINCOLL)
				ifp->if_collisions++;
		}

		/* Collision count lives in the upper bits of the status. */
		ifp->if_collisions +=
		    (cur_tx->sis_txstat & SIS_TXSTAT_COLLCNT) >> 16;

		ifp->if_opackets++;
		if (cur_tx->sis_mbuf != NULL) {
			m_freem(cur_tx->sis_mbuf);
			cur_tx->sis_mbuf = NULL;
		}

		sc->sis_cdata.sis_tx_cnt--;
		SIS_INC(idx, SIS_TX_LIST_CNT);
		ifp->if_timer = 0;
	}

	sc->sis_cdata.sis_tx_cons = idx;

	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;

	return;
}

/*
 * Once-a-second timer: tick the MII state machine, detect link
 * coming up, and kick the transmit queue once we have a link.
 */
static void
sis_tick(xsc)
	void			*xsc;
{
	struct sis_softc	*sc;
	struct mii_data		*mii;
	struct ifnet		*ifp;

	sc = xsc;
	SIS_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	mii = device_get_softc(sc->sis_miibus);
	mii_tick(mii);

	if (!sc->sis_link) {
		mii_pollstat(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
			sc->sis_link++;
		/* Link just came up: drain any frames queued while down. */
		if (ifp->if_snd.ifq_head != NULL)
			sis_start(ifp);
	}

	/* Re-arm ourselves for one second from now. */
	sc->sis_stat_ch = timeout(sis_tick, sc, hz);

	SIS_UNLOCK(sc);

	return;
}

/*
 * Interrupt handler: dispatch TX/RX completion, RX error recovery
 * and system-error reset based on the ISR bits.
 */
static void
sis_intr(arg)
	void			*arg;
{
	struct sis_softc	*sc;
	struct ifnet		*ifp;
	u_int32_t		status;

	sc = arg;
	SIS_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	/* Supress unwanted interrupts */
	if (!(ifp->if_flags & IFF_UP)) {
		sis_stop(sc);
		SIS_UNLOCK(sc);
		return;
	}

	/* Disable interrupts. */
	CSR_WRITE_4(sc, SIS_IER, 0);

	for (;;) {
		/* Reading the ISR register clears all interrupts. */
		status = CSR_READ_4(sc, SIS_ISR);

		if ((status & SIS_INTRS) == 0)
			break;

		if ( (status & SIS_ISR_TX_DESC_OK) ||
		     (status & SIS_ISR_TX_ERR) ||
		     (status & SIS_ISR_TX_OK) ||
		     (status & SIS_ISR_TX_IDLE) )
			sis_txeof(sc);

		if ( (status & SIS_ISR_RX_DESC_OK) ||
		     (status & SIS_ISR_RX_OK) )
			sis_rxeof(sc);

		if ( (status & SIS_ISR_RX_ERR) ||
		     (status & SIS_ISR_RX_OFLOW) ) {
			sis_rxeoc(sc);
		}

		if (status & SIS_ISR_SYSERR) {
			sis_reset(sc);
			sis_init(sc);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, SIS_IER, 1);

	if (ifp->if_snd.ifq_head != NULL)
		sis_start(ifp);

	SIS_UNLOCK(sc);

	return;
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
sis_encap(sc, m_head, txidx)
	struct sis_softc	*sc;
	struct mbuf		*m_head;
	u_int32_t		*txidx;
{
	struct sis_desc		*f = NULL;
	struct mbuf		*m;
	int			frag, cur, cnt = 0;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	cur = frag = *txidx;

	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			/* Leave at least two free descriptors in the ring. */
			if ((SIS_TX_LIST_CNT -
			    (sc->sis_cdata.sis_tx_cnt + cnt)) < 2)
				return(ENOBUFS);
			f = &sc->sis_ldata->sis_tx_list[frag];
			f->sis_ctl = SIS_CMDSTS_MORE | m->m_len;
			f->sis_ptr = vtophys(mtod(m, vm_offset_t));
			/*
			 * Ownership of the first descriptor is withheld
			 * until the whole chain is set up (handed over
			 * below), so the chip never sees a partial frame.
			 */
			if (cnt != 0)
				f->sis_ctl |= SIS_CMDSTS_OWN;
			cur = frag;
			SIS_INC(frag, SIS_TX_LIST_CNT);
			cnt++;
		}
	}

	if (m != NULL)
		return(ENOBUFS);

	sc->sis_ldata->sis_tx_list[cur].sis_mbuf = m_head;
	sc->sis_ldata->sis_tx_list[cur].sis_ctl &= ~SIS_CMDSTS_MORE;
	sc->sis_ldata->sis_tx_list[*txidx].sis_ctl |= SIS_CMDSTS_OWN;
	sc->sis_cdata.sis_tx_cnt += cnt;
	*txidx = frag;

	return(0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
static void
sis_start(ifp)
	struct ifnet		*ifp;
{
	struct sis_softc	*sc;
	struct mbuf		*m_head = NULL;
	u_int32_t		idx;

	sc = ifp->if_softc;
	SIS_LOCK(sc);

	/* Nothing to do until the link is up (see sis_tick()). */
	if (!sc->sis_link) {
		SIS_UNLOCK(sc);
		return;
	}

	idx = sc->sis_cdata.sis_tx_prod;

	if (ifp->if_flags & IFF_OACTIVE) {
		SIS_UNLOCK(sc);
		return;
	}

	/* Keep dequeueing until the ring slot at idx is occupied. */
	while(sc->sis_ldata->sis_tx_list[idx].sis_mbuf == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Ring full: put the frame back and mark us busy. */
		if (sis_encap(sc, m_head, &idx)) {
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp, m_head);

	}

	/* Transmit */
	sc->sis_cdata.sis_tx_prod = idx;
	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	SIS_UNLOCK(sc);

	return;
}

/*
 * Bring the interface up: program the station address and RX filter,
 * rebuild the descriptor rings, configure TX/RX for the current media,
 * enable interrupts and start the stats/link timer.
 */
static void
sis_init(xsc)
	void			*xsc;
{
	struct sis_softc	*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct mii_data		*mii;

	SIS_LOCK(sc);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	sis_stop(sc);

	mii = device_get_softc(sc->sis_miibus);

	/* Set MAC address */
	/*
	 * The filter address registers take the station address one
	 * 16-bit word at a time; NatSemi and SiS use different
	 * register selector values.
	 */
	if (sc->sis_type == SIS_TYPE_83815) {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR0);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)sc->arpcom.ac_enaddr)[0]);
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR1);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)sc->arpcom.ac_enaddr)[1]);
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR2);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)sc->arpcom.ac_enaddr)[2]);
	} else {
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)sc->arpcom.ac_enaddr)[0]);
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)sc->arpcom.ac_enaddr)[1]);
		CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2);
		CSR_WRITE_4(sc, SIS_RXFILT_DATA,
		    ((u_int16_t *)sc->arpcom.ac_enaddr)[2]);
	}

	/* Init circular RX list. */
	if (sis_list_rx_init(sc) == ENOBUFS) {
		printf("sis%d: initialization failed: no "
			"memory for rx buffers\n", sc->sis_unit);
		sis_stop(sc);
		SIS_UNLOCK(sc);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	sis_list_tx_init(sc);

	/*
	 * For the NatSemi chip, we have to explicitly enable the
	 * reception of ARP frames, as well as turn on the 'perfect
	 * match' filter where we store the station address, otherwise
	 * we won't receive unicasts meant for this host.
	 */
	if (sc->sis_type == SIS_TYPE_83815) {
		SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_ARP);
		SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_PERFECT);
	}

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS);
	} else {
		SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS);
	}

	/*
	 * Set the capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD);
	} else {
		SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD);
	}

	/*
	 * Load the multicast filter.
	 */
	if (sc->sis_type == SIS_TYPE_83815)
		sis_setmulti_ns(sc);
	else
		sis_setmulti_sis(sc);

	/* Turn the receive filter on */
	SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ENABLE);

	/*
	 * Load the address of the RX and TX lists.
	 */
	CSR_WRITE_4(sc, SIS_RX_LISTPTR,
	    vtophys(&sc->sis_ldata->sis_rx_list[0]));
	CSR_WRITE_4(sc, SIS_TX_LISTPTR,
	    vtophys(&sc->sis_ldata->sis_tx_list[0]));

	/* Set RX configuration */
	CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG);

	/* Set TX configuration */
	if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T) {
		CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_10);
	} else {
		CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100);
	}

	/* Set full/half duplex mode. */
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		SIS_SETBIT(sc, SIS_TX_CFG,
		    (SIS_TXCFG_IGN_HBEAT|SIS_TXCFG_IGN_CARR));
		SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
	} else {
		SIS_CLRBIT(sc, SIS_TX_CFG,
		    (SIS_TXCFG_IGN_HBEAT|SIS_TXCFG_IGN_CARR));
		SIS_CLRBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS);
	}

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, SIS_IMR, SIS_INTRS);
	CSR_WRITE_4(sc, SIS_IER, 1);

	/* Enable receiver and transmitter. */
	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE|SIS_CSR_RX_DISABLE);
	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);

#ifdef notdef
	mii_mediachg(mii);
#endif

	/*
	 * Page 75 of the DP83815 manual recommends the
	 * following register settings "for optimum
	 * performance." Note however that at least three
	 * of the registers are listed as "reserved" in
	 * the register map, so who knows what they do.
	 */
	if (sc->sis_type == SIS_TYPE_83815) {
		CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001);
		CSR_WRITE_4(sc, NS_PHY_CR, 0x189C);
		CSR_WRITE_4(sc, NS_PHY_TDATA, 0x0000);
		CSR_WRITE_4(sc, NS_PHY_DSPCFG, 0x5040);
		CSR_WRITE_4(sc, NS_PHY_SDCFG, 0x008C);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	sc->sis_stat_ch = timeout(sis_tick, sc, hz);

	SIS_UNLOCK(sc);

	return;
}

/*
 * Set media options.
 */
static int
sis_ifmedia_upd(ifp)
	struct ifnet		*ifp;
{
	struct sis_softc	*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;

	mii = device_get_softc(sc->sis_miibus);
	/* Force a fresh autonegotiation; link state is re-learned in sis_tick(). */
	sc->sis_link = 0;
	if (mii->mii_instance) {
		struct mii_softc	*miisc;
		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
		    miisc = LIST_NEXT(miisc, mii_list))
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return(0);
}

/*
 * Report current media status.
 */
static void
sis_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct sis_softc	*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;

	mii = device_get_softc(sc->sis_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

/*
 * Handle socket ioctls: address/MTU via ether_ioctl(), up/down
 * transitions, multicast filter reloads and media changes.
 */
static int
sis_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct sis_softc	*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int			error = 0;

	SIS_LOCK(sc);

	switch(command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
	case SIOCSIFMTU:
		error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			sis_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				sis_stop(sc);
		}
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (sc->sis_type == SIS_TYPE_83815)
			sis_setmulti_ns(sc);
		else
			sis_setmulti_sis(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->sis_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	SIS_UNLOCK(sc);

	return(error);
}

/*
 * TX watchdog: fires when a transmit set in sis_start() never
 * completed; resets and reinitializes the chip, then requeues.
 */
static void
sis_watchdog(ifp)
	struct ifnet *ifp;
{
	struct sis_softc *sc;

	sc = ifp->if_softc;

	SIS_LOCK(sc);
	ifp->if_oerrors++;
	printf("sis%d: watchdog timeout\n", sc->sis_unit);

	sis_stop(sc);
	sis_reset(sc);
	sis_init(sc);

	if (ifp->if_snd.ifq_head != NULL)
		sis_start(ifp);

	SIS_UNLOCK(sc);

	return;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
sis_stop(sc)
	struct sis_softc	*sc;
{
	register int		i;
	struct ifnet		*ifp;

	SIS_LOCK(sc);
	ifp = &sc->arpcom.ac_if;
	ifp->if_timer = 0;

	untimeout(sis_tick, sc, sc->sis_stat_ch);
	CSR_WRITE_4(sc, SIS_IER, 0);
	CSR_WRITE_4(sc, SIS_IMR, 0);
	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE|SIS_CSR_RX_DISABLE);
	DELAY(1000);
	CSR_WRITE_4(sc, SIS_TX_LISTPTR, 0);
	CSR_WRITE_4(sc, SIS_RX_LISTPTR, 0);

	sc->sis_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < SIS_RX_LIST_CNT; i++) {
		if (sc->sis_ldata->sis_rx_list[i].sis_mbuf != NULL) {
			m_freem(sc->sis_ldata->sis_rx_list[i].sis_mbuf);
			sc->sis_ldata->sis_rx_list[i].sis_mbuf = NULL;
		}
	}
	bzero((char *)&sc->sis_ldata->sis_rx_list,
		sizeof(sc->sis_ldata->sis_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < SIS_TX_LIST_CNT; i++) {
		if (sc->sis_ldata->sis_tx_list[i].sis_mbuf != NULL) {
			m_freem(sc->sis_ldata->sis_tx_list[i].sis_mbuf);
			sc->sis_ldata->sis_tx_list[i].sis_mbuf = NULL;
		}
	}
	bzero((char *)&sc->sis_ldata->sis_tx_list,
		sizeof(sc->sis_ldata->sis_tx_list));

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	SIS_UNLOCK(sc);

	return;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
sis_shutdown(dev)
	device_t		dev;
{
	struct sis_softc	*sc;

	sc = device_get_softc(dev);
	SIS_LOCK(sc);
	sis_reset(sc);
	sis_stop(sc);
	SIS_UNLOCK(sc);

	return;
}
Index: head/sys/pci/if_sk.c
===================================================================
--- head/sys/pci/if_sk.c	(revision 71961)
+++ head/sys/pci/if_sk.c	(revision 71962)
@@ -1,2236 +1,2235 @@
/*
 * Copyright (c) 1997, 1998, 1999, 2000
 *	Bill Paul . All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4.
*  Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports
 * the SK-984x series adapters, both single port and dual port.
 * References:
 * 	The XaQti XMAC II datasheet,
 * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
 *	The SysKonnect GEnesis manual, http://www.syskonnect.com
 *
 * Note: XaQti has been aquired by Vitesse, and Vitesse does not have the
 * XMAC II datasheet online. I have put my copy at people.freebsd.org as a
 * convenience to others until Vitesse corrects this problem:
 *
 * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf
 *
 * Written by Bill Paul
 * Department of Electrical Engineering
 * Columbia University, New York City
 */

/*
 * The SysKonnect gigabit ethernet adapters consist of two main
 * components: the SysKonnect GEnesis controller chip and the XaQti Corp.
 * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC
 * components and a PHY while the GEnesis controller provides a PCI
 * interface with DMA support. Each card may have between 512K and
 * 2MB of SRAM on board depending on the configuration.
 *
 * The SysKonnect GEnesis controller can have either one or two XMAC
 * chips connected to it, allowing single or dual port NIC configurations.
 * SysKonnect has the distinction of being the only vendor on the market
 * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs,
 * dual DMA queues, packet/MAC/transmit arbiters and direct access to the
 * XMAC registers. This driver takes advantage of these features to allow
 * both XMACs to operate as independent interfaces.
 */

/*
 * NOTE(review): the header names on the following #include lines were
 * lost when this chunk was extracted; restore them from the original
 * revision of if_sk.c before building.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include			/* for vtophys */
#include			/* for vtophys */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#define SK_USEIOSPACE

#include
#include

MODULE_DEPEND(sk, miibus, 1, 1, 1);

/* "controller miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#ifndef lint
static const char rcsid[] =
  "$FreeBSD$";
#endif

/* Supported PCI vendor/device ID table, terminated by a NULL entry. */
static struct sk_type sk_devs[] = {
	{ SK_VENDORID, SK_DEVICEID_GE, "SysKonnect Gigabit Ethernet" },
	{ 0, 0, NULL }
};

static int sk_probe		__P((device_t));
static int sk_attach		__P((device_t));
static int sk_detach		__P((device_t));
static int sk_detach_xmac	__P((device_t));
static int sk_probe_xmac	__P((device_t));
static int sk_attach_xmac	__P((device_t));
static void sk_tick		__P((void *));
static void sk_intr		__P((void *));
static void sk_intr_xmac	__P((struct sk_if_softc *));
static void sk_intr_bcom	__P((struct sk_if_softc *));
static void sk_rxeof		__P((struct sk_if_softc *));
static void sk_txeof		__P((struct sk_if_softc *));
static int sk_encap		__P((struct sk_if_softc *, struct mbuf *,
					u_int32_t *));
static void sk_start		__P((struct ifnet *));
static int sk_ioctl		__P((struct ifnet *, u_long, caddr_t));
static void sk_init		__P((void *));
static void sk_init_xmac	__P((struct sk_if_softc *));
static void sk_stop		__P((struct sk_if_softc *));
static void sk_watchdog		__P((struct ifnet *));
static void sk_shutdown		__P((device_t));
static int sk_ifmedia_upd	__P((struct ifnet *));
static void sk_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));
static void sk_reset		__P((struct sk_softc *));
static int sk_newbuf		__P((struct sk_if_softc *,
					struct sk_chain *, struct mbuf *));
static int sk_alloc_jumbo_mem	__P((struct sk_if_softc *));
static void *sk_jalloc		__P((struct sk_if_softc *));
static void sk_jfree		__P((caddr_t, void *));
static int sk_init_rx_ring	__P((struct sk_if_softc *));
static void sk_init_tx_ring	__P((struct sk_if_softc *));
static u_int32_t sk_win_read_4	__P((struct sk_softc *, int));
static u_int16_t sk_win_read_2	__P((struct sk_softc *, int));
static u_int8_t sk_win_read_1	__P((struct sk_softc *, int));
static void sk_win_write_4	__P((struct sk_softc *, int, u_int32_t));
static void sk_win_write_2	__P((struct sk_softc *, int, u_int32_t));
static void sk_win_write_1	__P((struct sk_softc *, int, u_int32_t));
static u_int8_t sk_vpd_readbyte	__P((struct sk_softc *, int));
static void sk_vpd_read_res	__P((struct sk_softc *,
					struct vpd_res *, int));
static void sk_vpd_read		__P((struct sk_softc *));
static int sk_miibus_readreg	__P((device_t, int, int));
static int sk_miibus_writereg	__P((device_t, int, int, int));
static void sk_miibus_statchg	__P((device_t));
static u_int32_t sk_calchash	__P((caddr_t));
static void sk_setfilt		__P((struct sk_if_softc *, caddr_t, int));
static void sk_setmulti		__P((struct sk_if_softc *));

#ifdef SK_USEIOSPACE
#define SK_RES		SYS_RES_IOPORT
#define SK_RID		SK_PCI_LOIO
#else
#define SK_RES		SYS_RES_MEMORY
#define SK_RID		SK_PCI_LOMEM
#endif

/*
 * Note that we have newbus methods for both the GEnesis controller
 * itself and the XMAC(s). The XMACs are children of the GEnesis, and
 * the miibus code is a child of the XMACs. We need to do it this way
 * so that the miibus drivers can access the PHY registers on the
 * right PHY. It's not quite what I had in mind, but it's the only
 * design that achieves the desired effect.
 */
static device_method_t skc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sk_probe),
	DEVMETHOD(device_attach,	sk_attach),
	DEVMETHOD(device_detach,	sk_detach),
	DEVMETHOD(device_shutdown,	sk_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ 0, 0 }
};

static driver_t skc_driver = {
	"skc",
	skc_methods,
	sizeof(struct sk_softc)
};

static devclass_t skc_devclass;

static device_method_t sk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sk_probe_xmac),
	DEVMETHOD(device_attach,	sk_attach_xmac),
	DEVMETHOD(device_detach,	sk_detach_xmac),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	sk_miibus_readreg),
	DEVMETHOD(miibus_writereg,	sk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	sk_miibus_statchg),

	{ 0, 0 }
};

static driver_t sk_driver = {
	"sk",
	sk_methods,
	sizeof(struct sk_if_softc)
};

static devclass_t sk_devclass;

DRIVER_MODULE(if_sk, pci, skc_driver, skc_devclass, 0, 0);
DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0);
DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);

/* Read-modify-write helpers for direct CSR and windowed registers. */
#define SK_SETBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)

#define SK_CLRBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)

#define SK_WIN_SETBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)

#define SK_WIN_CLRBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)

#define SK_WIN_SETBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)

#define SK_WIN_CLRBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)

/*
 * Windowed register access: select the window via the RAP register,
 * then access the register at its offset within the window.
 */
static u_int32_t
sk_win_read_4(sc, reg)
	struct sk_softc		*sc;
	int			reg;
{
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
}

static u_int16_t
sk_win_read_2(sc, reg)
	struct sk_softc		*sc;
	int			reg;
{
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
}

static u_int8_t
sk_win_read_1(sc, reg)
	struct sk_softc		*sc;
	int			reg;
{
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
}

static void
sk_win_write_4(sc, reg, val)
	struct sk_softc		*sc;
	int			reg;
	u_int32_t		val;
{
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
	return;
}

static void
sk_win_write_2(sc, reg, val)
	struct sk_softc		*sc;
	int			reg;
	u_int32_t		val;
{
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), (u_int32_t)val);
	return;
}

static void
sk_win_write_1(sc, reg, val)
	struct sk_softc		*sc;
	int			reg;
	u_int32_t		val;
{
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
	return;
}

/*
 * The VPD EEPROM contains Vital Product Data, as suggested in
 * the PCI 2.1 specification. The VPD data is separared into areas
 * denoted by resource IDs. The SysKonnect VPD contains an ID string
 * resource (the name of the adapter), a read-only area resource
 * containing various key/data fields and a read/write area which
 * can be used to store asset management information or log messages.
 * We read the ID string and read-only into buffers attached to
 * the controller softc structure for later use. At the moment,
 * we only use the ID string during sk_attach().
*/ static u_int8_t sk_vpd_readbyte(sc, addr) struct sk_softc *sc; int addr; { int i; sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr); for (i = 0; i < SK_TIMEOUT; i++) { DELAY(1); if (sk_win_read_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG) break; } if (i == SK_TIMEOUT) return(0); return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA))); } static void sk_vpd_read_res(sc, res, addr) struct sk_softc *sc; struct vpd_res *res; int addr; { int i; u_int8_t *ptr; ptr = (u_int8_t *)res; for (i = 0; i < sizeof(struct vpd_res); i++) ptr[i] = sk_vpd_readbyte(sc, i + addr); return; } static void sk_vpd_read(sc) struct sk_softc *sc; { int pos = 0, i; struct vpd_res res; if (sc->sk_vpd_prodname != NULL) free(sc->sk_vpd_prodname, M_DEVBUF); if (sc->sk_vpd_readonly != NULL) free(sc->sk_vpd_readonly, M_DEVBUF); sc->sk_vpd_prodname = NULL; sc->sk_vpd_readonly = NULL; sk_vpd_read_res(sc, &res, pos); if (res.vr_id != VPD_RES_ID) { printf("skc%d: bad VPD resource id: expected %x got %x\n", sc->sk_unit, VPD_RES_ID, res.vr_id); return; } pos += sizeof(res); sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT); for (i = 0; i < res.vr_len; i++) sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos); sc->sk_vpd_prodname[i] = '\0'; pos += i; sk_vpd_read_res(sc, &res, pos); if (res.vr_id != VPD_RES_READ) { printf("skc%d: bad VPD resource id: expected %x got %x\n", sc->sk_unit, VPD_RES_READ, res.vr_id); return; } pos += sizeof(res); sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT); for (i = 0; i < res.vr_len + 1; i++) sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos); return; } static int sk_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct sk_if_softc *sc_if; int i; sc_if = device_get_softc(dev); if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0) return(0); SK_IF_LOCK(sc_if); SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8)); SK_XM_READ_2(sc_if, XM_PHY_DATA); if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) { for (i = 0; i < SK_TIMEOUT; i++) { 
DELAY(1); if (SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYDATARDY) break; } if (i == SK_TIMEOUT) { printf("sk%d: phy failed to come ready\n", sc_if->sk_unit); return(0); } } DELAY(1); i = SK_XM_READ_2(sc_if, XM_PHY_DATA); SK_IF_UNLOCK(sc_if); return(i); } static int sk_miibus_writereg(dev, phy, reg, val) device_t dev; int phy, reg, val; { struct sk_if_softc *sc_if; int i; sc_if = device_get_softc(dev); SK_IF_LOCK(sc_if); SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8)); for (i = 0; i < SK_TIMEOUT; i++) { if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY)) break; } if (i == SK_TIMEOUT) { printf("sk%d: phy failed to come ready\n", sc_if->sk_unit); return(ETIMEDOUT); } SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val); for (i = 0; i < SK_TIMEOUT; i++) { DELAY(1); if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY)) break; } SK_IF_UNLOCK(sc_if); if (i == SK_TIMEOUT) printf("sk%d: phy write timed out\n", sc_if->sk_unit); return(0); } static void sk_miibus_statchg(dev) device_t dev; { struct sk_if_softc *sc_if; struct mii_data *mii; sc_if = device_get_softc(dev); mii = device_get_softc(sc_if->sk_miibus); SK_IF_LOCK(sc_if); /* * If this is a GMII PHY, manually set the XMAC's * duplex mode accordingly. */ if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) { if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); } else { SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX); } } SK_IF_UNLOCK(sc_if); return; } #define SK_POLY 0xEDB88320 #define SK_BITS 6 static u_int32_t sk_calchash(addr) caddr_t addr; { u_int32_t idx, bit, data, crc; /* Compute CRC for the address value. */ crc = 0xFFFFFFFF; /* initial value */ for (idx = 0; idx < 6; idx++) { for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) crc = (crc >> 1) ^ (((crc ^ data) & 1) ? 
SK_POLY : 0); } return (~crc & ((1 << SK_BITS) - 1)); } static void sk_setfilt(sc_if, addr, slot) struct sk_if_softc *sc_if; caddr_t addr; int slot; { int base; base = XM_RXFILT_ENTRY(slot); SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0])); SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2])); SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4])); return; } static void sk_setmulti(sc_if) struct sk_if_softc *sc_if; { struct ifnet *ifp; u_int32_t hashes[2] = { 0, 0 }; int h, i; struct ifmultiaddr *ifma; u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 }; ifp = &sc_if->arpcom.ac_if; /* First, zot all the existing filters. */ for (i = 1; i < XM_RXFILT_MAX; i++) sk_setfilt(sc_if, (caddr_t)&dummy, i); SK_XM_WRITE_4(sc_if, XM_MAR0, 0); SK_XM_WRITE_4(sc_if, XM_MAR2, 0); /* Now program new ones. */ if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { hashes[0] = 0xFFFFFFFF; hashes[1] = 0xFFFFFFFF; } else { i = 1; /* First find the tail of the list. */ - for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; - ifma = ifma->ifma_link.le_next) { + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_link.le_next == NULL) break; } /* Now traverse the list backwards. */ for (; ifma != NULL && ifma != (void *)&ifp->if_multiaddrs; ifma = (struct ifmultiaddr *)ifma->ifma_link.le_prev) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; /* * Program the first XM_RXFILT_MAX multicast groups * into the perfect filter. For all others, * use the hash table. 
*/ if (i < XM_RXFILT_MAX) { sk_setfilt(sc_if, LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i); i++; continue; } h = sk_calchash( LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); } } SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH| XM_MODE_RX_USE_PERFECT); SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]); SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]); return; } static int sk_init_rx_ring(sc_if) struct sk_if_softc *sc_if; { struct sk_chain_data *cd; struct sk_ring_data *rd; int i; cd = &sc_if->sk_cdata; rd = sc_if->sk_rdata; bzero((char *)rd->sk_rx_ring, sizeof(struct sk_rx_desc) * SK_RX_RING_CNT); for (i = 0; i < SK_RX_RING_CNT; i++) { cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i]; if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS) return(ENOBUFS); if (i == (SK_RX_RING_CNT - 1)) { cd->sk_rx_chain[i].sk_next = &cd->sk_rx_chain[0]; rd->sk_rx_ring[i].sk_next = vtophys(&rd->sk_rx_ring[0]); } else { cd->sk_rx_chain[i].sk_next = &cd->sk_rx_chain[i + 1]; rd->sk_rx_ring[i].sk_next = vtophys(&rd->sk_rx_ring[i + 1]); } } sc_if->sk_cdata.sk_rx_prod = 0; sc_if->sk_cdata.sk_rx_cons = 0; return(0); } static void sk_init_tx_ring(sc_if) struct sk_if_softc *sc_if; { struct sk_chain_data *cd; struct sk_ring_data *rd; int i; cd = &sc_if->sk_cdata; rd = sc_if->sk_rdata; bzero((char *)sc_if->sk_rdata->sk_tx_ring, sizeof(struct sk_tx_desc) * SK_TX_RING_CNT); for (i = 0; i < SK_TX_RING_CNT; i++) { cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i]; if (i == (SK_TX_RING_CNT - 1)) { cd->sk_tx_chain[i].sk_next = &cd->sk_tx_chain[0]; rd->sk_tx_ring[i].sk_next = vtophys(&rd->sk_tx_ring[0]); } else { cd->sk_tx_chain[i].sk_next = &cd->sk_tx_chain[i + 1]; rd->sk_tx_ring[i].sk_next = vtophys(&rd->sk_tx_ring[i + 1]); } } sc_if->sk_cdata.sk_tx_prod = 0; sc_if->sk_cdata.sk_tx_cons = 0; sc_if->sk_cdata.sk_tx_cnt = 0; return; } static int sk_newbuf(sc_if, c, m) struct sk_if_softc *sc_if; struct sk_chain *c; struct mbuf *m; { struct 
mbuf *m_new = NULL; struct sk_rx_desc *r; if (m == NULL) { caddr_t *buf = NULL; MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("sk%d: no memory for rx list -- " "packet dropped!\n", sc_if->sk_unit); return(ENOBUFS); } /* Allocate the jumbo buffer */ buf = sk_jalloc(sc_if); if (buf == NULL) { m_freem(m_new); #ifdef SK_VERBOSE printf("sk%d: jumbo allocation failed " "-- packet dropped!\n", sc_if->sk_unit); #endif return(ENOBUFS); } /* Attach the buffer to the mbuf */ MEXTADD(m_new, buf, SK_JLEN, sk_jfree, (struct sk_if_softc *)sc_if, 0, EXT_NET_DRV); m_new->m_data = (void *)buf; m_new->m_pkthdr.len = m_new->m_len = SK_JLEN; } else { /* * We're re-using a previously allocated mbuf; * be sure to re-init pointers and lengths to * default values. */ m_new = m; m_new->m_len = m_new->m_pkthdr.len = SK_JLEN; m_new->m_data = m_new->m_ext.ext_buf; } /* * Adjust alignment so packet payload begins on a * longword boundary. Mandatory for Alpha, useful on * x86 too. */ m_adj(m_new, ETHER_ALIGN); r = c->sk_desc; c->sk_mbuf = m_new; r->sk_data_lo = vtophys(mtod(m_new, caddr_t)); r->sk_ctl = m_new->m_len | SK_RXSTAT; return(0); } /* * Allocate jumbo buffer storage. The SysKonnect adapters support * "jumbograms" (9K frames), although SysKonnect doesn't currently * use them in their drivers. In order for us to use them, we need * large 9K receive buffers, however standard mbuf clusters are only * 2048 bytes in size. Consequently, we need to allocate and manage * our own jumbo buffer pool. Fortunately, this does not require an * excessive amount of additional code. */ static int sk_alloc_jumbo_mem(sc_if) struct sk_if_softc *sc_if; { caddr_t ptr; register int i; struct sk_jpool_entry *entry; /* Grab a big chunk o' storage. 
*/ sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc_if->sk_cdata.sk_jumbo_buf == NULL) { printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit); return(ENOBUFS); } SLIST_INIT(&sc_if->sk_jfree_listhead); SLIST_INIT(&sc_if->sk_jinuse_listhead); /* * Now divide it up into 9K pieces and save the addresses * in an array. */ ptr = sc_if->sk_cdata.sk_jumbo_buf; for (i = 0; i < SK_JSLOTS; i++) { sc_if->sk_cdata.sk_jslots[i] = ptr; ptr += SK_JLEN; entry = malloc(sizeof(struct sk_jpool_entry), M_DEVBUF, M_NOWAIT); if (entry == NULL) { free(sc_if->sk_cdata.sk_jumbo_buf, M_DEVBUF); sc_if->sk_cdata.sk_jumbo_buf = NULL; printf("sk%d: no memory for jumbo " "buffer queue!\n", sc_if->sk_unit); return(ENOBUFS); } entry->slot = i; SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries); } return(0); } /* * Allocate a jumbo buffer. */ static void *sk_jalloc(sc_if) struct sk_if_softc *sc_if; { struct sk_jpool_entry *entry; entry = SLIST_FIRST(&sc_if->sk_jfree_listhead); if (entry == NULL) { #ifdef SK_VERBOSE printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit); #endif return(NULL); } SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries); SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries); return(sc_if->sk_cdata.sk_jslots[entry->slot]); } /* * Release a jumbo buffer. */ static void sk_jfree(buf, args) caddr_t buf; void *args; { struct sk_if_softc *sc_if; int i; struct sk_jpool_entry *entry; /* Extract the softc struct pointer. 
 */
	/*
	 * NOTE(review): tail of sk_jfree(); its declaration and the frees
	 * of 'buf'/'args' context are above this chunk.
	 */
	sc_if = (struct sk_if_softc *)args;

	if (sc_if == NULL)
		panic("sk_jfree: didn't get softc pointer!");

	/* calculate the slot this buffer belongs to */
	i = ((vm_offset_t)buf
	    - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;

	if ((i < 0) || (i >= SK_JSLOTS))
		panic("sk_jfree: asked to free buffer that we don't manage!");

	/*
	 * Recycle the first in-use pool entry onto the free list and
	 * record which jumbo slot it now describes.
	 */
	entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
	if (entry == NULL)
		panic("sk_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries);

	return;
}

/*
 * Set media options: reinitialize the interface, then let the
 * PHY layer actually switch media.
 */
static int sk_ifmedia_upd(ifp)
	struct ifnet		*ifp;
{
	struct sk_if_softc	*sc_if;
	struct mii_data		*mii;

	sc_if = ifp->if_softc;
	mii = device_get_softc(sc_if->sk_miibus);
	sk_init(sc_if);
	mii_mediachg(mii);

	return(0);
}

/*
 * Report current media status by polling the PHY.
 */
static void sk_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct sk_if_softc	*sc_if;
	struct mii_data		*mii;

	sc_if = ifp->if_softc;
	mii = device_get_softc(sc_if->sk_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

/*
 * Per-interface ioctl handler.  Runs under the per-interface lock;
 * promiscuous mode transitions are applied directly to the XMAC
 * without a full reinit.
 */
static int sk_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct sk_if_softc	*sc_if = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			error = 0;
	struct mii_data		*mii;

	SK_IF_LOCK(sc_if);

	switch(command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		/* Anything up to the jumbo MTU is accepted. */
		if (ifr->ifr_mtu > SK_JUMBO_MTU)
			error = EINVAL;
		else {
			ifp->if_mtu = ifr->ifr_mtu;
			sk_init(sc_if);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only IFF_PROMISC changed while running,
			 * toggle the RX_PROMISC bit and reload the
			 * multicast filter instead of reinitializing.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc_if->sk_if_flags & IFF_PROMISC)) {
				SK_XM_SETBIT_4(sc_if, XM_MODE,
				    XM_MODE_RX_PROMISC);
				sk_setmulti(sc_if);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc_if->sk_if_flags & IFF_PROMISC) {
				SK_XM_CLRBIT_4(sc_if, XM_MODE,
				    XM_MODE_RX_PROMISC);
				sk_setmulti(sc_if);
			} else
				sk_init(sc_if);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				sk_stop(sc_if);
		}
		/* Remember the flags so the next delta can be computed. */
		sc_if->sk_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		sk_setmulti(sc_if);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc_if->sk_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	SK_IF_UNLOCK(sc_if);

	return(error);
}

/*
 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int sk_probe(dev)
	device_t		dev;
{
	struct sk_type		*t;

	t = sk_devs;

	while(t->sk_name != NULL) {
		if ((pci_get_vendor(dev) == t->sk_vid) &&
		    (pci_get_device(dev) == t->sk_did)) {
			device_set_desc(dev, t->sk_name);
			return(0);
		}
		t++;
	}

	return(ENXIO);
}

/*
 * Force the GEnesis into reset, then bring it out of reset.
 */
static void sk_reset(sc)
	struct sk_softc		*sc;
{
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_MASTER_RESET);
	DELAY(1000);
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_SW_UNRESET);
	CSR_WRITE_4(sc, SK_CSR, SK_CSR_MASTER_UNRESET);

	/* Configure packet arbiter */
	sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
	sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
	sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
	sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
	sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);

	/* Enable RAM interface */
	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents 18.825ns, so to specify a timeout in
	 * microseconds, we have to multiply by 54.
 */
	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(200));
	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);

	return;
}

static int sk_probe_xmac(dev)
	device_t		dev;
{
	/*
	 * Not much to do here. We always know there will be
	 * at least one XMAC present, and if there are two,
	 * sk_attach() will create a second device instance
	 * for us.
	 */
	device_set_desc(dev, "XaQti Corp. XMAC II");

	return(0);
}

/*
 * Each XMAC chip is attached as a separate logical IP interface.
 * Single port cards will have only one logical interface of course.
 */
static int sk_attach_xmac(dev)
	device_t		dev;
{
	struct sk_softc		*sc;
	struct sk_if_softc	*sc_if;
	struct ifnet		*ifp;
	int			i, port;

	if (dev == NULL)
		return(EINVAL);

	sc_if = device_get_softc(dev);
	sc = device_get_softc(device_get_parent(dev));
	SK_LOCK(sc);
	/* Port number was stashed in the ivars by sk_attach(). */
	port = *(int *)device_get_ivars(dev);
	free(device_get_ivars(dev), M_DEVBUF);
	device_set_ivars(dev, NULL);
	/*
	 * NOTE(review): this first assignment is a dead store -- the
	 * bzero() just below wipes the whole softc and sk_dev is
	 * assigned again immediately after.
	 */
	sc_if->sk_dev = dev;

	bzero((char *)sc_if, sizeof(struct sk_if_softc));
	sc_if->sk_dev = dev;
	sc_if->sk_unit = device_get_unit(dev);
	sc_if->sk_port = port;
	sc_if->sk_softc = sc;
	sc->sk_if[port] = sc_if;
	if (port == SK_PORT_A)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0;
	if (port == SK_PORT_B)
		sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1;

	/*
	 * Get station address for this interface. Note that
	 * dual port cards actually come with three station
	 * addresses: one for each port, plus an extra. The
	 * extra one is used by the SysKonnect driver software
	 * as a 'virtual' station address for when both ports
	 * are operating in failover mode. Currently we don't
	 * use this extra address.
	 */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sc_if->arpcom.ac_enaddr[i] =
		    sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i);

	printf("sk%d: Ethernet address: %6D\n",
	    sc_if->sk_unit, sc_if->arpcom.ac_enaddr, ":");

	/*
	 * Set up RAM buffer addresses. The NIC will have a certain
	 * amount of SRAM on it, somewhere between 512K and 2MB. We
	 * need to divide this up a) between the transmitter and
	 * receiver and b) between the two XMACs, if this is a
	 * dual port NIC. Our algotithm is to divide up the memory
	 * evenly so that everyone gets a fair share.
	 */
	if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) {
		u_int32_t		chunk, val;

		/* Single MAC: split RAM in half between RX and TX. */
		chunk = sc->sk_ramsize / 2;
		val = sc->sk_rboff / sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	} else {
		u_int32_t		chunk, val;

		/* Dual MAC: quarter the RAM, offset by our port. */
		chunk = sc->sk_ramsize / 4;
		val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) /
		    sizeof(u_int64_t);
		sc_if->sk_rx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_rx_ramend = val - 1;
		sc_if->sk_tx_ramstart = val;
		val += (chunk / sizeof(u_int64_t));
		sc_if->sk_tx_ramend = val - 1;
	}

	/* Read and save PHY type and set PHY address */
	sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF;
	switch(sc_if->sk_phytype) {
	case SK_PHYTYPE_XMAC:
		sc_if->sk_phyaddr = SK_PHYADDR_XMAC;
		break;
	case SK_PHYTYPE_BCOM:
		sc_if->sk_phyaddr = SK_PHYADDR_BCOM;
		break;
	default:
		printf("skc%d: unsupported PHY type: %d\n",
		    sc->sk_unit, sc_if->sk_phytype);
		SK_UNLOCK(sc);
		return(ENODEV);
	}

	/* Allocate the descriptor queues. */
	sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc_if->sk_rdata == NULL) {
		printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit);
		sc->sk_if[port] = NULL;
		SK_UNLOCK(sc);
		return(ENOMEM);
	}

	bzero(sc_if->sk_rdata, sizeof(struct sk_ring_data));

	/* Try to allocate memory for jumbo buffers. */
	if (sk_alloc_jumbo_mem(sc_if)) {
		printf("sk%d: jumbo buffer allocation failed\n",
		    sc_if->sk_unit);
		contigfree(sc_if->sk_rdata,
		    sizeof(struct sk_ring_data), M_DEVBUF);
		sc->sk_if[port] = NULL;
		SK_UNLOCK(sc);
		return(ENOMEM);
	}

	ifp = &sc_if->arpcom.ac_if;
	ifp->if_softc = sc_if;
	ifp->if_unit = sc_if->sk_unit;
	ifp->if_name = "sk";
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = sk_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = sk_start;
	ifp->if_watchdog = sk_watchdog;
	ifp->if_init = sk_init;
	ifp->if_baudrate = 1000000000;
	ifp->if_snd.ifq_maxlen = SK_TX_RING_CNT - 1;

	/*
	 * Do miibus setup.
	 */
	sk_init_xmac(sc_if);
	if (mii_phy_probe(dev, &sc_if->sk_miibus,
	    sk_ifmedia_upd, sk_ifmedia_sts)) {
		/*
		 * NOTE(review): unlike the failure paths above, this
		 * one does not free the jumbo buffer memory nor clear
		 * sc->sk_if[port] -- looks like a leak and a stale
		 * pointer; verify and fix.
		 */
		printf("skc%d: no PHY found!\n", sc_if->sk_unit);
		contigfree(sc_if->sk_rdata,
		    sizeof(struct sk_ring_data), M_DEVBUF);
		SK_UNLOCK(sc);
		return(ENXIO);
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
	callout_handle_init(&sc_if->sk_tick_ch);

	SK_UNLOCK(sc);

	return(0);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int sk_attach(dev)
	device_t		dev;
{
	u_int32_t		command;
	struct sk_softc		*sc;
	int			unit, error = 0, rid, *port;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	bzero(sc, sizeof(struct sk_softc));
	mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_DEF | MTX_RECURSE);
	SK_LOCK(sc);

	/*
	 * Handle power management nonsense.
	 */
	command = pci_read_config(dev, SK_PCI_CAPID, 4) & 0x000000FF;
	if (command == 0x01) {

		command = pci_read_config(dev, SK_PCI_PWRMGMTCTRL, 4);
		if (command & SK_PSTATE_MASK) {
			u_int32_t		iobase, membase, irq;

			/* Save important PCI config data. */
			iobase = pci_read_config(dev, SK_PCI_LOIO, 4);
			membase = pci_read_config(dev, SK_PCI_LOMEM, 4);
			irq = pci_read_config(dev, SK_PCI_INTLINE, 4);

			/* Reset the power state.
 */
			printf("skc%d: chip is in D%d power mode "
			    "-- setting to D0\n", unit,
			    command & SK_PSTATE_MASK);
			command &= 0xFFFFFFFC;
			pci_write_config(dev, SK_PCI_PWRMGMTCTRL, command, 4);

			/* Restore PCI config data. */
			pci_write_config(dev, SK_PCI_LOIO, iobase, 4);
			pci_write_config(dev, SK_PCI_LOMEM, membase, 4);
			pci_write_config(dev, SK_PCI_INTLINE, irq, 4);
		}
	}

	/*
	 * Map control/status registers.
	 */
	command = pci_read_config(dev, PCIR_COMMAND, 4);
	command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
	pci_write_config(dev, PCIR_COMMAND, command, 4);
	command = pci_read_config(dev, PCIR_COMMAND, 4);

#ifdef SK_USEIOSPACE
	if (!(command & PCIM_CMD_PORTEN)) {
		printf("skc%d: failed to enable I/O ports!\n", unit);
		error = ENXIO;
		goto fail;
	}
#else
	if (!(command & PCIM_CMD_MEMEN)) {
		printf("skc%d: failed to enable memory mapping!\n", unit);
		error = ENXIO;
		goto fail;
	}
#endif

	rid = SK_RID;
	sc->sk_res = bus_alloc_resource(dev, SK_RES, &rid,
	    0, ~0, 1, RF_ACTIVE);

	if (sc->sk_res == NULL) {
		printf("sk%d: couldn't map ports/memory\n", unit);
		error = ENXIO;
		goto fail;
	}

	sc->sk_btag = rman_get_bustag(sc->sk_res);
	sc->sk_bhandle = rman_get_bushandle(sc->sk_res);

	/* Allocate interrupt */
	rid = 0;
	sc->sk_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->sk_irq == NULL) {
		printf("skc%d: couldn't map interrupt\n", unit);
		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
		error = ENXIO;
		goto fail;
	}

	error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET,
	    sk_intr, sc, &sc->sk_intrhand);

	if (error) {
		printf("skc%d: couldn't set up irq\n", unit);
		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
		goto fail;
	}

	/* Reset the adapter. */
	sk_reset(sc);

	sc->sk_unit = unit;

	/* Read and save vital product data from EEPROM. */
	sk_vpd_read(sc);

	/* Read and save RAM size and RAMbuffer offset */
	switch(sk_win_read_1(sc, SK_EPROM0)) {
	case SK_RAMSIZE_512K_64:
		sc->sk_ramsize = 0x80000;
		sc->sk_rboff = SK_RBOFF_0;
		break;
	case SK_RAMSIZE_1024K_64:
		sc->sk_ramsize = 0x100000;
		sc->sk_rboff = SK_RBOFF_80000;
		break;
	case SK_RAMSIZE_1024K_128:
		sc->sk_ramsize = 0x100000;
		sc->sk_rboff = SK_RBOFF_0;
		break;
	case SK_RAMSIZE_2048K_128:
		sc->sk_ramsize = 0x200000;
		sc->sk_rboff = SK_RBOFF_0;
		break;
	default:
		printf("skc%d: unknown ram size: %d\n",
		    sc->sk_unit, sk_win_read_1(sc, SK_EPROM0));
		bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
		error = ENXIO;
		goto fail;
		break;
	}

	/* Read and save physical media type */
	switch(sk_win_read_1(sc, SK_PMDTYPE)) {
	case SK_PMD_1000BASESX:
		sc->sk_pmd = IFM_1000_SX;
		break;
	case SK_PMD_1000BASELX:
		sc->sk_pmd = IFM_1000_LX;
		break;
	case SK_PMD_1000BASECX:
		sc->sk_pmd = IFM_1000_CX;
		break;
	case SK_PMD_1000BASETX:
		sc->sk_pmd = IFM_1000_TX;
		break;
	default:
		printf("skc%d: unknown media type: 0x%x\n",
		    sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE));
		bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
		bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);
		error = ENXIO;
		goto fail;
	}

	/* Announce the product name. */
	printf("skc%d: %s\n", sc->sk_unit, sc->sk_vpd_prodname);
	sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1);
	/*
	 * NOTE(review): these M_NOWAIT malloc() results are dereferenced
	 * without a NULL check -- verify against allocation policy.
	 */
	port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
	*port = SK_PORT_A;
	device_set_ivars(sc->sk_devs[SK_PORT_A], port);

	if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) {
		/* Dual-port card: create the second logical interface. */
		sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1);
		port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT);
		*port = SK_PORT_B;
		device_set_ivars(sc->sk_devs[SK_PORT_B], port);
	}

	/* Turn on the 'driver is loaded' LED. */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON);

	bus_generic_attach(dev);

	SK_UNLOCK(sc);
	return(0);

fail:
	SK_UNLOCK(sc);
	mtx_destroy(&sc->sk_mtx);
	return(error);
}

/*
 * Detach one logical XMAC interface: stop it, detach from the
 * network stack and free its descriptor and jumbo memory.
 */
static int sk_detach_xmac(dev)
	device_t		dev;
{
	struct sk_softc		*sc;
	struct sk_if_softc	*sc_if;
	struct ifnet		*ifp;

	sc = device_get_softc(device_get_parent(dev));
	sc_if = device_get_softc(dev);

	SK_IF_LOCK(sc_if);

	ifp = &sc_if->arpcom.ac_if;
	sk_stop(sc_if);
	ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
	bus_generic_detach(dev);
	if (sc_if->sk_miibus != NULL)
		device_delete_child(dev, sc_if->sk_miibus);
	contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF);
	contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data), M_DEVBUF);
	SK_IF_UNLOCK(sc_if);

	return(0);
}

/*
 * Detach the controller: delete both child interfaces and release
 * the interrupt and register resources.
 */
static int sk_detach(dev)
	device_t		dev;
{
	struct sk_softc		*sc;

	sc = device_get_softc(dev);

	SK_LOCK(sc);

	bus_generic_detach(dev);
	if (sc->sk_devs[SK_PORT_A] != NULL)
		device_delete_child(dev, sc->sk_devs[SK_PORT_A]);
	if (sc->sk_devs[SK_PORT_B] != NULL)
		device_delete_child(dev, sc->sk_devs[SK_PORT_B]);

	bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq);
	bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res);

	SK_UNLOCK(sc);
	mtx_destroy(&sc->sk_mtx);

	return(0);
}

/*
 * Encapsulate an mbuf chain into TX descriptors starting at *txidx.
 * On success *txidx is advanced past the frame; returns ENOBUFS if
 * the ring is too full.
 */
static int sk_encap(sc_if, m_head, txidx)
	struct sk_if_softc	*sc_if;
	struct mbuf		*m_head;
	u_int32_t		*txidx;
{
	struct sk_tx_desc	*f = NULL;
	struct mbuf		*m;
	u_int32_t		frag, cur, cnt = 0;

	m = m_head;
	cur = frag = *txidx;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
 */
	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			/* Always keep at least two descriptors free. */
			if ((SK_TX_RING_CNT -
			    (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2)
				return(ENOBUFS);
			f = &sc_if->sk_rdata->sk_tx_ring[frag];
			f->sk_data_lo = vtophys(mtod(m, vm_offset_t));
			f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT;
			/*
			 * The OWN bit on the first fragment is set last
			 * (below), so the chip can't start on a
			 * half-built frame.
			 */
			if (cnt == 0)
				f->sk_ctl |= SK_TXCTL_FIRSTFRAG;
			else
				f->sk_ctl |= SK_TXCTL_OWN;
			cur = frag;
			SK_INC(frag, SK_TX_RING_CNT);
			cnt++;
		}
	}

	if (m != NULL)
		return(ENOBUFS);

	sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |=
	    SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR;
	sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head;
	sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN;
	sc_if->sk_cdata.sk_tx_cnt += cnt;

	*txidx = frag;

	return(0);
}

/*
 * Drain the send queue into the TX ring and kick the transmitter.
 */
static void sk_start(ifp)
	struct ifnet		*ifp;
{
	struct sk_softc		*sc;
	struct sk_if_softc	*sc_if;
	struct mbuf		*m_head = NULL;
	u_int32_t		idx;

	sc_if = ifp->if_softc;
	sc = sc_if->sk_softc;

	SK_IF_LOCK(sc_if);

	idx = sc_if->sk_cdata.sk_tx_prod;

	while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (sk_encap(sc_if, m_head, &idx)) {
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp, m_head);
	}

	/* Transmit */
	sc_if->sk_cdata.sk_tx_prod = idx;
	CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START);

	/* Set a timeout in case the chip goes out to lunch. */
	ifp->if_timer = 5;

	SK_IF_UNLOCK(sc_if);

	return;
}

/*
 * Watchdog: the transmitter stalled; reinitialize the interface.
 */
static void sk_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct sk_if_softc	*sc_if;

	sc_if = ifp->if_softc;

	printf("sk%d: watchdog timeout\n", sc_if->sk_unit);
	sk_init(sc_if);

	return;
}

static void sk_shutdown(dev)
	device_t		dev;
{
	struct sk_softc		*sc;

	sc = device_get_softc(dev);
	SK_LOCK(sc);

	/* Turn off the 'driver is loaded' LED. */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);

	/*
	 * Reset the GEnesis controller. Doing this should also
	 * assert the resets on the attached XMAC(s).
	 */
	sk_reset(sc);
	SK_UNLOCK(sc);

	return;
}

/*
 * RX completion: walk the RX ring handing completed frames to
 * ether_input() until we hit a descriptor the chip still owns.
 */
static void sk_rxeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct ether_header	*eh;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct sk_chain		*cur_rx;
	int			total_len = 0;
	int			i;
	u_int32_t		rxstat;

	ifp = &sc_if->arpcom.ac_if;
	i = sc_if->sk_cdata.sk_rx_prod;
	cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];

	while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) {

		cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
		rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat;
		m = cur_rx->sk_mbuf;
		cur_rx->sk_mbuf = NULL;
		total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
		SK_INC(i, SK_RX_RING_CNT);

		if (rxstat & XM_RXSTAT_ERRFRAME) {
			/* Bad frame: recycle the buffer and count it. */
			ifp->if_ierrors++;
			sk_newbuf(sc_if, cur_rx, m);
			continue;
		}

		/*
		 * Try to allocate a new jumbo buffer. If that
		 * fails, copy the packet to mbufs and put the
		 * jumbo buffer back in the ring so it can be
		 * re-used. If allocating mbufs fails, then we
		 * have to drop the packet.
		 */
		if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) {
			struct mbuf		*m0;
			m0 = m_devget(mtod(m, char *) - ETHER_ALIGN,
			    total_len + ETHER_ALIGN, 0, ifp, NULL);
			sk_newbuf(sc_if, cur_rx, m);
			if (m0 == NULL) {
				printf("sk%d: no receive buffers "
				    "available -- packet dropped!\n",
				    sc_if->sk_unit);
				ifp->if_ierrors++;
				continue;
			}
			m_adj(m0, ETHER_ALIGN);
			m = m0;
		} else {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);

		/* Remove header from mbuf and pass it on. */
		m_adj(m, sizeof(struct ether_header));
		ether_input(ifp, eh, m);
	}

	sc_if->sk_cdata.sk_rx_prod = i;

	return;
}

/*
 * TX completion: free mbufs for descriptors the chip has released.
 */
static void sk_txeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_tx_desc	*cur_tx = NULL;
	struct ifnet		*ifp;
	u_int32_t		idx;

	ifp = &sc_if->arpcom.ac_if;

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	idx = sc_if->sk_cdata.sk_tx_cons;
	while(idx != sc_if->sk_cdata.sk_tx_prod) {
		cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx];
		if (cur_tx->sk_ctl & SK_TXCTL_OWN)
			break;
		if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG)
			ifp->if_opackets++;
		if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) {
			m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf);
			sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL;
		}
		sc_if->sk_cdata.sk_tx_cnt--;
		SK_INC(idx, SK_TX_RING_CNT);
		/* Progress was made, so disarm the watchdog. */
		ifp->if_timer = 0;
	}

	sc_if->sk_cdata.sk_tx_cons = idx;

	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;

	return;
}

/*
 * Periodic link-watch tick (armed when the link goes down).
 */
static void sk_tick(xsc_if)
	void			*xsc_if;
{
	struct sk_if_softc	*sc_if;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	int			i;

	sc_if = xsc_if;
	SK_IF_LOCK(sc_if);
	ifp = &sc_if->arpcom.ac_if;
	mii = device_get_softc(sc_if->sk_miibus);

	if (!(ifp->if_flags & IFF_UP)) {
		SK_IF_UNLOCK(sc_if);
		return;
	}

	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		sk_intr_bcom(sc_if);
		SK_IF_UNLOCK(sc_if);
		return;
	}

	/*
	 * According to SysKonnect, the correct way to verify that
	 * the link has come back up is to poll bit 0 of the GPIO
	 * register three times. This pin has the signal from the
	 * link_sync pin connected to it; if we read the same link
	 * state 3 times in a row, we know the link is up.
	 */
	for (i = 0; i < 3; i++) {
		if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET)
			break;
	}

	if (i != 3) {
		/* Link not stable yet; poll again in a second. */
		sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz);
		SK_IF_UNLOCK(sc_if);
		return;
	}

	/* Turn the GP0 interrupt back on.
*/ SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); SK_XM_READ_2(sc_if, XM_ISR); mii_tick(mii); mii_pollstat(mii); untimeout(sk_tick, sc_if, sc_if->sk_tick_ch); SK_IF_UNLOCK(sc_if); return; } static void sk_intr_bcom(sc_if) struct sk_if_softc *sc_if; { struct sk_softc *sc; struct mii_data *mii; struct ifnet *ifp; int status; sc = sc_if->sk_softc; mii = device_get_softc(sc_if->sk_miibus); ifp = &sc_if->arpcom.ac_if; SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); /* * Read the PHY interrupt register to make sure * we clear any pending interrupts. */ status = sk_miibus_readreg(sc_if->sk_dev, SK_PHYADDR_BCOM, BRGPHY_MII_ISR); if (!(ifp->if_flags & IFF_RUNNING)) { sk_init_xmac(sc_if); return; } if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) { int lstat; lstat = sk_miibus_readreg(sc_if->sk_dev, SK_PHYADDR_BCOM, BRGPHY_MII_AUXSTS); if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) { mii_mediachg(mii); /* Turn off the link LED. */ SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF); sc_if->sk_link = 0; } else if (status & BRGPHY_ISR_LNK_CHG) { sk_miibus_writereg(sc_if->sk_dev, SK_PHYADDR_BCOM, BRGPHY_MII_IMR, 0xFF00); mii_tick(mii); sc_if->sk_link = 1; /* Turn on the link LED. */ SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF| SK_LINKLED_BLINK_OFF); mii_pollstat(mii); } else { mii_tick(mii); sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); } } SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); return; } static void sk_intr_xmac(sc_if) struct sk_if_softc *sc_if; { struct sk_softc *sc; u_int16_t status; struct mii_data *mii; sc = sc_if->sk_softc; mii = device_get_softc(sc_if->sk_miibus); status = SK_XM_READ_2(sc_if, XM_ISR); /* * Link has gone down. Start MII tick timeout to * watch for link resync. 
*/ if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) { if (status & XM_ISR_GP0_SET) { SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); } if (status & XM_ISR_AUTONEG_DONE) { sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); } } if (status & XM_IMR_TX_UNDERRUN) SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO); if (status & XM_IMR_RX_OVERRUN) SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO); status = SK_XM_READ_2(sc_if, XM_ISR); return; } static void sk_intr(xsc) void *xsc; { struct sk_softc *sc = xsc; struct sk_if_softc *sc_if0 = NULL, *sc_if1 = NULL; struct ifnet *ifp0 = NULL, *ifp1 = NULL; u_int32_t status; SK_LOCK(sc); sc_if0 = sc->sk_if[SK_PORT_A]; sc_if1 = sc->sk_if[SK_PORT_B]; if (sc_if0 != NULL) ifp0 = &sc_if0->arpcom.ac_if; if (sc_if1 != NULL) ifp1 = &sc_if1->arpcom.ac_if; for (;;) { status = CSR_READ_4(sc, SK_ISSR); if (!(status & sc->sk_intrmask)) break; /* Handle receive interrupts first. */ if (status & SK_ISR_RX1_EOF) { sk_rxeof(sc_if0); CSR_WRITE_4(sc, SK_BMU_RX_CSR0, SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); } if (status & SK_ISR_RX2_EOF) { sk_rxeof(sc_if1); CSR_WRITE_4(sc, SK_BMU_RX_CSR1, SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); } /* Then transmit interrupts. */ if (status & SK_ISR_TX1_S_EOF) { sk_txeof(sc_if0); CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, SK_TXBMU_CLR_IRQ_EOF); } if (status & SK_ISR_TX2_S_EOF) { sk_txeof(sc_if1); CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, SK_TXBMU_CLR_IRQ_EOF); } /* Then MAC interrupts. 
*/ if (status & SK_ISR_MAC1 && ifp0->if_flags & IFF_RUNNING) sk_intr_xmac(sc_if0); if (status & SK_ISR_MAC2 && ifp1->if_flags & IFF_RUNNING) sk_intr_xmac(sc_if1); if (status & SK_ISR_EXTERNAL_REG) { if (ifp0 != NULL) sk_intr_bcom(sc_if0); if (ifp1 != NULL) sk_intr_bcom(sc_if1); } } CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); if (ifp0 != NULL && ifp0->if_snd.ifq_head != NULL) sk_start(ifp0); if (ifp1 != NULL && ifp1->if_snd.ifq_head != NULL) sk_start(ifp1); SK_UNLOCK(sc); return; } static void sk_init_xmac(sc_if) struct sk_if_softc *sc_if; { struct sk_softc *sc; struct ifnet *ifp; struct sk_bcom_hack bhack[] = { { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 }, { 0, 0 } }; sc = sc_if->sk_softc; ifp = &sc_if->arpcom.ac_if; /* Unreset the XMAC. */ SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET); DELAY(1000); /* Reset the XMAC's internal state. */ SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC); /* Save the XMAC II revision */ sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID)); /* * Perform additional initialization for external PHYs, * namely for the 1000baseTX cards that use the XMAC's * GMII mode. */ if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { int i = 0; u_int32_t val; /* Take PHY out of reset. */ val = sk_win_read_4(sc, SK_GPIO); if (sc_if->sk_port == SK_PORT_A) val |= SK_GPIO_DIR0|SK_GPIO_DAT0; else val |= SK_GPIO_DIR2|SK_GPIO_DAT2; sk_win_write_4(sc, SK_GPIO, val); /* Enable GMII mode on the XMAC. */ SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE); sk_miibus_writereg(sc_if->sk_dev, SK_PHYADDR_BCOM, BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET); DELAY(10000); sk_miibus_writereg(sc_if->sk_dev, SK_PHYADDR_BCOM, BRGPHY_MII_IMR, 0xFFF0); /* * Early versions of the BCM5400 apparently have * a bug that requires them to have their reserved * registers initialized to some magic values. 
		 * I don't know what the numbers do, I'm just the
		 * messenger.
		 */
		if (sk_miibus_readreg(sc_if->sk_dev,
		    SK_PHYADDR_BCOM, 0x03) == 0x6041) {
			while(bhack[i].reg) {
				sk_miibus_writereg(sc_if->sk_dev,
				    SK_PHYADDR_BCOM, bhack[i].reg,
				    bhack[i].val);
				i++;
			}
		}
	}

	/* Set station address */
	/*
	 * NOTE(review): these 16-bit word accesses assume ac_enaddr is
	 * 2-byte aligned -- confirm on strict-alignment platforms.
	 */
	SK_XM_WRITE_2(sc_if, XM_PAR0,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[0]));
	SK_XM_WRITE_2(sc_if, XM_PAR1,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[2]));
	SK_XM_WRITE_2(sc_if, XM_PAR2,
	    *(u_int16_t *)(&sc_if->arpcom.ac_enaddr[4]));
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION);

	if (ifp->if_flags & IFF_PROMISC) {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
	} else {
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
	}

	if (ifp->if_flags & IFF_BROADCAST) {
		SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	} else {
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD);
	}

	/* We don't need the FCS appended to the packet. */
	SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS);

	/* We want short frames padded to 60 bytes. */
	SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD);

	/*
	 * Enable the reception of all error frames. This is is
	 * a necessary evil due to the design of the XMAC. The
	 * XMAC's receive FIFO is only 8K in size, however jumbo
	 * frames can be up to 9000 bytes in length. When bad
	 * frame filtering is enabled, the XMAC's RX FIFO operates
	 * in 'store and forward' mode. For this to work, the
	 * entire frame has to fit into the FIFO, but that means
	 * that jumbo frames larger than 8192 bytes will be
	 * truncated. Disabling all bad frame filtering causes
	 * the RX FIFO to operate in streaming mode, in which
	 * case the XMAC will start transfering frames out of the
	 * RX FIFO as soon as the FIFO threshold is reached.
	 */
	SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES|
	    XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS|
	    XM_MODE_RX_INRANGELEN);

	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);
	else
		SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK);

	/*
	 * Bump up the transmit threshold. This helps hold off transmit
	 * underruns when we're blasting traffic from both ports at once.
	 */
	SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH);

	/* Set multicast filter */
	sk_setmulti(sc_if);

	/* Clear and enable interrupts */
	SK_XM_READ_2(sc_if, XM_ISR);
	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC)
		SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS);
	else
		SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF);

	/* Configure MAC arbiter */
	switch(sc_if->sk_xmac_rev) {
	case XM_XMAC_REV_B2:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2);
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	case XM_XMAC_REV_C1:
		sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1);
		sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1);
		/*
		 * NOTE(review): the C1 branch writes the B2 recovery
		 * value (SK_RECOVERY_XMAC_B2) -- confirm against the
		 * XMAC II errata whether a C1-specific value exists.
		 */
		sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2);
		break;
	default:
		break;
	}
	sk_win_write_2(sc, SK_MACARB_CTL,
	    SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF);

	sc_if->sk_link = 1;

	return;
}

/*
 * Note that to properly initialize any part of the GEnesis chip,
 * you first have to take it out of reset mode.
 */
static void sk_init(xsc)
	void			*xsc;
{
	struct sk_if_softc	*sc_if = xsc;
	struct sk_softc		*sc;
	struct ifnet		*ifp;
	struct mii_data		*mii;

	SK_IF_LOCK(sc_if);

	ifp = &sc_if->arpcom.ac_if;
	sc = sc_if->sk_softc;
	mii = device_get_softc(sc_if->sk_miibus);

	/* Cancel pending I/O and free all RX/TX buffers. */
	sk_stop(sc_if);

	/* Configure LINK_SYNC LED */
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON);
	SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_ON);

	/* Configure RX LED */
	SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_START);

	/* Configure TX LED */
	SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_TXLEDCTL_COUNTER_START);

	/* Configure I2C registers */

	/* Configure XMAC(s) */
	sk_init_xmac(sc_if);
	mii_mediachg(mii);

	/* Configure MAC FIFOs */
	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END);
	SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON);

	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END);
	SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON);

	/* Configure transmit arbiter(s) */
	SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL,
	    SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON);

	/* Configure RAMbuffers */
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend);
	SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON);

	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart);
	SK_IF_WRITE_4(sc_if,
	    1, SK_TXRBS1_END, sc_if->sk_tx_ramend);
	SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON);

	/* Configure BMUs */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO,
	    vtophys(&sc_if->sk_rdata->sk_rx_ring[0]));
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0);

	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE);
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO,
	    vtophys(&sc_if->sk_rdata->sk_tx_ring[0]));
	SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0);

	/* Init descriptors */
	if (sk_init_rx_ring(sc_if) == ENOBUFS) {
		/* Can't run without RX buffers; leave stopped. */
		printf("sk%d: initialization failed: no "
		    "memory for rx buffers\n", sc_if->sk_unit);
		sk_stop(sc_if);
		SK_IF_UNLOCK(sc_if);
		return;
	}
	sk_init_tx_ring(sc_if);

	/* Configure interrupt handling */
	CSR_READ_4(sc, SK_ISSR);
	if (sc_if->sk_port == SK_PORT_A)
		sc->sk_intrmask |= SK_INTRS1;
	else
		sc->sk_intrmask |= SK_INTRS2;

	sc->sk_intrmask |= SK_ISR_EXTERNAL_REG;

	CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask);

	/* Start BMUs. */
	SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START);

	/* Enable XMACs TX and RX state machines */
	SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE);
	SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	SK_IF_UNLOCK(sc_if);

	return;
}

/*
 * Stop the interface: cancel the link tick, hold the PHY and MAC
 * in reset, shut down the FIFOs/BMUs/RAM buffers, mask this port's
 * interrupts and free all queued RX/TX mbufs.
 */
static void sk_stop(sc_if)
	struct sk_if_softc	*sc_if;
{
	int			i;
	struct sk_softc		*sc;
	struct ifnet		*ifp;

	SK_IF_LOCK(sc_if);

	sc = sc_if->sk_softc;
	ifp = &sc_if->arpcom.ac_if;

	untimeout(sk_tick, sc_if, sc_if->sk_tick_ch);

	if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) {
		u_int32_t		val;

		/* Put PHY back into reset. */
		val = sk_win_read_4(sc, SK_GPIO);
		if (sc_if->sk_port == SK_PORT_A) {
			val |= SK_GPIO_DIR0;
			val &= ~SK_GPIO_DAT0;
		} else {
			val |= SK_GPIO_DIR2;
			val &= ~SK_GPIO_DAT2;
		}
		sk_win_write_4(sc, SK_GPIO, val);
	}

	/* Turn off various components of this interface.
*/ SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC); SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET); SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET); SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE); SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF); SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF); SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF); SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP); SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP); SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF); SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF); /* Disable interrupts */ if (sc_if->sk_port == SK_PORT_A) sc->sk_intrmask &= ~SK_INTRS1; else sc->sk_intrmask &= ~SK_INTRS2; CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); SK_XM_READ_2(sc_if, XM_ISR); SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF); /* Free RX and TX mbufs still in the queues. */ for (i = 0; i < SK_RX_RING_CNT; i++) { if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) { m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf); sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL; } } for (i = 0; i < SK_TX_RING_CNT; i++) { if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) { m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf); sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL; } } ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE); SK_IF_UNLOCK(sc_if); return; } Index: head/sys/pci/if_ste.c =================================================================== --- head/sys/pci/if_ste.c (revision 71961) +++ head/sys/pci/if_ste.c (revision 71962) @@ -1,1595 +1,1594 @@ /* * Copyright (c) 1997, 1998, 1999 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include #include #include #include #include #include #include /* "controller miibus0" required. See GENERIC if you get errors here. 
*/ #include "miibus_if.h" #define STE_USEIOSPACE #include MODULE_DEPEND(ste, miibus, 1, 1, 1); #if !defined(lint) static const char rcsid[] = "$FreeBSD$"; #endif /* * Various supported device vendors/types and their names. */ static struct ste_type ste_devs[] = { { ST_VENDORID, ST_DEVICEID_ST201, "Sundance ST201 10/100BaseTX" }, { DL_VENDORID, DL_DEVICEID_550TX, "D-Link DFE-550TX 10/100BaseTX" }, { 0, 0, NULL } }; static int ste_probe __P((device_t)); static int ste_attach __P((device_t)); static int ste_detach __P((device_t)); static void ste_init __P((void *)); static void ste_intr __P((void *)); static void ste_rxeof __P((struct ste_softc *)); static void ste_txeoc __P((struct ste_softc *)); static void ste_txeof __P((struct ste_softc *)); static void ste_stats_update __P((void *)); static void ste_stop __P((struct ste_softc *)); static void ste_reset __P((struct ste_softc *)); static int ste_ioctl __P((struct ifnet *, u_long, caddr_t)); static int ste_encap __P((struct ste_softc *, struct ste_chain *, struct mbuf *)); static void ste_start __P((struct ifnet *)); static void ste_watchdog __P((struct ifnet *)); static void ste_shutdown __P((device_t)); static int ste_newbuf __P((struct ste_softc *, struct ste_chain_onefrag *, struct mbuf *)); static int ste_ifmedia_upd __P((struct ifnet *)); static void ste_ifmedia_sts __P((struct ifnet *, struct ifmediareq *)); static void ste_mii_sync __P((struct ste_softc *)); static void ste_mii_send __P((struct ste_softc *, u_int32_t, int)); static int ste_mii_readreg __P((struct ste_softc *, struct ste_mii_frame *)); static int ste_mii_writereg __P((struct ste_softc *, struct ste_mii_frame *)); static int ste_miibus_readreg __P((device_t, int, int)); static int ste_miibus_writereg __P((device_t, int, int, int)); static void ste_miibus_statchg __P((device_t)); static int ste_eeprom_wait __P((struct ste_softc *)); static int ste_read_eeprom __P((struct ste_softc *, caddr_t, int, int, int)); static void ste_wait __P((struct 
ste_softc *)); static u_int8_t ste_calchash __P((caddr_t)); static void ste_setmulti __P((struct ste_softc *)); static int ste_init_rx_list __P((struct ste_softc *)); static void ste_init_tx_list __P((struct ste_softc *)); #ifdef STE_USEIOSPACE #define STE_RES SYS_RES_IOPORT #define STE_RID STE_PCI_LOIO #else #define STE_RES SYS_RES_MEMORY #define STE_RID STE_PCI_LOMEM #endif static device_method_t ste_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ste_probe), DEVMETHOD(device_attach, ste_attach), DEVMETHOD(device_detach, ste_detach), DEVMETHOD(device_shutdown, ste_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, ste_miibus_readreg), DEVMETHOD(miibus_writereg, ste_miibus_writereg), DEVMETHOD(miibus_statchg, ste_miibus_statchg), { 0, 0 } }; static driver_t ste_driver = { "ste", ste_methods, sizeof(struct ste_softc) }; static devclass_t ste_devclass; DRIVER_MODULE(if_ste, pci, ste_driver, ste_devclass, 0, 0); DRIVER_MODULE(miibus, ste, miibus_driver, miibus_devclass, 0, 0); #define STE_SETBIT4(sc, reg, x) \ CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x) #define STE_CLRBIT4(sc, reg, x) \ CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x) #define STE_SETBIT2(sc, reg, x) \ CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | x) #define STE_CLRBIT2(sc, reg, x) \ CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~x) #define STE_SETBIT1(sc, reg, x) \ CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | x) #define STE_CLRBIT1(sc, reg, x) \ CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~x) #define MII_SET(x) STE_SETBIT1(sc, STE_PHYCTL, x) #define MII_CLR(x) STE_CLRBIT1(sc, STE_PHYCTL, x) /* * Sync the PHYs by setting data bit and strobing the clock 32 times. 
*/
static void ste_mii_sync(sc)
	struct ste_softc	*sc;
{
	register int		i;

	/* Drive MDIO high with the direction bit set, clock 32 times. */
	MII_SET(STE_PHYCTL_MDIR|STE_PHYCTL_MDATA);

	for (i = 0; i < 32; i++) {
		MII_SET(STE_PHYCTL_MCLK);
		DELAY(1);
		MII_CLR(STE_PHYCTL_MCLK);
		DELAY(1);
	}

	return;
}

/*
 * Clock a series of bits through the MII.
 */
static void ste_mii_send(sc, bits, cnt)
	struct ste_softc	*sc;
	u_int32_t		bits;
	int			cnt;
{
	int			i;

	MII_CLR(STE_PHYCTL_MCLK);

	/* Shift out the bits MSB first. */
	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			MII_SET(STE_PHYCTL_MDATA);
		} else {
			MII_CLR(STE_PHYCTL_MDATA);
		}
		DELAY(1);
		MII_CLR(STE_PHYCTL_MCLK);
		DELAY(1);
		MII_SET(STE_PHYCTL_MCLK);
	}
}

/*
 * Read a PHY register through the MII.
 */
static int ste_mii_readreg(sc, frame)
	struct ste_softc	*sc;
	struct ste_mii_frame	*frame;
{
	int			i, ack;

	STE_LOCK(sc);

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = STE_MII_STARTDELIM;
	frame->mii_opcode = STE_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_2(sc, STE_PHYCTL, 0);
	/*
	 * Turn on data xmit.
	 */
	MII_SET(STE_PHYCTL_MDIR);

	ste_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	ste_mii_send(sc, frame->mii_stdelim, 2);
	ste_mii_send(sc, frame->mii_opcode, 2);
	ste_mii_send(sc, frame->mii_phyaddr, 5);
	ste_mii_send(sc, frame->mii_regaddr, 5);

	/* Turn off xmit. */
	MII_CLR(STE_PHYCTL_MDIR);

	/* Idle bit */
	MII_CLR((STE_PHYCTL_MCLK|STE_PHYCTL_MDATA));
	DELAY(1);
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);

	/* Check for ack */
	MII_CLR(STE_PHYCTL_MCLK);
	DELAY(1);
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);
	ack = CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA;

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			MII_CLR(STE_PHYCTL_MCLK);
			DELAY(1);
			MII_SET(STE_PHYCTL_MCLK);
			DELAY(1);
		}
		goto fail;
	}

	/* Sample 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(STE_PHYCTL_MCLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA)
				frame->mii_data |= i;
			DELAY(1);
		}
		MII_SET(STE_PHYCTL_MCLK);
		DELAY(1);
	}

fail:

	MII_CLR(STE_PHYCTL_MCLK);
	DELAY(1);
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);

	STE_UNLOCK(sc);

	/* Non-zero return means the PHY did not ack the read. */
	if (ack)
		return(1);
	return(0);
}

/*
 * Write to a PHY register through the MII.
 */
static int ste_mii_writereg(sc, frame)
	struct ste_softc	*sc;
	struct ste_mii_frame	*frame;
{
	STE_LOCK(sc);

	/*
	 * Set up frame for TX.
	 */

	frame->mii_stdelim = STE_MII_STARTDELIM;
	frame->mii_opcode = STE_MII_WRITEOP;
	frame->mii_turnaround = STE_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	MII_SET(STE_PHYCTL_MDIR);

	ste_mii_sync(sc);

	ste_mii_send(sc, frame->mii_stdelim, 2);
	ste_mii_send(sc, frame->mii_opcode, 2);
	ste_mii_send(sc, frame->mii_phyaddr, 5);
	ste_mii_send(sc, frame->mii_regaddr, 5);
	ste_mii_send(sc, frame->mii_turnaround, 2);
	ste_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);
	MII_CLR(STE_PHYCTL_MCLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	MII_CLR(STE_PHYCTL_MDIR);

	STE_UNLOCK(sc);

	return(0);
}

/*
 * miibus(4) read hook: marshal a frame and bit-bang it to the PHY.
 */
static int ste_miibus_readreg(dev, phy, reg)
	device_t		dev;
	int			phy, reg;
{
	struct ste_softc	*sc;
	struct ste_mii_frame	frame;

	sc = device_get_softc(dev);
	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	ste_mii_readreg(sc, &frame);

	return(frame.mii_data);
}

/*
 * miibus(4) write hook.
 */
static int ste_miibus_writereg(dev, phy, reg, data)
	device_t		dev;
	int			phy, reg, data;
{
	struct ste_softc	*sc;
	struct ste_mii_frame	frame;

	sc = device_get_softc(dev);
	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	ste_mii_writereg(sc, &frame);

	return(0);
}

/*
 * miibus(4) link-state change hook: mirror the negotiated duplex
 * into the MAC's full-duplex control bit.
 */
static void ste_miibus_statchg(dev)
	device_t		dev;
{
	struct ste_softc	*sc;
	struct mii_data		*mii;

	sc = device_get_softc(dev);
	STE_LOCK(sc);
	mii = device_get_softc(sc->ste_miibus);

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		STE_SETBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX);
	} else {
		STE_CLRBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX);
	}
	STE_UNLOCK(sc);

	return;
}

/*
 * Set media: reset the PHY(s) and let the MII layer renegotiate.
 * Clears ste_link so the stats tick re-detects the link.
 */
static int ste_ifmedia_upd(ifp)
	struct ifnet		*ifp;
{
	struct ste_softc	*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->ste_miibus);
	sc->ste_link = 0;
	if (mii->mii_instance) {
		struct mii_softc	*miisc;
		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
		    miisc = LIST_NEXT(miisc, mii_list))
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return(0);
}

/*
 * Report current media status via the MII layer.
 */
static void ste_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct ste_softc	*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->ste_miibus);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

/*
 * Spin until the DMA engine finishes a requested halt, or time out.
 */
static void ste_wait(sc)
	struct ste_softc	*sc;
{
	register int		i;

	for (i = 0; i < STE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, STE_DMACTL) & STE_DMACTL_DMA_HALTINPROG))
			break;
	}

	if (i == STE_TIMEOUT)
		printf("ste%d: command never completed!\n", sc->ste_unit);

	return;
}

/*
 * The
EEPROM is slow: give it time to come ready after issuing
 * it a command.
 */
static int ste_eeprom_wait(sc)
	struct ste_softc	*sc;
{
	int			i;

	DELAY(1000);

	/* Poll the busy bit for up to ~100ms. */
	for (i = 0; i < 100; i++) {
		if (CSR_READ_2(sc, STE_EEPROM_CTL) & STE_EECTL_BUSY)
			DELAY(1000);
		else
			break;
	}

	if (i == 100) {
		printf("ste%d: eeprom failed to come ready\n", sc->ste_unit);
		return(1);
	}

	return(0);
}

/*
 * Read a sequence of words from the EEPROM. Note that ethernet address
 * data is stored in the EEPROM in network byte order.
 */
static int ste_read_eeprom(sc, dest, off, cnt, swap)
	struct ste_softc	*sc;
	caddr_t			dest;
	int			off;
	int			cnt;
	int			swap;
{
	int			err = 0, i;
	u_int16_t		word = 0, *ptr;

	if (ste_eeprom_wait(sc))
		return(1);

	for (i = 0; i < cnt; i++) {
		CSR_WRITE_2(sc, STE_EEPROM_CTL, STE_EEOPCODE_READ | (off + i));
		err = ste_eeprom_wait(sc);
		if (err)
			break;
		word = CSR_READ_2(sc, STE_EEPROM_DATA);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return(err ? 1 : 0);
}

/*
 * Compute the multicast filter bit position for a 6-byte MAC address.
 * NOTE(review): the polynomial constant 0x04c11db6 (and the `| carry`)
 * differ from the standard CRC-32 polynomial 0x04c11db7 -- this matches
 * other drivers of the same vintage; verify against the chip docs
 * before changing.
 */
static u_int8_t ste_calchash(addr)
	caddr_t			addr;
{
	u_int32_t		crc, carry;
	int			i, j;
	u_int8_t		c;

	/* Compute CRC for the address value. */
	crc = 0xFFFFFFFF; /* initial value */

	for (i = 0; i < 6; i++) {
		c = *(addr + i);
		for (j = 0; j < 8; j++) {
			carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01);
			crc <<= 1;
			c >>= 1;
			if (carry)
				crc = (crc ^ 0x04c11db6) | carry;
		}
	}

	/* return the filter bit position */
	return(crc & 0x0000003F);
}

/*
 * Program the 64-bit multicast hash filter, or fall back to
 * accept-all-multicast in ALLMULTI/PROMISC mode.
 */
static void ste_setmulti(sc)
	struct ste_softc	*sc;
{
	struct ifnet		*ifp;
	int			h = 0;
	u_int32_t		hashes[2] = { 0, 0 };
	struct ifmultiaddr	*ifma;

	ifp = &sc->arpcom.ac_if;
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_ALLMULTI);
		STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_MULTIHASH);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, STE_MAR0, 0);
	CSR_WRITE_4(sc, STE_MAR1, 0);

	/* now program new ones */
-	for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL;
-	    ifma = ifma->ifma_link.le_next) {
+	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ste_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}

	CSR_WRITE_4(sc, STE_MAR0, hashes[0]);
	CSR_WRITE_4(sc, STE_MAR1, hashes[1]);
	STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_ALLMULTI);
	STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_MULTIHASH);

	return;
}

/*
 * Interrupt handler: ack and service events until no interesting
 * status bits remain, then kick the transmitter if work is queued.
 */
static void ste_intr(xsc)
	void			*xsc;
{
	struct ste_softc	*sc;
	struct ifnet		*ifp;
	u_int16_t		status;

	sc = xsc;
	STE_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	/* See if this is really our interrupt. */
	if (!(CSR_READ_2(sc, STE_ISR) & STE_ISR_INTLATCH)) {
		STE_UNLOCK(sc);
		return;
	}

	for (;;) {
		/* Reading ISR_ACK also acknowledges the events. */
		status = CSR_READ_2(sc, STE_ISR_ACK);

		if (!(status & STE_INTRS))
			break;

		if (status & STE_ISR_RX_DMADONE)
			ste_rxeof(sc);

		if (status & STE_ISR_TX_DMADONE)
			ste_txeof(sc);

		if (status & STE_ISR_TX_DONE)
			ste_txeoc(sc);

		if (status & STE_ISR_STATS_OFLOW) {
			untimeout(ste_stats_update, sc, sc->ste_stat_ch);
			ste_stats_update(sc);
		}

		if (status & STE_ISR_HOSTERR) {
			ste_reset(sc);
			ste_init(sc);
		}
	}

	/* Re-enable interrupts */
	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);

	if (ifp->if_snd.ifq_head != NULL)
		ste_start(ifp);

	STE_UNLOCK(sc);

	return;
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void ste_rxeof(sc)
	struct ste_softc	*sc;
{
	struct ether_header	*eh;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct ste_chain_onefrag	*cur_rx;
	int			total_len = 0;
	u_int32_t		rxstat;

	ifp = &sc->arpcom.ac_if;

again:

	while((rxstat = sc->ste_cdata.ste_rx_head->ste_ptr->ste_status)) {
		cur_rx = sc->ste_cdata.ste_rx_head;
		sc->ste_cdata.ste_rx_head = cur_rx->ste_next;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxstat & STE_RXSTAT_FRAME_ERR) {
			ifp->if_ierrors++;
			cur_rx->ste_ptr->ste_status = 0;
			continue;
		}

		/*
		 * If the error bit was not set, the upload complete
		 * bit should be set which means we have a valid packet.
		 * If not, something truly strange has happened.
		 */
		if (!(rxstat & STE_RXSTAT_DMADONE)) {
			/* NOTE(review): message lacks a trailing newline. */
			printf("ste%d: bad receive status -- packet dropped",
							sc->ste_unit);
			ifp->if_ierrors++;
			cur_rx->ste_ptr->ste_status = 0;
			continue;
		}

		/* No errors; receive the packet. */
		m = cur_rx->ste_mbuf;
		total_len = cur_rx->ste_ptr->ste_status & STE_RXSTAT_FRAMELEN;

		/*
		 * Try to conjure up a new mbuf cluster. If that
		 * fails, it means we have an out of memory condition and
		 * should leave the buffer in place and continue. This will
		 * result in a lost packet, but there's little else we
		 * can do in this situation.
		 */
		if (ste_newbuf(sc, cur_rx, NULL) == ENOBUFS) {
			ifp->if_ierrors++;
			cur_rx->ste_ptr->ste_status = 0;
			continue;
		}

		ifp->if_ipackets++;
		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;

		/* Remove header from mbuf and pass it on. */
		m_adj(m, sizeof(struct ether_header));
		ether_input(ifp, eh, m);
	}

	/*
	 * Handle the 'end of channel' condition. When the upload
	 * engine hits the end of the RX ring, it will stall. This
	 * is our cue to flush the RX ring, reload the uplist pointer
	 * register and unstall the engine.
	 * XXX This is actually a little goofy. With the ThunderLAN
	 * chip, you get an interrupt when the receiver hits the end
	 * of the receive ring, which tells you exactly when you
	 * need to reload the ring pointer. Here we have to
	 * fake it. I'm mad at myself for not being clever enough
	 * to avoid the use of a goto here.
	 */
	if (CSR_READ_4(sc, STE_RX_DMALIST_PTR) == 0 ||
	    CSR_READ_4(sc, STE_DMACTL) & STE_DMACTL_RXDMA_STOPPED) {
		STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL);
		ste_wait(sc);
		CSR_WRITE_4(sc, STE_RX_DMALIST_PTR,
		    vtophys(&sc->ste_ldata->ste_rx_list[0]));
		sc->ste_cdata.ste_rx_head = &sc->ste_cdata.ste_rx_chain[0];
		STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL);
		goto again;
	}

	return;
}

/*
 * TX end-of-channel: drain TX status reports, recovering from
 * underruns/collision errors by resetting and reinitializing.
 */
static void ste_txeoc(sc)
	struct ste_softc	*sc;
{
	u_int8_t		txstat;
	struct ifnet		*ifp;

	ifp = &sc->arpcom.ac_if;

	while ((txstat = CSR_READ_1(sc, STE_TX_STATUS)) &
	    STE_TXSTATUS_TXDONE) {
		if (txstat & STE_TXSTATUS_UNDERRUN ||
		    txstat & STE_TXSTATUS_EXCESSCOLLS ||
		    txstat & STE_TXSTATUS_RECLAIMERR) {
			ifp->if_oerrors++;
			printf("ste%d: transmission error: %x\n",
			    sc->ste_unit, txstat);

			ste_reset(sc);
			ste_init(sc);

			/* Back off: raise the start threshold on underrun. */
			if (txstat & STE_TXSTATUS_UNDERRUN &&
			    sc->ste_tx_thresh < STE_PACKET_SIZE) {
				sc->ste_tx_thresh += STE_MIN_FRAMELEN;
				printf("ste%d: tx underrun, increasing tx"
				    " start threshold to %d bytes\n",
				    sc->ste_unit, sc->ste_tx_thresh);
			}
			CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);
			CSR_WRITE_2(sc, STE_TX_RECLAIM_THRESH,
			    (STE_PACKET_SIZE >> 4));
		}
		/*
		 * NOTE(review): ste_init() is invoked unconditionally on
		 * every TXDONE iteration here, not just in the error path
		 * above -- that reinitializes the whole chip per completed
		 * frame and looks suspicious; confirm intent before fixing.
		 */
		ste_init(sc);
		CSR_WRITE_2(sc, STE_TX_STATUS, txstat);
	}

	return;
}

/*
 * TX DMA completion: free transmitted mbufs and advance the consumer
 * index until we catch up with the producer or hit an unfinished
 * descriptor.
 */
static void ste_txeof(sc)
	struct ste_softc	*sc;
{
	struct ste_chain	*cur_tx = NULL;
	struct ifnet		*ifp;
	int			idx;

	ifp = &sc->arpcom.ac_if;

	idx = sc->ste_cdata.ste_tx_cons;
	while(idx != sc->ste_cdata.ste_tx_prod) {
		cur_tx = &sc->ste_cdata.ste_tx_chain[idx];

		if (!(cur_tx->ste_ptr->ste_ctl & STE_TXCTL_DMADONE))
			break;

		if (cur_tx->ste_mbuf != NULL) {
			m_freem(cur_tx->ste_mbuf);
			cur_tx->ste_mbuf = NULL;
		}

		ifp->if_opackets++;
		sc->ste_cdata.ste_tx_cnt--;
		STE_INC(idx, STE_TX_LIST_CNT);
		ifp->if_timer = 0;
	}
	sc->ste_cdata.ste_tx_cons = idx;

	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;

	return;
}

/*
 * Periodic (1Hz) tick: harvest the chip's statistics block, drive the
 * MII state machine and watch for the link coming up.
 */
static void ste_stats_update(xsc)
	void			*xsc;
{
	struct ste_softc	*sc;
	struct ste_stats	stats;
	struct ifnet		*ifp;
	struct mii_data		*mii;
	int			i;
	u_int8_t		*p;

	sc = xsc;
	STE_LOCK(sc);
	ifp = &sc->arpcom.ac_if;
	mii = device_get_softc(sc->ste_miibus);

	p = (u_int8_t *)&stats;

	/* Copy the stats registers byte-by-byte into the local struct. */
	for (i = 0; i < sizeof(stats); i++) {
		*p = CSR_READ_1(sc, STE_STATS + i);
		p++;
	}

	ifp->if_collisions += stats.ste_single_colls +
	    stats.ste_multi_colls + stats.ste_late_colls;

	mii_tick(mii);
	if (!sc->ste_link) {
		mii_pollstat(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE)
			sc->ste_link++;
		/* Link just came up: push out anything that queued up. */
		if (ifp->if_snd.ifq_head != NULL)
			ste_start(ifp);
	}

	sc->ste_stat_ch = timeout(ste_stats_update, sc, hz);
	STE_UNLOCK(sc);

	return;
}

/*
 * Probe for a Sundance ST201 chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int ste_probe(dev)
	device_t		dev;
{
	struct ste_type		*t;

	t = ste_devs;

	while(t->ste_name != NULL) {
		if ((pci_get_vendor(dev) == t->ste_vid) &&
		    (pci_get_device(dev) == t->ste_did)) {
			device_set_desc(dev, t->ste_name);
			return(0);
		}
		t++;
	}

	return(ENXIO);
}

/*
 * Attach the interface.
Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int ste_attach(dev)
	device_t		dev;
{
	u_int32_t		command;
	struct ste_softc	*sc;
	struct ifnet		*ifp;
	int			unit, error = 0, rid;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	bzero(sc, sizeof(struct ste_softc));

	mtx_init(&sc->ste_mtx, device_get_nameunit(dev), MTX_DEF | MTX_RECURSE);
	STE_LOCK(sc);

	/*
	 * Handle power management nonsense.
	 */
	command = pci_read_config(dev, STE_PCI_CAPID, 4) & 0x000000FF;
	if (command == 0x01) {

		command = pci_read_config(dev, STE_PCI_PWRMGMTCTRL, 4);
		if (command & STE_PSTATE_MASK) {
			u_int32_t		iobase, membase, irq;

			/* Save important PCI config data. */
			iobase = pci_read_config(dev, STE_PCI_LOIO, 4);
			membase = pci_read_config(dev, STE_PCI_LOMEM, 4);
			irq = pci_read_config(dev, STE_PCI_INTLINE, 4);

			/* Reset the power state. */
			printf("ste%d: chip is in D%d power mode "
			"-- setting to D0\n", unit, command & STE_PSTATE_MASK);
			command &= 0xFFFFFFFC;
			pci_write_config(dev, STE_PCI_PWRMGMTCTRL, command, 4);

			/* Restore PCI config data. */
			pci_write_config(dev, STE_PCI_LOIO, iobase, 4);
			pci_write_config(dev, STE_PCI_LOMEM, membase, 4);
			pci_write_config(dev, STE_PCI_INTLINE, irq, 4);
		}
	}

	/*
	 * Map control/status registers.
	 */
	command = pci_read_config(dev, PCIR_COMMAND, 4);
	command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
	pci_write_config(dev, PCIR_COMMAND, command, 4);
	command = pci_read_config(dev, PCIR_COMMAND, 4);

#ifdef STE_USEIOSPACE
	if (!(command & PCIM_CMD_PORTEN)) {
		printf("ste%d: failed to enable I/O ports!\n", unit);
		error = ENXIO;
		goto fail;
	}
#else
	if (!(command & PCIM_CMD_MEMEN)) {
		printf("ste%d: failed to enable memory mapping!\n", unit);
		error = ENXIO;
		goto fail;
	}
#endif

	rid = STE_RID;
	sc->ste_res = bus_alloc_resource(dev, STE_RES, &rid,
	    0, ~0, 1, RF_ACTIVE);

	if (sc->ste_res == NULL) {
		printf ("ste%d: couldn't map ports/memory\n", unit);
		error = ENXIO;
		goto fail;
	}

	sc->ste_btag = rman_get_bustag(sc->ste_res);
	sc->ste_bhandle = rman_get_bushandle(sc->ste_res);

	rid = 0;
	sc->ste_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->ste_irq == NULL) {
		printf("ste%d: couldn't map interrupt\n", unit);
		bus_release_resource(dev, STE_RES, STE_RID, sc->ste_res);
		error = ENXIO;
		goto fail;
	}

	error = bus_setup_intr(dev, sc->ste_irq, INTR_TYPE_NET,
	    ste_intr, sc, &sc->ste_intrhand);

	if (error) {
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ste_irq);
		bus_release_resource(dev, STE_RES, STE_RID, sc->ste_res);
		printf("ste%d: couldn't set up irq\n", unit);
		goto fail;
	}

	callout_handle_init(&sc->ste_stat_ch);

	/* Reset the adapter. */
	ste_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	if (ste_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr,
	    STE_EEADDR_NODE0, 3, 0)) {
		printf("ste%d: failed to read station address\n", unit);
		bus_teardown_intr(dev, sc->ste_irq, sc->ste_intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ste_irq);
		bus_release_resource(dev, STE_RES, STE_RID, sc->ste_res);
		error = ENXIO;; /* NOTE(review): stray second semicolon */
		goto fail;
	}

	/*
	 * A Sundance chip was detected. Inform the world.
	 */
	printf("ste%d: Ethernet address: %6D\n", unit,
	    sc->arpcom.ac_enaddr, ":");

	sc->ste_unit = unit;

	/* Allocate the descriptor queues. */
	sc->ste_ldata = contigmalloc(sizeof(struct ste_list_data), M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->ste_ldata == NULL) {
		printf("ste%d: no memory for list buffers!\n", unit);
		bus_teardown_intr(dev, sc->ste_irq, sc->ste_intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ste_irq);
		bus_release_resource(dev, STE_RES, STE_RID, sc->ste_res);
		error = ENXIO;
		goto fail;
	}

	bzero(sc->ste_ldata, sizeof(struct ste_list_data));

	/* Do MII setup. */
	if (mii_phy_probe(dev, &sc->ste_miibus,
	    ste_ifmedia_upd, ste_ifmedia_sts)) {
		printf("ste%d: MII without any phy!\n", sc->ste_unit);
		bus_teardown_intr(dev, sc->ste_irq, sc->ste_intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ste_irq);
		bus_release_resource(dev, STE_RES, STE_RID, sc->ste_res);
		contigfree(sc->ste_ldata, sizeof(struct ste_list_data),
		    M_DEVBUF);
		error = ENXIO;
		goto fail;
	}

	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_unit = unit;
	ifp->if_name = "ste";
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ste_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = ste_start;
	ifp->if_watchdog = ste_watchdog;
	ifp->if_init = ste_init;
	ifp->if_baudrate = 10000000;
	ifp->if_snd.ifq_maxlen = STE_TX_LIST_CNT - 1;

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
	STE_UNLOCK(sc);

	return(0);

fail:
	STE_UNLOCK(sc);
	mtx_destroy(&sc->ste_mtx);
	return(error);
}

/*
 * Detach: stop the chip, detach from the network stack and release
 * every resource acquired in ste_attach().
 */
static int ste_detach(dev)
	device_t		dev;
{
	struct ste_softc	*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	STE_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	ste_stop(sc);
	ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);

	bus_generic_detach(dev);
	device_delete_child(dev, sc->ste_miibus);

	bus_teardown_intr(dev, sc->ste_irq, sc->ste_intrhand);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ste_irq);
	bus_release_resource(dev, STE_RES, STE_RID, sc->ste_res);

	contigfree(sc->ste_ldata, sizeof(struct ste_list_data), M_DEVBUF);

	STE_UNLOCK(sc);
	mtx_destroy(&sc->ste_mtx);

	return(0);
}

/*
 * Attach an mbuf cluster to an RX descriptor. If m is NULL a fresh
 * header+cluster is allocated; otherwise the caller's mbuf is recycled.
 */
static int ste_newbuf(sc, c, m)
	struct ste_softc	*sc;
	struct ste_chain_onefrag	*c;
	struct mbuf		*m;
{
	struct mbuf		*m_new = NULL;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("ste%d: no memory for rx list -- "
			    "packet dropped\n", sc->ste_unit);
			return(ENOBUFS);
		}
		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			printf("ste%d: no memory for rx list -- "
			    "packet dropped\n", sc->ste_unit);
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/* Leave room so the IP header lands on a longword boundary. */
	m_adj(m_new, ETHER_ALIGN);

	c->ste_mbuf = m_new;
	c->ste_ptr->ste_status = 0;
	c->ste_ptr->ste_frag.ste_addr = vtophys(mtod(m_new, caddr_t));
	c->ste_ptr->ste_frag.ste_len = 1536 | STE_FRAG_LAST;

	return(0);
}

/*
 * Build the circular RX descriptor ring and give each slot a cluster.
 */
static int ste_init_rx_list(sc)
	struct ste_softc	*sc;
{
	struct ste_chain_data	*cd;
	struct ste_list_data	*ld;
	int			i;

	cd = &sc->ste_cdata;
	ld = sc->ste_ldata;

	for (i = 0; i < STE_RX_LIST_CNT; i++) {
		cd->ste_rx_chain[i].ste_ptr = &ld->ste_rx_list[i];
		if (ste_newbuf(sc, &cd->ste_rx_chain[i], NULL) == ENOBUFS)
			return(ENOBUFS);
		if (i == (STE_RX_LIST_CNT - 1)) {
			/* Last entry wraps back to the first. */
			cd->ste_rx_chain[i].ste_next =
			    &cd->ste_rx_chain[0];
			ld->ste_rx_list[i].ste_next =
			    vtophys(&ld->ste_rx_list[0]);
		} else {
			cd->ste_rx_chain[i].ste_next =
			    &cd->ste_rx_chain[i + 1];
			ld->ste_rx_list[i].ste_next =
			    vtophys(&ld->ste_rx_list[i + 1]);
		}
	}

	cd->ste_rx_head = &cd->ste_rx_chain[0];

	return(0);
}

/*
 * Build the circular, doubly-linked TX descriptor ring and reset the
 * producer/consumer state.
 */
static void ste_init_tx_list(sc)
	struct ste_softc	*sc;
{
	struct ste_chain_data	*cd;
	struct ste_list_data	*ld;
	int			i;

	cd = &sc->ste_cdata;
	ld = sc->ste_ldata;
	for (i = 0; i < STE_TX_LIST_CNT; i++) {
		cd->ste_tx_chain[i].ste_ptr = &ld->ste_tx_list[i];
		cd->ste_tx_chain[i].ste_phys = vtophys(&ld->ste_tx_list[i]);
		if (i == (STE_TX_LIST_CNT - 1))
			cd->ste_tx_chain[i].ste_next =
			    &cd->ste_tx_chain[0];
		else
			cd->ste_tx_chain[i].ste_next =
			    &cd->ste_tx_chain[i + 1];
		if (i == 0)
			cd->ste_tx_chain[i].ste_prev =
			    &cd->ste_tx_chain[STE_TX_LIST_CNT - 1];
		else
			cd->ste_tx_chain[i].ste_prev =
			    &cd->ste_tx_chain[i - 1];
	}

	bzero((char *)ld->ste_tx_list,
	    sizeof(struct ste_desc) * STE_TX_LIST_CNT);

	cd->ste_tx_prod = 0;
	cd->ste_tx_cons = 0;
	cd->ste_tx_cnt = 0;

	return;
}

/*
 * Initialize the hardware and bring the interface up: program the
 * station address, set up both descriptor rings, configure the RX
 * filter, load the list pointers and enable the MAC and interrupts.
 */
static void ste_init(xsc)
	void			*xsc;
{
	struct ste_softc	*sc;
	int			i;
	struct ifnet		*ifp;
	struct mii_data		*mii;

	sc = xsc;
	STE_LOCK(sc);
	ifp = &sc->arpcom.ac_if;
	mii = device_get_softc(sc->ste_miibus);

	ste_stop(sc);

	/* Init our MAC address */
	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		CSR_WRITE_1(sc, STE_PAR0 + i, sc->arpcom.ac_enaddr[i]);
	}

	/* Init RX list */
	if (ste_init_rx_list(sc) == ENOBUFS) {
		printf("ste%d: initialization failed: no "
		    "memory for RX buffers\n", sc->ste_unit);
		ste_stop(sc);
		STE_UNLOCK(sc);
		return;
	}

	/* Init TX descriptors */
	ste_init_tx_list(sc);

	/* Set the TX freethresh value */
	CSR_WRITE_1(sc, STE_TX_DMABURST_THRESH, STE_PACKET_SIZE >> 8);

	/* Set the TX start threshold for best performance. */
	CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh);

	/* Set the TX reclaim threshold. */
	CSR_WRITE_1(sc, STE_TX_RECLAIM_THRESH, (STE_PACKET_SIZE >> 4));

	/* Set up the RX filter. */
	CSR_WRITE_1(sc, STE_RX_MODE, STE_RXMODE_UNICAST);

	/* If we want promiscuous mode, set the allframes bit.
*/ if (ifp->if_flags & IFF_PROMISC) { STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC); } else { STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC); } /* Set capture broadcast bit to accept broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) { STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_BROADCAST); } else { STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_BROADCAST); } ste_setmulti(sc); /* Load the address of the RX list. */ STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL); ste_wait(sc); CSR_WRITE_4(sc, STE_RX_DMALIST_PTR, vtophys(&sc->ste_ldata->ste_rx_list[0])); STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL); STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL); /* Set TX polling interval */ CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 64); /* Load address of the TX list */ STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL); ste_wait(sc); CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, vtophys(&sc->ste_ldata->ste_tx_list[0])); STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL); STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL); ste_wait(sc); /* Enable receiver and transmitter */ CSR_WRITE_2(sc, STE_MACCTL0, 0); STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_ENABLE); STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_ENABLE); /* Enable stats counters. */ STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_ENABLE); /* Enable interrupts. 
*/ CSR_WRITE_2(sc, STE_ISR, 0xFFFF); CSR_WRITE_2(sc, STE_IMR, STE_INTRS); ste_ifmedia_upd(ifp); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; sc->ste_stat_ch = timeout(ste_stats_update, sc, hz); STE_UNLOCK(sc); return; } static void ste_stop(sc) struct ste_softc *sc; { int i; struct ifnet *ifp; STE_LOCK(sc); ifp = &sc->arpcom.ac_if; untimeout(ste_stats_update, sc, sc->ste_stat_ch); CSR_WRITE_2(sc, STE_IMR, 0); STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_DISABLE); STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_DISABLE); STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_DISABLE); STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL); STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL); ste_wait(sc); sc->ste_link = 0; for (i = 0; i < STE_RX_LIST_CNT; i++) { if (sc->ste_cdata.ste_rx_chain[i].ste_mbuf != NULL) { m_freem(sc->ste_cdata.ste_rx_chain[i].ste_mbuf); sc->ste_cdata.ste_rx_chain[i].ste_mbuf = NULL; } } for (i = 0; i < STE_TX_LIST_CNT; i++) { if (sc->ste_cdata.ste_tx_chain[i].ste_mbuf != NULL) { m_freem(sc->ste_cdata.ste_tx_chain[i].ste_mbuf); sc->ste_cdata.ste_tx_chain[i].ste_mbuf = NULL; } } ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE); STE_UNLOCK(sc); return; } static void ste_reset(sc) struct ste_softc *sc; { int i; STE_SETBIT4(sc, STE_ASICCTL, STE_ASICCTL_GLOBAL_RESET|STE_ASICCTL_RX_RESET| STE_ASICCTL_TX_RESET|STE_ASICCTL_DMA_RESET| STE_ASICCTL_FIFO_RESET|STE_ASICCTL_NETWORK_RESET| STE_ASICCTL_AUTOINIT_RESET|STE_ASICCTL_HOST_RESET| STE_ASICCTL_EXTRESET_RESET); DELAY(100000); for (i = 0; i < STE_TIMEOUT; i++) { if (!(CSR_READ_4(sc, STE_ASICCTL) & STE_ASICCTL_RESET_BUSY)) break; } if (i == STE_TIMEOUT) printf("ste%d: global reset never completed\n", sc->ste_unit); return; } static int ste_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct ste_softc *sc; struct ifreq *ifr; struct mii_data *mii; int error = 0; sc = ifp->if_softc; STE_LOCK(sc); ifr = (struct ifreq *)data; switch(command) { case SIOCSIFADDR: case SIOCGIFADDR: 
case SIOCSIFMTU: error = ether_ioctl(ifp, command, data); break; case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->ste_if_flags & IFF_PROMISC)) { STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC); } else if (ifp->if_flags & IFF_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->ste_if_flags & IFF_PROMISC) { STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC); } else if (!(ifp->if_flags & IFF_RUNNING)) { sc->ste_tx_thresh = STE_MIN_FRAMELEN; ste_init(sc); } } else { if (ifp->if_flags & IFF_RUNNING) ste_stop(sc); } sc->ste_if_flags = ifp->if_flags; error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: ste_setmulti(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->ste_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; default: error = EINVAL; break; } STE_UNLOCK(sc); return(error); } static int ste_encap(sc, c, m_head) struct ste_softc *sc; struct ste_chain *c; struct mbuf *m_head; { int frag = 0; struct ste_frag *f = NULL; struct mbuf *m; struct ste_desc *d; int total_len = 0; d = c->ste_ptr; d->ste_ctl = 0; d->ste_next = 0; for (m = m_head, frag = 0; m != NULL; m = m->m_next) { if (m->m_len != 0) { if (frag == STE_MAXFRAGS) break; total_len += m->m_len; f = &c->ste_ptr->ste_frags[frag]; f->ste_addr = vtophys(mtod(m, vm_offset_t)); f->ste_len = m->m_len; frag++; } } c->ste_mbuf = m_head; c->ste_ptr->ste_frags[frag - 1].ste_len |= STE_FRAG_LAST; c->ste_ptr->ste_ctl = total_len; return(0); } static void ste_start(ifp) struct ifnet *ifp; { struct ste_softc *sc; struct mbuf *m_head = NULL; struct ste_chain *prev = NULL, *cur_tx = NULL, *start_tx; int idx; sc = ifp->if_softc; STE_LOCK(sc); if (!sc->ste_link) { STE_UNLOCK(sc); return; } if (ifp->if_flags & IFF_OACTIVE) { STE_UNLOCK(sc); return; } idx = sc->ste_cdata.ste_tx_prod; start_tx = &sc->ste_cdata.ste_tx_chain[idx]; while(sc->ste_cdata.ste_tx_chain[idx].ste_mbuf == NULL) { if 
		    ((STE_TX_LIST_CNT - sc->ste_cdata.ste_tx_cnt) < 3) {
			/* Ring nearly full: stop taking packets for now. */
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		cur_tx = &sc->ste_cdata.ste_tx_chain[idx];

		ste_encap(sc, cur_tx, m_head);

		/* Link this descriptor after the previous one queued. */
		if (prev != NULL)
			prev->ste_ptr->ste_next = cur_tx->ste_phys;
		prev = cur_tx;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp, cur_tx->ste_mbuf);

		STE_INC(idx, STE_TX_LIST_CNT);
		sc->ste_cdata.ste_tx_cnt++;
	}

	/* Queued nothing: done. */
	if (cur_tx == NULL) {
		STE_UNLOCK(sc);
		return;
	}

	/* Request a TX interrupt for the last frame queued. */
	cur_tx->ste_ptr->ste_ctl |= STE_TXCTL_DMAINTR;

	/* Start transmission */
	sc->ste_cdata.ste_tx_prod = idx;
	start_tx->ste_prev->ste_ptr->ste_next = start_tx->ste_phys;

	/* Arm the watchdog: 5 seconds to see TX progress. */
	ifp->if_timer = 5;

	STE_UNLOCK(sc);

	return;
}

/*
 * TX watchdog expired: count the error, reclaim whatever completed,
 * then reset and reinitialize the chip and restart the send queue.
 */
static void
ste_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct ste_softc	*sc;

	sc = ifp->if_softc;
	STE_LOCK(sc);

	ifp->if_oerrors++;
	printf("ste%d: watchdog timeout\n", sc->ste_unit);

	ste_txeoc(sc);
	ste_txeof(sc);
	ste_rxeof(sc);
	ste_reset(sc);
	ste_init(sc);

	if (ifp->if_snd.ifq_head != NULL)
		ste_start(ifp);

	STE_UNLOCK(sc);

	return;
}

/*
 * Device shutdown hook: just quiesce the hardware.
 */
static void
ste_shutdown(dev)
	device_t		dev;
{
	struct ste_softc	*sc;

	sc = device_get_softc(dev);

	ste_stop(sc);

	return;
}
Index: head/sys/pci/if_ti.c
===================================================================
--- head/sys/pci/if_ti.c	(revision 71961)
+++ head/sys/pci/if_ti.c	(revision 71962)
@@ -1,2506 +1,2505 @@
/*
 * Copyright (c) 1997, 1998, 1999
 * Bill Paul . All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 * 3.
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * Alteon Networks Tigon PCI gigabit ethernet driver for FreeBSD. * Manuals, sample driver and firmware source kits are available * from http://www.alteon.com/support/openkits. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The Alteon Networks Tigon chip contains an embedded R4000 CPU, * gigabit MAC, dual DMA channels and a PCI interface unit. NICs * using the Tigon may have anywhere from 512K to 2MB of SRAM. The * Tigon supports hardware IP, TCP and UCP checksumming, multicast * filtering and jumbo (9014 byte) frames. The hardware is largely * controlled by firmware, which must be loaded into the NIC during * initialization. 
* * The Tigon 2 contains 2 R4000 CPUs and requires a newer firmware * revision, which supports new features such as extended commands, * extended jumbo receive ring desciptors and a mini receive ring. * * Alteon Networks is to be commended for releasing such a vast amount * of development material for the Tigon NIC without requiring an NDA * (although they really should have done it a long time ago). With * any luck, the other vendors will finally wise up and follow Alteon's * stellar example. * * The firmware for the Tigon 1 and 2 NICs is compiled directly into * this driver by #including it as a C header file. This bloats the * driver somewhat, but it's the easiest method considering that the * driver code and firmware code need to be kept in sync. The source * for the firmware is not provided with the FreeBSD distribution since * compiling it requires a GNU toolchain targeted for mips-sgi-irix5.3. * * The following people deserve special thanks: * - Terry Murphy of 3Com, for providing a 3c985 Tigon 1 board * for testing * - Raymond Lee of Netgear, for providing a pair of Netgear * GA620 Tigon 2 boards for testing * - Ulf Zimmermann, for bringing the GA260 to my attention and * convincing me to write this driver. * - Andrew Gallatin for providing FreeBSD/Alpha support. */ #include "vlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if NVLAN > 0 #include #include #endif #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include #include #include #include #include #include #include #define TI_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_IP_FRAGS) #if !defined(lint) static const char rcsid[] = "$FreeBSD$"; #endif /* * Various supported device vendors/types and their names. 
*/ static struct ti_type ti_devs[] = { { ALT_VENDORID, ALT_DEVICEID_ACENIC, "Alteon AceNIC 1000baseSX Gigabit Ethernet" }, { ALT_VENDORID, ALT_DEVICEID_ACENIC_COPPER, "Alteon AceNIC 1000baseT Gigabit Ethernet" }, { TC_VENDORID, TC_DEVICEID_3C985, "3Com 3c985-SX Gigabit Ethernet" }, { NG_VENDORID, NG_DEVICEID_GA620, "Netgear GA620 1000baseSX Gigabit Ethernet" }, { NG_VENDORID, NG_DEVICEID_GA620T, "Netgear GA620 1000baseT Gigabit Ethernet" }, { SGI_VENDORID, SGI_DEVICEID_TIGON, "Silicon Graphics Gigabit Ethernet" }, { DEC_VENDORID, DEC_DEVICEID_FARALLON_PN9000SX, "Farallon PN9000SX Gigabit Ethernet" }, { 0, 0, NULL } }; static int ti_probe __P((device_t)); static int ti_attach __P((device_t)); static int ti_detach __P((device_t)); static void ti_txeof __P((struct ti_softc *)); static void ti_rxeof __P((struct ti_softc *)); static void ti_stats_update __P((struct ti_softc *)); static int ti_encap __P((struct ti_softc *, struct mbuf *, u_int32_t *)); static void ti_intr __P((void *)); static void ti_start __P((struct ifnet *)); static int ti_ioctl __P((struct ifnet *, u_long, caddr_t)); static void ti_init __P((void *)); static void ti_init2 __P((struct ti_softc *)); static void ti_stop __P((struct ti_softc *)); static void ti_watchdog __P((struct ifnet *)); static void ti_shutdown __P((device_t)); static int ti_ifmedia_upd __P((struct ifnet *)); static void ti_ifmedia_sts __P((struct ifnet *, struct ifmediareq *)); static u_int32_t ti_eeprom_putbyte __P((struct ti_softc *, int)); static u_int8_t ti_eeprom_getbyte __P((struct ti_softc *, int, u_int8_t *)); static int ti_read_eeprom __P((struct ti_softc *, caddr_t, int, int)); static void ti_add_mcast __P((struct ti_softc *, struct ether_addr *)); static void ti_del_mcast __P((struct ti_softc *, struct ether_addr *)); static void ti_setmulti __P((struct ti_softc *)); static void ti_mem __P((struct ti_softc *, u_int32_t, u_int32_t, caddr_t)); static void ti_loadfw __P((struct ti_softc *)); static void ti_cmd __P((struct 
ti_softc *, struct ti_cmd_desc *)); static void ti_cmd_ext __P((struct ti_softc *, struct ti_cmd_desc *, caddr_t, int)); static void ti_handle_events __P((struct ti_softc *)); static int ti_alloc_jumbo_mem __P((struct ti_softc *)); static void *ti_jalloc __P((struct ti_softc *)); static void ti_jfree __P((caddr_t, void *)); static int ti_newbuf_std __P((struct ti_softc *, int, struct mbuf *)); static int ti_newbuf_mini __P((struct ti_softc *, int, struct mbuf *)); static int ti_newbuf_jumbo __P((struct ti_softc *, int, struct mbuf *)); static int ti_init_rx_ring_std __P((struct ti_softc *)); static void ti_free_rx_ring_std __P((struct ti_softc *)); static int ti_init_rx_ring_jumbo __P((struct ti_softc *)); static void ti_free_rx_ring_jumbo __P((struct ti_softc *)); static int ti_init_rx_ring_mini __P((struct ti_softc *)); static void ti_free_rx_ring_mini __P((struct ti_softc *)); static void ti_free_tx_ring __P((struct ti_softc *)); static int ti_init_tx_ring __P((struct ti_softc *)); static int ti_64bitslot_war __P((struct ti_softc *)); static int ti_chipinit __P((struct ti_softc *)); static int ti_gibinit __P((struct ti_softc *)); static device_method_t ti_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ti_probe), DEVMETHOD(device_attach, ti_attach), DEVMETHOD(device_detach, ti_detach), DEVMETHOD(device_shutdown, ti_shutdown), { 0, 0 } }; static driver_t ti_driver = { "ti", ti_methods, sizeof(struct ti_softc) }; static devclass_t ti_devclass; DRIVER_MODULE(if_ti, pci, ti_driver, ti_devclass, 0, 0); /* * Send an instruction or address to the EEPROM, check for ACK. */ static u_int32_t ti_eeprom_putbyte(sc, byte) struct ti_softc *sc; int byte; { register int i, ack = 0; /* * Make sure we're in TX mode. */ TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN); /* * Feed in each bit and stobe the clock. 
 */
	/* NOTE(review): tail of ti_eeprom_putbyte() -- clock out bits, MSB first. */
	for (i = 0x80; i; i >>= 1) {
		if (byte & i) {
			TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT);
		} else {
			TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT);
		}
		DELAY(1);
		/* Pulse the clock with the data bit held steady. */
		TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
		DELAY(1);
		TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
	}

	/*
	 * Turn off TX mode.
	 */
	TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);

	/*
	 * Check for ack.
	 */
	TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
	ack = CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN;
	TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);

	return(ack);
}

/*
 * Read a byte of data stored in the EEPROM at address 'addr.'
 * We have to send two address bytes since the EEPROM can hold
 * more than 256 bytes of data.
 * Returns 0 on success, 1 if any handshake step is not acked.
 */
static u_int8_t
ti_eeprom_getbyte(sc, addr, dest)
	struct ti_softc		*sc;
	int			addr;
	u_int8_t		*dest;
{
	register int		i;
	u_int8_t		byte = 0;

	EEPROM_START;

	/*
	 * Send write control code to EEPROM.
	 */
	if (ti_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) {
		printf("ti%d: failed to send write command, status: %x\n",
		    sc->ti_unit, CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
		return(1);
	}

	/*
	 * Send first byte of address of byte we want to read.
	 */
	if (ti_eeprom_putbyte(sc, (addr >> 8) & 0xFF)) {
		printf("ti%d: failed to send address, status: %x\n",
		    sc->ti_unit, CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
		return(1);
	}
	/*
	 * Send second byte address of byte we want to read.
	 */
	if (ti_eeprom_putbyte(sc, addr & 0xFF)) {
		printf("ti%d: failed to send address, status: %x\n",
		    sc->ti_unit, CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
		return(1);
	}

	EEPROM_STOP;
	EEPROM_START;
	/*
	 * Send read control code to EEPROM.
	 */
	if (ti_eeprom_putbyte(sc, EEPROM_CTL_READ)) {
		printf("ti%d: failed to send read command, status: %x\n",
		    sc->ti_unit, CSR_READ_4(sc, TI_MISC_LOCAL_CTL));
		return(1);
	}

	/*
	 * Start reading bits from EEPROM.
	 */
	/* Shift the data bits in, MSB first, clocking each one. */
	TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN);
	for (i = 0x80; i; i >>= 1) {
		TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
		DELAY(1);
		if (CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN)
			byte |= i;
		TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK);
		DELAY(1);
	}

	EEPROM_STOP;

	/*
	 * No ACK generated for read, so just return byte.
	 */
	*dest = byte;

	return(0);
}

/*
 * Read a sequence of bytes from the EEPROM.
 * Reads 'cnt' bytes starting at offset 'off' into 'dest'.
 * Returns 0 on success, 1 as soon as any byte fails to read.
 */
static int
ti_read_eeprom(sc, dest, off, cnt)
	struct ti_softc		*sc;
	caddr_t			dest;
	int			off;
	int			cnt;
{
	int			err = 0, i;
	u_int8_t		byte = 0;

	for (i = 0; i < cnt; i++) {
		err = ti_eeprom_getbyte(sc, off + i, &byte);
		if (err)
			break;
		*(dest + i) = byte;
	}

	return(err ? 1 : 0);
}

/*
 * NIC memory access function. Can be used to either clear a section
 * of NIC local memory or (if buf is non-NULL) copy data into it.
 * All access goes through the shared memory window, so the transfer
 * proceeds in window-sized (TI_WINLEN) segments.
 */
static void
ti_mem(sc, addr, len, buf)
	struct ti_softc		*sc;
	u_int32_t		addr, len;
	caddr_t			buf;
{
	int			segptr, segsize, cnt;
	caddr_t			ti_winbase, ptr;

	segptr = addr;
	cnt = len;
	ti_winbase = (caddr_t)(sc->ti_vhandle + TI_WINDOW);
	ptr = buf;

	while(cnt) {
		/* Clamp this segment to what fits in the current window. */
		if (cnt < TI_WINLEN)
			segsize = cnt;
		else
			segsize = TI_WINLEN - (segptr % TI_WINLEN);
		/* Point the window at the enclosing TI_WINLEN boundary. */
		CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1)));
		if (buf == NULL)
			bzero((char *)ti_winbase + (segptr &
			    (TI_WINLEN - 1)), segsize);
		else {
			bcopy((char *)ptr, (char *)ti_winbase +
			    (segptr & (TI_WINLEN - 1)), segsize);
			ptr += segsize;
		}
		segptr += segsize;
		cnt -= segsize;
	}

	return;
}

/*
 * Load firmware image into the NIC. Check that the firmware revision
 * is acceptable and see if we want the firmware for the Tigon 1 or
 * Tigon 2.
 */
static void
ti_loadfw(sc)
	struct ti_softc		*sc;
{
	switch(sc->ti_hwrev) {
	case TI_HWREV_TIGON:
		/* Refuse firmware whose version is not what we were built with. */
		if (tigonFwReleaseMajor != TI_FIRMWARE_MAJOR ||
		    tigonFwReleaseMinor != TI_FIRMWARE_MINOR ||
		    tigonFwReleaseFix != TI_FIRMWARE_FIX) {
			printf("ti%d: firmware revision mismatch; want "
			    "%d.%d.%d, got %d.%d.%d\n", sc->ti_unit,
			    TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR,
			    TI_FIRMWARE_FIX, tigonFwReleaseMajor,
			    tigonFwReleaseMinor, tigonFwReleaseFix);
			return;
		}
		/* Copy each section into NIC SRAM; NULL buf zeroes BSS/SBSS. */
		ti_mem(sc, tigonFwTextAddr, tigonFwTextLen,
		    (caddr_t)tigonFwText);
		ti_mem(sc, tigonFwDataAddr, tigonFwDataLen,
		    (caddr_t)tigonFwData);
		ti_mem(sc, tigonFwRodataAddr, tigonFwRodataLen,
		    (caddr_t)tigonFwRodata);
		ti_mem(sc, tigonFwBssAddr, tigonFwBssLen, NULL);
		ti_mem(sc, tigonFwSbssAddr, tigonFwSbssLen, NULL);
		/* Point the on-board CPU at the firmware entry point. */
		CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigonFwStartAddr);
		break;
	case TI_HWREV_TIGON_II:
		if (tigon2FwReleaseMajor != TI_FIRMWARE_MAJOR ||
		    tigon2FwReleaseMinor != TI_FIRMWARE_MINOR ||
		    tigon2FwReleaseFix != TI_FIRMWARE_FIX) {
			printf("ti%d: firmware revision mismatch; want "
			    "%d.%d.%d, got %d.%d.%d\n", sc->ti_unit,
			    TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR,
			    TI_FIRMWARE_FIX, tigon2FwReleaseMajor,
			    tigon2FwReleaseMinor, tigon2FwReleaseFix);
			return;
		}
		ti_mem(sc, tigon2FwTextAddr, tigon2FwTextLen,
		    (caddr_t)tigon2FwText);
		ti_mem(sc, tigon2FwDataAddr, tigon2FwDataLen,
		    (caddr_t)tigon2FwData);
		ti_mem(sc, tigon2FwRodataAddr, tigon2FwRodataLen,
		    (caddr_t)tigon2FwRodata);
		ti_mem(sc, tigon2FwBssAddr, tigon2FwBssLen, NULL);
		ti_mem(sc, tigon2FwSbssAddr, tigon2FwSbssLen, NULL);
		CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigon2FwStartAddr);
		break;
	default:
		printf("ti%d: can't load firmware: unknown hardware rev\n",
		    sc->ti_unit);
		break;
	}

	return;
}

/*
 * Send the NIC a command via the command ring.
 */
static void
ti_cmd(sc, cmd)
	struct ti_softc		*sc;
	struct ti_cmd_desc	*cmd;
{
	u_int32_t		index;

	/* The command ring lives in NIC shared memory; bail if not mapped. */
	if (sc->ti_rdata->ti_cmd_ring == NULL)
		return;

	index = sc->ti_cmd_saved_prodidx;
	/* Write the descriptor into the ring slot, then bump the producer. */
	CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(u_int32_t *)(cmd));
	TI_INC(index, TI_CMD_RING_CNT);
	CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index);
	sc->ti_cmd_saved_prodidx = index;

	return;
}

/*
 * Send the NIC an extended command. The 'len' parameter specifies the
 * number of command slots to include after the initial command.
 */
static void
ti_cmd_ext(sc, cmd, arg, len)
	struct ti_softc		*sc;
	struct ti_cmd_desc	*cmd;
	caddr_t			arg;
	int			len;
{
	u_int32_t		index;
	register int		i;

	if (sc->ti_rdata->ti_cmd_ring == NULL)
		return;

	index = sc->ti_cmd_saved_prodidx;
	CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(u_int32_t *)(cmd));
	TI_INC(index, TI_CMD_RING_CNT);
	/* Follow the command with 'len' 32-bit words of argument data. */
	for (i = 0; i < len; i++) {
		CSR_WRITE_4(sc, TI_GCR_CMDRING +
		    (index * 4), *(u_int32_t *)(&arg[i * 4]));
		TI_INC(index, TI_CMD_RING_CNT);
	}
	CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index);
	sc->ti_cmd_saved_prodidx = index;

	return;
}

/*
 * Handle events that have triggered interrupts.
 */
static void
ti_handle_events(sc)
	struct ti_softc		*sc;
{
	struct ti_event_desc	*e;

	if (sc->ti_rdata->ti_event_ring == NULL)
		return;

	/* Drain the event ring until our consumer catches the producer. */
	while (sc->ti_ev_saved_considx != sc->ti_ev_prodidx.ti_idx) {
		e = &sc->ti_rdata->ti_event_ring[sc->ti_ev_saved_considx];
		switch(e->ti_event) {
		case TI_EV_LINKSTAT_CHANGED:
			sc->ti_linkstat = e->ti_code;
			if (e->ti_code == TI_EV_CODE_LINK_UP)
				printf("ti%d: 10/100 link up\n", sc->ti_unit);
			else if (e->ti_code == TI_EV_CODE_GIG_LINK_UP)
				printf("ti%d: gigabit link up\n", sc->ti_unit);
			else if (e->ti_code == TI_EV_CODE_LINK_DOWN)
				printf("ti%d: link down\n", sc->ti_unit);
			break;
		case TI_EV_ERROR:
			if (e->ti_code == TI_EV_CODE_ERR_INVAL_CMD)
				printf("ti%d: invalid command\n", sc->ti_unit);
			else if (e->ti_code == TI_EV_CODE_ERR_UNIMP_CMD)
				printf("ti%d: unknown command\n", sc->ti_unit);
			else if (e->ti_code == TI_EV_CODE_ERR_BADCFG)
				printf("ti%d: bad config data\n", sc->ti_unit);
			break;
		case TI_EV_FIRMWARE_UP:
			/* Firmware finished booting: run second-stage init. */
			ti_init2(sc);
			break;
		case TI_EV_STATS_UPDATED:
			ti_stats_update(sc);
			break;
		case TI_EV_RESET_JUMBO_RING:
		case TI_EV_MCAST_UPDATED:
			/* Who cares. */
			break;
		default:
			printf("ti%d: unknown event: %d\n",
			    sc->ti_unit, e->ti_event);
			break;
		}
		/* Advance the consumer index. */
		TI_INC(sc->ti_ev_saved_considx, TI_EVENT_RING_CNT);
		CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, sc->ti_ev_saved_considx);
	}

	return;
}

/*
 * Memory management for the jumbo receive ring is a pain in the
 * butt. We need to allocate at least 9018 bytes of space per frame,
 * _and_ it has to be contiguous (unless you use the extended
 * jumbo descriptor format). Using malloc() all the time won't
 * work: malloc() allocates memory in powers of two, which means we
 * would end up wasting a considerable amount of space by allocating
 * 9K chunks. We don't have a jumbo mbuf cluster pool. Thus, we have
 * to do our own memory management.
 *
 * The driver needs to allocate a contiguous chunk of memory at boot
 * time. We then chop this up ourselves into 9K pieces and use them
 * as external mbuf storage.
 *
 * One issue here is how much memory to allocate. The jumbo ring has
 * 256 slots in it, but at 9K per slot than can consume over 2MB of
 * RAM. This is a bit much, especially considering we also need
 * RAM for the standard ring and mini ring (on the Tigon 2). To
 * save space, we only actually allocate enough memory for 64 slots
 * by default, which works out to between 500 and 600K. This can
 * be tuned by changing a #define in if_tireg.h.
 */
static int
ti_alloc_jumbo_mem(sc)
	struct ti_softc		*sc;
{
	caddr_t			ptr;
	register int		i;
	struct ti_jpool_entry	*entry;

	/* Grab a big chunk o' storage. */
	sc->ti_cdata.ti_jumbo_buf = contigmalloc(TI_JMEM, M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->ti_cdata.ti_jumbo_buf == NULL) {
		printf("ti%d: no memory for jumbo buffers!\n", sc->ti_unit);
		return(ENOBUFS);
	}

	SLIST_INIT(&sc->ti_jfree_listhead);
	SLIST_INIT(&sc->ti_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->ti_cdata.ti_jumbo_buf;
	for (i = 0; i < TI_JSLOTS; i++) {
		sc->ti_cdata.ti_jslots[i] = ptr;
		ptr += TI_JLEN;
		entry = malloc(sizeof(struct ti_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			/*
			 * NOTE(review): pool entries already inserted on
			 * ti_jfree_listhead are leaked on this path; only
			 * the contiguous buffer is released.
			 */
			contigfree(sc->ti_cdata.ti_jumbo_buf, TI_JMEM,
			    M_DEVBUF);
			sc->ti_cdata.ti_jumbo_buf = NULL;
			printf("ti%d: no memory for jumbo "
			    "buffer queue!\n", sc->ti_unit);
			return(ENOBUFS);
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc->ti_jfree_listhead, entry,
		    jpool_entries);
	}

	return(0);
}

/*
 * Allocate a jumbo buffer.
 * Pops a free slot off the pool and moves it to the in-use list;
 * returns NULL when the pool is exhausted.
 */
static void *ti_jalloc(sc)
	struct ti_softc		*sc;
{
	struct ti_jpool_entry	*entry;

	entry = SLIST_FIRST(&sc->ti_jfree_listhead);

	if (entry == NULL) {
		printf("ti%d: no free jumbo buffers\n", sc->ti_unit);
		return(NULL);
	}

	SLIST_REMOVE_HEAD(&sc->ti_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->ti_jinuse_listhead, entry, jpool_entries);

	return(sc->ti_cdata.ti_jslots[entry->slot]);
}

/*
 * Release a jumbo buffer.
 */
static void ti_jfree(buf, args)
	caddr_t			buf;
	void			*args;
{
	struct ti_softc		*sc;
	int			i;
	struct ti_jpool_entry	*entry;

	/* Extract the softc struct pointer. */
	sc = (struct ti_softc *)args;

	if (sc == NULL)
		panic("ti_jfree: didn't get softc pointer!");

	/* calculate the slot this buffer belongs to */
	i = ((vm_offset_t)buf
	     - (vm_offset_t)sc->ti_cdata.ti_jumbo_buf) / TI_JLEN;

	if ((i < 0) || (i >= TI_JSLOTS))
		panic("ti_jfree: asked to free buffer that we don't manage!");

	/* Recycle an in-use list entry to record the freed slot. */
	entry = SLIST_FIRST(&sc->ti_jinuse_listhead);
	if (entry == NULL)
		panic("ti_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc->ti_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->ti_jfree_listhead,
	    entry, jpool_entries);

	return;
}

/*
 * Initialize a standard receive ring descriptor.
 * If 'm' is NULL a fresh mbuf + cluster is allocated; otherwise the
 * caller's mbuf is recycled in place.
 */
static int ti_newbuf_std(sc, i, m)
	struct ti_softc		*sc;
	int			i;
	struct mbuf		*m;
{
	struct mbuf		*m_new = NULL;
	struct ti_rx_desc	*r;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("ti%d: mbuf allocation failed "
			    "-- packet dropped!\n", sc->ti_unit);
			return(ENOBUFS);
		}

		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			printf("ti%d: cluster allocation failed "
			    "-- packet dropped!\n", sc->ti_unit);
			m_freem(m_new);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		/* Reuse the caller's mbuf: reset length and data pointer. */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/* Leave room so the IP header ends up longword-aligned. */
	m_adj(m_new, ETHER_ALIGN);
	sc->ti_cdata.ti_rx_std_chain[i] = m_new;
	r = &sc->ti_rdata->ti_rx_std_ring[i];
	TI_HOSTADDR(r->ti_addr) = vtophys(mtod(m_new, caddr_t));
	r->ti_type = TI_BDTYPE_RECV_BD;
	r->ti_flags = 0;
	if (sc->arpcom.ac_if.if_hwassist)
		r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
	r->ti_len = m_new->m_len;
	r->ti_idx = i;

	return(0);
}

/*
 * Initialize a mini receive ring descriptor. This only applies to
 * the Tigon 2.
 */
static int ti_newbuf_mini(sc, i, m)
	struct ti_softc		*sc;
	int			i;
	struct mbuf		*m;
{
	struct mbuf		*m_new = NULL;
	struct ti_rx_desc	*r;

	if (m == NULL) {
		/* Mini-ring frames fit in a plain mbuf header (MHLEN). */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("ti%d: mbuf allocation failed "
			    "-- packet dropped!\n", sc->ti_unit);
			return(ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MHLEN;
	} else {
		m_new = m;
		m_new->m_data = m_new->m_pktdat;
		m_new->m_len = m_new->m_pkthdr.len = MHLEN;
	}

	/* Leave room so the IP header ends up longword-aligned. */
	m_adj(m_new, ETHER_ALIGN);
	r = &sc->ti_rdata->ti_rx_mini_ring[i];
	sc->ti_cdata.ti_rx_mini_chain[i] = m_new;
	TI_HOSTADDR(r->ti_addr) = vtophys(mtod(m_new, caddr_t));
	r->ti_type = TI_BDTYPE_RECV_BD;
	r->ti_flags = TI_BDFLAG_MINI_RING;
	if (sc->arpcom.ac_if.if_hwassist)
		r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
	r->ti_len = m_new->m_len;
	r->ti_idx = i;

	return(0);
}

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int ti_newbuf_jumbo(sc, i, m)
	struct ti_softc		*sc;
	int			i;
	struct mbuf		*m;
{
	struct mbuf		*m_new = NULL;
	struct ti_rx_desc	*r;

	if (m == NULL) {
		caddr_t			*buf = NULL;

		/* Allocate the mbuf. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("ti%d: mbuf allocation failed "
			    "-- packet dropped!\n", sc->ti_unit);
			return(ENOBUFS);
		}

		/* Allocate the jumbo buffer */
		buf = ti_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			printf("ti%d: jumbo allocation failed "
			    "-- packet dropped!\n", sc->ti_unit);
			return(ENOBUFS);
		}

		/* Attach the buffer to the mbuf. */
		/* ti_jfree() returns the slot to the pool on last unref. */
		m_new->m_data = (void *) buf;
		m_new->m_len = m_new->m_pkthdr.len = TI_JUMBO_FRAMELEN;
		MEXTADD(m_new, buf, TI_JUMBO_FRAMELEN, ti_jfree,
		    (struct ti_softc *)sc, 0, EXT_NET_DRV);
	} else {
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = TI_JUMBO_FRAMELEN;
	}

	/* Leave room so the IP header ends up longword-aligned. */
	m_adj(m_new, ETHER_ALIGN);
	/* Set up the descriptor.
	 */
	r = &sc->ti_rdata->ti_rx_jumbo_ring[i];
	sc->ti_cdata.ti_rx_jumbo_chain[i] = m_new;
	TI_HOSTADDR(r->ti_addr) = vtophys(mtod(m_new, caddr_t));
	r->ti_type = TI_BDTYPE_RECV_JUMBO_BD;
	r->ti_flags = TI_BDFLAG_JUMBO_RING;
	if (sc->arpcom.ac_if.if_hwassist)
		r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
	r->ti_len = m_new->m_len;
	r->ti_idx = i;

	return(0);
}

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB or memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int ti_init_rx_ring_std(sc)
	struct ti_softc		*sc;
{
	register int		i;
	/* NOTE(review): 'cmd' looks unused here unless the
	 * TI_UPDATE_STDPROD macro expands to reference it -- verify. */
	struct ti_cmd_desc	cmd;

	for (i = 0; i < TI_SSLOTS; i++) {
		if (ti_newbuf_std(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	};

	/* Publish the last filled slot to the firmware. */
	TI_UPDATE_STDPROD(sc, i - 1);
	sc->ti_std = i - 1;

	return(0);
}

static void
ti_free_rx_ring_std(sc)
	struct ti_softc		*sc;
{
	register int		i;

	/* Free attached mbufs and clear each descriptor. */
	for (i = 0; i < TI_STD_RX_RING_CNT; i++) {
		if (sc->ti_cdata.ti_rx_std_chain[i] != NULL) {
			m_freem(sc->ti_cdata.ti_rx_std_chain[i]);
			sc->ti_cdata.ti_rx_std_chain[i] = NULL;
		}
		bzero((char *)&sc->ti_rdata->ti_rx_std_ring[i],
		    sizeof(struct ti_rx_desc));
	}

	return;
}

static int
ti_init_rx_ring_jumbo(sc)
	struct ti_softc		*sc;
{
	register int		i;
	/* NOTE(review): see 'cmd' note in ti_init_rx_ring_std(). */
	struct ti_cmd_desc	cmd;

	for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
		if (ti_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	};

	TI_UPDATE_JUMBOPROD(sc, i - 1);
	sc->ti_jumbo = i - 1;

	return(0);
}

static void
ti_free_rx_ring_jumbo(sc)
	struct ti_softc		*sc;
{
	register int		i;

	for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
		if (sc->ti_cdata.ti_rx_jumbo_chain[i] != NULL) {
			m_freem(sc->ti_cdata.ti_rx_jumbo_chain[i]);
			sc->ti_cdata.ti_rx_jumbo_chain[i] = NULL;
		}
		bzero((char *)&sc->ti_rdata->ti_rx_jumbo_ring[i],
		    sizeof(struct ti_rx_desc));
	}

	return;
}

static int
ti_init_rx_ring_mini(sc)
	struct ti_softc		*sc;
{
	register int		i;

	for (i = 0; i < TI_MSLOTS; i++) {
		if (ti_newbuf_mini(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	};
	/* Publish the last filled mini-ring slot to the firmware. */
	TI_UPDATE_MINIPROD(sc, i - 1);
	sc->ti_mini = i - 1;

	return(0);
}

static void
ti_free_rx_ring_mini(sc)
	struct ti_softc		*sc;
{
	register int		i;

	/* Free attached mbufs and clear each mini-ring descriptor. */
	for (i = 0; i < TI_MINI_RX_RING_CNT; i++) {
		if (sc->ti_cdata.ti_rx_mini_chain[i] != NULL) {
			m_freem(sc->ti_cdata.ti_rx_mini_chain[i]);
			sc->ti_cdata.ti_rx_mini_chain[i] = NULL;
		}
		bzero((char *)&sc->ti_rdata->ti_rx_mini_ring[i],
		    sizeof(struct ti_rx_desc));
	}

	return;
}

static void
ti_free_tx_ring(sc)
	struct ti_softc		*sc;
{
	register int		i;

	if (sc->ti_rdata->ti_tx_ring == NULL)
		return;

	for (i = 0; i < TI_TX_RING_CNT; i++) {
		if (sc->ti_cdata.ti_tx_chain[i] != NULL) {
			m_freem(sc->ti_cdata.ti_tx_chain[i]);
			sc->ti_cdata.ti_tx_chain[i] = NULL;
		}
		bzero((char *)&sc->ti_rdata->ti_tx_ring[i],
		    sizeof(struct ti_tx_desc));
	}

	return;
}

static int
ti_init_tx_ring(sc)
	struct ti_softc		*sc;
{
	/* Reset software TX state and the send producer mailbox. */
	sc->ti_txcnt = 0;
	sc->ti_tx_saved_considx = 0;
	CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, 0);
	return(0);
}

/*
 * The Tigon 2 firmware has a new way to add/delete multicast addresses,
 * but we have to support the old way too so that Tigon 1 cards will
 * work.
 */
void ti_add_mcast(sc, addr)
	struct ti_softc		*sc;
	struct ether_addr	*addr;
{
	/* NOTE(review): 'cmd' is presumably referenced by the TI_DO_CMD
	 * macro expansion -- verify against if_tireg.h before removing. */
	struct ti_cmd_desc	cmd;
	u_int16_t		*m;
	u_int32_t		ext[2] = {0, 0};

	m = (u_int16_t *)&addr->octet[0];

	switch(sc->ti_hwrev) {
	case TI_HWREV_TIGON:
		/* Tigon 1: load the address registers, then command. */
		CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0]));
		CSR_WRITE_4(sc, TI_GCR_MAR1,
		    (htons(m[1]) << 16) | htons(m[2]));
		TI_DO_CMD(TI_CMD_ADD_MCAST_ADDR, 0, 0);
		break;
	case TI_HWREV_TIGON_II:
		/* Tigon 2: pass the address as extended command data. */
		ext[0] = htons(m[0]);
		ext[1] = (htons(m[1]) << 16) | htons(m[2]);
		TI_DO_CMD_EXT(TI_CMD_EXT_ADD_MCAST, 0, 0, (caddr_t)&ext, 2);
		break;
	default:
		printf("ti%d: unknown hwrev\n", sc->ti_unit);
		break;
	}

	return;
}

void ti_del_mcast(sc, addr)
	struct ti_softc		*sc;
	struct ether_addr	*addr;
{
	/* NOTE(review): see 'cmd' note in ti_add_mcast(). */
	struct ti_cmd_desc	cmd;
	u_int16_t		*m;
	u_int32_t		ext[2] = {0, 0};

	m = (u_int16_t *)&addr->octet[0];

	switch(sc->ti_hwrev) {
	case TI_HWREV_TIGON:
		CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0]));
		CSR_WRITE_4(sc, TI_GCR_MAR1,
		    (htons(m[1]) << 16) | htons(m[2]));
		TI_DO_CMD(TI_CMD_DEL_MCAST_ADDR, 0, 0);
		break;
	case TI_HWREV_TIGON_II:
		ext[0] = htons(m[0]);
		ext[1] = (htons(m[1]) << 16) | htons(m[2]);
		TI_DO_CMD_EXT(TI_CMD_EXT_DEL_MCAST, 0, 0, (caddr_t)&ext, 2);
		break;
	default:
		printf("ti%d: unknown hwrev\n", sc->ti_unit);
		break;
	}

	return;
}

/*
 * Configure the Tigon's multicast address filter.
 *
 * The actual multicast table management is a bit of a pain, thanks to
 * slight brain damage on the part of both Alteon and us. With our
 * multicast code, we are only alerted when the multicast address table
 * changes and at that point we only have the current list of addresses:
 * we only know the current state, not the previous state, so we don't
 * actually know what addresses were removed or added. The firmware has
 * state, but we can't get our grubby mits on it, and there is no 'delete
 * all multicast addresses' command. Hence, we have to maintain our own
 * state so we know what addresses have been programmed into the NIC at
 * any given time.
*/ static void ti_setmulti(sc) struct ti_softc *sc; { struct ifnet *ifp; struct ifmultiaddr *ifma; struct ti_cmd_desc cmd; struct ti_mc_entry *mc; u_int32_t intrs; ifp = &sc->arpcom.ac_if; if (ifp->if_flags & IFF_ALLMULTI) { TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_ENB, 0); return; } else { TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_DIS, 0); } /* Disable interrupts. */ intrs = CSR_READ_4(sc, TI_MB_HOSTINTR); CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); /* First, zot all the existing filters. */ while (sc->ti_mc_listhead.slh_first != NULL) { mc = sc->ti_mc_listhead.slh_first; ti_del_mcast(sc, &mc->mc_addr); SLIST_REMOVE_HEAD(&sc->ti_mc_listhead, mc_entries); free(mc, M_DEVBUF); } /* Now program new ones. */ - for (ifma = ifp->if_multiaddrs.lh_first; - ifma != NULL; ifma = ifma->ifma_link.le_next) { + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; mc = malloc(sizeof(struct ti_mc_entry), M_DEVBUF, M_NOWAIT); bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), (char *)&mc->mc_addr, ETHER_ADDR_LEN); SLIST_INSERT_HEAD(&sc->ti_mc_listhead, mc, mc_entries); ti_add_mcast(sc, &mc->mc_addr); } /* Re-enable interrupts. */ CSR_WRITE_4(sc, TI_MB_HOSTINTR, intrs); return; } /* * Check to see if the BIOS has configured us for a 64 bit slot when * we aren't actually in one. If we detect this condition, we can work * around it on the Tigon 2 by setting a bit in the PCI state register, * but for the Tigon 1 we must give up and abort the interface attach. */ static int ti_64bitslot_war(sc) struct ti_softc *sc; { if (!(CSR_READ_4(sc, TI_PCI_STATE) & TI_PCISTATE_32BIT_BUS)) { CSR_WRITE_4(sc, 0x600, 0); CSR_WRITE_4(sc, 0x604, 0); CSR_WRITE_4(sc, 0x600, 0x5555AAAA); if (CSR_READ_4(sc, 0x604) == 0x5555AAAA) { if (sc->ti_hwrev == TI_HWREV_TIGON) return(EINVAL); else { TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_32BIT_BUS); return(0); } } } return(0); } /* * Do endian, PCI and DMA initialization. 
Also check the on-board ROM * self-test results. */ static int ti_chipinit(sc) struct ti_softc *sc; { u_int32_t cacheline; u_int32_t pci_writemax = 0; /* Initialize link to down state. */ sc->ti_linkstat = TI_EV_CODE_LINK_DOWN; sc->arpcom.ac_if.if_hwassist = TI_CSUM_FEATURES; /* Set endianness before we access any non-PCI registers. */ #if BYTE_ORDER == BIG_ENDIAN CSR_WRITE_4(sc, TI_MISC_HOST_CTL, TI_MHC_BIGENDIAN_INIT | (TI_MHC_BIGENDIAN_INIT << 24)); #else CSR_WRITE_4(sc, TI_MISC_HOST_CTL, TI_MHC_LITTLEENDIAN_INIT | (TI_MHC_LITTLEENDIAN_INIT << 24)); #endif /* Check the ROM failed bit to see if self-tests passed. */ if (CSR_READ_4(sc, TI_CPU_STATE) & TI_CPUSTATE_ROMFAIL) { printf("ti%d: board self-diagnostics failed!\n", sc->ti_unit); return(ENODEV); } /* Halt the CPU. */ TI_SETBIT(sc, TI_CPU_STATE, TI_CPUSTATE_HALT); /* Figure out the hardware revision. */ switch(CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_CHIP_REV_MASK) { case TI_REV_TIGON_I: sc->ti_hwrev = TI_HWREV_TIGON; break; case TI_REV_TIGON_II: sc->ti_hwrev = TI_HWREV_TIGON_II; break; default: printf("ti%d: unsupported chip revision\n", sc->ti_unit); return(ENODEV); } /* Do special setup for Tigon 2. */ if (sc->ti_hwrev == TI_HWREV_TIGON_II) { TI_SETBIT(sc, TI_CPU_CTL_B, TI_CPUSTATE_HALT); TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_SRAM_BANK_256K); TI_SETBIT(sc, TI_MISC_CONF, TI_MCR_SRAM_SYNCHRONOUS); } /* Set up the PCI state register. */ CSR_WRITE_4(sc, TI_PCI_STATE, TI_PCI_READ_CMD|TI_PCI_WRITE_CMD); if (sc->ti_hwrev == TI_HWREV_TIGON_II) { TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_USE_MEM_RD_MULT); } /* Clear the read/write max DMA parameters. */ TI_CLRBIT(sc, TI_PCI_STATE, (TI_PCISTATE_WRITE_MAXDMA| TI_PCISTATE_READ_MAXDMA)); /* Get cache line size. */ cacheline = CSR_READ_4(sc, TI_PCI_BIST) & 0xFF; /* * If the system has set enabled the PCI memory write * and invalidate command in the command register, set * the write max parameter accordingly. This is necessary * to use MWI with the Tigon 2. 
*/ if (CSR_READ_4(sc, TI_PCI_CMDSTAT) & PCIM_CMD_MWIEN) { switch(cacheline) { case 1: case 4: case 8: case 16: case 32: case 64: break; default: /* Disable PCI memory write and invalidate. */ if (bootverbose) printf("ti%d: cache line size %d not " "supported; disabling PCI MWI\n", sc->ti_unit, cacheline); CSR_WRITE_4(sc, TI_PCI_CMDSTAT, CSR_READ_4(sc, TI_PCI_CMDSTAT) & ~PCIM_CMD_MWIEN); break; } } #ifdef __brokenalpha__ /* * From the Alteon sample driver: * Must insure that we do not cross an 8K (bytes) boundary * for DMA reads. Our highest limit is 1K bytes. This is a * restriction on some ALPHA platforms with early revision * 21174 PCI chipsets, such as the AlphaPC 164lx */ TI_SETBIT(sc, TI_PCI_STATE, pci_writemax|TI_PCI_READMAX_1024); #else TI_SETBIT(sc, TI_PCI_STATE, pci_writemax); #endif /* This sets the min dma param all the way up (0xff). */ TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_MINDMA); /* Configure DMA variables. */ #if BYTE_ORDER == BIG_ENDIAN CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_BD | TI_OPMODE_BYTESWAP_DATA | TI_OPMODE_WORDSWAP_BD | TI_OPMODE_WARN_ENB | TI_OPMODE_FATAL_ENB | TI_OPMODE_DONT_FRAG_JUMBO); #else CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_DATA| TI_OPMODE_WORDSWAP_BD|TI_OPMODE_DONT_FRAG_JUMBO| TI_OPMODE_WARN_ENB|TI_OPMODE_FATAL_ENB); #endif /* * Only allow 1 DMA channel to be active at a time. * I don't think this is a good idea, but without it * the firmware racks up lots of nicDmaReadRingFull * errors. This is not compatible with hardware checksums. */ if (sc->arpcom.ac_if.if_hwassist == 0) TI_SETBIT(sc, TI_GCR_OPMODE, TI_OPMODE_1_DMA_ACTIVE); /* Recommended settings from Tigon manual. 
*/ CSR_WRITE_4(sc, TI_GCR_DMA_WRITECFG, TI_DMA_STATE_THRESH_8W); CSR_WRITE_4(sc, TI_GCR_DMA_READCFG, TI_DMA_STATE_THRESH_8W); if (ti_64bitslot_war(sc)) { printf("ti%d: bios thinks we're in a 64 bit slot, " "but we aren't", sc->ti_unit); return(EINVAL); } return(0); } /* * Initialize the general information block and firmware, and * start the CPU(s) running. */ static int ti_gibinit(sc) struct ti_softc *sc; { struct ti_rcb *rcb; int i; struct ifnet *ifp; ifp = &sc->arpcom.ac_if; /* Disable interrupts for now. */ CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); /* Tell the chip where to find the general information block. */ CSR_WRITE_4(sc, TI_GCR_GENINFO_HI, 0); CSR_WRITE_4(sc, TI_GCR_GENINFO_LO, vtophys(&sc->ti_rdata->ti_info)); /* Load the firmware into SRAM. */ ti_loadfw(sc); /* Set up the contents of the general info and ring control blocks. */ /* Set up the event ring and producer pointer. */ rcb = &sc->ti_rdata->ti_info.ti_ev_rcb; TI_HOSTADDR(rcb->ti_hostaddr) = vtophys(&sc->ti_rdata->ti_event_ring); rcb->ti_flags = 0; TI_HOSTADDR(sc->ti_rdata->ti_info.ti_ev_prodidx_ptr) = vtophys(&sc->ti_ev_prodidx); sc->ti_ev_prodidx.ti_idx = 0; CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, 0); sc->ti_ev_saved_considx = 0; /* Set up the command ring and producer mailbox. */ rcb = &sc->ti_rdata->ti_info.ti_cmd_rcb; sc->ti_rdata->ti_cmd_ring = (struct ti_cmd_desc *)(sc->ti_vhandle + TI_GCR_CMDRING); TI_HOSTADDR(rcb->ti_hostaddr) = TI_GCR_NIC_ADDR(TI_GCR_CMDRING); rcb->ti_flags = 0; rcb->ti_max_len = 0; for (i = 0; i < TI_CMD_RING_CNT; i++) { CSR_WRITE_4(sc, TI_GCR_CMDRING + (i * 4), 0); } CSR_WRITE_4(sc, TI_GCR_CMDCONS_IDX, 0); CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, 0); sc->ti_cmd_saved_prodidx = 0; /* * Assign the address of the stats refresh buffer. * We re-use the current stats buffer for this to * conserve memory. */ TI_HOSTADDR(sc->ti_rdata->ti_info.ti_refresh_stats_ptr) = vtophys(&sc->ti_rdata->ti_info.ti_stats); /* Set up the standard receive ring. 
*/ rcb = &sc->ti_rdata->ti_info.ti_std_rx_rcb; TI_HOSTADDR(rcb->ti_hostaddr) = vtophys(&sc->ti_rdata->ti_rx_std_ring); rcb->ti_max_len = TI_FRAMELEN; rcb->ti_flags = 0; if (sc->arpcom.ac_if.if_hwassist) rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM | TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM; #if NVLAN > 0 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; #endif /* Set up the jumbo receive ring. */ rcb = &sc->ti_rdata->ti_info.ti_jumbo_rx_rcb; TI_HOSTADDR(rcb->ti_hostaddr) = vtophys(&sc->ti_rdata->ti_rx_jumbo_ring); rcb->ti_max_len = TI_JUMBO_FRAMELEN; rcb->ti_flags = 0; if (sc->arpcom.ac_if.if_hwassist) rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM | TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM; #if NVLAN > 0 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; #endif /* * Set up the mini ring. Only activated on the * Tigon 2 but the slot in the config block is * still there on the Tigon 1. */ rcb = &sc->ti_rdata->ti_info.ti_mini_rx_rcb; TI_HOSTADDR(rcb->ti_hostaddr) = vtophys(&sc->ti_rdata->ti_rx_mini_ring); rcb->ti_max_len = MHLEN - ETHER_ALIGN; if (sc->ti_hwrev == TI_HWREV_TIGON) rcb->ti_flags = TI_RCB_FLAG_RING_DISABLED; else rcb->ti_flags = 0; if (sc->arpcom.ac_if.if_hwassist) rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM | TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM; #if NVLAN > 0 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; #endif /* * Set up the receive return ring. */ rcb = &sc->ti_rdata->ti_info.ti_return_rcb; TI_HOSTADDR(rcb->ti_hostaddr) = vtophys(&sc->ti_rdata->ti_rx_return_ring); rcb->ti_flags = 0; rcb->ti_max_len = TI_RETURN_RING_CNT; TI_HOSTADDR(sc->ti_rdata->ti_info.ti_return_prodidx_ptr) = vtophys(&sc->ti_return_prodidx); /* * Set up the tx ring. Note: for the Tigon 2, we have the option * of putting the transmit ring in the host's address space and * letting the chip DMA it instead of leaving the ring in the NIC's * memory and accessing it through the shared memory region. 
We * do this for the Tigon 2, but it doesn't work on the Tigon 1, * so we have to revert to the shared memory scheme if we detect * a Tigon 1 chip. */ CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE); if (sc->ti_hwrev == TI_HWREV_TIGON) { sc->ti_rdata->ti_tx_ring_nic = (struct ti_tx_desc *)(sc->ti_vhandle + TI_WINDOW); } bzero((char *)sc->ti_rdata->ti_tx_ring, TI_TX_RING_CNT * sizeof(struct ti_tx_desc)); rcb = &sc->ti_rdata->ti_info.ti_tx_rcb; if (sc->ti_hwrev == TI_HWREV_TIGON) rcb->ti_flags = 0; else rcb->ti_flags = TI_RCB_FLAG_HOST_RING; #if NVLAN > 0 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; #endif if (sc->arpcom.ac_if.if_hwassist) rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM | TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM; rcb->ti_max_len = TI_TX_RING_CNT; if (sc->ti_hwrev == TI_HWREV_TIGON) TI_HOSTADDR(rcb->ti_hostaddr) = TI_TX_RING_BASE; else TI_HOSTADDR(rcb->ti_hostaddr) = vtophys(&sc->ti_rdata->ti_tx_ring); TI_HOSTADDR(sc->ti_rdata->ti_info.ti_tx_considx_ptr) = vtophys(&sc->ti_tx_considx); /* Set up tuneables */ if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, (sc->ti_rx_coal_ticks / 10)); else CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, sc->ti_rx_coal_ticks); CSR_WRITE_4(sc, TI_GCR_TX_COAL_TICKS, sc->ti_tx_coal_ticks); CSR_WRITE_4(sc, TI_GCR_STAT_TICKS, sc->ti_stat_ticks); CSR_WRITE_4(sc, TI_GCR_RX_MAX_COAL_BD, sc->ti_rx_max_coal_bds); CSR_WRITE_4(sc, TI_GCR_TX_MAX_COAL_BD, sc->ti_tx_max_coal_bds); CSR_WRITE_4(sc, TI_GCR_TX_BUFFER_RATIO, sc->ti_tx_buf_ratio); /* Turn interrupts on. */ CSR_WRITE_4(sc, TI_GCR_MASK_INTRS, 0); CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0); /* Start CPU. */ TI_CLRBIT(sc, TI_CPU_STATE, (TI_CPUSTATE_HALT|TI_CPUSTATE_STEP)); return(0); } /* * Probe for a Tigon chip. Check the PCI vendor and device IDs * against our list and return its name if we find a match. 
*/ static int ti_probe(dev) device_t dev; { struct ti_type *t; t = ti_devs; while(t->ti_name != NULL) { if ((pci_get_vendor(dev) == t->ti_vid) && (pci_get_device(dev) == t->ti_did)) { device_set_desc(dev, t->ti_name); return(0); } t++; } return(ENXIO); } static int ti_attach(dev) device_t dev; { u_int32_t command; struct ifnet *ifp; struct ti_softc *sc; int unit, error = 0, rid; sc = device_get_softc(dev); unit = device_get_unit(dev); bzero(sc, sizeof(struct ti_softc)); mtx_init(&sc->ti_mtx, device_get_nameunit(dev), MTX_DEF | MTX_RECURSE); TI_LOCK(sc); /* * Map control/status registers. */ command = pci_read_config(dev, PCIR_COMMAND, 4); command |= (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN); pci_write_config(dev, PCIR_COMMAND, command, 4); command = pci_read_config(dev, PCIR_COMMAND, 4); if (!(command & PCIM_CMD_MEMEN)) { printf("ti%d: failed to enable memory mapping!\n", unit); error = ENXIO; goto fail; } rid = TI_PCI_LOMEM; sc->ti_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1, RF_ACTIVE|PCI_RF_DENSE); if (sc->ti_res == NULL) { printf ("ti%d: couldn't map memory\n", unit); error = ENXIO; goto fail; } sc->ti_btag = rman_get_bustag(sc->ti_res); sc->ti_bhandle = rman_get_bushandle(sc->ti_res); sc->ti_vhandle = (vm_offset_t)rman_get_virtual(sc->ti_res); /* Allocate interrupt */ rid = 0; sc->ti_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE); if (sc->ti_irq == NULL) { printf("ti%d: couldn't map interrupt\n", unit); error = ENXIO; goto fail; } error = bus_setup_intr(dev, sc->ti_irq, INTR_TYPE_NET, ti_intr, sc, &sc->ti_intrhand); if (error) { bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ti_irq); bus_release_resource(dev, SYS_RES_MEMORY, TI_PCI_LOMEM, sc->ti_res); printf("ti%d: couldn't set up irq\n", unit); goto fail; } sc->ti_unit = unit; if (ti_chipinit(sc)) { printf("ti%d: chip initialization failed\n", sc->ti_unit); bus_teardown_intr(dev, sc->ti_irq, sc->ti_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, 
sc->ti_irq); bus_release_resource(dev, SYS_RES_MEMORY, TI_PCI_LOMEM, sc->ti_res); error = ENXIO; goto fail; } /* Zero out the NIC's on-board SRAM. */ ti_mem(sc, 0x2000, 0x100000 - 0x2000, NULL); /* Init again -- zeroing memory may have clobbered some registers. */ if (ti_chipinit(sc)) { printf("ti%d: chip initialization failed\n", sc->ti_unit); bus_teardown_intr(dev, sc->ti_irq, sc->ti_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ti_irq); bus_release_resource(dev, SYS_RES_MEMORY, TI_PCI_LOMEM, sc->ti_res); error = ENXIO; goto fail; } /* * Get station address from the EEPROM. Note: the manual states * that the MAC address is at offset 0x8c, however the data is * stored as two longwords (since that's how it's loaded into * the NIC). This means the MAC address is actually preceeded * by two zero bytes. We need to skip over those. */ if (ti_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr, TI_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) { printf("ti%d: failed to read station address\n", unit); bus_teardown_intr(dev, sc->ti_irq, sc->ti_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ti_irq); bus_release_resource(dev, SYS_RES_MEMORY, TI_PCI_LOMEM, sc->ti_res); error = ENXIO; goto fail; } /* * A Tigon chip was detected. Inform the world. */ printf("ti%d: Ethernet address: %6D\n", unit, sc->arpcom.ac_enaddr, ":"); /* Allocate the general information block and ring buffers. */ sc->ti_rdata = contigmalloc(sizeof(struct ti_ring_data), M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc->ti_rdata == NULL) { bus_teardown_intr(dev, sc->ti_irq, sc->ti_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ti_irq); bus_release_resource(dev, SYS_RES_MEMORY, TI_PCI_LOMEM, sc->ti_res); error = ENXIO; printf("ti%d: no memory for list buffers!\n", sc->ti_unit); goto fail; } bzero(sc->ti_rdata, sizeof(struct ti_ring_data)); /* Try to allocate memory for jumbo buffers. 
*/ if (ti_alloc_jumbo_mem(sc)) { printf("ti%d: jumbo buffer allocation failed\n", sc->ti_unit); bus_teardown_intr(dev, sc->ti_irq, sc->ti_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ti_irq); bus_release_resource(dev, SYS_RES_MEMORY, TI_PCI_LOMEM, sc->ti_res); contigfree(sc->ti_rdata, sizeof(struct ti_ring_data), M_DEVBUF); error = ENXIO; goto fail; } /* * We really need a better way to tell a 1000baseTX card * from a 1000baseSX one, since in theory there could be * OEMed 1000baseTX cards from lame vendors who aren't * clever enough to change the PCI ID. For the moment * though, the AceNIC is the only copper card available. */ if (pci_get_vendor(dev) == ALT_VENDORID && pci_get_device(dev) == ALT_DEVICEID_ACENIC_COPPER) sc->ti_copper = 1; /* Ok, it's not the only copper card available. */ if (pci_get_vendor(dev) == NG_VENDORID && pci_get_device(dev) == NG_DEVICEID_GA620T) sc->ti_copper = 1; /* Set default tuneable values. */ sc->ti_stat_ticks = 2 * TI_TICKS_PER_SEC; sc->ti_rx_coal_ticks = TI_TICKS_PER_SEC / 5000; sc->ti_tx_coal_ticks = TI_TICKS_PER_SEC / 500; sc->ti_rx_max_coal_bds = 64; sc->ti_tx_max_coal_bds = 128; sc->ti_tx_buf_ratio = 21; /* Set up ifnet structure */ ifp = &sc->arpcom.ac_if; ifp->if_softc = sc; ifp->if_unit = sc->ti_unit; ifp->if_name = "ti"; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = ti_ioctl; ifp->if_output = ether_output; ifp->if_start = ti_start; ifp->if_watchdog = ti_watchdog; ifp->if_init = ti_init; ifp->if_mtu = ETHERMTU; ifp->if_snd.ifq_maxlen = TI_TX_RING_CNT - 1; /* Set up ifmedia support. */ ifmedia_init(&sc->ifmedia, IFM_IMASK, ti_ifmedia_upd, ti_ifmedia_sts); if (sc->ti_copper) { /* * Copper cards allow manual 10/100 mode selection, * but not manual 1000baseTX mode selection. Why? * Becuase currently there's no way to specify the * master/slave setting through the firmware interface, * so Alteon decided to just bag it and handle it * via autonegotiation. 
*/ ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_TX, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_TX|IFM_FDX, 0, NULL); } else { /* Fiber cards don't support 10/100 modes. */ ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL); } ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_AUTO); /* * Call MI attach routine. */ ether_ifattach(ifp, ETHER_BPF_SUPPORTED); TI_UNLOCK(sc); return(0); fail: TI_UNLOCK(sc); mtx_destroy(&sc->ti_mtx); return(error); } static int ti_detach(dev) device_t dev; { struct ti_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); TI_LOCK(sc); ifp = &sc->arpcom.ac_if; ether_ifdetach(ifp, ETHER_BPF_SUPPORTED); ti_stop(sc); bus_teardown_intr(dev, sc->ti_irq, sc->ti_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ti_irq); bus_release_resource(dev, SYS_RES_MEMORY, TI_PCI_LOMEM, sc->ti_res); contigfree(sc->ti_cdata.ti_jumbo_buf, TI_JMEM, M_DEVBUF); contigfree(sc->ti_rdata, sizeof(struct ti_ring_data), M_DEVBUF); ifmedia_removeall(&sc->ifmedia); TI_UNLOCK(sc); mtx_destroy(&sc->ti_mtx); return(0); } /* * Frame reception handling. This is called if there's a frame * on the receive return list. 
* * Note: we have to be able to handle three possibilities here: * 1) the frame is from the mini receive ring (can only happen) * on Tigon 2 boards) * 2) the frame is from the jumbo recieve ring * 3) the frame is from the standard receive ring */ static void ti_rxeof(sc) struct ti_softc *sc; { struct ifnet *ifp; struct ti_cmd_desc cmd; ifp = &sc->arpcom.ac_if; while(sc->ti_rx_saved_considx != sc->ti_return_prodidx.ti_idx) { struct ti_rx_desc *cur_rx; u_int32_t rxidx; struct ether_header *eh; struct mbuf *m = NULL; #if NVLAN > 0 u_int16_t vlan_tag = 0; int have_tag = 0; #endif cur_rx = &sc->ti_rdata->ti_rx_return_ring[sc->ti_rx_saved_considx]; rxidx = cur_rx->ti_idx; TI_INC(sc->ti_rx_saved_considx, TI_RETURN_RING_CNT); #if NVLAN > 0 if (cur_rx->ti_flags & TI_BDFLAG_VLAN_TAG) { have_tag = 1; vlan_tag = cur_rx->ti_vlan_tag; } #endif if (cur_rx->ti_flags & TI_BDFLAG_JUMBO_RING) { TI_INC(sc->ti_jumbo, TI_JUMBO_RX_RING_CNT); m = sc->ti_cdata.ti_rx_jumbo_chain[rxidx]; sc->ti_cdata.ti_rx_jumbo_chain[rxidx] = NULL; if (cur_rx->ti_flags & TI_BDFLAG_ERROR) { ifp->if_ierrors++; ti_newbuf_jumbo(sc, sc->ti_jumbo, m); continue; } if (ti_newbuf_jumbo(sc, sc->ti_jumbo, NULL) == ENOBUFS) { ifp->if_ierrors++; ti_newbuf_jumbo(sc, sc->ti_jumbo, m); continue; } } else if (cur_rx->ti_flags & TI_BDFLAG_MINI_RING) { TI_INC(sc->ti_mini, TI_MINI_RX_RING_CNT); m = sc->ti_cdata.ti_rx_mini_chain[rxidx]; sc->ti_cdata.ti_rx_mini_chain[rxidx] = NULL; if (cur_rx->ti_flags & TI_BDFLAG_ERROR) { ifp->if_ierrors++; ti_newbuf_mini(sc, sc->ti_mini, m); continue; } if (ti_newbuf_mini(sc, sc->ti_mini, NULL) == ENOBUFS) { ifp->if_ierrors++; ti_newbuf_mini(sc, sc->ti_mini, m); continue; } } else { TI_INC(sc->ti_std, TI_STD_RX_RING_CNT); m = sc->ti_cdata.ti_rx_std_chain[rxidx]; sc->ti_cdata.ti_rx_std_chain[rxidx] = NULL; if (cur_rx->ti_flags & TI_BDFLAG_ERROR) { ifp->if_ierrors++; ti_newbuf_std(sc, sc->ti_std, m); continue; } if (ti_newbuf_std(sc, sc->ti_std, NULL) == ENOBUFS) { ifp->if_ierrors++; 
ti_newbuf_std(sc, sc->ti_std, m); continue; } } m->m_pkthdr.len = m->m_len = cur_rx->ti_len; ifp->if_ipackets++; eh = mtod(m, struct ether_header *); m->m_pkthdr.rcvif = ifp; /* Remove header from mbuf and pass it on. */ m_adj(m, sizeof(struct ether_header)); if (ifp->if_hwassist) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_DATA_VALID; if ((cur_rx->ti_ip_cksum ^ 0xffff) == 0) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; m->m_pkthdr.csum_data = cur_rx->ti_tcp_udp_cksum; } #if NVLAN > 0 /* * If we received a packet with a vlan tag, pass it * to vlan_input() instead of ether_input(). */ if (have_tag) { vlan_input_tag(eh, m, vlan_tag); have_tag = vlan_tag = 0; continue; } #endif ether_input(ifp, eh, m); } /* Only necessary on the Tigon 1. */ if (sc->ti_hwrev == TI_HWREV_TIGON) CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX, sc->ti_rx_saved_considx); TI_UPDATE_STDPROD(sc, sc->ti_std); TI_UPDATE_MINIPROD(sc, sc->ti_mini); TI_UPDATE_JUMBOPROD(sc, sc->ti_jumbo); return; } static void ti_txeof(sc) struct ti_softc *sc; { struct ti_tx_desc *cur_tx = NULL; struct ifnet *ifp; ifp = &sc->arpcom.ac_if; /* * Go through our tx ring and free mbufs for those * frames that have been sent. 
*/ while (sc->ti_tx_saved_considx != sc->ti_tx_considx.ti_idx) { u_int32_t idx = 0; idx = sc->ti_tx_saved_considx; if (sc->ti_hwrev == TI_HWREV_TIGON) { if (idx > 383) CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE + 6144); else if (idx > 255) CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE + 4096); else if (idx > 127) CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE + 2048); else CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE); cur_tx = &sc->ti_rdata->ti_tx_ring_nic[idx % 128]; } else cur_tx = &sc->ti_rdata->ti_tx_ring[idx]; if (cur_tx->ti_flags & TI_BDFLAG_END) ifp->if_opackets++; if (sc->ti_cdata.ti_tx_chain[idx] != NULL) { m_freem(sc->ti_cdata.ti_tx_chain[idx]); sc->ti_cdata.ti_tx_chain[idx] = NULL; } sc->ti_txcnt--; TI_INC(sc->ti_tx_saved_considx, TI_TX_RING_CNT); ifp->if_timer = 0; } if (cur_tx != NULL) ifp->if_flags &= ~IFF_OACTIVE; return; } static void ti_intr(xsc) void *xsc; { struct ti_softc *sc; struct ifnet *ifp; sc = xsc; TI_LOCK(sc); ifp = &sc->arpcom.ac_if; #ifdef notdef /* Avoid this for now -- checking this register is expensive. */ /* Make sure this is really our interrupt. */ if (!(CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_INTSTATE)) { TI_UNLOCK(sc); return; } #endif /* Ack interrupt and stop others from occuring. */ CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); if (ifp->if_flags & IFF_RUNNING) { /* Check RX return ring producer/consumer */ ti_rxeof(sc); /* Check TX ring producer/consumer */ ti_txeof(sc); } ti_handle_events(sc); /* Re-enable interrupts. 
*/ CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0); if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL) ti_start(ifp); TI_UNLOCK(sc); return; } static void ti_stats_update(sc) struct ti_softc *sc; { struct ifnet *ifp; ifp = &sc->arpcom.ac_if; ifp->if_collisions += (sc->ti_rdata->ti_info.ti_stats.dot3StatsSingleCollisionFrames + sc->ti_rdata->ti_info.ti_stats.dot3StatsMultipleCollisionFrames + sc->ti_rdata->ti_info.ti_stats.dot3StatsExcessiveCollisions + sc->ti_rdata->ti_info.ti_stats.dot3StatsLateCollisions) - ifp->if_collisions; return; } /* * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data * pointers to descriptors. */ static int ti_encap(sc, m_head, txidx) struct ti_softc *sc; struct mbuf *m_head; u_int32_t *txidx; { struct ti_tx_desc *f = NULL; struct mbuf *m; u_int32_t frag, cur, cnt = 0; u_int16_t csum_flags = 0; #if NVLAN > 0 struct ifvlan *ifv = NULL; if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) && m_head->m_pkthdr.rcvif != NULL && m_head->m_pkthdr.rcvif->if_type == IFT_8021_VLAN) ifv = m_head->m_pkthdr.rcvif->if_softc; #endif m = m_head; cur = frag = *txidx; if (m_head->m_pkthdr.csum_flags) { if (m_head->m_pkthdr.csum_flags & CSUM_IP) csum_flags |= TI_BDFLAG_IP_CKSUM; if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) csum_flags |= TI_BDFLAG_TCP_UDP_CKSUM; if (m_head->m_flags & M_LASTFRAG) csum_flags |= TI_BDFLAG_IP_FRAG_END; else if (m_head->m_flags & M_FRAG) csum_flags |= TI_BDFLAG_IP_FRAG; } /* * Start packing the mbufs in this chain into * the fragment pointers. Stop when we run out * of fragments or hit the end of the mbuf chain. 
*/ for (m = m_head; m != NULL; m = m->m_next) { if (m->m_len != 0) { if (sc->ti_hwrev == TI_HWREV_TIGON) { if (frag > 383) CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE + 6144); else if (frag > 255) CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE + 4096); else if (frag > 127) CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE + 2048); else CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE); f = &sc->ti_rdata->ti_tx_ring_nic[frag % 128]; } else f = &sc->ti_rdata->ti_tx_ring[frag]; if (sc->ti_cdata.ti_tx_chain[frag] != NULL) break; TI_HOSTADDR(f->ti_addr) = vtophys(mtod(m, vm_offset_t)); f->ti_len = m->m_len; f->ti_flags = csum_flags; #if NVLAN > 0 if (ifv != NULL) { f->ti_flags |= TI_BDFLAG_VLAN_TAG; f->ti_vlan_tag = ifv->ifv_tag; } else { f->ti_vlan_tag = 0; } #endif /* * Sanity check: avoid coming within 16 descriptors * of the end of the ring. */ if ((TI_TX_RING_CNT - (sc->ti_txcnt + cnt)) < 16) return(ENOBUFS); cur = frag; TI_INC(frag, TI_TX_RING_CNT); cnt++; } } if (m != NULL) return(ENOBUFS); if (frag == sc->ti_tx_saved_considx) return(ENOBUFS); if (sc->ti_hwrev == TI_HWREV_TIGON) sc->ti_rdata->ti_tx_ring_nic[cur % 128].ti_flags |= TI_BDFLAG_END; else sc->ti_rdata->ti_tx_ring[cur].ti_flags |= TI_BDFLAG_END; sc->ti_cdata.ti_tx_chain[cur] = m_head; sc->ti_txcnt += cnt; *txidx = frag; return(0); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit descriptors. */ static void ti_start(ifp) struct ifnet *ifp; { struct ti_softc *sc; struct mbuf *m_head = NULL; u_int32_t prodidx = 0; sc = ifp->if_softc; TI_LOCK(sc); prodidx = CSR_READ_4(sc, TI_MB_SENDPROD_IDX); while(sc->ti_cdata.ti_tx_chain[prodidx] == NULL) { IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* * XXX * safety overkill. If this is a fragmented packet chain * with delayed TCP/UDP checksums, then only encapsulate * it if we have enough descriptors to handle the entire * chain at once. 
* (paranoia -- may not actually be needed) */ if (m_head->m_flags & M_FIRSTFRAG && m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { if ((TI_TX_RING_CNT - sc->ti_txcnt) < m_head->m_pkthdr.csum_data + 16) { IF_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; break; } } /* * Pack the data into the transmit ring. If we * don't have room, set the OACTIVE flag and wait * for the NIC to drain the ring. */ if (ti_encap(sc, m_head, &prodidx)) { IF_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; break; } /* * If there's a BPF listener, bounce a copy of this frame * to him. */ if (ifp->if_bpf) bpf_mtap(ifp, m_head); } /* Transmit */ CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, prodidx); /* * Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; TI_UNLOCK(sc); return; } static void ti_init(xsc) void *xsc; { struct ti_softc *sc = xsc; /* Cancel pending I/O and flush buffers. */ ti_stop(sc); TI_LOCK(sc); /* Init the gen info block, ring control blocks and firmware. */ if (ti_gibinit(sc)) { printf("ti%d: initialization failure\n", sc->ti_unit); TI_UNLOCK(sc); return; } TI_UNLOCK(sc); return; } static void ti_init2(sc) struct ti_softc *sc; { struct ti_cmd_desc cmd; struct ifnet *ifp; u_int16_t *m; struct ifmedia *ifm; int tmp; ifp = &sc->arpcom.ac_if; /* Specify MTU and interface index. */ CSR_WRITE_4(sc, TI_GCR_IFINDEX, ifp->if_unit); CSR_WRITE_4(sc, TI_GCR_IFMTU, ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN); TI_DO_CMD(TI_CMD_UPDATE_GENCOM, 0, 0); /* Load our MAC address. */ m = (u_int16_t *)&sc->arpcom.ac_enaddr[0]; CSR_WRITE_4(sc, TI_GCR_PAR0, htons(m[0])); CSR_WRITE_4(sc, TI_GCR_PAR1, (htons(m[1]) << 16) | htons(m[2])); TI_DO_CMD(TI_CMD_SET_MAC_ADDR, 0, 0); /* Enable or disable promiscuous mode as needed. */ if (ifp->if_flags & IFF_PROMISC) { TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_ENB, 0); } else { TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_DIS, 0); } /* Program multicast filter. 
*/ ti_setmulti(sc); /* * If this is a Tigon 1, we should tell the * firmware to use software packet filtering. */ if (sc->ti_hwrev == TI_HWREV_TIGON) { TI_DO_CMD(TI_CMD_FDR_FILTERING, TI_CMD_CODE_FILT_ENB, 0); } /* Init RX ring. */ ti_init_rx_ring_std(sc); /* Init jumbo RX ring. */ if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) ti_init_rx_ring_jumbo(sc); /* * If this is a Tigon 2, we can also configure the * mini ring. */ if (sc->ti_hwrev == TI_HWREV_TIGON_II) ti_init_rx_ring_mini(sc); CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX, 0); sc->ti_rx_saved_considx = 0; /* Init TX ring. */ ti_init_tx_ring(sc); /* Tell firmware we're alive. */ TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_UP, 0); /* Enable host interrupts. */ CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; /* * Make sure to set media properly. We have to do this * here since we have to issue commands in order to set * the link negotiation and we can't issue commands until * the firmware is running. */ ifm = &sc->ifmedia; tmp = ifm->ifm_media; ifm->ifm_media = ifm->ifm_cur->ifm_media; ti_ifmedia_upd(ifp); ifm->ifm_media = tmp; return; } /* * Set media options. 
*/ static int ti_ifmedia_upd(ifp) struct ifnet *ifp; { struct ti_softc *sc; struct ifmedia *ifm; struct ti_cmd_desc cmd; sc = ifp->if_softc; ifm = &sc->ifmedia; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return(EINVAL); switch(IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB| TI_GLNK_FULL_DUPLEX|TI_GLNK_RX_FLOWCTL_Y| TI_GLNK_AUTONEGENB|TI_GLNK_ENB); CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_100MB|TI_LNK_10MB| TI_LNK_FULL_DUPLEX|TI_LNK_HALF_DUPLEX| TI_LNK_AUTONEGENB|TI_LNK_ENB); TI_DO_CMD(TI_CMD_LINK_NEGOTIATION, TI_CMD_CODE_NEGOTIATE_BOTH, 0); break; case IFM_1000_SX: case IFM_1000_TX: CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB| TI_GLNK_RX_FLOWCTL_Y|TI_GLNK_ENB); CSR_WRITE_4(sc, TI_GCR_LINK, 0); if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { TI_SETBIT(sc, TI_GCR_GLINK, TI_GLNK_FULL_DUPLEX); } TI_DO_CMD(TI_CMD_LINK_NEGOTIATION, TI_CMD_CODE_NEGOTIATE_GIGABIT, 0); break; case IFM_100_FX: case IFM_10_FL: case IFM_100_TX: case IFM_10_T: CSR_WRITE_4(sc, TI_GCR_GLINK, 0); CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_ENB|TI_LNK_PREF); if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_FX || IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) { TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_100MB); } else { TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_10MB); } if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_FULL_DUPLEX); } else { TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_HALF_DUPLEX); } TI_DO_CMD(TI_CMD_LINK_NEGOTIATION, TI_CMD_CODE_NEGOTIATE_10_100, 0); break; } return(0); } /* * Report current media status. 
*/ static void ti_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct ti_softc *sc; u_int32_t media = 0; sc = ifp->if_softc; ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN) return; ifmr->ifm_status |= IFM_ACTIVE; if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP) { media = CSR_READ_4(sc, TI_GCR_GLINK_STAT); if (sc->ti_copper) ifmr->ifm_active |= IFM_1000_TX; else ifmr->ifm_active |= IFM_1000_SX; if (media & TI_GLNK_FULL_DUPLEX) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; } else if (sc->ti_linkstat == TI_EV_CODE_LINK_UP) { media = CSR_READ_4(sc, TI_GCR_LINK_STAT); if (sc->ti_copper) { if (media & TI_LNK_100MB) ifmr->ifm_active |= IFM_100_TX; if (media & TI_LNK_10MB) ifmr->ifm_active |= IFM_10_T; } else { if (media & TI_LNK_100MB) ifmr->ifm_active |= IFM_100_FX; if (media & TI_LNK_10MB) ifmr->ifm_active |= IFM_10_FL; } if (media & TI_LNK_FULL_DUPLEX) ifmr->ifm_active |= IFM_FDX; if (media & TI_LNK_HALF_DUPLEX) ifmr->ifm_active |= IFM_HDX; } return; } static int ti_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct ti_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; int error = 0; struct ti_cmd_desc cmd; TI_LOCK(sc); switch(command) { case SIOCSIFADDR: case SIOCGIFADDR: error = ether_ioctl(ifp, command, data); break; case SIOCSIFMTU: if (ifr->ifr_mtu > TI_JUMBO_MTU) error = EINVAL; else { ifp->if_mtu = ifr->ifr_mtu; ti_init(sc); } break; case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { /* * If only the state of the PROMISC flag changed, * then just use the 'set promisc mode' command * instead of reinitializing the entire NIC. Doing * a full re-init means reloading the firmware and * waiting for it to start up, which may take a * second or two. 
*/ if (ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->ti_if_flags & IFF_PROMISC)) { TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_ENB, 0); } else if (ifp->if_flags & IFF_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->ti_if_flags & IFF_PROMISC) { TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_DIS, 0); } else ti_init(sc); } else { if (ifp->if_flags & IFF_RUNNING) { ti_stop(sc); } } sc->ti_if_flags = ifp->if_flags; error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: if (ifp->if_flags & IFF_RUNNING) { ti_setmulti(sc); error = 0; } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); break; default: error = EINVAL; break; } TI_UNLOCK(sc); return(error); } static void ti_watchdog(ifp) struct ifnet *ifp; { struct ti_softc *sc; sc = ifp->if_softc; TI_LOCK(sc); printf("ti%d: watchdog timeout -- resetting\n", sc->ti_unit); ti_stop(sc); ti_init(sc); ifp->if_oerrors++; TI_UNLOCK(sc); return; } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void ti_stop(sc) struct ti_softc *sc; { struct ifnet *ifp; struct ti_cmd_desc cmd; TI_LOCK(sc); ifp = &sc->arpcom.ac_if; /* Disable host interrupts. */ CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); /* * Tell firmware we're shutting down. */ TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_DOWN, 0); /* Halt and reinitialize. */ ti_chipinit(sc); ti_mem(sc, 0x2000, 0x100000 - 0x2000, NULL); ti_chipinit(sc); /* Free the RX lists. */ ti_free_rx_ring_std(sc); /* Free jumbo RX list. */ ti_free_rx_ring_jumbo(sc); /* Free mini RX list. */ ti_free_rx_ring_mini(sc); /* Free TX buffers. */ ti_free_tx_ring(sc); sc->ti_ev_prodidx.ti_idx = 0; sc->ti_return_prodidx.ti_idx = 0; sc->ti_tx_considx.ti_idx = 0; sc->ti_tx_saved_considx = TI_TXCONS_UNSET; ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); TI_UNLOCK(sc); return; } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. 
*/ static void ti_shutdown(dev) device_t dev; { struct ti_softc *sc; sc = device_get_softc(dev); TI_LOCK(sc); ti_chipinit(sc); TI_UNLOCK(sc); return; } Index: head/sys/pci/if_tl.c =================================================================== --- head/sys/pci/if_tl.c (revision 71961) +++ head/sys/pci/if_tl.c (revision 71962) @@ -1,2349 +1,2348 @@ /* * Copyright (c) 1997, 1998 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * Texas Instruments ThunderLAN driver for FreeBSD 2.2.6 and 3.x. * Supports many Compaq PCI NICs based on the ThunderLAN ethernet controller, * the National Semiconductor DP83840A physical interface and the * Microchip Technology 24Cxx series serial EEPROM. * * Written using the following four documents: * * Texas Instruments ThunderLAN Programmer's Guide (www.ti.com) * National Semiconductor DP83840A data sheet (www.national.com) * Microchip Technology 24C02C data sheet (www.microchip.com) * Micro Linear ML6692 100BaseTX only PHY data sheet (www.microlinear.com) * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * Some notes about the ThunderLAN: * * The ThunderLAN controller is a single chip containing PCI controller * logic, approximately 3K of on-board SRAM, a LAN controller, and media * independent interface (MII) bus. The MII allows the ThunderLAN chip to * control up to 32 different physical interfaces (PHYs). The ThunderLAN * also has a built-in 10baseT PHY, allowing a single ThunderLAN controller * to act as a complete ethernet interface. * * Other PHYs may be attached to the ThunderLAN; the Compaq 10/100 cards * use a National Semiconductor DP83840A PHY that supports 10 or 100Mb/sec * in full or half duplex. Some of the Compaq Deskpro machines use a * Level 1 LXT970 PHY with the same capabilities. 
Certain Olicom adapters * use a Micro Linear ML6692 100BaseTX only PHY, which can be used in * concert with the ThunderLAN's internal PHY to provide full 10/100 * support. This is cheaper than using a standalone external PHY for both * 10/100 modes and letting the ThunderLAN's internal PHY go to waste. * A serial EEPROM is also attached to the ThunderLAN chip to provide * power-up default register settings and for storing the adapter's * station address. Although not supported by this driver, the ThunderLAN * chip can also be connected to token ring PHYs. * * The ThunderLAN has a set of registers which can be used to issue * commands, acknowledge interrupts, and to manipulate other internal * registers on its DIO bus. The primary registers can be accessed * using either programmed I/O (inb/outb) or via PCI memory mapping, * depending on how the card is configured during the PCI probing * phase. It is even possible to have both PIO and memory mapped * access turned on at the same time. * * Frame reception and transmission with the ThunderLAN chip is done * using frame 'lists.' A list structure looks more or less like this: * * struct tl_frag { * u_int32_t fragment_address; * u_int32_t fragment_size; * }; * struct tl_list { * u_int32_t forward_pointer; * u_int16_t cstat; * u_int16_t frame_size; * struct tl_frag fragments[10]; * }; * * The forward pointer in the list header can be either a 0 or the address * of another list, which allows several lists to be linked together. Each * list contains up to 10 fragment descriptors. This means the chip allows * ethernet frames to be broken up into up to 10 chunks for transfer to * and from the SRAM. Note that the forward pointer and fragment buffer * addresses are physical memory addresses, not virtual. Note also that * a single ethernet frame can not span lists: if the host wants to * transmit a frame and the frame data is split up over more than 10 * buffers, the frame has to collapsed before it can be transmitted. 
* * To receive frames, the driver sets up a number of lists and populates * the fragment descriptors, then it sends an RX GO command to the chip. * When a frame is received, the chip will DMA it into the memory regions * specified by the fragment descriptors and then trigger an RX 'end of * frame interrupt' when done. The driver may choose to use only one * fragment per list; this may result is slighltly less efficient use * of memory in exchange for improving performance. * * To transmit frames, the driver again sets up lists and fragment * descriptors, only this time the buffers contain frame data that * is to be DMA'ed into the chip instead of out of it. Once the chip * has transfered the data into its on-board SRAM, it will trigger a * TX 'end of frame' interrupt. It will also generate an 'end of channel' * interrupt when it reaches the end of the list. */ /* * Some notes about this driver: * * The ThunderLAN chip provides a couple of different ways to organize * reception, transmission and interrupt handling. The simplest approach * is to use one list each for transmission and reception. In this mode, * the ThunderLAN will generate two interrupts for every received frame * (one RX EOF and one RX EOC) and two for each transmitted frame (one * TX EOF and one TX EOC). This may make the driver simpler but it hurts * performance to have to handle so many interrupts. * * Initially I wanted to create a circular list of receive buffers so * that the ThunderLAN chip would think there was an infinitely long * receive channel and never deliver an RXEOC interrupt. However this * doesn't work correctly under heavy load: while the manual says the * chip will trigger an RXEOF interrupt each time a frame is copied into * memory, you can't count on the chip waiting around for you to acknowledge * the interrupt before it starts trying to DMA the next frame. 
The result * is that the chip might traverse the entire circular list and then wrap * around before you have a chance to do anything about it. Consequently, * the receive list is terminated (with a 0 in the forward pointer in the * last element). Each time an RXEOF interrupt arrives, the used list * is shifted to the end of the list. This gives the appearance of an * infinitely large RX chain so long as the driver doesn't fall behind * the chip and allow all of the lists to be filled up. * * If all the lists are filled, the adapter will deliver an RX 'end of * channel' interrupt when it hits the 0 forward pointer at the end of * the chain. The RXEOC handler then cleans out the RX chain and resets * the list head pointer in the ch_parm register and restarts the receiver. * * For frame transmission, it is possible to program the ThunderLAN's * transmit interrupt threshold so that the chip can acknowledge multiple * lists with only a single TX EOF interrupt. This allows the driver to * queue several frames in one shot, and only have to handle a total * two interrupts (one TX EOF and one TX EOC) no matter how many frames * are transmitted. Frame transmission is done directly out of the * mbufs passed to the tl_start() routine via the interface send queue. * The driver simply sets up the fragment descriptors in the transmit * lists to point to the mbuf data regions and sends a TX GO command. * * Note that since the RX and TX lists themselves are always used * only by the driver, the are malloc()ed once at driver initialization * time and never free()ed. * * Also, in order to remain as platform independent as possible, this * driver uses memory mapped register access to manipulate the card * as opposed to programmed I/O. This avoids the use of the inb/outb * (and related) instructions which are specific to the i386 platform. 
* * Using these techniques, this driver achieves very high performance * by minimizing the amount of interrupts generated during large * transfers and by completely avoiding buffer copies. Frame transfer * to and from the ThunderLAN chip is performed entirely by the chip * itself thereby reducing the load on the host CPU. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include #include #include #include #include #include #include /* * Default to using PIO register access mode to pacify certain * laptop docking stations with built-in ThunderLAN chips that * don't seem to handle memory mapped mode properly. */ #define TL_USEIOSPACE #include MODULE_DEPEND(tl, miibus, 1, 1, 1); /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" #if !defined(lint) static const char rcsid[] = "$FreeBSD$"; #endif /* * Various supported device vendors/types and their names. 
*/ static struct tl_type tl_devs[] = { { TI_VENDORID, TI_DEVICEID_THUNDERLAN, "Texas Instruments ThunderLAN" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10, "Compaq Netelligent 10" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100, "Compaq Netelligent 10/100" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_PROLIANT, "Compaq Netelligent 10/100 Proliant" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_DUAL, "Compaq Netelligent 10/100 Dual Port" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_INTEGRATED, "Compaq NetFlex-3/P Integrated" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P, "Compaq NetFlex-3/P" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_BNC, "Compaq NetFlex 3/P w/ BNC" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_EMBEDDED, "Compaq Netelligent 10/100 TX Embedded UTP" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_T2_UTP_COAX, "Compaq Netelligent 10 T/2 PCI UTP/Coax" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_TX_UTP, "Compaq Netelligent 10/100 TX UTP" }, { OLICOM_VENDORID, OLICOM_DEVICEID_OC2183, "Olicom OC-2183/2185" }, { OLICOM_VENDORID, OLICOM_DEVICEID_OC2325, "Olicom OC-2325" }, { OLICOM_VENDORID, OLICOM_DEVICEID_OC2326, "Olicom OC-2326 10/100 TX UTP" }, { 0, 0, NULL } }; static int tl_probe __P((device_t)); static int tl_attach __P((device_t)); static int tl_detach __P((device_t)); static int tl_intvec_rxeoc __P((void *, u_int32_t)); static int tl_intvec_txeoc __P((void *, u_int32_t)); static int tl_intvec_txeof __P((void *, u_int32_t)); static int tl_intvec_rxeof __P((void *, u_int32_t)); static int tl_intvec_adchk __P((void *, u_int32_t)); static int tl_intvec_netsts __P((void *, u_int32_t)); static int tl_newbuf __P((struct tl_softc *, struct tl_chain_onefrag *)); static void tl_stats_update __P((void *)); static int tl_encap __P((struct tl_softc *, struct tl_chain *, struct mbuf *)); static void tl_intr __P((void *)); static void tl_start __P((struct ifnet *)); static int tl_ioctl __P((struct ifnet *, u_long, caddr_t)); 
static void tl_init __P((void *)); static void tl_stop __P((struct tl_softc *)); static void tl_watchdog __P((struct ifnet *)); static void tl_shutdown __P((device_t)); static int tl_ifmedia_upd __P((struct ifnet *)); static void tl_ifmedia_sts __P((struct ifnet *, struct ifmediareq *)); static u_int8_t tl_eeprom_putbyte __P((struct tl_softc *, int)); static u_int8_t tl_eeprom_getbyte __P((struct tl_softc *, int, u_int8_t *)); static int tl_read_eeprom __P((struct tl_softc *, caddr_t, int, int)); static void tl_mii_sync __P((struct tl_softc *)); static void tl_mii_send __P((struct tl_softc *, u_int32_t, int)); static int tl_mii_readreg __P((struct tl_softc *, struct tl_mii_frame *)); static int tl_mii_writereg __P((struct tl_softc *, struct tl_mii_frame *)); static int tl_miibus_readreg __P((device_t, int, int)); static int tl_miibus_writereg __P((device_t, int, int, int)); static void tl_miibus_statchg __P((device_t)); static void tl_setmode __P((struct tl_softc *, int)); static int tl_calchash __P((caddr_t)); static void tl_setmulti __P((struct tl_softc *)); static void tl_setfilt __P((struct tl_softc *, caddr_t, int)); static void tl_softreset __P((struct tl_softc *, int)); static void tl_hardreset __P((device_t)); static int tl_list_rx_init __P((struct tl_softc *)); static int tl_list_tx_init __P((struct tl_softc *)); static u_int8_t tl_dio_read8 __P((struct tl_softc *, int)); static u_int16_t tl_dio_read16 __P((struct tl_softc *, int)); static u_int32_t tl_dio_read32 __P((struct tl_softc *, int)); static void tl_dio_write8 __P((struct tl_softc *, int, int)); static void tl_dio_write16 __P((struct tl_softc *, int, int)); static void tl_dio_write32 __P((struct tl_softc *, int, int)); static void tl_dio_setbit __P((struct tl_softc *, int, int)); static void tl_dio_clrbit __P((struct tl_softc *, int, int)); static void tl_dio_setbit16 __P((struct tl_softc *, int, int)); static void tl_dio_clrbit16 __P((struct tl_softc *, int, int)); #ifdef TL_USEIOSPACE #define 
TL_RES SYS_RES_IOPORT #define TL_RID TL_PCI_LOIO #else #define TL_RES SYS_RES_MEMORY #define TL_RID TL_PCI_LOMEM #endif static device_method_t tl_methods[] = { /* Device interface */ DEVMETHOD(device_probe, tl_probe), DEVMETHOD(device_attach, tl_attach), DEVMETHOD(device_detach, tl_detach), DEVMETHOD(device_shutdown, tl_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, tl_miibus_readreg), DEVMETHOD(miibus_writereg, tl_miibus_writereg), DEVMETHOD(miibus_statchg, tl_miibus_statchg), { 0, 0 } }; static driver_t tl_driver = { "tl", tl_methods, sizeof(struct tl_softc) }; static devclass_t tl_devclass; DRIVER_MODULE(if_tl, pci, tl_driver, tl_devclass, 0, 0); DRIVER_MODULE(miibus, tl, miibus_driver, miibus_devclass, 0, 0); static u_int8_t tl_dio_read8(sc, reg) struct tl_softc *sc; int reg; { CSR_WRITE_2(sc, TL_DIO_ADDR, reg); return(CSR_READ_1(sc, TL_DIO_DATA + (reg & 3))); } static u_int16_t tl_dio_read16(sc, reg) struct tl_softc *sc; int reg; { CSR_WRITE_2(sc, TL_DIO_ADDR, reg); return(CSR_READ_2(sc, TL_DIO_DATA + (reg & 3))); } static u_int32_t tl_dio_read32(sc, reg) struct tl_softc *sc; int reg; { CSR_WRITE_2(sc, TL_DIO_ADDR, reg); return(CSR_READ_4(sc, TL_DIO_DATA + (reg & 3))); } static void tl_dio_write8(sc, reg, val) struct tl_softc *sc; int reg; int val; { CSR_WRITE_2(sc, TL_DIO_ADDR, reg); CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), val); return; } static void tl_dio_write16(sc, reg, val) struct tl_softc *sc; int reg; int val; { CSR_WRITE_2(sc, TL_DIO_ADDR, reg); CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), val); return; } static void tl_dio_write32(sc, reg, val) struct tl_softc *sc; int reg; int val; { CSR_WRITE_2(sc, TL_DIO_ADDR, reg); CSR_WRITE_4(sc, TL_DIO_DATA + (reg & 3), val); return; } static void tl_dio_setbit(sc, reg, bit) struct tl_softc *sc; int reg; int bit; { u_int8_t f; CSR_WRITE_2(sc, TL_DIO_ADDR, reg); f = 
CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)); f |= bit; CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f); return; } static void tl_dio_clrbit(sc, reg, bit) struct tl_softc *sc; int reg; int bit; { u_int8_t f; CSR_WRITE_2(sc, TL_DIO_ADDR, reg); f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)); f &= ~bit; CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f); return; } static void tl_dio_setbit16(sc, reg, bit) struct tl_softc *sc; int reg; int bit; { u_int16_t f; CSR_WRITE_2(sc, TL_DIO_ADDR, reg); f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)); f |= bit; CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f); return; } static void tl_dio_clrbit16(sc, reg, bit) struct tl_softc *sc; int reg; int bit; { u_int16_t f; CSR_WRITE_2(sc, TL_DIO_ADDR, reg); f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)); f &= ~bit; CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f); return; } /* * Send an instruction or address to the EEPROM, check for ACK. */ static u_int8_t tl_eeprom_putbyte(sc, byte) struct tl_softc *sc; int byte; { register int i, ack = 0; /* * Make sure we're in TX mode. */ tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ETXEN); /* * Feed in each bit and stobe the clock. */ for (i = 0x80; i; i >>= 1) { if (byte & i) { tl_dio_setbit(sc, TL_NETSIO, TL_SIO_EDATA); } else { tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_EDATA); } DELAY(1); tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK); DELAY(1); tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK); } /* * Turn off TX mode. */ tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN); /* * Check for ack. */ tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK); ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA; tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK); return(ack); } /* * Read a byte of data stored in the EEPROM at address 'addr.' */ static u_int8_t tl_eeprom_getbyte(sc, addr, dest) struct tl_softc *sc; int addr; u_int8_t *dest; { register int i; u_int8_t byte = 0; tl_dio_write8(sc, TL_NETSIO, 0); EEPROM_START; /* * Send write control code to EEPROM. 
*/ if (tl_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) { printf("tl%d: failed to send write command, status: %x\n", sc->tl_unit, tl_dio_read8(sc, TL_NETSIO)); return(1); } /* * Send address of byte we want to read. */ if (tl_eeprom_putbyte(sc, addr)) { printf("tl%d: failed to send address, status: %x\n", sc->tl_unit, tl_dio_read8(sc, TL_NETSIO)); return(1); } EEPROM_STOP; EEPROM_START; /* * Send read control code to EEPROM. */ if (tl_eeprom_putbyte(sc, EEPROM_CTL_READ)) { printf("tl%d: failed to send write command, status: %x\n", sc->tl_unit, tl_dio_read8(sc, TL_NETSIO)); return(1); } /* * Start reading bits from EEPROM. */ tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN); for (i = 0x80; i; i >>= 1) { tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK); DELAY(1); if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA) byte |= i; tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK); DELAY(1); } EEPROM_STOP; /* * No ACK generated for read, so just return byte. */ *dest = byte; return(0); } /* * Read a sequence of bytes from the EEPROM. */ static int tl_read_eeprom(sc, dest, off, cnt) struct tl_softc *sc; caddr_t dest; int off; int cnt; { int err = 0, i; u_int8_t byte = 0; for (i = 0; i < cnt; i++) { err = tl_eeprom_getbyte(sc, off + i, &byte); if (err) break; *(dest + i) = byte; } return(err ? 
1 : 0); } static void tl_mii_sync(sc) struct tl_softc *sc; { register int i; tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN); for (i = 0; i < 32; i++) { tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); } return; } static void tl_mii_send(sc, bits, cnt) struct tl_softc *sc; u_int32_t bits; int cnt; { int i; for (i = (0x1 << (cnt - 1)); i; i >>= 1) { tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); if (bits & i) { tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MDATA); } else { tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MDATA); } tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); } } static int tl_mii_readreg(sc, frame) struct tl_softc *sc; struct tl_mii_frame *frame; { int i, ack; int minten = 0; TL_LOCK(sc); tl_mii_sync(sc); /* * Set up frame for RX. */ frame->mii_stdelim = TL_MII_STARTDELIM; frame->mii_opcode = TL_MII_READOP; frame->mii_turnaround = 0; frame->mii_data = 0; /* * Turn off MII interrupt by forcing MINTEN low. */ minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN; if (minten) { tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN); } /* * Turn on data xmit. */ tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN); /* * Send command/address info. */ tl_mii_send(sc, frame->mii_stdelim, 2); tl_mii_send(sc, frame->mii_opcode, 2); tl_mii_send(sc, frame->mii_phyaddr, 5); tl_mii_send(sc, frame->mii_regaddr, 5); /* * Turn off xmit. */ tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN); /* Idle bit */ tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); /* Check for ack */ tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA; /* Complete the cycle */ tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); /* * Now try reading data bits. If the ack failed, we still * need to clock through 16 cycles to keep the PHYs in sync. 
*/ if (ack) { for(i = 0; i < 16; i++) { tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); } goto fail; } for (i = 0x8000; i; i >>= 1) { tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); if (!ack) { if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA) frame->mii_data |= i; } tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); } fail: tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); /* Reenable interrupts */ if (minten) { tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN); } TL_UNLOCK(sc); if (ack) return(1); return(0); } static int tl_mii_writereg(sc, frame) struct tl_softc *sc; struct tl_mii_frame *frame; { int minten; TL_LOCK(sc); tl_mii_sync(sc); /* * Set up frame for TX. */ frame->mii_stdelim = TL_MII_STARTDELIM; frame->mii_opcode = TL_MII_WRITEOP; frame->mii_turnaround = TL_MII_TURNAROUND; /* * Turn off MII interrupt by forcing MINTEN low. */ minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN; if (minten) { tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN); } /* * Turn on data output. */ tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN); tl_mii_send(sc, frame->mii_stdelim, 2); tl_mii_send(sc, frame->mii_opcode, 2); tl_mii_send(sc, frame->mii_phyaddr, 5); tl_mii_send(sc, frame->mii_regaddr, 5); tl_mii_send(sc, frame->mii_turnaround, 2); tl_mii_send(sc, frame->mii_data, 16); tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); /* * Turn off xmit. 
*/ tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN); /* Reenable interrupts */ if (minten) tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN); TL_UNLOCK(sc); return(0); } static int tl_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct tl_softc *sc; struct tl_mii_frame frame; sc = device_get_softc(dev); bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; tl_mii_readreg(sc, &frame); return(frame.mii_data); } static int tl_miibus_writereg(dev, phy, reg, data) device_t dev; int phy, reg, data; { struct tl_softc *sc; struct tl_mii_frame frame; sc = device_get_softc(dev); bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; frame.mii_data = data; tl_mii_writereg(sc, &frame); return(0); } static void tl_miibus_statchg(dev) device_t dev; { struct tl_softc *sc; struct mii_data *mii; sc = device_get_softc(dev); TL_LOCK(sc); mii = device_get_softc(sc->tl_miibus); if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX); } else { tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX); } TL_UNLOCK(sc); return; } /* * Set modes for bitrate devices. */ static void tl_setmode(sc, media) struct tl_softc *sc; int media; { if (IFM_SUBTYPE(media) == IFM_10_5) tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD1); if (IFM_SUBTYPE(media) == IFM_10_T) { tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD1); if ((media & IFM_GMASK) == IFM_FDX) { tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD3); tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX); } else { tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD3); tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX); } } return; } /* * Calculate the hash of a MAC address for programming the multicast hash * table. This hash is simply the address split into 6-bit chunks * XOR'd, e.g. * byte: 000000|00 1111|1111 22|222222|333333|33 4444|4444 55|555555 * bit: 765432|10 7654|3210 76|543210|765432|10 7654|3210 76|543210 * Bytes 0-2 and 3-5 are symmetrical, so are folded together. 
Then * the folded 24-bit value is split into 6-bit portions and XOR'd. */ static int tl_calchash(addr) caddr_t addr; { int t; t = (addr[0] ^ addr[3]) << 16 | (addr[1] ^ addr[4]) << 8 | (addr[2] ^ addr[5]); return ((t >> 18) ^ (t >> 12) ^ (t >> 6) ^ t) & 0x3f; } /* * The ThunderLAN has a perfect MAC address filter in addition to * the multicast hash filter. The perfect filter can be programmed * with up to four MAC addresses. The first one is always used to * hold the station address, which leaves us free to use the other * three for multicast addresses. */ static void tl_setfilt(sc, addr, slot) struct tl_softc *sc; caddr_t addr; int slot; { int i; u_int16_t regaddr; regaddr = TL_AREG0_B5 + (slot * ETHER_ADDR_LEN); for (i = 0; i < ETHER_ADDR_LEN; i++) tl_dio_write8(sc, regaddr + i, *(addr + i)); return; } /* * XXX In FreeBSD 3.0, multicast addresses are managed using a doubly * linked list. This is fine, except addresses are added from the head * end of the list. We want to arrange for 224.0.0.1 (the "all hosts") * group to always be in the perfect filter, but as more groups are added, * the 224.0.0.1 entry (which is always added first) gets pushed down * the list and ends up at the tail. So after 3 or 4 multicast groups * are added, the all-hosts entry gets pushed out of the perfect filter * and into the hash table. * * Because the multicast list is a doubly-linked list as opposed to a * circular queue, we don't have the ability to just grab the tail of * the list and traverse it backwards. Instead, we have to traverse * the list once to find the tail, then traverse it again backwards to * update the multicast filter. */ static void tl_setmulti(sc) struct tl_softc *sc; { struct ifnet *ifp; u_int32_t hashes[2] = { 0, 0 }; int h, i; struct ifmultiaddr *ifma; u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 }; ifp = &sc->arpcom.ac_if; /* First, zot all the existing filters. 
*/ for (i = 1; i < 4; i++) tl_setfilt(sc, (caddr_t)&dummy, i); tl_dio_write32(sc, TL_HASH1, 0); tl_dio_write32(sc, TL_HASH2, 0); /* Now program new ones. */ if (ifp->if_flags & IFF_ALLMULTI) { hashes[0] = 0xFFFFFFFF; hashes[1] = 0xFFFFFFFF; } else { i = 1; /* First find the tail of the list. */ - for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; - ifma = ifma->ifma_link.le_next) { + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_link.le_next == NULL) break; } /* Now traverse the list backwards. */ for (; ifma != NULL && ifma != (void *)&ifp->if_multiaddrs; ifma = (struct ifmultiaddr *)ifma->ifma_link.le_prev) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; /* * Program the first three multicast groups * into the perfect filter. For all others, * use the hash table. */ if (i < 4) { tl_setfilt(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i); i++; continue; } h = tl_calchash( LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); } } tl_dio_write32(sc, TL_HASH1, hashes[0]); tl_dio_write32(sc, TL_HASH2, hashes[1]); return; } /* * This routine is recommended by the ThunderLAN manual to insure that * the internal PHY is powered up correctly. It also recommends a one * second pause at the end to 'wait for the clocks to start' but in my * experience this isn't necessary. */ static void tl_hardreset(dev) device_t dev; { struct tl_softc *sc; int i; u_int16_t flags; sc = device_get_softc(dev); tl_mii_sync(sc); flags = BMCR_LOOP|BMCR_ISO|BMCR_PDOWN; for (i = 0; i < MII_NPHY; i++) tl_miibus_writereg(dev, i, MII_BMCR, flags); tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_ISO); DELAY(50000); tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_LOOP|BMCR_ISO); tl_mii_sync(sc); while(tl_miibus_readreg(dev, 31, MII_BMCR) & BMCR_RESET); DELAY(50000); return; } static void tl_softreset(sc, internal) struct tl_softc *sc; int internal; { u_int32_t cmd, dummy, i; /* Assert the adapter reset bit. 
*/ CMD_SET(sc, TL_CMD_ADRST); /* Turn off interrupts */ CMD_SET(sc, TL_CMD_INTSOFF); /* First, clear the stats registers. */ for (i = 0; i < 5; i++) dummy = tl_dio_read32(sc, TL_TXGOODFRAMES); /* Clear Areg and Hash registers */ for (i = 0; i < 8; i++) tl_dio_write32(sc, TL_AREG0_B5, 0x00000000); /* * Set up Netconfig register. Enable one channel and * one fragment mode. */ tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_ONECHAN|TL_CFG_ONEFRAG); if (internal && !sc->tl_bitrate) { tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN); } else { tl_dio_clrbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN); } /* Handle cards with bitrate devices. */ if (sc->tl_bitrate) tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_BITRATE); /* * Load adapter irq pacing timer and tx threshold. * We make the transmit threshold 1 initially but we may * change that later. */ cmd = CSR_READ_4(sc, TL_HOSTCMD); cmd |= TL_CMD_NES; cmd &= ~(TL_CMD_RT|TL_CMD_EOC|TL_CMD_ACK_MASK|TL_CMD_CHSEL_MASK); CMD_PUT(sc, cmd | (TL_CMD_LDTHR | TX_THR)); CMD_PUT(sc, cmd | (TL_CMD_LDTMR | 0x00000003)); /* Unreset the MII */ tl_dio_setbit(sc, TL_NETSIO, TL_SIO_NMRST); /* Take the adapter out of reset */ tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NRESET|TL_CMD_NWRAP); /* Wait for things to settle down a little. */ DELAY(500); return; } /* * Probe for a ThunderLAN chip. Check the PCI vendor and device IDs * against our list and return its name if we find a match. 
*/ static int tl_probe(dev) device_t dev; { struct tl_type *t; t = tl_devs; while(t->tl_name != NULL) { if ((pci_get_vendor(dev) == t->tl_vid) && (pci_get_device(dev) == t->tl_did)) { device_set_desc(dev, t->tl_name); return(0); } t++; } return(ENXIO); } static int tl_attach(dev) device_t dev; { int i; u_int32_t command; u_int16_t did, vid; struct tl_type *t; struct ifnet *ifp; struct tl_softc *sc; int unit, error = 0, rid; vid = pci_get_vendor(dev); did = pci_get_device(dev); sc = device_get_softc(dev); unit = device_get_unit(dev); bzero(sc, sizeof(struct tl_softc)); t = tl_devs; while(t->tl_name != NULL) { if (vid == t->tl_vid && did == t->tl_did) break; t++; } if (t->tl_name == NULL) { printf("tl%d: unknown device!?\n", unit); goto fail; } mtx_init(&sc->tl_mtx, device_get_nameunit(dev), MTX_DEF | MTX_RECURSE); TL_LOCK(sc); /* * Map control/status registers. */ command = pci_read_config(dev, PCIR_COMMAND, 4); command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN); pci_write_config(dev, PCIR_COMMAND, command, 4); command = pci_read_config(dev, PCIR_COMMAND, 4); #ifdef TL_USEIOSPACE if (!(command & PCIM_CMD_PORTEN)) { printf("tl%d: failed to enable I/O ports!\n", unit); error = ENXIO; goto fail; } rid = TL_PCI_LOIO; sc->tl_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0, 1, RF_ACTIVE); /* * Some cards have the I/O and memory mapped address registers * reversed. Try both combinations before giving up. 
*/ if (sc->tl_res == NULL) { rid = TL_PCI_LOMEM; sc->tl_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0, ~0, 1, RF_ACTIVE); } #else if (!(command & PCIM_CMD_MEMEN)) { printf("tl%d: failed to enable memory mapping!\n", unit); error = ENXIO; goto fail; } rid = TL_PCI_LOMEM; sc->tl_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1, RF_ACTIVE); if (sc->tl_res == NULL) { rid = TL_PCI_LOIO; sc->tl_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1, RF_ACTIVE); } #endif if (sc->tl_res == NULL) { printf("tl%d: couldn't map ports/memory\n", unit); error = ENXIO; goto fail; } sc->tl_btag = rman_get_bustag(sc->tl_res); sc->tl_bhandle = rman_get_bushandle(sc->tl_res); #ifdef notdef /* * The ThunderLAN manual suggests jacking the PCI latency * timer all the way up to its maximum value. I'm not sure * if this is really necessary, but what the manual wants, * the manual gets. */ command = pci_read_config(dev, TL_PCI_LATENCY_TIMER, 4); command |= 0x0000FF00; pci_write_config(dev, TL_PCI_LATENCY_TIMER, command, 4); #endif /* Allocate interrupt */ rid = 0; sc->tl_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE); if (sc->tl_irq == NULL) { bus_release_resource(dev, TL_RES, TL_RID, sc->tl_res); printf("tl%d: couldn't map interrupt\n", unit); error = ENXIO; goto fail; } error = bus_setup_intr(dev, sc->tl_irq, INTR_TYPE_NET, tl_intr, sc, &sc->tl_intrhand); if (error) { bus_release_resource(dev, SYS_RES_IRQ, 0, sc->tl_irq); bus_release_resource(dev, TL_RES, TL_RID, sc->tl_res); printf("tl%d: couldn't set up irq\n", unit); goto fail; } /* * Now allocate memory for the TX and RX lists. 
*/ sc->tl_ldata = contigmalloc(sizeof(struct tl_list_data), M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc->tl_ldata == NULL) { bus_teardown_intr(dev, sc->tl_irq, sc->tl_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->tl_irq); bus_release_resource(dev, TL_RES, TL_RID, sc->tl_res); printf("tl%d: no memory for list buffers!\n", unit); error = ENXIO; goto fail; } bzero(sc->tl_ldata, sizeof(struct tl_list_data)); sc->tl_unit = unit; sc->tl_dinfo = t; if (t->tl_vid == COMPAQ_VENDORID || t->tl_vid == TI_VENDORID) sc->tl_eeaddr = TL_EEPROM_EADDR; if (t->tl_vid == OLICOM_VENDORID) sc->tl_eeaddr = TL_EEPROM_EADDR_OC; /* Reset the adapter. */ tl_softreset(sc, 1); tl_hardreset(dev); tl_softreset(sc, 1); /* * Get station address from the EEPROM. */ if (tl_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr, sc->tl_eeaddr, ETHER_ADDR_LEN)) { bus_teardown_intr(dev, sc->tl_irq, sc->tl_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->tl_irq); bus_release_resource(dev, TL_RES, TL_RID, sc->tl_res); contigfree(sc->tl_ldata, sizeof(struct tl_list_data), M_DEVBUF); printf("tl%d: failed to read station address\n", unit); error = ENXIO; goto fail; } /* * XXX Olicom, in its desire to be different from the * rest of the world, has done strange things with the * encoding of the station address in the EEPROM. First * of all, they store the address at offset 0xF8 rather * than at 0x83 like the ThunderLAN manual suggests. * Second, they store the address in three 16-bit words in * network byte order, as opposed to storing it sequentially * like all the other ThunderLAN cards. In order to get * the station address in a form that matches what the Olicom * diagnostic utility specifies, we have to byte-swap each * word. To make things even more confusing, neither 00:00:28 * nor 00:00:24 appear in the IEEE OUI database. 
*/ if (sc->tl_dinfo->tl_vid == OLICOM_VENDORID) { for (i = 0; i < ETHER_ADDR_LEN; i += 2) { u_int16_t *p; p = (u_int16_t *)&sc->arpcom.ac_enaddr[i]; *p = ntohs(*p); } } /* * A ThunderLAN chip was detected. Inform the world. */ printf("tl%d: Ethernet address: %6D\n", unit, sc->arpcom.ac_enaddr, ":"); ifp = &sc->arpcom.ac_if; ifp->if_softc = sc; ifp->if_unit = sc->tl_unit; ifp->if_name = "tl"; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = tl_ioctl; ifp->if_output = ether_output; ifp->if_start = tl_start; ifp->if_watchdog = tl_watchdog; ifp->if_init = tl_init; ifp->if_mtu = ETHERMTU; ifp->if_snd.ifq_maxlen = TL_TX_LIST_CNT - 1; callout_handle_init(&sc->tl_stat_ch); /* Reset the adapter again. */ tl_softreset(sc, 1); tl_hardreset(dev); tl_softreset(sc, 1); /* * Do MII setup. If no PHYs are found, then this is a * bitrate ThunderLAN chip that only supports 10baseT * and AUI/BNC. */ if (mii_phy_probe(dev, &sc->tl_miibus, tl_ifmedia_upd, tl_ifmedia_sts)) { struct ifmedia *ifm; sc->tl_bitrate = 1; ifmedia_init(&sc->ifmedia, 0, tl_ifmedia_upd, tl_ifmedia_sts); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL); ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_10_T); /* Reset again, this time setting bitrate mode. */ tl_softreset(sc, 1); ifm = &sc->ifmedia; ifm->ifm_media = ifm->ifm_cur->ifm_media; tl_ifmedia_upd(ifp); } /* * Call MI attach routine. 
*/
	/* Hook the interface into the network stack (with BPF support). */
	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
	TL_UNLOCK(sc);
	return(0);

fail:
	TL_UNLOCK(sc);
	mtx_destroy(&sc->tl_mtx);
	return(error);
}

/*
 * Detach the device: stop the chip, unhook the interface from the
 * network stack, and release every resource acquired in tl_attach()
 * (MII children, list memory, interrupt, bus resource, mutex).
 */
static int tl_detach(dev)
	device_t		dev;
{
	struct tl_softc		*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	TL_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	tl_stop(sc);
	ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);

	/* Detach and delete the MII bus child before freeing resources. */
	bus_generic_detach(dev);
	device_delete_child(dev, sc->tl_miibus);

	contigfree(sc->tl_ldata, sizeof(struct tl_list_data), M_DEVBUF);
	if (sc->tl_bitrate)
		ifmedia_removeall(&sc->ifmedia);

	bus_teardown_intr(dev, sc->tl_irq, sc->tl_intrhand);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->tl_irq);
	bus_release_resource(dev, TL_RES, TL_RID, sc->tl_res);

	TL_UNLOCK(sc);
	mtx_destroy(&sc->tl_mtx);

	return(0);
}

/*
 * Initialize the transmit lists: point each software chain entry at its
 * DMA list entry, string the entries together, and place every slot on
 * the free list.  No mbufs are attached here; that happens in tl_encap().
 */
static int tl_list_tx_init(sc)
	struct tl_softc		*sc;
{
	struct tl_chain_data	*cd;
	struct tl_list_data	*ld;
	int			i;

	cd = &sc->tl_cdata;
	ld = sc->tl_ldata;
	for (i = 0; i < TL_TX_LIST_CNT; i++) {
		cd->tl_tx_chain[i].tl_ptr = &ld->tl_tx_list[i];
		if (i == (TL_TX_LIST_CNT - 1))
			cd->tl_tx_chain[i].tl_next = NULL;
		else
			cd->tl_tx_chain[i].tl_next = &cd->tl_tx_chain[i + 1];
	}

	cd->tl_tx_free = &cd->tl_tx_chain[0];
	cd->tl_tx_tail = cd->tl_tx_head = NULL;
	sc->tl_txeoc = 1;	/* OK for tl_start() to issue a TX GO */

	return(0);
}

/*
 * Initialize the RX lists and allocate mbufs for them.
*/
static int tl_list_rx_init(sc)
	struct tl_softc		*sc;
{
	struct tl_chain_data	*cd;
	struct tl_list_data	*ld;
	int			i;

	cd = &sc->tl_cdata;
	ld = sc->tl_ldata;

	/*
	 * Attach an mbuf cluster to every RX descriptor and chain the
	 * descriptors together both in software (tl_next) and for the
	 * chip (tlist_fptr holds the physical address of the next list).
	 */
	for (i = 0; i < TL_RX_LIST_CNT; i++) {
		cd->tl_rx_chain[i].tl_ptr =
			(struct tl_list_onefrag *)&ld->tl_rx_list[i];
		if (tl_newbuf(sc, &cd->tl_rx_chain[i]) == ENOBUFS)
			return(ENOBUFS);
		if (i == (TL_RX_LIST_CNT - 1)) {
			/* Last entry: forward pointer of 0 ends the chain. */
			cd->tl_rx_chain[i].tl_next = NULL;
			ld->tl_rx_list[i].tlist_fptr = 0;
		} else {
			cd->tl_rx_chain[i].tl_next = &cd->tl_rx_chain[i + 1];
			ld->tl_rx_list[i].tlist_fptr =
					vtophys(&ld->tl_rx_list[i + 1]);
		}
	}

	cd->tl_rx_head = &cd->tl_rx_chain[0];
	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];

	return(0);
}

/*
 * Attach a fresh mbuf cluster to the given RX descriptor and reset the
 * descriptor's fields so the chip can DMA into it.  Returns ENOBUFS if
 * no mbuf or cluster could be allocated (the descriptor is untouched
 * in the header-allocation failure case).
 */
static int tl_newbuf(sc, c)
	struct tl_softc		*sc;
	struct tl_chain_onefrag	*c;
{
	struct mbuf		*m_new = NULL;

	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
	if (m_new == NULL) {
		printf("tl%d: no memory for rx list -- packet dropped!\n",
			sc->tl_unit);
		return(ENOBUFS);
	}

	MCLGET(m_new, M_DONTWAIT);
	if (!(m_new->m_flags & M_EXT)) {
		printf("tl%d: no memory for rx list -- packet dropped!\n",
			sc->tl_unit);
		m_freem(m_new);
		return(ENOBUFS);
	}

#ifdef __alpha__
	/* presumably shifts the payload for alignment -- TODO confirm */
	m_new->m_data += 2;
#endif

	c->tl_mbuf = m_new;
	c->tl_next = NULL;
	c->tl_ptr->tlist_frsize = MCLBYTES;
	c->tl_ptr->tlist_fptr = 0;
	c->tl_ptr->tl_frag.tlist_dadr = vtophys(mtod(m_new, caddr_t));
	c->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
	/* Mark the list ready last, after all other fields are set. */
	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;

	return(0);
}

/*
 * Interrupt handler for RX 'end of frame' condition (EOF). This
 * tells us that a full ethernet frame has been captured and we need
 * to handle it.
 *
 * Reception is done using 'lists' which consist of a header and a
 * series of 10 data count/data address pairs that point to buffers.
 * Initially you're supposed to create a list, populate it with pointers
 * to buffers, then load the physical address of the list into the
 * ch_parm register. The adapter is then supposed to DMA the received
 * frame into the buffers for you.
*
 * To make things as fast as possible, we have the chip DMA directly
 * into mbufs. This saves us from having to do a buffer copy: we can
 * just hand the mbufs directly to ether_input(). Once the frame has
 * been sent on its way, the 'list' structure is assigned a new buffer
 * and moved to the end of the RX chain. As long as we stay ahead of
 * the chip, it will always think it has an endless receive channel.
 *
 * If we happen to fall behind and the chip manages to fill up all of
 * the buffers, it will generate an end of channel interrupt and wait
 * for us to empty the chain and restart the receiver.
 */
static int tl_intvec_rxeof(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	int			r = 0, total_len = 0;
	struct ether_header	*eh;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct tl_chain_onefrag	*cur_rx;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	/* r counts completed frames; it is also the chip's ack count. */
	while(sc->tl_cdata.tl_rx_head != NULL) {
		cur_rx = sc->tl_cdata.tl_rx_head;
		if (!(cur_rx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
			break;
		r++;
		sc->tl_cdata.tl_rx_head = cur_rx->tl_next;
		m = cur_rx->tl_mbuf;
		total_len = cur_rx->tl_ptr->tlist_frsize;

		if (tl_newbuf(sc, cur_rx) == ENOBUFS) {
			/*
			 * No replacement mbuf: recycle the old buffer in
			 * place and drop the frame rather than stall RX.
			 */
			ifp->if_ierrors++;
			cur_rx->tl_ptr->tlist_frsize = MCLBYTES;
			cur_rx->tl_ptr->tlist_cstat = TL_CSTAT_READY;
			cur_rx->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
			continue;
		}

		/* Move the refreshed descriptor to the end of the chain. */
		sc->tl_cdata.tl_rx_tail->tl_ptr->tlist_fptr =
						vtophys(cur_rx->tl_ptr);
		sc->tl_cdata.tl_rx_tail->tl_next = cur_rx;
		sc->tl_cdata.tl_rx_tail = cur_rx;

		eh = mtod(m, struct ether_header *);
		m->m_pkthdr.rcvif = ifp;

		/*
		 * Note: when the ThunderLAN chip is in 'capture all
		 * frames' mode, it will receive its own transmissions.
		 * We don't need to process our own transmissions,
		 * so we drop them here and continue.
		 */
		/*if (ifp->if_flags & IFF_PROMISC && */
		if (!bcmp(eh->ether_shost, sc->arpcom.ac_enaddr,
		 					ETHER_ADDR_LEN)) {
				m_freem(m);
				continue;
		}

		/* Remove header from mbuf and pass it on. */
		m->m_pkthdr.len = m->m_len =
		    total_len - sizeof(struct ether_header);
		m->m_data += sizeof(struct ether_header);
		ether_input(ifp, eh, m);
	}

	return(r);
}

/*
 * The RX-EOC condition hits when the ch_parm address hasn't been
 * initialized or the adapter reached a list with a forward pointer
 * of 0 (which indicates the end of the chain). In our case, this means
 * the card has hit the end of the receive buffer chain and we need to
 * empty out the buffers and shift the pointer back to the beginning again.
 */
static int tl_intvec_rxeoc(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	int			r;
	struct tl_chain_data	*cd;

	sc = xsc;
	cd = &sc->tl_cdata;

	/* Flush out the receive queue and ack RXEOF interrupts. */
	r = tl_intvec_rxeof(xsc, type);
	CMD_PUT(sc, TL_CMD_ACK | r | (type & ~(0x00100000)));
	r = 1;
	/* Rewind the chain to the start and reload the chip's pointer. */
	cd->tl_rx_head = &cd->tl_rx_chain[0];
	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
	CSR_WRITE_4(sc, TL_CH_PARM, vtophys(sc->tl_cdata.tl_rx_head->tl_ptr));
	/* Caller (tl_intr) will issue this ack+GO to restart the receiver. */
	r |= (TL_CMD_GO|TL_CMD_RT);
	return(r);
}

/*
 * TX 'end of frame': reclaim descriptors for frames the chip has
 * finished sending, free their mbufs, and return the count (used to
 * ack the interrupt).
 */
static int tl_intvec_txeof(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	int			r = 0;
	struct tl_chain		*cur_tx;

	sc = xsc;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->tl_cdata.tl_tx_head != NULL) {
		cur_tx = sc->tl_cdata.tl_tx_head;
		if (!(cur_tx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
			break;
		sc->tl_cdata.tl_tx_head = cur_tx->tl_next;

		r++;
		m_freem(cur_tx->tl_mbuf);
		cur_tx->tl_mbuf = NULL;

		/* Return the descriptor to the free list. */
		cur_tx->tl_next = sc->tl_cdata.tl_tx_free;
		sc->tl_cdata.tl_tx_free = cur_tx;
		/* A zero forward pointer means this was the last list. */
		if (!cur_tx->tl_ptr->tlist_fptr)
			break;
	}

	return(r);
}

/*
 * The transmit end of channel interrupt. The adapter triggers this
 * interrupt to tell us it hit the end of the current transmit list.
 *
 * A note about this: it's possible for a condition to arise where
 * tl_start() may try to send frames between TXEOF and TXEOC interrupts.
* You have to avoid this since the chip expects things to go in a
 * particular order: transmit, acknowledge TXEOF, acknowledge TXEOC.
 * When the TXEOF handler is called, it will free all of the transmitted
 * frames and reset the tx_head pointer to NULL. However, a TXEOC
 * interrupt should be received and acknowledged before any more frames
 * are queued for transmission. If tl_start() is called after TXEOF
 * resets the tx_head pointer but _before_ the TXEOC interrupt arrives,
 * it could attempt to issue a transmit command prematurely.
 *
 * To guard against this, tl_start() will only issue transmit commands
 * if the tl_txeoc flag is set, and only the TXEOC interrupt handler
 * can set this flag once tl_start() has cleared it.
 */
static int tl_intvec_txeoc(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	struct ifnet		*ifp;
	u_int32_t		cmd;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	if (sc->tl_cdata.tl_tx_head == NULL) {
		/* Nothing left to send: allow tl_start() to issue TX GO. */
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->tl_cdata.tl_tx_tail = NULL;
		sc->tl_txeoc = 1;
	} else {
		sc->tl_txeoc = 0;
		/* First we have to ack the EOC interrupt. */
		CMD_PUT(sc, TL_CMD_ACK | 0x00000001 | type);
		/* Then load the address of the next TX list. */
		CSR_WRITE_4(sc, TL_CH_PARM,
		    vtophys(sc->tl_cdata.tl_tx_head->tl_ptr));
		/* Restart TX channel. */
		cmd = CSR_READ_4(sc, TL_HOSTCMD);
		cmd &= ~TL_CMD_RT;
		cmd |= TL_CMD_GO|TL_CMD_INTSON;
		CMD_PUT(sc, cmd);
		return(0);
	}

	return(1);
}

/*
 * Adapter check: the chip reported an internal fault.  Log the fault
 * word and fully reinitialize the device.
 */
static int tl_intvec_adchk(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;

	sc = xsc;

	if (type)
		printf("tl%d: adapter check: %x\n", sc->tl_unit,
			(unsigned int)CSR_READ_4(sc, TL_CH_PARM));

	tl_softreset(sc, 1);
	tl_stop(sc);
	tl_init(sc);
	CMD_SET(sc, TL_CMD_INTSON);

	return(0);
}

/*
 * Network status interrupt: read, clear (write-back) and report the
 * NETSTS register.
 */
static int tl_intvec_netsts(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	u_int16_t		netsts;

	sc = xsc;

	netsts = tl_dio_read16(sc, TL_NETSTS);
	tl_dio_write16(sc, TL_NETSTS, netsts);

	printf("tl%d: network status: %x\n", sc->tl_unit, netsts);

	return(1);
}

/*
 * Main interrupt handler: decode the interrupt type/vector from the
 * HOST_INT register, dispatch to the matching tl_intvec_* handler, and
 * ack the interrupt with the handler's return value (0 means the
 * handler acked it itself, or no ack is wanted).
 */
static void tl_intr(xsc)
	void			*xsc;
{
	struct tl_softc		*sc;
	struct ifnet		*ifp;
	int			r = 0;
	u_int32_t		type = 0;
	u_int16_t		ints = 0;
	u_int8_t		ivec = 0;

	sc = xsc;
	TL_LOCK(sc);

	/* Disable interrupts */
	ints = CSR_READ_2(sc, TL_HOST_INT);
	CSR_WRITE_2(sc, TL_HOST_INT, ints);
	type = (ints << 16) & 0xFFFF0000;
	ivec = (ints & TL_VEC_MASK) >> 5;
	ints = (ints & TL_INT_MASK) >> 2;

	ifp = &sc->arpcom.ac_if;

	switch(ints) {
	case (TL_INTR_INVALID):
#ifdef DIAGNOSTIC
		printf("tl%d: got an invalid interrupt!\n", sc->tl_unit);
#endif
		/* Re-enable interrupts but don't ack this one. */
		CMD_PUT(sc, type);
		r = 0;
		break;
	case (TL_INTR_TXEOF):
		r = tl_intvec_txeof((void *)sc, type);
		break;
	case (TL_INTR_TXEOC):
		r = tl_intvec_txeoc((void *)sc, type);
		break;
	case (TL_INTR_STATOFLOW):
		tl_stats_update(sc);
		r = 1;
		break;
	case (TL_INTR_RXEOF):
		r = tl_intvec_rxeof((void *)sc, type);
		break;
	case (TL_INTR_DUMMY):
		printf("tl%d: got a dummy interrupt\n", sc->tl_unit);
		r = 1;
		break;
	case (TL_INTR_ADCHK):
		/* The vector distinguishes adapter check from net status. */
		if (ivec)
			r = tl_intvec_adchk((void *)sc, type);
		else
			r = tl_intvec_netsts((void *)sc, type);
		break;
	case (TL_INTR_RXEOC):
		r = tl_intvec_rxeoc((void *)sc, type);
		break;
	default:
		printf("tl%d: bogus interrupt type\n", ifp->if_unit);
		break;
	}

	/* Re-enable interrupts */
	if (r) {
		CMD_PUT(sc, TL_CMD_ACK | r | type);
	}

	/* Kick the transmitter if packets queued up while we were busy. */
	if (ifp->if_snd.ifq_head != NULL)
		tl_start(ifp);

	TL_UNLOCK(sc);

	return;
}

/*
 * Periodic statistics sweep (re-arms itself every hz ticks).  Reads the
 * chip's five statistics longwords, folds them into the ifnet counters,
 * bumps the TX FIFO threshold after an underrun, and ticks the MII.
 */
static void tl_stats_update(xsc)
	void			*xsc;
{
	struct tl_softc		*sc;
	struct ifnet		*ifp;
	struct tl_stats		tl_stats;
	struct mii_data		*mii;
	u_int32_t		*p;

	bzero((char *)&tl_stats, sizeof(struct tl_stats));

	sc = xsc;
	TL_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	p = (u_int32_t *)&tl_stats;

	/* Auto-incrementing DIO reads; counters clear when read. */
	CSR_WRITE_2(sc, TL_DIO_ADDR, TL_TXGOODFRAMES|TL_DIO_ADDR_INC);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);
	*p++ = CSR_READ_4(sc, TL_DIO_DATA);

	ifp->if_opackets += tl_tx_goodframes(tl_stats);
	ifp->if_collisions += tl_stats.tl_tx_single_collision +
				tl_stats.tl_tx_multi_collision;
	ifp->if_ipackets += tl_rx_goodframes(tl_stats);
	ifp->if_ierrors += tl_stats.tl_crc_errors + tl_stats.tl_code_errors +
			    tl_rx_overrun(tl_stats);
	ifp->if_oerrors += tl_tx_underrun(tl_stats);

	if (tl_tx_underrun(tl_stats)) {
		u_int8_t		tx_thresh;
		tx_thresh = tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_TXTHRESH;
		if (tx_thresh != TL_AC_TXTHRESH_WHOLEPKT) {
			tx_thresh >>= 4;
			tx_thresh++;
			printf("tl%d: tx underrun -- increasing "
			    "tx threshold to %d bytes\n", sc->tl_unit,
			    (64 * (tx_thresh * 4)));
			tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
			tl_dio_setbit(sc, TL_ACOMMIT, tx_thresh << 4);
		}
	}

	sc->tl_stat_ch = timeout(tl_stats_update, sc, hz);

	if (!sc->tl_bitrate) {
		mii = device_get_softc(sc->tl_miibus);
		mii_tick(mii);
	}

	TL_UNLOCK(sc);

	return;
}

/*
 * Encapsulate an mbuf chain in a list by coupling the mbuf data
 * pointers to the fragment pointers.  Returns 0 on success, 1 if an
 * mbuf had to be allocated for a copy and none was available.
 */
static int tl_encap(sc, c, m_head)
	struct tl_softc		*sc;
	struct tl_chain		*c;
	struct mbuf		*m_head;
{
	int			frag = 0;
	struct tl_frag		*f = NULL;
	int			total_len;
	struct mbuf		*m;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	total_len = 0;

	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if (frag == TL_MAXFRAGS)
				break;
			total_len+= m->m_len;
			c->tl_ptr->tl_frag[frag].tlist_dadr =
				vtophys(mtod(m, vm_offset_t));
			c->tl_ptr->tl_frag[frag].tlist_dcnt = m->m_len;
			frag++;
		}
	}

	/*
	 * Handle special cases.
	 * Special case #1: we used up all 10 fragments, but
	 * we have more mbufs left in the chain. Copy the
	 * data into an mbuf cluster. Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
	 */
	if (m != NULL) {
		struct mbuf		*m_new = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("tl%d: no memory for tx list\n", sc->tl_unit);
			return(1);
		}
		if (m_head->m_pkthdr.len > MHLEN) {
			MCLGET(m_new, M_DONTWAIT);
			if (!(m_new->m_flags & M_EXT)) {
				m_freem(m_new);
				printf("tl%d: no memory for tx list\n",
				sc->tl_unit);
				return(1);
			}
		}
		m_copydata(m_head, 0, m_head->m_pkthdr.len,
					mtod(m_new, caddr_t));
		m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len;
		m_freem(m_head);
		m_head = m_new;
		f = &c->tl_ptr->tl_frag[0];
		f->tlist_dadr = vtophys(mtod(m_new, caddr_t));
		f->tlist_dcnt = total_len = m_new->m_len;
		frag = 1;
	}

	/*
	 * Special case #2: the frame is smaller than the minimum
	 * frame size. We have to pad it to make the chip happy.
	 */
	if (total_len < TL_MIN_FRAMELEN) {
		if (frag == TL_MAXFRAGS)
			printf("tl%d: all frags filled but "
				"frame still to small!\n", sc->tl_unit);
		f = &c->tl_ptr->tl_frag[frag];
		f->tlist_dcnt = TL_MIN_FRAMELEN - total_len;
		f->tlist_dadr = vtophys(&sc->tl_ldata->tl_pad);
		total_len += f->tlist_dcnt;
		frag++;
	}

	c->tl_mbuf = m_head;
	c->tl_ptr->tl_frag[frag - 1].tlist_dcnt |= TL_LAST_FRAG;
	c->tl_ptr->tlist_frsize = total_len;
	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;
	c->tl_ptr->tlist_fptr = 0;

	return(0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
static void tl_start(ifp)
	struct ifnet		*ifp;
{
	struct tl_softc		*sc;
	struct mbuf		*m_head = NULL;
	u_int32_t		cmd;
	struct tl_chain		*prev = NULL, *cur_tx = NULL, *start_tx;

	sc = ifp->if_softc;
	TL_LOCK(sc);

	/*
	 * Check for an available queue slot. If there are none,
	 * punt.
	 */
	if (sc->tl_cdata.tl_tx_free == NULL) {
		ifp->if_flags |= IFF_OACTIVE;
		TL_UNLOCK(sc);
		return;
	}

	start_tx = sc->tl_cdata.tl_tx_free;

	while(sc->tl_cdata.tl_tx_free != NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/* Pick a chain member off the free list. */
		cur_tx = sc->tl_cdata.tl_tx_free;
		sc->tl_cdata.tl_tx_free = cur_tx->tl_next;

		cur_tx->tl_next = NULL;

		/* Pack the data into the list. */
		tl_encap(sc, cur_tx, m_head);

		/* Chain it together */
		if (prev != NULL) {
			prev->tl_next = cur_tx;
			prev->tl_ptr->tlist_fptr = vtophys(cur_tx->tl_ptr);
		}
		prev = cur_tx;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp, cur_tx->tl_mbuf);
	}

	/*
	 * If there are no packets queued, bail.
	 */
	if (cur_tx == NULL) {
		TL_UNLOCK(sc);
		return;
	}

	/*
	 * That's all we can stands, we can't stands no more.
	 * If there are no other transfers pending, then issue the
	 * TX GO command to the adapter to start things moving.
	 * Otherwise, just leave the data in the queue and let
	 * the EOF/EOC interrupt handler send.
	 */
	if (sc->tl_cdata.tl_tx_head == NULL) {
		sc->tl_cdata.tl_tx_head = start_tx;
		sc->tl_cdata.tl_tx_tail = cur_tx;

		/* Only issue TX GO after TXEOC has been acked (see above). */
		if (sc->tl_txeoc) {
			sc->tl_txeoc = 0;
			CSR_WRITE_4(sc, TL_CH_PARM, vtophys(start_tx->tl_ptr));
			cmd = CSR_READ_4(sc, TL_HOSTCMD);
			cmd &= ~TL_CMD_RT;
			cmd |= TL_CMD_GO|TL_CMD_INTSON;
			CMD_PUT(sc, cmd);
		}
	} else {
		sc->tl_cdata.tl_tx_tail->tl_next = start_tx;
		sc->tl_cdata.tl_tx_tail = cur_tx;
	}

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
	TL_UNLOCK(sc);

	return;
}

/*
 * Initialize (or reinitialize) the hardware and mark the interface
 * running: program filters, set thresholds and burst sizes, build the
 * RX/TX lists, start the receiver, and arm the statistics timer.
 */
static void tl_init(xsc)
	void			*xsc;
{
	struct tl_softc		*sc = xsc;
	struct ifnet		*ifp = &sc->arpcom.ac_if;
	struct mii_data		*mii;

	TL_LOCK(sc);

	ifp = &sc->arpcom.ac_if;

	/*
	 * Cancel pending I/O.
	 */
	tl_stop(sc);

	/* Initialize TX FIFO threshold */
	tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH);
	tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH_16LONG);

	/* Set PCI burst size */
	tl_dio_write8(sc, TL_BSIZEREG, TL_RXBURST_16LONG|TL_TXBURST_16LONG);

	/*
	 * Set 'capture all frames' bit for promiscuous mode.
	 */
	if (ifp->if_flags & IFF_PROMISC)
		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
	else
		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);

	/*
	 * Set capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_NOBRX);
	else
		tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NOBRX);

	tl_dio_write16(sc, TL_MAXRX, MCLBYTES);

	/* Init our MAC address */
	tl_setfilt(sc, (caddr_t)&sc->arpcom.ac_enaddr, 0);

	/* Init multicast filter, if needed. */
	tl_setmulti(sc);

	/* Init circular RX list. */
	if (tl_list_rx_init(sc) == ENOBUFS) {
		printf("tl%d: initialization failed: no "
			"memory for rx buffers\n", sc->tl_unit);
		tl_stop(sc);
		TL_UNLOCK(sc);
		return;
	}

	/* Init TX pointers. */
	tl_list_tx_init(sc);

	/* Enable PCI interrupts. */
	CMD_SET(sc, TL_CMD_INTSON);

	/* Load the address of the rx list */
	CMD_SET(sc, TL_CMD_RT);
	CSR_WRITE_4(sc, TL_CH_PARM, vtophys(&sc->tl_ldata->tl_rx_list[0]));

	if (!sc->tl_bitrate) {
		if (sc->tl_miibus != NULL) {
			mii = device_get_softc(sc->tl_miibus);
			mii_mediachg(mii);
		}
	}

	/* Send the RX go command */
	CMD_SET(sc, TL_CMD_GO|TL_CMD_NES|TL_CMD_RT);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start the stats update counter */
	sc->tl_stat_ch = timeout(tl_stats_update, sc, hz);
	TL_UNLOCK(sc);

	return;
}

/*
 * Set media options.
 */
static int tl_ifmedia_upd(ifp)
	struct ifnet		*ifp;
{
	struct tl_softc		*sc;
	struct mii_data		*mii = NULL;

	sc = ifp->if_softc;

	/* Bitrate (non-MII) chips are handled directly; others via MII. */
	if (sc->tl_bitrate)
		tl_setmode(sc, sc->ifmedia.ifm_media);
	else {
		mii = device_get_softc(sc->tl_miibus);
		mii_mediachg(mii);
	}

	return(0);
}

/*
 * Report current media status.
 */
static void tl_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct tl_softc		*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;

	ifmr->ifm_active = IFM_ETHER;

	if (sc->tl_bitrate) {
		/* Derive media/duplex from the ACOMMIT pins. */
		if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD1)
			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
		else
			ifmr->ifm_active = IFM_ETHER|IFM_10_T;
		if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD3)
			ifmr->ifm_active |= IFM_HDX;
		else
			ifmr->ifm_active |= IFM_FDX;
		return;
	} else {
		mii = device_get_softc(sc->tl_miibus);
		mii_pollstat(mii);
		ifmr->ifm_active = mii->mii_media_active;
		ifmr->ifm_status = mii->mii_media_status;
	}

	return;
}

/*
 * Handle interface ioctls: address/MTU via ether_ioctl(), flag changes
 * (toggling promiscuous mode without a full reinit when possible),
 * multicast list updates, and media requests.
 */
static int tl_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct tl_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			s, error = 0;

	s = splimp();

	switch(command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
	case SIOCSIFMTU:
		error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->tl_if_flags & IFF_PROMISC)) {
				/* Entering promiscuous mode: just set CAF. */
				tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF);
				tl_setmulti(sc);
			} else
			if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->tl_if_flags & IFF_PROMISC) {
				/* Leaving promiscuous mode: clear CAF. */
				tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF);
				tl_setmulti(sc);
			} else
				tl_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				tl_stop(sc);
			}
		}
		sc->tl_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		tl_setmulti(sc);
		error = 0;
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (sc->tl_bitrate)
			error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
		else {
			struct mii_data		*mii;
			mii = device_get_softc(sc->tl_miibus);
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		}
		break;
	default:
		error = EINVAL;
		break;
	}

	(void)splx(s);

	return(error);
}

/*
 * Transmit watchdog: the chip failed to complete a send within the
 * timeout set in tl_start(); reset and reinitialize.
 */
static void tl_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct tl_softc		*sc;

	sc = ifp->if_softc;

	printf("tl%d: device timeout\n", sc->tl_unit);

	ifp->if_oerrors++;

	tl_softreset(sc, 1);
	tl_init(sc);

	return;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void tl_stop(sc)
	struct tl_softc		*sc;
{
	register int		i;
	struct ifnet		*ifp;

	TL_LOCK(sc);

	ifp = &sc->arpcom.ac_if;

	/* Stop the stats updater. */
	untimeout(tl_stats_update, sc, sc->tl_stat_ch);

	/* Stop the transmitter */
	CMD_CLR(sc, TL_CMD_RT);
	CMD_SET(sc, TL_CMD_STOP);
	CSR_WRITE_4(sc, TL_CH_PARM, 0);

	/* Stop the receiver */
	CMD_SET(sc, TL_CMD_RT);
	CMD_SET(sc, TL_CMD_STOP);
	CSR_WRITE_4(sc, TL_CH_PARM, 0);

	/*
	 * Disable host interrupts.
	 */
	CMD_SET(sc, TL_CMD_INTSOFF);

	/*
	 * Clear list pointer.
	 */
	CSR_WRITE_4(sc, TL_CH_PARM, 0);

	/*
	 * Free the RX lists.
	 */
	for (i = 0; i < TL_RX_LIST_CNT; i++) {
		if (sc->tl_cdata.tl_rx_chain[i].tl_mbuf != NULL) {
			m_freem(sc->tl_cdata.tl_rx_chain[i].tl_mbuf);
			sc->tl_cdata.tl_rx_chain[i].tl_mbuf = NULL;
		}
	}
	bzero((char *)&sc->tl_ldata->tl_rx_list,
		sizeof(sc->tl_ldata->tl_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < TL_TX_LIST_CNT; i++) {
		if (sc->tl_cdata.tl_tx_chain[i].tl_mbuf != NULL) {
			m_freem(sc->tl_cdata.tl_tx_chain[i].tl_mbuf);
			sc->tl_cdata.tl_tx_chain[i].tl_mbuf = NULL;
		}
	}
	bzero((char *)&sc->tl_ldata->tl_tx_list,
		sizeof(sc->tl_ldata->tl_tx_list));

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	TL_UNLOCK(sc);

	return;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void tl_shutdown(dev)
	device_t		dev;
{
	struct tl_softc		*sc;

	sc = device_get_softc(dev);

	tl_stop(sc);

	return;
}
Index: head/sys/pci/if_vr.c
===================================================================
--- head/sys/pci/if_vr.c	(revision 71961)
+++ head/sys/pci/if_vr.c	(revision 71962)
@@ -1,1649 +1,1648 @@
/*
 * Copyright (c) 1997, 1998
 *	Bill Paul . All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * VIA Rhine fast ethernet PCI NIC driver
 *
 * Supports various network adapters based on the VIA Rhine
 * and Rhine II PCI controllers, including the D-Link DFE530TX.
 * Datasheets are available at http://www.via.com.tw.
 *
 * Written by Bill Paul
 * Electrical Engineering Department
 * Columbia University, New York City
 */

/*
 * The VIA Rhine controllers are similar in some respects to the
 * DEC tulip chips, except less complicated. The controller
 * uses an MII bus and an external physical layer interface. The
 * receiver has a one entry perfect filter and a 64-bit hash table
 * multicast filter. Transmit and receive descriptors are similar
 * to the tulip.
 *
 * The Rhine has a serious flaw in its transmit DMA mechanism:
 * transmit buffers must be longword aligned. Unfortunately,
 * FreeBSD doesn't guarantee that mbufs will be filled in starting
 * at longword boundaries, so we have to do a buffer copy before
 * transmission.
 */

/*
 * NOTE(review): the header names in the following #include directives
 * were lost during text extraction (the <...> arguments are missing).
 * Restore them from the original if_vr.c before building.
 */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include		/* for vtophys */
#include		/* for vtophys */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#define VR_USEIOSPACE

#include

MODULE_DEPEND(vr, miibus, 1, 1, 1);

/* "controller miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#ifndef lint
static const char rcsid[] =
  "$FreeBSD$";
#endif

/*
 * Various supported device vendors/types and their names.
 */
static struct vr_type vr_devs[] = {
	{ VIA_VENDORID, VIA_DEVICEID_RHINE,
		"VIA VT3043 Rhine I 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II,
		"VIA VT86C100A Rhine II 10/100BaseTX" },
	{ VIA_VENDORID, VIA_DEVICEID_RHINE_II_2,
		"VIA VT6102 Rhine II 10/100BaseTX" },
	{ DELTA_VENDORID, DELTA_DEVICEID_RHINE_II,
		"Delta Electronics Rhine II 10/100BaseTX" },
	{ ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II,
		"Addtron Technology Rhine II 10/100BaseTX" },
	{ 0, 0, NULL }
};

/* Forward declarations (old-style __P() prototype macros). */
static int vr_probe		__P((device_t));
static int vr_attach		__P((device_t));
static int vr_detach		__P((device_t));

static int vr_newbuf		__P((struct vr_softc *,
					struct vr_chain_onefrag *,
					struct mbuf *));
static int vr_encap		__P((struct vr_softc *, struct vr_chain *,
					struct mbuf * ));

static void vr_rxeof		__P((struct vr_softc *));
static void vr_rxeoc		__P((struct vr_softc *));
static void vr_txeof		__P((struct vr_softc *));
static void vr_txeoc		__P((struct vr_softc *));
static void vr_tick		__P((void *));
static void vr_intr		__P((void *));
static void vr_start		__P((struct ifnet *));
static int vr_ioctl		__P((struct ifnet *, u_long, caddr_t));
static void vr_init		__P((void *));
static void vr_stop		__P((struct vr_softc *));
static void vr_watchdog		__P((struct ifnet *));
static void vr_shutdown		__P((device_t));
static int vr_ifmedia_upd	__P((struct ifnet *));
static void vr_ifmedia_sts	__P((struct ifnet *, struct ifmediareq *));

static void vr_mii_sync		__P((struct vr_softc *));
static void vr_mii_send		__P((struct vr_softc *, u_int32_t, int));
static int vr_mii_readreg	__P((struct vr_softc *, struct vr_mii_frame *));
static int vr_mii_writereg	__P((struct vr_softc *, struct vr_mii_frame *));
static int vr_miibus_readreg	__P((device_t, int, int));
static int vr_miibus_writereg	__P((device_t, int, int, int));
static void vr_miibus_statchg	__P((device_t));

static void vr_setcfg		__P((struct vr_softc *, int));
static u_int8_t vr_calchash	__P((u_int8_t *));
static void vr_setmulti		__P((struct vr_softc *));
static void vr_reset		__P((struct vr_softc *));
static int vr_list_rx_init	__P((struct vr_softc *));
static int vr_list_tx_init	__P((struct vr_softc *));

/* Select I/O-space vs memory-space register access at compile time. */
#ifdef VR_USEIOSPACE
#define VR_RES			SYS_RES_IOPORT
#define VR_RID			VR_PCI_LOIO
#else
#define VR_RES			SYS_RES_MEMORY
#define VR_RID			VR_PCI_LOMEM
#endif

static device_method_t vr_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		vr_probe),
	DEVMETHOD(device_attach,	vr_attach),
	DEVMETHOD(device_detach,	vr_detach),
	DEVMETHOD(device_shutdown,	vr_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	vr_miibus_readreg),
	DEVMETHOD(miibus_writereg,	vr_miibus_writereg),
	DEVMETHOD(miibus_statchg,	vr_miibus_statchg),

	{ 0, 0 }
};

static driver_t vr_driver = {
	"vr",
	vr_methods,
	sizeof(struct vr_softc)
};

static devclass_t vr_devclass;

DRIVER_MODULE(if_vr, pci, vr_driver, vr_devclass, 0, 0);
DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0);

/* Read-modify-write helpers for 1-, 2- and 4-byte CSR registers. */
#define VR_SETBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
		CSR_READ_1(sc, reg) | x)

#define VR_CLRBIT(sc, reg, x)				\
	CSR_WRITE_1(sc, reg,				\
		CSR_READ_1(sc, reg) & ~x)

#define VR_SETBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
		CSR_READ_2(sc, reg) | x)

#define VR_CLRBIT16(sc, reg, x)				\
	CSR_WRITE_2(sc, reg,				\
		CSR_READ_2(sc, reg) & ~x)

#define VR_SETBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) | x)

#define VR_CLRBIT32(sc, reg, x)				\
	CSR_WRITE_4(sc, reg,				\
		CSR_READ_4(sc, reg) & ~x)

/* Bit-bang helpers for the MII serial interface (expect 'sc' in scope). */
#define SIO_SET(x)					\
	CSR_WRITE_1(sc, VR_MIICMD,			\
		CSR_READ_1(sc, VR_MIICMD) | x)

#define SIO_CLR(x)					\
	CSR_WRITE_1(sc, VR_MIICMD,			\
		CSR_READ_1(sc, VR_MIICMD) & ~x)

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
*/ static void vr_mii_sync(sc) struct vr_softc *sc; { register int i; SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN); for (i = 0; i < 32; i++) { SIO_SET(VR_MIICMD_CLK); DELAY(1); SIO_CLR(VR_MIICMD_CLK); DELAY(1); } return; } /* * Clock a series of bits through the MII. */ static void vr_mii_send(sc, bits, cnt) struct vr_softc *sc; u_int32_t bits; int cnt; { int i; SIO_CLR(VR_MIICMD_CLK); for (i = (0x1 << (cnt - 1)); i; i >>= 1) { if (bits & i) { SIO_SET(VR_MIICMD_DATAIN); } else { SIO_CLR(VR_MIICMD_DATAIN); } DELAY(1); SIO_CLR(VR_MIICMD_CLK); DELAY(1); SIO_SET(VR_MIICMD_CLK); } } /* * Read an PHY register through the MII. */ static int vr_mii_readreg(sc, frame) struct vr_softc *sc; struct vr_mii_frame *frame; { int i, ack; VR_LOCK(sc); /* * Set up frame for RX. */ frame->mii_stdelim = VR_MII_STARTDELIM; frame->mii_opcode = VR_MII_READOP; frame->mii_turnaround = 0; frame->mii_data = 0; CSR_WRITE_1(sc, VR_MIICMD, 0); VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM); /* * Turn on data xmit. */ SIO_SET(VR_MIICMD_DIR); vr_mii_sync(sc); /* * Send command/address info. */ vr_mii_send(sc, frame->mii_stdelim, 2); vr_mii_send(sc, frame->mii_opcode, 2); vr_mii_send(sc, frame->mii_phyaddr, 5); vr_mii_send(sc, frame->mii_regaddr, 5); /* Idle bit */ SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN)); DELAY(1); SIO_SET(VR_MIICMD_CLK); DELAY(1); /* Turn off xmit. */ SIO_CLR(VR_MIICMD_DIR); /* Check for ack */ SIO_CLR(VR_MIICMD_CLK); DELAY(1); SIO_SET(VR_MIICMD_CLK); DELAY(1); ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT; /* * Now try reading data bits. If the ack failed, we still * need to clock through 16 cycles to keep the PHY(s) in sync. 
*/ if (ack) { for(i = 0; i < 16; i++) { SIO_CLR(VR_MIICMD_CLK); DELAY(1); SIO_SET(VR_MIICMD_CLK); DELAY(1); } goto fail; } for (i = 0x8000; i; i >>= 1) { SIO_CLR(VR_MIICMD_CLK); DELAY(1); if (!ack) { if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT) frame->mii_data |= i; DELAY(1); } SIO_SET(VR_MIICMD_CLK); DELAY(1); } fail: SIO_CLR(VR_MIICMD_CLK); DELAY(1); SIO_SET(VR_MIICMD_CLK); DELAY(1); VR_UNLOCK(sc); if (ack) return(1); return(0); } /* * Write to a PHY register through the MII. */ static int vr_mii_writereg(sc, frame) struct vr_softc *sc; struct vr_mii_frame *frame; { VR_LOCK(sc); CSR_WRITE_1(sc, VR_MIICMD, 0); VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM); /* * Set up frame for TX. */ frame->mii_stdelim = VR_MII_STARTDELIM; frame->mii_opcode = VR_MII_WRITEOP; frame->mii_turnaround = VR_MII_TURNAROUND; /* * Turn on data output. */ SIO_SET(VR_MIICMD_DIR); vr_mii_sync(sc); vr_mii_send(sc, frame->mii_stdelim, 2); vr_mii_send(sc, frame->mii_opcode, 2); vr_mii_send(sc, frame->mii_phyaddr, 5); vr_mii_send(sc, frame->mii_regaddr, 5); vr_mii_send(sc, frame->mii_turnaround, 2); vr_mii_send(sc, frame->mii_data, 16); /* Idle bit. */ SIO_SET(VR_MIICMD_CLK); DELAY(1); SIO_CLR(VR_MIICMD_CLK); DELAY(1); /* * Turn off xmit. 
*/ SIO_CLR(VR_MIICMD_DIR); VR_UNLOCK(sc); return(0); } static int vr_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct vr_softc *sc; struct vr_mii_frame frame; sc = device_get_softc(dev); bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; vr_mii_readreg(sc, &frame); return(frame.mii_data); } static int vr_miibus_writereg(dev, phy, reg, data) device_t dev; u_int16_t phy, reg, data; { struct vr_softc *sc; struct vr_mii_frame frame; sc = device_get_softc(dev); bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; frame.mii_data = data; vr_mii_writereg(sc, &frame); return(0); } static void vr_miibus_statchg(dev) device_t dev; { struct vr_softc *sc; struct mii_data *mii; sc = device_get_softc(dev); VR_LOCK(sc); mii = device_get_softc(sc->vr_miibus); vr_setcfg(sc, mii->mii_media_active); VR_UNLOCK(sc); return; } /* * Calculate CRC of a multicast group address, return the lower 6 bits. */ static u_int8_t vr_calchash(addr) u_int8_t *addr; { u_int32_t crc, carry; int i, j; u_int8_t c; /* Compute CRC for the address value. */ crc = 0xFFFFFFFF; /* initial value */ for (i = 0; i < 6; i++) { c = *(addr + i); for (j = 0; j < 8; j++) { carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01); crc <<= 1; c >>= 1; if (carry) crc = (crc ^ 0x04c11db6) | carry; } } /* return the filter bit position */ return((crc >> 26) & 0x0000003F); } /* * Program the 64-bit multicast hash filter. 
*/ static void vr_setmulti(sc) struct vr_softc *sc; { struct ifnet *ifp; int h = 0; u_int32_t hashes[2] = { 0, 0 }; struct ifmultiaddr *ifma; u_int8_t rxfilt; int mcnt = 0; ifp = &sc->arpcom.ac_if; rxfilt = CSR_READ_1(sc, VR_RXCFG); if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { rxfilt |= VR_RXCFG_RX_MULTI; CSR_WRITE_1(sc, VR_RXCFG, rxfilt); CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF); CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF); return; } /* first, zot all the existing hash bits */ CSR_WRITE_4(sc, VR_MAR0, 0); CSR_WRITE_4(sc, VR_MAR1, 0); /* now program new ones */ - for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; - ifma = ifma->ifma_link.le_next) { + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = vr_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); mcnt++; } if (mcnt) rxfilt |= VR_RXCFG_RX_MULTI; else rxfilt &= ~VR_RXCFG_RX_MULTI; CSR_WRITE_4(sc, VR_MAR0, hashes[0]); CSR_WRITE_4(sc, VR_MAR1, hashes[1]); CSR_WRITE_1(sc, VR_RXCFG, rxfilt); return; } /* * In order to fiddle with the * 'full-duplex' and '100Mbps' bits in the netconfig register, we * first have to put the transmit and/or receive logic in the idle state. 
*/ static void vr_setcfg(sc, media) struct vr_softc *sc; int media; { int restart = 0; if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) { restart = 1; VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON)); } if ((media & IFM_GMASK) == IFM_FDX) VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX); else VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX); if (restart) VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON); return; } static void vr_reset(sc) struct vr_softc *sc; { register int i; VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET); for (i = 0; i < VR_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET)) break; } if (i == VR_TIMEOUT) printf("vr%d: reset never completed!\n", sc->vr_unit); /* Wait a little while for the chip to get its brains in order. */ DELAY(1000); return; } /* * Probe for a VIA Rhine chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int vr_probe(dev) device_t dev; { struct vr_type *t; t = vr_devs; while(t->vr_name != NULL) { if ((pci_get_vendor(dev) == t->vr_vid) && (pci_get_device(dev) == t->vr_did)) { device_set_desc(dev, t->vr_name); return(0); } t++; } return(ENXIO); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int vr_attach(dev) device_t dev; { int i; u_char eaddr[ETHER_ADDR_LEN]; u_int32_t command; struct vr_softc *sc; struct ifnet *ifp; int unit, error = 0, rid; sc = device_get_softc(dev); unit = device_get_unit(dev); bzero(sc, sizeof(struct vr_softc *)); mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_DEF | MTX_RECURSE); VR_LOCK(sc); /* * Handle power management nonsense. */ command = pci_read_config(dev, VR_PCI_CAPID, 4) & 0x000000FF; if (command == 0x01) { command = pci_read_config(dev, VR_PCI_PWRMGMTCTRL, 4); if (command & VR_PSTATE_MASK) { u_int32_t iobase, membase, irq; /* Save important PCI config data. 
*/ iobase = pci_read_config(dev, VR_PCI_LOIO, 4); membase = pci_read_config(dev, VR_PCI_LOMEM, 4); irq = pci_read_config(dev, VR_PCI_INTLINE, 4); /* Reset the power state. */ printf("vr%d: chip is in D%d power mode " "-- setting to D0\n", unit, command & VR_PSTATE_MASK); command &= 0xFFFFFFFC; pci_write_config(dev, VR_PCI_PWRMGMTCTRL, command, 4); /* Restore PCI config data. */ pci_write_config(dev, VR_PCI_LOIO, iobase, 4); pci_write_config(dev, VR_PCI_LOMEM, membase, 4); pci_write_config(dev, VR_PCI_INTLINE, irq, 4); } } /* * Map control/status registers. */ command = pci_read_config(dev, PCIR_COMMAND, 4); command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN); pci_write_config(dev, PCIR_COMMAND, command, 4); command = pci_read_config(dev, PCIR_COMMAND, 4); #ifdef VR_USEIOSPACE if (!(command & PCIM_CMD_PORTEN)) { printf("vr%d: failed to enable I/O ports!\n", unit); free(sc, M_DEVBUF); goto fail; } #else if (!(command & PCIM_CMD_MEMEN)) { printf("vr%d: failed to enable memory mapping!\n", unit); goto fail; } #endif rid = VR_RID; sc->vr_res = bus_alloc_resource(dev, VR_RES, &rid, 0, ~0, 1, RF_ACTIVE); if (sc->vr_res == NULL) { printf("vr%d: couldn't map ports/memory\n", unit); error = ENXIO; goto fail; } sc->vr_btag = rman_get_bustag(sc->vr_res); sc->vr_bhandle = rman_get_bushandle(sc->vr_res); /* Allocate interrupt */ rid = 0; sc->vr_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE); if (sc->vr_irq == NULL) { printf("vr%d: couldn't map interrupt\n", unit); bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res); error = ENXIO; goto fail; } error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET, vr_intr, sc, &sc->vr_intrhand); if (error) { bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq); bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res); printf("vr%d: couldn't set up irq\n", unit); goto fail; } /* Reset the adapter. */ vr_reset(sc); /* * Get station address. 
The way the Rhine chips work, * you're not allowed to directly access the EEPROM once * they've been programmed a special way. Consequently, * we need to read the node address from the PAR0 and PAR1 * registers. */ VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD); DELAY(200); for (i = 0; i < ETHER_ADDR_LEN; i++) eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i); /* * A Rhine chip was detected. Inform the world. */ printf("vr%d: Ethernet address: %6D\n", unit, eaddr, ":"); sc->vr_unit = unit; bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); sc->vr_ldata = contigmalloc(sizeof(struct vr_list_data), M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc->vr_ldata == NULL) { printf("vr%d: no memory for list buffers!\n", unit); bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq); bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res); error = ENXIO; goto fail; } bzero(sc->vr_ldata, sizeof(struct vr_list_data)); ifp = &sc->arpcom.ac_if; ifp->if_softc = sc; ifp->if_unit = unit; ifp->if_name = "vr"; ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = vr_ioctl; ifp->if_output = ether_output; ifp->if_start = vr_start; ifp->if_watchdog = vr_watchdog; ifp->if_init = vr_init; ifp->if_baudrate = 10000000; ifp->if_snd.ifq_maxlen = VR_TX_LIST_CNT - 1; /* * Do MII setup. */ if (mii_phy_probe(dev, &sc->vr_miibus, vr_ifmedia_upd, vr_ifmedia_sts)) { printf("vr%d: MII without any phy!\n", sc->vr_unit); bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq); bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res); contigfree(sc->vr_ldata, sizeof(struct vr_list_data), M_DEVBUF); error = ENXIO; goto fail; } callout_handle_init(&sc->vr_stat_ch); /* * Call MI attach routine. 
*/ ether_ifattach(ifp, ETHER_BPF_SUPPORTED); VR_UNLOCK(sc); return(0); fail: VR_UNLOCK(sc); mtx_destroy(&sc->vr_mtx); return(error); } static int vr_detach(dev) device_t dev; { struct vr_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); VR_LOCK(sc); ifp = &sc->arpcom.ac_if; vr_stop(sc); ether_ifdetach(ifp, ETHER_BPF_SUPPORTED); bus_generic_detach(dev); device_delete_child(dev, sc->vr_miibus); bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq); bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res); contigfree(sc->vr_ldata, sizeof(struct vr_list_data), M_DEVBUF); VR_UNLOCK(sc); mtx_destroy(&sc->vr_mtx); return(0); } /* * Initialize the transmit descriptors. */ static int vr_list_tx_init(sc) struct vr_softc *sc; { struct vr_chain_data *cd; struct vr_list_data *ld; int i; cd = &sc->vr_cdata; ld = sc->vr_ldata; for (i = 0; i < VR_TX_LIST_CNT; i++) { cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i]; if (i == (VR_TX_LIST_CNT - 1)) cd->vr_tx_chain[i].vr_nextdesc = &cd->vr_tx_chain[0]; else cd->vr_tx_chain[i].vr_nextdesc = &cd->vr_tx_chain[i + 1]; } cd->vr_tx_free = &cd->vr_tx_chain[0]; cd->vr_tx_tail = cd->vr_tx_head = NULL; return(0); } /* * Initialize the RX descriptors and allocate mbufs for them. Note that * we arrange the descriptors in a closed ring, so that the last descriptor * points back to the first. 
*/ static int vr_list_rx_init(sc) struct vr_softc *sc; { struct vr_chain_data *cd; struct vr_list_data *ld; int i; cd = &sc->vr_cdata; ld = sc->vr_ldata; for (i = 0; i < VR_RX_LIST_CNT; i++) { cd->vr_rx_chain[i].vr_ptr = (struct vr_desc *)&ld->vr_rx_list[i]; if (vr_newbuf(sc, &cd->vr_rx_chain[i], NULL) == ENOBUFS) return(ENOBUFS); if (i == (VR_RX_LIST_CNT - 1)) { cd->vr_rx_chain[i].vr_nextdesc = &cd->vr_rx_chain[0]; ld->vr_rx_list[i].vr_next = vtophys(&ld->vr_rx_list[0]); } else { cd->vr_rx_chain[i].vr_nextdesc = &cd->vr_rx_chain[i + 1]; ld->vr_rx_list[i].vr_next = vtophys(&ld->vr_rx_list[i + 1]); } } cd->vr_rx_head = &cd->vr_rx_chain[0]; return(0); } /* * Initialize an RX descriptor and attach an MBUF cluster. * Note: the length fields are only 11 bits wide, which means the * largest size we can specify is 2047. This is important because * MCLBYTES is 2048, so we have to subtract one otherwise we'll * overflow the field and make a mess. */ static int vr_newbuf(sc, c, m) struct vr_softc *sc; struct vr_chain_onefrag *c; struct mbuf *m; { struct mbuf *m_new = NULL; if (m == NULL) { MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("vr%d: no memory for rx list " "-- packet dropped!\n", sc->vr_unit); return(ENOBUFS); } MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { printf("vr%d: no memory for rx list " "-- packet dropped!\n", sc->vr_unit); m_freem(m_new); return(ENOBUFS); } m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; } else { m_new = m; m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; m_new->m_data = m_new->m_ext.ext_buf; } m_adj(m_new, sizeof(u_int64_t)); c->vr_mbuf = m_new; c->vr_ptr->vr_status = VR_RXSTAT; c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t)); c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN; return(0); } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. 
*/ static void vr_rxeof(sc) struct vr_softc *sc; { struct ether_header *eh; struct mbuf *m; struct ifnet *ifp; struct vr_chain_onefrag *cur_rx; int total_len = 0; u_int32_t rxstat; ifp = &sc->arpcom.ac_if; while(!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) & VR_RXSTAT_OWN)) { struct mbuf *m0 = NULL; cur_rx = sc->vr_cdata.vr_rx_head; sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc; m = cur_rx->vr_mbuf; /* * If an error occurs, update stats, clear the * status word and leave the mbuf cluster in place: * it should simply get re-used next time this descriptor * comes up in the ring. */ if (rxstat & VR_RXSTAT_RXERR) { ifp->if_ierrors++; printf("vr%d: rx error: ", sc->vr_unit); switch(rxstat & 0x000000FF) { case VR_RXSTAT_CRCERR: printf("crc error\n"); break; case VR_RXSTAT_FRAMEALIGNERR: printf("frame alignment error\n"); break; case VR_RXSTAT_FIFOOFLOW: printf("FIFO overflow\n"); break; case VR_RXSTAT_GIANT: printf("received giant packet\n"); break; case VR_RXSTAT_RUNT: printf("received runt packet\n"); break; case VR_RXSTAT_BUSERR: printf("system bus error\n"); break; case VR_RXSTAT_BUFFERR: printf("rx buffer error\n"); break; default: printf("unknown rx error\n"); break; } vr_newbuf(sc, cur_rx, m); continue; } /* No errors; receive the packet. */ total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status); /* * XXX The VIA Rhine chip includes the CRC with every * received frame, and there's no way to turn this * behavior off (at least, I can't find anything in * the manual that explains how to do it) so we have * to trim off the CRC manually. */ total_len -= ETHER_CRC_LEN; m0 = m_devget(mtod(m, char *) - ETHER_ALIGN, total_len + ETHER_ALIGN, 0, ifp, NULL); vr_newbuf(sc, cur_rx, m); if (m0 == NULL) { ifp->if_ierrors++; continue; } m_adj(m0, ETHER_ALIGN); m = m0; ifp->if_ipackets++; eh = mtod(m, struct ether_header *); /* Remove header from mbuf and pass it on. 
*/ m_adj(m, sizeof(struct ether_header)); ether_input(ifp, eh, m); } return; } void vr_rxeoc(sc) struct vr_softc *sc; { vr_rxeof(sc); VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON); CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr)); VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON); VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO); return; } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. */ static void vr_txeof(sc) struct vr_softc *sc; { struct vr_chain *cur_tx; struct ifnet *ifp; ifp = &sc->arpcom.ac_if; /* Clear the timeout timer. */ ifp->if_timer = 0; /* Sanity check. */ if (sc->vr_cdata.vr_tx_head == NULL) return; /* * Go through our tx list and free mbufs for those * frames that have been transmitted. */ while(sc->vr_cdata.vr_tx_head->vr_mbuf != NULL) { u_int32_t txstat; cur_tx = sc->vr_cdata.vr_tx_head; txstat = cur_tx->vr_ptr->vr_status; if (txstat & VR_TXSTAT_OWN) break; if (txstat & VR_TXSTAT_ERRSUM) { ifp->if_oerrors++; if (txstat & VR_TXSTAT_DEFER) ifp->if_collisions++; if (txstat & VR_TXSTAT_LATECOLL) ifp->if_collisions++; } ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3; ifp->if_opackets++; if (cur_tx->vr_mbuf != NULL) { m_freem(cur_tx->vr_mbuf); cur_tx->vr_mbuf = NULL; } if (sc->vr_cdata.vr_tx_head == sc->vr_cdata.vr_tx_tail) { sc->vr_cdata.vr_tx_head = NULL; sc->vr_cdata.vr_tx_tail = NULL; break; } sc->vr_cdata.vr_tx_head = cur_tx->vr_nextdesc; } return; } /* * TX 'end of channel' interrupt handler. 
*/ static void vr_txeoc(sc) struct vr_softc *sc; { struct ifnet *ifp; ifp = &sc->arpcom.ac_if; ifp->if_timer = 0; if (sc->vr_cdata.vr_tx_head == NULL) { ifp->if_flags &= ~IFF_OACTIVE; sc->vr_cdata.vr_tx_tail = NULL; } return; } static void vr_tick(xsc) void *xsc; { struct vr_softc *sc; struct mii_data *mii; sc = xsc; VR_LOCK(sc); mii = device_get_softc(sc->vr_miibus); mii_tick(mii); sc->vr_stat_ch = timeout(vr_tick, sc, hz); VR_UNLOCK(sc); return; } static void vr_intr(arg) void *arg; { struct vr_softc *sc; struct ifnet *ifp; u_int16_t status; sc = arg; VR_LOCK(sc); ifp = &sc->arpcom.ac_if; /* Supress unwanted interrupts. */ if (!(ifp->if_flags & IFF_UP)) { vr_stop(sc); VR_UNLOCK(sc); return; } /* Disable interrupts. */ CSR_WRITE_2(sc, VR_IMR, 0x0000); for (;;) { status = CSR_READ_2(sc, VR_ISR); if (status) CSR_WRITE_2(sc, VR_ISR, status); if ((status & VR_INTRS) == 0) break; if (status & VR_ISR_RX_OK) vr_rxeof(sc); if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) || (status & VR_ISR_RX_NOBUF) || (status & VR_ISR_RX_OFLOW) || (status & VR_ISR_RX_DROPPED)) { vr_rxeof(sc); vr_rxeoc(sc); } if (status & VR_ISR_TX_OK) { vr_txeof(sc); vr_txeoc(sc); } if ((status & VR_ISR_TX_UNDERRUN)||(status & VR_ISR_TX_ABRT)){ ifp->if_oerrors++; vr_txeof(sc); if (sc->vr_cdata.vr_tx_head != NULL) { VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON); VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO); } } if (status & VR_ISR_BUSERR) { vr_reset(sc); vr_init(sc); } } /* Re-enable interrupts. */ CSR_WRITE_2(sc, VR_IMR, VR_INTRS); if (ifp->if_snd.ifq_head != NULL) { vr_start(ifp); } VR_UNLOCK(sc); return; } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. 
*/ static int vr_encap(sc, c, m_head) struct vr_softc *sc; struct vr_chain *c; struct mbuf *m_head; { int frag = 0; struct vr_desc *f = NULL; int total_len; struct mbuf *m; m = m_head; total_len = 0; /* * The VIA Rhine wants packet buffers to be longword * aligned, but very often our mbufs aren't. Rather than * waste time trying to decide when to copy and when not * to copy, just do it all the time. */ if (m != NULL) { struct mbuf *m_new = NULL; MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("vr%d: no memory for tx list\n", sc->vr_unit); return(1); } if (m_head->m_pkthdr.len > MHLEN) { MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { m_freem(m_new); printf("vr%d: no memory for tx list\n", sc->vr_unit); return(1); } } m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t)); m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len; m_freem(m_head); m_head = m_new; /* * The Rhine chip doesn't auto-pad, so we have to make * sure to pad short frames out to the minimum frame length * ourselves. */ if (m_head->m_len < VR_MIN_FRAMELEN) { m_new->m_pkthdr.len += VR_MIN_FRAMELEN - m_new->m_len; m_new->m_len = m_new->m_pkthdr.len; } f = c->vr_ptr; f->vr_data = vtophys(mtod(m_new, caddr_t)); f->vr_ctl = total_len = m_new->m_len; f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG; f->vr_status = 0; frag = 1; } c->vr_mbuf = m_head; c->vr_ptr->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT; c->vr_ptr->vr_next = vtophys(c->vr_nextdesc->vr_ptr); return(0); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit lists. We also save a * copy of the pointers since the transmit list fragment pointers are * physical addresses. 
*/ static void vr_start(ifp) struct ifnet *ifp; { struct vr_softc *sc; struct mbuf *m_head = NULL; struct vr_chain *cur_tx = NULL, *start_tx; sc = ifp->if_softc; VR_LOCK(sc); if (ifp->if_flags & IFF_OACTIVE) { VR_UNLOCK(sc); return; } /* * Check for an available queue slot. If there are none, * punt. */ if (sc->vr_cdata.vr_tx_free->vr_mbuf != NULL) { ifp->if_flags |= IFF_OACTIVE; return; } start_tx = sc->vr_cdata.vr_tx_free; while(sc->vr_cdata.vr_tx_free->vr_mbuf == NULL) { IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* Pick a descriptor off the free list. */ cur_tx = sc->vr_cdata.vr_tx_free; sc->vr_cdata.vr_tx_free = cur_tx->vr_nextdesc; /* Pack the data into the descriptor. */ if (vr_encap(sc, cur_tx, m_head)) { IF_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; cur_tx = NULL; break; } if (cur_tx != start_tx) VR_TXOWN(cur_tx) = VR_TXSTAT_OWN; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ if (ifp->if_bpf) bpf_mtap(ifp, cur_tx->vr_mbuf); VR_TXOWN(cur_tx) = VR_TXSTAT_OWN; VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/VR_CMD_TX_GO); } /* * If there are no frames queued, bail. */ if (cur_tx == NULL) { VR_UNLOCK(sc); return; } sc->vr_cdata.vr_tx_tail = cur_tx; if (sc->vr_cdata.vr_tx_head == NULL) sc->vr_cdata.vr_tx_head = start_tx; /* * Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; VR_UNLOCK(sc); return; } static void vr_init(xsc) void *xsc; { struct vr_softc *sc = xsc; struct ifnet *ifp = &sc->arpcom.ac_if; struct mii_data *mii; VR_LOCK(sc); mii = device_get_softc(sc->vr_miibus); /* * Cancel pending I/O and free all RX/TX buffers. */ vr_stop(sc); vr_reset(sc); VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH); VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_STORENFWD); VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH); VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD); /* Init circular RX list. 
*/ if (vr_list_rx_init(sc) == ENOBUFS) { printf("vr%d: initialization failed: no " "memory for rx buffers\n", sc->vr_unit); vr_stop(sc); VR_UNLOCK(sc); return; } /* * Init tx descriptors. */ vr_list_tx_init(sc); /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC); else VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC); /* Set capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD); else VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD); /* * Program the multicast filter, if necessary. */ vr_setmulti(sc); /* * Load the address of the RX list. */ CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr)); /* Enable receiver and transmitter. */ CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START| VR_CMD_TX_ON|VR_CMD_RX_ON| VR_CMD_RX_GO); CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0])); /* * Enable interrupts. */ CSR_WRITE_2(sc, VR_ISR, 0xFFFF); CSR_WRITE_2(sc, VR_IMR, VR_INTRS); mii_mediachg(mii); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; sc->vr_stat_ch = timeout(vr_tick, sc, hz); VR_UNLOCK(sc); return; } /* * Set media options. */ static int vr_ifmedia_upd(ifp) struct ifnet *ifp; { struct vr_softc *sc; sc = ifp->if_softc; if (ifp->if_flags & IFF_UP) vr_init(sc); return(0); } /* * Report current media status. 
*/ static void vr_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct vr_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->vr_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; return; } static int vr_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct vr_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int error = 0; VR_LOCK(sc); switch(command) { case SIOCSIFADDR: case SIOCGIFADDR: case SIOCSIFMTU: error = ether_ioctl(ifp, command, data); break; case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { vr_init(sc); } else { if (ifp->if_flags & IFF_RUNNING) vr_stop(sc); } error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: vr_setmulti(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->vr_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; default: error = EINVAL; break; } VR_UNLOCK(sc); return(error); } static void vr_watchdog(ifp) struct ifnet *ifp; { struct vr_softc *sc; sc = ifp->if_softc; VR_LOCK(sc); ifp->if_oerrors++; printf("vr%d: watchdog timeout\n", sc->vr_unit); vr_stop(sc); vr_reset(sc); vr_init(sc); if (ifp->if_snd.ifq_head != NULL) vr_start(ifp); VR_UNLOCK(sc); return; } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void vr_stop(sc) struct vr_softc *sc; { register int i; struct ifnet *ifp; VR_LOCK(sc); ifp = &sc->arpcom.ac_if; ifp->if_timer = 0; untimeout(vr_tick, sc, sc->vr_stat_ch); VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP); VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON)); CSR_WRITE_2(sc, VR_IMR, 0x0000); CSR_WRITE_4(sc, VR_TXADDR, 0x00000000); CSR_WRITE_4(sc, VR_RXADDR, 0x00000000); /* * Free data in the RX lists. 
*/ for (i = 0; i < VR_RX_LIST_CNT; i++) { if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) { m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf); sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL; } } bzero((char *)&sc->vr_ldata->vr_rx_list, sizeof(sc->vr_ldata->vr_rx_list)); /* * Free the TX list buffers. */ for (i = 0; i < VR_TX_LIST_CNT; i++) { if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) { m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf); sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL; } } bzero((char *)&sc->vr_ldata->vr_tx_list, sizeof(sc->vr_ldata->vr_tx_list)); ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); VR_UNLOCK(sc); return; } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static void vr_shutdown(dev) device_t dev; { struct vr_softc *sc; sc = device_get_softc(dev); vr_stop(sc); return; } Index: head/sys/pci/if_wb.c =================================================================== --- head/sys/pci/if_wb.c (revision 71961) +++ head/sys/pci/if_wb.c (revision 71962) @@ -1,1895 +1,1894 @@ /* * Copyright (c) 1997, 1998 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * Winbond fast ethernet PCI NIC driver * * Supports various cheap network adapters based on the Winbond W89C840F * fast ethernet controller chip. This includes adapters manufactured by * Winbond itself and some made by Linksys. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The Winbond W89C840F chip is a bus master; in some ways it resembles * a DEC 'tulip' chip, only not as complicated. Unfortunately, it has * one major difference which is that while the registers do many of * the same things as a tulip adapter, the offsets are different: where * tulip registers are typically spaced 8 bytes apart, the Winbond * registers are spaced 4 bytes apart. The receiver filter is also * programmed differently. * * Like the tulip, the Winbond chip uses small descriptors containing * a status word, a control word and 32-bit areas that can either be used * to point to two external data blocks, or to point to a single block * and another descriptor in a linked list. Descriptors can be grouped * together in blocks to form fixed length rings or can be chained * together in linked lists. 
A single packet may be spread out over * several descriptors if necessary. * * For the receive ring, this driver uses a linked list of descriptors, * each pointing to a single mbuf cluster buffer, which us large enough * to hold an entire packet. The link list is looped back to created a * closed ring. * * For transmission, the driver creates a linked list of 'super descriptors' * which each contain several individual descriptors linked toghether. * Each 'super descriptor' contains WB_MAXFRAGS descriptors, which we * abuse as fragment pointers. This allows us to use a buffer managment * scheme very similar to that used in the ThunderLAN and Etherlink XL * drivers. * * Autonegotiation is performed using the external PHY via the MII bus. * The sample boards I have all use a Davicom PHY. * * Note: the author of the Linux driver for the Winbond chip alludes * to some sort of flaw in the chip's design that seems to mandate some * drastic workaround which signigicantly impairs transmit performance. * I have no idea what he's on about: transmit performance with all * three of my test boards seems fine. */ #include "opt_bdg.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include #include #include #include #include #include #include /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" #define WB_USEIOSPACE #include MODULE_DEPEND(wb, miibus, 1, 1, 1); #ifndef lint static const char rcsid[] = "$FreeBSD$"; #endif /* * Various supported device vendors/types and their names. 
*/ /* Supported adapters: PCI vendor/device ID pairs mapped to description strings; the list is terminated by a { 0, 0, NULL } sentinel scanned by wb_probe(). */ static struct wb_type wb_devs[] = { { WB_VENDORID, WB_DEVICEID_840F, "Winbond W89C840F 10/100BaseTX" }, { CP_VENDORID, CP_DEVICEID_RL100, "Compex RL100-ATX 10/100baseTX" }, { 0, 0, NULL } }; static int wb_probe __P((device_t)); static int wb_attach __P((device_t)); static int wb_detach __P((device_t)); static void wb_bfree __P((caddr_t, void *args)); static int wb_newbuf __P((struct wb_softc *, struct wb_chain_onefrag *, struct mbuf *)); static int wb_encap __P((struct wb_softc *, struct wb_chain *, struct mbuf *)); static void wb_rxeof __P((struct wb_softc *)); static void wb_rxeoc __P((struct wb_softc *)); static void wb_txeof __P((struct wb_softc *)); static void wb_txeoc __P((struct wb_softc *)); static void wb_intr __P((void *)); static void wb_tick __P((void *)); static void wb_start __P((struct ifnet *)); static int wb_ioctl __P((struct ifnet *, u_long, caddr_t)); static void wb_init __P((void *)); static void wb_stop __P((struct wb_softc *)); static void wb_watchdog __P((struct ifnet *)); static void wb_shutdown __P((device_t)); static int wb_ifmedia_upd __P((struct ifnet *)); static void wb_ifmedia_sts __P((struct ifnet *, struct ifmediareq *)); static void wb_eeprom_putbyte __P((struct wb_softc *, int)); static void wb_eeprom_getword __P((struct wb_softc *, int, u_int16_t *)); static void wb_read_eeprom __P((struct wb_softc *, caddr_t, int, int, int)); static void wb_mii_sync __P((struct wb_softc *)); static void wb_mii_send __P((struct wb_softc *, u_int32_t, int)); static int wb_mii_readreg __P((struct wb_softc *, struct wb_mii_frame *)); static int wb_mii_writereg __P((struct wb_softc *, struct wb_mii_frame *)); static void wb_setcfg __P((struct wb_softc *, u_int32_t)); static u_int8_t wb_calchash __P((caddr_t)); static void wb_setmulti __P((struct wb_softc *)); static void wb_reset __P((struct wb_softc *)); static void wb_fixmedia __P((struct wb_softc *)); static int wb_list_rx_init __P((struct wb_softc *)); static int wb_list_tx_init __P((struct
wb_softc *)); static int wb_miibus_readreg __P((device_t, int, int)); static int wb_miibus_writereg __P((device_t, int, int, int)); static void wb_miibus_statchg __P((device_t)); #ifdef WB_USEIOSPACE #define WB_RES SYS_RES_IOPORT #define WB_RID WB_PCI_LOIO #else #define WB_RES SYS_RES_MEMORY #define WB_RID WB_PCI_LOMEM #endif /* Newbus method table: device probe/attach/detach/shutdown plus generic bus callbacks and the three MII accessors used by miibus. */ static device_method_t wb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, wb_probe), DEVMETHOD(device_attach, wb_attach), DEVMETHOD(device_detach, wb_detach), DEVMETHOD(device_shutdown, wb_shutdown), /* bus interface, for miibus */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, wb_miibus_readreg), DEVMETHOD(miibus_writereg, wb_miibus_writereg), DEVMETHOD(miibus_statchg, wb_miibus_statchg), { 0, 0 } }; static driver_t wb_driver = { "wb", wb_methods, sizeof(struct wb_softc) }; static devclass_t wb_devclass; DRIVER_MODULE(if_wb, pci, wb_driver, wb_devclass, 0, 0); DRIVER_MODULE(miibus, wb, miibus_driver, miibus_devclass, 0, 0); /* Read-modify-write helpers for CSR bits. NOTE(review): SIO_SET/SIO_CLR implicitly reference a local 'sc' variable in the calling function. */ #define WB_SETBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, \ CSR_READ_4(sc, reg) | x) #define WB_CLRBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, \ CSR_READ_4(sc, reg) & ~x) #define SIO_SET(x) \ CSR_WRITE_4(sc, WB_SIO, \ CSR_READ_4(sc, WB_SIO) | x) #define SIO_CLR(x) \ CSR_WRITE_4(sc, WB_SIO, \ CSR_READ_4(sc, WB_SIO) & ~x) /* * Send a read command and address to the EEPROM, check for ACK. */ static void wb_eeprom_putbyte(sc, addr) struct wb_softc *sc; int addr; { register int d, i; d = addr | WB_EECMD_READ; /* * Feed in each bit and strobe the clock. */ for (i = 0x400; i; i >>= 1) { if (d & i) { SIO_SET(WB_SIO_EE_DATAIN); } else { SIO_CLR(WB_SIO_EE_DATAIN); } DELAY(100); SIO_SET(WB_SIO_EE_CLK); DELAY(150); SIO_CLR(WB_SIO_EE_CLK); DELAY(100); } return; } /* * Read a word of data stored in the EEPROM at address 'addr.'
*/ static void wb_eeprom_getword(sc, addr, dest) struct wb_softc *sc; int addr; u_int16_t *dest; { register int i; u_int16_t word = 0; /* Enter EEPROM access mode. */ CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS); /* * Send address of word we want to read. */ wb_eeprom_putbyte(sc, addr); CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS); /* * Start reading bits from EEPROM. */ for (i = 0x8000; i; i >>= 1) { SIO_SET(WB_SIO_EE_CLK); DELAY(100); if (CSR_READ_4(sc, WB_SIO) & WB_SIO_EE_DATAOUT) word |= i; SIO_CLR(WB_SIO_EE_CLK); DELAY(100); } /* Turn off EEPROM access mode. */ CSR_WRITE_4(sc, WB_SIO, 0); *dest = word; return; } /* * Read a sequence of words from the EEPROM. When 'swap' is nonzero * each word is byte-swapped with ntohs() before being stored. */ static void wb_read_eeprom(sc, dest, off, cnt, swap) struct wb_softc *sc; caddr_t dest; int off; int cnt; int swap; { int i; u_int16_t word = 0, *ptr; for (i = 0; i < cnt; i++) { wb_eeprom_getword(sc, off + i, &word); ptr = (u_int16_t *)(dest + (i * 2)); if (swap) *ptr = ntohs(word); else *ptr = word; } return; } /* * Sync the PHYs by setting data bit and strobing the clock 32 times. */ static void wb_mii_sync(sc) struct wb_softc *sc; { register int i; SIO_SET(WB_SIO_MII_DIR|WB_SIO_MII_DATAIN); for (i = 0; i < 32; i++) { SIO_SET(WB_SIO_MII_CLK); DELAY(1); SIO_CLR(WB_SIO_MII_CLK); DELAY(1); } return; } /* * Clock a series of bits through the MII. */ static void wb_mii_send(sc, bits, cnt) struct wb_softc *sc; u_int32_t bits; int cnt; { int i; SIO_CLR(WB_SIO_MII_CLK); for (i = (0x1 << (cnt - 1)); i; i >>= 1) { if (bits & i) { SIO_SET(WB_SIO_MII_DATAIN); } else { SIO_CLR(WB_SIO_MII_DATAIN); } DELAY(1); SIO_CLR(WB_SIO_MII_CLK); DELAY(1); SIO_SET(WB_SIO_MII_CLK); } } /* * Read a PHY register through the MII. */ static int wb_mii_readreg(sc, frame) struct wb_softc *sc; struct wb_mii_frame *frame; { int i, ack; WB_LOCK(sc); /* * Set up frame for RX.
*/ frame->mii_stdelim = WB_MII_STARTDELIM; frame->mii_opcode = WB_MII_READOP; frame->mii_turnaround = 0; frame->mii_data = 0; CSR_WRITE_4(sc, WB_SIO, 0); /* * Turn on data xmit. */ SIO_SET(WB_SIO_MII_DIR); wb_mii_sync(sc); /* * Send command/address info. */ wb_mii_send(sc, frame->mii_stdelim, 2); wb_mii_send(sc, frame->mii_opcode, 2); wb_mii_send(sc, frame->mii_phyaddr, 5); wb_mii_send(sc, frame->mii_regaddr, 5); /* Idle bit */ SIO_CLR((WB_SIO_MII_CLK|WB_SIO_MII_DATAIN)); DELAY(1); SIO_SET(WB_SIO_MII_CLK); DELAY(1); /* Turn off xmit. */ SIO_CLR(WB_SIO_MII_DIR); /* Check for ack */ SIO_CLR(WB_SIO_MII_CLK); DELAY(1); SIO_SET(WB_SIO_MII_CLK); DELAY(1); ack = CSR_READ_4(sc, WB_SIO) & WB_SIO_MII_DATAOUT; SIO_CLR(WB_SIO_MII_CLK); DELAY(1); SIO_SET(WB_SIO_MII_CLK); DELAY(1); /* * Now try reading data bits. If the ack failed, we still * need to clock through 16 cycles to keep the PHY(s) in sync. */ if (ack) { for(i = 0; i < 16; i++) { SIO_CLR(WB_SIO_MII_CLK); DELAY(1); SIO_SET(WB_SIO_MII_CLK); DELAY(1); } goto fail; } for (i = 0x8000; i; i >>= 1) { SIO_CLR(WB_SIO_MII_CLK); DELAY(1); if (!ack) { if (CSR_READ_4(sc, WB_SIO) & WB_SIO_MII_DATAOUT) frame->mii_data |= i; DELAY(1); } SIO_SET(WB_SIO_MII_CLK); DELAY(1); } fail: SIO_CLR(WB_SIO_MII_CLK); DELAY(1); SIO_SET(WB_SIO_MII_CLK); DELAY(1); WB_UNLOCK(sc); if (ack) return(1); return(0); } /* * Write to a PHY register through the MII. */ static int wb_mii_writereg(sc, frame) struct wb_softc *sc; struct wb_mii_frame *frame; { WB_LOCK(sc); /* * Set up frame for TX. */ frame->mii_stdelim = WB_MII_STARTDELIM; frame->mii_opcode = WB_MII_WRITEOP; frame->mii_turnaround = WB_MII_TURNAROUND; /* * Turn on data output. */ SIO_SET(WB_SIO_MII_DIR); wb_mii_sync(sc); wb_mii_send(sc, frame->mii_stdelim, 2); wb_mii_send(sc, frame->mii_opcode, 2); wb_mii_send(sc, frame->mii_phyaddr, 5); wb_mii_send(sc, frame->mii_regaddr, 5); wb_mii_send(sc, frame->mii_turnaround, 2); wb_mii_send(sc, frame->mii_data, 16); /* Idle bit.
*/ SIO_SET(WB_SIO_MII_CLK); DELAY(1); SIO_CLR(WB_SIO_MII_CLK); DELAY(1); /* * Turn off xmit. */ SIO_CLR(WB_SIO_MII_DIR); WB_UNLOCK(sc); return(0); } /* miibus glue: marshal phy/reg into a wb_mii_frame and bit-bang it. */ static int wb_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct wb_softc *sc; struct wb_mii_frame frame; sc = device_get_softc(dev); bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; wb_mii_readreg(sc, &frame); return(frame.mii_data); } static int wb_miibus_writereg(dev, phy, reg, data) device_t dev; int phy, reg, data; { struct wb_softc *sc; struct wb_mii_frame frame; sc = device_get_softc(dev); bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; frame.mii_data = data; wb_mii_writereg(sc, &frame); return(0); } /* Media status changed: push the new duplex/speed into the chip. */ static void wb_miibus_statchg(dev) device_t dev; { struct wb_softc *sc; struct mii_data *mii; sc = device_get_softc(dev); WB_LOCK(sc); mii = device_get_softc(sc->wb_miibus); wb_setcfg(sc, mii->mii_media_active); WB_UNLOCK(sc); return; } static u_int8_t wb_calchash(addr) caddr_t addr; { u_int32_t crc, carry; int i, j; u_int8_t c; /* Compute CRC for the address value. */ crc = 0xFFFFFFFF; /* initial value */ for (i = 0; i < 6; i++) { c = *(addr + i); for (j = 0; j < 8; j++) { carry = ((crc & 0x80000000) ? 1 : 0) ^ (c & 0x01); crc <<= 1; c >>= 1; if (carry) crc = (crc ^ 0x04c11db6) | carry; } } /* * return the filter bit position * Note: I arrived at the following nonsense * through experimentation. It's not the usual way to * generate the bit position but it's the only thing * I could come up with that works. */ /* NOTE(review): the polynomial constant is 0x04c11db6 (standard CRC-32 uses 0x04c11db7) and 'carry' is ORed back into bit 0 — experimentally derived per the author's comment; do not "correct" without testing on hardware. */ return(~(crc >> 26) & 0x0000003F); } /* * Program the 64-bit multicast hash filter.
*/ static void wb_setmulti(sc) struct wb_softc *sc; { struct ifnet *ifp; int h = 0; u_int32_t hashes[2] = { 0, 0 }; struct ifmultiaddr *ifma; u_int32_t rxfilt; int mcnt = 0; ifp = &sc->arpcom.ac_if; rxfilt = CSR_READ_4(sc, WB_NETCFG); if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { rxfilt |= WB_NETCFG_RX_MULTI; CSR_WRITE_4(sc, WB_NETCFG, rxfilt); CSR_WRITE_4(sc, WB_MAR0, 0xFFFFFFFF); CSR_WRITE_4(sc, WB_MAR1, 0xFFFFFFFF); return; } /* first, zot all the existing hash bits */ CSR_WRITE_4(sc, WB_MAR0, 0); CSR_WRITE_4(sc, WB_MAR1, 0); /* now program new ones */ /* NOTE(review): the '-' and '+' tokens below are unified-diff markers left by extraction (rev 71961 -> 71962); only the LIST_FOREACH line belongs in the current source, replacing the two '-' lines — this text does not compile as-is. */ - for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; - ifma = ifma->ifma_link.le_next) { + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = wb_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); mcnt++; } if (mcnt) rxfilt |= WB_NETCFG_RX_MULTI; else rxfilt &= ~WB_NETCFG_RX_MULTI; CSR_WRITE_4(sc, WB_MAR0, hashes[0]); CSR_WRITE_4(sc, WB_MAR1, hashes[1]); CSR_WRITE_4(sc, WB_NETCFG, rxfilt); return; } /* * The Winbond manual states that in order to fiddle with the * 'full-duplex' and '100Mbps' bits in the netconfig register, we * first have to put the transmit and/or receive logic in the idle state.
*/ static void wb_setcfg(sc, media) struct wb_softc *sc; u_int32_t media; { int i, restart = 0; if (CSR_READ_4(sc, WB_NETCFG) & (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON)) { restart = 1; WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON)); for (i = 0; i < WB_TIMEOUT; i++) { DELAY(10); if ((CSR_READ_4(sc, WB_ISR) & WB_ISR_TX_IDLE) && (CSR_READ_4(sc, WB_ISR) & WB_ISR_RX_IDLE)) break; } if (i == WB_TIMEOUT) printf("wb%d: failed to force tx and " "rx to idle state\n", sc->wb_unit); } if (IFM_SUBTYPE(media) == IFM_10_T) WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS); else WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS); if ((media & IFM_GMASK) == IFM_FDX) WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX); else WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX); if (restart) WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON|WB_NETCFG_RX_ON); return; } /* Hard-reset the chip, then reset any attached PHYs. */ static void wb_reset(sc) struct wb_softc *sc; { register int i; struct mii_data *mii; CSR_WRITE_4(sc, WB_NETCFG, 0); CSR_WRITE_4(sc, WB_BUSCTL, 0); CSR_WRITE_4(sc, WB_TXADDR, 0); CSR_WRITE_4(sc, WB_RXADDR, 0); /* NOTE(review): the reset bit is asserted twice back-to-back — presumably a deliberate repeated write; confirm against the W89C840F datasheet before removing either line. */ WB_SETBIT(sc, WB_BUSCTL, WB_BUSCTL_RESET); WB_SETBIT(sc, WB_BUSCTL, WB_BUSCTL_RESET); for (i = 0; i < WB_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_4(sc, WB_BUSCTL) & WB_BUSCTL_RESET)) break; } if (i == WB_TIMEOUT) printf("wb%d: reset never completed!\n", sc->wb_unit); /* Wait a little while for the chip to get its brains in order.
*/ DELAY(1000); if (sc->wb_miibus == NULL) return; mii = device_get_softc(sc->wb_miibus); if (mii == NULL) return; if (mii->mii_instance) { struct mii_softc *miisc; for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; miisc = LIST_NEXT(miisc, mii_list)) mii_phy_reset(miisc); } return; } /* Flip the active media to the "other" speed (10<->100); called from the RX error recovery paths in wb_rxeof() and wb_intr(). */ static void wb_fixmedia(sc) struct wb_softc *sc; { struct mii_data *mii = NULL; struct ifnet *ifp; u_int32_t media; if (sc->wb_miibus == NULL) return; mii = device_get_softc(sc->wb_miibus); ifp = &sc->arpcom.ac_if; mii_pollstat(mii); if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T) { media = mii->mii_media_active & ~IFM_10_T; media |= IFM_100_TX; } else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) { media = mii->mii_media_active & ~IFM_100_TX; media |= IFM_10_T; } else return; ifmedia_set(&mii->mii_media, media); return; } /* * Probe for a Winbond chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int wb_probe(dev) device_t dev; { struct wb_type *t; t = wb_devs; while(t->wb_name != NULL) { if ((pci_get_vendor(dev) == t->wb_vid) && (pci_get_device(dev) == t->wb_did)) { device_set_desc(dev, t->wb_name); return(0); } t++; } return(ENXIO); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int wb_attach(dev) device_t dev; { u_char eaddr[ETHER_ADDR_LEN]; u_int32_t command; struct wb_softc *sc; struct ifnet *ifp; int unit, error = 0, rid; sc = device_get_softc(dev); unit = device_get_unit(dev); mtx_init(&sc->wb_mtx, device_get_nameunit(dev), MTX_DEF | MTX_RECURSE); WB_LOCK(sc); /* * Handle power management nonsense. */ command = pci_read_config(dev, WB_PCI_CAPID, 4) & 0x000000FF; if (command == 0x01) { command = pci_read_config(dev, WB_PCI_PWRMGMTCTRL, 4); if (command & WB_PSTATE_MASK) { u_int32_t iobase, membase, irq; /* Save important PCI config data.
*/ iobase = pci_read_config(dev, WB_PCI_LOIO, 4); membase = pci_read_config(dev, WB_PCI_LOMEM, 4); irq = pci_read_config(dev, WB_PCI_INTLINE, 4); /* Reset the power state. */ printf("wb%d: chip is in D%d power mode " "-- setting to D0\n", unit, command & WB_PSTATE_MASK); command &= 0xFFFFFFFC; pci_write_config(dev, WB_PCI_PWRMGMTCTRL, command, 4); /* Restore PCI config data. */ pci_write_config(dev, WB_PCI_LOIO, iobase, 4); pci_write_config(dev, WB_PCI_LOMEM, membase, 4); pci_write_config(dev, WB_PCI_INTLINE, irq, 4); } } /* * Map control/status registers. */ command = pci_read_config(dev, PCIR_COMMAND, 4); command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN); pci_write_config(dev, PCIR_COMMAND, command, 4); command = pci_read_config(dev, PCIR_COMMAND, 4); #ifdef WB_USEIOSPACE if (!(command & PCIM_CMD_PORTEN)) { printf("wb%d: failed to enable I/O ports!\n", unit); error = ENXIO; goto fail; } #else if (!(command & PCIM_CMD_MEMEN)) { printf("wb%d: failed to enable memory mapping!\n", unit); error = ENXIO; goto fail; } #endif rid = WB_RID; sc->wb_res = bus_alloc_resource(dev, WB_RES, &rid, 0, ~0, 1, RF_ACTIVE); if (sc->wb_res == NULL) { printf("wb%d: couldn't map ports/memory\n", unit); error = ENXIO; goto fail; } sc->wb_btag = rman_get_bustag(sc->wb_res); sc->wb_bhandle = rman_get_bushandle(sc->wb_res); /* Allocate interrupt */ rid = 0; sc->wb_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE); if (sc->wb_irq == NULL) { printf("wb%d: couldn't map interrupt\n", unit); bus_release_resource(dev, WB_RES, WB_RID, sc->wb_res); error = ENXIO; goto fail; } error = bus_setup_intr(dev, sc->wb_irq, INTR_TYPE_NET, wb_intr, sc, &sc->wb_intrhand); if (error) { bus_release_resource(dev, SYS_RES_IRQ, 0, sc->wb_irq); bus_release_resource(dev, WB_RES, WB_RID, sc->wb_res); printf("wb%d: couldn't set up irq\n", unit); goto fail; } /* Save the cache line size.
*/ sc->wb_cachesize = pci_read_config(dev, WB_PCI_CACHELEN, 4) & 0xFF; /* Reset the adapter. */ wb_reset(sc); /* * Get station address from the EEPROM. */ wb_read_eeprom(sc, (caddr_t)&eaddr, 0, 3, 0); /* * A Winbond chip was detected. Inform the world. */ printf("wb%d: Ethernet address: %6D\n", unit, eaddr, ":"); sc->wb_unit = unit; bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN); sc->wb_ldata = contigmalloc(sizeof(struct wb_list_data) + 8, M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc->wb_ldata == NULL) { printf("wb%d: no memory for list buffers!\n", unit); bus_teardown_intr(dev, sc->wb_irq, sc->wb_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->wb_irq); bus_release_resource(dev, WB_RES, WB_RID, sc->wb_res); error = ENXIO; goto fail; } bzero(sc->wb_ldata, sizeof(struct wb_list_data)); ifp = &sc->arpcom.ac_if; ifp->if_softc = sc; ifp->if_unit = unit; ifp->if_name = "wb"; ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = wb_ioctl; ifp->if_output = ether_output; ifp->if_start = wb_start; ifp->if_watchdog = wb_watchdog; ifp->if_init = wb_init; ifp->if_baudrate = 10000000; ifp->if_snd.ifq_maxlen = WB_TX_LIST_CNT - 1; /* * Do MII setup. */ if (mii_phy_probe(dev, &sc->wb_miibus, wb_ifmedia_upd, wb_ifmedia_sts)) { bus_teardown_intr(dev, sc->wb_irq, sc->wb_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->wb_irq); bus_release_resource(dev, WB_RES, WB_RID, sc->wb_res); /* NOTE(review): the descriptor memory was allocated into sc->wb_ldata with contigmalloc() above, yet this frees sc->wb_ldata_ptr with free() — pointer and allocator mismatch (contigfree() expected); verify against struct wb_softc. */ free(sc->wb_ldata_ptr, M_DEVBUF); error = ENXIO; goto fail; } /* * Call MI attach routine.
*/ ether_ifattach(ifp, ETHER_BPF_SUPPORTED); WB_UNLOCK(sc); return(0); fail: if (error) device_delete_child(dev, sc->wb_miibus); WB_UNLOCK(sc); mtx_destroy(&sc->wb_mtx); return(error); } /* Detach: stop the chip, tear down miibus children, release bus resources. */ static int wb_detach(dev) device_t dev; { struct wb_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); WB_LOCK(sc); ifp = &sc->arpcom.ac_if; wb_stop(sc); ether_ifdetach(ifp, ETHER_BPF_SUPPORTED); /* Delete any miibus and phy devices attached to this interface */ bus_generic_detach(dev); device_delete_child(dev, sc->wb_miibus); bus_teardown_intr(dev, sc->wb_irq, sc->wb_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->wb_irq); bus_release_resource(dev, WB_RES, WB_RID, sc->wb_res); /* NOTE(review): same wb_ldata vs wb_ldata_ptr / contigmalloc() vs free() mismatch as in wb_attach() — verify. */ free(sc->wb_ldata_ptr, M_DEVBUF); WB_UNLOCK(sc); mtx_destroy(&sc->wb_mtx); return(0); } /* * Initialize the transmit descriptors. */ static int wb_list_tx_init(sc) struct wb_softc *sc; { struct wb_chain_data *cd; struct wb_list_data *ld; int i; cd = &sc->wb_cdata; ld = sc->wb_ldata; for (i = 0; i < WB_TX_LIST_CNT; i++) { cd->wb_tx_chain[i].wb_ptr = &ld->wb_tx_list[i]; if (i == (WB_TX_LIST_CNT - 1)) { cd->wb_tx_chain[i].wb_nextdesc = &cd->wb_tx_chain[0]; } else { cd->wb_tx_chain[i].wb_nextdesc = &cd->wb_tx_chain[i + 1]; } } cd->wb_tx_free = &cd->wb_tx_chain[0]; cd->wb_tx_tail = cd->wb_tx_head = NULL; return(0); } /* * Initialize the RX descriptors and allocate mbufs for them. Note that * we arrange the descriptors in a closed ring, so that the last descriptor * points back to the first.
*/ static int wb_list_rx_init(sc) struct wb_softc *sc; { struct wb_chain_data *cd; struct wb_list_data *ld; int i; cd = &sc->wb_cdata; ld = sc->wb_ldata; for (i = 0; i < WB_RX_LIST_CNT; i++) { cd->wb_rx_chain[i].wb_ptr = (struct wb_desc *)&ld->wb_rx_list[i]; cd->wb_rx_chain[i].wb_buf = (void *)&ld->wb_rxbufs[i]; if (wb_newbuf(sc, &cd->wb_rx_chain[i], NULL) == ENOBUFS) return(ENOBUFS); if (i == (WB_RX_LIST_CNT - 1)) { cd->wb_rx_chain[i].wb_nextdesc = &cd->wb_rx_chain[0]; ld->wb_rx_list[i].wb_next = vtophys(&ld->wb_rx_list[0]); } else { cd->wb_rx_chain[i].wb_nextdesc = &cd->wb_rx_chain[i + 1]; ld->wb_rx_list[i].wb_next = vtophys(&ld->wb_rx_list[i + 1]); } } cd->wb_rx_head = &cd->wb_rx_chain[0]; return(0); } /* No-op external-buffer free routine for MEXTADD: the RX buffers live in the statically allocated wb_list_data, so there is nothing to free. */ static void wb_bfree(buf, args) caddr_t buf; void *args; { return; } /* * Initialize an RX descriptor and attach an MBUF cluster. */ static int wb_newbuf(sc, c, m) struct wb_softc *sc; struct wb_chain_onefrag *c; struct mbuf *m; { struct mbuf *m_new = NULL; if (m == NULL) { MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("wb%d: no memory for rx " "list -- packet dropped!\n", sc->wb_unit); return(ENOBUFS); } m_new->m_data = c->wb_buf; m_new->m_pkthdr.len = m_new->m_len = WB_BUFBYTES; MEXTADD(m_new, c->wb_buf, WB_BUFBYTES, wb_bfree, NULL, 0, EXT_NET_DRV); } else { m_new = m; m_new->m_len = m_new->m_pkthdr.len = WB_BUFBYTES; m_new->m_data = m_new->m_ext.ext_buf; } /* Offset the data pointer by 8 bytes; presumably a chip buffer-alignment requirement — confirm against the Winbond datasheet. */ m_adj(m_new, sizeof(u_int64_t)); c->wb_mbuf = m_new; c->wb_ptr->wb_data = vtophys(mtod(m_new, caddr_t)); c->wb_ptr->wb_ctl = WB_RXCTL_RLINK | 1536; c->wb_ptr->wb_status = WB_RXSTAT; return(0); } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols.
*/ static void wb_rxeof(sc) struct wb_softc *sc; { struct ether_header *eh; struct mbuf *m = NULL; struct ifnet *ifp; struct wb_chain_onefrag *cur_rx; int total_len = 0; u_int32_t rxstat; ifp = &sc->arpcom.ac_if; while(!((rxstat = sc->wb_cdata.wb_rx_head->wb_ptr->wb_status) & WB_RXSTAT_OWN)) { struct mbuf *m0 = NULL; cur_rx = sc->wb_cdata.wb_rx_head; sc->wb_cdata.wb_rx_head = cur_rx->wb_nextdesc; m = cur_rx->wb_mbuf; if ((rxstat & WB_RXSTAT_MIIERR) || (WB_RXBYTES(cur_rx->wb_ptr->wb_status) < WB_MIN_FRAMELEN) || (WB_RXBYTES(cur_rx->wb_ptr->wb_status) > 1536) || !(rxstat & WB_RXSTAT_LASTFRAG) || !(rxstat & WB_RXSTAT_RXCMP)) { ifp->if_ierrors++; wb_newbuf(sc, cur_rx, m); /* NOTE(review): "wb%x" prints the unit number in hex; every other message in this driver uses "wb%d". */ printf("wb%x: receiver babbling: possible chip " "bug, forcing reset\n", sc->wb_unit); wb_fixmedia(sc); wb_reset(sc); wb_init(sc); return; } if (rxstat & WB_RXSTAT_RXERR) { ifp->if_ierrors++; wb_newbuf(sc, cur_rx, m); break; } /* No errors; receive the packet. */ total_len = WB_RXBYTES(cur_rx->wb_ptr->wb_status); /* * XXX The Winbond chip includes the CRC with every * received frame, and there's no way to turn this * behavior off (at least, I can't find anything in * the manual that explains how to do it) so we have * to trim off the CRC manually. */ total_len -= ETHER_CRC_LEN; m0 = m_devget(mtod(m, char *) - ETHER_ALIGN, total_len + ETHER_ALIGN, 0, ifp, NULL); wb_newbuf(sc, cur_rx, m); if (m0 == NULL) { ifp->if_ierrors++; break; } m_adj(m0, ETHER_ALIGN); m = m0; ifp->if_ipackets++; eh = mtod(m, struct ether_header *); /* Remove header from mbuf and pass it on. */ m_adj(m, sizeof(struct ether_header)); ether_input(ifp, eh, m); } } /* RX end-of-channel: drain the ring and restart the receiver. NOTE(review): forward-declared 'static' earlier but defined here without it. */ void wb_rxeoc(sc) struct wb_softc *sc; { wb_rxeof(sc); WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON); CSR_WRITE_4(sc, WB_RXADDR, vtophys(&sc->wb_ldata->wb_rx_list[0])); WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON); if (CSR_READ_4(sc, WB_ISR) & WB_RXSTATE_SUSPEND) CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF); return; } /* * A frame was downloaded to the chip.
It's safe for us to clean up * the list buffers. */ static void wb_txeof(sc) struct wb_softc *sc; { struct wb_chain *cur_tx; struct ifnet *ifp; ifp = &sc->arpcom.ac_if; /* Clear the timeout timer. */ ifp->if_timer = 0; if (sc->wb_cdata.wb_tx_head == NULL) return; /* * Go through our tx list and free mbufs for those * frames that have been transmitted. */ while(sc->wb_cdata.wb_tx_head->wb_mbuf != NULL) { u_int32_t txstat; cur_tx = sc->wb_cdata.wb_tx_head; txstat = WB_TXSTATUS(cur_tx); if ((txstat & WB_TXSTAT_OWN) || txstat == WB_UNSENT) break; if (txstat & WB_TXSTAT_TXERR) { ifp->if_oerrors++; if (txstat & WB_TXSTAT_ABORT) ifp->if_collisions++; if (txstat & WB_TXSTAT_LATECOLL) ifp->if_collisions++; } ifp->if_collisions += (txstat & WB_TXSTAT_COLLCNT) >> 3; ifp->if_opackets++; m_freem(cur_tx->wb_mbuf); cur_tx->wb_mbuf = NULL; if (sc->wb_cdata.wb_tx_head == sc->wb_cdata.wb_tx_tail) { sc->wb_cdata.wb_tx_head = NULL; sc->wb_cdata.wb_tx_tail = NULL; break; } sc->wb_cdata.wb_tx_head = cur_tx->wb_nextdesc; } return; } /* * TX 'end of channel' interrupt handler. */ static void wb_txeoc(sc) struct wb_softc *sc; { struct ifnet *ifp; ifp = &sc->arpcom.ac_if; ifp->if_timer = 0; if (sc->wb_cdata.wb_tx_head == NULL) { ifp->if_flags &= ~IFF_OACTIVE; sc->wb_cdata.wb_tx_tail = NULL; } else { if (WB_TXOWN(sc->wb_cdata.wb_tx_head) == WB_UNSENT) { WB_TXOWN(sc->wb_cdata.wb_tx_head) = WB_TXSTAT_OWN; ifp->if_timer = 5; CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF); } } return; } /* Interrupt service routine: masks the chip, dispatches all pending causes, then re-enables interrupts and kicks the transmit queue. */ static void wb_intr(arg) void *arg; { struct wb_softc *sc; struct ifnet *ifp; u_int32_t status; sc = arg; WB_LOCK(sc); ifp = &sc->arpcom.ac_if; if (!(ifp->if_flags & IFF_UP)) { WB_UNLOCK(sc); return; } /* Disable interrupts.
*/ CSR_WRITE_4(sc, WB_IMR, 0x00000000); /* Acknowledge whatever we saw by writing the status back, then handle each cause in turn. */ for (;;) { status = CSR_READ_4(sc, WB_ISR); if (status) CSR_WRITE_4(sc, WB_ISR, status); if ((status & WB_INTRS) == 0) break; if ((status & WB_ISR_RX_NOBUF) || (status & WB_ISR_RX_ERR)) { ifp->if_ierrors++; wb_reset(sc); if (status & WB_ISR_RX_ERR) wb_fixmedia(sc); wb_init(sc); continue; } if (status & WB_ISR_RX_OK) wb_rxeof(sc); if (status & WB_ISR_RX_IDLE) wb_rxeoc(sc); if (status & WB_ISR_TX_OK) wb_txeof(sc); if (status & WB_ISR_TX_NOBUF) wb_txeoc(sc); if (status & WB_ISR_TX_IDLE) { wb_txeof(sc); if (sc->wb_cdata.wb_tx_head != NULL) { WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON); CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF); } } if (status & WB_ISR_TX_UNDERRUN) { ifp->if_oerrors++; wb_txeof(sc); WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON); /* Jack up TX threshold */ sc->wb_txthresh += WB_TXTHRESH_CHUNK; WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH); WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh)); WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON); } if (status & WB_ISR_BUS_ERR) { wb_reset(sc); wb_init(sc); } } /* Re-enable interrupts. */ CSR_WRITE_4(sc, WB_IMR, WB_INTRS); if (ifp->if_snd.ifq_head != NULL) { wb_start(ifp); } WB_UNLOCK(sc); return; } /* Once-a-second MII tick; reschedules itself via timeout(). */ static void wb_tick(xsc) void *xsc; { struct wb_softc *sc; struct mii_data *mii; sc = xsc; WB_LOCK(sc); mii = device_get_softc(sc->wb_miibus); mii_tick(mii); sc->wb_stat_ch = timeout(wb_tick, sc, hz); WB_UNLOCK(sc); return; } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. */ static int wb_encap(sc, c, m_head) struct wb_softc *sc; struct wb_chain *c; struct mbuf *m_head; { int frag = 0; struct wb_desc *f = NULL; int total_len; struct mbuf *m; /* * Start packing the mbufs in this chain into * the fragment pointers. Stop when we run out * of fragments or hit the end of the mbuf chain.
*/ m = m_head; total_len = 0; for (m = m_head, frag = 0; m != NULL; m = m->m_next) { if (m->m_len != 0) { if (frag == WB_MAXFRAGS) break; total_len += m->m_len; f = &c->wb_ptr->wb_frag[frag]; f->wb_ctl = WB_TXCTL_TLINK | m->m_len; if (frag == 0) { f->wb_ctl |= WB_TXCTL_FIRSTFRAG; f->wb_status = 0; } else f->wb_status = WB_TXSTAT_OWN; f->wb_next = vtophys(&c->wb_ptr->wb_frag[frag + 1]); f->wb_data = vtophys(mtod(m, vm_offset_t)); frag++; } } /* * Handle special case: we used up all WB_MAXFRAGS fragments, * but we have more mbufs left in the chain. Copy the * data into an mbuf cluster. Note that we don't * bother clearing the values in the other fragment * pointers/counters; it wouldn't gain us anything, * and would waste cycles. */ if (m != NULL) { struct mbuf *m_new = NULL; MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("wb%d: no memory for tx list", sc->wb_unit); return(1); } if (m_head->m_pkthdr.len > MHLEN) { MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { m_freem(m_new); printf("wb%d: no memory for tx list", sc->wb_unit); return(1); } } m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t)); m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len; m_freem(m_head); m_head = m_new; f = &c->wb_ptr->wb_frag[0]; f->wb_status = 0; f->wb_data = vtophys(mtod(m_new, caddr_t)); f->wb_ctl = total_len = m_new->m_len; f->wb_ctl |= WB_TXCTL_TLINK|WB_TXCTL_FIRSTFRAG; frag = 1; } /* Pad short frames up to the minimum Ethernet length with an extra fragment pointing at the static pad buffer. */ if (total_len < WB_MIN_FRAMELEN) { f = &c->wb_ptr->wb_frag[frag]; f->wb_ctl = WB_MIN_FRAMELEN - total_len; f->wb_data = vtophys(&sc->wb_cdata.wb_pad); f->wb_ctl |= WB_TXCTL_TLINK; f->wb_status = WB_TXSTAT_OWN; frag++; } c->wb_mbuf = m_head; c->wb_lastdesc = frag - 1; WB_TXCTL(c) |= WB_TXCTL_LASTFRAG; WB_TXNEXT(c) = vtophys(&c->wb_nextdesc->wb_ptr->wb_frag[0]); return(0); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit lists.
We also save a * copy of the pointers since the transmit list fragment pointers are * physical addresses. */ static void wb_start(ifp) struct ifnet *ifp; { struct wb_softc *sc; struct mbuf *m_head = NULL; struct wb_chain *cur_tx = NULL, *start_tx; sc = ifp->if_softc; WB_LOCK(sc); /* * Check for an available queue slot. If there are none, * punt. */ if (sc->wb_cdata.wb_tx_free->wb_mbuf != NULL) { ifp->if_flags |= IFF_OACTIVE; WB_UNLOCK(sc); return; } start_tx = sc->wb_cdata.wb_tx_free; while(sc->wb_cdata.wb_tx_free->wb_mbuf == NULL) { IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* Pick a descriptor off the free list. */ cur_tx = sc->wb_cdata.wb_tx_free; sc->wb_cdata.wb_tx_free = cur_tx->wb_nextdesc; /* Pack the data into the descriptor. */ wb_encap(sc, cur_tx, m_head); if (cur_tx != start_tx) WB_TXOWN(cur_tx) = WB_TXSTAT_OWN; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ if (ifp->if_bpf) bpf_mtap(ifp, cur_tx->wb_mbuf); } /* * If there are no packets queued, bail. */ if (cur_tx == NULL) { WB_UNLOCK(sc); return; } /* * Place the request for the upload interrupt * in the last descriptor in the chain. This way, if * we're chaining several packets at once, we'll only * get an interrupt once for the whole chain rather than * once for each packet. */ WB_TXCTL(cur_tx) |= WB_TXCTL_FINT; cur_tx->wb_ptr->wb_frag[0].wb_ctl |= WB_TXCTL_FINT; sc->wb_cdata.wb_tx_tail = cur_tx; if (sc->wb_cdata.wb_tx_head == NULL) { sc->wb_cdata.wb_tx_head = start_tx; WB_TXOWN(start_tx) = WB_TXSTAT_OWN; CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF); } else { /* * We need to distinguish between the case where * the own bit is clear because the chip cleared it * and where the own bit is clear because we haven't * set it yet. The magic value WB_UNSENT is just some * randomly chosen number which doesn't have the own * bit set.
When we actually transmit the frame, the * status word will have _only_ the own bit set, so * the txeoc handler will be able to tell if it needs * to initiate another transmission to flush out pending * frames. */ WB_TXOWN(start_tx) = WB_UNSENT; } /* * Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; WB_UNLOCK(sc); return; } /* Bring the interface up: reset, program BUSCTL/NETCFG, load descriptor rings, set RX filters, enable interrupts and start the RX/TX engines. */ static void wb_init(xsc) void *xsc; { struct wb_softc *sc = xsc; struct ifnet *ifp = &sc->arpcom.ac_if; int i; struct mii_data *mii; WB_LOCK(sc); mii = device_get_softc(sc->wb_miibus); /* * Cancel pending I/O and free all RX/TX buffers. */ wb_stop(sc); wb_reset(sc); sc->wb_txthresh = WB_TXTHRESH_INIT; /* * Set cache alignment and burst length. */ /* NOTE(review): 'foo' is never defined — this block is deliberately compiled out, kept for reference. */ #ifdef foo CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_CONFIG); WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH); WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh)); #endif CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_MUSTBEONE|WB_BUSCTL_ARBITRATION); WB_SETBIT(sc, WB_BUSCTL, WB_BURSTLEN_16LONG); switch(sc->wb_cachesize) { case 32: WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_32LONG); break; case 16: WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_16LONG); break; case 8: WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_8LONG); break; case 0: default: WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_NONE); break; } /* This doesn't tend to work too well at 100Mbps. */ WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_EARLY_ON); /* Init our MAC address */ for (i = 0; i < ETHER_ADDR_LEN; i++) { CSR_WRITE_1(sc, WB_NODE0 + i, sc->arpcom.ac_enaddr[i]); } /* Init circular RX list. */ if (wb_list_rx_init(sc) == ENOBUFS) { printf("wb%d: initialization failed: no " "memory for rx buffers\n", sc->wb_unit); wb_stop(sc); WB_UNLOCK(sc); return; } /* Init TX descriptors. */ wb_list_tx_init(sc); /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) { WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS); } else { WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS); } /* * Set capture broadcast bit to capture broadcast frames.
*/ if (ifp->if_flags & IFF_BROADCAST) { WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD); } else { WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD); } /* * Program the multicast filter, if necessary. */ wb_setmulti(sc); /* * Load the address of the RX list. */ WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON); CSR_WRITE_4(sc, WB_RXADDR, vtophys(&sc->wb_ldata->wb_rx_list[0])); /* * Enable interrupts. */ CSR_WRITE_4(sc, WB_IMR, WB_INTRS); CSR_WRITE_4(sc, WB_ISR, 0xFFFFFFFF); /* Enable receiver and transmitter. */ WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON); CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF); WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON); CSR_WRITE_4(sc, WB_TXADDR, vtophys(&sc->wb_ldata->wb_tx_list[0])); WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON); mii_mediachg(mii); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; sc->wb_stat_ch = timeout(wb_tick, sc, hz); WB_UNLOCK(sc); return; } /* * Set media options. */ static int wb_ifmedia_upd(ifp) struct ifnet *ifp; { struct wb_softc *sc; sc = ifp->if_softc; if (ifp->if_flags & IFF_UP) wb_init(sc); return(0); } /* * Report current media status.
*/ static void wb_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct wb_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->wb_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; return; } /* Interface ioctl handler; address/MTU requests are delegated to ether_ioctl(). */ static int wb_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct wb_softc *sc = ifp->if_softc; struct mii_data *mii; struct ifreq *ifr = (struct ifreq *) data; int error = 0; WB_LOCK(sc); switch(command) { case SIOCSIFADDR: case SIOCGIFADDR: case SIOCSIFMTU: error = ether_ioctl(ifp, command, data); break; case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { wb_init(sc); } else { if (ifp->if_flags & IFF_RUNNING) wb_stop(sc); } error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: wb_setmulti(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->wb_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; /* Unknown ioctls are rejected here rather than forwarded. */ default: error = EINVAL; break; } WB_UNLOCK(sc); return(error); } /* Transmit watchdog: the chip wedged; reset and reinitialize, then restart any queued output. */ static void wb_watchdog(ifp) struct ifnet *ifp; { struct wb_softc *sc; sc = ifp->if_softc; WB_LOCK(sc); ifp->if_oerrors++; printf("wb%d: watchdog timeout\n", sc->wb_unit); #ifdef foo if (!(wb_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT)) printf("wb%d: no carrier - transceiver cable problem?\n", sc->wb_unit); #endif wb_stop(sc); wb_reset(sc); wb_init(sc); if (ifp->if_snd.ifq_head != NULL) wb_start(ifp); WB_UNLOCK(sc); return; } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void wb_stop(sc) struct wb_softc *sc; { register int i; struct ifnet *ifp; WB_LOCK(sc); ifp = &sc->arpcom.ac_if; ifp->if_timer = 0; untimeout(wb_tick, sc, sc->wb_stat_ch); WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_RX_ON|WB_NETCFG_TX_ON)); CSR_WRITE_4(sc, WB_IMR, 0x00000000); CSR_WRITE_4(sc, WB_TXADDR, 0x00000000); CSR_WRITE_4(sc, WB_RXADDR, 0x00000000); /* * Free data in the RX lists.
*/ for (i = 0; i < WB_RX_LIST_CNT; i++) { if (sc->wb_cdata.wb_rx_chain[i].wb_mbuf != NULL) { m_freem(sc->wb_cdata.wb_rx_chain[i].wb_mbuf); sc->wb_cdata.wb_rx_chain[i].wb_mbuf = NULL; } } bzero((char *)&sc->wb_ldata->wb_rx_list, sizeof(sc->wb_ldata->wb_rx_list)); /* * Free the TX list buffers. */ for (i = 0; i < WB_TX_LIST_CNT; i++) { if (sc->wb_cdata.wb_tx_chain[i].wb_mbuf != NULL) { m_freem(sc->wb_cdata.wb_tx_chain[i].wb_mbuf); sc->wb_cdata.wb_tx_chain[i].wb_mbuf = NULL; } } bzero((char *)&sc->wb_ldata->wb_tx_list, sizeof(sc->wb_ldata->wb_tx_list)); ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); WB_UNLOCK(sc); return; } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static void wb_shutdown(dev) device_t dev; { struct wb_softc *sc; sc = device_get_softc(dev); wb_stop(sc); return; } Index: head/sys/pci/if_xl.c =================================================================== --- head/sys/pci/if_xl.c (revision 71961) +++ head/sys/pci/if_xl.c (revision 71962) @@ -1,3000 +1,2998 @@ /* * Copyright (c) 1997, 1998, 1999 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission.
* * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * 3Com 3c90x Etherlink XL PCI NIC driver * * Supports the 3Com "boomerang", "cyclone" and "hurricane" PCI * bus-master chips (3c90x cards and embedded controllers) including * the following: * * 3Com 3c900-TPO 10Mbps/RJ-45 * 3Com 3c900-COMBO 10Mbps/RJ-45,AUI,BNC * 3Com 3c905-TX 10/100Mbps/RJ-45 * 3Com 3c905-T4 10/100Mbps/RJ-45 * 3Com 3c900B-TPO 10Mbps/RJ-45 * 3Com 3c900B-COMBO 10Mbps/RJ-45,AUI,BNC * 3Com 3c900B-TPC 10Mbps/RJ-45,BNC * 3Com 3c900B-FL 10Mbps/Fiber-optic * 3Com 3c905B-COMBO 10/100Mbps/RJ-45,AUI,BNC * 3Com 3c905B-TX 10/100Mbps/RJ-45 * 3Com 3c905B-FL/FX 10/100Mbps/Fiber-optic * 3Com 3c905C-TX 10/100Mbps/RJ-45 (Tornado ASIC) * 3Com 3c980-TX 10/100Mbps server adapter (Hurricane ASIC) * 3Com 3c980C-TX 10/100Mbps server adapter (Tornado ASIC) * 3Com 3cSOHO100-TX 10/100Mbps/RJ-45 (Hurricane ASIC) * 3Com 3c450-TX 10/100Mbps/RJ-45 (Tornado ASIC) * 3Com 3c556 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC) * 3Com 3c556B 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC) * 3Com 3c575TX 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) * 3Com 3c575B 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) * 3Com 3c575C 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) * 3Com 3cxfem656c 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) * 
Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45 * Dell on-board 3c920 10/100Mbps/RJ-45 * Dell Precision on-board 3c905B 10/100Mbps/RJ-45 * Dell Latitude laptop docking station embedded 3c905-TX * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The 3c90x series chips use a bus-master DMA interface for transfering * packets to and from the controller chip. Some of the "vortex" cards * (3c59x) also supported a bus master mode, however for those chips * you could only DMA packets to/from a contiguous memory buffer. For * transmission this would mean copying the contents of the queued mbuf * chain into a an mbuf cluster and then DMAing the cluster. This extra * copy would sort of defeat the purpose of the bus master support for * any packet that doesn't fit into a single mbuf. * * By contrast, the 3c90x cards support a fragment-based bus master * mode where mbuf chains can be encapsulated using TX descriptors. * This is similar to other PCI chips such as the Texas Instruments * ThunderLAN and the Intel 82557/82558. * * The "vortex" driver (if_vx.c) happens to work for the "boomerang" * bus master chips because they maintain the old PIO interface for * backwards compatibility, but starting with the 3c905B and the * "cyclone" chips, the compatibility interface has been dropped. * Since using bus master DMA is a big win, we use this driver to * support the PCI "boomerang" chips even though they work with the * "vortex" driver in order to obtain better performance. * * This driver is in the /sys/pci directory because it only supports * PCI-based NICs. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(xl, miibus, 1, 1, 1); /* "controller miibus0" required. See GENERIC if you get errors here. 
*/ #include "miibus_if.h" /* * The following #define causes the code to use PIO to access the * chip's registers instead of memory mapped mode. The reason PIO mode * is on by default is that the Etherlink XL manual seems to indicate * that only the newer revision chips (3c905B) support both PIO and * memory mapped access. Since we want to be compatible with the older * bus master chips, we use PIO here. If you comment this out, the * driver will use memory mapped I/O, which may be faster but which * might not work on some devices. */ #define XL_USEIOSPACE #include #if !defined(lint) static const char rcsid[] = "$FreeBSD$"; #endif /* * Various supported device vendors/types and their names. */ static struct xl_type xl_devs[] = { { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT, "3Com 3c900-TPO Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT_COMBO, "3Com 3c900-COMBO Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_BOOMERANG_10_100BT, "3Com 3c905-TX Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_BOOMERANG_100BT4, "3Com 3c905-T4 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT, "3Com 3c900B-TPO Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_COMBO, "3Com 3c900B-COMBO Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_TPC, "3Com 3c900B-TPC Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_CYCLONE_10FL, "3Com 3c900B-FL Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT, "3Com 3c905B-TX Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100BT4, "3Com 3c905B-T4 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100FX, "3Com 3c905B-FX/SC Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100_COMBO, "3Com 3c905B-COMBO Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT, "3Com 3c905C-TX Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT_SERV, "3Com 3c980 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_SERV, "3Com 3c980C Fast Etherlink XL" }, { TC_VENDORID, 
TC_DEVICEID_HURRICANE_SOHO100TX, "3Com 3cSOHO100-TX OfficeConnect" }, { TC_VENDORID, TC_DEVICEID_TORNADO_HOMECONNECT, "3Com 3c450-TX HomeConnect" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_556, "3Com 3c556 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_556B, "3Com 3c556B Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_575A, "3Com 3c575TX Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_575B, "3Com 3c575B Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_575C, "3Com 3c575C Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_656C, "3Com 3c656C Fast Etherlink XL" }, { 0, 0, NULL } }; static int xl_probe __P((device_t)); static int xl_attach __P((device_t)); static int xl_detach __P((device_t)); static int xl_newbuf __P((struct xl_softc *, struct xl_chain_onefrag *)); static void xl_stats_update __P((void *)); static int xl_encap __P((struct xl_softc *, struct xl_chain *, struct mbuf * )); static int xl_encap_90xB __P((struct xl_softc *, struct xl_chain *, struct mbuf * )); static void xl_rxeof __P((struct xl_softc *)); static int xl_rx_resync __P((struct xl_softc *)); static void xl_txeof __P((struct xl_softc *)); static void xl_txeof_90xB __P((struct xl_softc *)); static void xl_txeoc __P((struct xl_softc *)); static void xl_intr __P((void *)); static void xl_start __P((struct ifnet *)); static void xl_start_90xB __P((struct ifnet *)); static int xl_ioctl __P((struct ifnet *, u_long, caddr_t)); static void xl_init __P((void *)); static void xl_stop __P((struct xl_softc *)); static void xl_watchdog __P((struct ifnet *)); static void xl_shutdown __P((device_t)); static int xl_ifmedia_upd __P((struct ifnet *)); static void xl_ifmedia_sts __P((struct ifnet *, struct ifmediareq *)); static int xl_eeprom_wait __P((struct xl_softc *)); static int xl_read_eeprom __P((struct xl_softc *, caddr_t, int, int, int)); static void xl_mii_sync __P((struct xl_softc *)); static void xl_mii_send __P((struct xl_softc *, u_int32_t, int)); 
static int xl_mii_readreg __P((struct xl_softc *, struct xl_mii_frame *)); static int xl_mii_writereg __P((struct xl_softc *, struct xl_mii_frame *)); static void xl_setcfg __P((struct xl_softc *)); static void xl_setmode __P((struct xl_softc *, int)); static u_int8_t xl_calchash __P((caddr_t)); static void xl_setmulti __P((struct xl_softc *)); static void xl_setmulti_hash __P((struct xl_softc *)); static void xl_reset __P((struct xl_softc *)); static int xl_list_rx_init __P((struct xl_softc *)); static int xl_list_tx_init __P((struct xl_softc *)); static int xl_list_tx_init_90xB __P((struct xl_softc *)); static void xl_wait __P((struct xl_softc *)); static void xl_mediacheck __P((struct xl_softc *)); static void xl_choose_xcvr __P((struct xl_softc *, int)); #ifdef notdef static void xl_testpacket __P((struct xl_softc *)); #endif static int xl_miibus_readreg __P((device_t, int, int)); static int xl_miibus_writereg __P((device_t, int, int, int)); static void xl_miibus_statchg __P((device_t)); static void xl_miibus_mediainit __P((device_t)); #ifdef XL_USEIOSPACE #define XL_RES SYS_RES_IOPORT #define XL_RID XL_PCI_LOIO #else #define XL_RES SYS_RES_MEMORY #define XL_RID XL_PCI_LOMEM #endif static device_method_t xl_methods[] = { /* Device interface */ DEVMETHOD(device_probe, xl_probe), DEVMETHOD(device_attach, xl_attach), DEVMETHOD(device_detach, xl_detach), DEVMETHOD(device_shutdown, xl_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, xl_miibus_readreg), DEVMETHOD(miibus_writereg, xl_miibus_writereg), DEVMETHOD(miibus_statchg, xl_miibus_statchg), DEVMETHOD(miibus_mediainit, xl_miibus_mediainit), { 0, 0 } }; static driver_t xl_driver = { "xl", xl_methods, sizeof(struct xl_softc) }; static devclass_t xl_devclass; DRIVER_MODULE(if_xl, cardbus, xl_driver, xl_devclass, 0, 0); DRIVER_MODULE(if_xl, pci, xl_driver, xl_devclass, 0, 0); 
DRIVER_MODULE(miibus, xl, miibus_driver, miibus_devclass, 0, 0); /* * Murphy's law says that it's possible the chip can wedge and * the 'command in progress' bit may never clear. Hence, we wait * only a finite amount of time to avoid getting caught in an * infinite loop. Normally this delay routine would be a macro, * but it isn't called during normal operation so we can afford * to make it a function. */ static void xl_wait(sc) struct xl_softc *sc; { register int i; for (i = 0; i < XL_TIMEOUT; i++) { if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY)) break; } if (i == XL_TIMEOUT) printf("xl%d: command never completed!\n", sc->xl_unit); return; } /* * MII access routines are provided for adapters with external * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in * autoneg logic that's faked up to look like a PHY (3c905B-TX). * Note: if you don't perform the MDIO operations just right, * it's possible to end up with code that works correctly with * some chips/CPUs/processor speeds/bus speeds/etc but not * with others. */ #define MII_SET(x) \ CSR_WRITE_2(sc, XL_W4_PHY_MGMT, \ CSR_READ_2(sc, XL_W4_PHY_MGMT) | x) #define MII_CLR(x) \ CSR_WRITE_2(sc, XL_W4_PHY_MGMT, \ CSR_READ_2(sc, XL_W4_PHY_MGMT) & ~x) /* * Sync the PHYs by setting data bit and strobing the clock 32 times. */ static void xl_mii_sync(sc) struct xl_softc *sc; { register int i; XL_SEL_WIN(4); MII_SET(XL_MII_DIR|XL_MII_DATA); for (i = 0; i < 32; i++) { MII_SET(XL_MII_CLK); DELAY(1); MII_CLR(XL_MII_CLK); DELAY(1); } return; } /* * Clock a series of bits through the MII. */ static void xl_mii_send(sc, bits, cnt) struct xl_softc *sc; u_int32_t bits; int cnt; { int i; XL_SEL_WIN(4); MII_CLR(XL_MII_CLK); for (i = (0x1 << (cnt - 1)); i; i >>= 1) { if (bits & i) { MII_SET(XL_MII_DATA); } else { MII_CLR(XL_MII_DATA); } DELAY(1); MII_CLR(XL_MII_CLK); DELAY(1); MII_SET(XL_MII_CLK); } } /* * Read an PHY register through the MII. 
*/ static int xl_mii_readreg(sc, frame) struct xl_softc *sc; struct xl_mii_frame *frame; { int i, ack; XL_LOCK(sc); /* * Set up frame for RX. */ frame->mii_stdelim = XL_MII_STARTDELIM; frame->mii_opcode = XL_MII_READOP; frame->mii_turnaround = 0; frame->mii_data = 0; /* * Select register window 4. */ XL_SEL_WIN(4); CSR_WRITE_2(sc, XL_W4_PHY_MGMT, 0); /* * Turn on data xmit. */ MII_SET(XL_MII_DIR); xl_mii_sync(sc); /* * Send command/address info. */ xl_mii_send(sc, frame->mii_stdelim, 2); xl_mii_send(sc, frame->mii_opcode, 2); xl_mii_send(sc, frame->mii_phyaddr, 5); xl_mii_send(sc, frame->mii_regaddr, 5); /* Idle bit */ MII_CLR((XL_MII_CLK|XL_MII_DATA)); DELAY(1); MII_SET(XL_MII_CLK); DELAY(1); /* Turn off xmit. */ MII_CLR(XL_MII_DIR); /* Check for ack */ MII_CLR(XL_MII_CLK); DELAY(1); MII_SET(XL_MII_CLK); DELAY(1); ack = CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA; /* * Now try reading data bits. If the ack failed, we still * need to clock through 16 cycles to keep the PHY(s) in sync. */ if (ack) { for(i = 0; i < 16; i++) { MII_CLR(XL_MII_CLK); DELAY(1); MII_SET(XL_MII_CLK); DELAY(1); } goto fail; } for (i = 0x8000; i; i >>= 1) { MII_CLR(XL_MII_CLK); DELAY(1); if (!ack) { if (CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA) frame->mii_data |= i; DELAY(1); } MII_SET(XL_MII_CLK); DELAY(1); } fail: MII_CLR(XL_MII_CLK); DELAY(1); MII_SET(XL_MII_CLK); DELAY(1); XL_UNLOCK(sc); if (ack) return(1); return(0); } /* * Write to a PHY register through the MII. */ static int xl_mii_writereg(sc, frame) struct xl_softc *sc; struct xl_mii_frame *frame; { XL_LOCK(sc); /* * Set up frame for TX. */ frame->mii_stdelim = XL_MII_STARTDELIM; frame->mii_opcode = XL_MII_WRITEOP; frame->mii_turnaround = XL_MII_TURNAROUND; /* * Select the window 4. */ XL_SEL_WIN(4); /* * Turn on data output. 
*/ MII_SET(XL_MII_DIR); xl_mii_sync(sc); xl_mii_send(sc, frame->mii_stdelim, 2); xl_mii_send(sc, frame->mii_opcode, 2); xl_mii_send(sc, frame->mii_phyaddr, 5); xl_mii_send(sc, frame->mii_regaddr, 5); xl_mii_send(sc, frame->mii_turnaround, 2); xl_mii_send(sc, frame->mii_data, 16); /* Idle bit. */ MII_SET(XL_MII_CLK); DELAY(1); MII_CLR(XL_MII_CLK); DELAY(1); /* * Turn off xmit. */ MII_CLR(XL_MII_DIR); XL_UNLOCK(sc); return(0); } static int xl_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct xl_softc *sc; struct xl_mii_frame frame; sc = device_get_softc(dev); /* * Pretend that PHYs are only available at MII address 24. * This is to guard against problems with certain 3Com ASIC * revisions that incorrectly map the internal transceiver * control registers at all MII addresses. This can cause * the miibus code to attach the same PHY several times over. */ if ((!(sc->xl_flags & XL_FLAG_PHYOK)) && phy != 24) return(0); bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; xl_mii_readreg(sc, &frame); return(frame.mii_data); } static int xl_miibus_writereg(dev, phy, reg, data) device_t dev; int phy, reg, data; { struct xl_softc *sc; struct xl_mii_frame frame; sc = device_get_softc(dev); if ((!(sc->xl_flags & XL_FLAG_PHYOK)) && phy != 24) return(0); bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; frame.mii_data = data; xl_mii_writereg(sc, &frame); return(0); } static void xl_miibus_statchg(dev) device_t dev; { struct xl_softc *sc; struct mii_data *mii; sc = device_get_softc(dev); mii = device_get_softc(sc->xl_miibus); XL_LOCK(sc); xl_setcfg(sc); /* Set ASIC's duplex mode to match the PHY. */ XL_SEL_WIN(3); if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX); else CSR_WRITE_1(sc, XL_W3_MAC_CTRL, (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX)); XL_UNLOCK(sc); return; } /* * Special support for the 3c905B-COMBO. 
This card has 10/100 support * plus BNC and AUI ports. This means we will have both an miibus attached * plus some non-MII media settings. In order to allow this, we have to * add the extra media to the miibus's ifmedia struct, but we can't do * that during xl_attach() because the miibus hasn't been attached yet. * So instead, we wait until the miibus probe/attach is done, at which * point we will get a callback telling is that it's safe to add our * extra media. */ static void xl_miibus_mediainit(dev) device_t dev; { struct xl_softc *sc; struct mii_data *mii; struct ifmedia *ifm; sc = device_get_softc(dev); mii = device_get_softc(sc->xl_miibus); ifm = &mii->mii_media; XL_LOCK(sc); if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) { /* * Check for a 10baseFL board in disguise. */ if (sc->xl_type == XL_TYPE_905B && sc->xl_media == XL_MEDIAOPT_10FL) { if (bootverbose) printf("xl%d: found 10baseFL\n", sc->xl_unit); ifmedia_add(ifm, IFM_ETHER|IFM_10_FL, 0, NULL); ifmedia_add(ifm, IFM_ETHER|IFM_10_FL|IFM_HDX, 0, NULL); if (sc->xl_caps & XL_CAPS_FULL_DUPLEX) ifmedia_add(ifm, IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL); } else { if (bootverbose) printf("xl%d: found AUI\n", sc->xl_unit); ifmedia_add(ifm, IFM_ETHER|IFM_10_5, 0, NULL); } } if (sc->xl_media & XL_MEDIAOPT_BNC) { if (bootverbose) printf("xl%d: found BNC\n", sc->xl_unit); ifmedia_add(ifm, IFM_ETHER|IFM_10_2, 0, NULL); } XL_UNLOCK(sc); return; } /* * The EEPROM is slow: give it time to come ready after issuing * it a command. */ static int xl_eeprom_wait(sc) struct xl_softc *sc; { int i; for (i = 0; i < 100; i++) { if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY) DELAY(162); else break; } if (i == 100) { printf("xl%d: eeprom failed to come ready\n", sc->xl_unit); return(1); } return(0); } /* * Read a sequence of words from the EEPROM. Note that ethernet address * data is stored in the EEPROM in network byte order. 
*/ static int xl_read_eeprom(sc, dest, off, cnt, swap) struct xl_softc *sc; caddr_t dest; int off; int cnt; int swap; { int err = 0, i; u_int16_t word = 0, *ptr; #define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F)) #define EEPROM_8BIT_OFFSET(A) ((A) & 0x003F) /* WARNING! DANGER! * It's easy to accidentally overwrite the rom content! * Note: the 3c575 uses 8bit EEPROM offsets. */ XL_SEL_WIN(0); if (xl_eeprom_wait(sc)) return(1); if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30) off += 0x30; for (i = 0; i < cnt; i++) { if (sc->xl_flags & XL_FLAG_8BITROM) CSR_WRITE_2(sc, XL_W0_EE_CMD, XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i)); else CSR_WRITE_2(sc, XL_W0_EE_CMD, XL_EE_READ | EEPROM_5BIT_OFFSET(off + i)); err = xl_eeprom_wait(sc); if (err) break; word = CSR_READ_2(sc, XL_W0_EE_DATA); ptr = (u_int16_t *)(dest + (i * 2)); if (swap) *ptr = ntohs(word); else *ptr = word; } return(err ? 1 : 0); } /* * This routine is taken from the 3Com Etherlink XL manual, * page 10-7. It calculates a CRC of the supplied multicast * group address and returns the lower 8 bits, which are used * as the multicast filter position. * Note: the 3c905B currently only supports a 64-bit hash table, * which means we really only need 6 bits, but the manual indicates * that future chip revisions will have a 256-bit hash table, * hence the routine is set up to calculate 8 bits of position * info in case we need it some day. * Note II, The Sequel: _CURRENT_ versions of the 3c905B have a * 256 bit hash table. This means we have to use all 8 bits regardless. * On older cards, the upper 2 bits will be ignored. Grrrr.... */ static u_int8_t xl_calchash(addr) caddr_t addr; { u_int32_t crc, carry; int i, j; u_int8_t c; /* Compute CRC for the address value. */ crc = 0xFFFFFFFF; /* initial value */ for (i = 0; i < 6; i++) { c = *(addr + i); for (j = 0; j < 8; j++) { carry = ((crc & 0x80000000) ? 
1 : 0) ^ (c & 0x01); crc <<= 1; c >>= 1; if (carry) crc = (crc ^ 0x04c11db6) | carry; } } /* return the filter bit position */ return(crc & 0x000000FF); } /* * NICs older than the 3c905B have only one multicast option, which * is to enable reception of all multicast frames. */ static void xl_setmulti(sc) struct xl_softc *sc; { struct ifnet *ifp; struct ifmultiaddr *ifma; u_int8_t rxfilt; int mcnt = 0; ifp = &sc->arpcom.ac_if; XL_SEL_WIN(5); rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER); if (ifp->if_flags & IFF_ALLMULTI) { rxfilt |= XL_RXFILTER_ALLMULTI; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); return; } - for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; - ifma = ifma->ifma_link.le_next) + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) mcnt++; if (mcnt) rxfilt |= XL_RXFILTER_ALLMULTI; else rxfilt &= ~XL_RXFILTER_ALLMULTI; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); return; } /* * 3c905B adapters have a hash filter that we can program. */ static void xl_setmulti_hash(sc) struct xl_softc *sc; { struct ifnet *ifp; int h = 0, i; struct ifmultiaddr *ifma; u_int8_t rxfilt; int mcnt = 0; ifp = &sc->arpcom.ac_if; XL_SEL_WIN(5); rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER); if (ifp->if_flags & IFF_ALLMULTI) { rxfilt |= XL_RXFILTER_ALLMULTI; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); return; } else rxfilt &= ~XL_RXFILTER_ALLMULTI; /* first, zot all the existing hash bits */ for (i = 0; i < XL_HASHFILT_SIZE; i++) CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|i); /* now program new ones */ - for (ifma = ifp->if_multiaddrs.lh_first; ifma != NULL; - ifma = ifma->ifma_link.le_next) { + LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = xl_calchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|XL_HASH_SET|h); mcnt++; } if (mcnt) rxfilt |= XL_RXFILTER_MULTIHASH; else rxfilt &= ~XL_RXFILTER_MULTIHASH; CSR_WRITE_2(sc, XL_COMMAND, 
XL_CMD_RX_SET_FILT|rxfilt); return; } #ifdef notdef static void xl_testpacket(sc) struct xl_softc *sc; { struct mbuf *m; struct ifnet *ifp; ifp = &sc->arpcom.ac_if; MGETHDR(m, M_DONTWAIT, MT_DATA); if (m == NULL) return; bcopy(&sc->arpcom.ac_enaddr, mtod(m, struct ether_header *)->ether_dhost, ETHER_ADDR_LEN); bcopy(&sc->arpcom.ac_enaddr, mtod(m, struct ether_header *)->ether_shost, ETHER_ADDR_LEN); mtod(m, struct ether_header *)->ether_type = htons(3); mtod(m, unsigned char *)[14] = 0; mtod(m, unsigned char *)[15] = 0; mtod(m, unsigned char *)[16] = 0xE3; m->m_len = m->m_pkthdr.len = sizeof(struct ether_header) + 3; IF_ENQUEUE(&ifp->if_snd, m); xl_start(ifp); return; } #endif static void xl_setcfg(sc) struct xl_softc *sc; { u_int32_t icfg; XL_SEL_WIN(3); icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG); icfg &= ~XL_ICFG_CONNECTOR_MASK; if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BT4) icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS); if (sc->xl_media & XL_MEDIAOPT_BTX) icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS); CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP); return; } static void xl_setmode(sc, media) struct xl_softc *sc; int media; { u_int32_t icfg; u_int16_t mediastat; printf("xl%d: selecting ", sc->xl_unit); XL_SEL_WIN(4); mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS); XL_SEL_WIN(3); icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG); if (sc->xl_media & XL_MEDIAOPT_BT) { if (IFM_SUBTYPE(media) == IFM_10_T) { printf("10baseT transceiver, "); sc->xl_xcvr = XL_XCVR_10BT; icfg &= ~XL_ICFG_CONNECTOR_MASK; icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS); mediastat |= XL_MEDIASTAT_LINKBEAT| XL_MEDIASTAT_JABGUARD; mediastat &= ~XL_MEDIASTAT_SQEENB; } } if (sc->xl_media & XL_MEDIAOPT_BFX) { if (IFM_SUBTYPE(media) == IFM_100_FX) { printf("100baseFX port, "); sc->xl_xcvr = XL_XCVR_100BFX; icfg &= ~XL_ICFG_CONNECTOR_MASK; icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS); mediastat |= XL_MEDIASTAT_LINKBEAT; mediastat 
&= ~XL_MEDIASTAT_SQEENB; } } if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) { if (IFM_SUBTYPE(media) == IFM_10_5) { printf("AUI port, "); sc->xl_xcvr = XL_XCVR_AUI; icfg &= ~XL_ICFG_CONNECTOR_MASK; icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS); mediastat &= ~(XL_MEDIASTAT_LINKBEAT| XL_MEDIASTAT_JABGUARD); mediastat |= ~XL_MEDIASTAT_SQEENB; } if (IFM_SUBTYPE(media) == IFM_10_FL) { printf("10baseFL transceiver, "); sc->xl_xcvr = XL_XCVR_AUI; icfg &= ~XL_ICFG_CONNECTOR_MASK; icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS); mediastat &= ~(XL_MEDIASTAT_LINKBEAT| XL_MEDIASTAT_JABGUARD); mediastat |= ~XL_MEDIASTAT_SQEENB; } } if (sc->xl_media & XL_MEDIAOPT_BNC) { if (IFM_SUBTYPE(media) == IFM_10_2) { printf("BNC port, "); sc->xl_xcvr = XL_XCVR_COAX; icfg &= ~XL_ICFG_CONNECTOR_MASK; icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS); mediastat &= ~(XL_MEDIASTAT_LINKBEAT| XL_MEDIASTAT_JABGUARD| XL_MEDIASTAT_SQEENB); } } if ((media & IFM_GMASK) == IFM_FDX || IFM_SUBTYPE(media) == IFM_100_FX) { printf("full duplex\n"); XL_SEL_WIN(3); CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX); } else { printf("half duplex\n"); XL_SEL_WIN(3); CSR_WRITE_1(sc, XL_W3_MAC_CTRL, (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX)); } if (IFM_SUBTYPE(media) == IFM_10_2) CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START); else CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP); CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg); XL_SEL_WIN(4); CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat); DELAY(800); XL_SEL_WIN(7); return; } static void xl_reset(sc) struct xl_softc *sc; { register int i; XL_SEL_WIN(0); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET | ((sc->xl_flags & XL_FLAG_WEIRDRESET) ? XL_RESETOPT_DISADVFD:0)); for (i = 0; i < XL_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY)) break; } if (i == XL_TIMEOUT) printf("xl%d: reset didn't complete\n", sc->xl_unit); DELAY(100000); /* Reset TX and RX. 
*/ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET); xl_wait(sc); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); xl_wait(sc); if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR || sc->xl_flags & XL_FLAG_INVERT_MII_PWR) { XL_SEL_WIN(2); CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS, CSR_READ_2(sc, XL_W2_RESET_OPTIONS) | ((sc->xl_flags & XL_FLAG_INVERT_LED_PWR)?XL_RESETOPT_INVERT_LED:0) | ((sc->xl_flags & XL_FLAG_INVERT_MII_PWR)?XL_RESETOPT_INVERT_MII:0) ); } /* Wait a little while for the chip to get its brains in order. */ DELAY(100000); return; } /* * Probe for a 3Com Etherlink XL chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int xl_probe(dev) device_t dev; { struct xl_type *t; t = xl_devs; while(t->xl_name != NULL) { if ((pci_get_vendor(dev) == t->xl_vid) && (pci_get_device(dev) == t->xl_did)) { device_set_desc(dev, t->xl_name); return(0); } t++; } return(ENXIO); } /* * This routine is a kludge to work around possible hardware faults * or manufacturing defects that can cause the media options register * (or reset options register, as it's called for the first generation * 3c90x adapters) to return an incorrect result. I have encountered * one Dell Latitude laptop docking station with an integrated 3c905-TX * which doesn't have any of the 'mediaopt' bits set. This screws up * the attach routine pretty badly because it doesn't know what media * to look for. If we find ourselves in this predicament, this routine * will try to guess the media options values and warn the user of a * possible manufacturing defect with his adapter/system/whatever. */ static void xl_mediacheck(sc) struct xl_softc *sc; { /* * If some of the media options bits are set, assume they are * correct. If not, try to figure it out down below. * XXX I should check for 10baseFL, but I don't have an adapter * to test with. */ if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) { /* * Check the XCVR value. 
If it's not in the normal range * of values, we need to fake it up here. */ if (sc->xl_xcvr <= XL_XCVR_AUTO) return; else { printf("xl%d: bogus xcvr value " "in EEPROM (%x)\n", sc->xl_unit, sc->xl_xcvr); printf("xl%d: choosing new default based " "on card type\n", sc->xl_unit); } } else { if (sc->xl_type == XL_TYPE_905B && sc->xl_media & XL_MEDIAOPT_10FL) return; printf("xl%d: WARNING: no media options bits set in " "the media options register!!\n", sc->xl_unit); printf("xl%d: this could be a manufacturing defect in " "your adapter or system\n", sc->xl_unit); printf("xl%d: attempting to guess media type; you " "should probably consult your vendor\n", sc->xl_unit); } xl_choose_xcvr(sc, 1); return; } static void xl_choose_xcvr(sc, verbose) struct xl_softc *sc; int verbose; { u_int16_t devid; /* * Read the device ID from the EEPROM. * This is what's loaded into the PCI device ID register, so it has * to be correct otherwise we wouldn't have gotten this far. */ xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0); switch(devid) { case TC_DEVICEID_BOOMERANG_10BT: /* 3c900-TPO */ case TC_DEVICEID_KRAKATOA_10BT: /* 3c900B-TPO */ sc->xl_media = XL_MEDIAOPT_BT; sc->xl_xcvr = XL_XCVR_10BT; if (verbose) printf("xl%d: guessing 10BaseT " "transceiver\n", sc->xl_unit); break; case TC_DEVICEID_BOOMERANG_10BT_COMBO: /* 3c900-COMBO */ case TC_DEVICEID_KRAKATOA_10BT_COMBO: /* 3c900B-COMBO */ sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI; sc->xl_xcvr = XL_XCVR_10BT; if (verbose) printf("xl%d: guessing COMBO " "(AUI/BNC/TP)\n", sc->xl_unit); break; case TC_DEVICEID_KRAKATOA_10BT_TPC: /* 3c900B-TPC */ sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC; sc->xl_xcvr = XL_XCVR_10BT; if (verbose) printf("xl%d: guessing TPC (BNC/TP)\n", sc->xl_unit); break; case TC_DEVICEID_CYCLONE_10FL: /* 3c900B-FL */ sc->xl_media = XL_MEDIAOPT_10FL; sc->xl_xcvr = XL_XCVR_AUI; if (verbose) printf("xl%d: guessing 10baseFL\n", sc->xl_unit); break; case TC_DEVICEID_BOOMERANG_10_100BT: /* 
3c905-TX */ case TC_DEVICEID_HURRICANE_556: /* 3c556 */ case TC_DEVICEID_HURRICANE_556B: /* 3c556B */ case TC_DEVICEID_HURRICANE_575A: /* 3c575TX */ case TC_DEVICEID_HURRICANE_575B: /* 3c575B */ case TC_DEVICEID_HURRICANE_575C: /* 3c575C */ case TC_DEVICEID_HURRICANE_656C: /* 3c565C */ sc->xl_media = XL_MEDIAOPT_MII; sc->xl_xcvr = XL_XCVR_MII; if (verbose) printf("xl%d: guessing MII\n", sc->xl_unit); break; case TC_DEVICEID_BOOMERANG_100BT4: /* 3c905-T4 */ case TC_DEVICEID_CYCLONE_10_100BT4: /* 3c905B-T4 */ sc->xl_media = XL_MEDIAOPT_BT4; sc->xl_xcvr = XL_XCVR_MII; if (verbose) printf("xl%d: guessing 100BaseT4/MII\n", sc->xl_unit); break; case TC_DEVICEID_HURRICANE_10_100BT: /* 3c905B-TX */ case TC_DEVICEID_HURRICANE_10_100BT_SERV:/*3c980-TX */ case TC_DEVICEID_TORNADO_10_100BT_SERV: /* 3c980C-TX */ case TC_DEVICEID_HURRICANE_SOHO100TX: /* 3cSOHO100-TX */ case TC_DEVICEID_TORNADO_10_100BT: /* 3c905C-TX */ case TC_DEVICEID_TORNADO_HOMECONNECT: /* 3c450-TX */ sc->xl_media = XL_MEDIAOPT_BTX; sc->xl_xcvr = XL_XCVR_AUTO; if (verbose) printf("xl%d: guessing 10/100 internal\n", sc->xl_unit); break; case TC_DEVICEID_CYCLONE_10_100_COMBO: /* 3c905B-COMBO */ sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI; sc->xl_xcvr = XL_XCVR_AUTO; if (verbose) printf("xl%d: guessing 10/100 " "plus BNC/AUI\n", sc->xl_unit); break; default: printf("xl%d: unknown device ID: %x -- " "defaulting to 10baseT\n", sc->xl_unit, devid); sc->xl_media = XL_MEDIAOPT_BT; break; } return; } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. 
 */
static int xl_attach(dev)
	device_t dev;
{
	u_char			eaddr[ETHER_ADDR_LEN];
	u_int32_t		command;
	struct xl_softc		*sc;
	struct ifnet		*ifp;
	int			media = IFM_ETHER|IFM_100_TX|IFM_FDX;
	int			unit, error = 0, rid;

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);

	/* MTX_RECURSE: attach paths re-enter via xl_init()/xl_stop(). */
	mtx_init(&sc->xl_mtx, device_get_nameunit(dev), MTX_DEF | MTX_RECURSE);
	XL_LOCK(sc);

	/*
	 * Per-variant quirk flags, keyed off the PCI device ID
	 * (CardBus 3c556/3c575 family needs the function registers,
	 * inverted LED/MII power bits, etc.).
	 */
	sc->xl_flags = 0;
	if (pci_get_device(dev) == TC_DEVICEID_HURRICANE_556 ||
	    pci_get_device(dev) == TC_DEVICEID_HURRICANE_556B)
		sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK |
		    XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_WEIRDRESET |
		    XL_FLAG_INVERT_LED_PWR | XL_FLAG_INVERT_MII_PWR;
	if (pci_get_device(dev) == TC_DEVICEID_HURRICANE_556)
		sc->xl_flags |= XL_FLAG_8BITROM;

	if (pci_get_device(dev) == TC_DEVICEID_HURRICANE_575A ||
	    pci_get_device(dev) == TC_DEVICEID_HURRICANE_575B ||
	    pci_get_device(dev) == TC_DEVICEID_HURRICANE_575C ||
	    pci_get_device(dev) == TC_DEVICEID_HURRICANE_656C)
		sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK |
		    XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_8BITROM;
	if (pci_get_device(dev) == TC_DEVICEID_HURRICANE_575B)
		sc->xl_flags |= XL_FLAG_INVERT_LED_PWR;
	if (pci_get_device(dev) == TC_DEVICEID_HURRICANE_575C)
		sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;
	if (pci_get_device(dev) == TC_DEVICEID_HURRICANE_656C)
		sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;

	/*
	 * If this is a 3c905B, we have to check one extra thing.
	 * The 905B supports power management and may be placed in
	 * a low-power mode (D3 mode), typically by certain operating
	 * systems which shall not be named. The PCI BIOS is supposed
	 * to reset the NIC and bring it out of low-power mode, but
	 * some do not. Consequently, we have to see if this chip
	 * supports power management, and if so, make sure it's not
	 * in low-power mode. If power management is available, the
	 * capid byte will be 0x01.
	 *
	 * I _think_ that what actually happens is that the chip
	 * loses its PCI configuration during the transition from
	 * D3 back to D0; this means that it should be possible for
	 * us to save the PCI iobase, membase and IRQ, put the chip
	 * back in the D0 state, then restore the PCI config ourselves.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		u_int32_t		iobase, membase, irq;

		/* Save important PCI config data. */
		iobase = pci_read_config(dev, XL_PCI_LOIO, 4);
		membase = pci_read_config(dev, XL_PCI_LOMEM, 4);
		irq = pci_read_config(dev, XL_PCI_INTLINE, 4);

		/* Reset the power state. */
		printf("xl%d: chip is in D%d power mode "
		    "-- setting to D0\n", unit,
		    pci_get_powerstate(dev));

		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		/* Restore PCI config data. */
		pci_write_config(dev, XL_PCI_LOIO, iobase, 4);
		pci_write_config(dev, XL_PCI_LOMEM, membase, 4);
		pci_write_config(dev, XL_PCI_INTLINE, irq, 4);
	}

	/*
	 * Map control/status registers.  Read the command word back
	 * after writing it to confirm the enables actually stuck.
	 */
	command = pci_read_config(dev, PCIR_COMMAND, 4);
	command |= (PCIM_CMD_PORTEN|PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
	pci_write_config(dev, PCIR_COMMAND, command, 4);
	command = pci_read_config(dev, PCIR_COMMAND, 4);

#ifdef XL_USEIOSPACE
	if (!(command & PCIM_CMD_PORTEN)) {
		printf("xl%d: failed to enable I/O ports!\n", unit);
		error = ENXIO;
		goto fail;
	}
#else
	if (!(command & PCIM_CMD_MEMEN)) {
		printf("xl%d: failed to enable memory mapping!\n", unit);
		error = ENXIO;
		goto fail;
	}
#endif

	rid = XL_RID;
	sc->xl_res = bus_alloc_resource(dev, XL_RES, &rid,
	    0, ~0, 1, RF_ACTIVE);

	if (sc->xl_res == NULL) {
		printf ("xl%d: couldn't map ports/memory\n", unit);
		error = ENXIO;
		goto fail;
	}

	sc->xl_btag = rman_get_bustag(sc->xl_res);
	sc->xl_bhandle = rman_get_bushandle(sc->xl_res);

	/* CardBus variants expose extra "function" registers in BAR 2. */
	if (sc->xl_flags & XL_FLAG_FUNCREG) {
		rid = XL_PCI_FUNCMEM;
		sc->xl_fres = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
		    0, ~0, 1, RF_ACTIVE);

		if (sc->xl_fres == NULL) {
			printf ("xl%d: couldn't map ports/memory\n", unit);
			bus_release_resource(dev, XL_RES, XL_RID, sc->xl_res);
			error = ENXIO;
			goto fail;
		}

		sc->xl_ftag = rman_get_bustag(sc->xl_fres);
		sc->xl_fhandle = rman_get_bushandle(sc->xl_fres);
	}

	rid = 0;
	sc->xl_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->xl_irq == NULL) {
		printf("xl%d: couldn't map interrupt\n", unit);
		/* Unwind resources acquired so far, newest first. */
		if (sc->xl_fres != NULL)
			bus_release_resource(dev, SYS_RES_MEMORY,
			    XL_PCI_FUNCMEM, sc->xl_fres);
		bus_release_resource(dev, XL_RES, XL_RID, sc->xl_res);
		error = ENXIO;
		goto fail;
	}

	error = bus_setup_intr(dev, sc->xl_irq, INTR_TYPE_NET,
	    xl_intr, sc, &sc->xl_intrhand);

	if (error) {
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->xl_irq);
		if (sc->xl_fres != NULL)
			bus_release_resource(dev, SYS_RES_MEMORY,
			    XL_PCI_FUNCMEM, sc->xl_fres);
		bus_release_resource(dev, XL_RES, XL_RID, sc->xl_res);
		printf("xl%d: couldn't set up irq\n", unit);
		goto fail;
	}

	/* Reset the adapter. */
	xl_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	if (xl_read_eeprom(sc, (caddr_t)&eaddr, XL_EE_OEM_ADR0, 3, 1)) {
		printf("xl%d: failed to read station address\n", sc->xl_unit);
		bus_teardown_intr(dev, sc->xl_irq, sc->xl_intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->xl_irq);
		if (sc->xl_fres != NULL)
			bus_release_resource(dev, SYS_RES_MEMORY,
			    XL_PCI_FUNCMEM, sc->xl_fres);
		bus_release_resource(dev, XL_RES, XL_RID, sc->xl_res);
		error = ENXIO;
		goto fail;
	}

	/*
	 * A 3Com chip was detected. Inform the world.
	 */
	printf("xl%d: Ethernet address: %6D\n", unit, eaddr, ":");

	sc->xl_unit = unit;
	callout_handle_init(&sc->xl_stat_ch);
	bcopy(eaddr, (char *)&sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);

	/*
	 * Descriptor lists must be physically contiguous since the chip
	 * DMAs them; hence contigmalloc() rather than plain malloc().
	 */
	sc->xl_ldata = contigmalloc(sizeof(struct xl_list_data), M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->xl_ldata == NULL) {
		printf("xl%d: no memory for list buffers!\n", unit);
		bus_teardown_intr(dev, sc->xl_irq, sc->xl_intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->xl_irq);
		if (sc->xl_fres != NULL)
			bus_release_resource(dev, SYS_RES_MEMORY,
			    XL_PCI_FUNCMEM, sc->xl_fres);
		bus_release_resource(dev, XL_RES, XL_RID, sc->xl_res);
		error = ENXIO;
		goto fail;
	}

	bzero(sc->xl_ldata, sizeof(struct xl_list_data));

	/*
	 * Figure out the card type. 3c905B adapters have the
	 * 'supportsNoTxLength' bit set in the capabilities
	 * word in the EEPROM.
	 */
	xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0);
	if (sc->xl_caps & XL_CAPS_NO_TXLENGTH)
		sc->xl_type = XL_TYPE_905B;
	else
		sc->xl_type = XL_TYPE_90X;

	ifp = &sc->arpcom.ac_if;
	ifp->if_softc = sc;
	ifp->if_unit = unit;
	ifp->if_name = "xl";
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = xl_ioctl;
	ifp->if_output = ether_output;
	/* 90xB parts use the polled-descriptor transmit path. */
	if (sc->xl_type == XL_TYPE_905B)
		ifp->if_start = xl_start_90xB;
	else
		ifp->if_start = xl_start;
	ifp->if_watchdog = xl_watchdog;
	ifp->if_init = xl_init;
	ifp->if_baudrate = 10000000;
	ifp->if_snd.ifq_maxlen = XL_TX_LIST_CNT - 1;

	/*
	 * Now we have to see what sort of media we have.
	 * This includes probing for an MII interface and a
	 * possible PHY.
	 */
	XL_SEL_WIN(3);
	sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT);
	if (bootverbose)
		printf("xl%d: media options word: %x\n", sc->xl_unit,
		    sc->xl_media);

	xl_read_eeprom(sc, (char *)&sc->xl_xcvr, XL_EE_ICFG_0, 2, 0);
	sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK;
	sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS;

	xl_mediacheck(sc);

	if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX
	    || sc->xl_media & XL_MEDIAOPT_BT4) {
		if (bootverbose)
			printf("xl%d: found MII/AUTO\n", sc->xl_unit);
		xl_setcfg(sc);
		if (mii_phy_probe(dev, &sc->xl_miibus,
		    xl_ifmedia_upd, xl_ifmedia_sts)) {
			printf("xl%d: no PHY found!\n", sc->xl_unit);
			bus_teardown_intr(dev, sc->xl_irq, sc->xl_intrhand);
			bus_release_resource(dev, SYS_RES_IRQ, 0, sc->xl_irq);
			bus_release_resource(dev, XL_RES, XL_RID, sc->xl_res);
			contigfree(sc->xl_ldata,
			    sizeof(struct xl_list_data), M_DEVBUF);
			error = ENXIO;
			goto fail;
		}
		/* miibus handles media for MII/BTX/BT4; skip ifmedia setup. */
		goto done;
	}

	/*
	 * Sanity check. If the user has selected "auto" and this isn't
	 * a 10/100 card of some kind, we need to force the transceiver
	 * type to something sane.
	 */
	if (sc->xl_xcvr == XL_XCVR_AUTO)
		xl_choose_xcvr(sc, bootverbose);

	/*
	 * Do ifmedia setup.
	 */
	ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts);

	if (sc->xl_media & XL_MEDIAOPT_BT) {
		if (bootverbose)
			printf("xl%d: found 10baseT\n", sc->xl_unit);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
		if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
			ifmedia_add(&sc->ifmedia,
			    IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
	}

	if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
		/*
		 * Check for a 10baseFL board in disguise.
		 */
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			if (bootverbose)
				printf("xl%d: found 10baseFL\n", sc->xl_unit);
			ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL,
			    0, NULL);
			ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL|IFM_HDX,
			    0, NULL);
			if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
				ifmedia_add(&sc->ifmedia,
				    IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
		} else {
			if (bootverbose)
				printf("xl%d: found AUI\n", sc->xl_unit);
			ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
		}
	}

	if (sc->xl_media & XL_MEDIAOPT_BNC) {
		if (bootverbose)
			printf("xl%d: found BNC\n", sc->xl_unit);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL);
	}

	if (sc->xl_media & XL_MEDIAOPT_BFX) {
		if (bootverbose)
			printf("xl%d: found 100baseFX\n", sc->xl_unit);
		ifp->if_baudrate = 100000000;
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_FX, 0, NULL);
	}

	/* Choose a default media. */
	switch(sc->xl_xcvr) {
	case XL_XCVR_10BT:
		media = IFM_ETHER|IFM_10_T;
		xl_setmode(sc, media);
		break;
	case XL_XCVR_AUI:
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			media = IFM_ETHER|IFM_10_FL;
			xl_setmode(sc, media);
		} else {
			media = IFM_ETHER|IFM_10_5;
			xl_setmode(sc, media);
		}
		break;
	case XL_XCVR_COAX:
		media = IFM_ETHER|IFM_10_2;
		xl_setmode(sc, media);
		break;
	case XL_XCVR_AUTO:
	case XL_XCVR_100BTX:
	case XL_XCVR_MII:
		/* Chosen by miibus */
		break;
	case XL_XCVR_100BFX:
		media = IFM_ETHER|IFM_100_FX;
		break;
	default:
		printf("xl%d: unknown XCVR type: %d\n", sc->xl_unit,
		    sc->xl_xcvr);
		/*
		 * This will probably be wrong, but it prevents
		 * the ifmedia code from panicking.
		 */
		media = IFM_ETHER|IFM_10_T;
		break;
	}

	if (sc->xl_miibus == NULL)
		ifmedia_set(&sc->ifmedia, media);

done:
	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
	XL_UNLOCK(sc);
	return(0);

fail:
	XL_UNLOCK(sc);
	mtx_destroy(&sc->xl_mtx);
	return(error);
}

/*
 * Detach the interface: stop the chip, tear down miibus children,
 * release bus resources and free the descriptor memory.
 * Returns 0.  Mirrors the unwind order of xl_attach().
 */
static int xl_detach(dev)
	device_t dev;
{
	struct xl_softc		*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	XL_LOCK(sc);
	ifp = &sc->arpcom.ac_if;

	xl_reset(sc);
	xl_stop(sc);
	ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);

	/* Delete any miibus and phy devices attached to this interface */
	if (sc->xl_miibus != NULL) {
		bus_generic_detach(dev);
		device_delete_child(dev, sc->xl_miibus);
	}

	bus_teardown_intr(dev, sc->xl_irq, sc->xl_intrhand);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->xl_irq);
	if (sc->xl_fres != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    XL_PCI_FUNCMEM, sc->xl_fres);
	bus_release_resource(dev, XL_RES, XL_RID, sc->xl_res);

	ifmedia_removeall(&sc->ifmedia);
	contigfree(sc->xl_ldata, sizeof(struct xl_list_data), M_DEVBUF);

	XL_UNLOCK(sc);
	mtx_destroy(&sc->xl_mtx);

	return(0);
}

/*
 * Initialize the transmit descriptors.
 * (3c900/3c905 variant: descriptors are kept on a NULL-terminated
 * free list; the head/tail queue starts out empty.)
 */
static int xl_list_tx_init(sc)
	struct xl_softc *sc;
{
	struct xl_chain_data	*cd;
	struct xl_list_data	*ld;
	int			i;

	cd = &sc->xl_cdata;
	ld = sc->xl_ldata;
	for (i = 0; i < XL_TX_LIST_CNT; i++) {
		cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
		if (i == (XL_TX_LIST_CNT - 1))
			cd->xl_tx_chain[i].xl_next = NULL;
		else
			cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
	}

	cd->xl_tx_free = &cd->xl_tx_chain[0];
	cd->xl_tx_tail = cd->xl_tx_head = NULL;

	return(0);
}

/*
 * Initialize the transmit descriptors.
*/ static int xl_list_tx_init_90xB(sc) struct xl_softc *sc; { struct xl_chain_data *cd; struct xl_list_data *ld; int i; cd = &sc->xl_cdata; ld = sc->xl_ldata; for (i = 0; i < XL_TX_LIST_CNT; i++) { cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i]; cd->xl_tx_chain[i].xl_phys = vtophys(&ld->xl_tx_list[i]); if (i == (XL_TX_LIST_CNT - 1)) cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[0]; else cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1]; if (i == 0) cd->xl_tx_chain[i].xl_prev = &cd->xl_tx_chain[XL_TX_LIST_CNT - 1]; else cd->xl_tx_chain[i].xl_prev = &cd->xl_tx_chain[i - 1]; } bzero((char *)ld->xl_tx_list, sizeof(struct xl_list) * XL_TX_LIST_CNT); ld->xl_tx_list[0].xl_status = XL_TXSTAT_EMPTY; cd->xl_tx_prod = 1; cd->xl_tx_cons = 1; cd->xl_tx_cnt = 0; return(0); } /* * Initialize the RX descriptors and allocate mbufs for them. Note that * we arrange the descriptors in a closed ring, so that the last descriptor * points back to the first. */ static int xl_list_rx_init(sc) struct xl_softc *sc; { struct xl_chain_data *cd; struct xl_list_data *ld; int i; cd = &sc->xl_cdata; ld = sc->xl_ldata; for (i = 0; i < XL_RX_LIST_CNT; i++) { cd->xl_rx_chain[i].xl_ptr = (struct xl_list_onefrag *)&ld->xl_rx_list[i]; if (xl_newbuf(sc, &cd->xl_rx_chain[i]) == ENOBUFS) return(ENOBUFS); if (i == (XL_RX_LIST_CNT - 1)) { cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[0]; ld->xl_rx_list[i].xl_next = vtophys(&ld->xl_rx_list[0]); } else { cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[i + 1]; ld->xl_rx_list[i].xl_next = vtophys(&ld->xl_rx_list[i + 1]); } } cd->xl_rx_head = &cd->xl_rx_chain[0]; return(0); } /* * Initialize an RX descriptor and attach an MBUF cluster. 
*/ static int xl_newbuf(sc, c) struct xl_softc *sc; struct xl_chain_onefrag *c; { struct mbuf *m_new = NULL; MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("xl%d: no memory for rx list -- " "packet dropped!\n", sc->xl_unit); return(ENOBUFS); } MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { printf("xl%d: no memory for rx list -- " "packet dropped!\n", sc->xl_unit); m_freem(m_new); return(ENOBUFS); } m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; /* Force longword alignment for packet payload. */ m_adj(m_new, ETHER_ALIGN); c->xl_mbuf = m_new; c->xl_ptr->xl_frag.xl_addr = vtophys(mtod(m_new, caddr_t)); c->xl_ptr->xl_frag.xl_len = MCLBYTES | XL_LAST_FRAG; c->xl_ptr->xl_status = 0; return(0); } static int xl_rx_resync(sc) struct xl_softc *sc; { struct xl_chain_onefrag *pos; int i; pos = sc->xl_cdata.xl_rx_head; for (i = 0; i < XL_RX_LIST_CNT; i++) { if (pos->xl_ptr->xl_status) break; pos = pos->xl_next; } if (i == XL_RX_LIST_CNT) return(0); sc->xl_cdata.xl_rx_head = pos; return(EAGAIN); } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. */ static void xl_rxeof(sc) struct xl_softc *sc; { struct ether_header *eh; struct mbuf *m; struct ifnet *ifp; struct xl_chain_onefrag *cur_rx; int total_len = 0; u_int16_t rxstat; ifp = &sc->arpcom.ac_if; again: while((rxstat = sc->xl_cdata.xl_rx_head->xl_ptr->xl_status)) { cur_rx = sc->xl_cdata.xl_rx_head; sc->xl_cdata.xl_rx_head = cur_rx->xl_next; /* * If an error occurs, update stats, clear the * status word and leave the mbuf cluster in place: * it should simply get re-used next time this descriptor * comes up in the ring. */ if (rxstat & XL_RXSTAT_UP_ERROR) { ifp->if_ierrors++; cur_rx->xl_ptr->xl_status = 0; continue; } /* * If there error bit was not set, the upload complete * bit should be set which means we have a valid packet. * If not, something truly strange has happened. 
*/ if (!(rxstat & XL_RXSTAT_UP_CMPLT)) { printf("xl%d: bad receive status -- " "packet dropped", sc->xl_unit); ifp->if_ierrors++; cur_rx->xl_ptr->xl_status = 0; continue; } /* No errors; receive the packet. */ m = cur_rx->xl_mbuf; total_len = cur_rx->xl_ptr->xl_status & XL_RXSTAT_LENMASK; /* * Try to conjure up a new mbuf cluster. If that * fails, it means we have an out of memory condition and * should leave the buffer in place and continue. This will * result in a lost packet, but there's little else we * can do in this situation. */ if (xl_newbuf(sc, cur_rx) == ENOBUFS) { ifp->if_ierrors++; cur_rx->xl_ptr->xl_status = 0; continue; } ifp->if_ipackets++; eh = mtod(m, struct ether_header *); m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = total_len; /* Remove header from mbuf and pass it on. */ m_adj(m, sizeof(struct ether_header)); ether_input(ifp, eh, m); } /* * Handle the 'end of channel' condition. When the upload * engine hits the end of the RX ring, it will stall. This * is our cue to flush the RX ring, reload the uplist pointer * register and unstall the engine. * XXX This is actually a little goofy. With the ThunderLAN * chip, you get an interrupt when the receiver hits the end * of the receive ring, which tells you exactly when you * you need to reload the ring pointer. Here we have to * fake it. I'm mad at myself for not being clever enough * to avoid the use of a goto here. */ if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 || CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL); xl_wait(sc); CSR_WRITE_4(sc, XL_UPLIST_PTR, vtophys(&sc->xl_ldata->xl_rx_list[0])); sc->xl_cdata.xl_rx_head = &sc->xl_cdata.xl_rx_chain[0]; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL); goto again; } return; } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. 
*/ static void xl_txeof(sc) struct xl_softc *sc; { struct xl_chain *cur_tx; struct ifnet *ifp; ifp = &sc->arpcom.ac_if; /* Clear the timeout timer. */ ifp->if_timer = 0; /* * Go through our tx list and free mbufs for those * frames that have been uploaded. Note: the 3c905B * sets a special bit in the status word to let us * know that a frame has been downloaded, but the * original 3c900/3c905 adapters don't do that. * Consequently, we have to use a different test if * xl_type != XL_TYPE_905B. */ while(sc->xl_cdata.xl_tx_head != NULL) { cur_tx = sc->xl_cdata.xl_tx_head; if (CSR_READ_4(sc, XL_DOWNLIST_PTR)) break; sc->xl_cdata.xl_tx_head = cur_tx->xl_next; m_freem(cur_tx->xl_mbuf); cur_tx->xl_mbuf = NULL; ifp->if_opackets++; cur_tx->xl_next = sc->xl_cdata.xl_tx_free; sc->xl_cdata.xl_tx_free = cur_tx; } if (sc->xl_cdata.xl_tx_head == NULL) { ifp->if_flags &= ~IFF_OACTIVE; sc->xl_cdata.xl_tx_tail = NULL; } else { if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED || !CSR_READ_4(sc, XL_DOWNLIST_PTR)) { CSR_WRITE_4(sc, XL_DOWNLIST_PTR, vtophys(sc->xl_cdata.xl_tx_head->xl_ptr)); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); } } return; } static void xl_txeof_90xB(sc) struct xl_softc *sc; { struct xl_chain *cur_tx = NULL; struct ifnet *ifp; int idx; ifp = &sc->arpcom.ac_if; idx = sc->xl_cdata.xl_tx_cons; while(idx != sc->xl_cdata.xl_tx_prod) { cur_tx = &sc->xl_cdata.xl_tx_chain[idx]; if (!(cur_tx->xl_ptr->xl_status & XL_TXSTAT_DL_COMPLETE)) break; if (cur_tx->xl_mbuf != NULL) { m_freem(cur_tx->xl_mbuf); cur_tx->xl_mbuf = NULL; } ifp->if_opackets++; sc->xl_cdata.xl_tx_cnt--; XL_INC(idx, XL_TX_LIST_CNT); ifp->if_timer = 0; } sc->xl_cdata.xl_tx_cons = idx; if (cur_tx != NULL) ifp->if_flags &= ~IFF_OACTIVE; return; } /* * TX 'end of channel' interrupt handler. Actually, we should * only get a 'TX complete' interrupt if there's a transmit error, * so this is really TX error handler. 
*/ static void xl_txeoc(sc) struct xl_softc *sc; { u_int8_t txstat; while((txstat = CSR_READ_1(sc, XL_TX_STATUS))) { if (txstat & XL_TXSTATUS_UNDERRUN || txstat & XL_TXSTATUS_JABBER || txstat & XL_TXSTATUS_RECLAIM) { printf("xl%d: transmission error: %x\n", sc->xl_unit, txstat); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); xl_wait(sc); if (sc->xl_type == XL_TYPE_905B) { if (sc->xl_cdata.xl_tx_cnt) { int i; struct xl_chain *c; i = sc->xl_cdata.xl_tx_cons; c = &sc->xl_cdata.xl_tx_chain[i]; CSR_WRITE_4(sc, XL_DOWNLIST_PTR, c->xl_phys); CSR_WRITE_1(sc, XL_DOWN_POLL, 64); } } else { if (sc->xl_cdata.xl_tx_head != NULL) CSR_WRITE_4(sc, XL_DOWNLIST_PTR, vtophys(sc->xl_cdata.xl_tx_head->xl_ptr)); } /* * Remember to set this for the * first generation 3c90X chips. */ CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8); if (txstat & XL_TXSTATUS_UNDERRUN && sc->xl_tx_thresh < XL_PACKET_SIZE) { sc->xl_tx_thresh += XL_MIN_FRAMELEN; printf("xl%d: tx underrun, increasing tx start" " threshold to %d bytes\n", sc->xl_unit, sc->xl_tx_thresh); } CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh); if (sc->xl_type == XL_TYPE_905B) { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4)); } CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); } else { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); } /* * Write an arbitrary byte to the TX_STATUS register * to clear this interrupt/error and advance to the next. 
*/ CSR_WRITE_1(sc, XL_TX_STATUS, 0x01); } return; } static void xl_intr(arg) void *arg; { struct xl_softc *sc; struct ifnet *ifp; u_int16_t status; sc = arg; XL_LOCK(sc); ifp = &sc->arpcom.ac_if; while((status = CSR_READ_2(sc, XL_STATUS)) & XL_INTRS && status != 0xFFFF) { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|(status & XL_INTRS)); if (status & XL_STAT_UP_COMPLETE) { int curpkts; curpkts = ifp->if_ipackets; xl_rxeof(sc); if (curpkts == ifp->if_ipackets) { while (xl_rx_resync(sc)) xl_rxeof(sc); } } if (status & XL_STAT_DOWN_COMPLETE) { if (sc->xl_type == XL_TYPE_905B) xl_txeof_90xB(sc); else xl_txeof(sc); } if (status & XL_STAT_TX_COMPLETE) { ifp->if_oerrors++; xl_txeoc(sc); } if (status & XL_STAT_ADFAIL) { xl_reset(sc); xl_init(sc); } if (status & XL_STAT_STATSOFLOW) { sc->xl_stats_no_timeout = 1; xl_stats_update(sc); sc->xl_stats_no_timeout = 0; } } if (ifp->if_snd.ifq_head != NULL) (*ifp->if_start)(ifp); XL_UNLOCK(sc); return; } static void xl_stats_update(xsc) void *xsc; { struct xl_softc *sc; struct ifnet *ifp; struct xl_stats xl_stats; u_int8_t *p; int i; struct mii_data *mii = NULL; bzero((char *)&xl_stats, sizeof(struct xl_stats)); sc = xsc; ifp = &sc->arpcom.ac_if; if (sc->xl_miibus != NULL) mii = device_get_softc(sc->xl_miibus); p = (u_int8_t *)&xl_stats; /* Read all the stats registers. */ XL_SEL_WIN(6); for (i = 0; i < 16; i++) *p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i); ifp->if_ierrors += xl_stats.xl_rx_overrun; ifp->if_collisions += xl_stats.xl_tx_multi_collision + xl_stats.xl_tx_single_collision + xl_stats.xl_tx_late_collision; /* * Boomerang and cyclone chips have an extra stats counter * in window 4 (BadSSD). We have to read this too in order * to clear out all the stats registers and avoid a statsoflow * interrupt. 
*/ XL_SEL_WIN(4); CSR_READ_1(sc, XL_W4_BADSSD); if (mii != NULL) mii_tick(mii); XL_SEL_WIN(7); if (!sc->xl_stats_no_timeout) sc->xl_stat_ch = timeout(xl_stats_update, sc, hz); return; } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. */ static int xl_encap(sc, c, m_head) struct xl_softc *sc; struct xl_chain *c; struct mbuf *m_head; { int frag = 0; struct xl_frag *f = NULL; int total_len; struct mbuf *m; /* * Start packing the mbufs in this chain into * the fragment pointers. Stop when we run out * of fragments or hit the end of the mbuf chain. */ m = m_head; total_len = 0; for (m = m_head, frag = 0; m != NULL; m = m->m_next) { if (m->m_len != 0) { if (frag == XL_MAXFRAGS) break; total_len+= m->m_len; c->xl_ptr->xl_frag[frag].xl_addr = vtophys(mtod(m, vm_offset_t)); c->xl_ptr->xl_frag[frag].xl_len = m->m_len; frag++; } } /* * Handle special case: we used up all 63 fragments, * but we have more mbufs left in the chain. Copy the * data into an mbuf cluster. Note that we don't * bother clearing the values in the other fragment * pointers/counters; it wouldn't gain us anything, * and would waste cycles. */ if (m != NULL) { struct mbuf *m_new = NULL; MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("xl%d: no memory for tx list", sc->xl_unit); return(1); } if (m_head->m_pkthdr.len > MHLEN) { MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { m_freem(m_new); printf("xl%d: no memory for tx list", sc->xl_unit); return(1); } } m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t)); m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len; m_freem(m_head); m_head = m_new; f = &c->xl_ptr->xl_frag[0]; f->xl_addr = vtophys(mtod(m_new, caddr_t)); f->xl_len = total_len = m_new->m_len; frag = 1; } c->xl_mbuf = m_head; c->xl_ptr->xl_frag[frag - 1].xl_len |= XL_LAST_FRAG; c->xl_ptr->xl_status = total_len; c->xl_ptr->xl_next = 0; return(0); } /* * Main transmit routine. 
To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit lists. We also save a * copy of the pointers since the transmit list fragment pointers are * physical addresses. */ static void xl_start(ifp) struct ifnet *ifp; { struct xl_softc *sc; struct mbuf *m_head = NULL; struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx; sc = ifp->if_softc; XL_LOCK(sc); /* * Check for an available queue slot. If there are none, * punt. */ if (sc->xl_cdata.xl_tx_free == NULL) { xl_txeoc(sc); xl_txeof(sc); if (sc->xl_cdata.xl_tx_free == NULL) { ifp->if_flags |= IFF_OACTIVE; XL_UNLOCK(sc); return; } } start_tx = sc->xl_cdata.xl_tx_free; while(sc->xl_cdata.xl_tx_free != NULL) { IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* Pick a descriptor off the free list. */ cur_tx = sc->xl_cdata.xl_tx_free; sc->xl_cdata.xl_tx_free = cur_tx->xl_next; cur_tx->xl_next = NULL; /* Pack the data into the descriptor. */ xl_encap(sc, cur_tx, m_head); /* Chain it together. */ if (prev != NULL) { prev->xl_next = cur_tx; prev->xl_ptr->xl_next = vtophys(cur_tx->xl_ptr); } prev = cur_tx; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ if (ifp->if_bpf) bpf_mtap(ifp, cur_tx->xl_mbuf); } /* * If there are no packets queued, bail. */ if (cur_tx == NULL) { XL_UNLOCK(sc); return; } /* * Place the request for the upload interrupt * in the last descriptor in the chain. This way, if * we're chaining several packets at once, we'll only * get an interupt once for the whole chain rather than * once for each packet. */ cur_tx->xl_ptr->xl_status |= XL_TXSTAT_DL_INTR; /* * Queue the packets. If the TX channel is clear, update * the downlist pointer register. 
*/ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL); xl_wait(sc); if (sc->xl_cdata.xl_tx_head != NULL) { sc->xl_cdata.xl_tx_tail->xl_next = start_tx; sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next = vtophys(start_tx->xl_ptr); sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status &= ~XL_TXSTAT_DL_INTR; sc->xl_cdata.xl_tx_tail = cur_tx; } else { sc->xl_cdata.xl_tx_head = start_tx; sc->xl_cdata.xl_tx_tail = cur_tx; } if (!CSR_READ_4(sc, XL_DOWNLIST_PTR)) CSR_WRITE_4(sc, XL_DOWNLIST_PTR, vtophys(start_tx->xl_ptr)); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); XL_SEL_WIN(7); /* * Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; /* * XXX Under certain conditions, usually on slower machines * where interrupts may be dropped, it's possible for the * adapter to chew up all the buffers in the receive ring * and stall, without us being able to do anything about it. * To guard against this, we need to make a pass over the * RX queue to make sure there aren't any packets pending. * Doing it here means we can flush the receive ring at the * same time the chip is DMAing the transmit descriptors we * just gave it. * * 3Com goes to some lengths to emphasize the Parallel Tasking (tm) * nature of their chips in all their marketing literature; * we may as well take advantage of it. :) */ xl_rxeof(sc); XL_UNLOCK(sc); return; } static int xl_encap_90xB(sc, c, m_head) struct xl_softc *sc; struct xl_chain *c; struct mbuf *m_head; { int frag = 0; struct xl_frag *f = NULL; struct mbuf *m; struct xl_list *d; /* * Start packing the mbufs in this chain into * the fragment pointers. Stop when we run out * of fragments or hit the end of the mbuf chain. 
*/ d = c->xl_ptr; d->xl_status = 0; d->xl_next = 0; for (m = m_head, frag = 0; m != NULL; m = m->m_next) { if (m->m_len != 0) { if (frag == XL_MAXFRAGS) break; f = &d->xl_frag[frag]; f->xl_addr = vtophys(mtod(m, vm_offset_t)); f->xl_len = m->m_len; frag++; } } c->xl_mbuf = m_head; c->xl_ptr->xl_frag[frag - 1].xl_len |= XL_LAST_FRAG; c->xl_ptr->xl_status = XL_TXSTAT_RND_DEFEAT; return(0); } static void xl_start_90xB(ifp) struct ifnet *ifp; { struct xl_softc *sc; struct mbuf *m_head = NULL; struct xl_chain *prev = NULL, *cur_tx = NULL, *start_tx; int idx; sc = ifp->if_softc; XL_LOCK(sc); if (ifp->if_flags & IFF_OACTIVE) { XL_UNLOCK(sc); return; } idx = sc->xl_cdata.xl_tx_prod; start_tx = &sc->xl_cdata.xl_tx_chain[idx]; while (sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL) { if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) { ifp->if_flags |= IFF_OACTIVE; break; } IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; cur_tx = &sc->xl_cdata.xl_tx_chain[idx]; /* Pack the data into the descriptor. */ xl_encap_90xB(sc, cur_tx, m_head); /* Chain it together. */ if (prev != NULL) prev->xl_ptr->xl_next = cur_tx->xl_phys; prev = cur_tx; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ if (ifp->if_bpf) bpf_mtap(ifp, cur_tx->xl_mbuf); XL_INC(idx, XL_TX_LIST_CNT); sc->xl_cdata.xl_tx_cnt++; } /* * If there are no packets queued, bail. */ if (cur_tx == NULL) { XL_UNLOCK(sc); return; } /* * Place the request for the upload interrupt * in the last descriptor in the chain. This way, if * we're chaining several packets at once, we'll only * get an interupt once for the whole chain rather than * once for each packet. */ cur_tx->xl_ptr->xl_status |= XL_TXSTAT_DL_INTR; /* Start transmission */ sc->xl_cdata.xl_tx_prod = idx; start_tx->xl_prev->xl_ptr->xl_next = start_tx->xl_phys; /* * Set a timeout in case the chip goes out to lunch. 
*/ ifp->if_timer = 5; XL_UNLOCK(sc); return; } static void xl_init(xsc) void *xsc; { struct xl_softc *sc = xsc; struct ifnet *ifp = &sc->arpcom.ac_if; int i; u_int16_t rxfilt = 0; struct mii_data *mii = NULL; XL_LOCK(sc); /* * Cancel pending I/O and free all RX/TX buffers. */ xl_stop(sc); if (sc->xl_miibus == NULL) { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET); xl_wait(sc); } CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); xl_wait(sc); DELAY(10000); if (sc->xl_miibus != NULL) mii = device_get_softc(sc->xl_miibus); /* Init our MAC address */ XL_SEL_WIN(2); for (i = 0; i < ETHER_ADDR_LEN; i++) { CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i, sc->arpcom.ac_enaddr[i]); } /* Clear the station mask. */ for (i = 0; i < 3; i++) CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0); #ifdef notdef /* Reset TX and RX. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET); xl_wait(sc); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); xl_wait(sc); #endif /* Init circular RX list. */ if (xl_list_rx_init(sc) == ENOBUFS) { printf("xl%d: initialization failed: no " "memory for rx buffers\n", sc->xl_unit); xl_stop(sc); return; } /* Init TX descriptors. */ if (sc->xl_type == XL_TYPE_905B) xl_list_tx_init_90xB(sc); else xl_list_tx_init(sc); /* * Set the TX freethresh value. * Note that this has no effect on 3c905B "cyclone" * cards but is required for 3c900/3c905 "boomerang" * cards in order to enable the download engine. */ CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8); /* Set the TX start threshold for best performance. */ sc->xl_tx_thresh = XL_MIN_FRAMELEN; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh); /* * If this is a 3c905B, also set the tx reclaim threshold. * This helps cut down on the number of tx reclaim errors * that could happen on a busy network. The chip multiplies * the register value by 16 to obtain the actual threshold * in bytes, so we divide by 16 when setting the value here. 
* The existing threshold value can be examined by reading * the register at offset 9 in window 5. */ if (sc->xl_type == XL_TYPE_905B) { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4)); } /* Set RX filter bits. */ XL_SEL_WIN(5); rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER); /* Set the individual bit to receive frames for this host only. */ rxfilt |= XL_RXFILTER_INDIVIDUAL; /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) { rxfilt |= XL_RXFILTER_ALLFRAMES; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); } else { rxfilt &= ~XL_RXFILTER_ALLFRAMES; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); } /* * Set capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) { rxfilt |= XL_RXFILTER_BROADCAST; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); } else { rxfilt &= ~XL_RXFILTER_BROADCAST; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); } /* * Program the multicast filter, if necessary. */ if (sc->xl_type == XL_TYPE_905B) xl_setmulti_hash(sc); else xl_setmulti(sc); /* * Load the address of the RX list. We have to * stall the upload engine before we can manipulate * the uplist pointer register, then unstall it when * we're finished. We also have to wait for the * stall command to complete before proceeding. * Note that we have to do this after any RX resets * have completed since the uplist register is cleared * by a reset. 
*/ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL); xl_wait(sc); CSR_WRITE_4(sc, XL_UPLIST_PTR, vtophys(&sc->xl_ldata->xl_rx_list[0])); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL); xl_wait(sc); if (sc->xl_type == XL_TYPE_905B) { /* Set polling interval */ CSR_WRITE_1(sc, XL_DOWN_POLL, 64); /* Load the address of the TX list */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL); xl_wait(sc); CSR_WRITE_4(sc, XL_DOWNLIST_PTR, vtophys(&sc->xl_ldata->xl_tx_list[0])); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); xl_wait(sc); } /* * If the coax transceiver is on, make sure to enable * the DC-DC converter. */ XL_SEL_WIN(3); if (sc->xl_xcvr == XL_XCVR_COAX) CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START); else CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP); /* Clear out the stats counters. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE); sc->xl_stats_no_timeout = 1; xl_stats_update(sc); sc->xl_stats_no_timeout = 0; XL_SEL_WIN(4); CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE); /* * Enable interrupts. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS); if (sc->xl_flags & XL_FLAG_FUNCREG) bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000); /* Set the RX early threshold */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >>2)); CSR_WRITE_2(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY); /* Enable receiver and transmitter. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE); xl_wait(sc); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE); xl_wait(sc); if (mii != NULL) mii_mediachg(mii); /* Select window 7 for normal operations. */ XL_SEL_WIN(7); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; sc->xl_stat_ch = timeout(xl_stats_update, sc, hz); XL_UNLOCK(sc); return; } /* * Set media options. 
*/ static int xl_ifmedia_upd(ifp) struct ifnet *ifp; { struct xl_softc *sc; struct ifmedia *ifm = NULL; struct mii_data *mii = NULL; sc = ifp->if_softc; if (sc->xl_miibus != NULL) mii = device_get_softc(sc->xl_miibus); if (mii == NULL) ifm = &sc->ifmedia; else ifm = &mii->mii_media; switch(IFM_SUBTYPE(ifm->ifm_media)) { case IFM_100_FX: case IFM_10_FL: case IFM_10_2: case IFM_10_5: xl_setmode(sc, ifm->ifm_media); return(0); break; default: break; } if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX || sc->xl_media & XL_MEDIAOPT_BT4) { xl_init(sc); } else { xl_setmode(sc, ifm->ifm_media); } return(0); } /* * Report current media status. */ static void xl_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct xl_softc *sc; u_int32_t icfg; struct mii_data *mii = NULL; sc = ifp->if_softc; if (sc->xl_miibus != NULL) mii = device_get_softc(sc->xl_miibus); XL_SEL_WIN(3); icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK; icfg >>= XL_ICFG_CONNECTOR_BITS; ifmr->ifm_active = IFM_ETHER; switch(icfg) { case XL_XCVR_10BT: ifmr->ifm_active = IFM_ETHER|IFM_10_T; if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; break; case XL_XCVR_AUI: if (sc->xl_type == XL_TYPE_905B && sc->xl_media == XL_MEDIAOPT_10FL) { ifmr->ifm_active = IFM_ETHER|IFM_10_FL; if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; } else ifmr->ifm_active = IFM_ETHER|IFM_10_5; break; case XL_XCVR_COAX: ifmr->ifm_active = IFM_ETHER|IFM_10_2; break; /* * XXX MII and BTX/AUTO should be separate cases. 
 */
    case XL_XCVR_100BTX:
    case XL_XCVR_AUTO:
    case XL_XCVR_MII:
        /* Defer to the PHY for current media/status when one exists. */
        if (mii != NULL) {
            mii_pollstat(mii);
            ifmr->ifm_active = mii->mii_media_active;
            ifmr->ifm_status = mii->mii_media_status;
        }
        break;
    case XL_XCVR_100BFX:
        ifmr->ifm_active = IFM_ETHER|IFM_100_FX;
        break;
    default:
        printf("xl%d: unknown XCVR type: %d\n", sc->xl_unit, icfg);
        break;
    }

    return;
}

/*
 * Handle socket ioctls for the interface: address/MTU changes,
 * flag changes (up/down, promiscuous), multicast list updates and
 * media get/set.  Runs with the driver lock held.
 */
static int xl_ioctl(ifp, command, data)
    struct ifnet *ifp;
    u_long command;
    caddr_t data;
{
    struct xl_softc *sc = ifp->if_softc;
    struct ifreq *ifr = (struct ifreq *) data;
    int error = 0;
    struct mii_data *mii = NULL;
    u_int8_t rxfilt;

    XL_LOCK(sc);

    switch(command) {
    case SIOCSIFADDR:
    case SIOCGIFADDR:
    case SIOCSIFMTU:
        error = ether_ioctl(ifp, command, data);
        break;
    case SIOCSIFFLAGS:
        /* RX filter register lives in window 5. */
        XL_SEL_WIN(5);
        rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
        if (ifp->if_flags & IFF_UP) {
            /*
             * If only the PROMISC flag toggled while the interface is
             * already running, just flip the allframes filter bit
             * instead of doing a full reinit; xl_if_flags caches the
             * previous flags so the transition can be detected.
             */
            if (ifp->if_flags & IFF_RUNNING &&
                ifp->if_flags & IFF_PROMISC &&
                !(sc->xl_if_flags & IFF_PROMISC)) {
                rxfilt |= XL_RXFILTER_ALLFRAMES;
                CSR_WRITE_2(sc, XL_COMMAND,
                    XL_CMD_RX_SET_FILT|rxfilt);
                /* Return to the normal-operation window. */
                XL_SEL_WIN(7);
            } else if (ifp->if_flags & IFF_RUNNING &&
                !(ifp->if_flags & IFF_PROMISC) &&
                sc->xl_if_flags & IFF_PROMISC) {
                rxfilt &= ~XL_RXFILTER_ALLFRAMES;
                CSR_WRITE_2(sc, XL_COMMAND,
                    XL_CMD_RX_SET_FILT|rxfilt);
                XL_SEL_WIN(7);
            } else
                /* Any other flag change: full reinitialization. */
                xl_init(sc);
        } else {
            if (ifp->if_flags & IFF_RUNNING)
                xl_stop(sc);
        }
        sc->xl_if_flags = ifp->if_flags;
        error = 0;
        break;
    case SIOCADDMULTI:
    case SIOCDELMULTI:
        /* 905B parts use the hash filter, older parts the plain one. */
        if (sc->xl_type == XL_TYPE_905B)
            xl_setmulti_hash(sc);
        else
            xl_setmulti(sc);
        error = 0;
        break;
    case SIOCGIFMEDIA:
    case SIOCSIFMEDIA:
        if (sc->xl_miibus != NULL)
            mii = device_get_softc(sc->xl_miibus);
        /* Route media ioctls to the PHY's list when one is attached. */
        if (mii == NULL)
            error = ifmedia_ioctl(ifp, ifr,
                &sc->ifmedia, command);
        else
            error = ifmedia_ioctl(ifp, ifr,
                &mii->mii_media, command);
        break;
    default:
        error = EINVAL;
        break;
    }

    XL_UNLOCK(sc);

    return(error);
}

/*
 * Transmit watchdog: fired when a queued transmission has not
 * completed within the if_timer interval.  Reclaims descriptors,
 * resets and reinitializes the chip, then restarts the send queue.
 */
static void xl_watchdog(ifp)
    struct ifnet *ifp;
{
    struct xl_softc *sc;
    u_int16_t status = 0;

    sc = ifp->if_softc;

    XL_LOCK(sc);

    ifp->if_oerrors++;
    /* Media status (carrier indication) is in window 4. */
    XL_SEL_WIN(4);
    status = CSR_READ_2(sc,
 XL_W4_MEDIA_STATUS);
    printf("xl%d: watchdog timeout\n", sc->xl_unit);

    if (status & XL_MEDIASTAT_CARRIER)
        printf("xl%d: no carrier - transceiver cable problem?\n",
            sc->xl_unit);
    /* Reclaim any completed/errored descriptors before resetting. */
    xl_txeoc(sc);
    xl_txeof(sc);
    xl_rxeof(sc);
    xl_reset(sc);
    xl_init(sc);

    /* Kick the transmitter if packets are still queued. */
    if (ifp->if_snd.ifq_head != NULL)
        (*ifp->if_start)(ifp);

    XL_UNLOCK(sc);

    return;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void xl_stop(sc)
    struct xl_softc *sc;
{
    register int i;
    struct ifnet *ifp;

    XL_LOCK(sc);

    ifp = &sc->arpcom.ac_if;
    /* Disarm the transmit watchdog. */
    ifp->if_timer = 0;

    /* Quiesce the chip: stop RX/TX/stats and discard pending RX data. */
    CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
    CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
    /* NOTE(review): INTR_ENB with no mask bits — masks all interrupts. */
    CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
    CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD);
    xl_wait(sc);
    CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE);
    CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
    DELAY(800);

#ifdef foo
    CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
    xl_wait(sc);
    CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
    xl_wait(sc);
#endif

    /* Ack the latched interrupt and mask everything off. */
    CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH);
    CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0);
    CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
    /* Cardbus-style parts need the function register poked as well. */
    if (sc->xl_flags & XL_FLAG_FUNCREG)
        bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000);

    /* Stop the stats updater. */
    untimeout(xl_stats_update, sc, sc->xl_stat_ch);

    /*
     * Free data in the RX lists.
     */
    for (i = 0; i < XL_RX_LIST_CNT; i++) {
        if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) {
            m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf);
            sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL;
        }
    }
    /* Clear the RX descriptor ring itself. */
    bzero((char *)&sc->xl_ldata->xl_rx_list,
        sizeof(sc->xl_ldata->xl_rx_list));
    /*
     * Free the TX list buffers.
 */
    for (i = 0; i < XL_TX_LIST_CNT; i++) {
        if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) {
            m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf);
            sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL;
        }
    }
    /* Clear the TX descriptor ring itself. */
    bzero((char *)&sc->xl_ldata->xl_tx_list,
        sizeof(sc->xl_ldata->xl_tx_list));

    /* Mark the interface as down and idle. */
    ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

    XL_UNLOCK(sc);

    return;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void xl_shutdown(dev)
    device_t dev;
{
    struct xl_softc *sc;

    sc = device_get_softc(dev);

    XL_LOCK(sc);
    /* Reset then stop: leaves the chip quiescent with DMA disabled. */
    xl_reset(sc);
    xl_stop(sc);
    XL_UNLOCK(sc);

    return;
}