Index: stable/6/sys/dev/ath/if_ath.c =================================================================== --- stable/6/sys/dev/ath/if_ath.c (revision 149421) +++ stable/6/sys/dev/ath/if_ath.c (revision 149422) @@ -1,5020 +1,5022 @@ /*- * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * Driver for the Atheros Wireless LAN controller. * * This software is derived from work of Atsushi Onoe; his contribution * is greatly appreciated. */ #include "opt_inet.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #endif #define AR_DEBUG #include #include #include /* XXX for softled */ /* unaligned little endian access */ #define LE_READ_2(p) \ ((u_int16_t) \ ((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8))) #define LE_READ_4(p) \ ((u_int32_t) \ ((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8) | \ (((u_int8_t *)(p))[2] << 16) | (((u_int8_t *)(p))[3] << 24))) enum { ATH_LED_TX, ATH_LED_RX, ATH_LED_POLL, }; static void ath_init(void *); static void ath_stop_locked(struct ifnet *); static void ath_stop(struct ifnet *); static void ath_start(struct ifnet *); static int ath_reset(struct ifnet *); static int ath_media_change(struct ifnet *); static void ath_watchdog(struct ifnet *); static int ath_ioctl(struct ifnet *, u_long, caddr_t); static void ath_fatal_proc(void *, int); static void ath_rxorn_proc(void *, int); static void ath_bmiss_proc(void *, int); static int ath_key_alloc(struct ieee80211com *, const struct ieee80211_key *); static int ath_key_delete(struct ieee80211com *, const struct ieee80211_key *); static int 
ath_key_set(struct ieee80211com *, const struct ieee80211_key *, const u_int8_t mac[IEEE80211_ADDR_LEN]); static void ath_key_update_begin(struct ieee80211com *); static void ath_key_update_end(struct ieee80211com *); static void ath_mode_init(struct ath_softc *); static void ath_setslottime(struct ath_softc *); static void ath_updateslot(struct ifnet *); static int ath_beaconq_setup(struct ath_hal *); static int ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *); static void ath_beacon_setup(struct ath_softc *, struct ath_buf *); static void ath_beacon_proc(void *, int); static void ath_bstuck_proc(void *, int); static void ath_beacon_free(struct ath_softc *); static void ath_beacon_config(struct ath_softc *); static void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *, ath_bufhead *); static int ath_desc_alloc(struct ath_softc *); static void ath_desc_free(struct ath_softc *); static struct ieee80211_node *ath_node_alloc(struct ieee80211_node_table *); static void ath_node_free(struct ieee80211_node *); static u_int8_t ath_node_getrssi(const struct ieee80211_node *); static int ath_rxbuf_init(struct ath_softc *, struct ath_buf *); static void ath_recv_mgmt(struct ieee80211com *ic, struct mbuf *m, struct ieee80211_node *ni, int subtype, int rssi, u_int32_t rstamp); static void ath_setdefantenna(struct ath_softc *, u_int); static void ath_rx_proc(void *, int); static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype); static int ath_tx_setup(struct ath_softc *, int, int); static int ath_wme_update(struct ieee80211com *); static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *); static void ath_tx_cleanup(struct ath_softc *); static int ath_tx_start(struct ath_softc *, struct ieee80211_node *, struct ath_buf *, struct mbuf *); static void ath_tx_proc_q0(void *, int); static void ath_tx_proc_q0123(void *, int); static void ath_tx_proc(void *, int); static int ath_chan_set(struct ath_softc *, struct 
ieee80211_channel *); static void ath_draintxq(struct ath_softc *); static void ath_stoprecv(struct ath_softc *); static int ath_startrecv(struct ath_softc *); static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *); static void ath_next_scan(void *); static void ath_calibrate(void *); static int ath_newstate(struct ieee80211com *, enum ieee80211_state, int); static void ath_setup_stationkey(struct ieee80211_node *); static void ath_newassoc(struct ieee80211_node *, int); static int ath_getchannels(struct ath_softc *, u_int cc, HAL_BOOL outdoor, HAL_BOOL xchanmode); static void ath_led_event(struct ath_softc *, int); static void ath_update_txpow(struct ath_softc *); static int ath_rate_setup(struct ath_softc *, u_int mode); static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode); static void ath_sysctlattach(struct ath_softc *); static void ath_bpfattach(struct ath_softc *); static void ath_announce(struct ath_softc *); SYSCTL_DECL(_hw_ath); /* XXX validate sysctl values */ static int ath_dwelltime = 200; /* 5 channels/second */ SYSCTL_INT(_hw_ath, OID_AUTO, dwell, CTLFLAG_RW, &ath_dwelltime, 0, "channel dwell time (ms) for AP/station scanning"); static int ath_calinterval = 30; /* calibrate every 30 secs */ SYSCTL_INT(_hw_ath, OID_AUTO, calibrate, CTLFLAG_RW, &ath_calinterval, 0, "chip calibration interval (secs)"); static int ath_outdoor = AH_TRUE; /* outdoor operation */ SYSCTL_INT(_hw_ath, OID_AUTO, outdoor, CTLFLAG_RD, &ath_outdoor, 0, "outdoor operation"); TUNABLE_INT("hw.ath.outdoor", &ath_outdoor); static int ath_xchanmode = AH_TRUE; /* extended channel use */ SYSCTL_INT(_hw_ath, OID_AUTO, xchanmode, CTLFLAG_RD, &ath_xchanmode, 0, "extended channel mode"); TUNABLE_INT("hw.ath.xchanmode", &ath_xchanmode); static int ath_countrycode = CTRY_DEFAULT; /* country code */ SYSCTL_INT(_hw_ath, OID_AUTO, countrycode, CTLFLAG_RD, &ath_countrycode, 0, "country code"); TUNABLE_INT("hw.ath.countrycode", &ath_countrycode); static int 
ath_regdomain = 0; /* regulatory domain */ SYSCTL_INT(_hw_ath, OID_AUTO, regdomain, CTLFLAG_RD, &ath_regdomain, 0, "regulatory domain"); #ifdef AR_DEBUG static int ath_debug = 0; SYSCTL_INT(_hw_ath, OID_AUTO, debug, CTLFLAG_RW, &ath_debug, 0, "control debugging printfs"); TUNABLE_INT("hw.ath.debug", &ath_debug); enum { ATH_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ ATH_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */ ATH_DEBUG_RECV = 0x00000004, /* basic recv operation */ ATH_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */ ATH_DEBUG_RATE = 0x00000010, /* rate control */ ATH_DEBUG_RESET = 0x00000020, /* reset processing */ ATH_DEBUG_MODE = 0x00000040, /* mode init/setup */ ATH_DEBUG_BEACON = 0x00000080, /* beacon handling */ ATH_DEBUG_WATCHDOG = 0x00000100, /* watchdog timeout */ ATH_DEBUG_INTR = 0x00001000, /* ISR */ ATH_DEBUG_TX_PROC = 0x00002000, /* tx ISR proc */ ATH_DEBUG_RX_PROC = 0x00004000, /* rx ISR proc */ ATH_DEBUG_BEACON_PROC = 0x00008000, /* beacon ISR proc */ ATH_DEBUG_CALIBRATE = 0x00010000, /* periodic calibration */ ATH_DEBUG_KEYCACHE = 0x00020000, /* key cache management */ ATH_DEBUG_STATE = 0x00040000, /* 802.11 state transitions */ ATH_DEBUG_NODE = 0x00080000, /* node management */ ATH_DEBUG_LED = 0x00100000, /* led management */ ATH_DEBUG_FATAL = 0x80000000, /* fatal errors */ ATH_DEBUG_ANY = 0xffffffff }; #define IFF_DUMPPKTS(sc, m) \ ((sc->sc_debug & (m)) || \ (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2)) #define DPRINTF(sc, m, fmt, ...) do { \ if (sc->sc_debug & (m)) \ printf(fmt, __VA_ARGS__); \ } while (0) #define KEYPRINTF(sc, ix, hk, mac) do { \ if (sc->sc_debug & ATH_DEBUG_KEYCACHE) \ ath_keyprint(__func__, ix, hk, mac); \ } while (0) static void ath_printrxbuf(struct ath_buf *bf, int); static void ath_printtxbuf(struct ath_buf *bf, int); #else #define IFF_DUMPPKTS(sc, m) \ ((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2)) #define DPRINTF(m, fmt, ...) 
#define KEYPRINTF(sc, k, ix, mac) #endif MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers"); int ath_attach(u_int16_t devid, struct ath_softc *sc) { struct ifnet *ifp; struct ieee80211com *ic = &sc->sc_ic; struct ath_hal *ah = NULL; HAL_STATUS status; int error = 0, i; DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid); ifp = sc->sc_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(sc->sc_dev, "can not if_alloc()\n"); error = ENOSPC; goto bad; } /* set these up early for if_printf use */ if_initname(ifp, device_get_name(sc->sc_dev), device_get_unit(sc->sc_dev)); ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, &status); if (ah == NULL) { if_printf(ifp, "unable to attach hardware; HAL status %u\n", status); error = ENXIO; goto bad; } if (ah->ah_abi != HAL_ABI_VERSION) { if_printf(ifp, "HAL ABI mismatch detected " "(HAL:0x%x != driver:0x%x)\n", ah->ah_abi, HAL_ABI_VERSION); error = ENXIO; goto bad; } sc->sc_ah = ah; sc->sc_invalid = 0; /* ready to go, enable interrupt handling */ /* * Check if the MAC has multi-rate retry support. * We do this by trying to setup a fake extended * descriptor. MAC's that don't have support will * return false w/o doing anything. MAC's that do * support it will return true w/o doing anything. */ sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0); /* * Check if the device has hardware counters for PHY * errors. If so we need to enable the MIB interrupt * so we can act on stat triggers. */ if (ath_hal_hwphycounters(ah)) sc->sc_needmib = 1; /* * Get the hardware key cache size. */ sc->sc_keymax = ath_hal_keycachesize(ah); if (sc->sc_keymax > ATH_KEYMAX) { if_printf(ifp, "Warning, using only %u of %u key cache slots\n", ATH_KEYMAX, sc->sc_keymax); sc->sc_keymax = ATH_KEYMAX; } /* * Reset the key cache since some parts do not * reset the contents on initial power up. 
*/ for (i = 0; i < sc->sc_keymax; i++) ath_hal_keyreset(ah, i); /* * Mark key cache slots associated with global keys * as in use. If we knew TKIP was not to be used we * could leave the +32, +64, and +32+64 slots free. * XXX only for splitmic. */ for (i = 0; i < IEEE80211_WEP_NKID; i++) { setbit(sc->sc_keymap, i); setbit(sc->sc_keymap, i+32); setbit(sc->sc_keymap, i+64); setbit(sc->sc_keymap, i+32+64); } /* * Collect the channel list using the default country * code and including outdoor channels. The 802.11 layer * is resposible for filtering this list based on settings * like the phy mode. */ error = ath_getchannels(sc, ath_countrycode, ath_outdoor, ath_xchanmode); if (error != 0) goto bad; /* * Setup rate tables for all potential media types. */ ath_rate_setup(sc, IEEE80211_MODE_11A); ath_rate_setup(sc, IEEE80211_MODE_11B); ath_rate_setup(sc, IEEE80211_MODE_11G); ath_rate_setup(sc, IEEE80211_MODE_TURBO_A); ath_rate_setup(sc, IEEE80211_MODE_TURBO_G); /* NB: setup here so ath_rate_update is happy */ ath_setcurmode(sc, IEEE80211_MODE_11A); /* * Allocate tx+rx descriptors and populate the lists. */ error = ath_desc_alloc(sc); if (error != 0) { if_printf(ifp, "failed to allocate descriptors: %d\n", error); goto bad; } callout_init(&sc->sc_scan_ch, debug_mpsafenet ? CALLOUT_MPSAFE : 0); callout_init(&sc->sc_cal_ch, CALLOUT_MPSAFE); ATH_TXBUF_LOCK_INIT(sc); TASK_INIT(&sc->sc_rxtask, 0, ath_rx_proc, sc); TASK_INIT(&sc->sc_rxorntask, 0, ath_rxorn_proc, sc); TASK_INIT(&sc->sc_fataltask, 0, ath_fatal_proc, sc); TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc); TASK_INIT(&sc->sc_bstucktask, 0, ath_bstuck_proc, sc); /* * Allocate hardware transmit queues: one queue for * beacon frames and one data queue for each QoS * priority. Note that the hal handles reseting * these queues at the needed time. 
* * XXX PS-Poll */ sc->sc_bhalq = ath_beaconq_setup(ah); if (sc->sc_bhalq == (u_int) -1) { if_printf(ifp, "unable to setup a beacon xmit queue!\n"); error = EIO; goto bad2; } sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0); if (sc->sc_cabq == NULL) { if_printf(ifp, "unable to setup CAB xmit queue!\n"); error = EIO; goto bad2; } /* NB: insure BK queue is the lowest priority h/w queue */ if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) { if_printf(ifp, "unable to setup xmit queue for %s traffic!\n", ieee80211_wme_acnames[WME_AC_BK]); error = EIO; goto bad2; } if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) || !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) || !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) { /* * Not enough hardware tx queues to properly do WME; * just punt and assign them all to the same h/w queue. * We could do a better job of this if, for example, * we allocate queues when we switch from station to * AP mode. */ if (sc->sc_ac2q[WME_AC_VI] != NULL) ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]); if (sc->sc_ac2q[WME_AC_BE] != NULL) ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]); sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK]; sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK]; sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK]; } /* * Special case certain configurations. Note the * CAB queue is handled by these specially so don't * include them when checking the txq setup mask. */ switch (sc->sc_txqsetup &~ (1<sc_cabq->axq_qnum)) { case 0x01: TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc); break; case 0x0f: TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc); break; default: TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc); break; } /* * Setup rate control. Some rate control modules * call back to change the anntena state so expose * the necessary entry points. * XXX maybe belongs in struct ath_ratectrl? 
*/ sc->sc_setdefantenna = ath_setdefantenna; sc->sc_rc = ath_rate_attach(sc); if (sc->sc_rc == NULL) { error = EIO; goto bad2; } sc->sc_blinking = 0; sc->sc_ledstate = 1; sc->sc_ledon = 0; /* low true */ sc->sc_ledidle = (2700*hz)/1000; /* 2.7sec */ callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE); /* * Auto-enable soft led processing for IBM cards and for * 5211 minipci cards. Users can also manually enable/disable * support with a sysctl. */ sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID); if (sc->sc_softled) { ath_hal_gpioCfgOutput(ah, sc->sc_ledpin); ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon); } ifp->if_softc = sc; ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; ifp->if_start = ath_start; ifp->if_watchdog = ath_watchdog; ifp->if_ioctl = ath_ioctl; ifp->if_init = ath_init; IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN; IFQ_SET_READY(&ifp->if_snd); ic->ic_ifp = ifp; ic->ic_reset = ath_reset; ic->ic_newassoc = ath_newassoc; ic->ic_updateslot = ath_updateslot; ic->ic_wme.wme_update = ath_wme_update; /* XXX not right but it's not used anywhere important */ ic->ic_phytype = IEEE80211_T_OFDM; ic->ic_opmode = IEEE80211_M_STA; ic->ic_caps = IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ | IEEE80211_C_HOSTAP /* hostap mode */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ ; /* * Query the hal to figure out h/w crypto support. 
*/ if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP)) ic->ic_caps |= IEEE80211_C_WEP; if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB)) ic->ic_caps |= IEEE80211_C_AES; if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM)) ic->ic_caps |= IEEE80211_C_AES_CCM; if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP)) ic->ic_caps |= IEEE80211_C_CKIP; if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) { ic->ic_caps |= IEEE80211_C_TKIP; /* * Check if h/w does the MIC and/or whether the * separate key cache entries are required to * handle both tx+rx MIC keys. */ if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC)) ic->ic_caps |= IEEE80211_C_TKIPMIC; if (ath_hal_tkipsplit(ah)) sc->sc_splitmic = 1; } sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR); sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah); /* * TPC support can be done either with a global cap or * per-packet support. The latter is not available on * all parts. We're a bit pedantic here as all parts * support a global cap. */ if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah)) ic->ic_caps |= IEEE80211_C_TXPMGT; /* * Mark WME capability only if we have sufficient * hardware queues to do proper priority scheduling. */ if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK]) ic->ic_caps |= IEEE80211_C_WME; /* * Check for misc other capabilities. */ if (ath_hal_hasbursting(ah)) ic->ic_caps |= IEEE80211_C_BURST; /* * Indicate we need the 802.11 header padded to a * 32-bit boundary for 4-address and QoS frames. */ ic->ic_flags |= IEEE80211_F_DATAPAD; /* * Query the hal about antenna support. */ sc->sc_defant = ath_hal_getdefantenna(ah); /* * Not all chips have the VEOL support we want to * use with IBSS beacons; check here for it. */ sc->sc_hasveol = ath_hal_hasveol(ah); /* get mac address from hardware */ ath_hal_getmac(ah, ic->ic_myaddr); /* call MI attach routine. 
*/ ieee80211_ifattach(ic); /* override default methods */ ic->ic_node_alloc = ath_node_alloc; sc->sc_node_free = ic->ic_node_free; ic->ic_node_free = ath_node_free; ic->ic_node_getrssi = ath_node_getrssi; sc->sc_recv_mgmt = ic->ic_recv_mgmt; ic->ic_recv_mgmt = ath_recv_mgmt; sc->sc_newstate = ic->ic_newstate; ic->ic_newstate = ath_newstate; ic->ic_crypto.cs_key_alloc = ath_key_alloc; ic->ic_crypto.cs_key_delete = ath_key_delete; ic->ic_crypto.cs_key_set = ath_key_set; ic->ic_crypto.cs_key_update_begin = ath_key_update_begin; ic->ic_crypto.cs_key_update_end = ath_key_update_end; /* complete initialization */ ieee80211_media_init(ic, ath_media_change, ieee80211_media_status); ath_bpfattach(sc); /* * Setup dynamic sysctl's now that country code and * regdomain are available from the hal. */ ath_sysctlattach(sc); if (bootverbose) ieee80211_announce(ic); ath_announce(sc); return 0; bad2: ath_tx_cleanup(sc); ath_desc_free(sc); bad: if (ah) ath_hal_detach(ah); if (ifp != NULL) if_free(ifp); sc->sc_invalid = 1; return error; } int ath_detach(struct ath_softc *sc) { struct ifnet *ifp = sc->sc_ifp; DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", __func__, ifp->if_flags); ath_stop(ifp); bpfdetach(ifp); /* * NB: the order of these is important: * o call the 802.11 layer before detaching the hal to * insure callbacks into the driver to delete global * key cache entries can be handled * o reclaim the tx queue data structures after calling * the 802.11 layer as we'll get called back to reclaim * node state and potentially want to use them * o to cleanup the tx queues the hal is called, so detach * it last * Other than that, it's straightforward... 
*/ ieee80211_ifdetach(&sc->sc_ic); ath_rate_detach(sc->sc_rc); ath_desc_free(sc); ath_tx_cleanup(sc); ath_hal_detach(sc->sc_ah); return 0; } void ath_suspend(struct ath_softc *sc) { struct ifnet *ifp = sc->sc_ifp; DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", __func__, ifp->if_flags); ath_stop(ifp); } void ath_resume(struct ath_softc *sc) { struct ifnet *ifp = sc->sc_ifp; DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", __func__, ifp->if_flags); if (ifp->if_flags & IFF_UP) { ath_init(sc); if (ifp->if_flags & IFF_RUNNING) ath_start(ifp); } if (sc->sc_softled) { ath_hal_gpioCfgOutput(sc->sc_ah, sc->sc_ledpin); ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !sc->sc_ledon); } } void ath_shutdown(struct ath_softc *sc) { struct ifnet *ifp = sc->sc_ifp; DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", __func__, ifp->if_flags); ath_stop(ifp); } /* * Interrupt handler. Most of the actual processing is deferred. */ void ath_intr(void *arg) { struct ath_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ath_hal *ah = sc->sc_ah; HAL_INT status; if (sc->sc_invalid) { /* * The hardware is not ready/present, don't touch anything. * Note this can happen early on if the IRQ is shared. */ DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__); return; } if (!ath_hal_intrpend(ah)) /* shared irq, not for us */ return; if ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) != (IFF_RUNNING|IFF_UP)) { DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", __func__, ifp->if_flags); ath_hal_getisr(ah, &status); /* clear ISR */ ath_hal_intrset(ah, 0); /* disable further intr's */ return; } /* * Figure out the reason(s) for the interrupt. Note * that the hal returns a pseudo-ISR that may include * bits we haven't explicitly enabled so we mask the * value to insure we only process bits we requested. 
*/ ath_hal_getisr(ah, &status); /* NB: clears ISR too */ DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status); status &= sc->sc_imask; /* discard unasked for bits */ if (status & HAL_INT_FATAL) { /* * Fatal errors are unrecoverable. Typically * these are caused by DMA errors. Unfortunately * the exact reason is not (presently) returned * by the hal. */ sc->sc_stats.ast_hardware++; ath_hal_intrset(ah, 0); /* disable intr's until reset */ taskqueue_enqueue(taskqueue_swi, &sc->sc_fataltask); } else if (status & HAL_INT_RXORN) { sc->sc_stats.ast_rxorn++; ath_hal_intrset(ah, 0); /* disable intr's until reset */ taskqueue_enqueue(taskqueue_swi, &sc->sc_rxorntask); } else { if (status & HAL_INT_SWBA) { /* * Software beacon alert--time to send a beacon. * Handle beacon transmission directly; deferring * this is too slow to meet timing constraints * under load. */ ath_beacon_proc(sc, 0); } if (status & HAL_INT_RXEOL) { /* * NB: the hardware should re-read the link when * RXE bit is written, but it doesn't work at * least on older hardware revs. */ sc->sc_stats.ast_rxeol++; sc->sc_rxlink = NULL; } if (status & HAL_INT_TXURN) { sc->sc_stats.ast_txurn++; /* bump tx trigger level */ ath_hal_updatetxtriglevel(ah, AH_TRUE); } if (status & HAL_INT_RX) taskqueue_enqueue(taskqueue_swi, &sc->sc_rxtask); if (status & HAL_INT_TX) taskqueue_enqueue(taskqueue_swi, &sc->sc_txtask); if (status & HAL_INT_BMISS) { sc->sc_stats.ast_bmiss++; taskqueue_enqueue(taskqueue_swi, &sc->sc_bmisstask); } if (status & HAL_INT_MIB) { sc->sc_stats.ast_mib++; /* * Disable interrupts until we service the MIB * interrupt; otherwise it will continue to fire. */ ath_hal_intrset(ah, 0); /* * Let the hal handle the event. We assume it will * clear whatever condition caused the interrupt. 
 */
			ath_hal_mibevent(ah,
			    &ATH_NODE(sc->sc_ic.ic_bss)->an_halstats);
			ath_hal_intrset(ah, sc->sc_imask);
		}
	}
}

/*
 * Deferred handler for fatal hardware errors: log and do a
 * full chip reset to recover.
 */
static void
ath_fatal_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;

	if_printf(ifp, "hardware error; resetting\n");
	ath_reset(ifp);
}

/*
 * Deferred handler for rx FIFO overrun: log and reset the chip
 * to restart the receive engine.
 */
static void
ath_rxorn_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ifnet *ifp = sc->sc_ifp;

	if_printf(ifp, "rx FIFO overrun; resetting\n");
	ath_reset(ifp);
}

/*
 * Deferred handler for a beacon-miss interrupt (station mode only,
 * per the KASSERT below).
 */
static void
ath_bmiss_proc(void *arg, int pending)
{
	struct ath_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending);
	KASSERT(ic->ic_opmode == IEEE80211_M_STA,
		("unexpect operating mode %u", ic->ic_opmode));
	if (ic->ic_state == IEEE80211_S_RUN) {
		/*
		 * Rather than go directly to scan state, try to
		 * reassociate first. If that fails then the state
		 * machine will drop us into scanning after timing
		 * out waiting for a probe response.
		 */
		NET_LOCK_GIANT();
		ieee80211_new_state(ic, IEEE80211_S_ASSOC, -1);
		NET_UNLOCK_GIANT();
	}
}

/*
 * Map the 802.11 phy mode implied by a channel to the
 * corresponding hal CHANNEL_* flags.  Modes with a zero entry
 * (AUTO, FH) are rejected by the second KASSERT.
 */
static u_int
ath_chan2flags(struct ieee80211com *ic, struct ieee80211_channel *chan)
{
#define	N(a)	(sizeof(a) / sizeof(a[0]))
	static const u_int modeflags[] = {
		0,			/* IEEE80211_MODE_AUTO */
		CHANNEL_A,		/* IEEE80211_MODE_11A */
		CHANNEL_B,		/* IEEE80211_MODE_11B */
		CHANNEL_PUREG,		/* IEEE80211_MODE_11G */
		0,			/* IEEE80211_MODE_FH */
		CHANNEL_T,		/* IEEE80211_MODE_TURBO_A */
		CHANNEL_108G		/* IEEE80211_MODE_TURBO_G */
	};
	enum ieee80211_phymode mode = ieee80211_chan2mode(ic, chan);

	KASSERT(mode < N(modeflags), ("unexpected phy mode %u", mode));
	KASSERT(modeflags[mode] != 0, ("mode %u undefined", mode));
	return modeflags[mode];
#undef N
}

/*
 * if_init entry point: bring the interface up.  Resets the chip,
 * restarts the receive engine and kicks the 802.11 state machine.
 */
static void
ath_init(void *arg)
{
	struct ath_softc *sc = (struct ath_softc *) arg;
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp = sc->sc_ifp;
	struct ieee80211_node *ni;
	struct ath_hal *ah = sc->sc_ah;
	HAL_STATUS status;

	DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", __func__,
ifp->if_flags); ATH_LOCK(sc); /* * Stop anything previously setup. This is safe * whether this is the first time through or not. */ ath_stop_locked(ifp); /* * The basic interface to setting the hardware in a good * state is ``reset''. On return the hardware is known to * be powered up and with interrupts disabled. This must * be followed by initialization of the appropriate bits * and then setup of the interrupt mask. */ sc->sc_curchan.channel = ic->ic_ibss_chan->ic_freq; sc->sc_curchan.channelFlags = ath_chan2flags(ic, ic->ic_ibss_chan); if (!ath_hal_reset(ah, ic->ic_opmode, &sc->sc_curchan, AH_FALSE, &status)) { if_printf(ifp, "unable to reset hardware; hal status %u\n", status); goto done; } /* * This is needed only to setup initial state * but it's best done after a reset. */ ath_update_txpow(sc); /* * Likewise this is set during reset so update * state cached in the driver. */ sc->sc_diversity = ath_hal_getdiversity(ah); /* * Setup the hardware after reset: the key cache * is filled as needed and the receive engine is * set going. Frame transmit is handled entirely * in the frame output path; there's nothing to do * here except setup the interrupt mask. */ if (ath_startrecv(sc) != 0) { if_printf(ifp, "unable to start recv logic\n"); goto done; } /* * Enable interrupts. */ sc->sc_imask = HAL_INT_RX | HAL_INT_TX | HAL_INT_RXEOL | HAL_INT_RXORN | HAL_INT_FATAL | HAL_INT_GLOBAL; /* * Enable MIB interrupts when there are hardware phy counters. * Note we only do this (at the moment) for station mode. */ if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA) sc->sc_imask |= HAL_INT_MIB; ath_hal_intrset(ah, sc->sc_imask); ifp->if_flags |= IFF_RUNNING; ic->ic_state = IEEE80211_S_INIT; /* * The hardware should be ready to go now so it's safe * to kick the 802.11 state machine as it's likely to * immediately call back to us to send mgmt frames. 
*/ ni = ic->ic_bss; ni->ni_chan = ic->ic_ibss_chan; ath_chan_change(sc, ni->ni_chan); if (ic->ic_opmode != IEEE80211_M_MONITOR) { if (ic->ic_roaming != IEEE80211_ROAMING_MANUAL) ieee80211_new_state(ic, IEEE80211_S_SCAN, -1); } else ieee80211_new_state(ic, IEEE80211_S_RUN, -1); done: ATH_UNLOCK(sc); } static void ath_stop_locked(struct ifnet *ifp) { struct ath_softc *sc = ifp->if_softc; struct ieee80211com *ic = &sc->sc_ic; struct ath_hal *ah = sc->sc_ah; DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n", __func__, sc->sc_invalid, ifp->if_flags); ATH_LOCK_ASSERT(sc); if (ifp->if_flags & IFF_RUNNING) { /* * Shutdown the hardware and driver: * reset 802.11 state machine * turn off timers * disable interrupts * turn off the radio * clear transmit machinery * clear receive machinery * drain and release tx queues * reclaim beacon resources * power down hardware * * Note that some of this work is not possible if the * hardware is gone (invalid). */ ieee80211_new_state(ic, IEEE80211_S_INIT, -1); ifp->if_flags &= ~IFF_RUNNING; ifp->if_timer = 0; if (!sc->sc_invalid) { if (sc->sc_softled) { callout_stop(&sc->sc_ledtimer); ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon); sc->sc_blinking = 0; } ath_hal_intrset(ah, 0); } ath_draintxq(sc); if (!sc->sc_invalid) { ath_stoprecv(sc); ath_hal_phydisable(ah); } else sc->sc_rxlink = NULL; IFQ_DRV_PURGE(&ifp->if_snd); ath_beacon_free(sc); } } static void ath_stop(struct ifnet *ifp) { struct ath_softc *sc = ifp->if_softc; ATH_LOCK(sc); ath_stop_locked(ifp); if (!sc->sc_invalid) { /* * Set the chip in full sleep mode. Note that we are * careful to do this only when bringing the interface * completely to a stop. When the chip is in this state * it must be carefully woken up or references to * registers in the PCI clock domain may freeze the bus * (and system). This varies by chip and is mostly an * issue with newer parts that go to sleep more quickly. 
*/ ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP, 0); } ATH_UNLOCK(sc); } /* * Reset the hardware w/o losing operational state. This is * basically a more efficient way of doing ath_stop, ath_init, * followed by state transitions to the current 802.11 * operational state. Used to recover from various errors and * to reset or reload hardware state. */ static int ath_reset(struct ifnet *ifp) { struct ath_softc *sc = ifp->if_softc; struct ieee80211com *ic = &sc->sc_ic; struct ath_hal *ah = sc->sc_ah; struct ieee80211_channel *c; HAL_STATUS status; /* * Convert to a HAL channel description with the flags * constrained to reflect the current operating mode. */ c = ic->ic_ibss_chan; sc->sc_curchan.channel = c->ic_freq; sc->sc_curchan.channelFlags = ath_chan2flags(ic, c); ath_hal_intrset(ah, 0); /* disable interrupts */ ath_draintxq(sc); /* stop xmit side */ ath_stoprecv(sc); /* stop recv side */ /* NB: indicate channel change so we do a full reset */ if (!ath_hal_reset(ah, ic->ic_opmode, &sc->sc_curchan, AH_TRUE, &status)) if_printf(ifp, "%s: unable to reset hardware; hal status %u\n", __func__, status); ath_update_txpow(sc); /* update tx power state */ sc->sc_diversity = ath_hal_getdiversity(ah); if (ath_startrecv(sc) != 0) /* restart recv */ if_printf(ifp, "%s: unable to start recv logic\n", __func__); /* * We may be doing a reset in response to an ioctl * that changes the channel so update any state that * might change as a result. 
*/ ath_chan_change(sc, c); if (ic->ic_state == IEEE80211_S_RUN) ath_beacon_config(sc); /* restart beacons */ ath_hal_intrset(ah, sc->sc_imask); ath_start(ifp); /* restart xmit */ return 0; } static void ath_start(struct ifnet *ifp) { struct ath_softc *sc = ifp->if_softc; struct ath_hal *ah = sc->sc_ah; struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_node *ni; struct ath_buf *bf; struct mbuf *m; struct ieee80211_frame *wh; struct ether_header *eh; if ((ifp->if_flags & IFF_RUNNING) == 0 || sc->sc_invalid) return; for (;;) { /* * Grab a TX buffer and associated resources. */ ATH_TXBUF_LOCK(sc); bf = STAILQ_FIRST(&sc->sc_txbuf); if (bf != NULL) STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list); ATH_TXBUF_UNLOCK(sc); if (bf == NULL) { DPRINTF(sc, ATH_DEBUG_ANY, "%s: out of xmit buffers\n", __func__); sc->sc_stats.ast_tx_qstop++; ifp->if_flags |= IFF_OACTIVE; break; } /* * Poll the management queue for frames; they * have priority over normal data frames. */ IF_DEQUEUE(&ic->ic_mgtq, m); if (m == NULL) { /* * No data frames go out unless we're associated. */ if (ic->ic_state != IEEE80211_S_RUN) { DPRINTF(sc, ATH_DEBUG_ANY, "%s: ignore data packet, state %u\n", __func__, ic->ic_state); sc->sc_stats.ast_tx_discard++; ATH_TXBUF_LOCK(sc); STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); ATH_TXBUF_UNLOCK(sc); break; } IFQ_DRV_DEQUEUE(&ifp->if_snd, m); /* XXX: LOCK */ if (m == NULL) { ATH_TXBUF_LOCK(sc); STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); ATH_TXBUF_UNLOCK(sc); break; } /* * Find the node for the destination so we can do * things like power save and fast frames aggregation. 
*/ if (m->m_len < sizeof(struct ether_header) && (m = m_pullup(m, sizeof(struct ether_header))) == NULL) { ic->ic_stats.is_tx_nobuf++; /* XXX */ ni = NULL; goto bad; } eh = mtod(m, struct ether_header *); ni = ieee80211_find_txnode(ic, eh->ether_dhost); if (ni == NULL) { /* NB: ieee80211_find_txnode does stat+msg */ m_freem(m); goto bad; } if ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) && (m->m_flags & M_PWR_SAV) == 0) { /* * Station in power save mode; pass the frame * to the 802.11 layer and continue. We'll get * the frame back when the time is right. */ ieee80211_pwrsave(ic, ni, m); goto reclaim; } /* calculate priority so we can find the tx queue */ if (ieee80211_classify(ic, m, ni)) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard, classification failure\n", __func__); m_freem(m); goto bad; } ifp->if_opackets++; BPF_MTAP(ifp, m); /* * Encapsulate the packet in prep for transmission. */ m = ieee80211_encap(ic, m, ni); if (m == NULL) { DPRINTF(sc, ATH_DEBUG_ANY, "%s: encapsulation failure\n", __func__); sc->sc_stats.ast_tx_encap++; goto bad; } } else { /* * Hack! The referenced node pointer is in the * rcvif field of the packet header. This is * placed there by ieee80211_mgmt_output because * we need to hold the reference with the frame * and there's no other way (other than packet * tags which we consider too expensive to use) * to pass it along. 
*/ ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; m->m_pkthdr.rcvif = NULL; wh = mtod(m, struct ieee80211_frame *); if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == IEEE80211_FC0_SUBTYPE_PROBE_RESP) { /* fill time stamp */ u_int64_t tsf; u_int32_t *tstamp; tsf = ath_hal_gettsf64(ah); /* XXX: adjust 100us delay to xmit */ tsf += 100; tstamp = (u_int32_t *)&wh[1]; tstamp[0] = htole32(tsf & 0xffffffff); tstamp[1] = htole32(tsf >> 32); } sc->sc_stats.ast_tx_mgmt++; } if (ath_tx_start(sc, ni, bf, m)) { bad: ifp->if_oerrors++; reclaim: ATH_TXBUF_LOCK(sc); STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); ATH_TXBUF_UNLOCK(sc); if (ni != NULL) ieee80211_free_node(ni); continue; } sc->sc_tx_timer = 5; ifp->if_timer = 1; } } static int ath_media_change(struct ifnet *ifp) { #define IS_UP(ifp) \ ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) == (IFF_RUNNING|IFF_UP)) int error; error = ieee80211_media_change(ifp); if (error == ENETRESET) { if (IS_UP(ifp)) ath_init(ifp->if_softc); /* XXX lose error */ error = 0; } return error; #undef IS_UP } #ifdef AR_DEBUG static void ath_keyprint(const char *tag, u_int ix, const HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN]) { static const char *ciphers[] = { "WEP", "AES-OCB", "AES-CCM", "CKIP", "TKIP", "CLR", }; int i, n; printf("%s: [%02u] %-7s ", tag, ix, ciphers[hk->kv_type]); for (i = 0, n = hk->kv_len; i < n; i++) printf("%02x", hk->kv_val[i]); printf(" mac %s", ether_sprintf(mac)); if (hk->kv_type == HAL_CIPHER_TKIP) { printf(" mic "); for (i = 0; i < sizeof(hk->kv_mic); i++) printf("%02x", hk->kv_mic[i]); } printf("\n"); } #endif /* * Set a TKIP key into the hardware. This handles the * potential distribution of key state to multiple key * cache slots for TKIP. 
*/ static int ath_keyset_tkip(struct ath_softc *sc, const struct ieee80211_key *k, HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN]) { #define IEEE80211_KEY_XR (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV) static const u_int8_t zerobssid[IEEE80211_ADDR_LEN]; struct ath_hal *ah = sc->sc_ah; KASSERT(k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP, ("got a non-TKIP key, cipher %u", k->wk_cipher->ic_cipher)); KASSERT(sc->sc_splitmic, ("key cache !split")); if ((k->wk_flags & IEEE80211_KEY_XR) == IEEE80211_KEY_XR) { /* * TX key goes at first index, RX key at +32. * The hal handles the MIC keys at index+64. */ memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_mic)); KEYPRINTF(sc, k->wk_keyix, hk, zerobssid); if (!ath_hal_keyset(ah, k->wk_keyix, hk, zerobssid)) return 0; memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic)); KEYPRINTF(sc, k->wk_keyix+32, hk, mac); /* XXX delete tx key on failure? */ return ath_hal_keyset(ah, k->wk_keyix+32, hk, mac); } else if (k->wk_flags & IEEE80211_KEY_XR) { /* * TX/RX key goes at first index. * The hal handles the MIC keys are index+64. */ memcpy(hk->kv_mic, k->wk_flags & IEEE80211_KEY_XMIT ? k->wk_txmic : k->wk_rxmic, sizeof(hk->kv_mic)); KEYPRINTF(sc, k->wk_keyix, hk, mac); return ath_hal_keyset(ah, k->wk_keyix, hk, mac); } return 0; #undef IEEE80211_KEY_XR } /* * Set a net80211 key into the hardware. This handles the * potential distribution of key state to multiple key * cache slots for TKIP with hardware MIC support. 
 */
static int
ath_keyset(struct ath_softc *sc, const struct ieee80211_key *k,
	const u_int8_t mac0[IEEE80211_ADDR_LEN],
	struct ieee80211_node *bss)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	/* Map net80211 cipher numbers to HAL cipher types. */
	static const u_int8_t ciphermap[] = {
		HAL_CIPHER_WEP,		/* IEEE80211_CIPHER_WEP */
		HAL_CIPHER_TKIP,	/* IEEE80211_CIPHER_TKIP */
		HAL_CIPHER_AES_OCB,	/* IEEE80211_CIPHER_AES_OCB */
		HAL_CIPHER_AES_CCM,	/* IEEE80211_CIPHER_AES_CCM */
		(u_int8_t) -1,		/* 4 is not allocated */
		HAL_CIPHER_CKIP,	/* IEEE80211_CIPHER_CKIP */
		HAL_CIPHER_CLR,		/* IEEE80211_CIPHER_NONE */
	};
	struct ath_hal *ah = sc->sc_ah;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	u_int8_t gmac[IEEE80211_ADDR_LEN];
	const u_int8_t *mac;
	HAL_KEYVAL hk;

	memset(&hk, 0, sizeof(hk));
	/*
	 * Software crypto uses a "clear key" so non-crypto
	 * state kept in the key cache are maintained and
	 * so that rx frames have an entry to match.
	 */
	if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) {
		KASSERT(cip->ic_cipher < N(ciphermap),
			("invalid cipher type %u", cip->ic_cipher));
		hk.kv_type = ciphermap[cip->ic_cipher];
		hk.kv_len = k->wk_keylen;
		memcpy(hk.kv_val, k->wk_key, k->wk_keylen);
	} else
		hk.kv_type = HAL_CIPHER_CLR;

	if ((k->wk_flags & IEEE80211_KEY_GROUP) && sc->sc_mcastkey) {
		/*
		 * Group keys on hardware that supports multicast frame
		 * key search use a mac that is the sender's address with
		 * the high bit set instead of the app-specified address.
		 */
		IEEE80211_ADDR_COPY(gmac, bss->ni_macaddr);
		gmac[0] |= 0x80;
		mac = gmac;
	} else
		mac = mac0;

	/*
	 * NB: TKIP with h/w MIC and split key cache slots is handed
	 * off to ath_keyset_tkip; everything else occupies a single
	 * key cache entry written directly here.
	 */
	if (hk.kv_type == HAL_CIPHER_TKIP &&
	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 &&
	    sc->sc_splitmic) {
		return ath_keyset_tkip(sc, k, &hk, mac);
	} else {
		KEYPRINTF(sc, k->wk_keyix, &hk, mac);
		return ath_hal_keyset(ah, k->wk_keyix, &hk, mac);
	}
#undef N
}

/*
 * Allocate tx/rx key slots for TKIP.  We allocate two slots for
 * each key, one for decrypt/encrypt and the other for the MIC.
 */
static u_int16_t
key_alloc_2pair(struct ath_softc *sc)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	u_int i, keyix;

	KASSERT(sc->sc_splitmic, ("key cache !split"));
	/* XXX could optimize */
	/*
	 * NB: only the first quarter of the keymap is scanned; a pair
	 * occupies slots keyix and keyix+32, with the corresponding
	 * MIC slots at keyix+64 and keyix+32+64 (see the isset/setbit
	 * calls below), so all four bitmap positions must be free.
	 */
	for (i = 0; i < N(sc->sc_keymap)/4; i++) {
		u_int8_t b = sc->sc_keymap[i];
		if (b != 0xff) {
			/*
			 * One or more slots in this byte are free.
			 */
			keyix = i*NBBY;
			while (b & 1) {
		again:
				keyix++;
				b >>= 1;
			}
			/* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */
			if (isset(sc->sc_keymap, keyix+32) ||
			    isset(sc->sc_keymap, keyix+64) ||
			    isset(sc->sc_keymap, keyix+32+64)) {
				/* full pair unavailable */
				/* XXX statistic */
				if (keyix == (i+1)*NBBY) {
					/* no slots were appropriate, advance */
					continue;
				}
				goto again;
			}
			setbit(sc->sc_keymap, keyix);
			setbit(sc->sc_keymap, keyix+64);
			setbit(sc->sc_keymap, keyix+32);
			setbit(sc->sc_keymap, keyix+32+64);
			DPRINTF(sc, ATH_DEBUG_KEYCACHE,
				"%s: key pair %u,%u %u,%u\n",
				__func__, keyix, keyix+64,
				keyix+32, keyix+32+64);
			return keyix;
		}
	}
	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__);
	return IEEE80211_KEYIX_NONE;
#undef N
}

/*
 * Allocate a single key cache slot.
 */
static u_int16_t
key_alloc_single(struct ath_softc *sc)
{
#define	N(a)	(sizeof(a)/sizeof(a[0]))
	u_int i, keyix;

	/* XXX try i,i+32,i+64,i+32+64 to minimize key pair conflicts */
	for (i = 0; i < N(sc->sc_keymap); i++) {
		u_int8_t b = sc->sc_keymap[i];
		if (b != 0xff) {
			/*
			 * One or more slots are free.
			 */
			keyix = i*NBBY;
			while (b & 1)
				keyix++, b >>= 1;
			setbit(sc->sc_keymap, keyix);
			DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: key %u\n",
				__func__, keyix);
			return keyix;
		}
	}
	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of space\n", __func__);
	return IEEE80211_KEYIX_NONE;
#undef N
}

/*
 * Allocate one or more key cache slots for a unicast key.  The
 * key itself is needed only to identify the cipher.  For hardware
 * TKIP with split cipher+MIC keys we allocate two key cache slot
 * pairs so that we can setup separate TX and RX MIC keys.
Note
 * that the MIC key for a TKIP key at slot i is assumed by the
 * hardware to be at slot i+64.  This limits TKIP keys to the first
 * 64 entries.
 */
static int
ath_key_alloc(struct ieee80211com *ic, const struct ieee80211_key *k)
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	/*
	 * Group key allocation must be handled specially for
	 * parts that do not support multicast key cache search
	 * functionality.  For those parts the key id must match
	 * the h/w key index so lookups find the right key.  On
	 * parts w/ the key search facility we install the sender's
	 * mac address (with the high bit set) and let the hardware
	 * find the key w/o using the key id.  This is preferred as
	 * it permits us to support multiple users for adhoc and/or
	 * multi-station operation.
	 */
	if ((k->wk_flags & IEEE80211_KEY_GROUP) && !sc->sc_mcastkey) {
		u_int keyix;

		/* Sanity check the key is one of the global WEP slots. */
		if (!(&ic->ic_nw_keys[0] <= k &&
		      k < &ic->ic_nw_keys[IEEE80211_WEP_NKID])) {
			/* should not happen */
			DPRINTF(sc, ATH_DEBUG_KEYCACHE,
				"%s: bogus group key\n", __func__);
			return IEEE80211_KEYIX_NONE;
		}
		/* Key index mirrors the global key's position in the table. */
		keyix = k - ic->ic_nw_keys;
		/*
		 * XXX we pre-allocate the global keys so
		 * have no way to check if they've already been allocated.
		 */
		return keyix;
	}

	/*
	 * We allocate two pair for TKIP when using the h/w to do
	 * the MIC.  For everything else, including software crypto,
	 * we allocate a single entry.  Note that s/w crypto requires
	 * a pass-through slot on the 5211 and 5212.  The 5210 does
	 * not support pass-through cache entries and we map all
	 * those requests to slot 0.
	 */
	if (k->wk_flags & IEEE80211_KEY_SWCRYPT) {
		return key_alloc_single(sc);
	} else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP &&
	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic) {
		return key_alloc_2pair(sc);
	} else {
		return key_alloc_single(sc);
	}
}

/*
 * Delete an entry in the key cache allocated by ath_key_alloc.
 */
static int
ath_key_delete(struct ieee80211com *ic, const struct ieee80211_key *k)
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	struct ath_hal *ah = sc->sc_ah;
	const struct ieee80211_cipher *cip = k->wk_cipher;
	struct ieee80211_node *ni;
	u_int keyix = k->wk_keyix;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: delete key %u\n", __func__, keyix);

	ath_hal_keyreset(ah, keyix);
	/*
	 * Check the key->node map and flush any ref.
	 */
	ni = sc->sc_keyixmap[keyix];
	if (ni != NULL) {
		ieee80211_free_node(ni);
		sc->sc_keyixmap[keyix] = NULL;
	}
	/*
	 * Handle split tx/rx keying required for TKIP with h/w MIC.
	 */
	if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
	    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic) {
		ath_hal_keyreset(ah, keyix+32);		/* RX key */
		ni = sc->sc_keyixmap[keyix+32];
		if (ni != NULL) {			/* as above... */
			ieee80211_free_node(ni);
			sc->sc_keyixmap[keyix+32] = NULL;
		}
	}
	if (keyix >= IEEE80211_WEP_NKID) {
		/*
		 * Don't touch keymap entries for global keys so
		 * they are never considered for dynamic allocation.
		 */
		clrbit(sc->sc_keymap, keyix);
		if (cip->ic_cipher == IEEE80211_CIPHER_TKIP &&
		    (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 &&
		    sc->sc_splitmic) {
			clrbit(sc->sc_keymap, keyix+64);	/* TX key MIC */
			clrbit(sc->sc_keymap, keyix+32);	/* RX key */
			clrbit(sc->sc_keymap, keyix+32+64);	/* RX key MIC */
		}
	}
	/* NB: deletion always reports success to net80211 */
	return 1;
}

/*
 * Set the key cache contents for the specified key.  Key cache
 * slot(s) must already have been allocated by ath_key_alloc.
 */
static int
ath_key_set(struct ieee80211com *ic, const struct ieee80211_key *k,
	const u_int8_t mac[IEEE80211_ADDR_LEN])
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	return ath_keyset(sc, k, mac, ic->ic_bss);
}

/*
 * Block/unblock tx+rx processing while a key change is done.
 * We assume the caller serializes key management operations
 * so we only need to worry about synchronization with other
 * uses that originate in the driver.
 */
static void
ath_key_update_begin(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
#if 0
	tasklet_disable(&sc->sc_rxtq);
#endif
	/* NB: paired with the IF_UNLOCK in ath_key_update_end */
	IF_LOCK(&ifp->if_snd);		/* NB: doesn't block mgmt frames */
}

static void
ath_key_update_end(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct ath_softc *sc = ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__);
	IF_UNLOCK(&ifp->if_snd);
#if 0
	tasklet_enable(&sc->sc_rxtq);
#endif
}

/*
 * Calculate the receive filter according to the
 * operating mode and state:
 *
 * o always accept unicast, broadcast, and multicast traffic
 * o maintain current state of phy error reception (the hal
 *   may enable phy error frames for noise immunity work)
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when scanning
 */
static u_int32_t
ath_calcrxfilter(struct ath_softc *sc, enum ieee80211_state state)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
	u_int32_t rfilt;

	/* NB: only the PHYERR bit of the current h/w filter is preserved */
	rfilt = (ath_hal_getrxfilter(ah) & HAL_RX_FILTER_PHYERR)
	      | HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST;
	if (ic->ic_opmode != IEEE80211_M_STA)
		rfilt |= HAL_RX_FILTER_PROBEREQ;
	if (ic->ic_opmode != IEEE80211_M_HOSTAP &&
	    (ifp->if_flags & IFF_PROMISC))
		rfilt |= HAL_RX_FILTER_PROM;
	if (ic->ic_opmode == IEEE80211_M_STA ||
	    ic->ic_opmode == IEEE80211_M_IBSS ||
	    state == IEEE80211_S_SCAN)
		rfilt |= HAL_RX_FILTER_BEACON;
	return rfilt;
}

static void
ath_mode_init(struct ath_softc *sc)
{
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	struct ifnet *ifp = sc->sc_ifp;
u_int32_t rfilt, mfilt[2], val; u_int8_t pos; struct ifmultiaddr *ifma; /* configure rx filter */ rfilt = ath_calcrxfilter(sc, ic->ic_state); ath_hal_setrxfilter(ah, rfilt); /* configure operational mode */ ath_hal_setopmode(ah); /* * Handle any link-level address change. Note that we only * need to force ic_myaddr; any other addresses are handled * as a byproduct of the ifnet code marking the interface * down then up. * * XXX should get from lladdr instead of arpcom but that's more work */ IEEE80211_ADDR_COPY(ic->ic_myaddr, IFP2ENADDR(ifp)); ath_hal_setmac(ah, ic->ic_myaddr); /* calculate and install multicast filter */ if ((ifp->if_flags & IFF_ALLMULTI) == 0) { mfilt[0] = mfilt[1] = 0; + IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { caddr_t dl; /* calculate XOR of eight 6bit values */ dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); val = LE_READ_4(dl + 0); pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; val = LE_READ_4(dl + 3); pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; pos &= 0x3f; mfilt[pos / 32] |= (1 << (pos % 32)); } + IF_ADDR_UNLOCK(ifp); } else { mfilt[0] = mfilt[1] = ~0; } ath_hal_setmcastfilter(ah, mfilt[0], mfilt[1]); DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, MC filter %08x:%08x\n", __func__, rfilt, mfilt[0], mfilt[1]); } /* * Set the slot time based on the current setting. */ static void ath_setslottime(struct ath_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ath_hal *ah = sc->sc_ah; if (ic->ic_flags & IEEE80211_F_SHSLOT) ath_hal_setslottime(ah, HAL_SLOT_TIME_9); else ath_hal_setslottime(ah, HAL_SLOT_TIME_20); sc->sc_updateslot = OK; } /* * Callback from the 802.11 layer to update the * slot time based on the current setting. */ static void ath_updateslot(struct ifnet *ifp) { struct ath_softc *sc = ifp->if_softc; struct ieee80211com *ic = &sc->sc_ic; /* * When not coordinating the BSS, change the hardware * immediately. 
For other operation we defer the change
 * until beacon updates have propagated to the stations.
 */
	if (ic->ic_opmode == IEEE80211_M_HOSTAP)
		sc->sc_updateslot = UPDATE;
	else
		ath_setslottime(sc);
}

/*
 * Setup a h/w transmit queue for beacons.
 */
static int
ath_beaconq_setup(struct ath_hal *ah)
{
	HAL_TXQ_INFO qi;

	memset(&qi, 0, sizeof(qi));
	qi.tqi_aifs = HAL_TXQ_USEDEFAULT;
	qi.tqi_cwmin = HAL_TXQ_USEDEFAULT;
	qi.tqi_cwmax = HAL_TXQ_USEDEFAULT;
	/* NB: for dynamic turbo, don't enable any other interrupts */
	qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
	return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi);
}

/*
 * Setup the transmit queue parameters for the beacon queue.
 */
static int
ath_beaconq_config(struct ath_softc *sc)
{
	/* Convert a log2 contention-window exponent to a slot count. */
#define	ATH_EXPONENT_TO_VALUE(v)	((1<<(v))-1)
	struct ieee80211com *ic = &sc->sc_ic;
	struct ath_hal *ah = sc->sc_ah;
	HAL_TXQ_INFO qi;

	ath_hal_gettxqueueprops(ah, sc->sc_bhalq, &qi);
	if (ic->ic_opmode == IEEE80211_M_HOSTAP) {
		/*
		 * Always burst out beacon and CAB traffic.
		 */
		qi.tqi_aifs = ATH_BEACON_AIFS_DEFAULT;
		qi.tqi_cwmin = ATH_BEACON_CWMIN_DEFAULT;
		qi.tqi_cwmax = ATH_BEACON_CWMAX_DEFAULT;
	} else {
		/* NB: WME best-effort parameters drive the adhoc settings */
		struct wmeParams *wmep =
			&ic->ic_wme.wme_chanParams.cap_wmeParams[WME_AC_BE];
		/*
		 * Adhoc mode; important thing is to use 2x cwmin.
		 */
		qi.tqi_aifs = wmep->wmep_aifsn;
		qi.tqi_cwmin = 2*ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin);
		qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax);
	}

	if (!ath_hal_settxqueueprops(ah, sc->sc_bhalq, &qi)) {
		device_printf(sc->sc_dev, "unable to update parameters for "
			"beacon hardware queue!\n");
		return 0;
	} else {
		ath_hal_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */
		return 1;
	}
#undef ATH_EXPONENT_TO_VALUE
}

/*
 * Allocate and setup an initial beacon frame.
*/ static int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; struct ath_buf *bf; struct mbuf *m; int error; bf = STAILQ_FIRST(&sc->sc_bbuf); if (bf == NULL) { DPRINTF(sc, ATH_DEBUG_BEACON, "%s: no dma buffers\n", __func__); sc->sc_stats.ast_be_nombuf++; /* XXX */ return ENOMEM; /* XXX */ } /* * NB: the beacon data buffer must be 32-bit aligned; * we assume the mbuf routines will return us something * with this alignment (perhaps should assert). */ m = ieee80211_beacon_alloc(ic, ni, &sc->sc_boff); if (m == NULL) { DPRINTF(sc, ATH_DEBUG_BEACON, "%s: cannot get mbuf\n", __func__); sc->sc_stats.ast_be_nombuf++; return ENOMEM; } error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error == 0) { bf->bf_m = m; bf->bf_node = ieee80211_ref_node(ni); } else { m_freem(m); } return error; } /* * Setup the beacon frame for transmit. */ static void ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf) { #define USE_SHPREAMBLE(_ic) \ (((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\ == IEEE80211_F_SHPREAMBLE) struct ieee80211_node *ni = bf->bf_node; struct ieee80211com *ic = ni->ni_ic; struct mbuf *m = bf->bf_m; struct ath_hal *ah = sc->sc_ah; struct ath_node *an = ATH_NODE(ni); struct ath_desc *ds; int flags, antenna; u_int8_t rate; DPRINTF(sc, ATH_DEBUG_BEACON, "%s: m %p len %u\n", __func__, m, m->m_len); /* setup descriptors */ ds = bf->bf_desc; flags = HAL_TXDESC_NOACK; if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) { ds->ds_link = bf->bf_daddr; /* self-linked */ flags |= HAL_TXDESC_VEOL; /* * Let hardware handle antenna switching. */ antenna = sc->sc_txantenna; } else { ds->ds_link = 0; /* * Switch antenna every 4 beacons. * XXX assumes two antenna */ antenna = (sc->sc_stats.ast_be_xmit & 4 ? 
2 : 1); } KASSERT(bf->bf_nseg == 1, ("multi-segment beacon frame; nseg %u", bf->bf_nseg)); ds->ds_data = bf->bf_segs[0].ds_addr; /* * Calculate rate code. * XXX everything at min xmit rate */ if (USE_SHPREAMBLE(ic)) rate = an->an_tx_mgtratesp; else rate = an->an_tx_mgtrate; ath_hal_setuptxdesc(ah, ds , m->m_len + IEEE80211_CRC_LEN /* frame length */ , sizeof(struct ieee80211_frame)/* header length */ , HAL_PKT_TYPE_BEACON /* Atheros packet type */ , ni->ni_txpower /* txpower XXX */ , rate, 1 /* series 0 rate/tries */ , HAL_TXKEYIX_INVALID /* no encryption */ , antenna /* antenna mode */ , flags /* no ack, veol for beacons */ , 0 /* rts/cts rate */ , 0 /* rts/cts duration */ ); /* NB: beacon's BufLen must be a multiple of 4 bytes */ ath_hal_filltxdesc(ah, ds , roundup(m->m_len, 4) /* buffer length */ , AH_TRUE /* first segment */ , AH_TRUE /* last segment */ , ds /* first descriptor */ ); #undef USE_SHPREAMBLE } /* * Transmit a beacon frame at SWBA. Dynamic updates to the * frame contents are done as needed and the slot time is * also adjusted based on current state. */ static void ath_beacon_proc(void *arg, int pending) { struct ath_softc *sc = arg; struct ath_buf *bf = STAILQ_FIRST(&sc->sc_bbuf); struct ieee80211_node *ni = bf->bf_node; struct ieee80211com *ic = ni->ni_ic; struct ath_hal *ah = sc->sc_ah; struct mbuf *m; int ncabq, error, otherant; DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n", __func__, pending); if (ic->ic_opmode == IEEE80211_M_STA || ic->ic_opmode == IEEE80211_M_MONITOR || bf == NULL || bf->bf_m == NULL) { DPRINTF(sc, ATH_DEBUG_ANY, "%s: ic_flags=%x bf=%p bf_m=%p\n", __func__, ic->ic_flags, bf, bf ? bf->bf_m : NULL); return; } /* * Check if the previous beacon has gone out. If * not don't don't try to post another, skip this * period and wait for the next. Missed beacons * indicate a problem and should not occur. If we * miss too many consecutive beacons reset the device. 
*/ if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) { sc->sc_bmisscount++; DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: missed %u consecutive beacons\n", __func__, sc->sc_bmisscount); if (sc->sc_bmisscount > 3) /* NB: 3 is a guess */ taskqueue_enqueue(taskqueue_swi, &sc->sc_bstucktask); return; } if (sc->sc_bmisscount != 0) { DPRINTF(sc, ATH_DEBUG_BEACON, "%s: resume beacon xmit after %u misses\n", __func__, sc->sc_bmisscount); sc->sc_bmisscount = 0; } /* * Update dynamic beacon contents. If this returns * non-zero then we need to remap the memory because * the beacon frame changed size (probably because * of the TIM bitmap). */ m = bf->bf_m; ncabq = ath_hal_numtxpending(ah, sc->sc_cabq->axq_qnum); if (ieee80211_beacon_update(ic, bf->bf_node, &sc->sc_boff, m, ncabq)) { /* XXX too conservative? */ bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error != 0) { if_printf(ic->ic_ifp, "%s: bus_dmamap_load_mbuf_sg failed, error %u\n", __func__, error); return; } } /* * Handle slot time change when a non-ERP station joins/leaves * an 11g network. The 802.11 layer notifies us via callback, * we mark updateslot, then wait one beacon before effecting * the change. This gives associated stations at least one * beacon interval to note the state change. */ /* XXX locking */ if (sc->sc_updateslot == UPDATE) sc->sc_updateslot = COMMIT; /* commit next beacon */ else if (sc->sc_updateslot == COMMIT) ath_setslottime(sc); /* commit change to h/w */ /* * Check recent per-antenna transmit statistics and flip * the default antenna if noticeably more frames went out * on the non-default antenna. * XXX assumes 2 anntenae */ otherant = sc->sc_defant & 1 ? 2 : 1; if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2) ath_setdefantenna(sc, otherant); sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0; /* * Construct tx descriptor. 
*/ ath_beacon_setup(sc, bf); /* * Stop any current dma and put the new frame on the queue. * This should never fail since we check above that no frames * are still pending on the queue. */ if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) { DPRINTF(sc, ATH_DEBUG_ANY, "%s: beacon queue %u did not stop?\n", __func__, sc->sc_bhalq); } bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); /* * Enable the CAB queue before the beacon queue to * insure cab frames are triggered by this beacon. */ if (sc->sc_boff.bo_tim[4] & 1) /* NB: only at DTIM */ ath_hal_txstart(ah, sc->sc_cabq->axq_qnum); ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr); ath_hal_txstart(ah, sc->sc_bhalq); DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: TXDP[%u] = %p (%p)\n", __func__, sc->sc_bhalq, (caddr_t)bf->bf_daddr, bf->bf_desc); sc->sc_stats.ast_be_xmit++; } /* * Reset the hardware after detecting beacons have stopped. */ static void ath_bstuck_proc(void *arg, int pending) { struct ath_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n", sc->sc_bmisscount); ath_reset(ifp); } /* * Reclaim beacon resources. */ static void ath_beacon_free(struct ath_softc *sc) { struct ath_buf *bf; STAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) { if (bf->bf_m != NULL) { bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); m_freem(bf->bf_m); bf->bf_m = NULL; } if (bf->bf_node != NULL) { ieee80211_free_node(bf->bf_node); bf->bf_node = NULL; } } } /* * Configure the beacon and sleep timers. * * When operating as an AP this resets the TSF and sets * up the hardware to notify us when we need to issue beacons. 
* * When operating in station mode this sets up the beacon * timers according to the timestamp of the last received * beacon and the current TSF, configures PCF and DTIM * handling, programs the sleep registers so the hardware * will wakeup in time to receive beacons, and configures * the beacon miss handling so we'll receive a BMISS * interrupt when we stop seeing beacons from the AP * we've associated with. */ static void ath_beacon_config(struct ath_softc *sc) { #define TSF_TO_TU(_h,_l) (((_h) << 22) | ((_l) >> 10)) struct ath_hal *ah = sc->sc_ah; struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_node *ni = ic->ic_bss; u_int32_t nexttbtt, intval; /* extract tstamp from last beacon and convert to TU */ nexttbtt = TSF_TO_TU(LE_READ_4(ni->ni_tstamp.data + 4), LE_READ_4(ni->ni_tstamp.data)); /* NB: the beacon interval is kept internally in TU's */ intval = ni->ni_intval & HAL_BEACON_PERIOD; if (nexttbtt == 0) /* e.g. for ap mode */ nexttbtt = intval; else if (intval) /* NB: can be 0 for monitor mode */ nexttbtt = roundup(nexttbtt, intval); DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n", __func__, nexttbtt, intval, ni->ni_intval); if (ic->ic_opmode == IEEE80211_M_STA) { HAL_BEACON_STATE bs; u_int64_t tsf; u_int32_t tsftu; int dtimperiod, dtimcount; int cfpperiod, cfpcount; /* * Setup dtim and cfp parameters according to * last beacon we received (which may be none). */ dtimperiod = ni->ni_dtim_period; if (dtimperiod <= 0) /* NB: 0 if not known */ dtimperiod = 1; dtimcount = ni->ni_dtim_count; if (dtimcount >= dtimperiod) /* NB: sanity check */ dtimcount = 0; /* XXX? */ cfpperiod = 1; /* NB: no PCF support yet */ cfpcount = 0; #define FUDGE 2 /* * Pull nexttbtt forward to reflect the current * TSF and calculate dtim+cfp state for the result. 
*/ tsf = ath_hal_gettsf64(ah); tsftu = TSF_TO_TU((u_int32_t)(tsf>>32), (u_int32_t)tsf) + FUDGE; do { nexttbtt += intval; if (--dtimcount < 0) { dtimcount = dtimperiod - 1; if (--cfpcount < 0) cfpcount = cfpperiod - 1; } } while (nexttbtt < tsftu); #undef FUDGE memset(&bs, 0, sizeof(bs)); bs.bs_intval = intval; bs.bs_nexttbtt = nexttbtt; bs.bs_dtimperiod = dtimperiod*intval; bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval; bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod; bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod; bs.bs_cfpmaxduration = 0; #if 0 /* * The 802.11 layer records the offset to the DTIM * bitmap while receiving beacons; use it here to * enable h/w detection of our AID being marked in * the bitmap vector (to indicate frames for us are * pending at the AP). * XXX do DTIM handling in s/w to WAR old h/w bugs * XXX enable based on h/w rev for newer chips */ bs.bs_timoffset = ni->ni_timoff; #endif /* * Calculate the number of consecutive beacons to miss * before taking a BMISS interrupt. The configuration * is specified in ms, so we need to convert that to * TU's and then calculate based on the beacon interval. * Note that we clamp the result to at most 10 beacons. */ bs.bs_bmissthreshold = howmany(ic->ic_bmisstimeout, intval); if (bs.bs_bmissthreshold > 10) bs.bs_bmissthreshold = 10; else if (bs.bs_bmissthreshold <= 0) bs.bs_bmissthreshold = 1; /* * Calculate sleep duration. The configuration is * given in ms. We insure a multiple of the beacon * period is used. Also, if the sleep duration is * greater than the DTIM period then it makes senses * to make it a multiple of that. 
* * XXX fixed at 100ms */ bs.bs_sleepduration = roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval); if (bs.bs_sleepduration > bs.bs_dtimperiod) bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod); DPRINTF(sc, ATH_DEBUG_BEACON, "%s: tsf %ju tsf:tu %u intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n" , __func__ , tsf, tsftu , bs.bs_intval , bs.bs_nexttbtt , bs.bs_dtimperiod , bs.bs_nextdtim , bs.bs_bmissthreshold , bs.bs_sleepduration , bs.bs_cfpperiod , bs.bs_cfpmaxduration , bs.bs_cfpnext , bs.bs_timoffset ); ath_hal_intrset(ah, 0); ath_hal_beacontimers(ah, &bs); sc->sc_imask |= HAL_INT_BMISS; ath_hal_intrset(ah, sc->sc_imask); } else { ath_hal_intrset(ah, 0); if (nexttbtt == intval) intval |= HAL_BEACON_RESET_TSF; if (ic->ic_opmode == IEEE80211_M_IBSS) { /* * In IBSS mode enable the beacon timers but only * enable SWBA interrupts if we need to manually * prepare beacon frames. Otherwise we use a * self-linked tx descriptor and let the hardware * deal with things. */ intval |= HAL_BEACON_ENA; if (!sc->sc_hasveol) sc->sc_imask |= HAL_INT_SWBA; ath_beaconq_config(sc); } else if (ic->ic_opmode == IEEE80211_M_HOSTAP) { /* * In AP mode we enable the beacon timers and * SWBA interrupts to prepare beacon frames. */ intval |= HAL_BEACON_ENA; sc->sc_imask |= HAL_INT_SWBA; /* beacon prepare */ ath_beaconq_config(sc); } ath_hal_beaconinit(ah, nexttbtt, intval); sc->sc_bmisscount = 0; ath_hal_intrset(ah, sc->sc_imask); /* * When using a self-linked beacon descriptor in * ibss mode load it once here. 
*/ if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) ath_beacon_proc(sc, 0); } #undef TSF_TO_TU } static void ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { bus_addr_t *paddr = (bus_addr_t*) arg; KASSERT(error == 0, ("error %u on bus_dma callback", error)); *paddr = segs->ds_addr; } static int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, ath_bufhead *head, const char *name, int nbuf, int ndesc) { #define DS2PHYS(_dd, _ds) \ ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) struct ifnet *ifp = sc->sc_ifp; struct ath_desc *ds; struct ath_buf *bf; int i, bsize, error; DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n", __func__, name, nbuf, ndesc); dd->dd_name = name; dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc; /* * Setup DMA descriptor area. */ error = bus_dma_tag_create(NULL, /* parent */ PAGE_SIZE, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ dd->dd_desc_len, /* maxsize */ 1, /* nsegments */ BUS_SPACE_MAXADDR, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &dd->dd_dmat); if (error != 0) { if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name); return error; } /* allocate descriptors */ error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap); if (error != 0) { if_printf(ifp, "unable to create dmamap for %s descriptors, " "error %u\n", dd->dd_name, error); goto fail0; } error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc, BUS_DMA_NOWAIT, &dd->dd_dmamap); if (error != 0) { if_printf(ifp, "unable to alloc memory for %u %s descriptors, " "error %u\n", nbuf * ndesc, dd->dd_name, error); goto fail1; } error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, dd->dd_desc, dd->dd_desc_len, ath_load_cb, &dd->dd_desc_paddr, BUS_DMA_NOWAIT); if (error != 0) { if_printf(ifp, "unable to map %s descriptors, error %u\n", 
dd->dd_name, error); goto fail2; } ds = dd->dd_desc; DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n", __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len, (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len); /* allocate rx buffers */ bsize = sizeof(struct ath_buf) * nbuf; bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO); if (bf == NULL) { if_printf(ifp, "malloc of %s buffers failed, size %u\n", dd->dd_name, bsize); goto fail3; } dd->dd_bufptr = bf; STAILQ_INIT(head); for (i = 0; i < nbuf; i++, bf++, ds += ndesc) { bf->bf_desc = ds; bf->bf_daddr = DS2PHYS(dd, ds); error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &bf->bf_dmamap); if (error != 0) { if_printf(ifp, "unable to create dmamap for %s " "buffer %u, error %u\n", dd->dd_name, i, error); ath_descdma_cleanup(sc, dd, head); return error; } STAILQ_INSERT_TAIL(head, bf, bf_list); } return 0; fail3: bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); fail2: bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); fail1: bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); fail0: bus_dma_tag_destroy(dd->dd_dmat); memset(dd, 0, sizeof(*dd)); return error; #undef DS2PHYS } static void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd, ath_bufhead *head) { struct ath_buf *bf; struct ieee80211_node *ni; bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); bus_dma_tag_destroy(dd->dd_dmat); STAILQ_FOREACH(bf, head, bf_list) { if (bf->bf_m) { m_freem(bf->bf_m); bf->bf_m = NULL; } if (bf->bf_dmamap != NULL) { bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); bf->bf_dmamap = NULL; } ni = bf->bf_node; bf->bf_node = NULL; if (ni != NULL) { /* * Reclaim node reference. 
	 */
			ieee80211_free_node(ni);
		}
	}
	STAILQ_INIT(head);
	free(dd->dd_bufptr, M_ATHDEV);
	memset(dd, 0, sizeof(*dd));
}

/*
 * Allocate the rx, tx, and beacon descriptor/buffer sets.
 * On any failure every set allocated so far is torn down, so
 * the caller never sees a partially-initialized state.
 */
static int
ath_desc_alloc(struct ath_softc *sc)
{
	int error;

	error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf,
			"rx", ATH_RXBUF, 1);
	if (error != 0)
		return error;

	error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf,
			"tx", ATH_TXBUF, ATH_TXDESC);
	if (error != 0) {
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
		return error;
	}

	/* one beacon frame, one descriptor */
	error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf,
			"beacon", 1, 1);
	if (error != 0) {
		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
		return error;
	}
	return 0;
}

/*
 * Release all descriptor sets; a non-zero dd_desc_len marks a
 * set that was actually set up (ath_descdma_cleanup zeroes it).
 */
static void
ath_desc_free(struct ath_softc *sc)
{

	if (sc->sc_bdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf);
	if (sc->sc_txdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf);
	if (sc->sc_rxdma.dd_desc_len != 0)
		ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf);
}

/*
 * 802.11 layer callback: allocate a driver node.  The allocation
 * includes extra space after the ath_node for the rate control
 * module's per-node state (sc_rc->arc_space).
 */
static struct ieee80211_node *
ath_node_alloc(struct ieee80211_node_table *nt)
{
	struct ieee80211com *ic = nt->nt_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;
	const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space;
	struct ath_node *an;

	an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO);
	if (an == NULL) {
		/* XXX stat+msg */
		return NULL;
	}
	/* seed rssi state so the averaging filters know there is no data yet */
	an->an_avgrssi = ATH_RSSI_DUMMY_MARKER;
	an->an_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER;
	an->an_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER;
	an->an_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER;
	ath_rate_node_init(sc, an);

	DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an);
	return &an->an_node;
}

/*
 * 802.11 layer callback: free a driver node.  Tear down rate
 * control state first, then chain to the previous (saved)
 * free method to release the node itself.
 */
static void
ath_node_free(struct ieee80211_node *ni)
{
	struct ieee80211com *ic = ni->ni_ic;
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni);

	ath_rate_node_cleanup(sc, ATH_NODE(ni));
	sc->sc_node_free(ni);
}

static u_int8_t
ath_node_getrssi(const struct ieee80211_node
*ni) { #define HAL_EP_RND(x, mul) \ ((((x)%(mul)) >= ((mul)/2)) ? ((x) + ((mul) - 1)) / (mul) : (x)/(mul)) u_int32_t avgrssi = ATH_NODE_CONST(ni)->an_avgrssi; int32_t rssi; /* * When only one frame is received there will be no state in * avgrssi so fallback on the value recorded by the 802.11 layer. */ if (avgrssi != ATH_RSSI_DUMMY_MARKER) rssi = HAL_EP_RND(avgrssi, HAL_RSSI_EP_MULTIPLIER); else rssi = ni->ni_rssi; /* NB: theoretically we shouldn't need this, but be paranoid */ return rssi < 0 ? 0 : rssi > 127 ? 127 : rssi; #undef HAL_EP_RND } static int ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf) { struct ath_hal *ah = sc->sc_ah; int error; struct mbuf *m; struct ath_desc *ds; m = bf->bf_m; if (m == NULL) { /* * NB: by assigning a page to the rx dma buffer we * implicitly satisfy the Atheros requirement that * this buffer be cache-line-aligned and sized to be * multiple of the cache line size. Not doing this * causes weird stuff to happen (for the 5210 at least). */ m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { DPRINTF(sc, ATH_DEBUG_ANY, "%s: no mbuf/cluster\n", __func__); sc->sc_stats.ast_rx_nombuf++; return ENOMEM; } bf->bf_m = m; m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error != 0) { DPRINTF(sc, ATH_DEBUG_ANY, "%s: bus_dmamap_load_mbuf_sg failed; error %d\n", __func__, error); sc->sc_stats.ast_rx_busdma++; return error; } KASSERT(bf->bf_nseg == 1, ("multi-segment packet; nseg %u", bf->bf_nseg)); } bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD); /* * Setup descriptors. For receive we always terminate * the descriptor list with a self-linked entry so we'll * not get overrun under high load (as can happen with a * 5212 when ANI processing enables PHY error frames). * * To insure the last descriptor is self-linked we create * each descriptor as self-linked and add it to the end. 
	 * As each additional descriptor is added the previous self-linked
	 * entry is ``fixed'' naturally.  This should be safe even
	 * if DMA is happening.  When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list.  This insures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->bf_desc;
	ds->ds_link = bf->bf_daddr;	/* link to self */
	ds->ds_data = bf->bf_segs[0].ds_addr;
	ath_hal_setuprxdesc(ah, ds
		, m->m_len		/* buffer size */
		, 0
	);

	/* append to the h/w rx chain; sc_rxlink points at the
	 * previous descriptor's link word (NULL on first use) */
	if (sc->sc_rxlink != NULL)
		*sc->sc_rxlink = bf->bf_daddr;
	sc->sc_rxlink = &ds->ds_link;
	return 0;
}

/*
 * Extend 15-bit time stamp from rx descriptor to
 * a full 64-bit TSF using the current h/w TSF.
 */
static __inline u_int64_t
ath_extend_tsf(struct ath_hal *ah, u_int32_t rstamp)
{
	u_int64_t tsf;

	tsf = ath_hal_gettsf64(ah);
	/* if the low 15 bits of the TSF have already wrapped past the
	 * rx stamp, the frame arrived in the previous 15-bit epoch */
	if ((tsf & 0x7fff) < rstamp)
		tsf -= 0x8000;
	return ((tsf &~ 0x7fff) | rstamp);
}

/*
 * Intercept management frames to collect beacon rssi data
 * and to do ibss merges.
 */
static void
ath_recv_mgmt(struct ieee80211com *ic, struct mbuf *m,
	struct ieee80211_node *ni,
	int subtype, int rssi, u_int32_t rstamp)
{
	struct ath_softc *sc = ic->ic_ifp->if_softc;

	/*
	 * Call up first so subsequent work can use information
	 * potentially stored in the node (e.g. for ibss merge).
	 */
	sc->sc_recv_mgmt(ic, m, ni, subtype, rssi, rstamp);
	switch (subtype) {
	case IEEE80211_FC0_SUBTYPE_BEACON:
		/* update rssi statistics for use by the hal */
		ATH_RSSI_LPF(ATH_NODE(ni)->an_halstats.ns_avgbrssi, rssi);
		/* fall thru... */
	case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
		if (ic->ic_opmode == IEEE80211_M_IBSS &&
		    ic->ic_state == IEEE80211_S_RUN) {
			u_int64_t tsf = ath_extend_tsf(sc->sc_ah, rstamp);
			/*
			 * Handle ibss merge as needed; check the tsf on the
			 * frame before attempting the merge.  The 802.11 spec
			 * says the station should change it's bssid to match
			 * the oldest station with the same ssid, where oldest
			 * is determined by the tsf.
Note that hardware * reconfiguration happens through callback to * ath_newstate as the state machine will go from * RUN -> RUN when this happens. */ if (le64toh(ni->ni_tstamp.tsf) >= tsf) { DPRINTF(sc, ATH_DEBUG_STATE, "ibss merge, rstamp %u tsf %ju " "tstamp %ju\n", rstamp, (uintmax_t)tsf, (uintmax_t)ni->ni_tstamp.tsf); (void) ieee80211_ibss_merge(ni); } } break; } } /* * Set the default antenna. */ static void ath_setdefantenna(struct ath_softc *sc, u_int antenna) { struct ath_hal *ah = sc->sc_ah; /* XXX block beacon interrupts */ ath_hal_setdefantenna(ah, antenna); if (sc->sc_defant != antenna) sc->sc_stats.ast_ant_defswitch++; sc->sc_defant = antenna; sc->sc_rxotherant = 0; } static void ath_rx_proc(void *arg, int npending) { #define PA2DESC(_sc, _pa) \ ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) struct ath_softc *sc = arg; struct ath_buf *bf; struct ieee80211com *ic = &sc->sc_ic; struct ifnet *ifp = sc->sc_ifp; struct ath_hal *ah = sc->sc_ah; struct ath_desc *ds; struct mbuf *m; struct ieee80211_node *ni; struct ath_node *an; int len, type; u_int phyerr; HAL_STATUS status; NET_LOCK_GIANT(); /* XXX */ DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending); do { bf = STAILQ_FIRST(&sc->sc_rxbuf); if (bf == NULL) { /* NB: shouldn't happen */ if_printf(ifp, "%s: no buffer!\n", __func__); break; } ds = bf->bf_desc; if (ds->ds_link == bf->bf_daddr) { /* NB: never process the self-linked entry at the end */ break; } m = bf->bf_m; if (m == NULL) { /* NB: shouldn't happen */ if_printf(ifp, "%s: no mbuf!\n", __func__); continue; } /* XXX sync descriptor memory */ /* * Must provide the virtual address of the current * descriptor, the physical address, and the virtual * address of the next descriptor in the h/w chain. 
* This allows the HAL to look ahead to see if the * hardware is done with a descriptor by checking the * done bit in the following descriptor and the address * of the current descriptor the DMA engine is working * on. All this is necessary because of our use of * a self-linked list to avoid rx overruns. */ status = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr, PA2DESC(sc, ds->ds_link)); #ifdef AR_DEBUG if (sc->sc_debug & ATH_DEBUG_RECV_DESC) ath_printrxbuf(bf, status == HAL_OK); #endif if (status == HAL_EINPROGRESS) break; STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list); if (ds->ds_rxstat.rs_more) { /* * Frame spans multiple descriptors; this * cannot happen yet as we don't support * jumbograms. If not in monitor mode, * discard the frame. */ if (ic->ic_opmode != IEEE80211_M_MONITOR) { sc->sc_stats.ast_rx_toobig++; goto rx_next; } /* fall thru for monitor mode handling... */ } else if (ds->ds_rxstat.rs_status != 0) { if (ds->ds_rxstat.rs_status & HAL_RXERR_CRC) sc->sc_stats.ast_rx_crcerr++; if (ds->ds_rxstat.rs_status & HAL_RXERR_FIFO) sc->sc_stats.ast_rx_fifoerr++; if (ds->ds_rxstat.rs_status & HAL_RXERR_PHY) { sc->sc_stats.ast_rx_phyerr++; phyerr = ds->ds_rxstat.rs_phyerr & 0x1f; sc->sc_stats.ast_rx_phy[phyerr]++; goto rx_next; } if (ds->ds_rxstat.rs_status & HAL_RXERR_DECRYPT) { /* * Decrypt error. If the error occurred * because there was no hardware key, then * let the frame through so the upper layers * can process it. This is necessary for 5210 * parts which have no way to setup a ``clear'' * key cache entry. * * XXX do key cache faulting */ if (ds->ds_rxstat.rs_keyix == HAL_RXKEYIX_INVALID) goto rx_accept; sc->sc_stats.ast_rx_badcrypt++; } if (ds->ds_rxstat.rs_status & HAL_RXERR_MIC) { sc->sc_stats.ast_rx_badmic++; /* * Do minimal work required to hand off * the 802.11 header for notifcation. 
*/ /* XXX frag's and qos frames */ len = ds->ds_rxstat.rs_datalen; if (len >= sizeof (struct ieee80211_frame)) { bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTREAD); ieee80211_notify_michael_failure(ic, mtod(m, struct ieee80211_frame *), sc->sc_splitmic ? ds->ds_rxstat.rs_keyix-32 : ds->ds_rxstat.rs_keyix ); } } ifp->if_ierrors++; /* * Reject error frames, we normally don't want * to see them in monitor mode (in monitor mode * allow through packets that have crypto problems). */ if ((ds->ds_rxstat.rs_status &~ (HAL_RXERR_DECRYPT|HAL_RXERR_MIC)) || sc->sc_ic.ic_opmode != IEEE80211_M_MONITOR) goto rx_next; } rx_accept: /* * Sync and unmap the frame. At this point we're * committed to passing the mbuf somewhere so clear * bf_m; this means a new sk_buff must be allocated * when the rx descriptor is setup again to receive * another frame. */ bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); bf->bf_m = NULL; m->m_pkthdr.rcvif = ifp; len = ds->ds_rxstat.rs_datalen; m->m_pkthdr.len = m->m_len = len; sc->sc_stats.ast_ant_rx[ds->ds_rxstat.rs_antenna]++; if (sc->sc_drvbpf) { u_int8_t rix; /* * Discard anything shorter than an ack or cts. */ if (len < IEEE80211_ACK_LEN) { DPRINTF(sc, ATH_DEBUG_RECV, "%s: runt packet %d\n", __func__, len); sc->sc_stats.ast_rx_tooshort++; m_freem(m); goto rx_next; } rix = ds->ds_rxstat.rs_rate; sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags; sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate; sc->sc_rx_th.wr_antsignal = ds->ds_rxstat.rs_rssi; sc->sc_rx_th.wr_antenna = ds->ds_rxstat.rs_antenna; /* XXX TSF */ bpf_mtap2(sc->sc_drvbpf, &sc->sc_rx_th, sc->sc_rx_th_len, m); } /* * From this point on we assume the frame is at least * as large as ieee80211_frame_min; verify that. 
*/ if (len < IEEE80211_MIN_LEN) { DPRINTF(sc, ATH_DEBUG_RECV, "%s: short packet %d\n", __func__, len); sc->sc_stats.ast_rx_tooshort++; m_freem(m); goto rx_next; } if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) { ieee80211_dump_pkt(mtod(m, caddr_t), len, sc->sc_hwmap[ds->ds_rxstat.rs_rate].ieeerate, ds->ds_rxstat.rs_rssi); } m_adj(m, -IEEE80211_CRC_LEN); /* * Locate the node for sender, track state, and then * pass the (referenced) node up to the 802.11 layer * for its use. If the sender is unknown spam the * frame; it'll be dropped where it's not wanted. */ if (ds->ds_rxstat.rs_keyix != HAL_RXKEYIX_INVALID && (ni = sc->sc_keyixmap[ds->ds_rxstat.rs_keyix]) != NULL) { /* * Fast path: node is present in the key map; * grab a reference for processing the frame. */ an = ATH_NODE(ieee80211_ref_node(ni)); ATH_RSSI_LPF(an->an_avgrssi, ds->ds_rxstat.rs_rssi); type = ieee80211_input(ic, m, ni, ds->ds_rxstat.rs_rssi, ds->ds_rxstat.rs_tstamp); } else { /* * Locate the node for sender, track state, and then * pass the (referenced) node up to the 802.11 layer * for its use. */ ni = ieee80211_find_rxnode(ic, mtod(m, const struct ieee80211_frame_min *)); /* * Track rx rssi and do any rx antenna management. */ an = ATH_NODE(ni); ATH_RSSI_LPF(an->an_avgrssi, ds->ds_rxstat.rs_rssi); /* * Send frame up for processing. */ type = ieee80211_input(ic, m, ni, ds->ds_rxstat.rs_rssi, ds->ds_rxstat.rs_tstamp); if (ni != ic->ic_bss) { u_int16_t keyix; /* * If the station has a key cache slot assigned * update the key->node mapping table. */ keyix = ni->ni_ucastkey.wk_keyix; if (keyix != IEEE80211_KEYIX_NONE && sc->sc_keyixmap[keyix] == NULL) sc->sc_keyixmap[keyix] = ieee80211_ref_node(ni); } } ieee80211_free_node(ni); if (sc->sc_diversity) { /* * When using fast diversity, change the default rx * antenna if diversity chooses the other antenna 3 * times in a row. 
*/ if (sc->sc_defant != ds->ds_rxstat.rs_antenna) { if (++sc->sc_rxotherant >= 3) ath_setdefantenna(sc, ds->ds_rxstat.rs_antenna); } else sc->sc_rxotherant = 0; } if (sc->sc_softled) { /* * Blink for any data frame. Otherwise do a * heartbeat-style blink when idle. The latter * is mainly for station mode where we depend on * periodic beacon frames to trigger the poll event. */ if (type == IEEE80211_FC0_TYPE_DATA) { sc->sc_rxrate = ds->ds_rxstat.rs_rate; ath_led_event(sc, ATH_LED_RX); } else if (ticks - sc->sc_ledevent >= sc->sc_ledidle) ath_led_event(sc, ATH_LED_POLL); } rx_next: STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); } while (ath_rxbuf_init(sc, bf) == 0); /* rx signal state monitoring */ ath_hal_rxmonitor(ah, &ATH_NODE(ic->ic_bss)->an_halstats); NET_UNLOCK_GIANT(); /* XXX */ #undef PA2DESC } /* * Setup a h/w transmit queue. */ static struct ath_txq * ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) { #define N(a) (sizeof(a)/sizeof(a[0])) struct ath_hal *ah = sc->sc_ah; HAL_TXQ_INFO qi; int qnum; memset(&qi, 0, sizeof(qi)); qi.tqi_subtype = subtype; qi.tqi_aifs = HAL_TXQ_USEDEFAULT; qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; /* * Enable interrupts only for EOL and DESC conditions. * We mark tx descriptors to receive a DESC interrupt * when a tx queue gets deep; otherwise waiting for the * EOL to reap descriptors. Note that this is done to * reduce interrupt load and this only defers reaping * descriptors, never transmitting frames. Aside from * reducing interrupts this also permits more concurrency. * The only potential downside is if the tx queue backs * up in which case the top half of the kernel may backup * due to a lack of tx descriptors. 
*/ qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | TXQ_FLAG_TXDESCINT_ENABLE; qnum = ath_hal_setuptxqueue(ah, qtype, &qi); if (qnum == -1) { /* * NB: don't print a message, this happens * normally on parts with too few tx queues */ return NULL; } if (qnum >= N(sc->sc_txq)) { device_printf(sc->sc_dev, "hal qnum %u out of range, max %zu!\n", qnum, N(sc->sc_txq)); ath_hal_releasetxqueue(ah, qnum); return NULL; } if (!ATH_TXQ_SETUP(sc, qnum)) { struct ath_txq *txq = &sc->sc_txq[qnum]; txq->axq_qnum = qnum; txq->axq_depth = 0; txq->axq_intrcnt = 0; txq->axq_link = NULL; STAILQ_INIT(&txq->axq_q); ATH_TXQ_LOCK_INIT(sc, txq); sc->sc_txqsetup |= 1<sc_txq[qnum]; #undef N } /* * Setup a hardware data transmit queue for the specified * access control. The hal may not support all requested * queues in which case it will return a reference to a * previously setup queue. We record the mapping from ac's * to h/w queues for use by ath_tx_start and also track * the set of h/w queues being used to optimize work in the * transmit interrupt handler and related routines. */ static int ath_tx_setup(struct ath_softc *sc, int ac, int haltype) { #define N(a) (sizeof(a)/sizeof(a[0])) struct ath_txq *txq; if (ac >= N(sc->sc_ac2q)) { device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n", ac, N(sc->sc_ac2q)); return 0; } txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype); if (txq != NULL) { sc->sc_ac2q[ac] = txq; return 1; } else return 0; #undef N } /* * Update WME parameters for a transmit queue. 
*/ static int ath_txq_update(struct ath_softc *sc, int ac) { #define ATH_EXPONENT_TO_VALUE(v) ((1<sc_ic; struct ath_txq *txq = sc->sc_ac2q[ac]; struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; struct ath_hal *ah = sc->sc_ah; HAL_TXQ_INFO qi; ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi); qi.tqi_aifs = wmep->wmep_aifsn; qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit); if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) { device_printf(sc->sc_dev, "unable to update hardware queue " "parameters for %s traffic!\n", ieee80211_wme_acnames[ac]); return 0; } else { ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */ return 1; } #undef ATH_TXOP_TO_US #undef ATH_EXPONENT_TO_VALUE } /* * Callback from the 802.11 layer to update WME parameters. */ static int ath_wme_update(struct ieee80211com *ic) { struct ath_softc *sc = ic->ic_ifp->if_softc; return !ath_txq_update(sc, WME_AC_BE) || !ath_txq_update(sc, WME_AC_BK) || !ath_txq_update(sc, WME_AC_VI) || !ath_txq_update(sc, WME_AC_VO) ? EIO : 0; } /* * Reclaim resources for a setup queue. */ static void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) { ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum); ATH_TXQ_LOCK_DESTROY(txq); sc->sc_txqsetup &= ~(1<axq_qnum); } /* * Reclaim all tx queue resources. */ static void ath_tx_cleanup(struct ath_softc *sc) { int i; ATH_TXBUF_LOCK_DESTROY(sc); for (i = 0; i < HAL_NUM_TX_QUEUES; i++) if (ATH_TXQ_SETUP(sc, i)) ath_tx_cleanupq(sc, &sc->sc_txq[i]); } /* * Defragment an mbuf chain, returning at most maxfrags separate * mbufs+clusters. If this is not possible NULL is returned and * the original mbuf chain is left in it's present (potentially * modified) state. We use two techniques: collapsing consecutive * mbufs and replacing consecutive mbufs by a cluster. 
 */
static struct mbuf *
ath_defrag(struct mbuf *m0, int how, int maxfrags)
{
	struct mbuf *m, *n, *n2, **prev;
	u_int curfrags;

	/*
	 * Calculate the current number of frags.
	 */
	curfrags = 0;
	for (m = m0; m != NULL; m = m->m_next)
		curfrags++;
	/*
	 * First, try to collapse mbufs.  Note that we always collapse
	 * towards the front so we don't need to deal with moving the
	 * pkthdr.  This may be suboptimal if the first mbuf has much
	 * less data than the following.
	 */
	m = m0;
again:
	for (;;) {
		n = m->m_next;
		if (n == NULL)
			break;
		/* copy n's data into m's trailing space and free n;
		 * only legal if m's data area is writable */
		if ((m->m_flags & M_RDONLY) == 0 &&
		    n->m_len < M_TRAILINGSPACE(m)) {
			bcopy(mtod(n, void *), mtod(m, char *) + m->m_len,
				n->m_len);
			m->m_len += n->m_len;
			m->m_next = n->m_next;
			m_free(n);
			if (--curfrags <= maxfrags)
				return m0;
		} else
			m = n;
	}
	/* maxfrags == 1 should have been satisfied above (or is impossible) */
	KASSERT(maxfrags > 1,
		("maxfrags %u, but normal collapse failed", maxfrags));
	/*
	 * Collapse consecutive mbufs to a cluster.
	 */
	prev = &m0->m_next;		/* NB: not the first mbuf */
	while ((n = *prev) != NULL) {
		if ((n2 = n->m_next) != NULL &&
		    n->m_len + n2->m_len < MCLBYTES) {
			m = m_getcl(how, MT_DATA, 0);
			if (m == NULL)
				goto bad;
			/* merge n and n2 into the new cluster m */
			bcopy(mtod(n, void *), mtod(m, void *), n->m_len);
			bcopy(mtod(n2, void *), mtod(m, char *) + n->m_len,
				n2->m_len);
			m->m_len = n->m_len + n2->m_len;
			m->m_next = n2->m_next;
			*prev = m;
			m_free(n);
			m_free(n2);
			if (--curfrags <= maxfrags)	/* +1 cl -2 mbufs */
				return m0;
			/*
			 * Still not there, try the normal collapse
			 * again before we allocate another cluster.
			 */
			goto again;
		}
		prev = &n->m_next;
	}
	/*
	 * No place where we can collapse to a cluster; punt.
	 * This can occur if, for example, you request 2 frags
	 * but the packet requires that both be clusters (we
	 * never reallocate the first mbuf to avoid moving the
	 * packet header).
	 *
	 * NB: closing '*' + '/' was lost at the chunk seam before the
	 * 'bad:' label; restored here.
	 */
*/ bad: return NULL; } static int ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0) { #define CTS_DURATION \ ath_hal_computetxtime(ah, rt, IEEE80211_ACK_LEN, cix, AH_TRUE) #define updateCTSForBursting(_ah, _ds, _txq) \ ath_hal_updateCTSForBursting(_ah, _ds, \ _txq->axq_linkbuf != NULL ? _txq->axq_linkbuf->bf_desc : NULL, \ _txq->axq_lastdsWithCTS, _txq->axq_gatingds, \ txopLimit, CTS_DURATION) struct ieee80211com *ic = &sc->sc_ic; struct ath_hal *ah = sc->sc_ah; struct ifnet *ifp = sc->sc_ifp; const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams; int i, error, iswep, ismcast, keyix, hdrlen, pktlen, try0; u_int8_t rix, txrate, ctsrate; u_int8_t cix = 0xff; /* NB: silence compiler */ struct ath_desc *ds, *ds0; struct ath_txq *txq; struct ieee80211_frame *wh; u_int subtype, flags, ctsduration; HAL_PKT_TYPE atype; const HAL_RATE_TABLE *rt; HAL_BOOL shortPreamble; struct ath_node *an; struct mbuf *m; u_int pri; wh = mtod(m0, struct ieee80211_frame *); iswep = wh->i_fc[1] & IEEE80211_FC1_WEP; ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); hdrlen = ieee80211_anyhdrsize(wh); /* * Packet length must not include any * pad bytes; deduct them here. */ pktlen = m0->m_pkthdr.len - (hdrlen & 3); if (iswep) { const struct ieee80211_cipher *cip; struct ieee80211_key *k; /* * Construct the 802.11 header+trailer for an encrypted * frame. The only reason this can fail is because of an * unknown or unsupported cipher/key type. */ k = ieee80211_crypto_encap(ic, ni, m0); if (k == NULL) { /* * This can happen when the key is yanked after the * frame was queued. Just discard the frame; the * 802.11 layer counts failures and provides * debugging/diagnostics. */ m_freem(m0); return EIO; } /* * Adjust the packet + header lengths for the crypto * additions and calculate the h/w key index. When * a s/w mic is done the frame will have had any mic * added to it prior to entry so skb->len above will * account for it. 
Otherwise we need to add it to the * packet length. */ cip = k->wk_cipher; hdrlen += cip->ic_header; pktlen += cip->ic_header + cip->ic_trailer; if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0) pktlen += cip->ic_miclen; keyix = k->wk_keyix; /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) { /* * Use station key cache slot, if assigned. */ keyix = ni->ni_ucastkey.wk_keyix; if (keyix == IEEE80211_KEYIX_NONE) keyix = HAL_TXKEYIX_INVALID; } else keyix = HAL_TXKEYIX_INVALID; pktlen += IEEE80211_CRC_LEN; /* * Load the DMA map so any coalescing is done. This * also calculates the number of descriptors we need. */ error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error == EFBIG) { /* XXX packet requires too many descriptors */ bf->bf_nseg = ATH_TXDESC+1; } else if (error != 0) { sc->sc_stats.ast_tx_busdma++; m_freem(m0); return error; } /* * Discard null packets and check for packets that * require too many TX descriptors. We try to convert * the latter to a cluster. 
*/ if (bf->bf_nseg > ATH_TXDESC) { /* too many desc's, linearize */ sc->sc_stats.ast_tx_linear++; m = ath_defrag(m0, M_DONTWAIT, ATH_TXDESC); if (m == NULL) { m_freem(m0); sc->sc_stats.ast_tx_nombuf++; return ENOMEM; } m0 = m; error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error != 0) { sc->sc_stats.ast_tx_busdma++; m_freem(m0); return error; } KASSERT(bf->bf_nseg <= ATH_TXDESC, ("too many segments after defrag; nseg %u", bf->bf_nseg)); } else if (bf->bf_nseg == 0) { /* null packet, discard */ sc->sc_stats.ast_tx_nodata++; m_freem(m0); return EIO; } DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n", __func__, m0, pktlen); bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); bf->bf_m = m0; bf->bf_node = ni; /* NB: held reference */ /* setup descriptors */ ds = bf->bf_desc; rt = sc->sc_currates; KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); /* * NB: the 802.11 layer marks whether or not we should * use short preamble based on the current mode and * negotiated parameters. */ if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) { shortPreamble = AH_TRUE; sc->sc_stats.ast_tx_shortpre++; } else { shortPreamble = AH_FALSE; } an = ATH_NODE(ni); flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ /* * Calculate Atheros packet type from IEEE80211 packet header, * setup for rate calculations, and select h/w transmit queue. 
*/ switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { case IEEE80211_FC0_TYPE_MGT: subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) atype = HAL_PKT_TYPE_BEACON; else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) atype = HAL_PKT_TYPE_PROBE_RESP; else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM) atype = HAL_PKT_TYPE_ATIM; else atype = HAL_PKT_TYPE_NORMAL; /* XXX */ rix = 0; /* XXX lowest rate */ try0 = ATH_TXMAXTRY; if (shortPreamble) txrate = an->an_tx_mgtratesp; else txrate = an->an_tx_mgtrate; /* NB: force all management frames to highest queue */ if (ni->ni_flags & IEEE80211_NODE_QOS) { /* NB: force all management frames to highest queue */ pri = WME_AC_VO; } else pri = WME_AC_BE; flags |= HAL_TXDESC_INTREQ; /* force interrupt */ break; case IEEE80211_FC0_TYPE_CTL: atype = HAL_PKT_TYPE_PSPOLL; /* stop setting of duration */ rix = 0; /* XXX lowest rate */ try0 = ATH_TXMAXTRY; if (shortPreamble) txrate = an->an_tx_mgtratesp; else txrate = an->an_tx_mgtrate; /* NB: force all ctl frames to highest queue */ if (ni->ni_flags & IEEE80211_NODE_QOS) { /* NB: force all ctl frames to highest queue */ pri = WME_AC_VO; } else pri = WME_AC_BE; flags |= HAL_TXDESC_INTREQ; /* force interrupt */ break; case IEEE80211_FC0_TYPE_DATA: atype = HAL_PKT_TYPE_NORMAL; /* default */ /* * Data frames; consult the rate control module. */ ath_rate_findrate(sc, an, shortPreamble, pktlen, &rix, &try0, &txrate); sc->sc_txrate = txrate; /* for LED blinking */ /* * Default all non-QoS traffic to the background queue. 
*/ if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) { pri = M_WME_GETAC(m0); if (cap->cap_wmeParams[pri].wmep_noackPolicy) { flags |= HAL_TXDESC_NOACK; sc->sc_stats.ast_tx_noack++; } } else pri = WME_AC_BE; break; default: if_printf(ifp, "bogus frame type 0x%x (%s)\n", wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); /* XXX statistic */ m_freem(m0); return EIO; } txq = sc->sc_ac2q[pri]; /* * When servicing one or more stations in power-save mode * multicast frames must be buffered until after the beacon. * We use the CAB queue for that. */ if (ismcast && ic->ic_ps_sta) { txq = sc->sc_cabq; /* XXX? more bit in 802.11 frame header */ } /* * Calculate miscellaneous flags. */ if (ismcast) { flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */ sc->sc_stats.ast_tx_noack++; } else if (pktlen > ic->ic_rtsthreshold) { flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */ cix = rt->info[rix].controlRate; sc->sc_stats.ast_tx_rts++; } /* * If 802.11g protection is enabled, determine whether * to use RTS/CTS or just CTS. Note that this is only * done for OFDM unicast frames. */ if ((ic->ic_flags & IEEE80211_F_USEPROT) && rt->info[rix].phy == IEEE80211_T_OFDM && (flags & HAL_TXDESC_NOACK) == 0) { /* XXX fragments must use CCK rates w/ protection */ if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) flags |= HAL_TXDESC_RTSENA; else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) flags |= HAL_TXDESC_CTSENA; cix = rt->info[sc->sc_protrix].controlRate; sc->sc_stats.ast_tx_protect++; } /* * Calculate duration. This logically belongs in the 802.11 * layer but it lacks sufficient information to calculate it. */ if ((flags & HAL_TXDESC_NOACK) == 0 && (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) { u_int16_t dur; /* * XXX not right with fragmentation. */ if (shortPreamble) dur = rt->info[rix].spAckDuration; else dur = rt->info[rix].lpAckDuration; *(u_int16_t *)wh->i_dur = htole16(dur); } /* * Calculate RTS/CTS rate and duration if needed. 
*/ ctsduration = 0; if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) { /* * CTS transmit rate is derived from the transmit rate * by looking in the h/w rate table. We must also factor * in whether or not a short preamble is to be used. */ /* NB: cix is set above where RTS/CTS is enabled */ KASSERT(cix != 0xff, ("cix not setup")); ctsrate = rt->info[cix].rateCode; /* * Compute the transmit duration based on the frame * size and the size of an ACK frame. We call into the * HAL to do the computation since it depends on the * characteristics of the actual PHY being used. * * NB: CTS is assumed the same size as an ACK so we can * use the precalculated ACK durations. */ if (shortPreamble) { ctsrate |= rt->info[cix].shortPreamble; if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ ctsduration += rt->info[cix].spAckDuration; ctsduration += ath_hal_computetxtime(ah, rt, pktlen, rix, AH_TRUE); if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ ctsduration += rt->info[cix].spAckDuration; } else { if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ ctsduration += rt->info[cix].lpAckDuration; ctsduration += ath_hal_computetxtime(ah, rt, pktlen, rix, AH_FALSE); if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ ctsduration += rt->info[cix].lpAckDuration; } /* * Must disable multi-rate retry when using RTS/CTS. */ try0 = ATH_TXMAXTRY; } else ctsrate = 0; if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) ieee80211_dump_pkt(mtod(m0, caddr_t), m0->m_len, sc->sc_hwmap[txrate].ieeerate, -1); if (ic->ic_rawbpf) bpf_mtap(ic->ic_rawbpf, m0); if (sc->sc_drvbpf) { sc->sc_tx_th.wt_flags = sc->sc_hwmap[txrate].txflags; if (iswep) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; sc->sc_tx_th.wt_rate = sc->sc_hwmap[txrate].ieeerate; sc->sc_tx_th.wt_txpower = ni->ni_txpower; sc->sc_tx_th.wt_antenna = sc->sc_txantenna; bpf_mtap2(sc->sc_drvbpf, &sc->sc_tx_th, sc->sc_tx_th_len, m0); } /* * Determine if a tx interrupt should be generated for * this descriptor. 
We take a tx interrupt to reap * descriptors when the h/w hits an EOL condition or * when the descriptor is specifically marked to generate * an interrupt. We periodically mark descriptors in this * way to insure timely replenishing of the supply needed * for sending frames. Defering interrupts reduces system * load and potentially allows more concurrent work to be * done but if done to aggressively can cause senders to * backup. * * NB: use >= to deal with sc_txintrperiod changing * dynamically through sysctl. */ if (flags & HAL_TXDESC_INTREQ) { txq->axq_intrcnt = 0; } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) { flags |= HAL_TXDESC_INTREQ; txq->axq_intrcnt = 0; } /* * Formulate first tx descriptor with tx controls. */ /* XXX check return value? */ ath_hal_setuptxdesc(ah, ds , pktlen /* packet length */ , hdrlen /* header length */ , atype /* Atheros packet type */ , ni->ni_txpower /* txpower */ , txrate, try0 /* series 0 rate/tries */ , keyix /* key cache index */ , sc->sc_txantenna /* antenna mode */ , flags /* flags */ , ctsrate /* rts/cts rate */ , ctsduration /* rts/cts duration */ ); bf->bf_flags = flags; /* * Setup the multi-rate retry state only when we're * going to use it. This assumes ath_hal_setuptxdesc * initializes the descriptors (so we don't have to) * when the hardware supports multi-rate retry and * we don't use it. */ if (try0 != ATH_TXMAXTRY) ath_rate_setupxtxdesc(sc, an, ds, shortPreamble, rix); /* * Fillin the remainder of the descriptor info. 
*/ ds0 = ds; for (i = 0; i < bf->bf_nseg; i++, ds++) { ds->ds_data = bf->bf_segs[i].ds_addr; if (i == bf->bf_nseg - 1) ds->ds_link = 0; else ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1); ath_hal_filltxdesc(ah, ds , bf->bf_segs[i].ds_len /* segment length */ , i == 0 /* first segment */ , i == bf->bf_nseg - 1 /* last segment */ , ds0 /* first descriptor */ ); DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %d: %08x %08x %08x %08x %08x %08x\n", __func__, i, ds->ds_link, ds->ds_data, ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]); } /* * Insert the frame on the outbound list and * pass it on to the hardware. */ ATH_TXQ_LOCK(txq); if (flags & (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) { u_int32_t txopLimit = IEEE80211_TXOP_TO_US( cap->cap_wmeParams[pri].wmep_txopLimit); /* * When bursting, potentially extend the CTS duration * of a previously queued frame to cover this frame * and not exceed the txopLimit. If that can be done * then disable RTS/CTS on this frame since it's now * covered (burst extension). Otherwise we must terminate * the burst before this frame goes out so as not to * violate the WME parameters. All this is complicated * as we need to update the state of packets on the * (live) hardware queue. The logic is buried in the hal * because it's highly chip-specific. */ if (txopLimit != 0) { sc->sc_stats.ast_tx_ctsburst++; if (updateCTSForBursting(ah, ds0, txq) == 0) { /* * This frame was not covered by RTS/CTS from * the previous frame in the burst; update the * descriptor pointers so this frame is now * treated as the last frame for extending a * burst. 
*/ txq->axq_lastdsWithCTS = ds0; /* set gating Desc to final desc */ txq->axq_gatingds = (struct ath_desc *)txq->axq_link; } else sc->sc_stats.ast_tx_ctsext++; } } ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); if (txq->axq_link == NULL) { ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); DPRINTF(sc, ATH_DEBUG_XMIT, "%s: TXDP[%u] = %p (%p) depth %d\n", __func__, txq->axq_qnum, (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth); } else { *txq->axq_link = bf->bf_daddr; DPRINTF(sc, ATH_DEBUG_XMIT, "%s: link[%u](%p)=%p (%p) depth %d\n", __func__, txq->axq_qnum, txq->axq_link, (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth); } txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link; /* * The CAB queue is started from the SWBA handler since * frames only go out on DTIM and to avoid possible races. */ if (txq != sc->sc_cabq) ath_hal_txstart(ah, txq->axq_qnum); ATH_TXQ_UNLOCK(txq); return 0; #undef updateCTSForBursting #undef CTS_DURATION } /* * Process completed xmit descriptors from the specified queue. 
*/
static void
ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
{
    struct ath_hal *ah = sc->sc_ah;
    struct ieee80211com *ic = &sc->sc_ic;
    struct ath_buf *bf;
    struct ath_desc *ds, *ds0;
    struct ieee80211_node *ni;
    struct ath_node *an;
    int sr, lr, pri;
    HAL_STATUS status;

    DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n",
        __func__, txq->axq_qnum,
        (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum),
        txq->axq_link);
    for (;;) {
        ATH_TXQ_LOCK(txq);
        txq->axq_intrcnt = 0;   /* reset periodic desc intr count */
        bf = STAILQ_FIRST(&txq->axq_q);
        if (bf == NULL) {
            /* queue empty; clear link so the next insert restarts DMA */
            txq->axq_link = NULL;
            ATH_TXQ_UNLOCK(txq);
            break;
        }
        /* status lives in the last descriptor of the frame */
        ds0 = &bf->bf_desc[0];
        ds = &bf->bf_desc[bf->bf_nseg - 1];
        status = ath_hal_txprocdesc(ah, ds);
#ifdef AR_DEBUG
        if (sc->sc_debug & ATH_DEBUG_XMIT_DESC)
            ath_printtxbuf(bf, status == HAL_OK);
#endif
        if (status == HAL_EINPROGRESS) {
            /* h/w hasn't finished this frame yet; stop reaping */
            ATH_TXQ_UNLOCK(txq);
            break;
        }
        /* clear CTS-burst bookkeeping that referenced this frame */
        if (ds0 == txq->axq_lastdsWithCTS)
            txq->axq_lastdsWithCTS = NULL;
        if (ds == txq->axq_gatingds)
            txq->axq_gatingds = NULL;
        ATH_TXQ_REMOVE_HEAD(txq, bf_list);
        ATH_TXQ_UNLOCK(txq);
        ni = bf->bf_node;
        if (ni != NULL) {
            an = ATH_NODE(ni);
            if (ds->ds_txstat.ts_status == 0) {
                /* frame transmitted ok; update stats */
                u_int8_t txant = ds->ds_txstat.ts_antenna;
                sc->sc_stats.ast_ant_tx[txant]++;
                sc->sc_ant_tx[txant]++;
                if (ds->ds_txstat.ts_rate & HAL_TXSTAT_ALTRATE)
                    sc->sc_stats.ast_tx_altrate++;
                sc->sc_stats.ast_tx_rssi =
                    ds->ds_txstat.ts_rssi;
                ATH_RSSI_LPF(an->an_halstats.ns_avgtxrssi,
                    ds->ds_txstat.ts_rssi);
                pri = M_WME_GETAC(bf->bf_m);
                if (pri >= WME_AC_VO)
                    ic->ic_wme.wme_hipri_traffic++;
                ni->ni_inact = ni->ni_inact_reload;
            } else {
                if (ds->ds_txstat.ts_status & HAL_TXERR_XRETRY)
                    sc->sc_stats.ast_tx_xretries++;
                if (ds->ds_txstat.ts_status & HAL_TXERR_FIFO)
                    sc->sc_stats.ast_tx_fifoerr++;
                if (ds->ds_txstat.ts_status & HAL_TXERR_FILT)
                    sc->sc_stats.ast_tx_filtered++;
            }
            sr = ds->ds_txstat.ts_shortretry;
            lr = ds->ds_txstat.ts_longretry;
            sc->sc_stats.ast_tx_shortretry += sr;
            sc->sc_stats.ast_tx_longretry += lr;
            /*
             * Hand the descriptor to the rate control algorithm.
             */
            if ((ds->ds_txstat.ts_status & HAL_TXERR_FILT) == 0 &&
                (bf->bf_flags & HAL_TXDESC_NOACK) == 0)
                ath_rate_tx_complete(sc, an, ds, ds0);
            /*
             * Reclaim reference to node.
             *
             * NB: the node may be reclaimed here if, for example
             *     this is a DEAUTH message that was sent and the
             *     node was timed out due to inactivity.
             */
            ieee80211_free_node(ni);
        }
        bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap,
            BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
        m_freem(bf->bf_m);
        bf->bf_m = NULL;
        bf->bf_node = NULL;

        /* return the buffer to the free list */
        ATH_TXBUF_LOCK(sc);
        STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
        ATH_TXBUF_UNLOCK(sc);
    }
}

/*
 * Deferred processing of transmit interrupt; special-cased
 * for a single hardware transmit queue (e.g. 5210 and 5211).
 */
static void
ath_tx_proc_q0(void *arg, int npending)
{
    struct ath_softc *sc = arg;
    struct ifnet *ifp = sc->sc_ifp;

    ath_tx_processq(sc, &sc->sc_txq[0]);
    ath_tx_processq(sc, sc->sc_cabq);
    ifp->if_flags &= ~IFF_OACTIVE;
    sc->sc_tx_timer = 0;

    if (sc->sc_softled)
        ath_led_event(sc, ATH_LED_TX);

    /* kick the transmit path in case frames were waiting for buffers */
    ath_start(ifp);
}

/*
 * Deferred processing of transmit interrupt; special-cased
 * for four hardware queues, 0-3 (e.g. 5212 w/ WME support).
 */
static void
ath_tx_proc_q0123(void *arg, int npending)
{
    struct ath_softc *sc = arg;
    struct ifnet *ifp = sc->sc_ifp;

    /*
     * Process each active queue.
     */
    ath_tx_processq(sc, &sc->sc_txq[0]);
    ath_tx_processq(sc, &sc->sc_txq[1]);
    ath_tx_processq(sc, &sc->sc_txq[2]);
    ath_tx_processq(sc, &sc->sc_txq[3]);
    ath_tx_processq(sc, sc->sc_cabq);
    ifp->if_flags &= ~IFF_OACTIVE;
    sc->sc_tx_timer = 0;

    if (sc->sc_softled)
        ath_led_event(sc, ATH_LED_TX);

    ath_start(ifp);
}

/*
 * Deferred processing of transmit interrupt.
 */
static void
ath_tx_proc(void *arg, int npending)
{
    struct ath_softc *sc = arg;
    struct ifnet *ifp = sc->sc_ifp;
    int i;

    /*
     * Process each active queue.
     */
    /* XXX faster to read ISR_S0_S and ISR_S1_S to determine q's? */
    for (i = 0; i < HAL_NUM_TX_QUEUES; i++)
        if (ATH_TXQ_SETUP(sc, i))
            ath_tx_processq(sc, &sc->sc_txq[i]);
    ifp->if_flags &= ~IFF_OACTIVE;
    sc->sc_tx_timer = 0;

    if (sc->sc_softled)
        ath_led_event(sc, ATH_LED_TX);

    ath_start(ifp);
}

/*
 * Free every frame queued on the specified tx queue, reclaiming
 * node references and returning buffers to the free list.
 */
static void
ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq)
{
    struct ath_hal *ah = sc->sc_ah;
    struct ieee80211_node *ni;
    struct ath_buf *bf;

    /*
     * NB: this assumes output has been stopped and
     *     we do not need to block ath_tx_tasklet
     */
    for (;;) {
        ATH_TXQ_LOCK(txq);
        bf = STAILQ_FIRST(&txq->axq_q);
        if (bf == NULL) {
            txq->axq_link = NULL;
            ATH_TXQ_UNLOCK(txq);
            break;
        }
        ATH_TXQ_REMOVE_HEAD(txq, bf_list);
        ATH_TXQ_UNLOCK(txq);
#ifdef AR_DEBUG
        if (sc->sc_debug & ATH_DEBUG_RESET)
            ath_printtxbuf(bf,
                ath_hal_txprocdesc(ah, bf->bf_desc) == HAL_OK);
#endif /* AR_DEBUG */
        bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap);
        m_freem(bf->bf_m);
        bf->bf_m = NULL;
        ni = bf->bf_node;
        bf->bf_node = NULL;
        if (ni != NULL) {
            /*
             * Reclaim node reference.
             */
            ieee80211_free_node(ni);
        }
        ATH_TXBUF_LOCK(sc);
        STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list);
        ATH_TXBUF_UNLOCK(sc);
    }
}

/*
 * Stop DMA on the specified tx queue (best effort; return
 * value of the HAL call is deliberately ignored).
 */
static void
ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq)
{
    struct ath_hal *ah = sc->sc_ah;

    (void) ath_hal_stoptxdma(ah, txq->axq_qnum);
    DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n",
        __func__, txq->axq_qnum,
        (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum),
        txq->axq_link);
}

/*
 * Drain the transmit queues and reclaim resources.
*/ static void ath_draintxq(struct ath_softc *sc) { struct ath_hal *ah = sc->sc_ah; struct ifnet *ifp = sc->sc_ifp; int i; /* XXX return value */ if (!sc->sc_invalid) { /* don't touch the hardware if marked invalid */ (void) ath_hal_stoptxdma(ah, sc->sc_bhalq); DPRINTF(sc, ATH_DEBUG_RESET, "%s: beacon queue %p\n", __func__, (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq)); for (i = 0; i < HAL_NUM_TX_QUEUES; i++) if (ATH_TXQ_SETUP(sc, i)) ath_tx_stopdma(sc, &sc->sc_txq[i]); } for (i = 0; i < HAL_NUM_TX_QUEUES; i++) if (ATH_TXQ_SETUP(sc, i)) ath_tx_draintxq(sc, &sc->sc_txq[i]); ifp->if_flags &= ~IFF_OACTIVE; sc->sc_tx_timer = 0; } /* * Disable the receive h/w in preparation for a reset. */ static void ath_stoprecv(struct ath_softc *sc) { #define PA2DESC(_sc, _pa) \ ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) struct ath_hal *ah = sc->sc_ah; ath_hal_stoppcurecv(ah); /* disable PCU */ ath_hal_setrxfilter(ah, 0); /* clear recv filter */ ath_hal_stopdmarecv(ah); /* disable DMA engine */ DELAY(3000); /* 3ms is long enough for 1 frame */ #ifdef AR_DEBUG if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) { struct ath_buf *bf; printf("%s: rx queue %p, link %p\n", __func__, (caddr_t)(uintptr_t) ath_hal_getrxbuf(ah), sc->sc_rxlink); STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { struct ath_desc *ds = bf->bf_desc; HAL_STATUS status = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr, PA2DESC(sc, ds->ds_link)); if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL)) ath_printrxbuf(bf, status == HAL_OK); } } #endif sc->sc_rxlink = NULL; /* just in case */ #undef PA2DESC } /* * Enable the receive h/w following a reset. 
*/
static int
ath_startrecv(struct ath_softc *sc)
{
    struct ath_hal *ah = sc->sc_ah;
    struct ath_buf *bf;

    sc->sc_rxlink = NULL;
    /* (re)initialize every rx buffer's descriptor and dma map */
    STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) {
        int error = ath_rxbuf_init(sc, bf);
        if (error != 0) {
            DPRINTF(sc, ATH_DEBUG_RECV,
                "%s: ath_rxbuf_init failed %d\n",
                __func__, error);
            return error;
        }
    }

    bf = STAILQ_FIRST(&sc->sc_rxbuf);
    ath_hal_putrxbuf(ah, bf->bf_daddr);
    ath_hal_rxena(ah);      /* enable recv descriptors */
    ath_mode_init(sc);      /* set filters, etc. */
    ath_hal_startpcurecv(ah);   /* re-enable PCU/DMA engine */
    return 0;
}

/*
 * Update internal state after a channel change.
 */
static void
ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan)
{
    struct ieee80211com *ic = &sc->sc_ic;
    enum ieee80211_phymode mode;
    u_int16_t flags;

    /*
     * Change channels and update the h/w rate map
     * if we're switching; e.g. 11a to 11b/g.
     */
    mode = ieee80211_chan2mode(ic, chan);
    if (mode != sc->sc_curmode)
        ath_setcurmode(sc, mode);
    /*
     * Update BPF state.  NB: ethereal et. al. don't handle
     * merged flags well so pick a unique mode for their use.
     */
    if (IEEE80211_IS_CHAN_A(chan))
        flags = IEEE80211_CHAN_A;
    /* XXX 11g schizophrenia */
    else if (IEEE80211_IS_CHAN_G(chan) ||
        IEEE80211_IS_CHAN_PUREG(chan))
        flags = IEEE80211_CHAN_G;
    else
        flags = IEEE80211_CHAN_B;
    if (IEEE80211_IS_CHAN_T(chan))
        flags |= IEEE80211_CHAN_TURBO;
    sc->sc_tx_th.wt_chan_freq = sc->sc_rx_th.wr_chan_freq =
        htole16(chan->ic_freq);
    sc->sc_tx_th.wt_chan_flags = sc->sc_rx_th.wr_chan_flags =
        htole16(flags);
}

/*
 * Set/change channels.  If the channel is really being changed,
 * it's done by reseting the chip.  To accomplish this we must
 * first cleanup any pending DMA, then restart stuff after a la
 * ath_init.
 */
static int
ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan)
{
    struct ath_hal *ah = sc->sc_ah;
    struct ieee80211com *ic = &sc->sc_ic;
    HAL_CHANNEL hchan;

    /*
     * Convert to a HAL channel description with
     * the flags constrained to reflect the current
     * operating mode.
     */
    hchan.channel = chan->ic_freq;
    hchan.channelFlags = ath_chan2flags(ic, chan);

    DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz) -> %u (%u MHz)\n",
        __func__,
        ath_hal_mhz2ieee(sc->sc_curchan.channel,
            sc->sc_curchan.channelFlags),
        sc->sc_curchan.channel,
        ath_hal_mhz2ieee(hchan.channel, hchan.channelFlags), hchan.channel);
    if (hchan.channel != sc->sc_curchan.channel ||
        hchan.channelFlags != sc->sc_curchan.channelFlags) {
        HAL_STATUS status;

        /*
         * To switch channels clear any pending DMA operations;
         * wait long enough for the RX fifo to drain, reset the
         * hardware at the new frequency, and then re-enable
         * the relevant bits of the h/w.
         */
        ath_hal_intrset(ah, 0);     /* disable interrupts */
        ath_draintxq(sc);       /* clear pending tx frames */
        ath_stoprecv(sc);       /* turn off frame recv */
        if (!ath_hal_reset(ah, ic->ic_opmode, &hchan, AH_TRUE, &status)) {
            if_printf(ic->ic_ifp, "ath_chan_set: unable to reset "
                "channel %u (%u Mhz)\n",
                ieee80211_chan2ieee(ic, chan), chan->ic_freq);
            return EIO;
        }
        sc->sc_curchan = hchan;
        ath_update_txpow(sc);       /* update tx power state */
        sc->sc_diversity = ath_hal_getdiversity(ah);

        /*
         * Re-enable rx framework.
         */
        if (ath_startrecv(sc) != 0) {
            if_printf(ic->ic_ifp,
                "ath_chan_set: unable to restart recv logic\n");
            return EIO;
        }

        /*
         * Change channels and update the h/w rate map
         * if we're switching; e.g. 11a to 11b/g.
         */
        ic->ic_ibss_chan = chan;
        ath_chan_change(sc, chan);

        /*
         * Re-enable interrupts.
         */
        ath_hal_intrset(ah, sc->sc_imask);
    }
    return 0;
}

/*
 * Callout handler that advances the 802.11 layer's scan
 * while the state machine is in the SCAN state.
 */
static void
ath_next_scan(void *arg)
{
    struct ath_softc *sc = arg;
    struct ieee80211com *ic = &sc->sc_ic;

    if (ic->ic_state == IEEE80211_S_SCAN)
        ieee80211_next_scan(ic);
}

/*
 * Periodically recalibrate the PHY to account
 * for temperature/environment changes.
*/
static void
ath_calibrate(void *arg)
{
    struct ath_softc *sc = arg;
    struct ath_hal *ah = sc->sc_ah;

    sc->sc_stats.ast_per_cal++;

    DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: channel %u/%x\n",
        __func__, sc->sc_curchan.channel, sc->sc_curchan.channelFlags);

    if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) {
        /*
         * Rfgain is out of bounds, reset the chip
         * to load new gain values.
         */
        sc->sc_stats.ast_per_rfgain++;
        ath_reset(sc->sc_ifp);
    }
    if (!ath_hal_calibrate(ah, &sc->sc_curchan)) {
        DPRINTF(sc, ATH_DEBUG_ANY,
            "%s: calibration of channel %u failed\n",
            __func__, sc->sc_curchan.channel);
        sc->sc_stats.ast_per_calfail++;
    }
    /* reschedule ourselves; ath_calinterval is in seconds */
    callout_reset(&sc->sc_cal_ch, ath_calinterval * hz,
        ath_calibrate, sc);
}

/*
 * 802.11 state machine transition handler; wraps the net80211
 * sc_newstate method with driver-specific work (channel set,
 * rx filter, beacon setup, LED state, calibration timers).
 */
static int
ath_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
{
    struct ifnet *ifp = ic->ic_ifp;
    struct ath_softc *sc = ifp->if_softc;
    struct ath_hal *ah = sc->sc_ah;
    struct ieee80211_node *ni;
    int i, error;
    const u_int8_t *bssid;
    u_int32_t rfilt;
    /* LED state indexed by ieee80211_state */
    static const HAL_LED_STATE leds[] = {
        HAL_LED_INIT,   /* IEEE80211_S_INIT */
        HAL_LED_SCAN,   /* IEEE80211_S_SCAN */
        HAL_LED_AUTH,   /* IEEE80211_S_AUTH */
        HAL_LED_ASSOC,  /* IEEE80211_S_ASSOC */
        HAL_LED_RUN,    /* IEEE80211_S_RUN */
    };

    DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__,
        ieee80211_state_name[ic->ic_state],
        ieee80211_state_name[nstate]);

    callout_stop(&sc->sc_scan_ch);
    callout_stop(&sc->sc_cal_ch);
    ath_hal_setledstate(ah, leds[nstate]);  /* set LED */

    if (nstate == IEEE80211_S_INIT) {
        sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
        /*
         * NB: disable interrupts so we don't rx frames.
         */
        ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL);
        /*
         * Notify the rate control algorithm.
         */
        ath_rate_newstate(sc, nstate);
        goto done;
    }
    ni = ic->ic_bss;
    error = ath_chan_set(sc, ni->ni_chan);
    if (error != 0)
        goto bad;
    rfilt = ath_calcrxfilter(sc, nstate);
    if (nstate == IEEE80211_S_SCAN)
        bssid = ifp->if_broadcastaddr;
    else
        bssid = ni->ni_bssid;
    ath_hal_setrxfilter(ah, rfilt);
    DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s\n",
         __func__, rfilt, ether_sprintf(bssid));

    if (nstate == IEEE80211_S_RUN && ic->ic_opmode == IEEE80211_M_STA)
        ath_hal_setassocid(ah, bssid, ni->ni_associd);
    else
        ath_hal_setassocid(ah, bssid, 0);
    if (ic->ic_flags & IEEE80211_F_PRIVACY) {
        for (i = 0; i < IEEE80211_WEP_NKID; i++)
            if (ath_hal_keyisvalid(ah, i))
                ath_hal_keysetmac(ah, i, bssid);
    }

    /*
     * Notify the rate control algorithm so rates
     * are setup should ath_beacon_alloc be called.
     */
    ath_rate_newstate(sc, nstate);

    if (ic->ic_opmode == IEEE80211_M_MONITOR) {
        /* nothing to do */;
    } else if (nstate == IEEE80211_S_RUN) {
        DPRINTF(sc, ATH_DEBUG_STATE,
            "%s(RUN): ic_flags=0x%08x iv=%d bssid=%s "
            "capinfo=0x%04x chan=%d\n"
             , __func__
             , ic->ic_flags
             , ni->ni_intval
             , ether_sprintf(ni->ni_bssid)
             , ni->ni_capinfo
             , ieee80211_chan2ieee(ic, ni->ni_chan));

        switch (ic->ic_opmode) {
        case IEEE80211_M_HOSTAP:
        case IEEE80211_M_IBSS:
            /*
             * Allocate and setup the beacon frame.
             *
             * Stop any previous beacon DMA.  This may be
             * necessary, for example, when an ibss merge
             * causes reconfiguration; there will be a state
             * transition from RUN->RUN that means we may
             * be called with beacon transmission active.
             */
            ath_hal_stoptxdma(ah, sc->sc_bhalq);
            ath_beacon_free(sc);
            error = ath_beacon_alloc(sc, ni);
            if (error != 0)
                goto bad;
            break;
        case IEEE80211_M_STA:
            /*
             * Allocate a key cache slot to the station.
             */
            if ((ic->ic_flags & IEEE80211_F_PRIVACY) == 0 &&
                sc->sc_hasclrkey &&
                ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE)
                ath_setup_stationkey(ni);
            break;
        default:
            break;
        }

        /*
         * Configure the beacon and sleep timers.
         */
        ath_beacon_config(sc);
    } else {
        ath_hal_intrset(ah,
            sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS));
        sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS);
    }
done:
    /*
     * Invoke the parent method to complete the work.
     */
    error = sc->sc_newstate(ic, nstate, arg);
    /*
     * Finally, start any timers.
     */
    if (nstate == IEEE80211_S_RUN) {
        /* start periodic recalibration timer */
        callout_reset(&sc->sc_cal_ch, ath_calinterval * hz,
            ath_calibrate, sc);
    } else if (nstate == IEEE80211_S_SCAN) {
        /* start ap/neighbor scan timer */
        callout_reset(&sc->sc_scan_ch, (ath_dwelltime * hz) / 1000,
            ath_next_scan, sc);
    }
bad:
    return error;
}

/*
 * Allocate a key cache slot to the station so we can
 * setup a mapping from key index to node. The key cache
 * slot is needed for managing antenna state and for
 * compression when stations do not use crypto.  We do
 * it uniliaterally here; if crypto is employed this slot
 * will be reassigned.
 */
static void
ath_setup_stationkey(struct ieee80211_node *ni)
{
    struct ieee80211com *ic = ni->ni_ic;
    struct ath_softc *sc = ic->ic_ifp->if_softc;
    u_int16_t keyix;

    keyix = ath_key_alloc(ic, &ni->ni_ucastkey);
    if (keyix == IEEE80211_KEYIX_NONE) {
        /*
         * Key cache is full; we'll fall back to doing
         * the more expensive lookup in software.  Note
         * this also means no h/w compression.
         */
        /* XXX msg+statistic */
    } else {
        ni->ni_ucastkey.wk_keyix = keyix;
        /* NB: this will create a pass-thru key entry */
        ath_keyset(sc, &ni->ni_ucastkey, ni->ni_macaddr, ic->ic_bss);
    }
}

/*
 * Setup driver-specific state for a newly associated node.
 * Note that we're called also on a re-associate, the isnew
 * param tells us if this is the first time or not.
*/ static void ath_newassoc(struct ieee80211_node *ni, int isnew) { struct ieee80211com *ic = ni->ni_ic; struct ath_softc *sc = ic->ic_ifp->if_softc; ath_rate_newassoc(sc, ATH_NODE(ni), isnew); if (isnew && (ic->ic_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey) { KASSERT(ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE, ("new assoc with a unicast key already setup (keyix %u)", ni->ni_ucastkey.wk_keyix)); ath_setup_stationkey(ni); } } static int ath_getchannels(struct ath_softc *sc, u_int cc, HAL_BOOL outdoor, HAL_BOOL xchanmode) { struct ieee80211com *ic = &sc->sc_ic; struct ifnet *ifp = sc->sc_ifp; struct ath_hal *ah = sc->sc_ah; HAL_CHANNEL *chans; int i, ix, nchan; chans = malloc(IEEE80211_CHAN_MAX * sizeof(HAL_CHANNEL), M_TEMP, M_NOWAIT); if (chans == NULL) { if_printf(ifp, "unable to allocate channel table\n"); return ENOMEM; } if (!ath_hal_init_channels(ah, chans, IEEE80211_CHAN_MAX, &nchan, cc, HAL_MODE_ALL, outdoor, xchanmode)) { u_int32_t rd; ath_hal_getregdomain(ah, &rd); if_printf(ifp, "unable to collect channel list from hal; " "regdomain likely %u country code %u\n", rd, cc); free(chans, M_TEMP); return EINVAL; } /* * Convert HAL channels to ieee80211 ones and insert * them in the table according to their channel number. */ for (i = 0; i < nchan; i++) { HAL_CHANNEL *c = &chans[i]; ix = ath_hal_mhz2ieee(c->channel, c->channelFlags); if (ix > IEEE80211_CHAN_MAX) { if_printf(ifp, "bad hal channel %u (%u/%x) ignored\n", ix, c->channel, c->channelFlags); continue; } /* NB: flags are known to be compatible */ if (ic->ic_channels[ix].ic_freq == 0) { ic->ic_channels[ix].ic_freq = c->channel; ic->ic_channels[ix].ic_flags = c->channelFlags; } else { /* channels overlap; e.g. 
11g and 11b */ ic->ic_channels[ix].ic_flags |= c->channelFlags; } } free(chans, M_TEMP); return 0; } static void ath_led_done(void *arg) { struct ath_softc *sc = arg; sc->sc_blinking = 0; } /* * Turn the LED off: flip the pin and then set a timer so no * update will happen for the specified duration. */ static void ath_led_off(void *arg) { struct ath_softc *sc = arg; ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !sc->sc_ledon); callout_reset(&sc->sc_ledtimer, sc->sc_ledoff, ath_led_done, sc); } /* * Blink the LED according to the specified on/off times. */ static void ath_led_blink(struct ath_softc *sc, int on, int off) { DPRINTF(sc, ATH_DEBUG_LED, "%s: on %u off %u\n", __func__, on, off); ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, sc->sc_ledon); sc->sc_blinking = 1; sc->sc_ledoff = off; callout_reset(&sc->sc_ledtimer, on, ath_led_off, sc); } static void ath_led_event(struct ath_softc *sc, int event) { sc->sc_ledevent = ticks; /* time of last event */ if (sc->sc_blinking) /* don't interrupt active blink */ return; switch (event) { case ATH_LED_POLL: ath_led_blink(sc, sc->sc_hwmap[0].ledon, sc->sc_hwmap[0].ledoff); break; case ATH_LED_TX: ath_led_blink(sc, sc->sc_hwmap[sc->sc_txrate].ledon, sc->sc_hwmap[sc->sc_txrate].ledoff); break; case ATH_LED_RX: ath_led_blink(sc, sc->sc_hwmap[sc->sc_rxrate].ledon, sc->sc_hwmap[sc->sc_rxrate].ledoff); break; } } static void ath_update_txpow(struct ath_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ath_hal *ah = sc->sc_ah; u_int32_t txpow; if (sc->sc_curtxpow != ic->ic_txpowlimit) { ath_hal_settxpowlimit(ah, ic->ic_txpowlimit); /* read back in case value is clamped */ ath_hal_gettxpowlimit(ah, &txpow); ic->ic_txpowlimit = sc->sc_curtxpow = txpow; } /* * Fetch max tx power level for status requests. 
*/ ath_hal_getmaxtxpow(sc->sc_ah, &txpow); ic->ic_bss->ni_txpower = txpow; } static int ath_rate_setup(struct ath_softc *sc, u_int mode) { struct ath_hal *ah = sc->sc_ah; struct ieee80211com *ic = &sc->sc_ic; const HAL_RATE_TABLE *rt; struct ieee80211_rateset *rs; int i, maxrates; switch (mode) { case IEEE80211_MODE_11A: sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_11A); break; case IEEE80211_MODE_11B: sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_11B); break; case IEEE80211_MODE_11G: sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_11G); break; case IEEE80211_MODE_TURBO_A: sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_TURBO); break; case IEEE80211_MODE_TURBO_G: sc->sc_rates[mode] = ath_hal_getratetable(ah, HAL_MODE_108G); break; default: DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n", __func__, mode); return 0; } rt = sc->sc_rates[mode]; if (rt == NULL) return 0; if (rt->rateCount > IEEE80211_RATE_MAXSIZE) { DPRINTF(sc, ATH_DEBUG_ANY, "%s: rate table too small (%u > %u)\n", __func__, rt->rateCount, IEEE80211_RATE_MAXSIZE); maxrates = IEEE80211_RATE_MAXSIZE; } else maxrates = rt->rateCount; rs = &ic->ic_sup_rates[mode]; for (i = 0; i < maxrates; i++) rs->rs_rates[i] = rt->info[i].dot11Rate; rs->rs_nrates = maxrates; return 1; } static void ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode) { #define N(a) (sizeof(a)/sizeof(a[0])) /* NB: on/off times from the Atheros NDIS driver, w/ permission */ static const struct { u_int rate; /* tx/rx 802.11 rate */ u_int16_t timeOn; /* LED on time (ms) */ u_int16_t timeOff; /* LED off time (ms) */ } blinkrates[] = { { 108, 40, 10 }, { 96, 44, 11 }, { 72, 50, 13 }, { 48, 57, 14 }, { 36, 67, 16 }, { 24, 80, 20 }, { 22, 100, 25 }, { 18, 133, 34 }, { 12, 160, 40 }, { 10, 200, 50 }, { 6, 240, 58 }, { 4, 267, 66 }, { 2, 400, 100 }, { 0, 500, 130 }, }; const HAL_RATE_TABLE *rt; int i, j; memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap)); rt = sc->sc_rates[mode]; KASSERT(rt != NULL, 
("no h/w rate set for phy mode %u", mode)); for (i = 0; i < rt->rateCount; i++) sc->sc_rixmap[rt->info[i].dot11Rate & IEEE80211_RATE_VAL] = i; memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap)); for (i = 0; i < 32; i++) { u_int8_t ix = rt->rateCodeToIndex[i]; if (ix == 0xff) { sc->sc_hwmap[i].ledon = (500 * hz) / 1000; sc->sc_hwmap[i].ledoff = (130 * hz) / 1000; continue; } sc->sc_hwmap[i].ieeerate = rt->info[ix].dot11Rate & IEEE80211_RATE_VAL; sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD; if (rt->info[ix].shortPreamble || rt->info[ix].phy == IEEE80211_T_OFDM) sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE; /* NB: receive frames include FCS */ sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags | IEEE80211_RADIOTAP_F_FCS; /* setup blink rate table to avoid per-packet lookup */ for (j = 0; j < N(blinkrates)-1; j++) if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate) break; /* NB: this uses the last entry if the rate isn't found */ /* XXX beware of overlow */ sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000; sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000; } sc->sc_currates = rt; sc->sc_curmode = mode; /* * All protection frames are transmited at 2Mb/s for * 11g, otherwise at 1Mb/s. * XXX select protection rate index from rate table. */ sc->sc_protrix = (mode == IEEE80211_MODE_11G ? 1 : 0); /* NB: caller is responsible for reseting rate control state */ #undef N } #ifdef AR_DEBUG static void ath_printrxbuf(struct ath_buf *bf, int done) { struct ath_desc *ds; int i; for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) { printf("R%d (%p %p) %08x %08x %08x %08x %08x %08x %c\n", i, ds, (struct ath_desc *)bf->bf_daddr + i, ds->ds_link, ds->ds_data, ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1], !done ? ' ' : (ds->ds_rxstat.rs_status == 0) ? 
'*' : '!'); } } static void ath_printtxbuf(struct ath_buf *bf, int done) { struct ath_desc *ds; int i; for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) { printf("T%d (%p %p) %08x %08x %08x %08x %08x %08x %08x %08x %c\n", i, ds, (struct ath_desc *)bf->bf_daddr + i, ds->ds_link, ds->ds_data, ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3], !done ? ' ' : (ds->ds_txstat.ts_status == 0) ? '*' : '!'); } } #endif /* AR_DEBUG */ static void ath_watchdog(struct ifnet *ifp) { struct ath_softc *sc = ifp->if_softc; struct ieee80211com *ic = &sc->sc_ic; ifp->if_timer = 0; if ((ifp->if_flags & IFF_RUNNING) == 0 || sc->sc_invalid) return; if (sc->sc_tx_timer) { if (--sc->sc_tx_timer == 0) { if_printf(ifp, "device timeout\n"); ath_reset(ifp); ifp->if_oerrors++; sc->sc_stats.ast_watchdog++; } else ifp->if_timer = 1; } ieee80211_watchdog(ic); } /* * Diagnostic interface to the HAL. This is used by various * tools to do things like retrieve register contents for * debugging. The mechanism is intentionally opaque so that * it can change frequently w/o concern for compatiblity. */ static int ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad) { struct ath_hal *ah = sc->sc_ah; u_int id = ad->ad_id & ATH_DIAG_ID; void *indata = NULL; void *outdata = NULL; u_int32_t insize = ad->ad_in_size; u_int32_t outsize = ad->ad_out_size; int error = 0; if (ad->ad_id & ATH_DIAG_IN) { /* * Copy in data. */ indata = malloc(insize, M_TEMP, M_NOWAIT); if (indata == NULL) { error = ENOMEM; goto bad; } error = copyin(ad->ad_in_data, indata, insize); if (error) goto bad; } if (ad->ad_id & ATH_DIAG_DYN) { /* * Allocate a buffer for the results (otherwise the HAL * returns a pointer to a buffer where we can read the * results). Note that we depend on the HAL leaving this * pointer for us to use below in reclaiming the buffer; * may want to be more defensive. 
*/ outdata = malloc(outsize, M_TEMP, M_NOWAIT); if (outdata == NULL) { error = ENOMEM; goto bad; } } if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) { if (outsize < ad->ad_out_size) ad->ad_out_size = outsize; if (outdata != NULL) error = copyout(outdata, ad->ad_out_data, ad->ad_out_size); } else { error = EINVAL; } bad: if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL) free(indata, M_TEMP); if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL) free(outdata, M_TEMP); return error; } static int ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { #define IS_RUNNING(ifp) \ ((ifp->if_flags & (IFF_RUNNING|IFF_UP)) == (IFF_RUNNING|IFF_UP)) struct ath_softc *sc = ifp->if_softc; struct ieee80211com *ic = &sc->sc_ic; struct ifreq *ifr = (struct ifreq *)data; int error = 0; ATH_LOCK(sc); switch (cmd) { case SIOCSIFFLAGS: if (IS_RUNNING(ifp)) { /* * To avoid rescanning another access point, * do not call ath_init() here. Instead, * only reflect promisc mode settings. */ ath_mode_init(sc); } else if (ifp->if_flags & IFF_UP) { /* * Beware of being called during attach/detach * to reset promiscuous mode. In that case we * will still be marked UP but not RUNNING. * However trying to re-init the interface * is the wrong thing to do as we've already * torn down much of our state. There's * probably a better way to deal with this. */ if (!sc->sc_invalid && ic->ic_bss != NULL) ath_init(sc); /* XXX lose error */ } else ath_stop_locked(ifp); break; case SIOCADDMULTI: case SIOCDELMULTI: /* * The upper layer has already installed/removed * the multicast address(es), just recalculate the * multicast filter for the card. 
*/ if (ifp->if_flags & IFF_RUNNING) ath_mode_init(sc); break; case SIOCGATHSTATS: /* NB: embed these numbers to get a consistent view */ sc->sc_stats.ast_tx_packets = ifp->if_opackets; sc->sc_stats.ast_rx_packets = ifp->if_ipackets; sc->sc_stats.ast_rx_rssi = ieee80211_getrssi(ic); ATH_UNLOCK(sc); /* * NB: Drop the softc lock in case of a page fault; * we'll accept any potential inconsisentcy in the * statistics. The alternative is to copy the data * to a local structure. */ return copyout(&sc->sc_stats, ifr->ifr_data, sizeof (sc->sc_stats)); case SIOCGATHDIAG: error = ath_ioctl_diag(sc, (struct ath_diag *) ifr); break; default: error = ieee80211_ioctl(ic, cmd, data); if (error == ENETRESET) { if (IS_RUNNING(ifp) && ic->ic_roaming != IEEE80211_ROAMING_MANUAL) ath_init(sc); /* XXX lose error */ error = 0; } if (error == ERESTART) error = IS_RUNNING(ifp) ? ath_reset(ifp) : 0; break; } ATH_UNLOCK(sc); return error; #undef IS_RUNNING } static int ath_sysctl_slottime(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; u_int slottime = ath_hal_getslottime(sc->sc_ah); int error; error = sysctl_handle_int(oidp, &slottime, 0, req); if (error || !req->newptr) return error; return !ath_hal_setslottime(sc->sc_ah, slottime) ? EINVAL : 0; } static int ath_sysctl_acktimeout(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; u_int acktimeout = ath_hal_getacktimeout(sc->sc_ah); int error; error = sysctl_handle_int(oidp, &acktimeout, 0, req); if (error || !req->newptr) return error; return !ath_hal_setacktimeout(sc->sc_ah, acktimeout) ? EINVAL : 0; } static int ath_sysctl_ctstimeout(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; u_int ctstimeout = ath_hal_getctstimeout(sc->sc_ah); int error; error = sysctl_handle_int(oidp, &ctstimeout, 0, req); if (error || !req->newptr) return error; return !ath_hal_setctstimeout(sc->sc_ah, ctstimeout) ? 
EINVAL : 0; } static int ath_sysctl_softled(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; int softled = sc->sc_softled; int error; error = sysctl_handle_int(oidp, &softled, 0, req); if (error || !req->newptr) return error; softled = (softled != 0); if (softled != sc->sc_softled) { if (softled) { /* NB: handle any sc_ledpin change */ ath_hal_gpioCfgOutput(sc->sc_ah, sc->sc_ledpin); ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !sc->sc_ledon); } sc->sc_softled = softled; } return 0; } static int ath_sysctl_rxantenna(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; u_int defantenna = ath_hal_getdefantenna(sc->sc_ah); int error; error = sysctl_handle_int(oidp, &defantenna, 0, req); if (!error && req->newptr) ath_hal_setdefantenna(sc->sc_ah, defantenna); return error; } static int ath_sysctl_diversity(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; u_int diversity = ath_hal_getdiversity(sc->sc_ah); int error; error = sysctl_handle_int(oidp, &diversity, 0, req); if (error || !req->newptr) return error; if (!ath_hal_setdiversity(sc->sc_ah, diversity)) return EINVAL; sc->sc_diversity = diversity; return 0; } static int ath_sysctl_diag(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; u_int32_t diag; int error; if (!ath_hal_getdiag(sc->sc_ah, &diag)) return EINVAL; error = sysctl_handle_int(oidp, &diag, 0, req); if (error || !req->newptr) return error; return !ath_hal_setdiag(sc->sc_ah, diag) ? EINVAL : 0; } static int ath_sysctl_tpscale(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; struct ifnet *ifp = sc->sc_ifp; u_int32_t scale; int error; ath_hal_gettpscale(sc->sc_ah, &scale); error = sysctl_handle_int(oidp, &scale, 0, req); if (error || !req->newptr) return error; return !ath_hal_settpscale(sc->sc_ah, scale) ? 
EINVAL : ath_reset(ifp);
}

/*
 * Sysctl handler: enable/disable per-packet transmit power control (TPC).
 */
static int
ath_sysctl_tpc(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int tpc = ath_hal_gettpc(sc->sc_ah);
	int error;

	error = sysctl_handle_int(oidp, &tpc, 0, req);
	if (error || !req->newptr)
		return error;
	return !ath_hal_settpc(sc->sc_ah, tpc) ? EINVAL : 0;
}

/*
 * Attach the per-device sysctl tree.  Hardware-dependent knobs
 * (diversity, TPC) are only exported when the HAL reports the
 * capability is present.
 */
static void
ath_sysctlattach(struct ath_softc *sc)
{
	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev);
	struct ath_hal *ah = sc->sc_ah;

	ath_hal_getcountrycode(sc->sc_ah, &sc->sc_countrycode);
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"countrycode", CTLFLAG_RD, &sc->sc_countrycode, 0,
		"EEPROM country code");
	ath_hal_getregdomain(sc->sc_ah, &sc->sc_regdomain);
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"regdomain", CTLFLAG_RD, &sc->sc_regdomain, 0,
		"EEPROM regdomain code");
	sc->sc_debug = ath_debug;
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"debug", CTLFLAG_RW, &sc->sc_debug, 0,
		"control debugging printfs");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"slottime", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_slottime, "I", "802.11 slot time (us)");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"acktimeout", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_acktimeout, "I", "802.11 ACK timeout (us)");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"ctstimeout", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_ctstimeout, "I", "802.11 CTS timeout (us)");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"softled", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_softled, "I", "enable/disable software LED support");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"ledpin", CTLFLAG_RW, &sc->sc_ledpin, 0,
		"GPIO pin connected to LED");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"ledon", CTLFLAG_RW, &sc->sc_ledon, 0,
		"setting to turn LED on");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"ledidle", CTLFLAG_RW, &sc->sc_ledidle, 0,
		"idle time for inactivity LED (ticks)");
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"txantenna", CTLFLAG_RW, &sc->sc_txantenna, 0,
		"tx antenna (0=auto)");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"rxantenna", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_rxantenna, "I", "default/rx antenna");
	if (ath_hal_hasdiversity(ah))
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"diversity", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_diversity, "I", "antenna diversity");
	sc->sc_txintrperiod = ATH_TXINTR_PERIOD;
	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"txintrperiod", CTLFLAG_RW, &sc->sc_txintrperiod, 0,
		"tx descriptor batching");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"diag", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_diag, "I", "h/w diagnostic control");
	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
		"tpscale", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
		ath_sysctl_tpscale, "I", "tx power scaling");
	if (ath_hal_hastpc(ah))
		SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
			"tpc", CTLTYPE_INT | CTLFLAG_RW, sc, 0,
			ath_sysctl_tpc, "I", "enable/disable per-packet TPC");
}

/*
 * Attach the radiotap BPF tap and pre-fill the constant parts of the
 * tx/rx radiotap headers.
 */
static void
ath_bpfattach(struct ath_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	bpfattach2(ifp, DLT_IEEE802_11_RADIO,
		sizeof(struct ieee80211_frame) + sizeof(sc->sc_tx_th),
		&sc->sc_drvbpf);
	/*
	 * Initialize constant fields.
	 * XXX make header lengths a multiple of 32-bits so subsequent
	 *     headers are properly aligned; this is a kludge to keep
	 *     certain applications happy.
	 *
	 * NB: the channel is setup each time we transition to the
	 *     RUN state to avoid filling it in for each frame.
	 */
	sc->sc_tx_th_len = roundup(sizeof(sc->sc_tx_th), sizeof(u_int32_t));
	sc->sc_tx_th.wt_ihdr.it_len = htole16(sc->sc_tx_th_len);
	sc->sc_tx_th.wt_ihdr.it_present = htole32(ATH_TX_RADIOTAP_PRESENT);

	sc->sc_rx_th_len = roundup(sizeof(sc->sc_rx_th), sizeof(u_int32_t));
	sc->sc_rx_th.wr_ihdr.it_len = htole16(sc->sc_rx_th_len);
	sc->sc_rx_th.wr_ihdr.it_present = htole32(ATH_RX_RADIOTAP_PRESENT);
}

/*
 * Announce various information on device/driver attach.
 */
static void
ath_announce(struct ath_softc *sc)
{
#define	HAL_MODE_DUALBAND	(HAL_MODE_11A|HAL_MODE_11B)
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	u_int modes, cc;

	if_printf(ifp, "mac %d.%d phy %d.%d",
		ah->ah_macVersion, ah->ah_macRev,
		ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf);
	/*
	 * Print radio revision(s).  We check the wireless modes
	 * to avoid falsely printing revs for inoperable parts.
	 * Dual-band radio revs are returned in the 5Ghz rev number.
	 */
	ath_hal_getcountrycode(ah, &cc);
	modes = ath_hal_getwirelessmodes(ah, cc);
	if ((modes & HAL_MODE_DUALBAND) == HAL_MODE_DUALBAND) {
		if (ah->ah_analog5GhzRev && ah->ah_analog2GhzRev)
			printf(" 5ghz radio %d.%d 2ghz radio %d.%d",
				ah->ah_analog5GhzRev >> 4,
				ah->ah_analog5GhzRev & 0xf,
				ah->ah_analog2GhzRev >> 4,
				ah->ah_analog2GhzRev & 0xf);
		else
			printf(" radio %d.%d",
				ah->ah_analog5GhzRev >> 4,
				ah->ah_analog5GhzRev & 0xf);
	} else
		printf(" radio %d.%d",
			ah->ah_analog5GhzRev >> 4, ah->ah_analog5GhzRev & 0xf);
	printf("\n");
	if (bootverbose) {
		int i;
		/* Report the WME AC -> hardware queue mapping. */
		for (i = 0; i <= WME_AC_VO; i++) {
			struct ath_txq *txq = sc->sc_ac2q[i];
			if_printf(ifp, "Use hw queue %u for %s traffic\n",
				txq->axq_qnum, ieee80211_wme_acnames[i]);
		}
		if_printf(ifp, "Use hw queue %u for CAB traffic\n",
			sc->sc_cabq->axq_qnum);
		if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq);
	}
#undef HAL_MODE_DUALBAND
}
Index: stable/6/sys/dev/awi/awi.c
===================================================================
--- stable/6/sys/dev/awi/awi.c	(revision 149421)
+++ stable/6/sys/dev/awi/awi.c
(revision 149422) @@ -1,2204 +1,2208 @@ /* $NetBSD: awi.c,v 1.62 2004/01/16 14:13:15 onoe Exp $ */ /*- * Copyright (c) 1999,2000,2001 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Bill Sommerfeld * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the NetBSD * Foundation, Inc. and its contributors. * 4. Neither the name of The NetBSD Foundation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /* * Driver for AMD 802.11 firmware. * Uses am79c930 chip driver to talk to firmware running on the am79c930. * * More-or-less a generic ethernet-like if driver, with 802.11 gorp added. */ /* * todo: * - flush tx queue on resynch. * - clear oactive on "down". * - rewrite copy-into-mbuf code * - mgmt state machine gets stuck retransmitting assoc requests. * - multicast filter. * - fix device reset so it's more likely to work * - show status goo through ifmedia. * * more todo: * - deal with more 802.11 frames. * - send reassoc request * - deal with reassoc response * - send/deal with disassociation * - deal with "full" access points (no room for me). * - power save mode * * later: * - SSID preferences * - need ioctls for poking at the MIBs * - implement ad-hoc mode (including bss creation). * - decide when to do "ad hoc" vs. infrastructure mode (IFF_LINK flags?) * (focus on inf. mode since that will be needed for ietf) * - deal with DH vs. FH versions of the card * - deal with faster cards (2mb/s) * - ?WEP goo (mmm, rc4) (it looks not particularly useful). * - ifmedia revision. * - common 802.11 mibish things. * - common 802.11 media layer. */ /* * Driver for AMD 802.11 PCnetMobile firmware. * Uses am79c930 chip driver to talk to firmware running on the am79c930. * * The initial version of the driver was written by * Bill Sommerfeld . 
* Then the driver module completely rewritten to support cards with DS phy * and to support adhoc mode by Atsushi Onoe */ #include #ifdef __NetBSD__ __KERNEL_RCSID(0, "$NetBSD: awi.c,v 1.62 2004/01/16 14:13:15 onoe Exp $"); #endif #ifdef __FreeBSD__ __FBSDID("$FreeBSD$"); #endif #include "opt_inet.h" #ifdef __NetBSD__ #include "bpfilter.h" #endif #ifdef __FreeBSD__ #define NBPFILTER 1 #endif #include #include #include #include #include #include #include #include #include #include #ifdef __FreeBSD__ #include #endif #ifdef __NetBSD__ #include #endif #include #include #ifdef __NetBSD__ #include #endif #ifdef __FreeBSD__ #include #include #endif #include #include #include #ifdef __NetBSD__ #include #endif #if NBPFILTER > 0 #include #endif #include #include #ifdef __NetBSD__ #include #include #include #include #endif #ifdef __FreeBSD__ #include #include #include #include #endif #ifdef __FreeBSD__ static void awi_init0(void *); #endif static int awi_init(struct ifnet *); static void awi_stop(struct ifnet *, int); static void awi_start(struct ifnet *); static void awi_watchdog(struct ifnet *); static int awi_ioctl(struct ifnet *, u_long, caddr_t); static int awi_media_change(struct ifnet *); static void awi_media_status(struct ifnet *, struct ifmediareq *); static int awi_mode_init(struct awi_softc *); static void awi_rx_int(struct awi_softc *); static void awi_tx_int(struct awi_softc *); static struct mbuf *awi_devget(struct awi_softc *, u_int32_t, u_int16_t); static int awi_hw_init(struct awi_softc *); static int awi_init_mibs(struct awi_softc *); static int awi_mib(struct awi_softc *, u_int8_t, u_int8_t, int); static int awi_cmd(struct awi_softc *, u_int8_t, int); static int awi_cmd_wait(struct awi_softc *); static void awi_cmd_done(struct awi_softc *); static int awi_next_txd(struct awi_softc *, int, u_int32_t *, u_int32_t *); static int awi_lock(struct awi_softc *); static void awi_unlock(struct awi_softc *); static int awi_intr_lock(struct awi_softc *); static void 
awi_intr_unlock(struct awi_softc *); static int awi_newstate(struct ieee80211com *, enum ieee80211_state, int); static void awi_recv_mgmt(struct ieee80211com *, struct mbuf *, struct ieee80211_node *, int, int, u_int32_t); static int awi_send_mgmt(struct ieee80211com *, struct ieee80211_node *, int, int); static struct mbuf *awi_ether_encap(struct awi_softc *, struct mbuf *); static struct mbuf *awi_ether_modcap(struct awi_softc *, struct mbuf *); /* unaligned little endian access */ #define LE_READ_2(p) \ ((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8)) #define LE_READ_4(p) \ ((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8) | \ (((u_int8_t *)(p))[2] << 16) | (((u_int8_t *)(p))[3] << 24)) #define LE_WRITE_2(p, v) \ ((((u_int8_t *)(p))[0] = (((u_int32_t)(v) ) & 0xff)), \ (((u_int8_t *)(p))[1] = (((u_int32_t)(v) >> 8) & 0xff))) #define LE_WRITE_4(p, v) \ ((((u_int8_t *)(p))[0] = (((u_int32_t)(v) ) & 0xff)), \ (((u_int8_t *)(p))[1] = (((u_int32_t)(v) >> 8) & 0xff)), \ (((u_int8_t *)(p))[2] = (((u_int32_t)(v) >> 16) & 0xff)), \ (((u_int8_t *)(p))[3] = (((u_int32_t)(v) >> 24) & 0xff))) struct awi_chanset awi_chanset[] = { /* PHY type domain min max def */ { AWI_PHY_TYPE_FH, AWI_REG_DOMAIN_JP, 6, 17, 6 }, { AWI_PHY_TYPE_FH, AWI_REG_DOMAIN_ES, 0, 26, 1 }, { AWI_PHY_TYPE_FH, AWI_REG_DOMAIN_FR, 0, 32, 1 }, { AWI_PHY_TYPE_FH, AWI_REG_DOMAIN_US, 0, 77, 1 }, { AWI_PHY_TYPE_FH, AWI_REG_DOMAIN_CA, 0, 77, 1 }, { AWI_PHY_TYPE_FH, AWI_REG_DOMAIN_EU, 0, 77, 1 }, { AWI_PHY_TYPE_DS, AWI_REG_DOMAIN_JP, 14, 14, 14 }, { AWI_PHY_TYPE_DS, AWI_REG_DOMAIN_ES, 10, 11, 10 }, { AWI_PHY_TYPE_DS, AWI_REG_DOMAIN_FR, 10, 13, 10 }, { AWI_PHY_TYPE_DS, AWI_REG_DOMAIN_US, 1, 11, 3 }, { AWI_PHY_TYPE_DS, AWI_REG_DOMAIN_CA, 1, 11, 3 }, { AWI_PHY_TYPE_DS, AWI_REG_DOMAIN_EU, 1, 13, 3 }, { 0, 0 } }; #ifdef __FreeBSD__ devclass_t awi_devclass; #if __FreeBSD_version < 500043 static char *ether_sprintf(u_int8_t *); static char * ether_sprintf(u_int8_t *enaddr) { static char strbuf[18]; 
sprintf(strbuf, "%6D", enaddr, ":"); return strbuf; } #endif #if 0 /* ALTQ */ #define IFQ_PURGE(ifq) IF_DRAIN(ifq) #define IF_POLL(ifq, m) ((m) = (ifq)->ifq_head) #define IFQ_POLL(ifq, m) IF_POLL((ifq), (m)) #define IFQ_DEQUEUE(ifq, m) IF_DEQUEUE((ifq), (m)) #endif #endif #ifdef AWI_DEBUG int awi_debug = 0; #define DPRINTF(X) if (awi_debug) printf X #define DPRINTF2(X) if (awi_debug > 1) printf X #else #define DPRINTF(X) #define DPRINTF2(X) #endif int awi_attach(struct awi_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ifnet *ifp = AC2IFP(&sc->sc_arp); int s, i, error, nrate; int mword; enum ieee80211_phymode mode; s = splnet(); sc->sc_busy = 1; sc->sc_attached = 0; sc->sc_substate = AWI_ST_NONE; if ((error = awi_hw_init(sc)) != 0) { sc->sc_invalid = 1; splx(s); return error; } error = awi_init_mibs(sc); if (error != 0) { sc->sc_invalid = 1; splx(s); return error; } ifp->if_softc = sc; ifp->if_flags = #ifdef IFF_NOTRAILERS IFF_NOTRAILERS | #endif IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST | IFF_NEEDSGIANT; ifp->if_ioctl = awi_ioctl; ifp->if_start = awi_start; ifp->if_watchdog = awi_watchdog; #ifdef __NetBSD__ ifp->if_init = awi_init; ifp->if_stop = awi_stop; IFQ_SET_READY(&ifp->if_snd); memcpy(ifp->if_xname, sc->sc_dev.dv_xname, IFNAMSIZ); #endif #ifdef __FreeBSD__ ifp->if_init = awi_init0; ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; if_initname(ifp, device_get_name(sc->sc_dev), device_get_unit(sc->sc_dev)); #endif ic->ic_ifp = ifp; ic->ic_caps = IEEE80211_C_WEP | IEEE80211_C_IBSS | IEEE80211_C_HOSTAP; if (sc->sc_mib_phy.IEEE_PHY_Type == AWI_PHY_TYPE_FH) { ic->ic_phytype = IEEE80211_T_FH; mode = IEEE80211_MODE_FH; } else { ic->ic_phytype = IEEE80211_T_DS; ic->ic_caps |= IEEE80211_C_AHDEMO; mode = IEEE80211_MODE_11B; } ic->ic_opmode = IEEE80211_M_STA; nrate = sc->sc_mib_phy.aSuprt_Data_Rates[1]; memcpy(ic->ic_sup_rates[mode].rs_rates, sc->sc_mib_phy.aSuprt_Data_Rates + 2, nrate); ic->ic_sup_rates[mode].rs_nrates = nrate; IEEE80211_ADDR_COPY(ic->ic_myaddr, 
sc->sc_mib_addr.aMAC_Address); printf("%s: IEEE802.11 %s (firmware %s)\n", ifp->if_xname, (ic->ic_phytype == IEEE80211_T_FH) ? "FH" : "DS", sc->sc_banner); #ifdef __NetBSD__ if_attach(ifp); #endif ieee80211_ifattach(ic); sc->sc_newstate = ic->ic_newstate; ic->ic_newstate = awi_newstate; sc->sc_recv_mgmt = ic->ic_recv_mgmt; ic->ic_recv_mgmt = awi_recv_mgmt; sc->sc_send_mgmt = ic->ic_send_mgmt; ic->ic_send_mgmt = awi_send_mgmt; ieee80211_media_init(ic, awi_media_change, awi_media_status); /* Melco compatibility mode. */ #define ADD(s, o) ifmedia_add(&ic->ic_media, \ IFM_MAKEWORD(IFM_IEEE80211, (s), (o), 0), 0, NULL) ADD(IFM_AUTO, IFM_FLAG0); for (i = 0; i < nrate; i++) { mword = ieee80211_rate2media(ic, ic->ic_sup_rates[mode].rs_rates[i], mode); if (mword == 0) continue; ADD(mword, IFM_FLAG0); } #undef ADD #ifdef __NetBSD__ if ((sc->sc_sdhook = shutdownhook_establish(awi_shutdown, sc)) == NULL) printf("%s: WARNING: unable to establish shutdown hook\n", ifp->if_xname); if ((sc->sc_powerhook = powerhook_establish(awi_power, sc)) == NULL) printf("%s: WARNING: unable to establish power hook\n", ifp->if_xname); #endif sc->sc_attached = 1; splx(s); /* ready to accept ioctl */ awi_unlock(sc); ieee80211_announce(ic); return 0; } int awi_detach(struct awi_softc *sc) { struct ifnet *ifp = AC2IFP(&sc->sc_arp); int s; if (!sc->sc_attached) return 0; s = splnet(); sc->sc_invalid = 1; awi_stop(ifp, 1); while (sc->sc_sleep_cnt > 0) { wakeup(sc); (void)tsleep(sc, PWAIT, "awidet", 1); } sc->sc_attached = 0; ieee80211_ifdetach(&sc->sc_ic); #ifdef __NetBSD__ if_detach(ifp); shutdownhook_disestablish(sc->sc_sdhook); powerhook_disestablish(sc->sc_powerhook); #endif splx(s); return 0; } #ifdef __NetBSD__ int awi_activate(struct device *self, enum devact act) { struct awi_softc *sc = (struct awi_softc *)self; struct ifnet *ifp = AC2IFP(&sc->sc_arp); int s, error = 0; s = splnet(); switch (act) { case DVACT_ACTIVATE: error = EOPNOTSUPP; break; case DVACT_DEACTIVATE: sc->sc_invalid = 1; 
if_deactivate(ifp); break; } splx(s); return error; } void awi_power(int why, void *arg) { struct awi_softc *sc = arg; struct ifnet *ifp = AC2IFP(&sc->sc_arp); int s; int ocansleep; DPRINTF(("awi_power: %d\n", why)); s = splnet(); ocansleep = sc->sc_cansleep; sc->sc_cansleep = 0; switch (why) { case PWR_SUSPEND: case PWR_STANDBY: awi_stop(ifp, 1); break; case PWR_RESUME: if (ifp->if_flags & IFF_UP) { awi_init(ifp); (void)awi_intr(sc); /* make sure */ } break; case PWR_SOFTSUSPEND: case PWR_SOFTSTANDBY: case PWR_SOFTRESUME: break; } sc->sc_cansleep = ocansleep; splx(s); } #endif /* __NetBSD__ */ void awi_shutdown(void *arg) { struct awi_softc *sc = arg; struct ifnet *ifp = AC2IFP(&sc->sc_arp); if (sc->sc_attached) awi_stop(ifp, 1); } int awi_intr(void *arg) { struct awi_softc *sc = arg; u_int16_t status; int handled = 0, ocansleep; #ifdef AWI_DEBUG static const char *intname[] = { "CMD", "RX", "TX", "SCAN_CMPLT", "CFP_START", "DTIM", "CFP_ENDING", "GROGGY", "TXDATA", "TXBCAST", "TXPS", "TXCF", "TXMGT", "#13", "RXDATA", "RXMGT" }; #endif if (!sc->sc_enabled || !sc->sc_enab_intr || sc->sc_invalid) { DPRINTF(("awi_intr: stray interrupt: " "enabled %d enab_intr %d invalid %d\n", sc->sc_enabled, sc->sc_enab_intr, sc->sc_invalid)); return 0; } am79c930_gcr_setbits(&sc->sc_chip, AM79C930_GCR_DISPWDN | AM79C930_GCR_ECINT); awi_write_1(sc, AWI_DIS_PWRDN, 1); ocansleep = sc->sc_cansleep; sc->sc_cansleep = 0; for (;;) { if (awi_intr_lock(sc) != 0) break; status = awi_read_1(sc, AWI_INTSTAT); awi_write_1(sc, AWI_INTSTAT, 0); awi_write_1(sc, AWI_INTSTAT, 0); status |= awi_read_1(sc, AWI_INTSTAT2) << 8; awi_write_1(sc, AWI_INTSTAT2, 0); DELAY(10); awi_intr_unlock(sc); if (!sc->sc_cmd_inprog) status &= ~AWI_INT_CMD; /* make sure */ if (status == 0) break; #ifdef AWI_DEBUG if (awi_debug > 1) { int i; printf("awi_intr: status 0x%04x", status); for (i = 0; i < sizeof(intname)/sizeof(intname[0]); i++) { if (status & (1 << i)) printf(" %s", intname[i]); } printf("\n"); } #endif handled 
= 1; if (status & AWI_INT_RX) awi_rx_int(sc); if (status & AWI_INT_TX) awi_tx_int(sc); if (status & AWI_INT_CMD) awi_cmd_done(sc); if (status & AWI_INT_SCAN_CMPLT) { if (sc->sc_ic.ic_state == IEEE80211_S_SCAN && sc->sc_substate == AWI_ST_NONE) ieee80211_next_scan(&sc->sc_ic); } } sc->sc_cansleep = ocansleep; am79c930_gcr_clearbits(&sc->sc_chip, AM79C930_GCR_DISPWDN); awi_write_1(sc, AWI_DIS_PWRDN, 0); return handled; } #ifdef __FreeBSD__ static void awi_init0(void *arg) { struct awi_softc *sc = arg; (void)awi_init(AC2IFP(&sc->sc_arp)); } #endif static int awi_init(struct ifnet *ifp) { struct awi_softc *sc = ifp->if_softc; struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_node *ni = ic->ic_bss; struct ieee80211_rateset *rs; int error, rate, i; DPRINTF(("awi_init: enabled=%d\n", sc->sc_enabled)); if (sc->sc_enabled) { awi_stop(ifp, 0); } else { if (sc->sc_enable) (*sc->sc_enable)(sc); sc->sc_enabled = 1; if ((error = awi_hw_init(sc)) != 0) { if (sc->sc_disable) (*sc->sc_disable)(sc); sc->sc_enabled = 0; return error; } } ic->ic_state = IEEE80211_S_INIT; ic->ic_flags &= ~IEEE80211_F_IBSSON; switch (ic->ic_opmode) { case IEEE80211_M_STA: sc->sc_mib_local.Network_Mode = 1; sc->sc_mib_local.Acting_as_AP = 0; break; case IEEE80211_M_IBSS: ic->ic_flags |= IEEE80211_F_IBSSON; /* FALLTHRU */ case IEEE80211_M_AHDEMO: sc->sc_mib_local.Network_Mode = 0; sc->sc_mib_local.Acting_as_AP = 0; break; case IEEE80211_M_HOSTAP: sc->sc_mib_local.Network_Mode = 1; sc->sc_mib_local.Acting_as_AP = 1; break; case IEEE80211_M_MONITOR: return ENODEV; } #if 0 IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(ifp->if_sadl)); #endif memset(&sc->sc_mib_mac.aDesired_ESS_ID, 0, AWI_ESS_ID_SIZE); sc->sc_mib_mac.aDesired_ESS_ID[0] = IEEE80211_ELEMID_SSID; sc->sc_mib_mac.aDesired_ESS_ID[1] = ic->ic_des_esslen; memcpy(&sc->sc_mib_mac.aDesired_ESS_ID[2], ic->ic_des_essid, ic->ic_des_esslen); /* configure basic rate */ if (ic->ic_phytype == IEEE80211_T_FH) rs = &ic->ic_sup_rates[IEEE80211_MODE_FH]; else rs = 
&ic->ic_sup_rates[IEEE80211_MODE_11B]; if (ic->ic_fixed_rate != IEEE80211_FIXED_RATE_NONE) { rate = rs->rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL; } else { rate = 0; for (i = 0; i < rs->rs_nrates; i++) { if ((rs->rs_rates[i] & IEEE80211_RATE_BASIC) && rate < (rs->rs_rates[i] & IEEE80211_RATE_VAL)) rate = rs->rs_rates[i] & IEEE80211_RATE_VAL; } } rate *= 5; LE_WRITE_2(&sc->sc_mib_mac.aStation_Basic_Rate, rate); if ((error = awi_mode_init(sc)) != 0) { DPRINTF(("awi_init: awi_mode_init failed %d\n", error)); awi_stop(ifp, 1); return error; } /* start transmitter */ sc->sc_txdone = sc->sc_txnext = sc->sc_txbase; awi_write_4(sc, sc->sc_txbase + AWI_TXD_START, 0); awi_write_4(sc, sc->sc_txbase + AWI_TXD_NEXT, 0); awi_write_4(sc, sc->sc_txbase + AWI_TXD_LENGTH, 0); awi_write_1(sc, sc->sc_txbase + AWI_TXD_RATE, 0); awi_write_4(sc, sc->sc_txbase + AWI_TXD_NDA, 0); awi_write_4(sc, sc->sc_txbase + AWI_TXD_NRA, 0); awi_write_1(sc, sc->sc_txbase + AWI_TXD_STATE, 0); awi_write_4(sc, AWI_CA_TX_DATA, sc->sc_txbase); awi_write_4(sc, AWI_CA_TX_MGT, 0); awi_write_4(sc, AWI_CA_TX_BCAST, 0); awi_write_4(sc, AWI_CA_TX_PS, 0); awi_write_4(sc, AWI_CA_TX_CF, 0); if ((error = awi_cmd(sc, AWI_CMD_INIT_TX, AWI_WAIT)) != 0) { DPRINTF(("awi_init: failed to start transmitter: %d\n", error)); awi_stop(ifp, 1); return error; } /* start receiver */ if ((error = awi_cmd(sc, AWI_CMD_INIT_RX, AWI_WAIT)) != 0) { DPRINTF(("awi_init: failed to start receiver: %d\n", error)); awi_stop(ifp, 1); return error; } sc->sc_rxdoff = awi_read_4(sc, AWI_CA_IRX_DATA_DESC); sc->sc_rxmoff = awi_read_4(sc, AWI_CA_IRX_PS_DESC); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; ic->ic_state = IEEE80211_S_INIT; if (ic->ic_opmode == IEEE80211_M_AHDEMO || ic->ic_opmode == IEEE80211_M_HOSTAP) { ni->ni_chan = ic->ic_ibss_chan; ni->ni_intval = ic->ic_lintval; ni->ni_rssi = 0; ni->ni_rstamp = 0; memset(&ni->ni_tstamp, 0, sizeof(ni->ni_tstamp)); ni->ni_rates = ic->ic_sup_rates[ieee80211_chan2mode(ic, 
ni->ni_chan)]; IEEE80211_ADDR_COPY(ni->ni_macaddr, ic->ic_myaddr); if (ic->ic_opmode == IEEE80211_M_HOSTAP) { IEEE80211_ADDR_COPY(ni->ni_bssid, ic->ic_myaddr); ni->ni_esslen = ic->ic_des_esslen; memcpy(ni->ni_essid, ic->ic_des_essid, ni->ni_esslen); ni->ni_capinfo = IEEE80211_CAPINFO_ESS; if (ic->ic_phytype == IEEE80211_T_FH) { ni->ni_fhdwell = 200; /* XXX */ ni->ni_fhindex = 1; } } else { ni->ni_capinfo = IEEE80211_CAPINFO_IBSS; memset(ni->ni_bssid, 0, IEEE80211_ADDR_LEN); ni->ni_esslen = 0; } if (ic->ic_flags & IEEE80211_F_PRIVACY) ni->ni_capinfo |= IEEE80211_CAPINFO_PRIVACY; if (ic->ic_opmode != IEEE80211_M_AHDEMO) ic->ic_flags |= IEEE80211_F_SIBSS; ic->ic_state = IEEE80211_S_SCAN; /*XXX*/ sc->sc_substate = AWI_ST_NONE; ieee80211_new_state(ic, IEEE80211_S_RUN, -1); } else { /* XXX check sc->sc_cur_chan */ ni->ni_chan = &ic->ic_channels[sc->sc_cur_chan]; ieee80211_new_state(ic, IEEE80211_S_SCAN, -1); } return 0; } static void awi_stop(struct ifnet *ifp, int disable) { struct awi_softc *sc = ifp->if_softc; if (!sc->sc_enabled) return; DPRINTF(("awi_stop(%d)\n", disable)); ieee80211_new_state(&sc->sc_ic, IEEE80211_S_INIT, -1); if (!sc->sc_invalid) { if (sc->sc_cmd_inprog) (void)awi_cmd_wait(sc); (void)awi_cmd(sc, AWI_CMD_KILL_RX, AWI_WAIT); sc->sc_cmd_inprog = AWI_CMD_FLUSH_TX; awi_write_1(sc, AWI_CA_FTX_DATA, 1); awi_write_1(sc, AWI_CA_FTX_MGT, 0); awi_write_1(sc, AWI_CA_FTX_BCAST, 0); awi_write_1(sc, AWI_CA_FTX_PS, 0); awi_write_1(sc, AWI_CA_FTX_CF, 0); (void)awi_cmd(sc, AWI_CMD_FLUSH_TX, AWI_WAIT); } ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE); ifp->if_timer = 0; sc->sc_tx_timer = sc->sc_rx_timer = 0; if (sc->sc_rxpend != NULL) { m_freem(sc->sc_rxpend); sc->sc_rxpend = NULL; } IFQ_PURGE(&ifp->if_snd); if (disable) { if (!sc->sc_invalid) am79c930_gcr_setbits(&sc->sc_chip, AM79C930_GCR_CORESET); if (sc->sc_disable) (*sc->sc_disable)(sc); sc->sc_enabled = 0; } } static void awi_start(struct ifnet *ifp) { struct awi_softc *sc = ifp->if_softc; struct ieee80211com *ic 
= &sc->sc_ic; struct ieee80211_node *ni; struct ieee80211_frame *wh; struct ether_header *eh; struct mbuf *m, *m0; int len, dowep; u_int32_t txd, frame, ntxd; u_int8_t rate; if (!sc->sc_enabled || sc->sc_invalid) return; for (;;) { txd = sc->sc_txnext; IF_POLL(&ic->ic_mgtq, m0); dowep = 0; ni = NULL; if (m0 != NULL) { len = m0->m_pkthdr.len; if (awi_next_txd(sc, len, &frame, &ntxd)) { ifp->if_flags |= IFF_OACTIVE; break; } IF_DEQUEUE(&ic->ic_mgtq, m0); ni = (struct ieee80211_node *) m0->m_pkthdr.rcvif; m0->m_pkthdr.rcvif = NULL; } else { if (ic->ic_state != IEEE80211_S_RUN) break; IFQ_POLL(&ifp->if_snd, m0); if (m0 == NULL) break; /* * Need to calculate the real length to determine * if the transmit buffer has a room for the packet. */ len = m0->m_pkthdr.len + sizeof(struct ieee80211_frame); if (!(ifp->if_flags & IFF_LINK0) && !sc->sc_adhoc_ap) len += sizeof(struct llc) - sizeof(struct ether_header); if (ic->ic_flags & IEEE80211_F_PRIVACY) { /* XXX other crypto */ dowep = 1; len += IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN + IEEE80211_WEP_CRCLEN; } if (awi_next_txd(sc, len, &frame, &ntxd)) { ifp->if_flags |= IFF_OACTIVE; break; } IFQ_DEQUEUE(&ifp->if_snd, m0); #if NBPFILTER > 0 if (ifp->if_bpf) bpf_mtap(ifp->if_bpf, m0); #endif if ((ifp->if_flags & IFF_LINK0) || sc->sc_adhoc_ap) m0 = awi_ether_encap(sc, m0); else { if (m0->m_len < sizeof(struct ether_header) && ((m0 = m_pullup(m0, sizeof(struct ether_header)))) == NULL) { ifp->if_oerrors++; continue; } eh = mtod(m0, struct ether_header *); ni = ieee80211_find_txnode(ic, eh->ether_dhost); if (ni == NULL) goto bad; if ((ni->ni_flags & IEEE80211_NODE_PWR_MGT) && (m0->m_flags & M_PWR_SAV) == 0) { ieee80211_pwrsave(ic, ni, m0); continue; } m0 = ieee80211_encap(ic, m0, ni); } if (m0 == NULL) goto bad; wh = mtod(m0, struct ieee80211_frame *); if (!IEEE80211_IS_MULTICAST(wh->i_addr1) && (ic->ic_opmode == IEEE80211_M_HOSTAP || ic->ic_opmode == IEEE80211_M_IBSS) && sc->sc_adhoc_ap == 0 && (ifp->if_flags & IFF_LINK0) == 0 && 
(wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_DATA && ni == NULL) { bad: if (m0 != NULL) m_freem(m0); ifp->if_oerrors++; if (ni != NULL) ieee80211_free_node(ni); continue; } ifp->if_opackets++; } #if NBPFILTER > 0 if (ic->ic_rawbpf) bpf_mtap(ic->ic_rawbpf, m0); #endif if (dowep) { struct ieee80211_key *k; k = ieee80211_crypto_encap(ic, ni, m0); if (k == NULL) { if (ni != NULL) ieee80211_free_node(ni); m_freem(m0); continue; } } #ifdef DIAGNOSTIC if (m0->m_pkthdr.len != len) { if_printf(ifp, "length %d should be %d\n", m0->m_pkthdr.len, len); m_freem(m0); ifp->if_oerrors++; if (ni != NULL) ieee80211_free_node(ni); continue; } #endif if ((ifp->if_flags & IFF_DEBUG) && (ifp->if_flags & IFF_LINK2)) ieee80211_dump_pkt(m0->m_data, m0->m_len, ic->ic_bss->ni_rates. rs_rates[ic->ic_bss->ni_txrate] & IEEE80211_RATE_VAL, -1); for (m = m0, len = 0; m != NULL; m = m->m_next) { awi_write_bytes(sc, frame + len, mtod(m, u_int8_t *), m->m_len); len += m->m_len; } m_freem(m0); rate = (ic->ic_bss->ni_rates.rs_rates[ic->ic_bss->ni_txrate] & IEEE80211_RATE_VAL) * 5; awi_write_1(sc, ntxd + AWI_TXD_STATE, 0); awi_write_4(sc, txd + AWI_TXD_START, frame); awi_write_4(sc, txd + AWI_TXD_NEXT, ntxd); awi_write_4(sc, txd + AWI_TXD_LENGTH, len); awi_write_1(sc, txd + AWI_TXD_RATE, rate); awi_write_4(sc, txd + AWI_TXD_NDA, 0); awi_write_4(sc, txd + AWI_TXD_NRA, 0); awi_write_1(sc, txd + AWI_TXD_STATE, AWI_TXD_ST_OWN); sc->sc_txnext = ntxd; sc->sc_tx_timer = 5; ifp->if_timer = 1; } } static void awi_watchdog(struct ifnet *ifp) { struct awi_softc *sc = ifp->if_softc; u_int32_t prevdone; int ocansleep; ifp->if_timer = 0; if (!sc->sc_enabled || sc->sc_invalid) return; ocansleep = sc->sc_cansleep; sc->sc_cansleep = 0; if (sc->sc_tx_timer) { if (--sc->sc_tx_timer == 0) { printf("%s: device timeout\n", ifp->if_xname); prevdone = sc->sc_txdone; awi_tx_int(sc); if (sc->sc_txdone == prevdone) { ifp->if_oerrors++; awi_init(ifp); goto out; } } ifp->if_timer = 1; } if (sc->sc_rx_timer) { if 
(--sc->sc_rx_timer == 0) { if (sc->sc_ic.ic_state == IEEE80211_S_RUN) { ieee80211_new_state(&sc->sc_ic, IEEE80211_S_SCAN, -1); goto out; } } else ifp->if_timer = 1; } /* TODO: rate control */ ieee80211_watchdog(&sc->sc_ic); out: sc->sc_cansleep = ocansleep; } static int awi_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct awi_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; int s, error; s = splnet(); /* serialize ioctl, since we may sleep */ if ((error = awi_lock(sc)) != 0) goto cantlock; switch (cmd) { case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (sc->sc_enabled) { /* * To avoid rescanning another access point, * do not call awi_init() here. Instead, * only reflect promisc mode settings. */ error = awi_mode_init(sc); } else error = awi_init(ifp); } else if (sc->sc_enabled) awi_stop(ifp, 1); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->sc_ic.ic_media, cmd); break; case SIOCADDMULTI: case SIOCDELMULTI: #ifdef __FreeBSD__ error = ENETRESET; /* XXX */ #else error = (cmd == SIOCADDMULTI) ? ether_addmulti(ifr, &sc->sc_ic.ic_ec) : ether_delmulti(ifr, &sc->sc_ic.ic_ec); #endif if (error == ENETRESET) { /* do not rescan */ if (sc->sc_enabled) error = awi_mode_init(sc); else error = 0; } break; default: error = ieee80211_ioctl(&sc->sc_ic, cmd, data); if (error == ENETRESET) { if (sc->sc_enabled) error = awi_init(ifp); else error = 0; } break; } awi_unlock(sc); cantlock: splx(s); return error; } /* * Called from ifmedia_ioctl via awi_ioctl with lock obtained. * * TBD factor with ieee80211_media_change */ static int awi_media_change(struct ifnet *ifp) { struct awi_softc *sc = ifp->if_softc; struct ieee80211com *ic = &sc->sc_ic; struct ifmedia_entry *ime; enum ieee80211_opmode newmode; int i, rate, newadhoc_ap, error = 0; ime = ic->ic_media.ifm_cur; if (IFM_SUBTYPE(ime->ifm_media) == IFM_AUTO) { i = -1; } else { struct ieee80211_rateset *rs = &ic->ic_sup_rates[(ic->ic_phytype == IEEE80211_T_FH) ? 
IEEE80211_MODE_FH : IEEE80211_MODE_11B]; rate = ieee80211_media2rate(ime->ifm_media); if (rate == 0) return EINVAL; for (i = 0; i < rs->rs_nrates; i++) { if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) == rate) break; } if (i == rs->rs_nrates) return EINVAL; } if (ic->ic_fixed_rate != i) { ic->ic_fixed_rate = i; error = ENETRESET; } /* * combination of mediaopt * * hostap adhoc flag0 opmode adhoc_ap comment * + - - HOSTAP 0 HostAP * - + - IBSS 0 IBSS * - + + AHDEMO 0 WaveLAN adhoc * - - + IBSS 1 Melco old Sta * also LINK0 * - - - STA 0 Infra Station */ newadhoc_ap = 0; if (ime->ifm_media & IFM_IEEE80211_HOSTAP) newmode = IEEE80211_M_HOSTAP; else if (ime->ifm_media & IFM_IEEE80211_ADHOC) { if (ic->ic_phytype == IEEE80211_T_DS && (ime->ifm_media & IFM_FLAG0)) newmode = IEEE80211_M_AHDEMO; else newmode = IEEE80211_M_IBSS; } else if (ime->ifm_media & IFM_FLAG0) { newmode = IEEE80211_M_IBSS; newadhoc_ap = 1; } else newmode = IEEE80211_M_STA; if (ic->ic_opmode != newmode || sc->sc_adhoc_ap != newadhoc_ap) { ic->ic_opmode = newmode; sc->sc_adhoc_ap = newadhoc_ap; error = ENETRESET; } if (error == ENETRESET) { if (sc->sc_enabled) error = awi_init(ifp); else error = 0; } return error; } static void awi_media_status(struct ifnet *ifp, struct ifmediareq *imr) { struct awi_softc *sc = ifp->if_softc; struct ieee80211com *ic = &sc->sc_ic; int rate; enum ieee80211_phymode mode; imr->ifm_status = IFM_AVALID; if (ic->ic_state == IEEE80211_S_RUN) imr->ifm_status |= IFM_ACTIVE; imr->ifm_active = IFM_IEEE80211; if (ic->ic_phytype == IEEE80211_T_FH) mode = IEEE80211_MODE_FH; else mode = IEEE80211_MODE_11B; if (ic->ic_state == IEEE80211_S_RUN) { rate = ic->ic_bss->ni_rates.rs_rates[ic->ic_bss->ni_txrate] & IEEE80211_RATE_VAL; } else { if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) rate = 0; else rate = ic->ic_sup_rates[mode]. 
rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL; } imr->ifm_active |= ieee80211_rate2media(ic, rate, mode); switch (ic->ic_opmode) { case IEEE80211_M_MONITOR: /* we should never reach here */ break; case IEEE80211_M_STA: break; case IEEE80211_M_IBSS: if (sc->sc_adhoc_ap) imr->ifm_active |= IFM_FLAG0; else imr->ifm_active |= IFM_IEEE80211_ADHOC; break; case IEEE80211_M_AHDEMO: imr->ifm_active |= IFM_IEEE80211_ADHOC | IFM_FLAG0; break; case IEEE80211_M_HOSTAP: imr->ifm_active |= IFM_IEEE80211_HOSTAP; break; } } static int awi_mode_init(struct awi_softc *sc) { struct ifnet *ifp = AC2IFP(&sc->sc_arp); int n, error; #ifdef __FreeBSD__ struct ifmultiaddr *ifma; #else struct ether_multi *enm; struct ether_multistep step; #endif /* reinitialize muticast filter */ n = 0; sc->sc_mib_local.Accept_All_Multicast_Dis = 0; if (sc->sc_ic.ic_opmode != IEEE80211_M_HOSTAP && (ifp->if_flags & IFF_PROMISC)) { sc->sc_mib_mac.aPromiscuous_Enable = 1; goto set_mib; } sc->sc_mib_mac.aPromiscuous_Enable = 0; #ifdef __FreeBSD__ if (ifp->if_flags & IFF_ALLMULTI) goto set_mib; + IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; - if (n == AWI_GROUP_ADDR_SIZE) + if (n == AWI_GROUP_ADDR_SIZE) { + IF_ADDR_UNLOCK(ifp); goto set_mib; + } IEEE80211_ADDR_COPY(sc->sc_mib_addr.aGroup_Addresses[n], LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); n++; } + IF_ADDR_UNLOCK(ifp); #else ETHER_FIRST_MULTI(step, &sc->sc_ic.ic_ec, enm); while (enm != NULL) { if (n == AWI_GROUP_ADDR_SIZE || !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) goto set_mib; IEEE80211_ADDR_COPY(sc->sc_mib_addr.aGroup_Addresses[n], enm->enm_addrlo); n++; ETHER_NEXT_MULTI(step, enm); } #endif for (; n < AWI_GROUP_ADDR_SIZE; n++) memset(sc->sc_mib_addr.aGroup_Addresses[n], 0, IEEE80211_ADDR_LEN); sc->sc_mib_local.Accept_All_Multicast_Dis = 1; set_mib: #ifndef __FreeBSD__ if (sc->sc_mib_local.Accept_All_Multicast_Dis) ifp->if_flags &= ~IFF_ALLMULTI; else 
ifp->if_flags |= IFF_ALLMULTI; #endif sc->sc_mib_mgt.Wep_Required = (sc->sc_ic.ic_flags & IEEE80211_F_PRIVACY) ? AWI_WEP_ON : AWI_WEP_OFF; if ((error = awi_mib(sc, AWI_CMD_SET_MIB, AWI_MIB_LOCAL, AWI_WAIT)) || (error = awi_mib(sc, AWI_CMD_SET_MIB, AWI_MIB_ADDR, AWI_WAIT)) || (error = awi_mib(sc, AWI_CMD_SET_MIB, AWI_MIB_MAC, AWI_WAIT)) || (error = awi_mib(sc, AWI_CMD_SET_MIB, AWI_MIB_MGT, AWI_WAIT)) || (error = awi_mib(sc, AWI_CMD_SET_MIB, AWI_MIB_PHY, AWI_WAIT))) { DPRINTF(("awi_mode_init: MIB set failed: %d\n", error)); return error; } return 0; } static void awi_rx_int(struct awi_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ifnet *ifp = AC2IFP(&sc->sc_arp); struct ieee80211_node *ni; u_int8_t state, rate, rssi; u_int16_t len; u_int32_t frame, next, rstamp, rxoff; struct mbuf *m; rxoff = sc->sc_rxdoff; for (;;) { state = awi_read_1(sc, rxoff + AWI_RXD_HOST_DESC_STATE); if (state & AWI_RXD_ST_OWN) break; if (!(state & AWI_RXD_ST_CONSUMED)) { if (sc->sc_substate != AWI_ST_NONE) goto rx_next; if (state & AWI_RXD_ST_RXERROR) { ifp->if_ierrors++; goto rx_next; } len = awi_read_2(sc, rxoff + AWI_RXD_LEN); rate = awi_read_1(sc, rxoff + AWI_RXD_RATE); rssi = awi_read_1(sc, rxoff + AWI_RXD_RSSI); frame = awi_read_4(sc, rxoff + AWI_RXD_START_FRAME) & 0x7fff; rstamp = awi_read_4(sc, rxoff + AWI_RXD_LOCALTIME); m = awi_devget(sc, frame, len); if (m == NULL) { ifp->if_ierrors++; goto rx_next; } if (state & AWI_RXD_ST_LF) { /* TODO check my bss */ if (!(sc->sc_ic.ic_flags & IEEE80211_F_SIBSS) && sc->sc_ic.ic_state == IEEE80211_S_RUN) { sc->sc_rx_timer = 10; ifp->if_timer = 1; } if ((ifp->if_flags & IFF_DEBUG) && (ifp->if_flags & IFF_LINK2)) ieee80211_dump_pkt(m->m_data, m->m_len, rate / 5, rssi); if ((ifp->if_flags & IFF_LINK0) || sc->sc_adhoc_ap) m = awi_ether_modcap(sc, m); else m = m_pullup(m, sizeof(struct ieee80211_frame_min)); if (m == NULL) { ifp->if_ierrors++; goto rx_next; } ni = ieee80211_find_rxnode(ic, mtod(m, struct ieee80211_frame_min *)); 
ieee80211_input(ic, m, ni, rssi, rstamp); ieee80211_free_node(ni); } else sc->sc_rxpend = m; rx_next: state |= AWI_RXD_ST_CONSUMED; awi_write_1(sc, rxoff + AWI_RXD_HOST_DESC_STATE, state); } next = awi_read_4(sc, rxoff + AWI_RXD_NEXT); if (next & AWI_RXD_NEXT_LAST) break; /* make sure the next pointer is correct */ if (next != awi_read_4(sc, rxoff + AWI_RXD_NEXT)) break; state |= AWI_RXD_ST_OWN; awi_write_1(sc, rxoff + AWI_RXD_HOST_DESC_STATE, state); rxoff = next & 0x7fff; } sc->sc_rxdoff = rxoff; } static void awi_tx_int(struct awi_softc *sc) { struct ifnet *ifp = AC2IFP(&sc->sc_arp); u_int8_t flags; while (sc->sc_txdone != sc->sc_txnext) { flags = awi_read_1(sc, sc->sc_txdone + AWI_TXD_STATE); if ((flags & AWI_TXD_ST_OWN) || !(flags & AWI_TXD_ST_DONE)) break; if (flags & AWI_TXD_ST_ERROR) ifp->if_oerrors++; sc->sc_txdone = awi_read_4(sc, sc->sc_txdone + AWI_TXD_NEXT) & 0x7fff; } DPRINTF2(("awi_txint: txdone %d txnext %d txbase %d txend %d\n", sc->sc_txdone, sc->sc_txnext, sc->sc_txbase, sc->sc_txend)); sc->sc_tx_timer = 0; ifp->if_flags &= ~IFF_OACTIVE; awi_start(ifp); } static struct mbuf * awi_devget(struct awi_softc *sc, u_int32_t off, u_int16_t len) { struct ifnet *ifp = AC2IFP(&sc->sc_arp); struct mbuf *m; struct mbuf *top, **mp; u_int tlen; top = sc->sc_rxpend; mp = ⊤ if (top != NULL) { sc->sc_rxpend = NULL; top->m_pkthdr.len += len; m = top; while (*mp != NULL) { m = *mp; mp = &m->m_next; } if (m->m_flags & M_EXT) tlen = m->m_ext.ext_size; else if (m->m_flags & M_PKTHDR) tlen = MHLEN; else tlen = MLEN; tlen -= m->m_len; if (tlen > len) tlen = len; awi_read_bytes(sc, off, mtod(m, u_int8_t *) + m->m_len, tlen); off += tlen; len -= tlen; } while (len > 0) { if (top == NULL) { MGETHDR(m, M_DONTWAIT, MT_DATA); if (m == NULL) return NULL; m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = len; m->m_len = MHLEN; m->m_flags |= M_HASFCS; } else { MGET(m, M_DONTWAIT, MT_DATA); if (m == NULL) { m_freem(top); return NULL; } m->m_len = MLEN; } if (len >= MINCLSIZE) { 
MCLGET(m, M_DONTWAIT); if (m->m_flags & M_EXT) m->m_len = m->m_ext.ext_size; } if (top == NULL) { int hdrlen = sizeof(struct ieee80211_frame) + sizeof(struct llc); caddr_t newdata = (caddr_t) ALIGN(m->m_data + hdrlen) - hdrlen; m->m_len -= newdata - m->m_data; m->m_data = newdata; } if (m->m_len > len) m->m_len = len; awi_read_bytes(sc, off, mtod(m, u_int8_t *), m->m_len); off += m->m_len; len -= m->m_len; *mp = m; mp = &m->m_next; } return top; } /* * Initialize hardware and start firmware to accept commands. * Called everytime after power on firmware. */ static int awi_hw_init(struct awi_softc *sc) { u_int8_t status; u_int16_t intmask; int i, error; sc->sc_enab_intr = 0; sc->sc_invalid = 0; /* XXX: really? */ awi_drvstate(sc, AWI_DRV_RESET); /* reset firmware */ am79c930_gcr_setbits(&sc->sc_chip, AM79C930_GCR_CORESET); DELAY(100); awi_write_1(sc, AWI_SELFTEST, 0); awi_write_1(sc, AWI_CMD, 0); awi_write_1(sc, AWI_BANNER, 0); am79c930_gcr_clearbits(&sc->sc_chip, AM79C930_GCR_CORESET); DELAY(100); /* wait for selftest completion */ for (i = 0; ; i++) { if (sc->sc_invalid) return ENXIO; if (i >= AWI_SELFTEST_TIMEOUT*hz/1000) { printf("%s: failed to complete selftest (timeout)\n", AC2IFP(&sc->sc_arp)->if_xname); return ENXIO; } status = awi_read_1(sc, AWI_SELFTEST); if ((status & 0xf0) == 0xf0) break; if (sc->sc_cansleep) { sc->sc_sleep_cnt++; (void)tsleep(sc, PWAIT, "awitst", 1); sc->sc_sleep_cnt--; } else { DELAY(1000*1000/hz); } } if (status != AWI_SELFTEST_PASSED) { printf("%s: failed to complete selftest (code %x)\n", AC2IFP(&sc->sc_arp)->if_xname, status); return ENXIO; } /* check banner to confirm firmware write it */ awi_read_bytes(sc, AWI_BANNER, sc->sc_banner, AWI_BANNER_LEN); if (memcmp(sc->sc_banner, "PCnetMobile:", 12) != 0) { printf("%s: failed to complete selftest (bad banner)\n", AC2IFP(&sc->sc_arp)->if_xname); for (i = 0; i < AWI_BANNER_LEN; i++) printf("%s%02x", i ? 
":" : "\t", sc->sc_banner[i]); printf("\n"); return ENXIO; } /* initializing interrupt */ sc->sc_enab_intr = 1; error = awi_intr_lock(sc); if (error) return error; intmask = AWI_INT_GROGGY | AWI_INT_SCAN_CMPLT | AWI_INT_TX | AWI_INT_RX | AWI_INT_CMD; awi_write_1(sc, AWI_INTMASK, ~intmask & 0xff); awi_write_1(sc, AWI_INTMASK2, 0); awi_write_1(sc, AWI_INTSTAT, 0); awi_write_1(sc, AWI_INTSTAT2, 0); awi_intr_unlock(sc); am79c930_gcr_setbits(&sc->sc_chip, AM79C930_GCR_ENECINT); /* issuing interface test command */ error = awi_cmd(sc, AWI_CMD_NOP, AWI_WAIT); if (error) { printf("%s: failed to complete selftest", AC2IFP(&sc->sc_arp)->if_xname); if (error == ENXIO) printf(" (no hardware)\n"); else if (error != EWOULDBLOCK) printf(" (error %d)\n", error); else if (sc->sc_cansleep) printf(" (lost interrupt)\n"); else printf(" (command timeout)\n"); return error; } /* Initialize VBM */ awi_write_1(sc, AWI_VBM_OFFSET, 0); awi_write_1(sc, AWI_VBM_LENGTH, 1); awi_write_1(sc, AWI_VBM_BITMAP, 0); return 0; } /* * Extract the factory default MIB value from firmware and assign the driver * default value. * Called once at attaching the interface. 
*/ static int awi_init_mibs(struct awi_softc *sc) { int chan, i, error; struct ieee80211com *ic = &sc->sc_ic; struct awi_chanset *cs; if ((error = awi_mib(sc, AWI_CMD_GET_MIB, AWI_MIB_LOCAL, AWI_WAIT)) || (error = awi_mib(sc, AWI_CMD_GET_MIB, AWI_MIB_ADDR, AWI_WAIT)) || (error = awi_mib(sc, AWI_CMD_GET_MIB, AWI_MIB_MAC, AWI_WAIT)) || (error = awi_mib(sc, AWI_CMD_GET_MIB, AWI_MIB_MGT, AWI_WAIT)) || (error = awi_mib(sc, AWI_CMD_GET_MIB, AWI_MIB_PHY, AWI_WAIT))) { printf("%s: failed to get default mib value (error %d)\n", AC2IFP(&sc->sc_arp)->if_xname, error); return error; } memset(&sc->sc_ic.ic_chan_avail, 0, sizeof(sc->sc_ic.ic_chan_avail)); for (cs = awi_chanset; ; cs++) { if (cs->cs_type == 0) { printf("%s: failed to set available channel\n", AC2IFP(&sc->sc_arp)->if_xname); return ENXIO; } if (cs->cs_type == sc->sc_mib_phy.IEEE_PHY_Type && cs->cs_region == sc->sc_mib_phy.aCurrent_Reg_Domain) break; } if (sc->sc_mib_phy.IEEE_PHY_Type == AWI_PHY_TYPE_FH) { for (i = cs->cs_min; i <= cs->cs_max; i++) { chan = IEEE80211_FH_CHAN(i % 3 + 1, i); setbit(sc->sc_ic.ic_chan_avail, chan); /* XXX for FHSS, does frequency matter? */ ic->ic_channels[chan].ic_freq = 0; ic->ic_channels[chan].ic_flags = IEEE80211_CHAN_FHSS; /* * According to the IEEE 802.11 specification, * hop pattern parameter for FH phy should be * incremented by 3 for given hop chanset, i.e., * the chanset parameter is calculated for given * hop patter. However, BayStack 650 Access Points * apparently use fixed hop chanset parameter value * 1 for any hop pattern. So we also try this * combination of hop chanset and pattern. 
*/ chan = IEEE80211_FH_CHAN(1, i); setbit(sc->sc_ic.ic_chan_avail, chan); ic->ic_channels[chan].ic_freq = 0; /* XXX */ ic->ic_channels[chan].ic_flags = IEEE80211_CHAN_FHSS; } } else { for (i = cs->cs_min; i <= cs->cs_max; i++) { setbit(sc->sc_ic.ic_chan_avail, i); ic->ic_channels[i].ic_freq = ieee80211_ieee2mhz(i, IEEE80211_CHAN_2GHZ); ic->ic_channels[i].ic_flags = IEEE80211_CHAN_B; } } sc->sc_cur_chan = cs->cs_def; ic->ic_ibss_chan = &ic->ic_channels[cs->cs_def]; sc->sc_mib_local.Fragmentation_Dis = 1; sc->sc_mib_local.Add_PLCP_Dis = 0; sc->sc_mib_local.MAC_Hdr_Prsv = 0; sc->sc_mib_local.Rx_Mgmt_Que_En = 0; sc->sc_mib_local.Re_Assembly_Dis = 1; sc->sc_mib_local.Strip_PLCP_Dis = 0; sc->sc_mib_local.Power_Saving_Mode_Dis = 1; sc->sc_mib_local.Accept_All_Multicast_Dis = 1; sc->sc_mib_local.Check_Seq_Cntl_Dis = 0; sc->sc_mib_local.Flush_CFP_Queue_On_CF_End = 0; sc->sc_mib_local.Network_Mode = 1; sc->sc_mib_local.PWD_Lvl = 0; sc->sc_mib_local.CFP_Mode = 0; /* allocate buffers */ sc->sc_txbase = AWI_BUFFERS; sc->sc_txend = sc->sc_txbase + (AWI_TXD_SIZE + sizeof(struct ieee80211_frame) + sizeof(struct ether_header) + ETHERMTU) * AWI_NTXBUFS; LE_WRITE_4(&sc->sc_mib_local.Tx_Buffer_Offset, sc->sc_txbase); LE_WRITE_4(&sc->sc_mib_local.Tx_Buffer_Size, sc->sc_txend - sc->sc_txbase); LE_WRITE_4(&sc->sc_mib_local.Rx_Buffer_Offset, sc->sc_txend); LE_WRITE_4(&sc->sc_mib_local.Rx_Buffer_Size, AWI_BUFFERS_END - sc->sc_txend); sc->sc_mib_local.Acting_as_AP = 0; sc->sc_mib_local.Fill_CFP = 0; memset(&sc->sc_mib_mac.aDesired_ESS_ID, 0, AWI_ESS_ID_SIZE); sc->sc_mib_mac.aDesired_ESS_ID[0] = IEEE80211_ELEMID_SSID; sc->sc_mib_mgt.aPower_Mgt_Mode = 0; sc->sc_mib_mgt.aDTIM_Period = 1; LE_WRITE_2(&sc->sc_mib_mgt.aATIM_Window, 0); return 0; } static int awi_mib(struct awi_softc *sc, u_int8_t cmd, u_int8_t mib, int wflag) { int error; u_int8_t size, *ptr; switch (mib) { case AWI_MIB_LOCAL: ptr = (u_int8_t *)&sc->sc_mib_local; size = sizeof(sc->sc_mib_local); break; case AWI_MIB_ADDR: ptr = 
(u_int8_t *)&sc->sc_mib_addr; size = sizeof(sc->sc_mib_addr); break; case AWI_MIB_MAC: ptr = (u_int8_t *)&sc->sc_mib_mac; size = sizeof(sc->sc_mib_mac); break; case AWI_MIB_STAT: ptr = (u_int8_t *)&sc->sc_mib_stat; size = sizeof(sc->sc_mib_stat); break; case AWI_MIB_MGT: ptr = (u_int8_t *)&sc->sc_mib_mgt; size = sizeof(sc->sc_mib_mgt); break; case AWI_MIB_PHY: ptr = (u_int8_t *)&sc->sc_mib_phy; size = sizeof(sc->sc_mib_phy); break; default: return EINVAL; } if (sc->sc_cmd_inprog) { if ((error = awi_cmd_wait(sc)) != 0) { if (error == EWOULDBLOCK) DPRINTF(("awi_mib: cmd %d inprog", sc->sc_cmd_inprog)); return error; } } sc->sc_cmd_inprog = cmd; if (cmd == AWI_CMD_SET_MIB) awi_write_bytes(sc, AWI_CA_MIB_DATA, ptr, size); awi_write_1(sc, AWI_CA_MIB_TYPE, mib); awi_write_1(sc, AWI_CA_MIB_SIZE, size); awi_write_1(sc, AWI_CA_MIB_INDEX, 0); if ((error = awi_cmd(sc, cmd, wflag)) != 0) return error; if (cmd == AWI_CMD_GET_MIB) { awi_read_bytes(sc, AWI_CA_MIB_DATA, ptr, size); #ifdef AWI_DEBUG if (awi_debug) { int i; printf("awi_mib: #%d:", mib); for (i = 0; i < size; i++) printf(" %02x", ptr[i]); printf("\n"); } #endif } return 0; } static int awi_cmd(struct awi_softc *sc, u_int8_t cmd, int wflag) { u_int8_t status; int error = 0; #ifdef AWI_DEBUG static const char *cmdname[] = { "IDLE", "NOP", "SET_MIB", "INIT_TX", "FLUSH_TX", "INIT_RX", "KILL_RX", "SLEEP", "WAKE", "GET_MIB", "SCAN", "SYNC", "RESUME" }; #endif #ifdef AWI_DEBUG if (awi_debug > 1) { if (cmd >= sizeof(cmdname)/sizeof(cmdname[0])) printf("awi_cmd: #%d", cmd); else printf("awi_cmd: %s", cmdname[cmd]); printf(" %s\n", wflag == AWI_NOWAIT ? 
"nowait" : "wait"); } #endif sc->sc_cmd_inprog = cmd; awi_write_1(sc, AWI_CMD_STATUS, AWI_STAT_IDLE); awi_write_1(sc, AWI_CMD, cmd); if (wflag == AWI_NOWAIT) return EINPROGRESS; if ((error = awi_cmd_wait(sc)) != 0) return error; status = awi_read_1(sc, AWI_CMD_STATUS); awi_write_1(sc, AWI_CMD, 0); switch (status) { case AWI_STAT_OK: break; case AWI_STAT_BADPARM: return EINVAL; default: printf("%s: command %d failed %x\n", AC2IFP(&sc->sc_arp)->if_xname, cmd, status); return ENXIO; } return 0; } static int awi_cmd_wait(struct awi_softc *sc) { int i, error = 0; i = 0; while (sc->sc_cmd_inprog) { if (sc->sc_invalid) return ENXIO; if (awi_read_1(sc, AWI_CMD) != sc->sc_cmd_inprog) { printf("%s: failed to access hardware\n", AC2IFP(&sc->sc_arp)->if_xname); sc->sc_invalid = 1; return ENXIO; } if (sc->sc_cansleep) { sc->sc_sleep_cnt++; error = tsleep(sc, PWAIT, "awicmd", AWI_CMD_TIMEOUT*hz/1000); sc->sc_sleep_cnt--; } else { if (awi_read_1(sc, AWI_CMD_STATUS) != AWI_STAT_IDLE) { awi_cmd_done(sc); break; } if (i++ >= AWI_CMD_TIMEOUT*1000/10) error = EWOULDBLOCK; else DELAY(10); } if (error) break; } if (error) { DPRINTF(("awi_cmd_wait: cmd 0x%x, error %d\n", sc->sc_cmd_inprog, error)); } return error; } static void awi_cmd_done(struct awi_softc *sc) { u_int8_t cmd, status; status = awi_read_1(sc, AWI_CMD_STATUS); if (status == AWI_STAT_IDLE) return; /* stray interrupt */ cmd = sc->sc_cmd_inprog; sc->sc_cmd_inprog = 0; wakeup(sc); awi_write_1(sc, AWI_CMD, 0); if (status != AWI_STAT_OK) { printf("%s: command %d failed %x\n", AC2IFP(&sc->sc_arp)->if_xname, cmd, status); sc->sc_substate = AWI_ST_NONE; return; } if (sc->sc_substate != AWI_ST_NONE) (void)ieee80211_new_state(&sc->sc_ic, sc->sc_nstate, -1); } static int awi_next_txd(struct awi_softc *sc, int len, u_int32_t *framep, u_int32_t *ntxdp) { u_int32_t txd, ntxd, frame; txd = sc->sc_txnext; frame = txd + AWI_TXD_SIZE; if (frame + len > sc->sc_txend) frame = sc->sc_txbase; ntxd = frame + len; if (ntxd + AWI_TXD_SIZE > 
sc->sc_txend) ntxd = sc->sc_txbase; *framep = frame; *ntxdp = ntxd; /* * Determine if there are any room in ring buffer. * --- send wait, === new data, +++ conflict (ENOBUFS) * base........................end * done----txd=====ntxd OK * --txd=====done++++ntxd-- full * --txd=====ntxd done-- OK * ==ntxd done----txd=== OK * ==done++++ntxd----txd=== full * ++ntxd txd=====done++ full */ if (txd < ntxd) { if (txd < sc->sc_txdone && ntxd + AWI_TXD_SIZE > sc->sc_txdone) return ENOBUFS; } else { if (txd < sc->sc_txdone || ntxd + AWI_TXD_SIZE > sc->sc_txdone) return ENOBUFS; } return 0; } static int awi_lock(struct awi_softc *sc) { int error = 0; #ifdef __NetBSD__ if (curlwp == NULL) #else if (curproc == NULL) #endif { /* * XXX * Though driver ioctl should be called with context, * KAME ipv6 stack calls ioctl in interrupt for now. * We simply abort the request if there are other * ioctl requests in progress. */ if (sc->sc_busy) { if (sc->sc_invalid) return ENXIO; return EWOULDBLOCK; } sc->sc_busy = 1; sc->sc_cansleep = 0; return 0; } while (sc->sc_busy) { if (sc->sc_invalid) return ENXIO; sc->sc_sleep_cnt++; error = tsleep(sc, PWAIT | PCATCH, "awilck", 0); sc->sc_sleep_cnt--; if (error) return error; } sc->sc_busy = 1; sc->sc_cansleep = 1; return 0; } static void awi_unlock(struct awi_softc *sc) { sc->sc_busy = 0; sc->sc_cansleep = 0; if (sc->sc_sleep_cnt) wakeup(sc); } static int awi_intr_lock(struct awi_softc *sc) { u_int8_t status; int i, retry; status = 1; for (retry = 0; retry < 10; retry++) { for (i = 0; i < AWI_LOCKOUT_TIMEOUT*1000/5; i++) { if ((status = awi_read_1(sc, AWI_LOCKOUT_HOST)) == 0) break; DELAY(5); } if (status != 0) break; awi_write_1(sc, AWI_LOCKOUT_MAC, 1); if ((status = awi_read_1(sc, AWI_LOCKOUT_HOST)) == 0) break; awi_write_1(sc, AWI_LOCKOUT_MAC, 0); } if (status != 0) { printf("%s: failed to lock interrupt\n", AC2IFP(&sc->sc_arp)->if_xname); return ENXIO; } return 0; } static void awi_intr_unlock(struct awi_softc *sc) { awi_write_1(sc, 
AWI_LOCKOUT_MAC, 0); } static int awi_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg) { struct ifnet *ifp = ic->ic_ifp; struct awi_softc *sc = ifp->if_softc; struct ieee80211_node *ni; int error; u_int8_t newmode; enum ieee80211_state ostate; #ifdef AWI_DEBUG static const char *stname[] = { "INIT", "SCAN", "AUTH", "ASSOC", "RUN" }; static const char *substname[] = { "NONE", "SCAN_INIT", "SCAN_SETMIB", "SCAN_SCCMD", "SUB_INIT", "SUB_SETSS", "SUB_SYNC" }; #endif /* AWI_DEBUG */ ostate = ic->ic_state; DPRINTF(("awi_newstate: %s (%s/%s) -> %s\n", stname[ostate], stname[sc->sc_nstate], substname[sc->sc_substate], stname[nstate])); /* set LED */ switch (nstate) { case IEEE80211_S_INIT: awi_drvstate(sc, AWI_DRV_RESET); break; case IEEE80211_S_SCAN: if (ic->ic_opmode == IEEE80211_M_IBSS || ic->ic_opmode == IEEE80211_M_AHDEMO) awi_drvstate(sc, AWI_DRV_ADHSC); else awi_drvstate(sc, AWI_DRV_INFSY); break; case IEEE80211_S_AUTH: awi_drvstate(sc, AWI_DRV_INFSY); break; case IEEE80211_S_ASSOC: awi_drvstate(sc, AWI_DRV_INFAUTH); break; case IEEE80211_S_RUN: if (ic->ic_opmode == IEEE80211_M_IBSS || ic->ic_opmode == IEEE80211_M_AHDEMO) awi_drvstate(sc, AWI_DRV_ADHSY); else awi_drvstate(sc, AWI_DRV_INFASSOC); break; } if (nstate == IEEE80211_S_INIT) { sc->sc_substate = AWI_ST_NONE; ic->ic_flags &= ~IEEE80211_F_SIBSS; return (*sc->sc_newstate)(ic, nstate, arg); } /* state transition */ if (nstate == IEEE80211_S_SCAN) { /* SCAN substate */ if (sc->sc_substate == AWI_ST_NONE) { sc->sc_nstate = nstate; /* next state in transition */ sc->sc_substate = AWI_ST_SCAN_INIT; } switch (sc->sc_substate) { case AWI_ST_SCAN_INIT: sc->sc_substate = AWI_ST_SCAN_SETMIB; switch (ostate) { case IEEE80211_S_RUN: /* beacon miss */ if (ifp->if_flags & IFF_DEBUG) printf("%s: no recent beacons from %s;" " rescanning\n", ifp->if_xname, ether_sprintf(ic->ic_bss->ni_bssid)); /* FALLTHRU */ case IEEE80211_S_AUTH: case IEEE80211_S_ASSOC: case IEEE80211_S_INIT: ieee80211_begin_scan(ic, 0); 
break; case IEEE80211_S_SCAN: /* scan next */ break; } if (ic->ic_flags & IEEE80211_F_ASCAN) newmode = AWI_SCAN_ACTIVE; else newmode = AWI_SCAN_PASSIVE; if (sc->sc_mib_mgt.aScan_Mode != newmode) { sc->sc_mib_mgt.aScan_Mode = newmode; if ((error = awi_mib(sc, AWI_CMD_SET_MIB, AWI_MIB_MGT, AWI_NOWAIT)) != 0) break; } /* FALLTHRU */ case AWI_ST_SCAN_SETMIB: sc->sc_substate = AWI_ST_SCAN_SCCMD; if (sc->sc_cmd_inprog) { if ((error = awi_cmd_wait(sc)) != 0) break; } sc->sc_cmd_inprog = AWI_CMD_SCAN; ni = ic->ic_bss; awi_write_2(sc, AWI_CA_SCAN_DURATION, (ic->ic_flags & IEEE80211_F_ASCAN) ? AWI_ASCAN_DURATION : AWI_PSCAN_DURATION); if (sc->sc_mib_phy.IEEE_PHY_Type == AWI_PHY_TYPE_FH) { awi_write_1(sc, AWI_CA_SCAN_SET, IEEE80211_FH_CHANSET( ieee80211_chan2ieee(ic, ni->ni_chan))); awi_write_1(sc, AWI_CA_SCAN_PATTERN, IEEE80211_FH_CHANPAT( ieee80211_chan2ieee(ic, ni->ni_chan))); awi_write_1(sc, AWI_CA_SCAN_IDX, 1); } else { awi_write_1(sc, AWI_CA_SCAN_SET, ieee80211_chan2ieee(ic, ni->ni_chan)); awi_write_1(sc, AWI_CA_SCAN_PATTERN, 0); awi_write_1(sc, AWI_CA_SCAN_IDX, 0); } awi_write_1(sc, AWI_CA_SCAN_SUSP, 0); sc->sc_cur_chan = ieee80211_chan2ieee(ic, ni->ni_chan); if ((error = awi_cmd(sc, AWI_CMD_SCAN, AWI_NOWAIT)) != 0) break; /* FALLTHRU */ case AWI_ST_SCAN_SCCMD: ic->ic_state = nstate; sc->sc_substate = AWI_ST_NONE; error = EINPROGRESS; break; default: DPRINTF(("awi_newstate: unexpected state %s/%s\n", stname[nstate], substname[sc->sc_substate])); sc->sc_substate = AWI_ST_NONE; error = EIO; break; } goto out; } if (ostate == IEEE80211_S_SCAN) { /* set SSID and channel */ /* substate */ if (sc->sc_substate == AWI_ST_NONE) { sc->sc_nstate = nstate; /* next state in transition */ sc->sc_substate = AWI_ST_SUB_INIT; } ni = ic->ic_bss; switch (sc->sc_substate) { case AWI_ST_SUB_INIT: sc->sc_substate = AWI_ST_SUB_SETSS; IEEE80211_ADDR_COPY(&sc->sc_mib_mgt.aCurrent_BSS_ID, ni->ni_bssid); memset(&sc->sc_mib_mgt.aCurrent_ESS_ID, 0, AWI_ESS_ID_SIZE); 
sc->sc_mib_mgt.aCurrent_ESS_ID[0] = IEEE80211_ELEMID_SSID; sc->sc_mib_mgt.aCurrent_ESS_ID[1] = ni->ni_esslen; memcpy(&sc->sc_mib_mgt.aCurrent_ESS_ID[2], ni->ni_essid, ni->ni_esslen); LE_WRITE_2(&sc->sc_mib_mgt.aBeacon_Period, ni->ni_intval); if ((error = awi_mib(sc, AWI_CMD_SET_MIB, AWI_MIB_MGT, AWI_NOWAIT)) != 0) break; /* FALLTHRU */ case AWI_ST_SUB_SETSS: sc->sc_substate = AWI_ST_SUB_SYNC; if (sc->sc_cmd_inprog) { if ((error = awi_cmd_wait(sc)) != 0) break; } sc->sc_cmd_inprog = AWI_CMD_SYNC; if (sc->sc_mib_phy.IEEE_PHY_Type == AWI_PHY_TYPE_FH) { awi_write_1(sc, AWI_CA_SYNC_SET, IEEE80211_FH_CHANSET( ieee80211_chan2ieee(ic, ni->ni_chan))); awi_write_1(sc, AWI_CA_SYNC_PATTERN, IEEE80211_FH_CHANPAT( ieee80211_chan2ieee(ic, ni->ni_chan))); awi_write_1(sc, AWI_CA_SYNC_IDX, ni->ni_fhindex); awi_write_2(sc, AWI_CA_SYNC_DWELL, ni->ni_fhdwell); } else { awi_write_1(sc, AWI_CA_SYNC_SET, ieee80211_chan2ieee(ic, ni->ni_chan)); awi_write_1(sc, AWI_CA_SYNC_PATTERN, 0); awi_write_1(sc, AWI_CA_SYNC_IDX, 0); awi_write_2(sc, AWI_CA_SYNC_DWELL, 0); } if (ic->ic_flags & IEEE80211_F_SIBSS) { memset(&ni->ni_tstamp, 0, sizeof(ni->ni_tstamp)); ni->ni_rstamp = 0; awi_write_1(sc, AWI_CA_SYNC_STARTBSS, 1); } else awi_write_1(sc, AWI_CA_SYNC_STARTBSS, 0); awi_write_2(sc, AWI_CA_SYNC_MBZ, 0); awi_write_bytes(sc, AWI_CA_SYNC_TIMESTAMP, ni->ni_tstamp.data, 8); awi_write_4(sc, AWI_CA_SYNC_REFTIME, ni->ni_rstamp); sc->sc_cur_chan = ieee80211_chan2ieee(ic, ni->ni_chan); if ((error = awi_cmd(sc, AWI_CMD_SYNC, AWI_NOWAIT)) != 0) break; /* FALLTHRU */ case AWI_ST_SUB_SYNC: sc->sc_substate = AWI_ST_NONE; if (ic->ic_flags & IEEE80211_F_SIBSS) { if ((error = awi_mib(sc, AWI_CMD_GET_MIB, AWI_MIB_MGT, AWI_WAIT)) != 0) break; IEEE80211_ADDR_COPY(ni->ni_bssid, &sc->sc_mib_mgt.aCurrent_BSS_ID); } else { if (nstate == IEEE80211_S_RUN) { sc->sc_rx_timer = 10; ifp->if_timer = 1; } } error = 0; break; default: DPRINTF(("awi_newstate: unexpected state %s/%s\n", stname[nstate], substname[sc->sc_substate])); 
sc->sc_substate = AWI_ST_NONE; error = EIO; break; } goto out; } sc->sc_substate = AWI_ST_NONE; return (*sc->sc_newstate)(ic, nstate, arg); out: if (error != 0) { if (error == EINPROGRESS) error = 0; return error; } return (*sc->sc_newstate)(ic, nstate, arg); } static void awi_recv_mgmt(struct ieee80211com *ic, struct mbuf *m0, struct ieee80211_node *ni, int subtype, int rssi, u_int32_t rstamp) { struct awi_softc *sc = ic->ic_ifp->if_softc; /* probe request is handled by hardware */ if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_REQ) return; (*sc->sc_recv_mgmt)(ic, m0, ni, subtype, rssi, rstamp); } static int awi_send_mgmt(struct ieee80211com *ic, struct ieee80211_node *ni, int type, int arg) { struct awi_softc *sc = ic->ic_ifp->if_softc; /* probe request is handled by hardware */ if (type == IEEE80211_FC0_SUBTYPE_PROBE_REQ) return 0; return (*sc->sc_send_mgmt)(ic, ni, type, arg); } static struct mbuf * awi_ether_encap(struct awi_softc *sc, struct mbuf *m) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_node *ni = ic->ic_bss; struct ether_header *eh; struct ieee80211_frame *wh; if (m->m_len < sizeof(struct ether_header)) { m = m_pullup(m, sizeof(struct ether_header)); if (m == NULL) return NULL; } eh = mtod(m, struct ether_header *); M_PREPEND(m, sizeof(struct ieee80211_frame), M_DONTWAIT); if (m == NULL) return NULL; wh = mtod(m, struct ieee80211_frame *); wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA; *(u_int16_t *)wh->i_dur = 0; *(u_int16_t *)wh->i_seq = htole16(ni->ni_txseqs[0] << IEEE80211_SEQ_SEQ_SHIFT); ni->ni_txseqs[0]++; if (ic->ic_opmode == IEEE80211_M_IBSS || ic->ic_opmode == IEEE80211_M_AHDEMO) { wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; if (sc->sc_adhoc_ap) IEEE80211_ADDR_COPY(wh->i_addr1, ni->ni_macaddr); else IEEE80211_ADDR_COPY(wh->i_addr1, eh->ether_dhost); IEEE80211_ADDR_COPY(wh->i_addr2, eh->ether_shost); IEEE80211_ADDR_COPY(wh->i_addr3, ni->ni_bssid); } else { wh->i_fc[1] = IEEE80211_FC1_DIR_TODS; 
IEEE80211_ADDR_COPY(wh->i_addr1, ni->ni_bssid); IEEE80211_ADDR_COPY(wh->i_addr2, eh->ether_shost); IEEE80211_ADDR_COPY(wh->i_addr3, eh->ether_dhost); } return m; } static struct mbuf * awi_ether_modcap(struct awi_softc *sc, struct mbuf *m) { struct ieee80211com *ic = &sc->sc_ic; struct ether_header eh; struct ieee80211_frame wh; struct llc *llc; if (m->m_len < sizeof(wh) + sizeof(eh)) { m = m_pullup(m, sizeof(wh) + sizeof(eh)); if (m == NULL) return NULL; } memcpy(&wh, mtod(m, caddr_t), sizeof(wh)); if (wh.i_fc[0] != (IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_DATA)) return m; memcpy(&eh, mtod(m, caddr_t) + sizeof(wh), sizeof(eh)); m_adj(m, sizeof(eh) - sizeof(*llc)); if (ic->ic_opmode == IEEE80211_M_IBSS || ic->ic_opmode == IEEE80211_M_AHDEMO) IEEE80211_ADDR_COPY(wh.i_addr2, eh.ether_shost); memcpy(mtod(m, caddr_t), &wh, sizeof(wh)); llc = (struct llc *)(mtod(m, caddr_t) + sizeof(wh)); llc->llc_dsap = llc->llc_ssap = LLC_SNAP_LSAP; llc->llc_control = LLC_UI; llc->llc_snap.org_code[0] = 0; llc->llc_snap.org_code[1] = 0; llc->llc_snap.org_code[2] = 0; llc->llc_snap.ether_type = eh.ether_type; return m; } Index: stable/6/sys/dev/bfe/if_bfe.c =================================================================== --- stable/6/sys/dev/bfe/if_bfe.c (revision 149421) +++ stable/6/sys/dev/bfe/if_bfe.c (revision 149422) @@ -1,1586 +1,1588 @@ /*- * Copyright (c) 2003 Stuart Walsh * and Duncan Barclay * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS 'AS IS' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for DELAY */ #include #include #include #include #include #include #include "miidevs.h" #include #include #include MODULE_DEPEND(bfe, pci, 1, 1, 1); MODULE_DEPEND(bfe, ether, 1, 1, 1); MODULE_DEPEND(bfe, miibus, 1, 1, 1); /* "controller miibus0" required. See GENERIC if you get errors here. 
*/ #include "miibus_if.h" #define BFE_DEVDESC_MAX 64 /* Maximum device description length */ static struct bfe_type bfe_devs[] = { { BCOM_VENDORID, BCOM_DEVICEID_BCM4401, "Broadcom BCM4401 Fast Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM4401B0, "Broadcom BCM4401-B0 Fast Ethernet" }, { 0, 0, NULL } }; static int bfe_probe (device_t); static int bfe_attach (device_t); static int bfe_detach (device_t); static void bfe_release_resources (struct bfe_softc *); static void bfe_intr (void *); static void bfe_start (struct ifnet *); static void bfe_start_locked (struct ifnet *); static int bfe_ioctl (struct ifnet *, u_long, caddr_t); static void bfe_init (void *); static void bfe_init_locked (void *); static void bfe_stop (struct bfe_softc *); static void bfe_watchdog (struct ifnet *); static void bfe_shutdown (device_t); static void bfe_tick (void *); static void bfe_txeof (struct bfe_softc *); static void bfe_rxeof (struct bfe_softc *); static void bfe_set_rx_mode (struct bfe_softc *); static int bfe_list_rx_init (struct bfe_softc *); static int bfe_list_newbuf (struct bfe_softc *, int, struct mbuf*); static void bfe_rx_ring_free (struct bfe_softc *); static void bfe_pci_setup (struct bfe_softc *, u_int32_t); static int bfe_ifmedia_upd (struct ifnet *); static void bfe_ifmedia_sts (struct ifnet *, struct ifmediareq *); static int bfe_miibus_readreg (device_t, int, int); static int bfe_miibus_writereg (device_t, int, int, int); static void bfe_miibus_statchg (device_t); static int bfe_wait_bit (struct bfe_softc *, u_int32_t, u_int32_t, u_long, const int); static void bfe_get_config (struct bfe_softc *sc); static void bfe_read_eeprom (struct bfe_softc *, u_int8_t *); static void bfe_stats_update (struct bfe_softc *); static void bfe_clear_stats (struct bfe_softc *); static int bfe_readphy (struct bfe_softc *, u_int32_t, u_int32_t*); static int bfe_writephy (struct bfe_softc *, u_int32_t, u_int32_t); static int bfe_resetphy (struct bfe_softc *); static int bfe_setupphy 
(struct bfe_softc *); static void bfe_chip_reset (struct bfe_softc *); static void bfe_chip_halt (struct bfe_softc *); static void bfe_core_reset (struct bfe_softc *); static void bfe_core_disable (struct bfe_softc *); static int bfe_dma_alloc (device_t); static void bfe_dma_map_desc (void *, bus_dma_segment_t *, int, int); static void bfe_dma_map (void *, bus_dma_segment_t *, int, int); static void bfe_cam_write (struct bfe_softc *, u_char *, int); static device_method_t bfe_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bfe_probe), DEVMETHOD(device_attach, bfe_attach), DEVMETHOD(device_detach, bfe_detach), DEVMETHOD(device_shutdown, bfe_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, bfe_miibus_readreg), DEVMETHOD(miibus_writereg, bfe_miibus_writereg), DEVMETHOD(miibus_statchg, bfe_miibus_statchg), { 0, 0 } }; static driver_t bfe_driver = { "bfe", bfe_methods, sizeof(struct bfe_softc) }; static devclass_t bfe_devclass; DRIVER_MODULE(bfe, pci, bfe_driver, bfe_devclass, 0, 0); DRIVER_MODULE(miibus, bfe, miibus_driver, miibus_devclass, 0, 0); /* * Probe for a Broadcom 4401 chip. 
*/ static int bfe_probe(device_t dev) { struct bfe_type *t; struct bfe_softc *sc; t = bfe_devs; sc = device_get_softc(dev); bzero(sc, sizeof(struct bfe_softc)); sc->bfe_unit = device_get_unit(dev); sc->bfe_dev = dev; while(t->bfe_name != NULL) { if ((pci_get_vendor(dev) == t->bfe_vid) && (pci_get_device(dev) == t->bfe_did)) { device_set_desc_copy(dev, t->bfe_name); return (BUS_PROBE_DEFAULT); } t++; } return (ENXIO); } static int bfe_dma_alloc(device_t dev) { struct bfe_softc *sc; int error, i; sc = device_get_softc(dev); /* parent tag */ error = bus_dma_tag_create(NULL, /* parent */ PAGE_SIZE, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR_32BIT, /* highaddr */ NULL, NULL, /* filter, filterarg */ MAXBSIZE, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* num of segments */ BUS_SPACE_MAXSIZE_32BIT, /* max segment size */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->bfe_parent_tag); /* tag for TX ring */ error = bus_dma_tag_create(sc->bfe_parent_tag, BFE_TX_LIST_SIZE, BFE_TX_LIST_SIZE, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BFE_TX_LIST_SIZE, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->bfe_tx_tag); if (error) { device_printf(dev, "could not allocate dma tag\n"); return (ENOMEM); } /* tag for RX ring */ error = bus_dma_tag_create(sc->bfe_parent_tag, BFE_RX_LIST_SIZE, BFE_RX_LIST_SIZE, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BFE_RX_LIST_SIZE, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->bfe_rx_tag); if (error) { device_printf(dev, "could not allocate dma tag\n"); return (ENOMEM); } /* tag for mbufs */ error = bus_dma_tag_create(sc->bfe_parent_tag, ETHER_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->bfe_tag); if (error) { device_printf(dev, "could not allocate dma tag\n"); return (ENOMEM); } /* pre allocate dmamaps for RX list */ for (i = 0; i < BFE_RX_LIST_CNT; i++) { error = bus_dmamap_create(sc->bfe_tag, 0, 
&sc->bfe_rx_ring[i].bfe_map); if (error) { device_printf(dev, "cannot create DMA map for RX\n"); return (ENOMEM); } } /* pre allocate dmamaps for TX list */ for (i = 0; i < BFE_TX_LIST_CNT; i++) { error = bus_dmamap_create(sc->bfe_tag, 0, &sc->bfe_tx_ring[i].bfe_map); if (error) { device_printf(dev, "cannot create DMA map for TX\n"); return (ENOMEM); } } /* Alloc dma for rx ring */ error = bus_dmamem_alloc(sc->bfe_rx_tag, (void *)&sc->bfe_rx_list, BUS_DMA_NOWAIT, &sc->bfe_rx_map); if(error) return (ENOMEM); bzero(sc->bfe_rx_list, BFE_RX_LIST_SIZE); error = bus_dmamap_load(sc->bfe_rx_tag, sc->bfe_rx_map, sc->bfe_rx_list, sizeof(struct bfe_desc), bfe_dma_map, &sc->bfe_rx_dma, 0); if(error) return (ENOMEM); bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map, BUS_DMASYNC_PREREAD); error = bus_dmamem_alloc(sc->bfe_tx_tag, (void *)&sc->bfe_tx_list, BUS_DMA_NOWAIT, &sc->bfe_tx_map); if (error) return (ENOMEM); error = bus_dmamap_load(sc->bfe_tx_tag, sc->bfe_tx_map, sc->bfe_tx_list, sizeof(struct bfe_desc), bfe_dma_map, &sc->bfe_tx_dma, 0); if(error) return (ENOMEM); bzero(sc->bfe_tx_list, BFE_TX_LIST_SIZE); bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map, BUS_DMASYNC_PREREAD); return (0); } static int bfe_attach(device_t dev) { struct ifnet *ifp = NULL; struct bfe_softc *sc; int unit, error = 0, rid; sc = device_get_softc(dev); mtx_init(&sc->bfe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); unit = device_get_unit(dev); sc->bfe_dev = dev; sc->bfe_unit = unit; /* * Map control/status registers. 
*/ pci_enable_busmaster(dev); rid = BFE_PCI_MEMLO; sc->bfe_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->bfe_res == NULL) { printf ("bfe%d: couldn't map memory\n", unit); error = ENXIO; goto fail; } sc->bfe_btag = rman_get_bustag(sc->bfe_res); sc->bfe_bhandle = rman_get_bushandle(sc->bfe_res); sc->bfe_vhandle = (vm_offset_t)rman_get_virtual(sc->bfe_res); /* Allocate interrupt */ rid = 0; sc->bfe_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->bfe_irq == NULL) { printf("bfe%d: couldn't map interrupt\n", unit); error = ENXIO; goto fail; } if (bfe_dma_alloc(dev)) { printf("bfe%d: failed to allocate DMA resources\n", sc->bfe_unit); bfe_release_resources(sc); error = ENXIO; goto fail; } /* Set up ifnet structure */ ifp = sc->bfe_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { printf("bfe%d: failed to if_alloc()\n", sc->bfe_unit); error = ENOSPC; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = bfe_ioctl; ifp->if_start = bfe_start; ifp->if_watchdog = bfe_watchdog; ifp->if_init = bfe_init; ifp->if_mtu = ETHERMTU; ifp->if_baudrate = 100000000; IFQ_SET_MAXLEN(&ifp->if_snd, BFE_TX_QLEN); ifp->if_snd.ifq_drv_maxlen = BFE_TX_QLEN; IFQ_SET_READY(&ifp->if_snd); bfe_get_config(sc); /* Reset the chip and turn on the PHY */ BFE_LOCK(sc); bfe_chip_reset(sc); BFE_UNLOCK(sc); if (mii_phy_probe(dev, &sc->bfe_miibus, bfe_ifmedia_upd, bfe_ifmedia_sts)) { printf("bfe%d: MII without any PHY!\n", sc->bfe_unit); error = ENXIO; goto fail; } ether_ifattach(ifp, sc->bfe_enaddr); callout_handle_init(&sc->bfe_stat_ch); /* * Tell the upper layer(s) we support long frames. 
*/ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_VLAN_MTU; ifp->if_capenable |= IFCAP_VLAN_MTU; /* * Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->bfe_irq, INTR_TYPE_NET | INTR_MPSAFE, bfe_intr, sc, &sc->bfe_intrhand); if (error) { bfe_release_resources(sc); printf("bfe%d: couldn't set up irq\n", unit); goto fail; } fail: if(error) { bfe_release_resources(sc); if (ifp != NULL) if_free(ifp); } return (error); } static int bfe_detach(device_t dev) { struct bfe_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); KASSERT(mtx_initialized(&sc->bfe_mtx), ("bfe mutex not initialized")); BFE_LOCK(sc); ifp = sc->bfe_ifp; if (device_is_attached(dev)) { bfe_stop(sc); ether_ifdetach(ifp); if_free(ifp); } bfe_chip_reset(sc); bus_generic_detach(dev); if(sc->bfe_miibus != NULL) device_delete_child(dev, sc->bfe_miibus); bfe_release_resources(sc); BFE_UNLOCK(sc); mtx_destroy(&sc->bfe_mtx); return (0); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. 
*/
/* Quiesce the chip at shutdown so in-flight DMA can't scribble on memory. */
static void
bfe_shutdown(device_t dev)
{
	struct bfe_softc *sc;

	sc = device_get_softc(dev);
	BFE_LOCK(sc);
	bfe_stop(sc);
	BFE_UNLOCK(sc);
	return;
}

/*
 * MII read accessor for miibus.  Only the configured PHY address is
 * answered; other addresses read as 0 so the bus scan skips them.
 */
static int
bfe_miibus_readreg(device_t dev, int phy, int reg)
{
	struct bfe_softc *sc;
	u_int32_t ret;

	sc = device_get_softc(dev);
	if(phy != sc->bfe_phyaddr)
		return (0);
	bfe_readphy(sc, reg, &ret);

	return (ret);
}

/* MII write accessor for miibus; ignores PHYs other than the configured one. */
static int
bfe_miibus_writereg(device_t dev, int phy, int reg, int val)
{
	struct bfe_softc *sc;

	sc = device_get_softc(dev);
	if(phy != sc->bfe_phyaddr)
		return (0);
	bfe_writephy(sc, reg, val);

	return (0);
}

/* Link-state change callback — nothing to do for this chip. */
static void
bfe_miibus_statchg(device_t dev)
{
	return;
}

/*
 * Free all mbufs still attached to the TX ring, unload their DMA maps
 * and clear the descriptor list.
 */
static void
bfe_tx_ring_free(struct bfe_softc *sc)
{
	int i;

	for(i = 0; i < BFE_TX_LIST_CNT; i++) {
		if(sc->bfe_tx_ring[i].bfe_mbuf != NULL) {
			m_freem(sc->bfe_tx_ring[i].bfe_mbuf);
			sc->bfe_tx_ring[i].bfe_mbuf = NULL;
			bus_dmamap_unload(sc->bfe_tag,
					sc->bfe_tx_ring[i].bfe_map);
		}
	}
	bzero(sc->bfe_tx_list, BFE_TX_LIST_SIZE);
	bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map,
			BUS_DMASYNC_PREREAD);
}

/* RX-side counterpart of bfe_tx_ring_free(). */
static void
bfe_rx_ring_free(struct bfe_softc *sc)
{
	int i;

	for (i = 0; i < BFE_RX_LIST_CNT; i++) {
		if (sc->bfe_rx_ring[i].bfe_mbuf != NULL) {
			m_freem(sc->bfe_rx_ring[i].bfe_mbuf);
			sc->bfe_rx_ring[i].bfe_mbuf = NULL;
			bus_dmamap_unload(sc->bfe_tag,
					sc->bfe_rx_ring[i].bfe_map);
		}
	}
	bzero(sc->bfe_rx_list, BFE_RX_LIST_SIZE);
	bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map,
			BUS_DMASYNC_PREREAD);
}

/*
 * Populate every RX slot with a fresh mbuf cluster and tell the chip
 * where the ring ends.  Returns ENOBUFS if any allocation fails.
 */
static int
bfe_list_rx_init(struct bfe_softc *sc)
{
	int i;

	for(i = 0; i < BFE_RX_LIST_CNT; i++) {
		if(bfe_list_newbuf(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map,
			BUS_DMASYNC_PREREAD);
	/* i == BFE_RX_LIST_CNT here, so this points just past the ring. */
	CSR_WRITE_4(sc, BFE_DMARX_PTR, (i * sizeof(struct bfe_desc)));

	sc->bfe_rx_cons = 0;

	return (0);
}

/*
 * Attach an mbuf cluster to RX slot 'c' and initialize its descriptor.
 * If 'm' is NULL a new cluster is allocated; otherwise the caller's
 * mbuf is recycled.  Returns 0, EINVAL on a bad index, or ENOBUFS.
 */
static int
bfe_list_newbuf(struct bfe_softc *sc, int c, struct mbuf *m)
{
	struct bfe_rxheader *rx_header;
	struct bfe_desc *d;
	struct bfe_data *r;
	u_int32_t ctrl;

	if ((c < 0) || (c >= BFE_RX_LIST_CNT))
		return (EINVAL);

	if(m == NULL) {
		m = m_getcl(M_DONTWAIT,
MT_DATA, M_PKTHDR); if(m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MCLBYTES; } else m->m_data = m->m_ext.ext_buf; rx_header = mtod(m, struct bfe_rxheader *); rx_header->len = 0; rx_header->flags = 0; /* Map the mbuf into DMA */ sc->bfe_rx_cnt = c; d = &sc->bfe_rx_list[c]; r = &sc->bfe_rx_ring[c]; bus_dmamap_load(sc->bfe_tag, r->bfe_map, mtod(m, void *), MCLBYTES, bfe_dma_map_desc, d, 0); bus_dmamap_sync(sc->bfe_tag, r->bfe_map, BUS_DMASYNC_PREREAD); ctrl = ETHER_MAX_LEN + 32; if(c == BFE_RX_LIST_CNT - 1) ctrl |= BFE_DESC_EOT; d->bfe_ctrl = ctrl; r->bfe_mbuf = m; bus_dmamap_sync(sc->bfe_rx_tag, sc->bfe_rx_map, BUS_DMASYNC_PREREAD); return (0); } static void bfe_get_config(struct bfe_softc *sc) { u_int8_t eeprom[128]; bfe_read_eeprom(sc, eeprom); sc->bfe_enaddr[0] = eeprom[79]; sc->bfe_enaddr[1] = eeprom[78]; sc->bfe_enaddr[2] = eeprom[81]; sc->bfe_enaddr[3] = eeprom[80]; sc->bfe_enaddr[4] = eeprom[83]; sc->bfe_enaddr[5] = eeprom[82]; sc->bfe_phyaddr = eeprom[90] & 0x1f; sc->bfe_mdc_port = (eeprom[90] >> 14) & 0x1; sc->bfe_core_unit = 0; sc->bfe_dma_offset = BFE_PCI_DMA; } static void bfe_pci_setup(struct bfe_softc *sc, u_int32_t cores) { u_int32_t bar_orig, pci_rev, val; bar_orig = pci_read_config(sc->bfe_dev, BFE_BAR0_WIN, 4); pci_write_config(sc->bfe_dev, BFE_BAR0_WIN, BFE_REG_PCI, 4); pci_rev = CSR_READ_4(sc, BFE_SBIDHIGH) & BFE_RC_MASK; val = CSR_READ_4(sc, BFE_SBINTVEC); val |= cores; CSR_WRITE_4(sc, BFE_SBINTVEC, val); val = CSR_READ_4(sc, BFE_SSB_PCI_TRANS_2); val |= BFE_SSB_PCI_PREF | BFE_SSB_PCI_BURST; CSR_WRITE_4(sc, BFE_SSB_PCI_TRANS_2, val); pci_write_config(sc->bfe_dev, BFE_BAR0_WIN, bar_orig, 4); } static void bfe_clear_stats(struct bfe_softc *sc) { u_long reg; BFE_LOCK_ASSERT(sc); CSR_WRITE_4(sc, BFE_MIB_CTRL, BFE_MIB_CLR_ON_READ); for (reg = BFE_TX_GOOD_O; reg <= BFE_TX_PAUSE; reg += 4) CSR_READ_4(sc, reg); for (reg = BFE_RX_GOOD_O; reg <= BFE_RX_NPAUSE; reg += 4) CSR_READ_4(sc, reg); } static int bfe_resetphy(struct bfe_softc *sc) { 
u_int32_t val; bfe_writephy(sc, 0, BMCR_RESET); DELAY(100); bfe_readphy(sc, 0, &val); if (val & BMCR_RESET) { printf("bfe%d: PHY Reset would not complete.\n", sc->bfe_unit); return (ENXIO); } return (0); } static void bfe_chip_halt(struct bfe_softc *sc) { BFE_LOCK_ASSERT(sc); /* disable interrupts - not that it actually does..*/ CSR_WRITE_4(sc, BFE_IMASK, 0); CSR_READ_4(sc, BFE_IMASK); CSR_WRITE_4(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE); bfe_wait_bit(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE, 200, 1); CSR_WRITE_4(sc, BFE_DMARX_CTRL, 0); CSR_WRITE_4(sc, BFE_DMATX_CTRL, 0); DELAY(10); } static void bfe_chip_reset(struct bfe_softc *sc) { u_int32_t val; BFE_LOCK_ASSERT(sc); /* Set the interrupt vector for the enet core */ bfe_pci_setup(sc, BFE_INTVEC_ENET0); /* is core up? */ val = CSR_READ_4(sc, BFE_SBTMSLOW) & (BFE_RESET | BFE_REJECT | BFE_CLOCK); if (val == BFE_CLOCK) { /* It is, so shut it down */ CSR_WRITE_4(sc, BFE_RCV_LAZY, 0); CSR_WRITE_4(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE); bfe_wait_bit(sc, BFE_ENET_CTRL, BFE_ENET_DISABLE, 100, 1); CSR_WRITE_4(sc, BFE_DMATX_CTRL, 0); sc->bfe_tx_cnt = sc->bfe_tx_prod = sc->bfe_tx_cons = 0; if (CSR_READ_4(sc, BFE_DMARX_STAT) & BFE_STAT_EMASK) bfe_wait_bit(sc, BFE_DMARX_STAT, BFE_STAT_SIDLE, 100, 0); CSR_WRITE_4(sc, BFE_DMARX_CTRL, 0); sc->bfe_rx_prod = sc->bfe_rx_cons = 0; } bfe_core_reset(sc); bfe_clear_stats(sc); /* * We want the phy registers to be accessible even when * the driver is "downed" so initialize MDC preamble, frequency, * and whether internal or external phy here. */ /* 4402 has 62.5Mhz SB clock and internal phy */ CSR_WRITE_4(sc, BFE_MDIO_CTRL, 0x8d); /* Internal or external PHY? 
*/ val = CSR_READ_4(sc, BFE_DEVCTRL); if(!(val & BFE_IPP)) CSR_WRITE_4(sc, BFE_ENET_CTRL, BFE_ENET_EPSEL); else if(CSR_READ_4(sc, BFE_DEVCTRL) & BFE_EPR) { BFE_AND(sc, BFE_DEVCTRL, ~BFE_EPR); DELAY(100); } /* Enable CRC32 generation and set proper LED modes */ BFE_OR(sc, BFE_MAC_CTRL, BFE_CTRL_CRC32_ENAB | BFE_CTRL_LED); /* Reset or clear powerdown control bit */ BFE_AND(sc, BFE_MAC_CTRL, ~BFE_CTRL_PDOWN); CSR_WRITE_4(sc, BFE_RCV_LAZY, ((1 << BFE_LAZY_FC_SHIFT) & BFE_LAZY_FC_MASK)); /* * We don't want lazy interrupts, so just send them at * the end of a frame, please */ BFE_OR(sc, BFE_RCV_LAZY, 0); /* Set max lengths, accounting for VLAN tags */ CSR_WRITE_4(sc, BFE_RXMAXLEN, ETHER_MAX_LEN+32); CSR_WRITE_4(sc, BFE_TXMAXLEN, ETHER_MAX_LEN+32); /* Set watermark XXX - magic */ CSR_WRITE_4(sc, BFE_TX_WMARK, 56); /* * Initialise DMA channels * - not forgetting dma addresses need to be added to BFE_PCI_DMA */ CSR_WRITE_4(sc, BFE_DMATX_CTRL, BFE_TX_CTRL_ENABLE); CSR_WRITE_4(sc, BFE_DMATX_ADDR, sc->bfe_tx_dma + BFE_PCI_DMA); CSR_WRITE_4(sc, BFE_DMARX_CTRL, (BFE_RX_OFFSET << BFE_RX_CTRL_ROSHIFT) | BFE_RX_CTRL_ENABLE); CSR_WRITE_4(sc, BFE_DMARX_ADDR, sc->bfe_rx_dma + BFE_PCI_DMA); bfe_resetphy(sc); bfe_setupphy(sc); } static void bfe_core_disable(struct bfe_softc *sc) { if((CSR_READ_4(sc, BFE_SBTMSLOW)) & BFE_RESET) return; /* * Set reject, wait for it set, then wait for the core to stop * being busy, then set reset and reject and enable the clocks. 
*/ CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_REJECT | BFE_CLOCK)); bfe_wait_bit(sc, BFE_SBTMSLOW, BFE_REJECT, 1000, 0); bfe_wait_bit(sc, BFE_SBTMSHIGH, BFE_BUSY, 1000, 1); CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_FGC | BFE_CLOCK | BFE_REJECT | BFE_RESET)); CSR_READ_4(sc, BFE_SBTMSLOW); DELAY(10); /* Leave reset and reject set */ CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_REJECT | BFE_RESET)); DELAY(10); } static void bfe_core_reset(struct bfe_softc *sc) { u_int32_t val; /* Disable the core */ bfe_core_disable(sc); /* and bring it back up */ CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_RESET | BFE_CLOCK | BFE_FGC)); CSR_READ_4(sc, BFE_SBTMSLOW); DELAY(10); /* Chip bug, clear SERR, IB and TO if they are set. */ if (CSR_READ_4(sc, BFE_SBTMSHIGH) & BFE_SERR) CSR_WRITE_4(sc, BFE_SBTMSHIGH, 0); val = CSR_READ_4(sc, BFE_SBIMSTATE); if (val & (BFE_IBE | BFE_TO)) CSR_WRITE_4(sc, BFE_SBIMSTATE, val & ~(BFE_IBE | BFE_TO)); /* Clear reset and allow it to move through the core */ CSR_WRITE_4(sc, BFE_SBTMSLOW, (BFE_CLOCK | BFE_FGC)); CSR_READ_4(sc, BFE_SBTMSLOW); DELAY(10); /* Leave the clock set */ CSR_WRITE_4(sc, BFE_SBTMSLOW, BFE_CLOCK); CSR_READ_4(sc, BFE_SBTMSLOW); DELAY(10); } static void bfe_cam_write(struct bfe_softc *sc, u_char *data, int index) { u_int32_t val; val = ((u_int32_t) data[2]) << 24; val |= ((u_int32_t) data[3]) << 16; val |= ((u_int32_t) data[4]) << 8; val |= ((u_int32_t) data[5]); CSR_WRITE_4(sc, BFE_CAM_DATA_LO, val); val = (BFE_CAM_HI_VALID | (((u_int32_t) data[0]) << 8) | (((u_int32_t) data[1]))); CSR_WRITE_4(sc, BFE_CAM_DATA_HI, val); CSR_WRITE_4(sc, BFE_CAM_CTRL, (BFE_CAM_WRITE | ((u_int32_t) index << BFE_CAM_INDEX_SHIFT))); bfe_wait_bit(sc, BFE_CAM_CTRL, BFE_CAM_BUSY, 10000, 1); } static void bfe_set_rx_mode(struct bfe_softc *sc) { struct ifnet *ifp = sc->bfe_ifp; struct ifmultiaddr *ifma; u_int32_t val; int i = 0; val = CSR_READ_4(sc, BFE_RXCONF); if (ifp->if_flags & IFF_PROMISC) val |= BFE_RXCONF_PROMISC; else val &= ~BFE_RXCONF_PROMISC; if (ifp->if_flags & IFF_BROADCAST) val 
&= ~BFE_RXCONF_DBCAST; else val |= BFE_RXCONF_DBCAST; CSR_WRITE_4(sc, BFE_CAM_CTRL, 0); bfe_cam_write(sc, IFP2ENADDR(sc->bfe_ifp), i++); if (ifp->if_flags & IFF_ALLMULTI) val |= BFE_RXCONF_ALLMULTI; else { val &= ~BFE_RXCONF_ALLMULTI; + IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; bfe_cam_write(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i++); } + IF_ADDR_UNLOCK(ifp); } CSR_WRITE_4(sc, BFE_RXCONF, val); BFE_OR(sc, BFE_CAM_CTRL, BFE_CAM_ENABLE); } static void bfe_dma_map(void *arg, bus_dma_segment_t *segs, int nseg, int error) { u_int32_t *ptr; ptr = arg; *ptr = segs->ds_addr; } static void bfe_dma_map_desc(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct bfe_desc *d; d = arg; /* The chip needs all addresses to be added to BFE_PCI_DMA */ d->bfe_addr = segs->ds_addr + BFE_PCI_DMA; } static void bfe_release_resources(struct bfe_softc *sc) { device_t dev; int i; dev = sc->bfe_dev; if (sc->bfe_vpd_prodname != NULL) free(sc->bfe_vpd_prodname, M_DEVBUF); if (sc->bfe_vpd_readonly != NULL) free(sc->bfe_vpd_readonly, M_DEVBUF); if (sc->bfe_intrhand != NULL) bus_teardown_intr(dev, sc->bfe_irq, sc->bfe_intrhand); if (sc->bfe_irq != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bfe_irq); if (sc->bfe_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, 0x10, sc->bfe_res); if(sc->bfe_tx_tag != NULL) { bus_dmamap_unload(sc->bfe_tx_tag, sc->bfe_tx_map); bus_dmamem_free(sc->bfe_tx_tag, sc->bfe_tx_list, sc->bfe_tx_map); bus_dma_tag_destroy(sc->bfe_tx_tag); sc->bfe_tx_tag = NULL; } if(sc->bfe_rx_tag != NULL) { bus_dmamap_unload(sc->bfe_rx_tag, sc->bfe_rx_map); bus_dmamem_free(sc->bfe_rx_tag, sc->bfe_rx_list, sc->bfe_rx_map); bus_dma_tag_destroy(sc->bfe_rx_tag); sc->bfe_rx_tag = NULL; } if(sc->bfe_tag != NULL) { for(i = 0; i < BFE_TX_LIST_CNT; i++) { bus_dmamap_destroy(sc->bfe_tag, sc->bfe_tx_ring[i].bfe_map); } for(i = 0; i < BFE_RX_LIST_CNT; i++) { 
bus_dmamap_destroy(sc->bfe_tag, sc->bfe_rx_ring[i].bfe_map); } bus_dma_tag_destroy(sc->bfe_tag); sc->bfe_tag = NULL; } if(sc->bfe_parent_tag != NULL) bus_dma_tag_destroy(sc->bfe_parent_tag); return; } static void bfe_read_eeprom(struct bfe_softc *sc, u_int8_t *data) { long i; u_int16_t *ptr = (u_int16_t *)data; for(i = 0; i < 128; i += 2) ptr[i/2] = CSR_READ_4(sc, 4096 + i); } static int bfe_wait_bit(struct bfe_softc *sc, u_int32_t reg, u_int32_t bit, u_long timeout, const int clear) { u_long i; for (i = 0; i < timeout; i++) { u_int32_t val = CSR_READ_4(sc, reg); if (clear && !(val & bit)) break; if (!clear && (val & bit)) break; DELAY(10); } if (i == timeout) { printf("bfe%d: BUG! Timeout waiting for bit %08x of register " "%x to %s.\n", sc->bfe_unit, bit, reg, (clear ? "clear" : "set")); return (-1); } return (0); } static int bfe_readphy(struct bfe_softc *sc, u_int32_t reg, u_int32_t *val) { int err; /* Clear MII ISR */ CSR_WRITE_4(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII); CSR_WRITE_4(sc, BFE_MDIO_DATA, (BFE_MDIO_SB_START | (BFE_MDIO_OP_READ << BFE_MDIO_OP_SHIFT) | (sc->bfe_phyaddr << BFE_MDIO_PMD_SHIFT) | (reg << BFE_MDIO_RA_SHIFT) | (BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT))); err = bfe_wait_bit(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 100, 0); *val = CSR_READ_4(sc, BFE_MDIO_DATA) & BFE_MDIO_DATA_DATA; return (err); } static int bfe_writephy(struct bfe_softc *sc, u_int32_t reg, u_int32_t val) { int status; CSR_WRITE_4(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII); CSR_WRITE_4(sc, BFE_MDIO_DATA, (BFE_MDIO_SB_START | (BFE_MDIO_OP_WRITE << BFE_MDIO_OP_SHIFT) | (sc->bfe_phyaddr << BFE_MDIO_PMD_SHIFT) | (reg << BFE_MDIO_RA_SHIFT) | (BFE_MDIO_TA_VALID << BFE_MDIO_TA_SHIFT) | (val & BFE_MDIO_DATA_DATA))); status = bfe_wait_bit(sc, BFE_EMAC_ISTAT, BFE_EMAC_INT_MII, 100, 0); return (status); } /* * XXX - I think this is handled by the PHY driver, but it can't hurt to do it * twice */ static int bfe_setupphy(struct bfe_softc *sc) { u_int32_t val; /* Enable activity LED */ 
bfe_readphy(sc, 26, &val); bfe_writephy(sc, 26, val & 0x7fff); bfe_readphy(sc, 26, &val); /* Enable traffic meter LED mode */ bfe_readphy(sc, 27, &val); bfe_writephy(sc, 27, val | (1 << 6)); return (0); } static void bfe_stats_update(struct bfe_softc *sc) { u_long reg; u_int32_t *val; val = &sc->bfe_hwstats.tx_good_octets; for (reg = BFE_TX_GOOD_O; reg <= BFE_TX_PAUSE; reg += 4) { *val++ += CSR_READ_4(sc, reg); } val = &sc->bfe_hwstats.rx_good_octets; for (reg = BFE_RX_GOOD_O; reg <= BFE_RX_NPAUSE; reg += 4) { *val++ += CSR_READ_4(sc, reg); } } static void bfe_txeof(struct bfe_softc *sc) { struct ifnet *ifp; int i, chipidx; BFE_LOCK_ASSERT(sc); ifp = sc->bfe_ifp; chipidx = CSR_READ_4(sc, BFE_DMATX_STAT) & BFE_STAT_CDMASK; chipidx /= sizeof(struct bfe_desc); i = sc->bfe_tx_cons; /* Go through the mbufs and free those that have been transmitted */ while(i != chipidx) { struct bfe_data *r = &sc->bfe_tx_ring[i]; if(r->bfe_mbuf != NULL) { ifp->if_opackets++; m_freem(r->bfe_mbuf); r->bfe_mbuf = NULL; bus_dmamap_unload(sc->bfe_tag, r->bfe_map); } sc->bfe_tx_cnt--; BFE_INC(i, BFE_TX_LIST_CNT); } if(i != sc->bfe_tx_cons) { /* we freed up some mbufs */ sc->bfe_tx_cons = i; ifp->if_flags &= ~IFF_OACTIVE; } if(sc->bfe_tx_cnt == 0) ifp->if_timer = 0; else ifp->if_timer = 5; } /* Pass a received packet up the stack */ static void bfe_rxeof(struct bfe_softc *sc) { struct mbuf *m; struct ifnet *ifp; struct bfe_rxheader *rxheader; struct bfe_data *r; int cons; u_int32_t status, current, len, flags; BFE_LOCK_ASSERT(sc); cons = sc->bfe_rx_cons; status = CSR_READ_4(sc, BFE_DMARX_STAT); current = (status & BFE_STAT_CDMASK) / sizeof(struct bfe_desc); ifp = sc->bfe_ifp; while(current != cons) { r = &sc->bfe_rx_ring[cons]; m = r->bfe_mbuf; rxheader = mtod(m, struct bfe_rxheader*); bus_dmamap_sync(sc->bfe_tag, r->bfe_map, BUS_DMASYNC_POSTWRITE); len = rxheader->len; r->bfe_mbuf = NULL; bus_dmamap_unload(sc->bfe_tag, r->bfe_map); flags = rxheader->flags; len -= ETHER_CRC_LEN; /* flag an 
error and try again */ if ((len > ETHER_MAX_LEN+32) || (flags & BFE_RX_FLAG_ERRORS)) { ifp->if_ierrors++; if (flags & BFE_RX_FLAG_SERR) ifp->if_collisions++; bfe_list_newbuf(sc, cons, m); BFE_INC(cons, BFE_RX_LIST_CNT); continue; } /* Go past the rx header */ if (bfe_list_newbuf(sc, cons, NULL) == 0) { m_adj(m, BFE_RX_OFFSET); m->m_len = m->m_pkthdr.len = len; } else { bfe_list_newbuf(sc, cons, m); ifp->if_ierrors++; BFE_INC(cons, BFE_RX_LIST_CNT); continue; } ifp->if_ipackets++; m->m_pkthdr.rcvif = ifp; BFE_UNLOCK(sc); (*ifp->if_input)(ifp, m); BFE_LOCK(sc); BFE_INC(cons, BFE_RX_LIST_CNT); } sc->bfe_rx_cons = cons; } static void bfe_intr(void *xsc) { struct bfe_softc *sc = xsc; struct ifnet *ifp; u_int32_t istat, imask, flag; ifp = sc->bfe_ifp; BFE_LOCK(sc); istat = CSR_READ_4(sc, BFE_ISTAT); imask = CSR_READ_4(sc, BFE_IMASK); /* * Defer unsolicited interrupts - This is necessary because setting the * chips interrupt mask register to 0 doesn't actually stop the * interrupts */ istat &= imask; CSR_WRITE_4(sc, BFE_ISTAT, istat); CSR_READ_4(sc, BFE_ISTAT); /* not expecting this interrupt, disregard it */ if(istat == 0) { BFE_UNLOCK(sc); return; } if(istat & BFE_ISTAT_ERRORS) { flag = CSR_READ_4(sc, BFE_DMATX_STAT); if(flag & BFE_STAT_EMASK) ifp->if_oerrors++; flag = CSR_READ_4(sc, BFE_DMARX_STAT); if(flag & BFE_RX_FLAG_ERRORS) ifp->if_ierrors++; ifp->if_flags &= ~IFF_RUNNING; bfe_init_locked(sc); } /* A packet was received */ if(istat & BFE_ISTAT_RX) bfe_rxeof(sc); /* A packet was sent */ if(istat & BFE_ISTAT_TX) bfe_txeof(sc); /* We have packets pending, fire them out */ if (ifp->if_flags & IFF_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) bfe_start_locked(ifp); BFE_UNLOCK(sc); } static int bfe_encap(struct bfe_softc *sc, struct mbuf *m_head, u_int32_t *txidx) { struct bfe_desc *d = NULL; struct bfe_data *r = NULL; struct mbuf *m; u_int32_t frag, cur, cnt = 0; int chainlen = 0; if(BFE_TX_LIST_CNT - sc->bfe_tx_cnt < 2) return (ENOBUFS); /* * Count the number of frags 
in this chain to see if * we need to m_defrag. Since the descriptor list is shared * by all packets, we'll m_defrag long chains so that they * do not use up the entire list, even if they would fit. */ for(m = m_head; m != NULL; m = m->m_next) chainlen++; if ((chainlen > BFE_TX_LIST_CNT / 4) || ((BFE_TX_LIST_CNT - (chainlen + sc->bfe_tx_cnt)) < 2)) { m = m_defrag(m_head, M_DONTWAIT); if (m == NULL) return (ENOBUFS); m_head = m; } /* * Start packing the mbufs in this chain into * the fragment pointers. Stop when we run out * of fragments or hit the end of the mbuf chain. */ m = m_head; cur = frag = *txidx; cnt = 0; for(m = m_head; m != NULL; m = m->m_next) { if(m->m_len != 0) { if((BFE_TX_LIST_CNT - (sc->bfe_tx_cnt + cnt)) < 2) return (ENOBUFS); d = &sc->bfe_tx_list[cur]; r = &sc->bfe_tx_ring[cur]; d->bfe_ctrl = BFE_DESC_LEN & m->m_len; /* always intterupt on completion */ d->bfe_ctrl |= BFE_DESC_IOC; if(cnt == 0) /* Set start of frame */ d->bfe_ctrl |= BFE_DESC_SOF; if(cur == BFE_TX_LIST_CNT - 1) /* * Tell the chip to wrap to the start of * the descriptor list */ d->bfe_ctrl |= BFE_DESC_EOT; bus_dmamap_load(sc->bfe_tag, r->bfe_map, mtod(m, void*), m->m_len, bfe_dma_map_desc, d, 0); bus_dmamap_sync(sc->bfe_tag, r->bfe_map, BUS_DMASYNC_PREREAD); frag = cur; BFE_INC(cur, BFE_TX_LIST_CNT); cnt++; } } if (m != NULL) return (ENOBUFS); sc->bfe_tx_list[frag].bfe_ctrl |= BFE_DESC_EOF; sc->bfe_tx_ring[frag].bfe_mbuf = m_head; bus_dmamap_sync(sc->bfe_tx_tag, sc->bfe_tx_map, BUS_DMASYNC_PREREAD); *txidx = cur; sc->bfe_tx_cnt += cnt; return (0); } /* * Set up to transmit a packet. */ static void bfe_start(struct ifnet *ifp) { BFE_LOCK((struct bfe_softc *)ifp->if_softc); bfe_start_locked(ifp); BFE_UNLOCK((struct bfe_softc *)ifp->if_softc); } /* * Set up to transmit a packet. The softc is already locked. 
*/
/*
 * Dequeue packets and hand them to the chip.  Caller holds the softc
 * lock.  Stops early when the TX ring fills (sets IFF_OACTIVE) and
 * kicks the DMA engine once for the whole batch.
 */
static void
bfe_start_locked(struct ifnet *ifp)
{
	struct bfe_softc *sc;
	struct mbuf *m_head = NULL;
	int idx, queued = 0;

	sc = ifp->if_softc;
	idx = sc->bfe_tx_prod;

	BFE_LOCK_ASSERT(sc);

	/*
	 * Not much point trying to send if the link is down
	 * or we have nothing to send.
	 */
	if (!sc->bfe_link && ifp->if_snd.ifq_len < 10)
		return;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	while(sc->bfe_tx_ring[idx].bfe_mbuf == NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if(m_head == NULL)
			break;

		/*
		 * Pack the data into the tx ring.  If we dont have
		 * enough room, let the chip drain the ring.
		 */
		if(bfe_encap(sc, m_head, &idx)) {
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		queued++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	if (queued) {
		sc->bfe_tx_prod = idx;
		/* Transmit - twice due to apparent hardware bug */
		CSR_WRITE_4(sc, BFE_DMATX_PTR, idx * sizeof(struct bfe_desc));
		CSR_WRITE_4(sc, BFE_DMATX_PTR, idx * sizeof(struct bfe_desc));

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 5;
	}
}

/* ifnet if_init entry point: take the lock and defer to bfe_init_locked(). */
static void
bfe_init(void *xsc)
{
	BFE_LOCK((struct bfe_softc *)xsc);
	bfe_init_locked(xsc);
	BFE_UNLOCK((struct bfe_softc *)xsc);
}

/*
 * Bring the interface up: reset the chip, refill the RX ring, program
 * the RX filter, enable the MAC and interrupts, and start the stats
 * timer.  No-op if already running.  Caller holds the softc lock.
 */
static void
bfe_init_locked(void *xsc)
{
	struct bfe_softc *sc = (struct bfe_softc*)xsc;
	struct ifnet *ifp = sc->bfe_ifp;

	BFE_LOCK_ASSERT(sc);

	if (ifp->if_flags & IFF_RUNNING)
		return;

	bfe_stop(sc);
	bfe_chip_reset(sc);

	if (bfe_list_rx_init(sc) == ENOBUFS) {
		printf("bfe%d: bfe_init: Not enough memory for list buffers\n",
		    sc->bfe_unit);
		bfe_stop(sc);
		return;
	}

	bfe_set_rx_mode(sc);

	/* Enable the chip and core */
	BFE_OR(sc, BFE_ENET_CTRL, BFE_ENET_ENABLE);
	/* Enable interrupts */
	CSR_WRITE_4(sc, BFE_IMASK, BFE_IMASK_DEF);

	bfe_ifmedia_upd(ifp);
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	sc->bfe_stat_ch = timeout(bfe_tick, sc, hz);
}

/*
 * Set media options.
*/
/*
 * ifmedia "set" handler: reset every PHY instance and let miibus
 * renegotiate.  Clears the cached link state; bfe_tick() re-detects it.
 */
static int
bfe_ifmedia_upd(struct ifnet *ifp)
{
	struct bfe_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;

	mii = device_get_softc(sc->bfe_miibus);
	sc->bfe_link = 0;
	if (mii->mii_instance) {
		struct mii_softc *miisc;
		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
				miisc = LIST_NEXT(miisc, mii_list))
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return (0);
}

/*
 * Report current media status.
 */
static void
bfe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct bfe_softc *sc = ifp->if_softc;
	struct mii_data *mii;

	mii = device_get_softc(sc->bfe_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

/*
 * ioctl entry point.  Flag changes, multicast updates and media ioctls
 * are handled here; everything else falls through to ether_ioctl().
 */
static int
bfe_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct bfe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	struct mii_data *mii;
	int error = 0;

	switch(command) {
		case SIOCSIFFLAGS:
			BFE_LOCK(sc);
			/*
			 * NOTE(review): brace-less nested if/else — each
			 * "else" binds to the nearest "if", which is the
			 * intended pairing here: UP+RUNNING -> refilter,
			 * UP+!RUNNING -> init, !UP+RUNNING -> stop.
			 */
			if(ifp->if_flags & IFF_UP)
				if(ifp->if_flags & IFF_RUNNING)
					bfe_set_rx_mode(sc);
				else
					bfe_init_locked(sc);
			else if(ifp->if_flags & IFF_RUNNING)
				bfe_stop(sc);
			BFE_UNLOCK(sc);
			break;
		case SIOCADDMULTI:
		case SIOCDELMULTI:
			BFE_LOCK(sc);
			if(ifp->if_flags & IFF_RUNNING)
				bfe_set_rx_mode(sc);
			BFE_UNLOCK(sc);
			break;
		case SIOCGIFMEDIA:
		case SIOCSIFMEDIA:
			mii = device_get_softc(sc->bfe_miibus);
			error = ifmedia_ioctl(ifp, ifr, &mii->mii_media,
			    command);
			break;
		default:
			error = ether_ioctl(ifp, command, data);
			break;
	}

	return (error);
}

/*
 * TX watchdog: the chip stopped responding within the if_timer window,
 * so count an output error and reinitialize the interface.
 */
static void
bfe_watchdog(struct ifnet *ifp)
{
	struct bfe_softc *sc;

	sc = ifp->if_softc;

	BFE_LOCK(sc);

	printf("bfe%d: watchdog timeout -- resetting\n", sc->bfe_unit);

	ifp->if_flags &= ~IFF_RUNNING;
	bfe_init_locked(sc);

	ifp->if_oerrors++;

	BFE_UNLOCK(sc);
}

/*
 * Once-a-second timer: refresh MIB statistics, reschedule itself, and
 * poll the PHY for link until link is established.
 */
static void
bfe_tick(void *xsc)
{
	struct bfe_softc *sc = xsc;
	struct mii_data *mii;

	if (sc == NULL)
		return;

	BFE_LOCK(sc);

	mii = device_get_softc(sc->bfe_miibus);

	bfe_stats_update(sc);
	sc->bfe_stat_ch = timeout(bfe_tick, sc, hz);

	/* Link already up: nothing more to poll. */
	if(sc->bfe_link) {
		BFE_UNLOCK(sc);
		return;
	}
mii_tick(mii); if (!sc->bfe_link && mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) sc->bfe_link++; BFE_UNLOCK(sc); } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void bfe_stop(struct bfe_softc *sc) { struct ifnet *ifp; BFE_LOCK_ASSERT(sc); untimeout(bfe_tick, sc, sc->bfe_stat_ch); ifp = sc->bfe_ifp; bfe_chip_halt(sc); bfe_tx_ring_free(sc); bfe_rx_ring_free(sc); ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); } Index: stable/6/sys/dev/bge/if_bge.c =================================================================== --- stable/6/sys/dev/bge/if_bge.c (revision 149421) +++ stable/6/sys/dev/bge/if_bge.c (revision 149422) @@ -1,3784 +1,3786 @@ /*- * Copyright (c) 2001 Wind River Systems * Copyright (c) 1997, 1998, 1999, 2001 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Broadcom BCM570x family gigabit ethernet driver for FreeBSD. * * The Broadcom BCM5700 is based on technology originally developed by * Alteon Networks as part of the Tigon I and Tigon II gigabit ethernet * MAC chips. The BCM5700, sometimes refered to as the Tigon III, has * two on-board MIPS R4000 CPUs and can have as much as 16MB of external * SSRAM. The BCM5700 supports TCP, UDP and IP checksum offload, jumbo * frames, highly configurable RX filtering, and 16 RX and TX queues * (which, along with RX filter rules, can be used for QOS applications). * Other features, such as TCP segmentation, may be available as part * of value-added firmware updates. Unlike the Tigon I and Tigon II, * firmware images can be stored in hardware and need not be compiled * into the driver. * * The BCM5700 supports the PCI v2.2 and PCI-X v1.0 standards, and will * function in a 32-bit/64-bit 33/66Mhz bus, or a 64-bit/133Mhz bus. * * The BCM5701 is a single-chip solution incorporating both the BCM5700 * MAC and a BCM5401 10/100/1000 PHY. Unlike the BCM5700, the BCM5701 * does not support external SSRAM. * * Broadcom also produces a variation of the BCM5700 under the "Altima" * brand name, which is functionally similar but lacks PCI-X support. * * Without external SSRAM, you can only have at most 4 TX rings, * and the use of the mini RX ring is disabled. 
This seems to imply * that these features are simply not available on the BCM5701. As a * result, this driver does not implement any support for the mini RX * ring. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for DELAY */ #include #include #include #include #include #include #include "miidevs.h" #include #include #include #include #include "opt_bge.h" #define BGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) MODULE_DEPEND(bge, pci, 1, 1, 1); MODULE_DEPEND(bge, ether, 1, 1, 1); MODULE_DEPEND(bge, miibus, 1, 1, 1); /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* * Various supported device vendors/types and their names. Note: the * spec seems to indicate that the hardware still has Alteon's vendor * ID burned into it, though it will always be overriden by the vendor * ID in the EEPROM. Just to be safe, we cover all possibilities. 
*/ #define BGE_DEVDESC_MAX 64 /* Maximum device description length */ static struct bge_type bge_devs[] = { { ALT_VENDORID, ALT_DEVICEID_BCM5700, "Broadcom BCM5700 Gigabit Ethernet" }, { ALT_VENDORID, ALT_DEVICEID_BCM5701, "Broadcom BCM5701 Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5700, "Broadcom BCM5700 Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5701, "Broadcom BCM5701 Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5702, "Broadcom BCM5702 Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5702X, "Broadcom BCM5702X Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5703, "Broadcom BCM5703 Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5703X, "Broadcom BCM5703X Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5704C, "Broadcom BCM5704C Dual Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5704S, "Broadcom BCM5704S Dual Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705, "Broadcom BCM5705 Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705K, "Broadcom BCM5705K Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M, "Broadcom BCM5705M Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5705M_ALT, "Broadcom BCM5705M Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5714C, "Broadcom BCM5714C Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5721, "Broadcom BCM5721 Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5750, "Broadcom BCM5750 Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5750M, "Broadcom BCM5750M Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5751, "Broadcom BCM5751 Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5751M, "Broadcom BCM5751M Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5782, "Broadcom BCM5782 Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5788, "Broadcom BCM5788 Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5789, "Broadcom BCM5789 Gigabit Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5901, 
"Broadcom BCM5901 Fast Ethernet" }, { BCOM_VENDORID, BCOM_DEVICEID_BCM5901A2, "Broadcom BCM5901A2 Fast Ethernet" }, { SK_VENDORID, SK_DEVICEID_ALTIMA, "SysKonnect Gigabit Ethernet" }, { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1000, "Altima AC1000 Gigabit Ethernet" }, { ALTIMA_VENDORID, ALTIMA_DEVICE_AC1002, "Altima AC1002 Gigabit Ethernet" }, { ALTIMA_VENDORID, ALTIMA_DEVICE_AC9100, "Altima AC9100 Gigabit Ethernet" }, { 0, 0, NULL } }; static int bge_probe (device_t); static int bge_attach (device_t); static int bge_detach (device_t); static void bge_release_resources (struct bge_softc *); static void bge_dma_map_addr (void *, bus_dma_segment_t *, int, int); static void bge_dma_map_tx_desc (void *, bus_dma_segment_t *, int, bus_size_t, int); static int bge_dma_alloc (device_t); static void bge_dma_free (struct bge_softc *); static void bge_txeof (struct bge_softc *); static void bge_rxeof (struct bge_softc *); static void bge_tick_locked (struct bge_softc *); static void bge_tick (void *); static void bge_stats_update (struct bge_softc *); static void bge_stats_update_regs (struct bge_softc *); static int bge_encap (struct bge_softc *, struct mbuf *, u_int32_t *); static void bge_intr (void *); static void bge_start_locked (struct ifnet *); static void bge_start (struct ifnet *); static int bge_ioctl (struct ifnet *, u_long, caddr_t); static void bge_init_locked (struct bge_softc *); static void bge_init (void *); static void bge_stop (struct bge_softc *); static void bge_watchdog (struct ifnet *); static void bge_shutdown (device_t); static int bge_ifmedia_upd (struct ifnet *); static void bge_ifmedia_sts (struct ifnet *, struct ifmediareq *); static u_int8_t bge_eeprom_getbyte (struct bge_softc *, int, u_int8_t *); static int bge_read_eeprom (struct bge_softc *, caddr_t, int, int); static void bge_setmulti (struct bge_softc *); static void bge_handle_events (struct bge_softc *); static int bge_alloc_jumbo_mem (struct bge_softc *); static void bge_free_jumbo_mem (struct 
bge_softc *); static void *bge_jalloc (struct bge_softc *); static void bge_jfree (void *, void *); static int bge_newbuf_std (struct bge_softc *, int, struct mbuf *); static int bge_newbuf_jumbo (struct bge_softc *, int, struct mbuf *); static int bge_init_rx_ring_std (struct bge_softc *); static void bge_free_rx_ring_std (struct bge_softc *); static int bge_init_rx_ring_jumbo (struct bge_softc *); static void bge_free_rx_ring_jumbo (struct bge_softc *); static void bge_free_tx_ring (struct bge_softc *); static int bge_init_tx_ring (struct bge_softc *); static int bge_chipinit (struct bge_softc *); static int bge_blockinit (struct bge_softc *); #ifdef notdef static u_int8_t bge_vpd_readbyte(struct bge_softc *, int); static void bge_vpd_read_res (struct bge_softc *, struct vpd_res *, int); static void bge_vpd_read (struct bge_softc *); #endif static u_int32_t bge_readmem_ind (struct bge_softc *, int); static void bge_writemem_ind (struct bge_softc *, int, int); #ifdef notdef static u_int32_t bge_readreg_ind (struct bge_softc *, int); #endif static void bge_writereg_ind (struct bge_softc *, int, int); static int bge_miibus_readreg (device_t, int, int); static int bge_miibus_writereg (device_t, int, int, int); static void bge_miibus_statchg (device_t); static void bge_reset (struct bge_softc *); static device_method_t bge_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bge_probe), DEVMETHOD(device_attach, bge_attach), DEVMETHOD(device_detach, bge_detach), DEVMETHOD(device_shutdown, bge_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, bge_miibus_readreg), DEVMETHOD(miibus_writereg, bge_miibus_writereg), DEVMETHOD(miibus_statchg, bge_miibus_statchg), { 0, 0 } }; static driver_t bge_driver = { "bge", bge_methods, sizeof(struct bge_softc) }; static devclass_t bge_devclass; DRIVER_MODULE(bge, pci, bge_driver, bge_devclass, 0, 
0); DRIVER_MODULE(miibus, bge, miibus_driver, miibus_devclass, 0, 0); static u_int32_t bge_readmem_ind(sc, off) struct bge_softc *sc; int off; { device_t dev; dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); return(pci_read_config(dev, BGE_PCI_MEMWIN_DATA, 4)); } static void bge_writemem_ind(sc, off, val) struct bge_softc *sc; int off, val; { device_t dev; dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_MEMWIN_BASEADDR, off, 4); pci_write_config(dev, BGE_PCI_MEMWIN_DATA, val, 4); return; } #ifdef notdef static u_int32_t bge_readreg_ind(sc, off) struct bge_softc *sc; int off; { device_t dev; dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); return(pci_read_config(dev, BGE_PCI_REG_DATA, 4)); } #endif static void bge_writereg_ind(sc, off, val) struct bge_softc *sc; int off, val; { device_t dev; dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_REG_BASEADDR, off, 4); pci_write_config(dev, BGE_PCI_REG_DATA, val, 4); return; } /* * Map a single buffer address. */ static void bge_dma_map_addr(arg, segs, nseg, error) void *arg; bus_dma_segment_t *segs; int nseg; int error; { struct bge_dmamap_arg *ctx; if (error) return; ctx = arg; if (nseg > ctx->bge_maxsegs) { ctx->bge_maxsegs = 0; return; } ctx->bge_busaddr = segs->ds_addr; return; } /* * Map an mbuf chain into an TX ring. 
*/ static void bge_dma_map_tx_desc(arg, segs, nseg, mapsize, error) void *arg; bus_dma_segment_t *segs; int nseg; bus_size_t mapsize; int error; { struct bge_dmamap_arg *ctx; struct bge_tx_bd *d = NULL; int i = 0, idx; if (error) return; ctx = arg; /* Signal error to caller if there's too many segments */ if (nseg > ctx->bge_maxsegs) { ctx->bge_maxsegs = 0; return; } idx = ctx->bge_idx; while(1) { d = &ctx->bge_ring[idx]; d->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(segs[i].ds_addr)); d->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(segs[i].ds_addr)); d->bge_len = htole16(segs[i].ds_len); d->bge_flags = htole16(ctx->bge_flags); i++; if (i == nseg) break; BGE_INC(idx, BGE_TX_RING_CNT); } d->bge_flags |= htole16(BGE_TXBDFLAG_END); ctx->bge_maxsegs = nseg; ctx->bge_idx = idx; return; } #ifdef notdef static u_int8_t bge_vpd_readbyte(sc, addr) struct bge_softc *sc; int addr; { int i; device_t dev; u_int32_t val; dev = sc->bge_dev; pci_write_config(dev, BGE_PCI_VPD_ADDR, addr, 2); for (i = 0; i < BGE_TIMEOUT * 10; i++) { DELAY(10); if (pci_read_config(dev, BGE_PCI_VPD_ADDR, 2) & BGE_VPD_FLAG) break; } if (i == BGE_TIMEOUT) { printf("bge%d: VPD read timed out\n", sc->bge_unit); return(0); } val = pci_read_config(dev, BGE_PCI_VPD_DATA, 4); return((val >> ((addr % 4) * 8)) & 0xFF); } static void bge_vpd_read_res(sc, res, addr) struct bge_softc *sc; struct vpd_res *res; int addr; { int i; u_int8_t *ptr; ptr = (u_int8_t *)res; for (i = 0; i < sizeof(struct vpd_res); i++) ptr[i] = bge_vpd_readbyte(sc, i + addr); return; } static void bge_vpd_read(sc) struct bge_softc *sc; { int pos = 0, i; struct vpd_res res; if (sc->bge_vpd_prodname != NULL) free(sc->bge_vpd_prodname, M_DEVBUF); if (sc->bge_vpd_readonly != NULL) free(sc->bge_vpd_readonly, M_DEVBUF); sc->bge_vpd_prodname = NULL; sc->bge_vpd_readonly = NULL; bge_vpd_read_res(sc, &res, pos); if (res.vr_id != VPD_RES_ID) { printf("bge%d: bad VPD resource id: expected %x got %x\n", sc->bge_unit, VPD_RES_ID, res.vr_id); return; } pos 
+= sizeof(res); sc->bge_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT); for (i = 0; i < res.vr_len; i++) sc->bge_vpd_prodname[i] = bge_vpd_readbyte(sc, i + pos); sc->bge_vpd_prodname[i] = '\0'; pos += i; bge_vpd_read_res(sc, &res, pos); if (res.vr_id != VPD_RES_READ) { printf("bge%d: bad VPD resource id: expected %x got %x\n", sc->bge_unit, VPD_RES_READ, res.vr_id); return; } pos += sizeof(res); sc->bge_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT); for (i = 0; i < res.vr_len + 1; i++) sc->bge_vpd_readonly[i] = bge_vpd_readbyte(sc, i + pos); return; } #endif /* * Read a byte of data stored in the EEPROM at address 'addr.' The * BCM570x supports both the traditional bitbang interface and an * auto access interface for reading the EEPROM. We use the auto * access method. */ static u_int8_t bge_eeprom_getbyte(sc, addr, dest) struct bge_softc *sc; int addr; u_int8_t *dest; { int i; u_int32_t byte = 0; /* * Enable use of auto EEPROM access so we can avoid * having to use the bitbang method. */ BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_AUTO_EEPROM); /* Reset the EEPROM, load the clock period. */ CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EEADDR_RESET|BGE_EEHALFCLK(BGE_HALFCLK_384SCL)); DELAY(20); /* Issue the read EEPROM command. */ CSR_WRITE_4(sc, BGE_EE_ADDR, BGE_EE_READCMD | addr); /* Wait for completion */ for(i = 0; i < BGE_TIMEOUT * 10; i++) { DELAY(10); if (CSR_READ_4(sc, BGE_EE_ADDR) & BGE_EEADDR_DONE) break; } if (i == BGE_TIMEOUT) { printf("bge%d: eeprom read timed out\n", sc->bge_unit); return(0); } /* Get result. */ byte = CSR_READ_4(sc, BGE_EE_DATA); *dest = (byte >> ((addr % 4) * 8)) & 0xFF; return(0); } /* * Read a sequence of bytes from the EEPROM. */ static int bge_read_eeprom(sc, dest, off, cnt) struct bge_softc *sc; caddr_t dest; int off; int cnt; { int err = 0, i; u_int8_t byte = 0; for (i = 0; i < cnt; i++) { err = bge_eeprom_getbyte(sc, off + i, &byte); if (err) break; *(dest + i) = byte; } return(err ? 
1 : 0); } static int bge_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct bge_softc *sc; u_int32_t val, autopoll; int i; sc = device_get_softc(dev); /* * Broadcom's own driver always assumes the internal * PHY is at GMII address 1. On some chips, the PHY responds * to accesses at all addresses, which could cause us to * bogusly attach the PHY 32 times at probe type. Always * restricting the lookup to address 1 is simpler than * trying to figure out which chips revisions should be * special-cased. */ if (phy != 1) return(0); /* Reading with autopolling on may trigger PCI errors */ autopoll = CSR_READ_4(sc, BGE_MI_MODE); if (autopoll & BGE_MIMODE_AUTOPOLL) { BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); DELAY(40); } CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_READ|BGE_MICOMM_BUSY| BGE_MIPHY(phy)|BGE_MIREG(reg)); for (i = 0; i < BGE_TIMEOUT; i++) { val = CSR_READ_4(sc, BGE_MI_COMM); if (!(val & BGE_MICOMM_BUSY)) break; } if (i == BGE_TIMEOUT) { printf("bge%d: PHY read timed out\n", sc->bge_unit); val = 0; goto done; } val = CSR_READ_4(sc, BGE_MI_COMM); done: if (autopoll & BGE_MIMODE_AUTOPOLL) { BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); DELAY(40); } if (val & BGE_MICOMM_READFAIL) return(0); return(val & 0xFFFF); } static int bge_miibus_writereg(dev, phy, reg, val) device_t dev; int phy, reg, val; { struct bge_softc *sc; u_int32_t autopoll; int i; sc = device_get_softc(dev); /* Reading with autopolling on may trigger PCI errors */ autopoll = CSR_READ_4(sc, BGE_MI_MODE); if (autopoll & BGE_MIMODE_AUTOPOLL) { BGE_CLRBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); DELAY(40); } CSR_WRITE_4(sc, BGE_MI_COMM, BGE_MICMD_WRITE|BGE_MICOMM_BUSY| BGE_MIPHY(phy)|BGE_MIREG(reg)|val); for (i = 0; i < BGE_TIMEOUT; i++) { if (!(CSR_READ_4(sc, BGE_MI_COMM) & BGE_MICOMM_BUSY)) break; } if (autopoll & BGE_MIMODE_AUTOPOLL) { BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL); DELAY(40); } if (i == BGE_TIMEOUT) { printf("bge%d: PHY read timed out\n", sc->bge_unit); 
return(0); } return(0); } static void bge_miibus_statchg(dev) device_t dev; { struct bge_softc *sc; struct mii_data *mii; sc = device_get_softc(dev); mii = device_get_softc(sc->bge_miibus); BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_PORTMODE); if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T) { BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_GMII); } else { BGE_SETBIT(sc, BGE_MAC_MODE, BGE_PORTMODE_MII); } if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); } else { BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); } return; } /* * Handle events that have triggered interrupts. */ static void bge_handle_events(sc) struct bge_softc *sc; { return; } /* * Memory management for jumbo frames. */ static int bge_alloc_jumbo_mem(sc) struct bge_softc *sc; { caddr_t ptr; register int i, error; struct bge_jpool_entry *entry; /* Create tag for jumbo buffer block */ error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BGE_JMEM, 1, BGE_JMEM, 0, NULL, NULL, &sc->bge_cdata.bge_jumbo_tag); if (error) { printf("bge%d: could not allocate jumbo dma tag\n", sc->bge_unit); return (ENOMEM); } /* Allocate DMA'able memory for jumbo buffer block */ error = bus_dmamem_alloc(sc->bge_cdata.bge_jumbo_tag, (void **)&sc->bge_ldata.bge_jumbo_buf, BUS_DMA_NOWAIT, &sc->bge_cdata.bge_jumbo_map); if (error) return (ENOMEM); SLIST_INIT(&sc->bge_jfree_listhead); SLIST_INIT(&sc->bge_jinuse_listhead); /* * Now divide it up into 9K pieces and save the addresses * in an array. 
*/ ptr = sc->bge_ldata.bge_jumbo_buf; for (i = 0; i < BGE_JSLOTS; i++) { sc->bge_cdata.bge_jslots[i] = ptr; ptr += BGE_JLEN; entry = malloc(sizeof(struct bge_jpool_entry), M_DEVBUF, M_NOWAIT); if (entry == NULL) { bge_free_jumbo_mem(sc); sc->bge_ldata.bge_jumbo_buf = NULL; printf("bge%d: no memory for jumbo " "buffer queue!\n", sc->bge_unit); return(ENOBUFS); } entry->slot = i; SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries); } return(0); } static void bge_free_jumbo_mem(sc) struct bge_softc *sc; { int i; struct bge_jpool_entry *entry; for (i = 0; i < BGE_JSLOTS; i++) { entry = SLIST_FIRST(&sc->bge_jfree_listhead); SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries); free(entry, M_DEVBUF); } /* Destroy jumbo buffer block */ if (sc->bge_ldata.bge_rx_jumbo_ring) bus_dmamem_free(sc->bge_cdata.bge_jumbo_tag, sc->bge_ldata.bge_jumbo_buf, sc->bge_cdata.bge_jumbo_map); if (sc->bge_cdata.bge_rx_jumbo_ring_map) bus_dmamap_destroy(sc->bge_cdata.bge_jumbo_tag, sc->bge_cdata.bge_jumbo_map); if (sc->bge_cdata.bge_jumbo_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_jumbo_tag); return; } /* * Allocate a jumbo buffer. */ static void * bge_jalloc(sc) struct bge_softc *sc; { struct bge_jpool_entry *entry; entry = SLIST_FIRST(&sc->bge_jfree_listhead); if (entry == NULL) { printf("bge%d: no free jumbo buffers\n", sc->bge_unit); return(NULL); } SLIST_REMOVE_HEAD(&sc->bge_jfree_listhead, jpool_entries); SLIST_INSERT_HEAD(&sc->bge_jinuse_listhead, entry, jpool_entries); return(sc->bge_cdata.bge_jslots[entry->slot]); } /* * Release a jumbo buffer. */ static void bge_jfree(buf, args) void *buf; void *args; { struct bge_jpool_entry *entry; struct bge_softc *sc; int i; /* Extract the softc struct pointer. 
*/ sc = (struct bge_softc *)args; if (sc == NULL) panic("bge_jfree: can't find softc pointer!"); /* calculate the slot this buffer belongs to */ i = ((vm_offset_t)buf - (vm_offset_t)sc->bge_ldata.bge_jumbo_buf) / BGE_JLEN; if ((i < 0) || (i >= BGE_JSLOTS)) panic("bge_jfree: asked to free buffer that we don't manage!"); entry = SLIST_FIRST(&sc->bge_jinuse_listhead); if (entry == NULL) panic("bge_jfree: buffer not in use!"); entry->slot = i; SLIST_REMOVE_HEAD(&sc->bge_jinuse_listhead, jpool_entries); SLIST_INSERT_HEAD(&sc->bge_jfree_listhead, entry, jpool_entries); return; } /* * Initialize a standard receive ring descriptor. */ static int bge_newbuf_std(sc, i, m) struct bge_softc *sc; int i; struct mbuf *m; { struct mbuf *m_new = NULL; struct bge_rx_bd *r; struct bge_dmamap_arg ctx; int error; if (m == NULL) { MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { return(ENOBUFS); } MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { m_freem(m_new); return(ENOBUFS); } m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; } else { m_new = m; m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; m_new->m_data = m_new->m_ext.ext_buf; } if (!sc->bge_rx_alignment_bug) m_adj(m_new, ETHER_ALIGN); sc->bge_cdata.bge_rx_std_chain[i] = m_new; r = &sc->bge_ldata.bge_rx_std_ring[i]; ctx.bge_maxsegs = 1; ctx.sc = sc; error = bus_dmamap_load(sc->bge_cdata.bge_mtag, sc->bge_cdata.bge_rx_std_dmamap[i], mtod(m_new, void *), m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); if (error || ctx.bge_maxsegs == 0) { if (m == NULL) m_freem(m_new); return(ENOMEM); } r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr)); r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr)); r->bge_flags = htole16(BGE_RXBDFLAG_END); r->bge_len = htole16(m_new->m_len); r->bge_idx = htole16(i); bus_dmamap_sync(sc->bge_cdata.bge_mtag, sc->bge_cdata.bge_rx_std_dmamap[i], BUS_DMASYNC_PREREAD); return(0); } /* * Initialize a jumbo receive ring descriptor. 
This allocates * a jumbo buffer from the pool managed internally by the driver. */ static int bge_newbuf_jumbo(sc, i, m) struct bge_softc *sc; int i; struct mbuf *m; { struct mbuf *m_new = NULL; struct bge_rx_bd *r; struct bge_dmamap_arg ctx; int error; if (m == NULL) { caddr_t *buf = NULL; /* Allocate the mbuf. */ MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { return(ENOBUFS); } /* Allocate the jumbo buffer */ buf = bge_jalloc(sc); if (buf == NULL) { m_freem(m_new); printf("bge%d: jumbo allocation failed " "-- packet dropped!\n", sc->bge_unit); return(ENOBUFS); } /* Attach the buffer to the mbuf. */ m_new->m_data = (void *) buf; m_new->m_len = m_new->m_pkthdr.len = BGE_JUMBO_FRAMELEN; MEXTADD(m_new, buf, BGE_JUMBO_FRAMELEN, bge_jfree, (struct bge_softc *)sc, 0, EXT_NET_DRV); } else { m_new = m; m_new->m_data = m_new->m_ext.ext_buf; m_new->m_ext.ext_size = BGE_JUMBO_FRAMELEN; } if (!sc->bge_rx_alignment_bug) m_adj(m_new, ETHER_ALIGN); /* Set up the descriptor. */ sc->bge_cdata.bge_rx_jumbo_chain[i] = m_new; r = &sc->bge_ldata.bge_rx_jumbo_ring[i]; ctx.bge_maxsegs = 1; ctx.sc = sc; error = bus_dmamap_load(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i], mtod(m_new, void *), m_new->m_len, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); if (error || ctx.bge_maxsegs == 0) { if (m == NULL) m_freem(m_new); return(ENOMEM); } r->bge_addr.bge_addr_lo = htole32(BGE_ADDR_LO(ctx.bge_busaddr)); r->bge_addr.bge_addr_hi = htole32(BGE_ADDR_HI(ctx.bge_busaddr)); r->bge_flags = htole16(BGE_RXBDFLAG_END|BGE_RXBDFLAG_JUMBO_RING); r->bge_len = htole16(m_new->m_len); r->bge_idx = htole16(i); bus_dmamap_sync(sc->bge_cdata.bge_mtag, sc->bge_cdata.bge_rx_jumbo_dmamap[i], BUS_DMASYNC_PREREAD); return(0); } /* * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, * that's 1MB of memory, which is a lot. For now, we fill only the first * 256 ring entries and hope that our CPU is fast enough to keep up with * the NIC. 
*/ static int bge_init_rx_ring_std(sc) struct bge_softc *sc; { int i; for (i = 0; i < BGE_SSLOTS; i++) { if (bge_newbuf_std(sc, i, NULL) == ENOBUFS) return(ENOBUFS); }; bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); sc->bge_std = i - 1; CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); return(0); } static void bge_free_rx_ring_std(sc) struct bge_softc *sc; { int i; for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { if (sc->bge_cdata.bge_rx_std_chain[i] != NULL) { m_freem(sc->bge_cdata.bge_rx_std_chain[i]); sc->bge_cdata.bge_rx_std_chain[i] = NULL; bus_dmamap_unload(sc->bge_cdata.bge_mtag, sc->bge_cdata.bge_rx_std_dmamap[i]); } bzero((char *)&sc->bge_ldata.bge_rx_std_ring[i], sizeof(struct bge_rx_bd)); } return; } static int bge_init_rx_ring_jumbo(sc) struct bge_softc *sc; { int i; struct bge_rcb *rcb; for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { if (bge_newbuf_jumbo(sc, i, NULL) == ENOBUFS) return(ENOBUFS); }; bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); sc->bge_jumbo = i - 1; rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, 0); CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); return(0); } static void bge_free_rx_ring_jumbo(sc) struct bge_softc *sc; { int i; for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) { if (sc->bge_cdata.bge_rx_jumbo_chain[i] != NULL) { m_freem(sc->bge_cdata.bge_rx_jumbo_chain[i]); sc->bge_cdata.bge_rx_jumbo_chain[i] = NULL; bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[i]); } bzero((char *)&sc->bge_ldata.bge_rx_jumbo_ring[i], sizeof(struct bge_rx_bd)); } return; } static void bge_free_tx_ring(sc) struct bge_softc *sc; { int i; if (sc->bge_ldata.bge_tx_ring == NULL) return; for (i = 0; i < BGE_TX_RING_CNT; i++) { if 
(sc->bge_cdata.bge_tx_chain[i] != NULL) { m_freem(sc->bge_cdata.bge_tx_chain[i]); sc->bge_cdata.bge_tx_chain[i] = NULL; bus_dmamap_unload(sc->bge_cdata.bge_mtag, sc->bge_cdata.bge_tx_dmamap[i]); } bzero((char *)&sc->bge_ldata.bge_tx_ring[i], sizeof(struct bge_tx_bd)); } return; } static int bge_init_tx_ring(sc) struct bge_softc *sc; { sc->bge_txcnt = 0; sc->bge_tx_saved_considx = 0; CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); /* 5700 b2 errata */ if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, 0); CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); /* 5700 b2 errata */ if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) CSR_WRITE_4(sc, BGE_MBX_TX_NIC_PROD0_LO, 0); return(0); } static void bge_setmulti(sc) struct bge_softc *sc; { struct ifnet *ifp; struct ifmultiaddr *ifma; u_int32_t hashes[4] = { 0, 0, 0, 0 }; int h, i; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { for (i = 0; i < 4; i++) CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0xFFFFFFFF); return; } /* First, zot all the existing filters. */ for (i = 0; i < 4; i++) CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), 0); /* Now program new ones. */ + IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = ether_crc32_le(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) & 0x7F; hashes[(h & 0x60) >> 5] |= 1 << (h & 0x1F); } + IF_ADDR_UNLOCK(ifp); for (i = 0; i < 4; i++) CSR_WRITE_4(sc, BGE_MAR0 + (i * 4), hashes[i]); return; } /* * Do endian, PCI and DMA initialization. Also check the on-board ROM * self-test results. */ static int bge_chipinit(sc) struct bge_softc *sc; { int i; u_int32_t dma_rw_ctl; /* Set endianness before we access any non-PCI registers. 
*/ #if BYTE_ORDER == BIG_ENDIAN pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_BIGENDIAN_INIT, 4); #else pci_write_config(sc->bge_dev, BGE_PCI_MISC_CTL, BGE_LITTLEENDIAN_INIT, 4); #endif /* * Check the 'ROM failed' bit on the RX CPU to see if * self-tests passed. */ if (CSR_READ_4(sc, BGE_RXCPU_MODE) & BGE_RXCPUMODE_ROMFAIL) { printf("bge%d: RX CPU self-diagnostics failed!\n", sc->bge_unit); return(ENODEV); } /* Clear the MAC control register */ CSR_WRITE_4(sc, BGE_MAC_MODE, 0); /* * Clear the MAC statistics block in the NIC's * internal memory. */ for (i = BGE_STATS_BLOCK; i < BGE_STATS_BLOCK_END + 1; i += sizeof(u_int32_t)) BGE_MEMWIN_WRITE(sc, i, 0); for (i = BGE_STATUS_BLOCK; i < BGE_STATUS_BLOCK_END + 1; i += sizeof(u_int32_t)) BGE_MEMWIN_WRITE(sc, i, 0); /* Set up the PCI DMA control register. */ if (sc->bge_pcie) { dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | (0xf << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | (0x2 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); } else if (pci_read_config(sc->bge_dev, BGE_PCI_PCISTATE, 4) & BGE_PCISTATE_PCI_BUSMODE) { /* Conventional PCI bus */ dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | (0x7 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | (0x0F); } else { /* PCI-X bus */ /* * The 5704 uses a different encoding of read/write * watermarks. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5704) dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | (0x7 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT); else dma_rw_ctl = BGE_PCI_READ_CMD|BGE_PCI_WRITE_CMD | (0x3 << BGE_PCIDMARWCTL_RD_WAT_SHIFT) | (0x3 << BGE_PCIDMARWCTL_WR_WAT_SHIFT) | (0x0F); /* * 5703 and 5704 need ONEDMA_AT_ONCE as a workaround * for hardware bugs. 
*/ if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || sc->bge_asicrev == BGE_ASICREV_BCM5704) { u_int32_t tmp; tmp = CSR_READ_4(sc, BGE_PCI_CLKCTL) & 0x1f; if (tmp == 0x6 || tmp == 0x7) dma_rw_ctl |= BGE_PCIDMARWCTL_ONEDMA_ATONCE; } } if (sc->bge_asicrev == BGE_ASICREV_BCM5703 || sc->bge_asicrev == BGE_ASICREV_BCM5704 || sc->bge_asicrev == BGE_ASICREV_BCM5705 || sc->bge_asicrev == BGE_ASICREV_BCM5750) dma_rw_ctl &= ~BGE_PCIDMARWCTL_MINDMA; pci_write_config(sc->bge_dev, BGE_PCI_DMA_RW_CTL, dma_rw_ctl, 4); /* * Set up general mode register. */ CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_WORDSWAP_NONFRAME| BGE_MODECTL_BYTESWAP_DATA|BGE_MODECTL_WORDSWAP_DATA| BGE_MODECTL_MAC_ATTN_INTR|BGE_MODECTL_HOST_SEND_BDS| BGE_MODECTL_TX_NO_PHDR_CSUM|BGE_MODECTL_RX_NO_PHDR_CSUM); /* * Disable memory write invalidate. Apparently it is not supported * properly by these devices. */ PCI_CLRBIT(sc->bge_dev, BGE_PCI_CMD, PCIM_CMD_MWIEN, 4); #ifdef __brokenalpha__ /* * Must insure that we do not cross an 8K (bytes) boundary * for DMA reads. Our highest limit is 1K bytes. This is a * restriction on some ALPHA platforms with early revision * 21174 PCI chipsets, such as the AlphaPC 164lx */ PCI_SETBIT(sc->bge_dev, BGE_PCI_DMA_RW_CTL, BGE_PCI_READ_BNDRY_1024BYTES, 4); #endif /* Set the timer prescaler (always 66Mhz) */ CSR_WRITE_4(sc, BGE_MISC_CFG, 65 << 1/*BGE_32BITTIME_66MHZ*/); return(0); } static int bge_blockinit(sc) struct bge_softc *sc; { struct bge_rcb *rcb; volatile struct bge_rcb *vrcb; int i; /* * Initialize the memory window pointer register so that * we can access the first 32K of internal NIC RAM. This will * allow us to set up the TX send ring RCBs and the RX return * ring RCBs, plus other things which live in NIC memory. */ CSR_WRITE_4(sc, BGE_PCI_MEMWIN_BASEADDR, 0); /* Note: the BCM5704 has a smaller mbuf space than other chips. 
*/ if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && sc->bge_asicrev != BGE_ASICREV_BCM5750) { /* Configure mbuf memory pool */ if (sc->bge_extram) { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_EXT_SSRAM); if (sc->bge_asicrev == BGE_ASICREV_BCM5704) CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); else CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); } else { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_BASEADDR, BGE_BUFFPOOL_1); if (sc->bge_asicrev == BGE_ASICREV_BCM5704) CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x10000); else CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_LEN, 0x18000); } /* Configure DMA resource pool */ CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_BASEADDR, BGE_DMA_DESCRIPTORS); CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LEN, 0x2000); } /* Configure mbuf pool watermarks */ if (sc->bge_asicrev == BGE_ASICREV_BCM5705 || sc->bge_asicrev == BGE_ASICREV_BCM5750) { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x0); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x10); } else { CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_READDMA_LOWAT, 0x50); CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_MACRX_LOWAT, 0x20); } CSR_WRITE_4(sc, BGE_BMAN_MBUFPOOL_HIWAT, 0x60); /* Configure DMA resource watermarks */ CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_LOWAT, 5); CSR_WRITE_4(sc, BGE_BMAN_DMA_DESCPOOL_HIWAT, 10); /* Enable buffer manager */ if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && sc->bge_asicrev != BGE_ASICREV_BCM5750) { CSR_WRITE_4(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE|BGE_BMANMODE_LOMBUF_ATTN); /* Poll for buffer manager start indication */ for (i = 0; i < BGE_TIMEOUT; i++) { if (CSR_READ_4(sc, BGE_BMAN_MODE) & BGE_BMANMODE_ENABLE) break; DELAY(10); } if (i == BGE_TIMEOUT) { printf("bge%d: buffer manager failed to start\n", sc->bge_unit); return(ENXIO); } } /* Enable flow-through queues */ CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); /* Wait until queue initialization is complete */ for (i = 0; i < BGE_TIMEOUT; i++) { if (CSR_READ_4(sc, BGE_FTQ_RESET) == 0) break; DELAY(10); } if (i 
== BGE_TIMEOUT) { printf("bge%d: flow-through queue init failed\n", sc->bge_unit); return(ENXIO); } /* Initialize the standard RX ring control block */ rcb = &sc->bge_ldata.bge_info.bge_std_rx_rcb; rcb->bge_hostaddr.bge_addr_lo = BGE_ADDR_LO(sc->bge_ldata.bge_rx_std_ring_paddr); rcb->bge_hostaddr.bge_addr_hi = BGE_ADDR_HI(sc->bge_ldata.bge_rx_std_ring_paddr); bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_PREREAD); if (sc->bge_asicrev == BGE_ASICREV_BCM5705 || sc->bge_asicrev == BGE_ASICREV_BCM5750) rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(512, 0); else rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, 0); if (sc->bge_extram) rcb->bge_nicaddr = BGE_EXT_STD_RX_RINGS; else rcb->bge_nicaddr = BGE_STD_RX_RINGS; CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); CSR_WRITE_4(sc, BGE_RX_STD_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); CSR_WRITE_4(sc, BGE_RX_STD_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); CSR_WRITE_4(sc, BGE_RX_STD_RCB_NICADDR, rcb->bge_nicaddr); /* * Initialize the jumbo RX ring control block * We set the 'ring disabled' bit in the flags * field until we're actually ready to start * using this ring (i.e. once we set the MTU * high enough to require it). 
*/ if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && sc->bge_asicrev != BGE_ASICREV_BCM5750) { rcb = &sc->bge_ldata.bge_info.bge_jumbo_rx_rcb; rcb->bge_hostaddr.bge_addr_lo = BGE_ADDR_LO(sc->bge_ldata.bge_rx_jumbo_ring_paddr); rcb->bge_hostaddr.bge_addr_hi = BGE_ADDR_HI(sc->bge_ldata.bge_rx_jumbo_ring_paddr); bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREREAD); rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(BGE_MAX_FRAMELEN, BGE_RCB_FLAG_RING_DISABLED); if (sc->bge_extram) rcb->bge_nicaddr = BGE_EXT_JUMBO_RX_RINGS; else rcb->bge_nicaddr = BGE_JUMBO_RX_RINGS; CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_HI, rcb->bge_hostaddr.bge_addr_hi); CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_HADDR_LO, rcb->bge_hostaddr.bge_addr_lo); CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); CSR_WRITE_4(sc, BGE_RX_JUMBO_RCB_NICADDR, rcb->bge_nicaddr); /* Set up dummy disabled mini ring RCB */ rcb = &sc->bge_ldata.bge_info.bge_mini_rx_rcb; rcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED); CSR_WRITE_4(sc, BGE_RX_MINI_RCB_MAXLEN_FLAGS, rcb->bge_maxlen_flags); } /* * Set the BD ring replentish thresholds. The recommended * values are 1/8th the number of descriptors allocated to * each ring. */ CSR_WRITE_4(sc, BGE_RBDI_STD_REPL_THRESH, BGE_STD_RX_RING_CNT/8); CSR_WRITE_4(sc, BGE_RBDI_JUMBO_REPL_THRESH, BGE_JUMBO_RX_RING_CNT/8); /* * Disable all unused send rings by setting the 'ring disabled' * bit in the flags field of all the TX send ring control blocks. * These are located in NIC memory. 
*/ vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START + BGE_SEND_RING_RCB); for (i = 0; i < BGE_TX_RINGS_EXTSSRAM_MAX; i++) { vrcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(0, BGE_RCB_FLAG_RING_DISABLED); vrcb->bge_nicaddr = 0; vrcb++; } /* Configure TX RCB 0 (we use only the first ring) */ vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START + BGE_SEND_RING_RCB); vrcb->bge_hostaddr.bge_addr_lo = htole32(BGE_ADDR_LO(sc->bge_ldata.bge_tx_ring_paddr)); vrcb->bge_hostaddr.bge_addr_hi = htole32(BGE_ADDR_HI(sc->bge_ldata.bge_tx_ring_paddr)); vrcb->bge_nicaddr = BGE_NIC_TXRING_ADDR(0, BGE_TX_RING_CNT); if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && sc->bge_asicrev != BGE_ASICREV_BCM5750) vrcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(BGE_TX_RING_CNT, 0); /* Disable all unused RX return rings */ vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB); for (i = 0; i < BGE_RX_RINGS_MAX; i++) { vrcb->bge_hostaddr.bge_addr_hi = 0; vrcb->bge_hostaddr.bge_addr_lo = 0; vrcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, BGE_RCB_FLAG_RING_DISABLED); vrcb->bge_nicaddr = 0; CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO + (i * (sizeof(u_int64_t))), 0); vrcb++; } /* Initialize RX ring indexes */ CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, 0); CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, 0); CSR_WRITE_4(sc, BGE_MBX_RX_MINI_PROD_LO, 0); /* * Set up RX return ring 0 * Note that the NIC address for RX return rings is 0x00000000. * The return rings live entirely within the host, so the * nicaddr field in the RCB isn't used. 
*/ vrcb = (volatile struct bge_rcb *)(sc->bge_vhandle + BGE_MEMWIN_START + BGE_RX_RETURN_RING_RCB); vrcb->bge_hostaddr.bge_addr_lo = BGE_ADDR_LO(sc->bge_ldata.bge_rx_return_ring_paddr); vrcb->bge_hostaddr.bge_addr_hi = BGE_ADDR_HI(sc->bge_ldata.bge_rx_return_ring_paddr); bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREWRITE); vrcb->bge_nicaddr = 0x00000000; vrcb->bge_maxlen_flags = BGE_RCB_MAXLEN_FLAGS(sc->bge_return_ring_cnt, 0); /* Set random backoff seed for TX */ CSR_WRITE_4(sc, BGE_TX_RANDOM_BACKOFF, IFP2ENADDR(sc->bge_ifp)[0] + IFP2ENADDR(sc->bge_ifp)[1] + IFP2ENADDR(sc->bge_ifp)[2] + IFP2ENADDR(sc->bge_ifp)[3] + IFP2ENADDR(sc->bge_ifp)[4] + IFP2ENADDR(sc->bge_ifp)[5] + BGE_TX_BACKOFF_SEED_MASK); /* Set inter-packet gap */ CSR_WRITE_4(sc, BGE_TX_LENGTHS, 0x2620); /* * Specify which ring to use for packets that don't match * any RX rules. */ CSR_WRITE_4(sc, BGE_RX_RULES_CFG, 0x08); /* * Configure number of RX lists. One interrupt distribution * list, sixteen active lists, one bad frames class. */ CSR_WRITE_4(sc, BGE_RXLP_CFG, 0x181); /* Inialize RX list placement stats mask. */ CSR_WRITE_4(sc, BGE_RXLP_STATS_ENABLE_MASK, 0x007FFFFF); CSR_WRITE_4(sc, BGE_RXLP_STATS_CTL, 0x1); /* Disable host coalescing until we get it set up */ CSR_WRITE_4(sc, BGE_HCC_MODE, 0x00000000); /* Poll to make sure it's shut down. 
*/ for (i = 0; i < BGE_TIMEOUT; i++) { if (!(CSR_READ_4(sc, BGE_HCC_MODE) & BGE_HCCMODE_ENABLE)) break; DELAY(10); } if (i == BGE_TIMEOUT) { printf("bge%d: host coalescing engine failed to idle\n", sc->bge_unit); return(ENXIO); } /* Set up host coalescing defaults */ CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS, sc->bge_rx_coal_ticks); CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS, sc->bge_tx_coal_ticks); CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS, sc->bge_rx_max_coal_bds); CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS, sc->bge_tx_max_coal_bds); if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && sc->bge_asicrev != BGE_ASICREV_BCM5750) { CSR_WRITE_4(sc, BGE_HCC_RX_COAL_TICKS_INT, 0); CSR_WRITE_4(sc, BGE_HCC_TX_COAL_TICKS_INT, 0); } CSR_WRITE_4(sc, BGE_HCC_RX_MAX_COAL_BDS_INT, 0); CSR_WRITE_4(sc, BGE_HCC_TX_MAX_COAL_BDS_INT, 0); /* Set up address of statistics block */ if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && sc->bge_asicrev != BGE_ASICREV_BCM5750) { CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_HI, BGE_ADDR_HI(sc->bge_ldata.bge_stats_paddr)); CSR_WRITE_4(sc, BGE_HCC_STATS_ADDR_LO, BGE_ADDR_LO(sc->bge_ldata.bge_stats_paddr)); CSR_WRITE_4(sc, BGE_HCC_STATS_BASEADDR, BGE_STATS_BLOCK); CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_BASEADDR, BGE_STATUS_BLOCK); CSR_WRITE_4(sc, BGE_HCC_STATS_TICKS, sc->bge_stat_ticks); } /* Set up address of status block */ CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_HI, BGE_ADDR_HI(sc->bge_ldata.bge_status_block_paddr)); CSR_WRITE_4(sc, BGE_HCC_STATUSBLK_ADDR_LO, BGE_ADDR_LO(sc->bge_ldata.bge_status_block_paddr)); bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE); sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx = 0; sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx = 0; /* Turn on host coalescing state machine */ CSR_WRITE_4(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); /* Turn on RX BD completion state machine and enable attentions */ CSR_WRITE_4(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE|BGE_RBDCMODE_ATTN); /* Turn on RX list 
placement state machine */ CSR_WRITE_4(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); /* Turn on RX list selector state machine. */ if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && sc->bge_asicrev != BGE_ASICREV_BCM5750) CSR_WRITE_4(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); /* Turn on DMA, clear stats */ CSR_WRITE_4(sc, BGE_MAC_MODE, BGE_MACMODE_TXDMA_ENB| BGE_MACMODE_RXDMA_ENB|BGE_MACMODE_RX_STATS_CLEAR| BGE_MACMODE_TX_STATS_CLEAR|BGE_MACMODE_RX_STATS_ENB| BGE_MACMODE_TX_STATS_ENB|BGE_MACMODE_FRMHDR_DMA_ENB| (sc->bge_tbi ? BGE_PORTMODE_TBI : BGE_PORTMODE_MII)); /* Set misc. local control, enable interrupts on attentions */ CSR_WRITE_4(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_INTR_ONATTN); #ifdef notdef /* Assert GPIO pins for PHY reset */ BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUT0| BGE_MLC_MISCIO_OUT1|BGE_MLC_MISCIO_OUT2); BGE_SETBIT(sc, BGE_MISC_LOCAL_CTL, BGE_MLC_MISCIO_OUTEN0| BGE_MLC_MISCIO_OUTEN1|BGE_MLC_MISCIO_OUTEN2); #endif /* Turn on DMA completion state machine */ if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && sc->bge_asicrev != BGE_ASICREV_BCM5750) CSR_WRITE_4(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); /* Turn on write DMA state machine */ CSR_WRITE_4(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE|BGE_WDMAMODE_ALL_ATTNS); /* Turn on read DMA state machine */ CSR_WRITE_4(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE|BGE_RDMAMODE_ALL_ATTNS); /* Turn on RX data completion state machine */ CSR_WRITE_4(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); /* Turn on RX BD initiator state machine */ CSR_WRITE_4(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); /* Turn on RX data and RX BD initiator state machine */ CSR_WRITE_4(sc, BGE_RDBDI_MODE, BGE_RDBDIMODE_ENABLE); /* Turn on Mbuf cluster free state machine */ if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && sc->bge_asicrev != BGE_ASICREV_BCM5750) CSR_WRITE_4(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); /* Turn on send BD completion state machine */ CSR_WRITE_4(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); /* Turn on send data completion state machine */ 
CSR_WRITE_4(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); /* Turn on send data initiator state machine */ CSR_WRITE_4(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); /* Turn on send BD initiator state machine */ CSR_WRITE_4(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); /* Turn on send BD selector state machine */ CSR_WRITE_4(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); CSR_WRITE_4(sc, BGE_SDI_STATS_ENABLE_MASK, 0x007FFFFF); CSR_WRITE_4(sc, BGE_SDI_STATS_CTL, BGE_SDISTATSCTL_ENABLE|BGE_SDISTATSCTL_FASTER); /* ack/clear link change events */ CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| BGE_MACSTAT_LINK_CHANGED); CSR_WRITE_4(sc, BGE_MI_STS, 0); /* Enable PHY auto polling (for MII/GMII only) */ if (sc->bge_tbi) { CSR_WRITE_4(sc, BGE_MI_STS, BGE_MISTS_LINK); } else { BGE_SETBIT(sc, BGE_MI_MODE, BGE_MIMODE_AUTOPOLL|10<<16); if (sc->bge_asicrev == BGE_ASICREV_BCM5700) CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_MI_INTERRUPT); } /* Enable link state change attentions. */ BGE_SETBIT(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_LINK_CHANGED); return(0); } /* * Probe for a Broadcom chip. Check the PCI vendor and device IDs * against our list and return its name if we find a match. Note * that since the Broadcom controller contains VPD support, we * can get the device name string from the controller itself instead * of the compiled-in string. This is a little slow, but it guarantees * we'll always announce the right product name. 
*/
static int
bge_probe(dev)
	device_t dev;
{
	struct bge_type *t;
	struct bge_softc *sc;
	char *descbuf;

	/*
	 * Probe runs before attach: zero the softc and stash the
	 * unit/device handles so the description code below can use them.
	 */
	t = bge_devs;
	sc = device_get_softc(dev);
	bzero(sc, sizeof(struct bge_softc));
	sc->bge_unit = device_get_unit(dev);
	sc->bge_dev = dev;

	/* Walk the supported-device table looking for a vendor/device match. */
	while(t->bge_name != NULL) {
		if ((pci_get_vendor(dev) == t->bge_vid) &&
		    (pci_get_device(dev) == t->bge_did)) {
#ifdef notdef
			bge_vpd_read(sc);
			device_set_desc(dev, sc->bge_vpd_prodname);
#endif
			/* Build "name, ASIC rev" description string. */
			descbuf = malloc(BGE_DEVDESC_MAX, M_TEMP, M_NOWAIT);
			if (descbuf == NULL)
				return(ENOMEM);
			snprintf(descbuf, BGE_DEVDESC_MAX,
			    "%s, ASIC rev. %#04x", t->bge_name,
			    pci_read_config(dev, BGE_PCI_MISC_CTL, 4) >> 16);
			device_set_desc_copy(dev, descbuf);
			/* Dell cards apparently lack the third LED. */
			if (pci_get_subvendor(dev) == DELL_VENDORID)
				sc->bge_no_3_led = 1;
			free(descbuf, M_TEMP);
			return(0);
		}
		t++;
	}

	return(ENXIO);
}

/*
 * Tear down all bus_dma resources created by bge_dma_alloc().
 * Safe to call with partially-initialized state: every destroy is
 * guarded by a NULL check on the corresponding tag/map/memory.
 */
static void
bge_dma_free(sc)
	struct bge_softc *sc;
{
	int i;

	/* Destroy DMA maps for RX buffers */
	for (i = 0; i < BGE_STD_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_std_dmamap[i])
			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_rx_std_dmamap[i]);
	}

	/* Destroy DMA maps for jumbo RX buffers */
	for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_rx_jumbo_dmamap[i])
			bus_dmamap_destroy(sc->bge_cdata.bge_mtag_jumbo,
			    sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
	}

	/* Destroy DMA maps for TX buffers */
	for (i = 0; i < BGE_TX_RING_CNT; i++) {
		if (sc->bge_cdata.bge_tx_dmamap[i])
			bus_dmamap_destroy(sc->bge_cdata.bge_mtag,
			    sc->bge_cdata.bge_tx_dmamap[i]);
	}

	if (sc->bge_cdata.bge_mtag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_mtag);

	/*
	 * Destroy standard RX ring.
	 * NOTE(review): bus_dmamem_free() is issued before
	 * bus_dmamap_unload() here and for all the rings below;
	 * bus_dma(9) convention is unload-then-free — confirm.
	 */
	if (sc->bge_ldata.bge_rx_std_ring)
		bus_dmamem_free(sc->bge_cdata.bge_rx_std_ring_tag,
		    sc->bge_ldata.bge_rx_std_ring,
		    sc->bge_cdata.bge_rx_std_ring_map);

	if (sc->bge_cdata.bge_rx_std_ring_map) {
		bus_dmamap_unload(sc->bge_cdata.bge_rx_std_ring_tag,
		    sc->bge_cdata.bge_rx_std_ring_map);
		bus_dmamap_destroy(sc->bge_cdata.bge_rx_std_ring_tag,
		    sc->bge_cdata.bge_rx_std_ring_map);
	}

	if
	(sc->bge_cdata.bge_rx_std_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_std_ring_tag);

	/* Destroy jumbo RX ring */
	if (sc->bge_ldata.bge_rx_jumbo_ring)
		bus_dmamem_free(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_ldata.bge_rx_jumbo_ring,
		    sc->bge_cdata.bge_rx_jumbo_ring_map);

	if (sc->bge_cdata.bge_rx_jumbo_ring_map) {
		bus_dmamap_unload(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map);
		bus_dmamap_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map);
	}

	if (sc->bge_cdata.bge_rx_jumbo_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_jumbo_ring_tag);

	/* Destroy RX return ring */
	if (sc->bge_ldata.bge_rx_return_ring)
		bus_dmamem_free(sc->bge_cdata.bge_rx_return_ring_tag,
		    sc->bge_ldata.bge_rx_return_ring,
		    sc->bge_cdata.bge_rx_return_ring_map);

	if (sc->bge_cdata.bge_rx_return_ring_map) {
		bus_dmamap_unload(sc->bge_cdata.bge_rx_return_ring_tag,
		    sc->bge_cdata.bge_rx_return_ring_map);
		bus_dmamap_destroy(sc->bge_cdata.bge_rx_return_ring_tag,
		    sc->bge_cdata.bge_rx_return_ring_map);
	}

	if (sc->bge_cdata.bge_rx_return_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_rx_return_ring_tag);

	/* Destroy TX ring */
	if (sc->bge_ldata.bge_tx_ring)
		bus_dmamem_free(sc->bge_cdata.bge_tx_ring_tag,
		    sc->bge_ldata.bge_tx_ring,
		    sc->bge_cdata.bge_tx_ring_map);

	if (sc->bge_cdata.bge_tx_ring_map) {
		bus_dmamap_unload(sc->bge_cdata.bge_tx_ring_tag,
		    sc->bge_cdata.bge_tx_ring_map);
		bus_dmamap_destroy(sc->bge_cdata.bge_tx_ring_tag,
		    sc->bge_cdata.bge_tx_ring_map);
	}

	if (sc->bge_cdata.bge_tx_ring_tag)
		bus_dma_tag_destroy(sc->bge_cdata.bge_tx_ring_tag);

	/* Destroy status block */
	if (sc->bge_ldata.bge_status_block)
		bus_dmamem_free(sc->bge_cdata.bge_status_tag,
		    sc->bge_ldata.bge_status_block,
		    sc->bge_cdata.bge_status_map);

	if (sc->bge_cdata.bge_status_map) {
		bus_dmamap_unload(sc->bge_cdata.bge_status_tag,
		    sc->bge_cdata.bge_status_map);
		bus_dmamap_destroy(sc->bge_cdata.bge_status_tag,
		    sc->bge_cdata.bge_status_map);
	}

	if (sc->bge_cdata.bge_status_tag)
bus_dma_tag_destroy(sc->bge_cdata.bge_status_tag); /* Destroy statistics block */ if (sc->bge_ldata.bge_stats) bus_dmamem_free(sc->bge_cdata.bge_stats_tag, sc->bge_ldata.bge_stats, sc->bge_cdata.bge_stats_map); if (sc->bge_cdata.bge_stats_map) { bus_dmamap_unload(sc->bge_cdata.bge_stats_tag, sc->bge_cdata.bge_stats_map); bus_dmamap_destroy(sc->bge_cdata.bge_stats_tag, sc->bge_cdata.bge_stats_map); } if (sc->bge_cdata.bge_stats_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_stats_tag); /* Destroy the parent tag */ if (sc->bge_cdata.bge_parent_tag) bus_dma_tag_destroy(sc->bge_cdata.bge_parent_tag); return; } static int bge_dma_alloc(dev) device_t dev; { struct bge_softc *sc; int nseg, i, error; struct bge_dmamap_arg ctx; sc = device_get_softc(dev); /* * Allocate the parent bus DMA tag appropriate for PCI. */ #define BGE_NSEG_NEW 32 error = bus_dma_tag_create(NULL, /* parent */ PAGE_SIZE, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MAXBSIZE, BGE_NSEG_NEW, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->bge_cdata.bge_parent_tag); /* * Create tag for RX mbufs. 
*/ nseg = 32; error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * nseg, nseg, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->bge_cdata.bge_mtag); if (error) { device_printf(dev, "could not allocate dma tag\n"); return (ENOMEM); } /* Create DMA maps for RX buffers */ for (i = 0; i < BGE_STD_RX_RING_CNT; i++) { error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0, &sc->bge_cdata.bge_rx_std_dmamap[i]); if (error) { device_printf(dev, "can't create DMA map for RX\n"); return(ENOMEM); } } /* Create DMA maps for TX buffers */ for (i = 0; i < BGE_TX_RING_CNT; i++) { error = bus_dmamap_create(sc->bge_cdata.bge_mtag, 0, &sc->bge_cdata.bge_tx_dmamap[i]); if (error) { device_printf(dev, "can't create DMA map for RX\n"); return(ENOMEM); } } /* Create tag for standard RX ring */ error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag, PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BGE_STD_RX_RING_SZ, 1, BGE_STD_RX_RING_SZ, 0, NULL, NULL, &sc->bge_cdata.bge_rx_std_ring_tag); if (error) { device_printf(dev, "could not allocate dma tag\n"); return (ENOMEM); } /* Allocate DMA'able memory for standard RX ring */ error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_std_ring_tag, (void **)&sc->bge_ldata.bge_rx_std_ring, BUS_DMA_NOWAIT, &sc->bge_cdata.bge_rx_std_ring_map); if (error) return (ENOMEM); bzero((char *)sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ); /* Load the address of the standard RX ring */ ctx.bge_maxsegs = 1; ctx.sc = sc; error = bus_dmamap_load(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map, sc->bge_ldata.bge_rx_std_ring, BGE_STD_RX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT); if (error) return (ENOMEM); sc->bge_ldata.bge_rx_std_ring_paddr = ctx.bge_busaddr; if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && sc->bge_asicrev != BGE_ASICREV_BCM5750) { /* * Create tag for jumbo mbufs. * This is really a bit of a kludge. 
We allocate a special
		 * jumbo buffer pool which (thanks to the way our DMA
		 * memory allocation works) will consist of contiguous
		 * pages. This means that even though a jumbo buffer might
		 * be larger than a page size, we don't really need to
		 * map it into more than one DMA segment. However, the
		 * default mbuf tag will result in multi-segment mappings,
		 * so we have to create a special jumbo mbuf tag that
		 * lets us get away with mapping the jumbo buffers as
		 * a single segment. I think eventually the driver should
		 * be changed so that it uses ordinary mbufs and cluster
		 * buffers, i.e. jumbo frames can span multiple DMA
		 * descriptors. But that's a project for another day.
		 */
		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
		    1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
		    NULL, MCLBYTES * nseg, nseg, BGE_JLEN, 0, NULL,
		    NULL, &sc->bge_cdata.bge_mtag_jumbo);

		if (error) {
			device_printf(dev, "could not allocate dma tag\n");
			return (ENOMEM);
		}

		/* Create tag for jumbo RX ring */
		error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
		    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
		    NULL, BGE_JUMBO_RX_RING_SZ, 1, BGE_JUMBO_RX_RING_SZ, 0,
		    NULL, NULL, &sc->bge_cdata.bge_rx_jumbo_ring_tag);

		if (error) {
			device_printf(dev, "could not allocate dma tag\n");
			return (ENOMEM);
		}

		/* Allocate DMA'able memory for jumbo RX ring */
		error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    (void **)&sc->bge_ldata.bge_rx_jumbo_ring, BUS_DMA_NOWAIT,
		    &sc->bge_cdata.bge_rx_jumbo_ring_map);
		if (error)
			return (ENOMEM);

		bzero((char *)sc->bge_ldata.bge_rx_jumbo_ring,
		    BGE_JUMBO_RX_RING_SZ);

		/* Load the address of the jumbo RX ring */
		ctx.bge_maxsegs = 1;
		ctx.sc = sc;

		error = bus_dmamap_load(sc->bge_cdata.bge_rx_jumbo_ring_tag,
		    sc->bge_cdata.bge_rx_jumbo_ring_map,
		    sc->bge_ldata.bge_rx_jumbo_ring, BGE_JUMBO_RX_RING_SZ,
		    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);

		if (error)
			return (ENOMEM);

		sc->bge_ldata.bge_rx_jumbo_ring_paddr = ctx.bge_busaddr;

		/* Create DMA maps for jumbo RX
		   buffers */
		for (i = 0; i < BGE_JUMBO_RX_RING_CNT; i++) {
			error = bus_dmamap_create(sc->bge_cdata.bge_mtag_jumbo,
				    0, &sc->bge_cdata.bge_rx_jumbo_dmamap[i]);
			if (error) {
				device_printf(dev,
				    "can't create DMA map for RX\n");
				return(ENOMEM);
			}
		}

	}

	/* Create tag for RX return ring */
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
	    NULL, BGE_RX_RTN_RING_SZ(sc), 1, BGE_RX_RTN_RING_SZ(sc), 0,
	    NULL, NULL, &sc->bge_cdata.bge_rx_return_ring_tag);

	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for RX return ring */
	error = bus_dmamem_alloc(sc->bge_cdata.bge_rx_return_ring_tag,
	    (void **)&sc->bge_ldata.bge_rx_return_ring, BUS_DMA_NOWAIT,
	    &sc->bge_cdata.bge_rx_return_ring_map);
	if (error)
		return (ENOMEM);

	bzero((char *)sc->bge_ldata.bge_rx_return_ring,
	    BGE_RX_RTN_RING_SZ(sc));

	/* Load the address of the RX return ring */
	ctx.bge_maxsegs = 1;
	ctx.sc = sc;

	error = bus_dmamap_load(sc->bge_cdata.bge_rx_return_ring_tag,
	    sc->bge_cdata.bge_rx_return_ring_map,
	    sc->bge_ldata.bge_rx_return_ring, BGE_RX_RTN_RING_SZ(sc),
	    bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);

	if (error)
		return (ENOMEM);

	sc->bge_ldata.bge_rx_return_ring_paddr = ctx.bge_busaddr;

	/* Create tag for TX ring */
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
	    NULL, BGE_TX_RING_SZ, 1, BGE_TX_RING_SZ, 0, NULL, NULL,
	    &sc->bge_cdata.bge_tx_ring_tag);

	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for TX ring */
	error = bus_dmamem_alloc(sc->bge_cdata.bge_tx_ring_tag,
	    (void **)&sc->bge_ldata.bge_tx_ring, BUS_DMA_NOWAIT,
	    &sc->bge_cdata.bge_tx_ring_map);
	if (error)
		return (ENOMEM);

	bzero((char *)sc->bge_ldata.bge_tx_ring, BGE_TX_RING_SZ);

	/* Load the address of the TX ring */
	ctx.bge_maxsegs = 1;
	ctx.sc = sc;

	error = bus_dmamap_load(sc->bge_cdata.bge_tx_ring_tag,
	    sc->bge_cdata.bge_tx_ring_map, sc->bge_ldata.bge_tx_ring,
	    BGE_TX_RING_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);

	if (error)
		return (ENOMEM);

	sc->bge_ldata.bge_tx_ring_paddr = ctx.bge_busaddr;

	/* Create tag for status block */
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
	    NULL, BGE_STATUS_BLK_SZ, 1, BGE_STATUS_BLK_SZ, 0,
	    NULL, NULL, &sc->bge_cdata.bge_status_tag);

	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for status block */
	error = bus_dmamem_alloc(sc->bge_cdata.bge_status_tag,
	    (void **)&sc->bge_ldata.bge_status_block, BUS_DMA_NOWAIT,
	    &sc->bge_cdata.bge_status_map);
	if (error)
		return (ENOMEM);

	bzero((char *)sc->bge_ldata.bge_status_block, BGE_STATUS_BLK_SZ);

	/* Load the address of the status block */
	ctx.sc = sc;
	ctx.bge_maxsegs = 1;

	error = bus_dmamap_load(sc->bge_cdata.bge_status_tag,
	    sc->bge_cdata.bge_status_map, sc->bge_ldata.bge_status_block,
	    BGE_STATUS_BLK_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);

	if (error)
		return (ENOMEM);

	sc->bge_ldata.bge_status_block_paddr = ctx.bge_busaddr;

	/* Create tag for statistics block */
	error = bus_dma_tag_create(sc->bge_cdata.bge_parent_tag,
	    PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL,
	    NULL, BGE_STATS_SZ, 1, BGE_STATS_SZ, 0, NULL, NULL,
	    &sc->bge_cdata.bge_stats_tag);

	if (error) {
		device_printf(dev, "could not allocate dma tag\n");
		return (ENOMEM);
	}

	/* Allocate DMA'able memory for statistics block */
	error = bus_dmamem_alloc(sc->bge_cdata.bge_stats_tag,
	    (void **)&sc->bge_ldata.bge_stats, BUS_DMA_NOWAIT,
	    &sc->bge_cdata.bge_stats_map);
	if (error)
		return (ENOMEM);

	bzero((char *)sc->bge_ldata.bge_stats, BGE_STATS_SZ);

	/* Load the address of the statistics block */
	ctx.sc = sc;
	ctx.bge_maxsegs = 1;

	error = bus_dmamap_load(sc->bge_cdata.bge_stats_tag,
	    sc->bge_cdata.bge_stats_map, sc->bge_ldata.bge_stats,
	    BGE_STATS_SZ, bge_dma_map_addr, &ctx, BUS_DMA_NOWAIT);

	if (error)
		return
(ENOMEM); sc->bge_ldata.bge_stats_paddr = ctx.bge_busaddr; return(0); } static int bge_attach(dev) device_t dev; { struct ifnet *ifp; struct bge_softc *sc; u_int32_t hwcfg = 0; u_int32_t mac_tmp = 0; u_char eaddr[6]; int unit, error = 0, rid; sc = device_get_softc(dev); unit = device_get_unit(dev); sc->bge_dev = dev; sc->bge_unit = unit; /* * Map control/status registers. */ pci_enable_busmaster(dev); rid = BGE_PCI_BAR0; sc->bge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE|PCI_RF_DENSE); if (sc->bge_res == NULL) { printf ("bge%d: couldn't map memory\n", unit); error = ENXIO; goto fail; } sc->bge_btag = rman_get_bustag(sc->bge_res); sc->bge_bhandle = rman_get_bushandle(sc->bge_res); sc->bge_vhandle = (vm_offset_t)rman_get_virtual(sc->bge_res); /* Allocate interrupt */ rid = 0; sc->bge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->bge_irq == NULL) { printf("bge%d: couldn't map interrupt\n", unit); error = ENXIO; goto fail; } sc->bge_unit = unit; BGE_LOCK_INIT(sc, device_get_nameunit(dev)); /* Save ASIC rev. */ sc->bge_chipid = pci_read_config(dev, BGE_PCI_MISC_CTL, 4) & BGE_PCIMISCCTL_ASICREV; sc->bge_asicrev = BGE_ASICREV(sc->bge_chipid); sc->bge_chiprev = BGE_CHIPREV(sc->bge_chipid); /* * Treat the 5714 like the 5750 until we have more info * on this chip. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5714) sc->bge_asicrev = BGE_ASICREV_BCM5750; /* * XXX: Broadcom Linux driver. Not in specs or eratta. * PCI-Express? */ if (sc->bge_asicrev == BGE_ASICREV_BCM5750) { u_int32_t v; v = pci_read_config(dev, BGE_PCI_MSI_CAPID, 4); if (((v >> 8) & 0xff) == BGE_PCIE_CAPID_REG) { v = pci_read_config(dev, BGE_PCIE_CAPID_REG, 4); if ((v & 0xff) == BGE_PCIE_CAPID) sc->bge_pcie = 1; } } /* Try to reset the chip. */ bge_reset(sc); if (bge_chipinit(sc)) { printf("bge%d: chip initialization failed\n", sc->bge_unit); bge_release_resources(sc); error = ENXIO; goto fail; } /* * Get station address from the EEPROM. 
*/ mac_tmp = bge_readmem_ind(sc, 0x0c14); if ((mac_tmp >> 16) == 0x484b) { eaddr[0] = (u_char)(mac_tmp >> 8); eaddr[1] = (u_char)mac_tmp; mac_tmp = bge_readmem_ind(sc, 0x0c18); eaddr[2] = (u_char)(mac_tmp >> 24); eaddr[3] = (u_char)(mac_tmp >> 16); eaddr[4] = (u_char)(mac_tmp >> 8); eaddr[5] = (u_char)mac_tmp; } else if (bge_read_eeprom(sc, eaddr, BGE_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) { printf("bge%d: failed to read station address\n", unit); bge_release_resources(sc); error = ENXIO; goto fail; } /* 5705 limits RX return ring to 512 entries. */ if (sc->bge_asicrev == BGE_ASICREV_BCM5705 || sc->bge_asicrev == BGE_ASICREV_BCM5750) sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT_5705; else sc->bge_return_ring_cnt = BGE_RETURN_RING_CNT; if (bge_dma_alloc(dev)) { printf ("bge%d: failed to allocate DMA resources\n", sc->bge_unit); bge_release_resources(sc); error = ENXIO; goto fail; } /* * Try to allocate memory for jumbo buffers. * The 5705 does not appear to support jumbo frames. */ if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && sc->bge_asicrev != BGE_ASICREV_BCM5750) { if (bge_alloc_jumbo_mem(sc)) { printf("bge%d: jumbo buffer allocation " "failed\n", sc->bge_unit); bge_release_resources(sc); error = ENXIO; goto fail; } } /* Set default tuneable values. 
*/ sc->bge_stat_ticks = BGE_TICKS_PER_SEC; sc->bge_rx_coal_ticks = 150; sc->bge_tx_coal_ticks = 150; sc->bge_rx_max_coal_bds = 64; sc->bge_tx_max_coal_bds = 128; /* Set up ifnet structure */ ifp = sc->bge_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { printf("bge%d: failed to if_alloc()\n", sc->bge_unit); bge_release_resources(sc); error = ENXIO; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = bge_ioctl; ifp->if_start = bge_start; ifp->if_watchdog = bge_watchdog; ifp->if_init = bge_init; ifp->if_mtu = ETHERMTU; ifp->if_snd.ifq_drv_maxlen = BGE_TX_RING_CNT - 1; IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); IFQ_SET_READY(&ifp->if_snd); ifp->if_hwassist = BGE_CSUM_FEATURES; /* NB: the code for RX csum offload is disabled for now */ ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; ifp->if_capenable = ifp->if_capabilities; /* * Figure out what sort of media we have by checking the * hardware config word in the first 32k of NIC internal memory, * or fall back to examining the EEPROM if necessary. * Note: on some BCM5700 cards, this value appears to be unset. * If that's the case, we have to rely on identifying the NIC * by its PCI subsystem ID, as we do below for the SysKonnect * SK-9D41. */ if (bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_SIG) == BGE_MAGIC_NUMBER) hwcfg = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM_NICCFG); else { bge_read_eeprom(sc, (caddr_t)&hwcfg, BGE_EE_HWCFG_OFFSET, sizeof(hwcfg)); hwcfg = ntohl(hwcfg); } if ((hwcfg & BGE_HWCFG_MEDIA) == BGE_MEDIA_FIBER) sc->bge_tbi = 1; /* The SysKonnect SK-9D41 is a 1000baseSX card. 
*/ if ((pci_read_config(dev, BGE_PCI_SUBSYS, 4) >> 16) == SK_SUBSYSID_9D41) sc->bge_tbi = 1; if (sc->bge_tbi) { ifmedia_init(&sc->bge_ifmedia, IFM_IMASK, bge_ifmedia_upd, bge_ifmedia_sts); ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL); ifmedia_add(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); ifmedia_set(&sc->bge_ifmedia, IFM_ETHER|IFM_AUTO); sc->bge_ifmedia.ifm_media = sc->bge_ifmedia.ifm_cur->ifm_media; } else { /* * Do transceiver setup. */ if (mii_phy_probe(dev, &sc->bge_miibus, bge_ifmedia_upd, bge_ifmedia_sts)) { printf("bge%d: MII without any PHY!\n", sc->bge_unit); bge_release_resources(sc); bge_free_jumbo_mem(sc); if_free(ifp); error = ENXIO; goto fail; } } /* * When using the BCM5701 in PCI-X mode, data corruption has * been observed in the first few bytes of some received packets. * Aligning the packet buffer in memory eliminates the corruption. * Unfortunately, this misaligns the packet payloads. On platforms * which do not support unaligned accesses, we will realign the * payloads by copying the received packets. */ switch (sc->bge_chipid) { case BGE_CHIPID_BCM5701_A0: case BGE_CHIPID_BCM5701_B0: case BGE_CHIPID_BCM5701_B2: case BGE_CHIPID_BCM5701_B5: /* If in PCI-X mode, work around the alignment bug. */ if ((pci_read_config(dev, BGE_PCI_PCISTATE, 4) & (BGE_PCISTATE_PCI_BUSMODE | BGE_PCISTATE_PCI_BUSSPEED)) == BGE_PCISTATE_PCI_BUSSPEED) sc->bge_rx_alignment_bug = 1; break; } /* * Call MI attach routine. */ ether_ifattach(ifp, eaddr); callout_init(&sc->bge_stat_ch, CALLOUT_MPSAFE); /* * Hookup IRQ last. 
*/
	error = bus_setup_intr(dev, sc->bge_irq, INTR_TYPE_NET | INTR_MPSAFE,
	   bge_intr, sc, &sc->bge_intrhand);

	if (error) {
		/* Full teardown: everything above is already attached. */
		bge_detach(dev);
		printf("bge%d: couldn't set up irq\n", unit);
	}

fail:
	return(error);
}

/*
 * Detach: stop and reset the hardware, unhook the ifnet/media/PHY,
 * then release all bus and DMA resources.
 */
static int
bge_detach(dev)
	device_t dev;
{
	struct bge_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->bge_ifp;

	BGE_LOCK(sc);
	bge_stop(sc);
	bge_reset(sc);
	BGE_UNLOCK(sc);

	ether_ifdetach(ifp);
	if_free(ifp);

	if (sc->bge_tbi) {
		ifmedia_removeall(&sc->bge_ifmedia);
	} else {
		bus_generic_detach(dev);
		device_delete_child(dev, sc->bge_miibus);
	}

	bge_release_resources(sc);
	/* Jumbo memory exists only on chips that support jumbo frames. */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		bge_free_jumbo_mem(sc);

	return(0);
}

/*
 * Release interrupt, register window and DMA resources; NULL checks
 * make this safe to call from partially-failed attach paths.
 */
static void
bge_release_resources(sc)
	struct bge_softc *sc;
{
	device_t dev;

	dev = sc->bge_dev;

	if (sc->bge_vpd_prodname != NULL)
		free(sc->bge_vpd_prodname, M_DEVBUF);

	if (sc->bge_vpd_readonly != NULL)
		free(sc->bge_vpd_readonly, M_DEVBUF);

	if (sc->bge_intrhand != NULL)
		bus_teardown_intr(dev, sc->bge_irq, sc->bge_intrhand);

	if (sc->bge_irq != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->bge_irq);

	if (sc->bge_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    BGE_PCI_BAR0, sc->bge_res);

	bge_dma_free(sc);

	if (mtx_initialized(&sc->bge_mtx))	/* XXX */
		BGE_LOCK_DESTROY(sc);

	return;
}

/*
 * Issue a full chip reset and wait for the on-chip firmware handshake.
 * PCI config state is saved and restored around the reset because the
 * core-clock reset zaps it.
 */
static void
bge_reset(sc)
	struct bge_softc *sc;
{
	device_t dev;
	u_int32_t cachesize, command, pcistate, reset;
	int i, val = 0;

	dev = sc->bge_dev;

	/* Save some important PCI state. */
	cachesize = pci_read_config(dev, BGE_PCI_CACHESZ, 4);
	command = pci_read_config(dev, BGE_PCI_CMD, 4);
	pcistate = pci_read_config(dev, BGE_PCI_PCISTATE, 4);

	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);

	reset = BGE_MISCCFG_RESET_CORE_CLOCKS|(65<<1);

	/* XXX: Broadcom Linux driver.
*/
	if (sc->bge_pcie) {
		if (CSR_READ_4(sc, 0x7e2c) == 0x60)	/* PCIE 1.0 */
			CSR_WRITE_4(sc, 0x7e2c, 0x20);
		if (sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
			/* Prevent PCIE link training during global reset */
			CSR_WRITE_4(sc, BGE_MISC_CFG, (1<<29));
			reset |= (1<<29);
		}
	}

	/* Issue global reset */
	bge_writereg_ind(sc, BGE_MISC_CFG, reset);

	DELAY(1000);

	/* XXX: Broadcom Linux driver. */
	if (sc->bge_pcie) {
		if (sc->bge_chipid == BGE_CHIPID_BCM5750_A0) {
			uint32_t v;

			DELAY(500000); /* wait for link training to complete */
			v = pci_read_config(dev, 0xc4, 4);
			pci_write_config(dev, 0xc4, v | (1<<15), 4);
		}
		/* Set PCIE max payload size and clear error status. */
		pci_write_config(dev, 0xd8, 0xf5000, 4);
	}

	/* Reset some of the PCI state that got zapped by reset */
	pci_write_config(dev, BGE_PCI_MISC_CTL,
	    BGE_PCIMISCCTL_INDIRECT_ACCESS|BGE_PCIMISCCTL_MASK_PCI_INTR|
	    BGE_PCIMISCCTL_ENDIAN_WORDSWAP|BGE_PCIMISCCTL_PCISTATE_RW, 4);
	pci_write_config(dev, BGE_PCI_CACHESZ, cachesize, 4);
	pci_write_config(dev, BGE_PCI_CMD, command, 4);
	bge_writereg_ind(sc, BGE_MISC_CFG, (65 << 1));

	/* Enable memory arbiter. */
	if (sc->bge_asicrev != BGE_ASICREV_BCM5705 &&
	    sc->bge_asicrev != BGE_ASICREV_BCM5750)
		CSR_WRITE_4(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE);

	/*
	 * Prevent PXE restart: write a magic number to the
	 * general communications memory at 0xB50.
	 */
	bge_writemem_ind(sc, BGE_SOFTWARE_GENCOMM, BGE_MAGIC_NUMBER);
	/*
	 * Poll the value location we just wrote until
	 * we see the 1's complement of the magic number.
	 * This indicates that the firmware initialization
	 * is complete.
	 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		val = bge_readmem_ind(sc, BGE_SOFTWARE_GENCOMM);
		if (val == ~BGE_MAGIC_NUMBER)
			break;
		DELAY(10);
	}

	if (i == BGE_TIMEOUT) {
		printf("bge%d: firmware handshake timed out\n", sc->bge_unit);
		return;
	}

	/*
	 * XXX Wait for the value of the PCISTATE register to
	 * return to its original pre-reset state. This is a
	 * fairly good indicator of reset completion.
	 * If we don't
	 * wait for the reset to fully complete, trying to read
	 * from the device's non-PCI registers may yield garbage
	 * results.
	 */
	for (i = 0; i < BGE_TIMEOUT; i++) {
		if (pci_read_config(dev, BGE_PCI_PCISTATE, 4) == pcistate)
			break;
		DELAY(10);
	}

	/* Fix up byte swapping */
	CSR_WRITE_4(sc, BGE_MODE_CTL, BGE_MODECTL_BYTESWAP_NONFRAME|
	    BGE_MODECTL_BYTESWAP_DATA);

	CSR_WRITE_4(sc, BGE_MAC_MODE, 0);

	/*
	 * The 5704 in TBI mode apparently needs some special
	 * adjustment to insure the SERDES drive level is set
	 * to 1.2V.
	 */
	if (sc->bge_asicrev == BGE_ASICREV_BCM5704 && sc->bge_tbi) {
		uint32_t serdescfg;
		serdescfg = CSR_READ_4(sc, BGE_SERDES_CFG);
		serdescfg = (serdescfg & ~0xFFF) | 0x880;
		CSR_WRITE_4(sc, BGE_SERDES_CFG, serdescfg);
	}

	/* XXX: Broadcom Linux driver. */
	if (sc->bge_pcie && sc->bge_chipid != BGE_CHIPID_BCM5750_A0) {
		uint32_t v;

		v = CSR_READ_4(sc, 0x7c00);
		CSR_WRITE_4(sc, 0x7c00, v | (1<<25));
	}

	DELAY(10000);

	return;
}

/*
 * Frame reception handling. This is called if there's a frame
 * on the receive return list.
* * Note: we have to be able to handle two possibilities here: * 1) the frame is from the jumbo recieve ring * 2) the frame is from the standard receive ring */ static void bge_rxeof(sc) struct bge_softc *sc; { struct ifnet *ifp; int stdcnt = 0, jumbocnt = 0; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD); if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && sc->bge_asicrev != BGE_ASICREV_BCM5750) { bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_POSTREAD); } while(sc->bge_rx_saved_considx != sc->bge_ldata.bge_status_block->bge_idx[0].bge_rx_prod_idx) { struct bge_rx_bd *cur_rx; u_int32_t rxidx; struct ether_header *eh; struct mbuf *m = NULL; u_int16_t vlan_tag = 0; int have_tag = 0; cur_rx = &sc->bge_ldata.bge_rx_return_ring[sc->bge_rx_saved_considx]; rxidx = cur_rx->bge_idx; BGE_INC(sc->bge_rx_saved_considx, sc->bge_return_ring_cnt); if (cur_rx->bge_flags & BGE_RXBDFLAG_VLAN_TAG) { have_tag = 1; vlan_tag = cur_rx->bge_vlan_tag; } if (cur_rx->bge_flags & BGE_RXBDFLAG_JUMBO_RING) { BGE_INC(sc->bge_jumbo, BGE_JUMBO_RX_RING_CNT); bus_dmamap_sync(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx], BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->bge_cdata.bge_mtag_jumbo, sc->bge_cdata.bge_rx_jumbo_dmamap[rxidx]); m = sc->bge_cdata.bge_rx_jumbo_chain[rxidx]; sc->bge_cdata.bge_rx_jumbo_chain[rxidx] = NULL; jumbocnt++; if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { ifp->if_ierrors++; bge_newbuf_jumbo(sc, sc->bge_jumbo, m); continue; } if (bge_newbuf_jumbo(sc, sc->bge_jumbo, NULL) == ENOBUFS) { ifp->if_ierrors++; bge_newbuf_jumbo(sc, sc->bge_jumbo, m); continue; } } else { BGE_INC(sc->bge_std, BGE_STD_RX_RING_CNT); bus_dmamap_sync(sc->bge_cdata.bge_mtag, sc->bge_cdata.bge_rx_std_dmamap[rxidx], 
BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->bge_cdata.bge_mtag, sc->bge_cdata.bge_rx_std_dmamap[rxidx]); m = sc->bge_cdata.bge_rx_std_chain[rxidx]; sc->bge_cdata.bge_rx_std_chain[rxidx] = NULL; stdcnt++; if (cur_rx->bge_flags & BGE_RXBDFLAG_ERROR) { ifp->if_ierrors++; bge_newbuf_std(sc, sc->bge_std, m); continue; } if (bge_newbuf_std(sc, sc->bge_std, NULL) == ENOBUFS) { ifp->if_ierrors++; bge_newbuf_std(sc, sc->bge_std, m); continue; } } ifp->if_ipackets++; #ifndef __i386__ /* * The i386 allows unaligned accesses, but for other * platforms we must make sure the payload is aligned. */ if (sc->bge_rx_alignment_bug) { bcopy(m->m_data, m->m_data + ETHER_ALIGN, cur_rx->bge_len); m->m_data += ETHER_ALIGN; } #endif eh = mtod(m, struct ether_header *); m->m_pkthdr.len = m->m_len = cur_rx->bge_len - ETHER_CRC_LEN; m->m_pkthdr.rcvif = ifp; #if 0 /* currently broken for some packets, possibly related to TCP options */ if (ifp->if_capenable & IFCAP_RXCSUM) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if ((cur_rx->bge_ip_csum ^ 0xffff) == 0) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; if (cur_rx->bge_flags & BGE_RXBDFLAG_TCP_UDP_CSUM) { m->m_pkthdr.csum_data = cur_rx->bge_tcp_udp_csum; m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; } } #endif /* * If we received a packet with a vlan tag, * attach that information to the packet. 
*/ if (have_tag) VLAN_INPUT_TAG(ifp, m, vlan_tag, continue); BGE_UNLOCK(sc); (*ifp->if_input)(ifp, m); BGE_LOCK(sc); } bus_dmamap_sync(sc->bge_cdata.bge_rx_return_ring_tag, sc->bge_cdata.bge_rx_return_ring_map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->bge_cdata.bge_rx_std_ring_tag, sc->bge_cdata.bge_rx_std_ring_map, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_PREWRITE); if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && sc->bge_asicrev != BGE_ASICREV_BCM5750) { bus_dmamap_sync(sc->bge_cdata.bge_rx_jumbo_ring_tag, sc->bge_cdata.bge_rx_jumbo_ring_map, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); } CSR_WRITE_4(sc, BGE_MBX_RX_CONS0_LO, sc->bge_rx_saved_considx); if (stdcnt) CSR_WRITE_4(sc, BGE_MBX_RX_STD_PROD_LO, sc->bge_std); if (jumbocnt) CSR_WRITE_4(sc, BGE_MBX_RX_JUMBO_PROD_LO, sc->bge_jumbo); return; } static void bge_txeof(sc) struct bge_softc *sc; { struct bge_tx_bd *cur_tx = NULL; struct ifnet *ifp; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; /* * Go through our tx ring and free mbufs for those * frames that have been sent. 
*/ while (sc->bge_tx_saved_considx != sc->bge_ldata.bge_status_block->bge_idx[0].bge_tx_cons_idx) { u_int32_t idx = 0; idx = sc->bge_tx_saved_considx; cur_tx = &sc->bge_ldata.bge_tx_ring[idx]; if (cur_tx->bge_flags & BGE_TXBDFLAG_END) ifp->if_opackets++; if (sc->bge_cdata.bge_tx_chain[idx] != NULL) { m_freem(sc->bge_cdata.bge_tx_chain[idx]); sc->bge_cdata.bge_tx_chain[idx] = NULL; bus_dmamap_unload(sc->bge_cdata.bge_mtag, sc->bge_cdata.bge_tx_dmamap[idx]); } sc->bge_txcnt--; BGE_INC(sc->bge_tx_saved_considx, BGE_TX_RING_CNT); ifp->if_timer = 0; } if (cur_tx != NULL) ifp->if_flags &= ~IFF_OACTIVE; return; } static void bge_intr(xsc) void *xsc; { struct bge_softc *sc; struct ifnet *ifp; u_int32_t statusword; u_int32_t status, mimode; sc = xsc; ifp = sc->bge_ifp; BGE_LOCK(sc); bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_POSTWRITE); statusword = atomic_readandclear_32(&sc->bge_ldata.bge_status_block->bge_status); #ifdef notdef /* Avoid this for now -- checking this register is expensive. */ /* Make sure this is really our interrupt. */ if (!(CSR_READ_4(sc, BGE_MISC_LOCAL_CTL) & BGE_MLC_INTR_STATE)) return; #endif /* Ack interrupt and stop others from occuring. */ CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); /* * Process link state changes. * Grrr. The link status word in the status block does * not work correctly on the BCM5700 rev AX and BX chips, * according to all available information. Hence, we have * to enable MII interrupts in order to properly obtain * async link changes. Unfortunately, this also means that * we have to read the MAC status register to detect link * changes, thereby adding an additional register access to * the interrupt handler. 
*/ if (sc->bge_asicrev == BGE_ASICREV_BCM5700) { status = CSR_READ_4(sc, BGE_MAC_STS); if (status & BGE_MACSTAT_MI_INTERRUPT) { sc->bge_link = 0; callout_stop(&sc->bge_stat_ch); bge_tick_locked(sc); /* Clear the interrupt */ CSR_WRITE_4(sc, BGE_MAC_EVT_ENB, BGE_EVTENB_MI_INTERRUPT); bge_miibus_readreg(sc->bge_dev, 1, BRGPHY_MII_ISR); bge_miibus_writereg(sc->bge_dev, 1, BRGPHY_MII_IMR, BRGPHY_INTRS); } } else { if (statusword & BGE_STATFLAG_LINKSTATE_CHANGED) { /* * Sometimes PCS encoding errors are detected in * TBI mode (on fiber NICs), and for some reason * the chip will signal them as link changes. * If we get a link change event, but the 'PCS * encoding error' bit in the MAC status register * is set, don't bother doing a link check. * This avoids spurious "gigabit link up" messages * that sometimes appear on fiber NICs during * periods of heavy traffic. (There should be no * effect on copper NICs.) * * If we do have a copper NIC (bge_tbi == 0) then * check that the AUTOPOLL bit is set before * processing the event as a real link change. * Turning AUTOPOLL on and off in the MII read/write * functions will often trigger a link status * interrupt for no reason. 
*/ status = CSR_READ_4(sc, BGE_MAC_STS); mimode = CSR_READ_4(sc, BGE_MI_MODE); if (!(status & (BGE_MACSTAT_PORT_DECODE_ERROR| BGE_MACSTAT_MI_COMPLETE)) && (!sc->bge_tbi && (mimode & BGE_MIMODE_AUTOPOLL))) { sc->bge_link = 0; callout_stop(&sc->bge_stat_ch); bge_tick_locked(sc); } /* Clear the interrupt */ CSR_WRITE_4(sc, BGE_MAC_STS, BGE_MACSTAT_SYNC_CHANGED| BGE_MACSTAT_CFG_CHANGED|BGE_MACSTAT_MI_COMPLETE| BGE_MACSTAT_LINK_CHANGED); /* Force flush the status block cached by PCI bridge */ CSR_READ_4(sc, BGE_MBX_IRQ0_LO); } } if (ifp->if_flags & IFF_RUNNING) { /* Check RX return ring producer/consumer */ bge_rxeof(sc); /* Check TX ring producer/consumer */ bge_txeof(sc); } bus_dmamap_sync(sc->bge_cdata.bge_status_tag, sc->bge_cdata.bge_status_map, BUS_DMASYNC_PREWRITE); bge_handle_events(sc); /* Re-enable interrupts. */ CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); if (ifp->if_flags & IFF_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) bge_start_locked(ifp); BGE_UNLOCK(sc); return; } static void bge_tick_locked(sc) struct bge_softc *sc; { struct mii_data *mii = NULL; struct ifmedia *ifm = NULL; struct ifnet *ifp; ifp = sc->bge_ifp; BGE_LOCK_ASSERT(sc); if (sc->bge_asicrev == BGE_ASICREV_BCM5705 || sc->bge_asicrev == BGE_ASICREV_BCM5750) bge_stats_update_regs(sc); else bge_stats_update(sc); callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc); if (sc->bge_link) return; if (sc->bge_tbi) { ifm = &sc->bge_ifmedia; if (CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_TBI_PCS_SYNCHED) { sc->bge_link++; if (sc->bge_asicrev == BGE_ASICREV_BCM5704) BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_TBI_SEND_CFGS); CSR_WRITE_4(sc, BGE_MAC_STS, 0xFFFFFFFF); if (bootverbose) printf("bge%d: gigabit link up\n", sc->bge_unit); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) bge_start_locked(ifp); } return; } mii = device_get_softc(sc->bge_miibus); mii_tick(mii); if (!sc->bge_link && mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { sc->bge_link++; if ((IFM_SUBTYPE(mii->mii_media_active) 
== IFM_1000_T || IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) && bootverbose) printf("bge%d: gigabit link up\n", sc->bge_unit); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) bge_start_locked(ifp); } return; } static void bge_tick(xsc) void *xsc; { struct bge_softc *sc; sc = xsc; BGE_LOCK(sc); bge_tick_locked(sc); BGE_UNLOCK(sc); } static void bge_stats_update_regs(sc) struct bge_softc *sc; { struct ifnet *ifp; struct bge_mac_stats_regs stats; u_int32_t *s; int i; ifp = sc->bge_ifp; s = (u_int32_t *)&stats; for (i = 0; i < sizeof(struct bge_mac_stats_regs); i += 4) { *s = CSR_READ_4(sc, BGE_RX_STATS + i); s++; } ifp->if_collisions += (stats.dot3StatsSingleCollisionFrames + stats.dot3StatsMultipleCollisionFrames + stats.dot3StatsExcessiveCollisions + stats.dot3StatsLateCollisions) - ifp->if_collisions; return; } static void bge_stats_update(sc) struct bge_softc *sc; { struct ifnet *ifp; struct bge_stats *stats; ifp = sc->bge_ifp; stats = (struct bge_stats *)(sc->bge_vhandle + BGE_MEMWIN_START + BGE_STATS_BLOCK); ifp->if_collisions += (stats->txstats.dot3StatsSingleCollisionFrames.bge_addr_lo + stats->txstats.dot3StatsMultipleCollisionFrames.bge_addr_lo + stats->txstats.dot3StatsExcessiveCollisions.bge_addr_lo + stats->txstats.dot3StatsLateCollisions.bge_addr_lo) - ifp->if_collisions; #ifdef notdef ifp->if_collisions += (sc->bge_rdata->bge_info.bge_stats.dot3StatsSingleCollisionFrames + sc->bge_rdata->bge_info.bge_stats.dot3StatsMultipleCollisionFrames + sc->bge_rdata->bge_info.bge_stats.dot3StatsExcessiveCollisions + sc->bge_rdata->bge_info.bge_stats.dot3StatsLateCollisions) - ifp->if_collisions; #endif return; } /* * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data * pointers to descriptors. 
*/ static int bge_encap(sc, m_head, txidx) struct bge_softc *sc; struct mbuf *m_head; u_int32_t *txidx; { struct bge_tx_bd *f = NULL; u_int16_t csum_flags = 0; struct m_tag *mtag; struct bge_dmamap_arg ctx; bus_dmamap_t map; int error; if (m_head->m_pkthdr.csum_flags) { if (m_head->m_pkthdr.csum_flags & CSUM_IP) csum_flags |= BGE_TXBDFLAG_IP_CSUM; if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)) csum_flags |= BGE_TXBDFLAG_TCP_UDP_CSUM; if (m_head->m_flags & M_LASTFRAG) csum_flags |= BGE_TXBDFLAG_IP_FRAG_END; else if (m_head->m_flags & M_FRAG) csum_flags |= BGE_TXBDFLAG_IP_FRAG; } mtag = VLAN_OUTPUT_TAG(sc->bge_ifp, m_head); ctx.sc = sc; ctx.bge_idx = *txidx; ctx.bge_ring = sc->bge_ldata.bge_tx_ring; ctx.bge_flags = csum_flags; /* * Sanity check: avoid coming within 16 descriptors * of the end of the ring. */ ctx.bge_maxsegs = (BGE_TX_RING_CNT - sc->bge_txcnt) - 16; map = sc->bge_cdata.bge_tx_dmamap[*txidx]; error = bus_dmamap_load_mbuf(sc->bge_cdata.bge_mtag, map, m_head, bge_dma_map_tx_desc, &ctx, BUS_DMA_NOWAIT); if (error || ctx.bge_maxsegs == 0 /*|| ctx.bge_idx == sc->bge_tx_saved_considx*/) return (ENOBUFS); /* * Insure that the map for this transmission * is placed at the array index of the last descriptor * in this chain. */ sc->bge_cdata.bge_tx_dmamap[*txidx] = sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx]; sc->bge_cdata.bge_tx_dmamap[ctx.bge_idx] = map; sc->bge_cdata.bge_tx_chain[ctx.bge_idx] = m_head; sc->bge_txcnt += ctx.bge_maxsegs; f = &sc->bge_ldata.bge_tx_ring[*txidx]; if (mtag != NULL) { f->bge_flags |= htole16(BGE_TXBDFLAG_VLAN_TAG); f->bge_vlan_tag = htole16(VLAN_TAG_VALUE(mtag)); } else { f->bge_vlan_tag = 0; } BGE_INC(ctx.bge_idx, BGE_TX_RING_CNT); *txidx = ctx.bge_idx; return(0); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit descriptors. 
*/ static void bge_start_locked(ifp) struct ifnet *ifp; { struct bge_softc *sc; struct mbuf *m_head = NULL; u_int32_t prodidx = 0; int count = 0; sc = ifp->if_softc; if (!sc->bge_link && IFQ_DRV_IS_EMPTY(&ifp->if_snd)) return; prodidx = CSR_READ_4(sc, BGE_MBX_TX_HOST_PROD0_LO); while(sc->bge_cdata.bge_tx_chain[prodidx] == NULL) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* * XXX * The code inside the if() block is never reached since we * must mark CSUM_IP_FRAGS in our if_hwassist to start getting * requests to checksum TCP/UDP in a fragmented packet. * * XXX * safety overkill. If this is a fragmented packet chain * with delayed TCP/UDP checksums, then only encapsulate * it if we have enough descriptors to handle the entire * chain at once. * (paranoia -- may not actually be needed) */ if (m_head->m_flags & M_FIRSTFRAG && m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) { if ((BGE_TX_RING_CNT - sc->bge_txcnt) < m_head->m_pkthdr.csum_data + 16) { IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; break; } } /* * Pack the data into the transmit ring. If we * don't have room, set the OACTIVE flag and wait * for the NIC to drain the ring. */ if (bge_encap(sc, m_head, &prodidx)) { IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; break; } ++count; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, m_head); } if (count == 0) { /* no packets were dequeued */ return; } /* Transmit */ CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); /* 5700 b2 errata */ if (sc->bge_chiprev == BGE_CHIPREV_5700_BX) CSR_WRITE_4(sc, BGE_MBX_TX_HOST_PROD0_LO, prodidx); /* * Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; return; } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit descriptors. 
*/ static void bge_start(ifp) struct ifnet *ifp; { struct bge_softc *sc; sc = ifp->if_softc; BGE_LOCK(sc); bge_start_locked(ifp); BGE_UNLOCK(sc); } static void bge_init_locked(sc) struct bge_softc *sc; { struct ifnet *ifp; u_int16_t *m; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; if (ifp->if_flags & IFF_RUNNING) return; /* Cancel pending I/O and flush buffers. */ bge_stop(sc); bge_reset(sc); bge_chipinit(sc); /* * Init the various state machines, ring * control blocks and firmware. */ if (bge_blockinit(sc)) { printf("bge%d: initialization failure\n", sc->bge_unit); return; } ifp = sc->bge_ifp; /* Specify MTU. */ CSR_WRITE_4(sc, BGE_RX_MTU, ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN); /* Load our MAC address. */ m = (u_int16_t *)&IFP2ENADDR(sc->bge_ifp)[0]; CSR_WRITE_4(sc, BGE_MAC_ADDR1_LO, htons(m[0])); CSR_WRITE_4(sc, BGE_MAC_ADDR1_HI, (htons(m[1]) << 16) | htons(m[2])); /* Enable or disable promiscuous mode as needed. */ if (ifp->if_flags & IFF_PROMISC) { BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); } else { BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); } /* Program multicast filter. */ bge_setmulti(sc); /* Init RX ring. */ bge_init_rx_ring_std(sc); /* * Workaround for a bug in 5705 ASIC rev A0. Poll the NIC's * memory to insure that the chip has in fact read the first * entry of the ring. */ if (sc->bge_chipid == BGE_CHIPID_BCM5705_A0) { u_int32_t v, i; for (i = 0; i < 10; i++) { DELAY(20); v = bge_readmem_ind(sc, BGE_STD_RX_RINGS + 8); if (v == (MCLBYTES - ETHER_ALIGN)) break; } if (i == 10) printf ("bge%d: 5705 A0 chip failed to load RX ring\n", sc->bge_unit); } /* Init jumbo RX ring. */ if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) bge_init_rx_ring_jumbo(sc); /* Init our RX return ring index */ sc->bge_rx_saved_considx = 0; /* Init TX ring. 
*/ bge_init_tx_ring(sc); /* Turn on transmitter */ BGE_SETBIT(sc, BGE_TX_MODE, BGE_TXMODE_ENABLE); /* Turn on receiver */ BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); /* Tell firmware we're alive. */ BGE_SETBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); /* Enable host interrupts. */ BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_CLEAR_INTA); BGE_CLRBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 0); bge_ifmedia_upd(ifp); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; callout_reset(&sc->bge_stat_ch, hz, bge_tick, sc); return; } static void bge_init(xsc) void *xsc; { struct bge_softc *sc = xsc; BGE_LOCK(sc); bge_init_locked(sc); BGE_UNLOCK(sc); return; } /* * Set media options. */ static int bge_ifmedia_upd(ifp) struct ifnet *ifp; { struct bge_softc *sc; struct mii_data *mii; struct ifmedia *ifm; sc = ifp->if_softc; ifm = &sc->bge_ifmedia; /* If this is a 1000baseX NIC, enable the TBI port. */ if (sc->bge_tbi) { if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return(EINVAL); switch(IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: #ifndef BGE_FAKE_AUTONEG /* * The BCM5704 ASIC appears to have a special * mechanism for programming the autoneg * advertisement registers in TBI mode. 
*/ if (sc->bge_asicrev == BGE_ASICREV_BCM5704) { uint32_t sgdig; CSR_WRITE_4(sc, BGE_TX_TBI_AUTONEG, 0); sgdig = CSR_READ_4(sc, BGE_SGDIG_CFG); sgdig |= BGE_SGDIGCFG_AUTO| BGE_SGDIGCFG_PAUSE_CAP| BGE_SGDIGCFG_ASYM_PAUSE; CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig|BGE_SGDIGCFG_SEND); DELAY(5); CSR_WRITE_4(sc, BGE_SGDIG_CFG, sgdig); } #endif break; case IFM_1000_SX: if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { BGE_CLRBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); } else { BGE_SETBIT(sc, BGE_MAC_MODE, BGE_MACMODE_HALF_DUPLEX); } break; default: return(EINVAL); } return(0); } mii = device_get_softc(sc->bge_miibus); sc->bge_link = 0; if (mii->mii_instance) { struct mii_softc *miisc; for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; miisc = LIST_NEXT(miisc, mii_list)) mii_phy_reset(miisc); } mii_mediachg(mii); return(0); } /* * Report current media status. */ static void bge_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct bge_softc *sc; struct mii_data *mii; sc = ifp->if_softc; if (sc->bge_tbi) { ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (CSR_READ_4(sc, BGE_MAC_STS) & BGE_MACSTAT_TBI_PCS_SYNCHED) ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= IFM_1000_SX; if (CSR_READ_4(sc, BGE_MAC_MODE) & BGE_MACMODE_HALF_DUPLEX) ifmr->ifm_active |= IFM_HDX; else ifmr->ifm_active |= IFM_FDX; return; } mii = device_get_softc(sc->bge_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; return; } static int bge_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct bge_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; int mask, error = 0; struct mii_data *mii; switch(command) { case SIOCSIFMTU: /* Disallow jumbo frames on 5705. 
*/ if (((sc->bge_asicrev == BGE_ASICREV_BCM5705 || sc->bge_asicrev == BGE_ASICREV_BCM5750) && ifr->ifr_mtu > ETHERMTU) || ifr->ifr_mtu > BGE_JUMBO_MTU) error = EINVAL; else { ifp->if_mtu = ifr->ifr_mtu; ifp->if_flags &= ~IFF_RUNNING; bge_init(sc); } break; case SIOCSIFFLAGS: BGE_LOCK(sc); if (ifp->if_flags & IFF_UP) { /* * If only the state of the PROMISC flag changed, * then just use the 'set promisc mode' command * instead of reinitializing the entire NIC. Doing * a full re-init means reloading the firmware and * waiting for it to start up, which may take a * second or two. */ if (ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->bge_if_flags & IFF_PROMISC)) { BGE_SETBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); } else if (ifp->if_flags & IFF_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->bge_if_flags & IFF_PROMISC) { BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_RX_PROMISC); } else bge_init_locked(sc); } else { if (ifp->if_flags & IFF_RUNNING) { bge_stop(sc); } } sc->bge_if_flags = ifp->if_flags; BGE_UNLOCK(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: if (ifp->if_flags & IFF_RUNNING) { BGE_LOCK(sc); bge_setmulti(sc); BGE_UNLOCK(sc); error = 0; } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: if (sc->bge_tbi) { error = ifmedia_ioctl(ifp, ifr, &sc->bge_ifmedia, command); } else { mii = device_get_softc(sc->bge_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); } break; case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ ifp->if_capenable; /* NB: the code for RX csum offload is disabled for now */ if (mask & IFCAP_TXCSUM) { ifp->if_capenable ^= IFCAP_TXCSUM; if (IFCAP_TXCSUM & ifp->if_capenable) ifp->if_hwassist = BGE_CSUM_FEATURES; else ifp->if_hwassist = 0; } error = 0; break; default: error = ether_ioctl(ifp, command, data); break; } return(error); } static void bge_watchdog(ifp) struct ifnet *ifp; { struct bge_softc *sc; sc = ifp->if_softc; printf("bge%d: watchdog timeout -- resetting\n", sc->bge_unit); ifp->if_flags &= 
~IFF_RUNNING; bge_init(sc); ifp->if_oerrors++; return; } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void bge_stop(sc) struct bge_softc *sc; { struct ifnet *ifp; struct ifmedia_entry *ifm; struct mii_data *mii = NULL; int mtmp, itmp; BGE_LOCK_ASSERT(sc); ifp = sc->bge_ifp; if (!sc->bge_tbi) mii = device_get_softc(sc->bge_miibus); callout_stop(&sc->bge_stat_ch); /* * Disable all of the receiver blocks */ BGE_CLRBIT(sc, BGE_RX_MODE, BGE_RXMODE_ENABLE); BGE_CLRBIT(sc, BGE_RBDI_MODE, BGE_RBDIMODE_ENABLE); BGE_CLRBIT(sc, BGE_RXLP_MODE, BGE_RXLPMODE_ENABLE); if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && sc->bge_asicrev != BGE_ASICREV_BCM5750) BGE_CLRBIT(sc, BGE_RXLS_MODE, BGE_RXLSMODE_ENABLE); BGE_CLRBIT(sc, BGE_RDBDI_MODE, BGE_RBDIMODE_ENABLE); BGE_CLRBIT(sc, BGE_RDC_MODE, BGE_RDCMODE_ENABLE); BGE_CLRBIT(sc, BGE_RBDC_MODE, BGE_RBDCMODE_ENABLE); /* * Disable all of the transmit blocks */ BGE_CLRBIT(sc, BGE_SRS_MODE, BGE_SRSMODE_ENABLE); BGE_CLRBIT(sc, BGE_SBDI_MODE, BGE_SBDIMODE_ENABLE); BGE_CLRBIT(sc, BGE_SDI_MODE, BGE_SDIMODE_ENABLE); BGE_CLRBIT(sc, BGE_RDMA_MODE, BGE_RDMAMODE_ENABLE); BGE_CLRBIT(sc, BGE_SDC_MODE, BGE_SDCMODE_ENABLE); if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && sc->bge_asicrev != BGE_ASICREV_BCM5750) BGE_CLRBIT(sc, BGE_DMAC_MODE, BGE_DMACMODE_ENABLE); BGE_CLRBIT(sc, BGE_SBDC_MODE, BGE_SBDCMODE_ENABLE); /* * Shut down all of the memory managers and related * state machines. */ BGE_CLRBIT(sc, BGE_HCC_MODE, BGE_HCCMODE_ENABLE); BGE_CLRBIT(sc, BGE_WDMA_MODE, BGE_WDMAMODE_ENABLE); if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && sc->bge_asicrev != BGE_ASICREV_BCM5750) BGE_CLRBIT(sc, BGE_MBCF_MODE, BGE_MBCFMODE_ENABLE); CSR_WRITE_4(sc, BGE_FTQ_RESET, 0xFFFFFFFF); CSR_WRITE_4(sc, BGE_FTQ_RESET, 0); if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && sc->bge_asicrev != BGE_ASICREV_BCM5750) { BGE_CLRBIT(sc, BGE_BMAN_MODE, BGE_BMANMODE_ENABLE); BGE_CLRBIT(sc, BGE_MARB_MODE, BGE_MARBMODE_ENABLE); } /* Disable host interrupts. 
*/ BGE_SETBIT(sc, BGE_PCI_MISC_CTL, BGE_PCIMISCCTL_MASK_PCI_INTR); CSR_WRITE_4(sc, BGE_MBX_IRQ0_LO, 1); /* * Tell firmware we're shutting down. */ BGE_CLRBIT(sc, BGE_MODE_CTL, BGE_MODECTL_STACKUP); /* Free the RX lists. */ bge_free_rx_ring_std(sc); /* Free jumbo RX list. */ if (sc->bge_asicrev != BGE_ASICREV_BCM5705 && sc->bge_asicrev != BGE_ASICREV_BCM5750) bge_free_rx_ring_jumbo(sc); /* Free TX buffers. */ bge_free_tx_ring(sc); /* * Isolate/power down the PHY, but leave the media selection * unchanged so that things will be put back to normal when * we bring the interface back up. */ if (!sc->bge_tbi) { itmp = ifp->if_flags; ifp->if_flags |= IFF_UP; ifm = mii->mii_media.ifm_cur; mtmp = ifm->ifm_media; ifm->ifm_media = IFM_ETHER|IFM_NONE; mii_mediachg(mii); ifm->ifm_media = mtmp; ifp->if_flags = itmp; } sc->bge_link = 0; sc->bge_tx_saved_considx = BGE_TXCONS_UNSET; ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); return; } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static void bge_shutdown(dev) device_t dev; { struct bge_softc *sc; sc = device_get_softc(dev); BGE_LOCK(sc); bge_stop(sc); bge_reset(sc); BGE_UNLOCK(sc); return; } Index: stable/6/sys/dev/ed/if_ed.c =================================================================== --- stable/6/sys/dev/ed/if_ed.c (revision 149421) +++ stable/6/sys/dev/ed/if_ed.c (revision 149422) @@ -1,1823 +1,1825 @@ /*- * Copyright (c) 1995, David Greenman * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Device driver for National Semiconductor DS8390/WD83C690 based ethernet * adapters. By David Greenman, 29-April-1993 * * Currently supports the Western Digital/SMC 8003 and 8013 series, * the SMC Elite Ultra (8216), the 3Com 3c503, the NE1000 and NE2000, * and a variety of similar clones. 
* */ #include "opt_ed.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef ED_NO_MIIBUS #include #include #endif #include #include #include devclass_t ed_devclass; static void ed_init(void *); static int ed_ioctl(struct ifnet *, u_long, caddr_t); static void ed_start(struct ifnet *); static void ed_reset(struct ifnet *); static void ed_watchdog(struct ifnet *); #ifndef ED_NO_MIIBUS static void ed_tick(void *); #endif static void ed_ds_getmcaf(struct ed_softc *, uint32_t *); static void ed_get_packet(struct ed_softc *, char *, u_short); static __inline void ed_rint(struct ed_softc *); static __inline void ed_xmit(struct ed_softc *); static __inline char *ed_ring_copy(struct ed_softc *, char *, char *, u_short); static u_short ed_pio_write_mbufs(struct ed_softc *, struct mbuf *, long); static void ed_setrcr(struct ed_softc *); /* * Generic probe routine for testing for the existance of a DS8390. * Must be called after the NIC has just been reset. This routine * works by looking at certain register values that are guaranteed * to be initialized a certain way after power-up or reset. Seems * not to currently work on the 83C690. * * Specifically: * * Register reset bits set bits * Command Register (CR) TXP, STA RD2, STP * Interrupt Status (ISR) RST * Interrupt Mask (IMR) All bits * Data Control (DCR) LAS * Transmit Config. (TCR) LB1, LB0 * * We only look at the CR and ISR registers, however, because looking at * the others would require changing register pages (which would be * intrusive if this isn't an 8390). * * Return 1 if 8390 was found, 0 if not. 
*/ int ed_probe_generic8390(struct ed_softc *sc) { if ((ed_nic_inb(sc, ED_P0_CR) & (ED_CR_RD2 | ED_CR_TXP | ED_CR_STA | ED_CR_STP)) != (ED_CR_RD2 | ED_CR_STP)) return (0); if ((ed_nic_inb(sc, ED_P0_ISR) & ED_ISR_RST) != ED_ISR_RST) return (0); return (1); } void ed_disable_16bit_access(struct ed_softc *sc) { /* * Disable 16 bit access to shared memory */ if (sc->isa16bit && sc->vendor == ED_VENDOR_WD_SMC) { if (sc->chip_type == ED_CHIP_TYPE_WD790) ed_asic_outb(sc, ED_WD_MSR, 0x00); ed_asic_outb(sc, ED_WD_LAAR, sc->wd_laar_proto & ~ED_WD_LAAR_M16EN); } } void ed_enable_16bit_access(struct ed_softc *sc) { if (sc->isa16bit && sc->vendor == ED_VENDOR_WD_SMC) { ed_asic_outb(sc, ED_WD_LAAR, sc->wd_laar_proto | ED_WD_LAAR_M16EN); if (sc->chip_type == ED_CHIP_TYPE_WD790) ed_asic_outb(sc, ED_WD_MSR, ED_WD_MSR_MENB); } } /* * Allocate a port resource with the given resource id. */ int ed_alloc_port(device_t dev, int rid, int size) { struct ed_softc *sc = device_get_softc(dev); struct resource *res; res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0ul, ~0ul, size, RF_ACTIVE); if (res) { sc->port_rid = rid; sc->port_res = res; sc->port_used = size; sc->port_bst = rman_get_bustag(res); sc->port_bsh = rman_get_bushandle(res); return (0); } return (ENOENT); } /* * Allocate a memory resource with the given resource id. */ int ed_alloc_memory(device_t dev, int rid, int size) { struct ed_softc *sc = device_get_softc(dev); struct resource *res; res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0ul, ~0ul, size, RF_ACTIVE); if (res) { sc->mem_rid = rid; sc->mem_res = res; sc->mem_used = size; sc->mem_bst = rman_get_bustag(res); sc->mem_bsh = rman_get_bushandle(res); return (0); } return (ENOENT); } /* * Allocate an irq resource with the given resource id. 
*/ int ed_alloc_irq(device_t dev, int rid, int flags) { struct ed_softc *sc = device_get_softc(dev); struct resource *res; res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | flags); if (res) { sc->irq_rid = rid; sc->irq_res = res; return (0); } return (ENOENT); } /* * Release all resources */ void ed_release_resources(device_t dev) { struct ed_softc *sc = device_get_softc(dev); if (sc->port_res) { bus_deactivate_resource(dev, SYS_RES_IOPORT, sc->port_rid, sc->port_res); bus_release_resource(dev, SYS_RES_IOPORT, sc->port_rid, sc->port_res); sc->port_res = 0; } if (sc->mem_res) { bus_deactivate_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res); bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res); sc->mem_res = 0; } if (sc->irq_res) { bus_deactivate_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); sc->irq_res = 0; } } /* * Install interface into kernel networking data structures */ int ed_attach(device_t dev) { struct ed_softc *sc = device_get_softc(dev); struct ifnet *ifp; ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); return (ENOSPC); } callout_handle_init(&sc->tick_ch); /* * Set interface to stopped condition (reset) */ ed_stop(sc); /* * Initialize ifnet structure */ ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_start = ed_start; ifp->if_ioctl = ed_ioctl; ifp->if_watchdog = ed_watchdog; ifp->if_init = ed_init; IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN; IFQ_SET_READY(&ifp->if_snd); ifp->if_linkmib = &sc->mibdata; ifp->if_linkmiblen = sizeof sc->mibdata; /* * XXX - should do a better job. 
*/ if (sc->chip_type == ED_CHIP_TYPE_WD790) sc->mibdata.dot3StatsEtherChipSet = DOT3CHIPSET(dot3VendorWesternDigital, dot3ChipSetWesternDigital83C790); else sc->mibdata.dot3StatsEtherChipSet = DOT3CHIPSET(dot3VendorNational, dot3ChipSetNational8390); sc->mibdata.dot3Compliance = DOT3COMPLIANCE_COLLS; /* * Set default state for ALTPHYS flag (used to disable the * tranceiver for AUI operation), based on compile-time * config option. */ if (device_get_flags(dev) & ED_FLAGS_DISABLE_TRANCEIVER) ifp->if_flags = (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_ALTPHYS | IFF_NEEDSGIANT); else ifp->if_flags = (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_NEEDSGIANT); /* * Attach the interface */ ether_ifattach(ifp, sc->enaddr); /* device attach does transition from UNCONFIGURED to IDLE state */ if (bootverbose || 1) { if (sc->type_str && (*sc->type_str != 0)) device_printf(dev, "type %s ", sc->type_str); else device_printf(dev, "type unknown (0x%x) ", sc->type); #ifdef ED_HPP if (sc->vendor == ED_VENDOR_HP) printf("(%s %s IO)", (sc->hpp_id & ED_HPP_ID_16_BIT_ACCESS) ? "16-bit" : "32-bit", sc->hpp_mem_start ? "memory mapped" : "regular"); else #endif printf("%s ", sc->isa16bit ? "(16 bit)" : "(8 bit)"); #if defined(ED_HPP) || defined(ED_3C503) printf("%s\n", (((sc->vendor == ED_VENDOR_3COM) || (sc->vendor == ED_VENDOR_HP)) && (ifp->if_flags & IFF_ALTPHYS)) ? " tranceiver disabled" : ""); #endif printf("\n"); } return (0); } /* * Detach the driver from the hardware and other systems in the kernel. */ int ed_detach(device_t dev) { struct ed_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->ifp; if (sc->gone) return (0); ed_stop(sc); ifp->if_flags &= ~IFF_RUNNING; ether_ifdetach(ifp); if_free(ifp); sc->gone = 1; bus_teardown_intr(dev, sc->irq_res, sc->irq_handle); ed_release_resources(dev); return (0); } /* * Reset interface. 
*/ static void ed_reset(struct ifnet *ifp) { struct ed_softc *sc = ifp->if_softc; int s; if (sc->gone) return; s = splimp(); /* * Stop interface and re-initialize. */ ed_stop(sc); ed_init(sc); (void) splx(s); } /* * Take interface offline. */ void ed_stop(struct ed_softc *sc) { int n = 5000; #ifndef ED_NO_MIIBUS untimeout(ed_tick, sc, sc->tick_ch); callout_handle_init(&sc->tick_ch); #endif if (sc->gone) return; /* * Stop everything on the interface, and select page 0 registers. */ ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STP); /* * Wait for interface to enter stopped state, but limit # of checks to * 'n' (about 5ms). It shouldn't even take 5us on modern DS8390's, but * just in case it's an old one. */ if (sc->chip_type != ED_CHIP_TYPE_AX88190) while (((ed_nic_inb(sc, ED_P0_ISR) & ED_ISR_RST) == 0) && --n) continue; } /* * Device timeout/watchdog routine. Entered if the device neglects to * generate an interrupt after a transmit has been started on it. */ static void ed_watchdog(struct ifnet *ifp) { struct ed_softc *sc = ifp->if_softc; if (sc->gone) return; log(LOG_ERR, "%s: device timeout\n", ifp->if_xname); ifp->if_oerrors++; ed_reset(ifp); } #ifndef ED_NO_MIIBUS static void ed_tick(void *arg) { struct ed_softc *sc = arg; struct mii_data *mii; int s; if (sc->gone) { callout_handle_init(&sc->tick_ch); return; } s = splimp(); if (sc->miibus != NULL) { mii = device_get_softc(sc->miibus); mii_tick(mii); } sc->tick_ch = timeout(ed_tick, sc, hz); splx(s); } #endif /* * Initialize device. */ static void ed_init(void *xsc) { struct ed_softc *sc = xsc; struct ifnet *ifp = sc->ifp; int i, s; if (sc->gone) return; /* * Initialize the NIC in the exact order outlined in the NS manual. * This init procedure is "mandatory"...don't change what or when * things happen. 
*/ s = splimp(); /* reset transmitter flags */ sc->xmit_busy = 0; ifp->if_timer = 0; sc->txb_inuse = 0; sc->txb_new = 0; sc->txb_next_tx = 0; /* This variable is used below - don't move this assignment */ sc->next_packet = sc->rec_page_start + 1; /* * Set interface for page 0, Remote DMA complete, Stopped */ ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STP); if (sc->isa16bit) /* * Set FIFO threshold to 8, No auto-init Remote DMA, byte * order=80x86, word-wide DMA xfers, */ ed_nic_outb(sc, ED_P0_DCR, ED_DCR_FT1 | ED_DCR_WTS | ED_DCR_LS); else /* * Same as above, but byte-wide DMA xfers */ ed_nic_outb(sc, ED_P0_DCR, ED_DCR_FT1 | ED_DCR_LS); /* * Clear Remote Byte Count Registers */ ed_nic_outb(sc, ED_P0_RBCR0, 0); ed_nic_outb(sc, ED_P0_RBCR1, 0); /* * For the moment, don't store incoming packets in memory. */ ed_nic_outb(sc, ED_P0_RCR, ED_RCR_MON); /* * Place NIC in internal loopback mode */ ed_nic_outb(sc, ED_P0_TCR, ED_TCR_LB0); /* * Initialize transmit/receive (ring-buffer) Page Start */ ed_nic_outb(sc, ED_P0_TPSR, sc->tx_page_start); ed_nic_outb(sc, ED_P0_PSTART, sc->rec_page_start); /* Set lower bits of byte addressable framing to 0 */ if (sc->chip_type == ED_CHIP_TYPE_WD790) ed_nic_outb(sc, 0x09, 0); /* * Initialize Receiver (ring-buffer) Page Stop and Boundry */ ed_nic_outb(sc, ED_P0_PSTOP, sc->rec_page_stop); ed_nic_outb(sc, ED_P0_BNRY, sc->rec_page_start); /* * Clear all interrupts. A '1' in each bit position clears the * corresponding flag. */ ed_nic_outb(sc, ED_P0_ISR, 0xff); /* * Enable the following interrupts: receive/transmit complete, * receive/transmit error, and Receiver OverWrite. * * Counter overflow and Remote DMA complete are *not* enabled. 
*/ ed_nic_outb(sc, ED_P0_IMR, ED_IMR_PRXE | ED_IMR_PTXE | ED_IMR_RXEE | ED_IMR_TXEE | ED_IMR_OVWE); /* * Program Command Register for page 1 */ ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_PAGE_1 | ED_CR_STP); /* * Copy out our station address */ for (i = 0; i < ETHER_ADDR_LEN; ++i) ed_nic_outb(sc, ED_P1_PAR(i), IFP2ENADDR(sc->ifp)[i]); /* * Set Current Page pointer to next_packet (initialized above) */ ed_nic_outb(sc, ED_P1_CURR, sc->next_packet); /* * Program Receiver Configuration Register and multicast filter. CR is * set to page 0 on return. */ ed_setrcr(sc); /* * Take interface out of loopback */ ed_nic_outb(sc, ED_P0_TCR, 0); #ifdef ED_3C503 /* * If this is a 3Com board, the tranceiver must be software enabled * (there is no settable hardware default). */ if (sc->vendor == ED_VENDOR_3COM) { if (ifp->if_flags & IFF_ALTPHYS) ed_asic_outb(sc, ED_3COM_CR, 0); else ed_asic_outb(sc, ED_3COM_CR, ED_3COM_CR_XSEL); } #endif #ifndef ED_NO_MIIBUS if (sc->miibus != NULL) { struct mii_data *mii; mii = device_get_softc(sc->miibus); mii_mediachg(mii); } #endif /* * Set 'running' flag, and clear output active flag. 
*/ ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; /* * ...and attempt to start output */ ed_start(ifp); #ifndef ED_NO_MIIBUS untimeout(ed_tick, sc, sc->tick_ch); sc->tick_ch = timeout(ed_tick, sc, hz); #endif (void) splx(s); } /* * This routine actually starts the transmission on the interface */ static __inline void ed_xmit(struct ed_softc *sc) { struct ifnet *ifp = sc->ifp; unsigned short len; if (sc->gone) return; len = sc->txb_len[sc->txb_next_tx]; /* * Set NIC for page 0 register access */ ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STA); /* * Set TX buffer start page */ ed_nic_outb(sc, ED_P0_TPSR, sc->tx_page_start + sc->txb_next_tx * ED_TXBUF_SIZE); /* * Set TX length */ ed_nic_outb(sc, ED_P0_TBCR0, len); ed_nic_outb(sc, ED_P0_TBCR1, len >> 8); /* * Set page 0, Remote DMA complete, Transmit Packet, and *Start* */ ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_TXP | ED_CR_STA); sc->xmit_busy = 1; /* * Point to next transmit buffer slot and wrap if necessary. */ sc->txb_next_tx++; if (sc->txb_next_tx == sc->txb_cnt) sc->txb_next_tx = 0; /* * Set a timer just in case we never hear from the board again */ ifp->if_timer = 2; } /* * Start output on interface. * We make two assumptions here: * 1) that the current priority is set to splimp _before_ this code * is called *and* is returned to the appropriate priority after * return * 2) that the IFF_OACTIVE flag is checked before this code is called * (i.e. that the output part of the interface is idle) */ static void ed_start(struct ifnet *ifp) { struct ed_softc *sc = ifp->if_softc; struct mbuf *m0, *m; caddr_t buffer; int len; if (sc->gone) { printf("ed_start(%p) GONE\n",ifp); return; } outloop: /* * First, see if there are buffered packets and an idle transmitter - * should never happen at this point. */ if (sc->txb_inuse && (sc->xmit_busy == 0)) { printf("ed: packets buffered, but transmitter idle\n"); ed_xmit(sc); } /* * See if there is room to put another packet in the buffer. 
*/ if (sc->txb_inuse == sc->txb_cnt) { /* * No room. Indicate this to the outside world and exit. */ ifp->if_flags |= IFF_OACTIVE; return; } IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == 0) { /* * We are using the !OACTIVE flag to indicate to the outside * world that we can accept an additional packet rather than * that the transmitter is _actually_ active. Indeed, the * transmitter may be active, but if we haven't filled all the * buffers with data then we still want to accept more. */ ifp->if_flags &= ~IFF_OACTIVE; return; } /* * Copy the mbuf chain into the transmit buffer */ m0 = m; /* txb_new points to next open buffer slot */ buffer = sc->mem_start + (sc->txb_new * ED_TXBUF_SIZE * ED_PAGE_SIZE); if (sc->mem_shared) { /* * Special case setup for 16 bit boards... */ if (sc->isa16bit) { switch (sc->vendor) { #ifdef ED_3C503 /* * For 16bit 3Com boards (which have 16k of * memory), we have the xmit buffers in a * different page of memory ('page 0') - so * change pages. */ case ED_VENDOR_3COM: ed_asic_outb(sc, ED_3COM_GACFR, ED_3COM_GACFR_RSEL); break; #endif /* * Enable 16bit access to shared memory on * WD/SMC boards. 
* * XXX - same as ed_enable_16bit_access() */ case ED_VENDOR_WD_SMC: ed_asic_outb(sc, ED_WD_LAAR, sc->wd_laar_proto | ED_WD_LAAR_M16EN); if (sc->chip_type == ED_CHIP_TYPE_WD790) ed_asic_outb(sc, ED_WD_MSR, ED_WD_MSR_MENB); break; } } for (len = 0; m != 0; m = m->m_next) { /* XXX * I'm not sure that this bcopy does only 16bit * access */ bcopy(mtod(m, caddr_t), buffer, m->m_len); buffer += m->m_len; len += m->m_len; } /* * Restore previous shared memory access */ if (sc->isa16bit) { switch (sc->vendor) { #ifdef ED_3C503 case ED_VENDOR_3COM: ed_asic_outb(sc, ED_3COM_GACFR, ED_3COM_GACFR_RSEL | ED_3COM_GACFR_MBS0); break; #endif case ED_VENDOR_WD_SMC: /* XXX - same as ed_disable_16bit_access() */ if (sc->chip_type == ED_CHIP_TYPE_WD790) ed_asic_outb(sc, ED_WD_MSR, 0x00); ed_asic_outb(sc, ED_WD_LAAR, sc->wd_laar_proto & ~ED_WD_LAAR_M16EN); break; } } } else { len = ed_pio_write_mbufs(sc, m, (uintptr_t)buffer); if (len == 0) { m_freem(m0); goto outloop; } } sc->txb_len[sc->txb_new] = max(len, (ETHER_MIN_LEN-ETHER_CRC_LEN)); sc->txb_inuse++; /* * Point to next buffer slot and wrap if necessary. */ sc->txb_new++; if (sc->txb_new == sc->txb_cnt) sc->txb_new = 0; if (sc->xmit_busy == 0) ed_xmit(sc); /* * Tap off here if there is a bpf listener. */ BPF_MTAP(ifp, m0); m_freem(m0); /* * Loop back to the top to possibly buffer more packets */ goto outloop; } /* * Ethernet interface receiver interrupt. */ static __inline void ed_rint(struct ed_softc *sc) { struct ifnet *ifp = sc->ifp; u_char boundry; u_short len; struct ed_ring packet_hdr; char *packet_ptr; if (sc->gone) return; /* * Set NIC to page 1 registers to get 'current' pointer */ ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_PAGE_1 | ED_CR_STA); /* * 'sc->next_packet' is the logical beginning of the ring-buffer - * i.e. it points to where new data has been buffered. The 'CURR' * (current) register points to the logical end of the ring-buffer - * i.e. it points to where additional new data will be added. 
We loop * here until the logical beginning equals the logical end (or in * other words, until the ring-buffer is empty). */ while (sc->next_packet != ed_nic_inb(sc, ED_P1_CURR)) { /* get pointer to this buffer's header structure */ packet_ptr = sc->mem_ring + (sc->next_packet - sc->rec_page_start) * ED_PAGE_SIZE; /* * The byte count includes a 4 byte header that was added by * the NIC. */ if (sc->mem_shared) packet_hdr = *(struct ed_ring *) packet_ptr; else ed_pio_readmem(sc, (uintptr_t)packet_ptr, (char *) &packet_hdr, sizeof(packet_hdr)); len = packet_hdr.count; if (len > (ETHER_MAX_LEN - ETHER_CRC_LEN + sizeof(struct ed_ring)) || len < (ETHER_MIN_LEN - ETHER_CRC_LEN + sizeof(struct ed_ring))) { /* * Length is a wild value. There's a good chance that * this was caused by the NIC being old and buggy. * The bug is that the length low byte is duplicated in * the high byte. Try to recalculate the length based on * the pointer to the next packet. */ /* * NOTE: sc->next_packet is pointing at the current packet. */ len &= ED_PAGE_SIZE - 1; /* preserve offset into page */ if (packet_hdr.next_packet >= sc->next_packet) len += (packet_hdr.next_packet - sc->next_packet) * ED_PAGE_SIZE; else len += ((packet_hdr.next_packet - sc->rec_page_start) + (sc->rec_page_stop - sc->next_packet)) * ED_PAGE_SIZE; /* * because buffers are aligned on 256-byte boundary, * the length computed above is off by 256 in almost * all cases. Fix it... */ if (len & 0xff) len -= 256; if (len > (ETHER_MAX_LEN - ETHER_CRC_LEN + sizeof(struct ed_ring))) sc->mibdata.dot3StatsFrameTooLongs++; } /* * Be fairly liberal about what we allow as a "reasonable" length * so that a [crufty] packet will make it to BPF (and can thus * be analyzed). Note that all that is really important is that * we have a length that will fit into one mbuf cluster or less; * the upper layer protocols can then figure out the length from * their own length field(s). 
* But make sure that we have at least a full ethernet header * or we would be unable to call ether_input() later. */ if ((len >= sizeof(struct ed_ring) + ETHER_HDR_LEN) && (len <= MCLBYTES) && (packet_hdr.next_packet >= sc->rec_page_start) && (packet_hdr.next_packet < sc->rec_page_stop)) { /* * Go get packet. */ ed_get_packet(sc, packet_ptr + sizeof(struct ed_ring), len - sizeof(struct ed_ring)); ifp->if_ipackets++; } else { /* * Really BAD. The ring pointers are corrupted. */ log(LOG_ERR, "%s: NIC memory corrupt - invalid packet length %d\n", ifp->if_xname, len); ifp->if_ierrors++; ed_reset(ifp); return; } /* * Update next packet pointer */ sc->next_packet = packet_hdr.next_packet; /* * Update NIC boundry pointer - being careful to keep it one * buffer behind. (as recommended by NS databook) */ boundry = sc->next_packet - 1; if (boundry < sc->rec_page_start) boundry = sc->rec_page_stop - 1; /* * Set NIC to page 0 registers to update boundry register */ ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STA); ed_nic_outb(sc, ED_P0_BNRY, boundry); /* * Set NIC to page 1 registers before looping to top (prepare * to get 'CURR' current pointer) */ ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_PAGE_1 | ED_CR_STA); } } /* * Ethernet interface interrupt processor */ void edintr(void *arg) { struct ed_softc *sc = (struct ed_softc*) arg; struct ifnet *ifp = sc->ifp; u_char isr; int count; if (sc->gone) return; /* * Set NIC to page 0 registers */ ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STA); /* * loop until there are no more new interrupts. When the card * goes away, the hardware will read back 0xff. Looking at * the interrupts, it would appear that 0xff is impossible, * or at least extremely unlikely. 
*/ while ((isr = ed_nic_inb(sc, ED_P0_ISR)) != 0 && isr != 0xff) { /* * reset all the bits that we are 'acknowledging' by writing a * '1' to each bit position that was set (writing a '1' * *clears* the bit) */ ed_nic_outb(sc, ED_P0_ISR, isr); /* * XXX workaround for AX88190 * We limit this to 5000 iterations. At 1us per inb/outb, * this translates to about 15ms, which should be plenty * of time, and also gives protection in the card eject * case. */ if (sc->chip_type == ED_CHIP_TYPE_AX88190) { count = 5000; /* 15ms */ while (count-- && (ed_nic_inb(sc, ED_P0_ISR) & isr)) { ed_nic_outb(sc, ED_P0_ISR,0); ed_nic_outb(sc, ED_P0_ISR,isr); } if (count == 0) break; } /* * Handle transmitter interrupts. Handle these first because * the receiver will reset the board under some conditions. */ if (isr & (ED_ISR_PTX | ED_ISR_TXE)) { u_char collisions = ed_nic_inb(sc, ED_P0_NCR) & 0x0f; /* * Check for transmit error. If a TX completed with an * error, we end up throwing the packet away. Really * the only error that is possible is excessive * collisions, and in this case it is best to allow * the automatic mechanisms of TCP to backoff the * flow. Of course, with UDP we're screwed, but this * is expected when a network is heavily loaded. */ (void) ed_nic_inb(sc, ED_P0_TSR); if (isr & ED_ISR_TXE) { u_char tsr; /* * Excessive collisions (16) */ tsr = ed_nic_inb(sc, ED_P0_TSR); if ((tsr & ED_TSR_ABT) && (collisions == 0)) { /* * When collisions total 16, the * P0_NCR will indicate 0, and the * TSR_ABT is set. 
*/ collisions = 16; sc->mibdata.dot3StatsExcessiveCollisions++; sc->mibdata.dot3StatsCollFrequencies[15]++; } if (tsr & ED_TSR_OWC) sc->mibdata.dot3StatsLateCollisions++; if (tsr & ED_TSR_CDH) sc->mibdata.dot3StatsSQETestErrors++; if (tsr & ED_TSR_CRS) sc->mibdata.dot3StatsCarrierSenseErrors++; if (tsr & ED_TSR_FU) sc->mibdata.dot3StatsInternalMacTransmitErrors++; /* * update output errors counter */ ifp->if_oerrors++; } else { /* * Update total number of successfully * transmitted packets. */ ifp->if_opackets++; } /* * reset tx busy and output active flags */ sc->xmit_busy = 0; ifp->if_flags &= ~IFF_OACTIVE; /* * clear watchdog timer */ ifp->if_timer = 0; /* * Add in total number of collisions on last * transmission. */ ifp->if_collisions += collisions; switch(collisions) { case 0: case 16: break; case 1: sc->mibdata.dot3StatsSingleCollisionFrames++; sc->mibdata.dot3StatsCollFrequencies[0]++; break; default: sc->mibdata.dot3StatsMultipleCollisionFrames++; sc->mibdata. dot3StatsCollFrequencies[collisions-1] ++; break; } /* * Decrement buffer in-use count if not zero (can only * be zero if a transmitter interrupt occured while * not actually transmitting). If data is ready to * transmit, start it transmitting, otherwise defer * until after handling receiver */ if (sc->txb_inuse && --sc->txb_inuse) ed_xmit(sc); } /* * Handle receiver interrupts */ if (isr & (ED_ISR_PRX | ED_ISR_RXE | ED_ISR_OVW)) { /* * Overwrite warning. In order to make sure that a * lockup of the local DMA hasn't occurred, we reset * and re-init the NIC. The NSC manual suggests only a * partial reset/re-init is necessary - but some chips * seem to want more. The DMA lockup has been seen * only with early rev chips - Methinks this bug was * fixed in later revs. -DG */ if (isr & ED_ISR_OVW) { ifp->if_ierrors++; #ifdef DIAGNOSTIC log(LOG_WARNING, "%s: warning - receiver ring buffer overrun\n", ifp->if_xname); #endif /* * Stop/reset/re-init NIC */ ed_reset(ifp); } else { /* * Receiver Error. 
One or more of: CRC error, * frame alignment error FIFO overrun, or * missed packet. */ if (isr & ED_ISR_RXE) { u_char rsr; rsr = ed_nic_inb(sc, ED_P0_RSR); if (rsr & ED_RSR_CRC) sc->mibdata.dot3StatsFCSErrors++; if (rsr & ED_RSR_FAE) sc->mibdata.dot3StatsAlignmentErrors++; if (rsr & ED_RSR_FO) sc->mibdata.dot3StatsInternalMacReceiveErrors++; ifp->if_ierrors++; #ifdef ED_DEBUG if_printf(ifp, "receive error %x\n", ed_nic_inb(sc, ED_P0_RSR)); #endif } /* * Go get the packet(s) XXX - Doing this on an * error is dubious because there shouldn't be * any data to get (we've configured the * interface to not accept packets with * errors). */ /* * Enable 16bit access to shared memory first * on WD/SMC boards. */ ed_enable_16bit_access(sc); ed_rint(sc); ed_disable_16bit_access(sc); } } /* * If it looks like the transmitter can take more data, * attempt to start output on the interface. This is done * after handling the receiver to give the receiver priority. */ if ((ifp->if_flags & IFF_OACTIVE) == 0) ed_start(ifp); /* * return NIC CR to standard state: page 0, remote DMA * complete, start (toggling the TXP bit off, even if was just * set in the transmit routine, is *okay* - it is 'edge' * triggered from low to high) */ ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STA); /* * If the Network Talley Counters overflow, read them to reset * them. It appears that old 8390's won't clear the ISR flag * otherwise - resulting in an infinite loop. */ if (isr & ED_ISR_CNT) { (void) ed_nic_inb(sc, ED_P0_CNTR0); (void) ed_nic_inb(sc, ED_P0_CNTR1); (void) ed_nic_inb(sc, ED_P0_CNTR2); } } } /* * Process an ioctl request. 
*/ static int ed_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct ed_softc *sc = ifp->if_softc; #ifndef ED_NO_MIIBUS struct ifreq *ifr = (struct ifreq *)data; struct mii_data *mii; #endif int s, error = 0; if (sc == NULL || sc->gone) { ifp->if_flags &= ~IFF_RUNNING; return ENXIO; } s = splimp(); switch (command) { case SIOCSIFFLAGS: /* * If the interface is marked up and stopped, then start it. * If it is marked down and running, then stop it. */ if (ifp->if_flags & IFF_UP) { if ((ifp->if_flags & IFF_RUNNING) == 0) ed_init(sc); } else { if (ifp->if_flags & IFF_RUNNING) { ed_stop(sc); ifp->if_flags &= ~IFF_RUNNING; } } /* * Promiscuous flag may have changed, so reprogram the RCR. */ ed_setrcr(sc); /* * An unfortunate hack to provide the (required) software * control of the tranceiver for 3Com/HP boards. * The ALTPHYS flag disables the tranceiver if set. * * XXX - should use ifmedia. */ #ifdef ED_3C503 if (sc->vendor == ED_VENDOR_3COM) { if (ifp->if_flags & IFF_ALTPHYS) ed_asic_outb(sc, ED_3COM_CR, 0); else ed_asic_outb(sc, ED_3COM_CR, ED_3COM_CR_XSEL); } #endif #ifdef ED_HPP if (sc->vendor == ED_VENDOR_HP) ed_hpp_set_physical_link(sc); #endif break; case SIOCADDMULTI: case SIOCDELMULTI: /* * Multicast list has changed; set the hardware filter * accordingly. */ ed_setrcr(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: if (sc->miibus == NULL) { error = EINVAL; break; } #ifndef ED_NO_MIIBUS mii = device_get_softc(sc->miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; #endif default: error = ether_ioctl(ifp, command, data); } (void) splx(s); return (error); } /* * Given a source and destination address, copy 'amount' of a packet from * the ring buffer into a linear destination buffer. Takes into account * ring-wrap. */ static __inline char * ed_ring_copy(struct ed_softc *sc, char *src, char *dst, u_short amount) { u_short tmp_amount; /* does copy wrap to lower addr in ring buffer? 
*/ if (src + amount > sc->mem_end) { tmp_amount = sc->mem_end - src; /* XXX * I'm not sure that this bcopy does only 16bit access */ /* copy amount up to end of NIC memory */ if (sc->mem_shared) bcopy(src, dst, tmp_amount); else ed_pio_readmem(sc, (uintptr_t)src, dst, tmp_amount); amount -= tmp_amount; src = sc->mem_ring; dst += tmp_amount; } if (sc->mem_shared) bcopy(src, dst, amount); else ed_pio_readmem(sc, (uintptr_t)src, dst, amount); return (src + amount); } /* * Retreive packet from shared memory and send to the next level up via * ether_input(). */ static void ed_get_packet(struct ed_softc *sc, char *buf, u_short len) { struct ifnet *ifp = sc->ifp; struct ether_header *eh; struct mbuf *m; /* Allocate a header mbuf */ MGETHDR(m, M_DONTWAIT, MT_DATA); if (m == NULL) return; m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = len; /* * We always put the received packet in a single buffer - * either with just an mbuf header or in a cluster attached * to the header. The +2 is to compensate for the alignment * fixup below. */ if ((len + 2) > MHLEN) { /* Attach an mbuf cluster */ MCLGET(m, M_DONTWAIT); /* Insist on getting a cluster */ if ((m->m_flags & M_EXT) == 0) { m_freem(m); return; } } /* * The +2 is to longword align the start of the real packet. * This is important for NFS. */ m->m_data += 2; eh = mtod(m, struct ether_header *); /* * Get packet, including link layer address, from interface. */ ed_ring_copy(sc, buf, (char *)eh, len); m->m_pkthdr.len = m->m_len = len; (*ifp->if_input)(ifp, m); } /* * Supporting routines */ /* * Given a NIC memory source address and a host memory destination * address, copy 'amount' from NIC to host using Programmed I/O. * The 'amount' is rounded up to a word - okay as long as mbufs * are word sized. * This routine is currently Novell-specific. 
*/ void ed_pio_readmem(struct ed_softc *sc, long src, uint8_t *dst, uint16_t amount) { #ifdef ED_HPP /* HP PC Lan+ cards need special handling */ if (sc->vendor == ED_VENDOR_HP && sc->type == ED_TYPE_HP_PCLANPLUS) { ed_hpp_readmem(sc, src, dst, amount); return; } #endif /* Regular Novell cards */ /* select page 0 registers */ ed_nic_outb(sc, ED_P0_CR, ED_CR_RD2 | ED_CR_STA); /* round up to a word */ if (amount & 1) ++amount; /* set up DMA byte count */ ed_nic_outb(sc, ED_P0_RBCR0, amount); ed_nic_outb(sc, ED_P0_RBCR1, amount >> 8); /* set up source address in NIC mem */ ed_nic_outb(sc, ED_P0_RSAR0, src); ed_nic_outb(sc, ED_P0_RSAR1, src >> 8); ed_nic_outb(sc, ED_P0_CR, ED_CR_RD0 | ED_CR_STA); if (sc->isa16bit) ed_asic_insw(sc, ED_NOVELL_DATA, dst, amount / 2); else ed_asic_insb(sc, ED_NOVELL_DATA, dst, amount); } /* * Stripped down routine for writing a linear buffer to NIC memory. * Only used in the probe routine to test the memory. 'len' must * be even. */ void ed_pio_writemem(struct ed_softc *sc, uint8_t *src, uint16_t dst, uint16_t len) { int maxwait = 200; /* about 240us */ /* select page 0 registers */ ed_nic_outb(sc, ED_P0_CR, ED_CR_RD2 | ED_CR_STA); /* reset remote DMA complete flag */ ed_nic_outb(sc, ED_P0_ISR, ED_ISR_RDC); /* set up DMA byte count */ ed_nic_outb(sc, ED_P0_RBCR0, len); ed_nic_outb(sc, ED_P0_RBCR1, len >> 8); /* set up destination address in NIC mem */ ed_nic_outb(sc, ED_P0_RSAR0, dst); ed_nic_outb(sc, ED_P0_RSAR1, dst >> 8); /* set remote DMA write */ ed_nic_outb(sc, ED_P0_CR, ED_CR_RD1 | ED_CR_STA); if (sc->isa16bit) ed_asic_outsw(sc, ED_NOVELL_DATA, src, len / 2); else ed_asic_outsb(sc, ED_NOVELL_DATA, src, len); /* * Wait for remote DMA complete. This is necessary because on the * transmit side, data is handled internally by the NIC in bursts and * we can't start another remote DMA until this one completes. Not * waiting causes really bad things to happen - like the NIC * irrecoverably jamming the ISA bus. 
*/ while (((ed_nic_inb(sc, ED_P0_ISR) & ED_ISR_RDC) != ED_ISR_RDC) && --maxwait) continue; } /* * Write an mbuf chain to the destination NIC memory address using * programmed I/O. */ static u_short ed_pio_write_mbufs(struct ed_softc *sc, struct mbuf *m, long dst) { struct ifnet *ifp = sc->ifp; unsigned short total_len, dma_len; struct mbuf *mp; int maxwait = 200; /* about 240us */ #ifdef ED_HPP /* HP PC Lan+ cards need special handling */ if (sc->vendor == ED_VENDOR_HP && sc->type == ED_TYPE_HP_PCLANPLUS) return ed_hpp_write_mbufs(sc, m, dst); #endif /* Regular Novell cards */ /* First, count up the total number of bytes to copy */ for (total_len = 0, mp = m; mp; mp = mp->m_next) total_len += mp->m_len; dma_len = total_len; if (sc->isa16bit && (dma_len & 1)) dma_len++; /* select page 0 registers */ ed_nic_outb(sc, ED_P0_CR, ED_CR_RD2 | ED_CR_STA); /* reset remote DMA complete flag */ ed_nic_outb(sc, ED_P0_ISR, ED_ISR_RDC); /* set up DMA byte count */ ed_nic_outb(sc, ED_P0_RBCR0, dma_len); ed_nic_outb(sc, ED_P0_RBCR1, dma_len >> 8); /* set up destination address in NIC mem */ ed_nic_outb(sc, ED_P0_RSAR0, dst); ed_nic_outb(sc, ED_P0_RSAR1, dst >> 8); /* set remote DMA write */ ed_nic_outb(sc, ED_P0_CR, ED_CR_RD1 | ED_CR_STA); /* * Transfer the mbuf chain to the NIC memory. * 16-bit cards require that data be transferred as words, and only words. * So that case requires some extra code to patch over odd-length mbufs. 
*/ if (!sc->isa16bit) { /* NE1000s are easy */ while (m) { if (m->m_len) ed_asic_outsb(sc, ED_NOVELL_DATA, m->m_data, m->m_len); m = m->m_next; } } else { /* NE2000s are a pain */ unsigned char *data; int len, wantbyte; unsigned char savebyte[2]; wantbyte = 0; while (m) { len = m->m_len; if (len) { data = mtod(m, caddr_t); /* finish the last word */ if (wantbyte) { savebyte[1] = *data; ed_asic_outw(sc, ED_NOVELL_DATA, *(u_short *)savebyte); data++; len--; wantbyte = 0; } /* output contiguous words */ if (len > 1) { ed_asic_outsw(sc, ED_NOVELL_DATA, data, len >> 1); data += len & ~1; len &= 1; } /* save last byte, if necessary */ if (len == 1) { savebyte[0] = *data; wantbyte = 1; } } m = m->m_next; } /* spit last byte */ if (wantbyte) ed_asic_outw(sc, ED_NOVELL_DATA, *(u_short *)savebyte); } /* * Wait for remote DMA complete. This is necessary because on the * transmit side, data is handled internally by the NIC in bursts and * we can't start another remote DMA until this one completes. Not * waiting causes really bad things to happen - like the NIC * irrecoverably jamming the ISA bus. */ while (((ed_nic_inb(sc, ED_P0_ISR) & ED_ISR_RDC) != ED_ISR_RDC) && --maxwait) continue; if (!maxwait) { log(LOG_WARNING, "%s: remote transmit DMA failed to complete\n", ifp->if_xname); ed_reset(ifp); return(0); } return (total_len); } #ifndef ED_NO_MIIBUS /* * MII bus support routines. 
*/ int ed_miibus_readreg(device_t dev, int phy, int reg) { struct ed_softc *sc; int failed, s, val; s = splimp(); sc = device_get_softc(dev); if (sc->gone) { splx(s); return (0); } (*sc->mii_writebits)(sc, 0xffffffff, 32); (*sc->mii_writebits)(sc, ED_MII_STARTDELIM, ED_MII_STARTDELIM_BITS); (*sc->mii_writebits)(sc, ED_MII_READOP, ED_MII_OP_BITS); (*sc->mii_writebits)(sc, phy, ED_MII_PHY_BITS); (*sc->mii_writebits)(sc, reg, ED_MII_REG_BITS); failed = (*sc->mii_readbits)(sc, ED_MII_ACK_BITS); val = (*sc->mii_readbits)(sc, ED_MII_DATA_BITS); (*sc->mii_writebits)(sc, ED_MII_IDLE, ED_MII_IDLE_BITS); splx(s); return (failed ? 0 : val); } void ed_miibus_writereg(device_t dev, int phy, int reg, int data) { struct ed_softc *sc; int s; s = splimp(); sc = device_get_softc(dev); if (sc->gone) { splx(s); return; } (*sc->mii_writebits)(sc, 0xffffffff, 32); (*sc->mii_writebits)(sc, ED_MII_STARTDELIM, ED_MII_STARTDELIM_BITS); (*sc->mii_writebits)(sc, ED_MII_WRITEOP, ED_MII_OP_BITS); (*sc->mii_writebits)(sc, phy, ED_MII_PHY_BITS); (*sc->mii_writebits)(sc, reg, ED_MII_REG_BITS); (*sc->mii_writebits)(sc, ED_MII_TURNAROUND, ED_MII_TURNAROUND_BITS); (*sc->mii_writebits)(sc, data, ED_MII_DATA_BITS); (*sc->mii_writebits)(sc, ED_MII_IDLE, ED_MII_IDLE_BITS); splx(s); } int ed_ifmedia_upd(struct ifnet *ifp) { struct ed_softc *sc; struct mii_data *mii; sc = ifp->if_softc; if (sc->gone || sc->miibus == NULL) return (ENXIO); mii = device_get_softc(sc->miibus); return mii_mediachg(mii); } void ed_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct ed_softc *sc; struct mii_data *mii; sc = ifp->if_softc; if (sc->gone || sc->miibus == NULL) return; mii = device_get_softc(sc->miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } void ed_child_detached(device_t dev, device_t child) { struct ed_softc *sc; sc = device_get_softc(dev); if (child == sc->miibus) sc->miibus = NULL; } #endif static void ed_setrcr(struct ed_softc *sc) { 
struct ifnet *ifp = sc->ifp; int i; u_char reg1; /* Bit 6 in AX88190 RCR register must be set. */ if (sc->chip_type == ED_CHIP_TYPE_AX88190) reg1 = ED_RCR_INTT; else reg1 = 0x00; /* set page 1 registers */ ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_PAGE_1 | ED_CR_STP); if (ifp->if_flags & IFF_PROMISC) { /* * Reconfigure the multicast filter. */ for (i = 0; i < 8; i++) ed_nic_outb(sc, ED_P1_MAR(i), 0xff); /* * And turn on promiscuous mode. Also enable reception of * runts and packets with CRC & alignment errors. */ /* Set page 0 registers */ ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STP); ed_nic_outb(sc, ED_P0_RCR, ED_RCR_PRO | ED_RCR_AM | ED_RCR_AB | ED_RCR_AR | ED_RCR_SEP | reg1); } else { /* set up multicast addresses and filter modes */ if (ifp->if_flags & IFF_MULTICAST) { uint32_t mcaf[2]; if (ifp->if_flags & IFF_ALLMULTI) { mcaf[0] = 0xffffffff; mcaf[1] = 0xffffffff; } else ed_ds_getmcaf(sc, mcaf); /* * Set multicast filter on chip. */ for (i = 0; i < 8; i++) ed_nic_outb(sc, ED_P1_MAR(i), ((u_char *) mcaf)[i]); /* Set page 0 registers */ ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STP); ed_nic_outb(sc, ED_P0_RCR, ED_RCR_AM | ED_RCR_AB | reg1); } else { /* * Initialize multicast address hashing registers to * not accept multicasts. */ for (i = 0; i < 8; ++i) ed_nic_outb(sc, ED_P1_MAR(i), 0x00); /* Set page 0 registers */ ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STP); ed_nic_outb(sc, ED_P0_RCR, ED_RCR_AB | reg1); } } /* * Start interface. */ ed_nic_outb(sc, ED_P0_CR, sc->cr_proto | ED_CR_STA); } /* * Compute the multicast address filter from the * list of multicast addresses we need to listen to. 
*/ static void ed_ds_getmcaf(struct ed_softc *sc, uint32_t *mcaf) { uint32_t index; u_char *af = (u_char *) mcaf; struct ifmultiaddr *ifma; mcaf[0] = 0; mcaf[1] = 0; + IF_ADDR_LOCK(sc->ifp); TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; index = ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; af[index >> 3] |= 1 << (index & 7); } + IF_ADDR_UNLOCK(sc->ifp); } int ed_isa_mem_ok(device_t dev, u_long pmem, u_int memsize) { if (pmem < 0xa0000 || pmem + memsize > 0x1000000) { device_printf(dev, "Invalid ISA memory address range " "configured: 0x%lx - 0x%lx\n", pmem, pmem + memsize); return (ENXIO); } return (0); } int ed_clear_memory(device_t dev) { struct ed_softc *sc = device_get_softc(dev); int i; /* * Now zero memory and verify that it is clear * XXX restricted to 16-bit writes? Do we need to * XXX enable 16-bit access? */ bzero(sc->mem_start, sc->mem_size); for (i = 0; i < sc->mem_size; ++i) { if (sc->mem_start[i]) { device_printf(dev, "failed to clear shared memory at " "0x%jx - check configuration\n", (uintmax_t)rman_get_start(sc->mem_res) + i); return (ENXIO); } } return (0); } Index: stable/6/sys/dev/em/if_em.c =================================================================== --- stable/6/sys/dev/em/if_em.c (revision 149421) +++ stable/6/sys/dev/em/if_em.c (revision 149422) @@ -1,3456 +1,3458 @@ /************************************************************************** Copyright (c) 2001-2005, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
***************************************************************************/ /*$FreeBSD$*/ #include /********************************************************************* * Set this to one to display debug statistics *********************************************************************/ int em_display_debug_stats = 0; /********************************************************************* * Linked list of board private structures for all NICs found *********************************************************************/ struct adapter *em_adapter_list = NULL; /********************************************************************* * Driver version *********************************************************************/ char em_driver_version[] = "2.1.7"; /********************************************************************* * PCI Device ID Table * * Used by probe to select devices to load on * Last field stores an index into em_strings * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } *********************************************************************/ static em_vendor_info_t em_vendor_info_array[] = { /* Intel(R) PRO/1000 Network Connection */ { 0x8086, E1000_DEV_ID_82540EM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82540EM_LOM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82540EP, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82540EP_LOM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82540EP_LP, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82541EI, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82541ER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82541ER_LOM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82541EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82541GI, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82541GI_LF, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82541GI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82542, 
PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82543GC_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82543GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82544EI_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82544EI_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82544GC_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82544GC_LOM, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82545EM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82545EM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82545GM_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82545GM_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82545GM_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82546EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82546EB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82546EB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82546GB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82546GB_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82546GB_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82546GB_PCIE, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82546GB_QUAD_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82547EI, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82547EI_MOBILE, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82547GI, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82573E, PCI_ANY_ID, PCI_ANY_ID, 0}, { 0x8086, E1000_DEV_ID_82573E_IAMT, PCI_ANY_ID, PCI_ANY_ID, 0}, /* required last entry */ { 0, 0, 0, 0, 0} }; /********************************************************************* * Table of branding strings for all supported NICs. 
*********************************************************************/ static char *em_strings[] = { "Intel(R) PRO/1000 Network Connection" }; /********************************************************************* * Function prototypes *********************************************************************/ static int em_probe(device_t); static int em_attach(device_t); static int em_detach(device_t); static int em_shutdown(device_t); static void em_intr(void *); static void em_start(struct ifnet *); static int em_ioctl(struct ifnet *, u_long, caddr_t); static void em_watchdog(struct ifnet *); static void em_init(void *); static void em_init_locked(struct adapter *); static void em_stop(void *); static void em_media_status(struct ifnet *, struct ifmediareq *); static int em_media_change(struct ifnet *); static void em_identify_hardware(struct adapter *); static int em_allocate_pci_resources(struct adapter *); static void em_free_pci_resources(struct adapter *); static void em_local_timer(void *); static int em_hardware_init(struct adapter *); static void em_setup_interface(device_t, struct adapter *); static int em_setup_transmit_structures(struct adapter *); static void em_initialize_transmit_unit(struct adapter *); static int em_setup_receive_structures(struct adapter *); static void em_initialize_receive_unit(struct adapter *); static void em_enable_intr(struct adapter *); static void em_disable_intr(struct adapter *); static void em_free_transmit_structures(struct adapter *); static void em_free_receive_structures(struct adapter *); static void em_update_stats_counters(struct adapter *); static void em_clean_transmit_interrupts(struct adapter *); static int em_allocate_receive_structures(struct adapter *); static int em_allocate_transmit_structures(struct adapter *); static void em_process_receive_interrupts(struct adapter *, int); static void em_receive_checksum(struct adapter *, struct em_rx_desc *, struct mbuf *); static void em_transmit_checksum_setup(struct 
adapter *, struct mbuf *, u_int32_t *, u_int32_t *); static void em_set_promisc(struct adapter *); static void em_disable_promisc(struct adapter *); static void em_set_multi(struct adapter *); static void em_print_hw_stats(struct adapter *); static void em_print_link_status(struct adapter *); static int em_get_buf(int i, struct adapter *, struct mbuf *); static void em_enable_vlans(struct adapter *); static void em_disable_vlans(struct adapter *); static int em_encap(struct adapter *, struct mbuf **); static void em_smartspeed(struct adapter *); static int em_82547_fifo_workaround(struct adapter *, int); static void em_82547_update_fifo_head(struct adapter *, int); static int em_82547_tx_fifo_reset(struct adapter *); static void em_82547_move_tail(void *arg); static void em_82547_move_tail_locked(struct adapter *); static int em_dma_malloc(struct adapter *, bus_size_t, struct em_dma_alloc *, int); static void em_dma_free(struct adapter *, struct em_dma_alloc *); static void em_print_debug_info(struct adapter *); static int em_is_valid_ether_addr(u_int8_t *); static int em_sysctl_stats(SYSCTL_HANDLER_ARGS); static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS); static u_int32_t em_fill_descriptors (u_int64_t address, u_int32_t length, PDESC_ARRAY desc_array); static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS); static void em_add_int_delay_sysctl(struct adapter *, const char *, const char *, struct em_int_delay_info *, int, int); /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ static device_method_t em_methods[] = { /* Device interface */ DEVMETHOD(device_probe, em_probe), DEVMETHOD(device_attach, em_attach), DEVMETHOD(device_detach, em_detach), DEVMETHOD(device_shutdown, em_shutdown), {0, 0} }; static driver_t em_driver = { "em", em_methods, sizeof(struct adapter ), }; static devclass_t em_devclass; DRIVER_MODULE(em, pci, em_driver, 
em_devclass, 0, 0); MODULE_DEPEND(em, pci, 1, 1, 1); MODULE_DEPEND(em, ether, 1, 1, 1); /********************************************************************* * Tunable default values. *********************************************************************/ #define E1000_TICKS_TO_USECS(ticks) ((1024 * (ticks) + 500) / 1000) #define E1000_USECS_TO_TICKS(usecs) ((1000 * (usecs) + 512) / 1024) static int em_tx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TIDV); static int em_rx_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RDTR); static int em_tx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_TADV); static int em_rx_abs_int_delay_dflt = E1000_TICKS_TO_USECS(EM_RADV); TUNABLE_INT("hw.em.tx_int_delay", &em_tx_int_delay_dflt); TUNABLE_INT("hw.em.rx_int_delay", &em_rx_int_delay_dflt); TUNABLE_INT("hw.em.tx_abs_int_delay", &em_tx_abs_int_delay_dflt); TUNABLE_INT("hw.em.rx_abs_int_delay", &em_rx_abs_int_delay_dflt); /********************************************************************* * Device identification routine * * em_probe determines if the driver should be loaded on * adapter based on PCI vendor/device id of the adapter. 
* * return BUS_PROBE_DEFAULT on success, positive on failure *********************************************************************/ static int em_probe(device_t dev) { em_vendor_info_t *ent; u_int16_t pci_vendor_id = 0; u_int16_t pci_device_id = 0; u_int16_t pci_subvendor_id = 0; u_int16_t pci_subdevice_id = 0; char adapter_name[60]; INIT_DEBUGOUT("em_probe: begin"); pci_vendor_id = pci_get_vendor(dev); if (pci_vendor_id != EM_VENDOR_ID) return(ENXIO); pci_device_id = pci_get_device(dev); pci_subvendor_id = pci_get_subvendor(dev); pci_subdevice_id = pci_get_subdevice(dev); ent = em_vendor_info_array; while (ent->vendor_id != 0) { if ((pci_vendor_id == ent->vendor_id) && (pci_device_id == ent->device_id) && ((pci_subvendor_id == ent->subvendor_id) || (ent->subvendor_id == PCI_ANY_ID)) && ((pci_subdevice_id == ent->subdevice_id) || (ent->subdevice_id == PCI_ANY_ID))) { sprintf(adapter_name, "%s, Version - %s", em_strings[ent->index], em_driver_version); device_set_desc_copy(dev, adapter_name); return(BUS_PROBE_DEFAULT); } ent++; } return(ENXIO); } /********************************************************************* * Device initialization routine * * The attach entry point is called when the driver is being loaded. * This routine identifies the type of hardware, allocates all resources * and initializes the hardware. 
* * return 0 on success, positive on failure *********************************************************************/ static int em_attach(device_t dev) { struct adapter * adapter; int tsize, rsize; int error = 0; INIT_DEBUGOUT("em_attach: begin"); /* Allocate, clear, and link in our adapter structure */ if (!(adapter = device_get_softc(dev))) { printf("em: adapter structure allocation failed\n"); return(ENOMEM); } bzero(adapter, sizeof(struct adapter )); adapter->dev = dev; adapter->osdep.dev = dev; adapter->unit = device_get_unit(dev); EM_LOCK_INIT(adapter, device_get_nameunit(dev)); if (em_adapter_list != NULL) em_adapter_list->prev = adapter; adapter->next = em_adapter_list; em_adapter_list = adapter; /* SYSCTL stuff */ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug_info", CTLTYPE_INT|CTLFLAG_RW, (void *)adapter, 0, em_sysctl_debug_info, "I", "Debug Information"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "stats", CTLTYPE_INT|CTLFLAG_RW, (void *)adapter, 0, em_sysctl_stats, "I", "Statistics"); callout_init(&adapter->timer, CALLOUT_MPSAFE); callout_init(&adapter->tx_fifo_timer, CALLOUT_MPSAFE); /* Determine hardware revision */ em_identify_hardware(adapter); /* Set up some sysctls for the tunable interrupt delays */ em_add_int_delay_sysctl(adapter, "rx_int_delay", "receive interrupt delay in usecs", &adapter->rx_int_delay, E1000_REG_OFFSET(&adapter->hw, RDTR), em_rx_int_delay_dflt); em_add_int_delay_sysctl(adapter, "tx_int_delay", "transmit interrupt delay in usecs", &adapter->tx_int_delay, E1000_REG_OFFSET(&adapter->hw, TIDV), em_tx_int_delay_dflt); if (adapter->hw.mac_type >= em_82540) { em_add_int_delay_sysctl(adapter, "rx_abs_int_delay", "receive interrupt delay limit in usecs", &adapter->rx_abs_int_delay, E1000_REG_OFFSET(&adapter->hw, RADV), em_rx_abs_int_delay_dflt); em_add_int_delay_sysctl(adapter, "tx_abs_int_delay", "transmit interrupt 
delay limit in usecs", &adapter->tx_abs_int_delay, E1000_REG_OFFSET(&adapter->hw, TADV), em_tx_abs_int_delay_dflt); } /* Parameters (to be read from user) */ adapter->num_tx_desc = EM_MAX_TXD; adapter->num_rx_desc = EM_MAX_RXD; adapter->hw.autoneg = DO_AUTO_NEG; adapter->hw.wait_autoneg_complete = WAIT_FOR_AUTO_NEG_DEFAULT; adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT; adapter->hw.tbi_compatibility_en = TRUE; adapter->rx_buffer_len = EM_RXBUFFER_2048; /* * These parameters control the automatic generation(Tx) and * response(Rx) to Ethernet PAUSE frames. */ adapter->hw.fc_high_water = FC_DEFAULT_HI_THRESH; adapter->hw.fc_low_water = FC_DEFAULT_LO_THRESH; adapter->hw.fc_pause_time = FC_DEFAULT_TX_TIMER; adapter->hw.fc_send_xon = TRUE; adapter->hw.fc = em_fc_full; adapter->hw.phy_init_script = 1; adapter->hw.phy_reset_disable = FALSE; #ifndef EM_MASTER_SLAVE adapter->hw.master_slave = em_ms_hw_default; #else adapter->hw.master_slave = EM_MASTER_SLAVE; #endif /* * Set the max frame size assuming standard ethernet * sized frames */ adapter->hw.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN; adapter->hw.min_frame_size = MINIMUM_ETHERNET_PACKET_SIZE + ETHER_CRC_LEN; /* * This controls when hardware reports transmit completion * status. 
*/ adapter->hw.report_tx_early = 1; if (em_allocate_pci_resources(adapter)) { printf("em%d: Allocation of PCI resources failed\n", adapter->unit); error = ENXIO; goto err_pci; } /* Initialize eeprom parameters */ em_init_eeprom_params(&adapter->hw); tsize = EM_ROUNDUP(adapter->num_tx_desc * sizeof(struct em_tx_desc), 4096); /* Allocate Transmit Descriptor ring */ if (em_dma_malloc(adapter, tsize, &adapter->txdma, BUS_DMA_NOWAIT)) { printf("em%d: Unable to allocate tx_desc memory\n", adapter->unit); error = ENOMEM; goto err_tx_desc; } adapter->tx_desc_base = (struct em_tx_desc *) adapter->txdma.dma_vaddr; rsize = EM_ROUNDUP(adapter->num_rx_desc * sizeof(struct em_rx_desc), 4096); /* Allocate Receive Descriptor ring */ if (em_dma_malloc(adapter, rsize, &adapter->rxdma, BUS_DMA_NOWAIT)) { printf("em%d: Unable to allocate rx_desc memory\n", adapter->unit); error = ENOMEM; goto err_rx_desc; } adapter->rx_desc_base = (struct em_rx_desc *) adapter->rxdma.dma_vaddr; /* Initialize the hardware */ if (em_hardware_init(adapter)) { printf("em%d: Unable to initialize the hardware\n", adapter->unit); error = EIO; goto err_hw_init; } /* Copy the permanent MAC address out of the EEPROM */ if (em_read_mac_addr(&adapter->hw) < 0) { printf("em%d: EEPROM read error while reading mac address\n", adapter->unit); error = EIO; goto err_mac_addr; } if (!em_is_valid_ether_addr(adapter->hw.mac_addr)) { printf("em%d: Invalid mac address\n", adapter->unit); error = EIO; goto err_mac_addr; } /* Setup OS specific network interface */ em_setup_interface(dev, adapter); /* Initialize statistics */ em_clear_hw_cntrs(&adapter->hw); em_update_stats_counters(adapter); adapter->hw.get_link_status = 1; em_check_for_link(&adapter->hw); /* Print the link status */ if (adapter->link_active == 1) { em_get_speed_and_duplex(&adapter->hw, &adapter->link_speed, &adapter->link_duplex); printf("em%d: Speed:%d Mbps Duplex:%s\n", adapter->unit, adapter->link_speed, adapter->link_duplex == FULL_DUPLEX ? 
"Full" : "Half"); } else printf("em%d: Speed:N/A Duplex:N/A\n", adapter->unit); /* Identify 82544 on PCIX */ em_get_bus_info(&adapter->hw); if(adapter->hw.bus_type == em_bus_type_pcix && adapter->hw.mac_type == em_82544) { adapter->pcix_82544 = TRUE; } else { adapter->pcix_82544 = FALSE; } INIT_DEBUGOUT("em_attach: end"); return(0); err_mac_addr: err_hw_init: em_dma_free(adapter, &adapter->rxdma); err_rx_desc: em_dma_free(adapter, &adapter->txdma); err_tx_desc: err_pci: em_free_pci_resources(adapter); return(error); } /********************************************************************* * Device removal routine * * The detach entry point is called when the driver is being removed. * This routine stops the adapter and deallocates all the resources * that were allocated for driver operation. * * return 0 on success, positive on failure *********************************************************************/ static int em_detach(device_t dev) { struct adapter * adapter = device_get_softc(dev); struct ifnet *ifp = adapter->ifp; INIT_DEBUGOUT("em_detach: begin"); EM_LOCK(adapter); adapter->in_detach = 1; em_stop(adapter); em_phy_hw_reset(&adapter->hw); EM_UNLOCK(adapter); #if __FreeBSD_version < 500000 ether_ifdetach(adapter->ifp, ETHER_BPF_SUPPORTED); #else ether_ifdetach(adapter->ifp); if_free(ifp); #endif em_free_pci_resources(adapter); bus_generic_detach(dev); /* Free Transmit Descriptor ring */ if (adapter->tx_desc_base) { em_dma_free(adapter, &adapter->txdma); adapter->tx_desc_base = NULL; } /* Free Receive Descriptor ring */ if (adapter->rx_desc_base) { em_dma_free(adapter, &adapter->rxdma); adapter->rx_desc_base = NULL; } /* Remove from the adapter list */ if (em_adapter_list == adapter) em_adapter_list = adapter->next; if (adapter->next != NULL) adapter->next->prev = adapter->prev; if (adapter->prev != NULL) adapter->prev->next = adapter->next; EM_LOCK_DESTROY(adapter); ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); ifp->if_timer = 0; return(0); } 
/********************************************************************* * * Shutdown entry point * **********************************************************************/ static int em_shutdown(device_t dev) { struct adapter *adapter = device_get_softc(dev); EM_LOCK(adapter); em_stop(adapter); EM_UNLOCK(adapter); return(0); } /********************************************************************* * Transmit entry point * * em_start is called by the stack to initiate a transmit. * The driver will remain in this routine as long as there are * packets to transmit and transmit resources are available. * In case resources are not available stack is notified and * the packet is requeued. **********************************************************************/ static void em_start_locked(struct ifnet *ifp) { struct mbuf *m_head; struct adapter *adapter = ifp->if_softc; mtx_assert(&adapter->mtx, MA_OWNED); if (!adapter->link_active) return; while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* * em_encap() can modify our pointer, and or make it NULL on * failure. In that event, we can't requeue. */ if (em_encap(adapter, &m_head)) { if (m_head == NULL) break; ifp->if_flags |= IFF_OACTIVE; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); break; } /* Send a copy of the frame to the BPF listener */ #if __FreeBSD_version < 500000 if (ifp->if_bpf) bpf_mtap(ifp, m_head); #else BPF_MTAP(ifp, m_head); #endif /* Set timeout in case hardware has problems transmitting */ ifp->if_timer = EM_TX_TIMEOUT; } return; } static void em_start(struct ifnet *ifp) { struct adapter *adapter = ifp->if_softc; EM_LOCK(adapter); em_start_locked(ifp); EM_UNLOCK(adapter); return; } /********************************************************************* * Ioctl entry point * * em_ioctl is called when the user wants to configure the * interface. 
* * return 0 on success, positive on failure **********************************************************************/ static int em_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { int mask, reinit, error = 0; struct ifreq *ifr = (struct ifreq *) data; struct adapter * adapter = ifp->if_softc; if (adapter->in_detach) return(error); switch (command) { case SIOCSIFADDR: case SIOCGIFADDR: IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)"); ether_ioctl(ifp, command, data); break; case SIOCSIFMTU: IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)"); if (ifr->ifr_mtu > MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN || \ /* 82573 does not support jumbo frames */ (adapter->hw.mac_type == em_82573 && ifr->ifr_mtu > ETHERMTU) ) { error = EINVAL; } else { EM_LOCK(adapter); ifp->if_mtu = ifr->ifr_mtu; adapter->hw.max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; em_init_locked(adapter); EM_UNLOCK(adapter); } break; case SIOCSIFFLAGS: IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)"); EM_LOCK(adapter); if (ifp->if_flags & IFF_UP) { if (!(ifp->if_flags & IFF_RUNNING)) { em_init_locked(adapter); } em_disable_promisc(adapter); em_set_promisc(adapter); } else { if (ifp->if_flags & IFF_RUNNING) { em_stop(adapter); } } EM_UNLOCK(adapter); break; case SIOCADDMULTI: case SIOCDELMULTI: IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI"); if (ifp->if_flags & IFF_RUNNING) { EM_LOCK(adapter); em_disable_intr(adapter); em_set_multi(adapter); if (adapter->hw.mac_type == em_82542_rev2_0) { em_initialize_receive_unit(adapter); } #ifdef DEVICE_POLLING if (!(ifp->if_flags & IFF_POLLING)) #endif em_enable_intr(adapter); EM_UNLOCK(adapter); } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)"); error = ifmedia_ioctl(ifp, ifr, &adapter->media, command); break; case SIOCSIFCAP: IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)"); reinit = 0; mask = ifr->ifr_reqcap ^ 
ifp->if_capenable; if (mask & IFCAP_POLLING) ifp->if_capenable ^= IFCAP_POLLING; if (mask & IFCAP_HWCSUM) { ifp->if_capenable ^= IFCAP_HWCSUM; reinit = 1; } if (mask & IFCAP_VLAN_HWTAGGING) { ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; reinit = 1; } if (reinit && (ifp->if_flags & IFF_RUNNING)) em_init(adapter); break; default: IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%x)", (int)command); error = EINVAL; } return(error); } /********************************************************************* * Watchdog entry point * * This routine is called whenever hardware quits transmitting. * **********************************************************************/ static void em_watchdog(struct ifnet *ifp) { struct adapter * adapter; adapter = ifp->if_softc; /* If we are in this routine because of pause frames, then * don't reset the hardware. */ if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_TXOFF) { ifp->if_timer = EM_TX_TIMEOUT; return; } if (em_check_for_link(&adapter->hw)) printf("em%d: watchdog timeout -- resetting\n", adapter->unit); ifp->if_flags &= ~IFF_RUNNING; em_init(adapter); ifp->if_oerrors++; return; } /********************************************************************* * Init entry point * * This routine is used in two ways. It is used by the stack as * init entry point in network interface structure. It is also used * by the driver as a hw/sw initialization routine to get to a * consistent state. * * return 0 on success, positive on failure **********************************************************************/ static void em_init_locked(struct adapter * adapter) { struct ifnet *ifp; uint32_t pba; ifp = adapter->ifp; INIT_DEBUGOUT("em_init: begin"); mtx_assert(&adapter->mtx, MA_OWNED); em_stop(adapter); /* Packet Buffer Allocation (PBA) * Writing PBA sets the receive portion of the buffer * the remainder is used for the transmit buffer. * * Devices before the 82547 had a Packet Buffer of 64K. * Default allocation: PBA=48K for Rx, leaving 16K for Tx. 
* After the 82547 the buffer was reduced to 40K. * Default allocation: PBA=30K for Rx, leaving 10K for Tx. * Note: default does not leave enough room for Jumbo Frame >10k. */ if(adapter->hw.mac_type < em_82547) { /* Total FIFO is 64K */ if(adapter->rx_buffer_len > EM_RXBUFFER_8192) pba = E1000_PBA_40K; /* 40K for Rx, 24K for Tx */ else pba = E1000_PBA_48K; /* 48K for Rx, 16K for Tx */ } else { /* Total FIFO is 40K */ if(adapter->hw.max_frame_size > EM_RXBUFFER_8192) { pba = E1000_PBA_22K; /* 22K for Rx, 18K for Tx */ } else { pba = E1000_PBA_30K; /* 30K for Rx, 10K for Tx */ } adapter->tx_fifo_head = 0; adapter->tx_head_addr = pba << EM_TX_HEAD_ADDR_SHIFT; adapter->tx_fifo_size = (E1000_PBA_40K - pba) << EM_PBA_BYTES_SHIFT; } INIT_DEBUGOUT1("em_init: pba=%dK",pba); E1000_WRITE_REG(&adapter->hw, PBA, pba); /* Get the latest mac address, User can use a LAA */ bcopy(IFP2ENADDR(adapter->ifp), adapter->hw.mac_addr, ETHER_ADDR_LEN); /* Initialize the hardware */ if (em_hardware_init(adapter)) { printf("em%d: Unable to initialize the hardware\n", adapter->unit); return; } if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) em_enable_vlans(adapter); /* Prepare transmit descriptors and buffers */ if (em_setup_transmit_structures(adapter)) { printf("em%d: Could not setup transmit structures\n", adapter->unit); em_stop(adapter); return; } em_initialize_transmit_unit(adapter); /* Setup Multicast table */ em_set_multi(adapter); /* Prepare receive descriptors and buffers */ if (em_setup_receive_structures(adapter)) { printf("em%d: Could not setup receive structures\n", adapter->unit); em_stop(adapter); return; } em_initialize_receive_unit(adapter); /* Don't loose promiscuous settings */ em_set_promisc(adapter); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; if (adapter->hw.mac_type >= em_82543) { if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist = EM_CHECKSUM_FEATURES; else ifp->if_hwassist = 0; } callout_reset(&adapter->timer, hz, em_local_timer, adapter); 
em_clear_hw_cntrs(&adapter->hw); #ifdef DEVICE_POLLING /* * Only enable interrupts if we are not polling, make sure * they are off otherwise. */ if (ifp->if_flags & IFF_POLLING) em_disable_intr(adapter); else #endif /* DEVICE_POLLING */ em_enable_intr(adapter); /* Don't reset the phy next time init gets called */ adapter->hw.phy_reset_disable = TRUE; return; } static void em_init(void *arg) { struct adapter * adapter = arg; EM_LOCK(adapter); em_init_locked(adapter); EM_UNLOCK(adapter); return; } #ifdef DEVICE_POLLING static poll_handler_t em_poll; static void em_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct adapter *adapter = ifp->if_softc; u_int32_t reg_icr; mtx_assert(&adapter->mtx, MA_OWNED); if (!(ifp->if_capenable & IFCAP_POLLING)) { ether_poll_deregister(ifp); cmd = POLL_DEREGISTER; } if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */ em_enable_intr(adapter); return; } if (cmd == POLL_AND_CHECK_STATUS) { reg_icr = E1000_READ_REG(&adapter->hw, ICR); if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { callout_stop(&adapter->timer); adapter->hw.get_link_status = 1; em_check_for_link(&adapter->hw); em_print_link_status(adapter); callout_reset(&adapter->timer, hz, em_local_timer, adapter); } } if (ifp->if_flags & IFF_RUNNING) { em_process_receive_interrupts(adapter, count); em_clean_transmit_interrupts(adapter); } if (ifp->if_flags & IFF_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) em_start_locked(ifp); } static void em_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct adapter *adapter = ifp->if_softc; EM_LOCK(adapter); em_poll_locked(ifp, cmd, count); EM_UNLOCK(adapter); } #endif /* DEVICE_POLLING */ /********************************************************************* * * Interrupt Service routine * **********************************************************************/ static void em_intr(void *arg) { u_int32_t loop_cnt = EM_MAX_INTR; u_int32_t reg_icr; struct ifnet *ifp; struct adapter *adapter = arg; 
EM_LOCK(adapter); ifp = adapter->ifp; #ifdef DEVICE_POLLING if (ifp->if_flags & IFF_POLLING) { EM_UNLOCK(adapter); return; } if ((ifp->if_capenable & IFCAP_POLLING) && ether_poll_register(em_poll, ifp)) { em_disable_intr(adapter); em_poll_locked(ifp, 0, 1); EM_UNLOCK(adapter); return; } #endif /* DEVICE_POLLING */ reg_icr = E1000_READ_REG(&adapter->hw, ICR); if (!reg_icr) { EM_UNLOCK(adapter); return; } /* Link status change */ if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) { callout_stop(&adapter->timer); adapter->hw.get_link_status = 1; em_check_for_link(&adapter->hw); em_print_link_status(adapter); callout_reset(&adapter->timer, hz, em_local_timer, adapter); } while (loop_cnt > 0) { if (ifp->if_flags & IFF_RUNNING) { em_process_receive_interrupts(adapter, -1); em_clean_transmit_interrupts(adapter); } loop_cnt--; } if (ifp->if_flags & IFF_RUNNING && !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) em_start_locked(ifp); EM_UNLOCK(adapter); return; } /********************************************************************* * * Media Ioctl callback * * This routine is called whenever the user queries the status of * the interface using ifconfig. 
* **********************************************************************/ static void em_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) { struct adapter * adapter = ifp->if_softc; INIT_DEBUGOUT("em_media_status: begin"); em_check_for_link(&adapter->hw); if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) { if (adapter->link_active == 0) { em_get_speed_and_duplex(&adapter->hw, &adapter->link_speed, &adapter->link_duplex); adapter->link_active = 1; } } else { if (adapter->link_active == 1) { adapter->link_speed = 0; adapter->link_duplex = 0; adapter->link_active = 0; } } ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!adapter->link_active) return; ifmr->ifm_status |= IFM_ACTIVE; if (adapter->hw.media_type == em_media_type_fiber) { ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; } else { switch (adapter->link_speed) { case 10: ifmr->ifm_active |= IFM_10_T; break; case 100: ifmr->ifm_active |= IFM_100_TX; break; case 1000: #if __FreeBSD_version < 500000 ifmr->ifm_active |= IFM_1000_TX; #else ifmr->ifm_active |= IFM_1000_T; #endif break; } if (adapter->link_duplex == FULL_DUPLEX) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; } return; } /********************************************************************* * * Media Ioctl callback * * This routine is called when the user changes speed/duplex using * media/mediopt option with ifconfig. 
* **********************************************************************/ static int em_media_change(struct ifnet *ifp) { struct adapter * adapter = ifp->if_softc; struct ifmedia *ifm = &adapter->media; INIT_DEBUGOUT("em_media_change: begin"); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return(EINVAL); switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: adapter->hw.autoneg = DO_AUTO_NEG; adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT; break; case IFM_1000_SX: #if __FreeBSD_version < 500000 case IFM_1000_TX: #else case IFM_1000_T: #endif adapter->hw.autoneg = DO_AUTO_NEG; adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL; break; case IFM_100_TX: adapter->hw.autoneg = FALSE; adapter->hw.autoneg_advertised = 0; if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) adapter->hw.forced_speed_duplex = em_100_full; else adapter->hw.forced_speed_duplex = em_100_half; break; case IFM_10_T: adapter->hw.autoneg = FALSE; adapter->hw.autoneg_advertised = 0; if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) adapter->hw.forced_speed_duplex = em_10_full; else adapter->hw.forced_speed_duplex = em_10_half; break; default: printf("em%d: Unsupported media type\n", adapter->unit); } /* As the speed/duplex settings my have changed we need to * reset the PHY. */ adapter->hw.phy_reset_disable = FALSE; em_init(adapter); return(0); } /********************************************************************* * * This routine maps the mbufs to tx descriptors. 
 *
 *  return 0 on success, positive on failure
 **********************************************************************/
/*
 * em_encap:
 *	Map one outbound mbuf chain (*m_headp) into hardware TX
 *	descriptors and notify the hardware (TDT write / 82547 tail
 *	workaround).  On success the mbuf and its DMA map are recorded in
 *	the tx_buffer slot of the LAST descriptor, to be released by
 *	em_clean_transmit_interrupts().
 *
 *	Returns 0 on success; ENOBUFS/ENOMEM or a bus_dma error on
 *	failure, in which case the caller still owns the mbuf.
 */
static int
em_encap(struct adapter *adapter, struct mbuf **m_headp)
{
	u_int32_t       txd_upper;
	u_int32_t       txd_lower, txd_used = 0, txd_saved = 0;
	int             i, j, error;
	u_int64_t       address;

	struct mbuf     *m_head;

	/* For 82544 Workaround */
	DESC_ARRAY      desc_array;
	u_int32_t       array_elements;
	u_int32_t       counter;

#if __FreeBSD_version < 500000
	struct ifvlan   *ifv = NULL;
#else
	struct m_tag    *mtag;
#endif
	bus_dma_segment_t segs[EM_MAX_SCATTER];
	bus_dmamap_t    map;
	int             nsegs;
	struct em_buffer   *tx_buffer = NULL;
	struct em_tx_desc *current_tx_desc = NULL;
	struct ifnet   *ifp = adapter->ifp;

	m_head = *m_headp;

	/*
	 * Force a cleanup if number of TX descriptors
	 * available hits the threshold
	 */
	if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
		em_clean_transmit_interrupts(adapter);
		if (adapter->num_tx_desc_avail <= EM_TX_CLEANUP_THRESHOLD) {
			adapter->no_tx_desc_avail1++;
			return(ENOBUFS);
		}
	}

	/*
	 * Map the packet for DMA.  A per-packet map is created here and
	 * destroyed on every error path below.
	 */
	if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &map)) {
		adapter->no_tx_map_avail++;
		return (ENOMEM);
	}
	error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, m_head, segs,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error != 0) {
		adapter->no_tx_dma_setup++;
		bus_dmamap_destroy(adapter->txtag, map);
		return (error);
	}
	KASSERT(nsegs != 0, ("em_encap: empty packet"));

	if (nsegs > adapter->num_tx_desc_avail) {
		adapter->no_tx_desc_avail2++;
		bus_dmamap_destroy(adapter->txtag, map);
		return (ENOBUFS);
	}

	if (ifp->if_hwassist > 0) {
		em_transmit_checksum_setup(adapter, m_head,
		    &txd_upper, &txd_lower);
	} else
		txd_upper = txd_lower = 0;

	/* Find out if we are in vlan mode */
#if __FreeBSD_version < 500000
	if ((m_head->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
	    m_head->m_pkthdr.rcvif != NULL &&
	    m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN)
		ifv = m_head->m_pkthdr.rcvif->if_softc;
#else
	mtag = VLAN_OUTPUT_TAG(ifp, m_head);
#endif

	/*
	 * When operating in promiscuous mode, hardware encapsulation for
	 * packets is disabled.  This means we have to add the vlan
	 * encapsulation in the driver, since it will have come down from the
	 * VLAN layer with a tag instead of a VLAN header.
	 *
	 * NOTE(review): the DMA map was loaded from the chain BEFORE the
	 * M_PREPEND below re-lays-out the mbuf, so the segs[] used further
	 * down appear to describe the pre-insertion chain -- confirm
	 * against later upstream revisions that reload the map here.
	 */
	if (mtag != NULL && adapter->em_insert_vlan_header) {
		struct ether_vlan_header *evl;
		struct ether_header eh;

		m_head = m_pullup(m_head, sizeof(eh));
		if (m_head == NULL) {
			*m_headp = NULL;
			bus_dmamap_destroy(adapter->txtag, map);
			return (ENOBUFS);
		}
		eh = *mtod(m_head, struct ether_header *);
		M_PREPEND(m_head, sizeof(*evl), M_DONTWAIT);
		if (m_head == NULL) {
			*m_headp = NULL;
			bus_dmamap_destroy(adapter->txtag, map);
			return (ENOBUFS);
		}
		m_head = m_pullup(m_head, sizeof(*evl));
		if (m_head == NULL) {
			*m_headp = NULL;
			bus_dmamap_destroy(adapter->txtag, map);
			return (ENOBUFS);
		}
		evl = mtod(m_head, struct ether_vlan_header *);
		bcopy(&eh, evl, sizeof(*evl));
		evl->evl_proto = evl->evl_encap_proto;
		evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
		evl->evl_tag = htons(VLAN_TAG_VALUE(mtag));
		m_tag_delete(m_head, mtag);
		mtag = NULL;
		*m_headp = m_head;
	}

	i = adapter->next_avail_tx_desc;
	if (adapter->pcix_82544) {
		txd_saved = i;		/* remember start in case we must unwind */
		txd_used = 0;
	}
	for (j = 0; j < nsegs; j++) {
		/* If adapter is 82544 and on PCIX bus */
		if(adapter->pcix_82544) {
			array_elements = 0;
			address = htole64(segs[j].ds_addr);
			/*
			 * Check the Address and Length combination and
			 * split the data accordingly
			 */
			array_elements = em_fill_descriptors(address,
			    htole32(segs[j].ds_len), &desc_array);
			for (counter = 0; counter < array_elements; counter++) {
				if (txd_used == adapter->num_tx_desc_avail) {
					/* out of room: roll back ring index */
					adapter->next_avail_tx_desc = txd_saved;
					adapter->no_tx_desc_avail2++;
					bus_dmamap_destroy(adapter->txtag, map);
					return (ENOBUFS);
				}
				tx_buffer = &adapter->tx_buffer_area[i];
				current_tx_desc = &adapter->tx_desc_base[i];
				current_tx_desc->buffer_addr = htole64(
					desc_array.descriptor[counter].address);
				current_tx_desc->lower.data = htole32(
					(adapter->txd_cmd | txd_lower |
					 (u_int16_t)desc_array.descriptor[counter].length));
				current_tx_desc->upper.data = htole32((txd_upper));
				if (++i == adapter->num_tx_desc)
					i = 0;

				tx_buffer->m_head = NULL;
				txd_used++;
			}
		} else {
			tx_buffer = &adapter->tx_buffer_area[i];
			current_tx_desc = &adapter->tx_desc_base[i];

			current_tx_desc->buffer_addr = htole64(segs[j].ds_addr);
			current_tx_desc->lower.data = htole32(
				adapter->txd_cmd | txd_lower | segs[j].ds_len);
			current_tx_desc->upper.data = htole32(txd_upper);

			if (++i == adapter->num_tx_desc)
				i = 0;

			tx_buffer->m_head = NULL;
		}
	}

	adapter->next_avail_tx_desc = i;
	if (adapter->pcix_82544) {
		adapter->num_tx_desc_avail -= txd_used;
	}
	else {
		adapter->num_tx_desc_avail -= nsegs;
	}

#if __FreeBSD_version < 500000
	if (ifv != NULL) {
		/* Set the vlan id */
		current_tx_desc->upper.fields.special = htole16(ifv->ifv_tag);
#else
	if (mtag != NULL) {
		/* Set the vlan id */
		current_tx_desc->upper.fields.special =
		    htole16(VLAN_TAG_VALUE(mtag));
#endif

		/* Tell hardware to add tag */
		current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_VLE);
	}

	/* Ownership lives with the last descriptor's buffer slot. */
	tx_buffer->m_head = m_head;
	tx_buffer->map = map;
	bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE);

	/*
	 * Last Descriptor of Packet needs End Of Packet (EOP)
	 */
	current_tx_desc->lower.data |= htole32(E1000_TXD_CMD_EOP);

	/*
	 * Advance the Transmit Descriptor Tail (Tdt), this tells the E1000
	 * that this frame is available to transmit.
	 */
	bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	if (adapter->hw.mac_type == em_82547 &&
	    adapter->link_duplex == HALF_DUPLEX) {
		/* 82547 half-duplex FIFO hang workaround path */
		em_82547_move_tail_locked(adapter);
	} else {
		E1000_WRITE_REG(&adapter->hw, TDT, i);
		if (adapter->hw.mac_type == em_82547) {
			em_82547_update_fifo_head(adapter,
			    m_head->m_pkthdr.len);
		}
	}

	return(0);
}

/*********************************************************************
 *
 *  82547 workaround to avoid controller hang in half-duplex environment.
 *  The workaround is to avoid queuing a large packet that would span
 *  the internal Tx FIFO ring boundary. We need to reset the FIFO pointers
 *  in this case. We do that only when FIFO is quiescent.
 *
 **********************************************************************/
/*
 * em_82547_move_tail_locked:
 *	Advance the hardware tail (TDT) one completed packet at a time,
 *	only when the 82547 FIFO workaround says the packet will not span
 *	the internal FIFO boundary; otherwise re-arm the tx_fifo_timer
 *	callout and retry later.  Caller must hold the adapter lock.
 */
static void
em_82547_move_tail_locked(struct adapter *adapter)
{
	uint16_t hw_tdt;
	uint16_t sw_tdt;
	struct em_tx_desc *tx_desc;
	uint16_t length = 0;
	boolean_t eop = 0;

	EM_LOCK_ASSERT(adapter);

	hw_tdt = E1000_READ_REG(&adapter->hw, TDT);
	sw_tdt = adapter->next_avail_tx_desc;

	/* Walk descriptors the hardware has not been told about yet. */
	while (hw_tdt != sw_tdt) {
		tx_desc = &adapter->tx_desc_base[hw_tdt];
		length += tx_desc->lower.flags.length;
		eop = tx_desc->lower.data & E1000_TXD_CMD_EOP;
		if(++hw_tdt == adapter->num_tx_desc)
			hw_tdt = 0;

		if(eop) {
			if (em_82547_fifo_workaround(adapter, length)) {
				/* FIFO not safe yet: defer and retry. */
				adapter->tx_fifo_wrk_cnt++;
				callout_reset(&adapter->tx_fifo_timer, 1,
					em_82547_move_tail, adapter);
				break;
			}
			E1000_WRITE_REG(&adapter->hw, TDT, hw_tdt);
			em_82547_update_fifo_head(adapter, length);
			length = 0;
		}
	}
	return;
}

/*
 * em_82547_move_tail:
 *	Callout wrapper: take the adapter lock and run the locked version.
 */
static void
em_82547_move_tail(void *arg)
{
	struct adapter *adapter = arg;

	EM_LOCK(adapter);
	em_82547_move_tail_locked(adapter);
	EM_UNLOCK(adapter);
}

/*
 * em_82547_fifo_workaround:
 *	Decide whether a packet of 'len' bytes may be handed to the
 *	hardware.  Returns 0 when it is safe to send, 1 when the send
 *	must be deferred (FIFO would wrap and could not be reset because
 *	it is not yet quiescent).
 */
static int
em_82547_fifo_workaround(struct adapter *adapter, int len)
{
	int fifo_space, fifo_pkt_len;

	fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);

	if (adapter->link_duplex == HALF_DUPLEX) {
		fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;

		if (fifo_pkt_len >= (EM_82547_PKT_THRESH + fifo_space)) {
			if (em_82547_tx_fifo_reset(adapter)) {
				return(0);
			}
			else {
				return(1);
			}
		}
	}

	return(0);
}

/*
 * em_82547_update_fifo_head:
 *	Account 'len' bytes (rounded up to the FIFO header granularity)
 *	against the software copy of the TX FIFO head, wrapping at
 *	tx_fifo_size.
 */
static void
em_82547_update_fifo_head(struct adapter *adapter, int len)
{
	int fifo_pkt_len = EM_ROUNDUP(len + EM_FIFO_HDR, EM_FIFO_HDR);

	/* tx_fifo_head is always 16 byte aligned */
	adapter->tx_fifo_head += fifo_pkt_len;
	if (adapter->tx_fifo_head >= adapter->tx_fifo_size) {
		adapter->tx_fifo_head -= adapter->tx_fifo_size;
	}

	return;
}

/*
 * em_82547_tx_fifo_reset:
 *	If the TX unit is completely quiescent (descriptor ring and
 *	internal FIFO pointers caught up, no packets pending), disable
 *	TX, rewind the FIFO pointers to tx_head_addr and re-enable TX.
 *	Returns TRUE when the reset was performed, FALSE otherwise.
 */
static int
em_82547_tx_fifo_reset(struct adapter *adapter)
{
	uint32_t tctl;

	if ( (E1000_READ_REG(&adapter->hw, TDT) ==
	      E1000_READ_REG(&adapter->hw, TDH)) &&
	     (E1000_READ_REG(&adapter->hw, TDFT) ==
	      E1000_READ_REG(&adapter->hw, TDFH)) &&
	     (E1000_READ_REG(&adapter->hw, TDFTS) ==
	      E1000_READ_REG(&adapter->hw, TDFHS)) &&
	     (E1000_READ_REG(&adapter->hw, TDFPC) == 0)) {

		/* Disable TX unit */
		tctl = E1000_READ_REG(&adapter->hw, TCTL);
		E1000_WRITE_REG(&adapter->hw, TCTL, tctl & ~E1000_TCTL_EN);

		/* Reset FIFO pointers */
		E1000_WRITE_REG(&adapter->hw, TDFT, adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFH, adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFTS, adapter->tx_head_addr);
		E1000_WRITE_REG(&adapter->hw, TDFHS, adapter->tx_head_addr);

		/* Re-enable TX unit */
		E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
		E1000_WRITE_FLUSH(&adapter->hw);

		adapter->tx_fifo_head = 0;
		adapter->tx_fifo_reset_cnt++;

		return(TRUE);
	}
	else {
		return(FALSE);
	}
}

/*
 * em_set_promisc:
 *	Program RCTL unicast/multicast-promiscuous bits from the current
 *	interface flags; in full promiscuous mode also disable hardware
 *	VLAN stripping and switch to software VLAN header insertion.
 */
static void
em_set_promisc(struct adapter * adapter)
{
	u_int32_t       reg_rctl;
	struct ifnet   *ifp = adapter->ifp;

	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);

	if (ifp->if_flags & IFF_PROMISC) {
		reg_rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
		/* Disable VLAN stripping in promiscous mode
		 * This enables bridging of vlan tagged frames to occur
		 * and also allows vlan tags to be seen in tcpdump
		 */
		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
			em_disable_vlans(adapter);
		adapter->em_insert_vlan_header = 1;
	} else if (ifp->if_flags & IFF_ALLMULTI) {
		reg_rctl |= E1000_RCTL_MPE;
		reg_rctl &= ~E1000_RCTL_UPE;
		E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
		adapter->em_insert_vlan_header = 0;
	} else
		adapter->em_insert_vlan_header = 0;

	return;
}

/*
 * em_disable_promisc:
 *	Clear both promiscuous bits, re-enable hardware VLAN stripping
 *	if the capability is enabled, and stop software VLAN insertion.
 */
static void
em_disable_promisc(struct adapter * adapter)
{
	u_int32_t       reg_rctl;
	struct ifnet   *ifp = adapter->ifp;

	reg_rctl = E1000_READ_REG(&adapter->hw, RCTL);

	reg_rctl &= (~E1000_RCTL_UPE);
	reg_rctl &= (~E1000_RCTL_MPE);
	E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl);

	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
		em_enable_vlans(adapter);
	adapter->em_insert_vlan_header = 0;

	return;
}

/*********************************************************************
 *  Multicast Update
 *
 *  This routine is called whenever multicast
address list is updated. * **********************************************************************/ static void em_set_multi(struct adapter * adapter) { u_int32_t reg_rctl = 0; u_int8_t mta[MAX_NUM_MULTICAST_ADDRESSES * ETH_LENGTH_OF_ADDRESS]; struct ifmultiaddr *ifma; int mcnt = 0; struct ifnet *ifp = adapter->ifp; IOCTL_DEBUGOUT("em_set_multi: begin"); if (adapter->hw.mac_type == em_82542_rev2_0) { reg_rctl = E1000_READ_REG(&adapter->hw, RCTL); if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) { em_pci_clear_mwi(&adapter->hw); } reg_rctl |= E1000_RCTL_RST; E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl); msec_delay(5); } - + + IF_ADDR_LOCK(ifp); #if __FreeBSD_version < 500000 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { #else TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { #endif if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) break; bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), &mta[mcnt*ETH_LENGTH_OF_ADDRESS], ETH_LENGTH_OF_ADDRESS); mcnt++; } + IF_ADDR_UNLOCK(ifp); if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) { reg_rctl = E1000_READ_REG(&adapter->hw, RCTL); reg_rctl |= E1000_RCTL_MPE; E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl); } else em_mc_addr_list_update(&adapter->hw, mta, mcnt, 0, 1); if (adapter->hw.mac_type == em_82542_rev2_0) { reg_rctl = E1000_READ_REG(&adapter->hw, RCTL); reg_rctl &= ~E1000_RCTL_RST; E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl); msec_delay(5); if (adapter->hw.pci_cmd_word & CMD_MEM_WRT_INVALIDATE) { em_pci_set_mwi(&adapter->hw); } } return; } /********************************************************************* * Timer routine * * This routine checks for link status and updates statistics. 
* **********************************************************************/ static void em_local_timer(void *arg) { struct ifnet *ifp; struct adapter * adapter = arg; ifp = adapter->ifp; EM_LOCK(adapter); em_check_for_link(&adapter->hw); em_print_link_status(adapter); em_update_stats_counters(adapter); if (em_display_debug_stats && ifp->if_flags & IFF_RUNNING) { em_print_hw_stats(adapter); } em_smartspeed(adapter); callout_reset(&adapter->timer, hz, em_local_timer, adapter); EM_UNLOCK(adapter); return; } static void em_print_link_status(struct adapter * adapter) { struct ifnet *ifp = adapter->ifp; if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU) { if (adapter->link_active == 0) { em_get_speed_and_duplex(&adapter->hw, &adapter->link_speed, &adapter->link_duplex); if (bootverbose) printf("em%d: Link is up %d Mbps %s\n", adapter->unit, adapter->link_speed, ((adapter->link_duplex == FULL_DUPLEX) ? "Full Duplex" : "Half Duplex")); adapter->link_active = 1; adapter->smartspeed = 0; if_link_state_change(ifp, LINK_STATE_UP); } } else { if (adapter->link_active == 1) { adapter->link_speed = 0; adapter->link_duplex = 0; if (bootverbose) printf("em%d: Link is Down\n", adapter->unit); adapter->link_active = 0; if_link_state_change(ifp, LINK_STATE_DOWN); } } return; } /********************************************************************* * * This routine disables all traffic on the adapter by issuing a * global reset on the MAC and deallocates TX/RX buffers. 
 *
 **********************************************************************/
/*
 * em_stop:
 *	Quiesce the adapter: mask interrupts, issue a global MAC reset,
 *	stop the callouts and release all TX/RX software structures.
 *	Caller must hold the adapter lock.
 */
static void
em_stop(void *arg)
{
	struct ifnet   *ifp;
	struct adapter * adapter = arg;
	ifp = adapter->ifp;

	mtx_assert(&adapter->mtx, MA_OWNED);

	INIT_DEBUGOUT("em_stop: begin");

	em_disable_intr(adapter);
	em_reset_hw(&adapter->hw);
	callout_stop(&adapter->timer);
	callout_stop(&adapter->tx_fifo_timer);
	em_free_transmit_structures(adapter);
	em_free_receive_structures(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	return;
}

/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
/*
 * em_identify_hardware:
 *	Ensure bus-master/memory access are enabled in PCI config space,
 *	cache the PCI IDs in the shared-code hw structure, and resolve
 *	the MAC type (enabling the PHY init script for 82541/82547).
 */
static void
em_identify_hardware(struct adapter * adapter)
{
	device_t dev = adapter->dev;

	/* Make sure our PCI config space has the necessary stuff set */
	adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
	      (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
		printf("em%d: Memory Access and/or Bus Master bits were not set!\n",
		       adapter->unit);
		adapter->hw.pci_cmd_word |=
		(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
		pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
	}

	/* Save off the information about this board */
	adapter->hw.vendor_id = pci_get_vendor(dev);
	adapter->hw.device_id = pci_get_device(dev);
	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);

	/* Identify the MAC */
	if (em_set_mac_type(&adapter->hw))
		printf("em%d: Unknown MAC Type\n", adapter->unit);

	if(adapter->hw.mac_type == em_82541 ||
	   adapter->hw.mac_type == em_82541_rev_2 ||
	   adapter->hw.mac_type == em_82547 ||
	   adapter->hw.mac_type == em_82547_rev_2)
		adapter->hw.phy_init_script = TRUE;

	return;
}

/*
 * em_allocate_pci_resources:
 *	Allocate the memory BAR, the I/O BAR (for >82543 parts, found by
 *	scanning config space for an I/O-type BAR) and the IRQ, then hook
 *	up em_intr.  Returns 0 or ENXIO.  Resources already acquired on a
 *	failure path are released later by em_free_pci_resources().
 */
static int
em_allocate_pci_resources(struct adapter * adapter)
{
	int             i, val, rid;
	device_t        dev = adapter->dev;

	rid = EM_MMBA;
	adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
						     &rid, RF_ACTIVE);
	if (!(adapter->res_memory)) {
		printf("em%d: Unable to allocate bus resource: memory\n",
		       adapter->unit);
		return(ENXIO);
	}
	adapter->osdep.mem_bus_space_tag =
	rman_get_bustag(adapter->res_memory);
	adapter->osdep.mem_bus_space_handle =
	rman_get_bushandle(adapter->res_memory);
	adapter->hw.hw_addr = (uint8_t *)&adapter->osdep.mem_bus_space_handle;

	if (adapter->hw.mac_type > em_82543) {
		/* Figure our where our IO BAR is ? */
		rid = EM_MMBA;
		for (i = 0; i < 5; i++) {
			val = pci_read_config(dev, rid, 4);
			if (val & 0x00000001) {
				adapter->io_rid = rid;
				break;
			}
			rid += 4;
		}

		adapter->res_ioport = bus_alloc_resource_any(dev,
							     SYS_RES_IOPORT,
							     &adapter->io_rid,
							     RF_ACTIVE);
		if (!(adapter->res_ioport)) {
			printf("em%d: Unable to allocate bus resource: ioport\n",
			       adapter->unit);
			return(ENXIO);
		}
		adapter->hw.io_base = rman_get_start(adapter->res_ioport);
	}

	rid = 0x0;
	adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ,
							&rid, RF_SHAREABLE |
							RF_ACTIVE);
	if (!(adapter->res_interrupt)) {
		printf("em%d: Unable to allocate bus resource: interrupt\n",
		       adapter->unit);
		return(ENXIO);
	}
	if (bus_setup_intr(dev, adapter->res_interrupt,
			   INTR_TYPE_NET | INTR_MPSAFE,
			   (void (*)(void *)) em_intr, adapter,
			   &adapter->int_handler_tag)) {
		printf("em%d: Error registering interrupt handler!\n",
		       adapter->unit);
		return(ENXIO);
	}

	adapter->hw.back = &adapter->osdep;

	return(0);
}

/*
 * em_free_pci_resources:
 *	Release whatever em_allocate_pci_resources() acquired; each
 *	resource is released only if it was actually allocated.
 */
static void
em_free_pci_resources(struct adapter * adapter)
{
	device_t dev = adapter->dev;

	if (adapter->res_interrupt != NULL) {
		bus_teardown_intr(dev, adapter->res_interrupt,
				  adapter->int_handler_tag);
		bus_release_resource(dev, SYS_RES_IRQ, 0,
				     adapter->res_interrupt);
	}

	if (adapter->res_memory != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, EM_MMBA,
				     adapter->res_memory);
	}

	if (adapter->res_ioport != NULL) {
		bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
				     adapter->res_ioport);
	}
	return;
}

/*********************************************************************
 *
 *  Initialize the hardware to a configuration as specified by the
 *  adapter structure. The controller is reset, the EEPROM is
 *  verified, the MAC address is set, then the shared initialization
 *  routines are called.
 *
 **********************************************************************/
static int
em_hardware_init(struct adapter * adapter)
{
	INIT_DEBUGOUT("em_hardware_init: begin");
	/* Issue a global reset */
	em_reset_hw(&adapter->hw);

	/* When hardware is reset, fifo_head is also reset */
	adapter->tx_fifo_head = 0;

	/* Make sure we have a good EEPROM before we read from it */
	if (em_validate_eeprom_checksum(&adapter->hw) < 0) {
		printf("em%d: The EEPROM Checksum Is Not Valid\n",
		       adapter->unit);
		return(EIO);
	}

	if (em_read_part_num(&adapter->hw, &(adapter->part_num)) < 0) {
		printf("em%d: EEPROM read error while reading part number\n",
		       adapter->unit);
		return(EIO);
	}

	if (em_init_hw(&adapter->hw) < 0) {
		printf("em%d: Hardware Initialization Failed",
		       adapter->unit);
		return(EIO);
	}

	/* Seed the cached link state from the post-init STATUS register. */
	em_check_for_link(&adapter->hw);
	if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)
		adapter->link_active = 1;
	else
		adapter->link_active = 0;

	if (adapter->link_active) {
		em_get_speed_and_duplex(&adapter->hw,
					&adapter->link_speed,
					&adapter->link_duplex);
	} else {
		adapter->link_speed = 0;
		adapter->link_duplex = 0;
	}

	return(0);
}

/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
/*
 * em_setup_interface:
 *	Allocate and populate the ifnet: entry points, queue length,
 *	capabilities (checksum offload on >=82543, VLAN, polling), attach
 *	it to the Ethernet layer and register the supported ifmedia words.
 */
static void
em_setup_interface(device_t dev, struct adapter * adapter)
{
	struct ifnet   *ifp;
	INIT_DEBUGOUT("em_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()", device_get_nameunit(dev));
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = 1000000000;
	ifp->if_init =  em_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = em_ioctl;
	ifp->if_start = em_start;
	ifp->if_watchdog = em_watchdog;
	IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1);
	ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1;
	IFQ_SET_READY(&ifp->if_snd);

#if __FreeBSD_version < 500000
	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
#else
	ether_ifattach(ifp, adapter->hw.mac_addr);
#endif

	ifp->if_capabilities = ifp->if_capenable = 0;

	if (adapter->hw.mac_type >= em_82543) {
		ifp->if_capabilities |= IFCAP_HWCSUM;
		ifp->if_capenable |= IFCAP_HWCSUM;
	}

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
#if __FreeBSD_version >= 500000
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	ifp->if_capenable |= IFCAP_VLAN_MTU;
#endif

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
	ifp->if_capenable |= IFCAP_POLLING;
#endif

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, em_media_change,
		     em_media_status);
	if (adapter->hw.media_type == em_media_type_fiber) {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
			    0, NULL);
	} else {
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX,
			    0, NULL);
#if __FreeBSD_version < 500000
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_TX | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_TX, 0, NULL);
#else
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T | IFM_FDX,
			    0, NULL);
		ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_T, 0, NULL);
#endif
	}
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}

/*********************************************************************
 *
 *  Workaround for SmartSpeed on 82541 and 82547 controllers
 *
 **********************************************************************/
/*
 * em_smartspeed:
 *	IGP PHY SmartSpeed workaround, driven once a second from
 *	em_local_timer() while 1000FULL is advertised but link is down:
 *	after repeated master/slave configuration faults, drop the
 *	manual master/slave setting and restart autonegotiation; after
 *	EM_SMARTSPEED_DOWNSHIFT tries, re-enable it, and wrap the counter
 *	at EM_SMARTSPEED_MAX.
 */
static void
em_smartspeed(struct adapter *adapter)
{
	uint16_t phy_tmp;

	if(adapter->link_active || (adapter->hw.phy_type != em_phy_igp) ||
	   !adapter->hw.autoneg ||
	   !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
		return;

	if(adapter->smartspeed == 0) {
		/* If Master/Slave config fault is asserted twice,
		 * we assume back-to-back */
		em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if(!(phy_tmp & SR_1000T_MS_CONFIG_FAULT)) return;
		em_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_tmp);
		if(phy_tmp & SR_1000T_MS_CONFIG_FAULT) {
			em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL,
					&phy_tmp);
			if(phy_tmp & CR_1000T_MS_ENABLE) {
				phy_tmp &= ~CR_1000T_MS_ENABLE;
				em_write_phy_reg(&adapter->hw,
						    PHY_1000T_CTRL, phy_tmp);
				adapter->smartspeed++;
				if(adapter->hw.autoneg &&
				   !em_phy_setup_autoneg(&adapter->hw) &&
				   !em_read_phy_reg(&adapter->hw, PHY_CTRL,
						       &phy_tmp)) {
					phy_tmp |= (MII_CR_AUTO_NEG_EN |
						    MII_CR_RESTART_AUTO_NEG);
					em_write_phy_reg(&adapter->hw,
							 PHY_CTRL, phy_tmp);
				}
			}
		}
		return;
	} else if(adapter->smartspeed == EM_SMARTSPEED_DOWNSHIFT) {
		/* If still no link, perhaps using 2/3 pair cable */
		em_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_tmp);
		phy_tmp |= CR_1000T_MS_ENABLE;
		em_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_tmp);
		if(adapter->hw.autoneg &&
		   !em_phy_setup_autoneg(&adapter->hw) &&
		   !em_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_tmp)) {
			phy_tmp |= (MII_CR_AUTO_NEG_EN |
				    MII_CR_RESTART_AUTO_NEG);
			em_write_phy_reg(&adapter->hw, PHY_CTRL, phy_tmp);
		}
	}
	/* Restart process after EM_SMARTSPEED_MAX iterations */
	if(adapter->smartspeed++ == EM_SMARTSPEED_MAX)
		adapter->smartspeed = 0;

	return;
}

/*
 * Manage DMA'able memory.
*/ static void em_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error) return; *(bus_addr_t*) arg = segs->ds_addr; return; } static int em_dma_malloc(struct adapter *adapter, bus_size_t size, struct em_dma_alloc *dma, int mapflags) { int r; r = bus_dma_tag_create(NULL, /* parent */ PAGE_SIZE, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &dma->dma_tag); if (r != 0) { printf("em%d: em_dma_malloc: bus_dma_tag_create failed; " "error %u\n", adapter->unit, r); goto fail_0; } r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr, BUS_DMA_NOWAIT, &dma->dma_map); if (r != 0) { printf("em%d: em_dma_malloc: bus_dmammem_alloc failed; " "size %ju, error %d\n", adapter->unit, (uintmax_t)size, r); goto fail_2; } r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size, em_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT); if (r != 0) { printf("em%d: em_dma_malloc: bus_dmamap_load failed; " "error %u\n", adapter->unit, r); goto fail_3; } dma->dma_size = size; return (0); fail_3: bus_dmamap_unload(dma->dma_tag, dma->dma_map); fail_2: bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); bus_dma_tag_destroy(dma->dma_tag); fail_0: dma->dma_map = NULL; dma->dma_tag = NULL; return (r); } static void em_dma_free(struct adapter *adapter, struct em_dma_alloc *dma) { bus_dmamap_unload(dma->dma_tag, dma->dma_map); bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); bus_dma_tag_destroy(dma->dma_tag); } /********************************************************************* * * Allocate memory for tx_buffer structures. The tx_buffer stores all * the information needed to transmit a packet on the wire. 
* **********************************************************************/ static int em_allocate_transmit_structures(struct adapter * adapter) { if (!(adapter->tx_buffer_area = (struct em_buffer *) malloc(sizeof(struct em_buffer) * adapter->num_tx_desc, M_DEVBUF, M_NOWAIT))) { printf("em%d: Unable to allocate tx_buffer memory\n", adapter->unit); return ENOMEM; } bzero(adapter->tx_buffer_area, sizeof(struct em_buffer) * adapter->num_tx_desc); return 0; } /********************************************************************* * * Allocate and initialize transmit structures. * **********************************************************************/ static int em_setup_transmit_structures(struct adapter * adapter) { /* * Setup DMA descriptor areas. */ if (bus_dma_tag_create(NULL, /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES * 8, /* maxsize */ EM_MAX_SCATTER, /* nsegments */ MCLBYTES * 8, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &adapter->txtag)) { printf("em%d: Unable to allocate TX DMA tag\n", adapter->unit); return (ENOMEM); } if (em_allocate_transmit_structures(adapter)) return (ENOMEM); bzero((void *) adapter->tx_desc_base, (sizeof(struct em_tx_desc)) * adapter->num_tx_desc); adapter->next_avail_tx_desc = 0; adapter->oldest_used_tx_desc = 0; /* Set number of descriptors available */ adapter->num_tx_desc_avail = adapter->num_tx_desc; /* Set checksum context */ adapter->active_checksum_context = OFFLOAD_NONE; return (0); } /********************************************************************* * * Enable transmit unit. 
 *
 **********************************************************************/
/*
 * em_initialize_transmit_unit:
 *	Program the TX half of the MAC: descriptor ring base/length,
 *	head/tail pointers, inter-packet gap, interrupt delays and the
 *	transmit control register, then pick the base descriptor command
 *	bits (IFCS|RS, plus IDE when a TX interrupt delay is configured).
 */
static void
em_initialize_transmit_unit(struct adapter * adapter)
{
	u_int32_t       reg_tctl;
	u_int32_t       reg_tipg = 0;
	u_int64_t       bus_addr;

	INIT_DEBUGOUT("em_initialize_transmit_unit: begin");

	/* Setup the Base and Length of the Tx Descriptor Ring */
	bus_addr = adapter->txdma.dma_paddr;
	E1000_WRITE_REG(&adapter->hw, TDBAL, (u_int32_t)bus_addr);
	E1000_WRITE_REG(&adapter->hw, TDBAH, (u_int32_t)(bus_addr >> 32));
	E1000_WRITE_REG(&adapter->hw, TDLEN,
			adapter->num_tx_desc *
			sizeof(struct em_tx_desc));

	/* Setup the HW Tx Head and Tail descriptor pointers */
	E1000_WRITE_REG(&adapter->hw, TDH, 0);
	E1000_WRITE_REG(&adapter->hw, TDT, 0);

	HW_DEBUGOUT2("Base = %x, Length = %x\n",
		     E1000_READ_REG(&adapter->hw, TDBAL),
		     E1000_READ_REG(&adapter->hw, TDLEN));

	/* Set the default values for the Tx Inter Packet Gap timer */
	switch (adapter->hw.mac_type) {
	case em_82542_rev2_0:
	case em_82542_rev2_1:
		reg_tipg = DEFAULT_82542_TIPG_IPGT;
		reg_tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		reg_tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
		break;
	default:
		if (adapter->hw.media_type == em_media_type_fiber)
			reg_tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
		else
			reg_tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
		reg_tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
		reg_tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
	}

	E1000_WRITE_REG(&adapter->hw, TIPG, reg_tipg);
	E1000_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay.value);
	if(adapter->hw.mac_type >= em_82540)
		E1000_WRITE_REG(&adapter->hw, TADV,
		    adapter->tx_abs_int_delay.value);

	/* Program the Transmit Control Register */
	reg_tctl = E1000_TCTL_PSP | E1000_TCTL_EN |
		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
	if (adapter->hw.mac_type >= em_82573)
		reg_tctl |= E1000_TCTL_MULR;
	if (adapter->link_duplex == 1) {
		reg_tctl |= E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
	} else {
		reg_tctl |= E1000_HDX_COLLISION_DISTANCE << E1000_COLD_SHIFT;
	}
	E1000_WRITE_REG(&adapter->hw, TCTL, reg_tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	adapter->txd_cmd = E1000_TXD_CMD_IFCS | E1000_TXD_CMD_RS;

	if (adapter->tx_int_delay.value > 0)
		adapter->txd_cmd |= E1000_TXD_CMD_IDE;

	return;
}

/*********************************************************************
 *
 *  Free all transmit related data structures.
 *
 **********************************************************************/
/*
 * em_free_transmit_structures:
 *	Tear down TX software state: unload/destroy per-packet DMA maps,
 *	free any in-flight mbufs, free the buffer array and destroy the
 *	TX DMA tag.  Safe to call when nothing was allocated (NULL checks).
 */
static void
em_free_transmit_structures(struct adapter * adapter)
{
	struct em_buffer   *tx_buffer;
	int             i;

	INIT_DEBUGOUT("free_transmit_structures: begin");

	if (adapter->tx_buffer_area != NULL) {
		tx_buffer = adapter->tx_buffer_area;
		for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
			if (tx_buffer->m_head != NULL) {
				bus_dmamap_unload(adapter->txtag,
				    tx_buffer->map);
				bus_dmamap_destroy(adapter->txtag,
				    tx_buffer->map);
				m_freem(tx_buffer->m_head);
			}
			tx_buffer->m_head = NULL;
		}
	}
	if (adapter->tx_buffer_area != NULL) {
		free(adapter->tx_buffer_area, M_DEVBUF);
		adapter->tx_buffer_area = NULL;
	}
	if (adapter->txtag != NULL) {
		bus_dma_tag_destroy(adapter->txtag);
		adapter->txtag = NULL;
	}
	return;
}

/*********************************************************************
 *
 *  The offload context needs to be set when we transfer the first
 *  packet of a particular protocol (TCP/UDP).  We change the
 *  context only if the protocol type changes.
* **********************************************************************/ static void em_transmit_checksum_setup(struct adapter * adapter, struct mbuf *mp, u_int32_t *txd_upper, u_int32_t *txd_lower) { struct em_context_desc *TXD; struct em_buffer *tx_buffer; int curr_txd; if (mp->m_pkthdr.csum_flags) { if (mp->m_pkthdr.csum_flags & CSUM_TCP) { *txd_upper = E1000_TXD_POPTS_TXSM << 8; *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; if (adapter->active_checksum_context == OFFLOAD_TCP_IP) return; else adapter->active_checksum_context = OFFLOAD_TCP_IP; } else if (mp->m_pkthdr.csum_flags & CSUM_UDP) { *txd_upper = E1000_TXD_POPTS_TXSM << 8; *txd_lower = E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; if (adapter->active_checksum_context == OFFLOAD_UDP_IP) return; else adapter->active_checksum_context = OFFLOAD_UDP_IP; } else { *txd_upper = 0; *txd_lower = 0; return; } } else { *txd_upper = 0; *txd_lower = 0; return; } /* If we reach this point, the checksum offload context * needs to be reset. */ curr_txd = adapter->next_avail_tx_desc; tx_buffer = &adapter->tx_buffer_area[curr_txd]; TXD = (struct em_context_desc *) &adapter->tx_desc_base[curr_txd]; TXD->lower_setup.ip_fields.ipcss = ETHER_HDR_LEN; TXD->lower_setup.ip_fields.ipcso = ETHER_HDR_LEN + offsetof(struct ip, ip_sum); TXD->lower_setup.ip_fields.ipcse = htole16(ETHER_HDR_LEN + sizeof(struct ip) - 1); TXD->upper_setup.tcp_fields.tucss = ETHER_HDR_LEN + sizeof(struct ip); TXD->upper_setup.tcp_fields.tucse = htole16(0); if (adapter->active_checksum_context == OFFLOAD_TCP_IP) { TXD->upper_setup.tcp_fields.tucso = ETHER_HDR_LEN + sizeof(struct ip) + offsetof(struct tcphdr, th_sum); } else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) { TXD->upper_setup.tcp_fields.tucso = ETHER_HDR_LEN + sizeof(struct ip) + offsetof(struct udphdr, uh_sum); } TXD->tcp_seg_setup.data = htole32(0); TXD->cmd_and_length = htole32(adapter->txd_cmd | E1000_TXD_CMD_DEXT); tx_buffer->m_head = NULL; if (++curr_txd == adapter->num_tx_desc) 
curr_txd = 0; adapter->num_tx_desc_avail--; adapter->next_avail_tx_desc = curr_txd; return; } /********************************************************************** * * Examine each tx_buffer in the used queue. If the hardware is done * processing the packet then free associated resources. The * tx_buffer is put back on the free queue. * **********************************************************************/ static void em_clean_transmit_interrupts(struct adapter * adapter) { int i, num_avail; struct em_buffer *tx_buffer; struct em_tx_desc *tx_desc; struct ifnet *ifp = adapter->ifp; mtx_assert(&adapter->mtx, MA_OWNED); if (adapter->num_tx_desc_avail == adapter->num_tx_desc) return; num_avail = adapter->num_tx_desc_avail; i = adapter->oldest_used_tx_desc; tx_buffer = &adapter->tx_buffer_area[i]; tx_desc = &adapter->tx_desc_base[i]; bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map, BUS_DMASYNC_POSTREAD); while (tx_desc->upper.fields.status & E1000_TXD_STAT_DD) { tx_desc->upper.data = 0; num_avail++; if (tx_buffer->m_head) { ifp->if_opackets++; bus_dmamap_unload(adapter->txtag, tx_buffer->map); bus_dmamap_destroy(adapter->txtag, tx_buffer->map); m_freem(tx_buffer->m_head); tx_buffer->m_head = NULL; } if (++i == adapter->num_tx_desc) i = 0; tx_buffer = &adapter->tx_buffer_area[i]; tx_desc = &adapter->tx_desc_base[i]; } bus_dmamap_sync(adapter->txdma.dma_tag, adapter->txdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); adapter->oldest_used_tx_desc = i; /* * If we have enough room, clear IFF_OACTIVE to tell the stack * that it is OK to send packets. * If there are no pending descriptors, clear the timeout. Otherwise, * if some descriptors have been freed, restart the timeout. 
*/ if (num_avail > EM_TX_CLEANUP_THRESHOLD) { ifp->if_flags &= ~IFF_OACTIVE; if (num_avail == adapter->num_tx_desc) ifp->if_timer = 0; else if (num_avail == adapter->num_tx_desc_avail) ifp->if_timer = EM_TX_TIMEOUT; } adapter->num_tx_desc_avail = num_avail; return; } /********************************************************************* * * Get a buffer from system mbuf buffer pool. * **********************************************************************/ static int em_get_buf(int i, struct adapter *adapter, struct mbuf *nmp) { register struct mbuf *mp = nmp; struct em_buffer *rx_buffer; struct ifnet *ifp; bus_addr_t paddr; int error; ifp = adapter->ifp; if (mp == NULL) { mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (mp == NULL) { adapter->mbuf_cluster_failed++; return(ENOBUFS); } mp->m_len = mp->m_pkthdr.len = MCLBYTES; } else { mp->m_len = mp->m_pkthdr.len = MCLBYTES; mp->m_data = mp->m_ext.ext_buf; mp->m_next = NULL; } if (ifp->if_mtu <= ETHERMTU) { m_adj(mp, ETHER_ALIGN); } rx_buffer = &adapter->rx_buffer_area[i]; /* * Using memory from the mbuf cluster pool, invoke the * bus_dma machinery to arrange the memory mapping. */ error = bus_dmamap_load(adapter->rxtag, rx_buffer->map, mtod(mp, void *), mp->m_len, em_dmamap_cb, &paddr, 0); if (error) { m_free(mp); return(error); } rx_buffer->m_head = mp; adapter->rx_desc_base[i].buffer_addr = htole64(paddr); bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD); return(0); } /********************************************************************* * * Allocate memory for rx_buffer structures. Since we use one * rx_buffer per received packet, the maximum number of rx_buffer's * that we'll need is equal to the number of receive descriptors * that we've allocated. 
* **********************************************************************/
static int
em_allocate_receive_structures(struct adapter * adapter)
{
	int		i, error;
	struct em_buffer *rx_buffer;

	if (!(adapter->rx_buffer_area =
	      (struct em_buffer *) malloc(sizeof(struct em_buffer) *
					  adapter->num_rx_desc, M_DEVBUF,
					  M_NOWAIT))) {
		printf("em%d: Unable to allocate rx_buffer memory\n",
		       adapter->unit);
		return (ENOMEM);
	}

	bzero(adapter->rx_buffer_area,
	      sizeof(struct em_buffer) * adapter->num_rx_desc);

	error = bus_dma_tag_create(NULL,		/* parent */
				   1, 0,		/* alignment, bounds */
				   BUS_SPACE_MAXADDR,	/* lowaddr */
				   BUS_SPACE_MAXADDR,	/* highaddr */
				   NULL, NULL,		/* filter, filterarg */
				   MCLBYTES,		/* maxsize */
				   1,			/* nsegments */
				   MCLBYTES,		/* maxsegsize */
				   BUS_DMA_ALLOCNOW,	/* flags */
				   NULL,		/* lockfunc */
				   NULL,		/* lockarg */
				   &adapter->rxtag);
	if (error != 0) {
		printf("em%d: em_allocate_receive_structures: "
		       "bus_dma_tag_create failed; error %u\n",
		       adapter->unit, error);
		goto fail_0;
	}

	/* One DMA map per receive descriptor. */
	rx_buffer = adapter->rx_buffer_area;
	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
		error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT,
					  &rx_buffer->map);
		if (error != 0) {
			printf("em%d: em_allocate_receive_structures: "
			       "bus_dmamap_create failed; error %u\n",
			       adapter->unit, error);
			goto fail_1;
		}
	}

	/* Pre-load every descriptor with a fresh mbuf cluster. */
	for (i = 0; i < adapter->num_rx_desc; i++) {
		error = em_get_buf(i, adapter, NULL);
		if (error != 0)
			goto fail_1;
	}
	bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (0);

fail_1:
	/*
	 * BUG FIX: the old error paths leaked every DMA map (and, on an
	 * em_get_buf() failure, every already-loaded mbuf cluster)
	 * created before the point of failure.  Unwind them all before
	 * tearing down the tag; NULL everything out so a later
	 * em_free_receive_structures() call is a harmless no-op.
	 */
	rx_buffer = adapter->rx_buffer_area;
	for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
		if (rx_buffer->m_head != NULL) {
			bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
			m_freem(rx_buffer->m_head);
			rx_buffer->m_head = NULL;
		}
		if (rx_buffer->map != NULL) {
			bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
			rx_buffer->map = NULL;
		}
		adapter->rx_desc_base[i].buffer_addr = 0;
	}
	bus_dma_tag_destroy(adapter->rxtag);
fail_0:
	adapter->rxtag = NULL;
	free(adapter->rx_buffer_area, M_DEVBUF);
	adapter->rx_buffer_area = NULL;
	return (error);
}

/*********************************************************************
 *
 *  Allocate and initialize receive structures.
* **********************************************************************/ static int em_setup_receive_structures(struct adapter * adapter) { bzero((void *) adapter->rx_desc_base, (sizeof(struct em_rx_desc)) * adapter->num_rx_desc); if (em_allocate_receive_structures(adapter)) return ENOMEM; /* Setup our descriptor pointers */ adapter->next_rx_desc_to_check = 0; return(0); } /********************************************************************* * * Enable receive unit. * **********************************************************************/ static void em_initialize_receive_unit(struct adapter * adapter) { u_int32_t reg_rctl; u_int32_t reg_rxcsum; struct ifnet *ifp; u_int64_t bus_addr; INIT_DEBUGOUT("em_initialize_receive_unit: begin"); ifp = adapter->ifp; /* Make sure receives are disabled while setting up the descriptor ring */ E1000_WRITE_REG(&adapter->hw, RCTL, 0); /* Set the Receive Delay Timer Register */ E1000_WRITE_REG(&adapter->hw, RDTR, adapter->rx_int_delay.value | E1000_RDT_FPDB); if(adapter->hw.mac_type >= em_82540) { E1000_WRITE_REG(&adapter->hw, RADV, adapter->rx_abs_int_delay.value); /* Set the interrupt throttling rate. 
Value is calculated * as DEFAULT_ITR = 1/(MAX_INTS_PER_SEC * 256ns) */ #define MAX_INTS_PER_SEC 8000 #define DEFAULT_ITR 1000000000/(MAX_INTS_PER_SEC * 256) E1000_WRITE_REG(&adapter->hw, ITR, DEFAULT_ITR); } /* Setup the Base and Length of the Rx Descriptor Ring */ bus_addr = adapter->rxdma.dma_paddr; E1000_WRITE_REG(&adapter->hw, RDBAL, (u_int32_t)bus_addr); E1000_WRITE_REG(&adapter->hw, RDBAH, (u_int32_t)(bus_addr >> 32)); E1000_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc * sizeof(struct em_rx_desc)); /* Setup the HW Rx Head and Tail Descriptor Pointers */ E1000_WRITE_REG(&adapter->hw, RDH, 0); E1000_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1); /* Setup the Receive Control Register */ reg_rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT); if (adapter->hw.tbi_compatibility_on == TRUE) reg_rctl |= E1000_RCTL_SBP; switch (adapter->rx_buffer_len) { default: case EM_RXBUFFER_2048: reg_rctl |= E1000_RCTL_SZ_2048; break; case EM_RXBUFFER_4096: reg_rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX | E1000_RCTL_LPE; break; case EM_RXBUFFER_8192: reg_rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX | E1000_RCTL_LPE; break; case EM_RXBUFFER_16384: reg_rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX | E1000_RCTL_LPE; break; } if (ifp->if_mtu > ETHERMTU) reg_rctl |= E1000_RCTL_LPE; /* Enable 82543 Receive Checksum Offload for TCP and UDP */ if ((adapter->hw.mac_type >= em_82543) && (ifp->if_capenable & IFCAP_RXCSUM)) { reg_rxcsum = E1000_READ_REG(&adapter->hw, RXCSUM); reg_rxcsum |= (E1000_RXCSUM_IPOFL | E1000_RXCSUM_TUOFL); E1000_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum); } /* Enable Receives */ E1000_WRITE_REG(&adapter->hw, RCTL, reg_rctl); return; } /********************************************************************* * * Free receive related data structures. 
* **********************************************************************/ static void em_free_receive_structures(struct adapter *adapter) { struct em_buffer *rx_buffer; int i; INIT_DEBUGOUT("free_receive_structures: begin"); if (adapter->rx_buffer_area != NULL) { rx_buffer = adapter->rx_buffer_area; for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) { if (rx_buffer->map != NULL) { bus_dmamap_unload(adapter->rxtag, rx_buffer->map); bus_dmamap_destroy(adapter->rxtag, rx_buffer->map); } if (rx_buffer->m_head != NULL) m_freem(rx_buffer->m_head); rx_buffer->m_head = NULL; } } if (adapter->rx_buffer_area != NULL) { free(adapter->rx_buffer_area, M_DEVBUF); adapter->rx_buffer_area = NULL; } if (adapter->rxtag != NULL) { bus_dma_tag_destroy(adapter->rxtag); adapter->rxtag = NULL; } return; } /********************************************************************* * * This routine executes in interrupt context. It replenishes * the mbufs in the descriptor and sends data which has been * dma'ed into host memory to upper layer. * * We loop at most count times if count is > 0, or until done if * count < 0. * *********************************************************************/ static void em_process_receive_interrupts(struct adapter * adapter, int count) { struct ifnet *ifp; struct mbuf *mp; #if __FreeBSD_version < 500000 struct ether_header *eh; #endif u_int8_t accept_frame = 0; u_int8_t eop = 0; u_int16_t len, desc_len, prev_len_adj; int i; /* Pointer to the receive descriptor being examined. 
*/ struct em_rx_desc *current_desc; mtx_assert(&adapter->mtx, MA_OWNED); ifp = adapter->ifp; i = adapter->next_rx_desc_to_check; current_desc = &adapter->rx_desc_base[i]; bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map, BUS_DMASYNC_POSTREAD); if (!((current_desc->status) & E1000_RXD_STAT_DD)) { return; } while ((current_desc->status & E1000_RXD_STAT_DD) && (count != 0)) { mp = adapter->rx_buffer_area[i].m_head; bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map, BUS_DMASYNC_POSTREAD); accept_frame = 1; prev_len_adj = 0; desc_len = le16toh(current_desc->length); if (current_desc->status & E1000_RXD_STAT_EOP) { count--; eop = 1; if (desc_len < ETHER_CRC_LEN) { len = 0; prev_len_adj = ETHER_CRC_LEN - desc_len; } else { len = desc_len - ETHER_CRC_LEN; } } else { eop = 0; len = desc_len; } if (current_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) { u_int8_t last_byte; u_int32_t pkt_len = desc_len; if (adapter->fmp != NULL) pkt_len += adapter->fmp->m_pkthdr.len; last_byte = *(mtod(mp, caddr_t) + desc_len - 1); if (TBI_ACCEPT(&adapter->hw, current_desc->status, current_desc->errors, pkt_len, last_byte)) { em_tbi_adjust_stats(&adapter->hw, &adapter->stats, pkt_len, adapter->hw.mac_addr); if (len > 0) len--; } else { accept_frame = 0; } } if (accept_frame) { if (em_get_buf(i, adapter, NULL) == ENOBUFS) { adapter->dropped_pkts++; em_get_buf(i, adapter, mp); if (adapter->fmp != NULL) m_freem(adapter->fmp); adapter->fmp = NULL; adapter->lmp = NULL; break; } /* Assign correct length to the current fragment */ mp->m_len = len; if (adapter->fmp == NULL) { mp->m_pkthdr.len = len; adapter->fmp = mp; /* Store the first mbuf */ adapter->lmp = mp; } else { /* Chain mbuf's together */ mp->m_flags &= ~M_PKTHDR; /* * Adjust length of previous mbuf in chain if we * received less than 4 bytes in the last descriptor. 
*/ if (prev_len_adj > 0) { adapter->lmp->m_len -= prev_len_adj; adapter->fmp->m_pkthdr.len -= prev_len_adj; } adapter->lmp->m_next = mp; adapter->lmp = adapter->lmp->m_next; adapter->fmp->m_pkthdr.len += len; } if (eop) { adapter->fmp->m_pkthdr.rcvif = ifp; ifp->if_ipackets++; #if __FreeBSD_version < 500000 eh = mtod(adapter->fmp, struct ether_header *); /* Remove ethernet header from mbuf */ m_adj(adapter->fmp, sizeof(struct ether_header)); em_receive_checksum(adapter, current_desc, adapter->fmp); if (current_desc->status & E1000_RXD_STAT_VP) VLAN_INPUT_TAG(eh, adapter->fmp, (current_desc->special & E1000_RXD_SPC_VLAN_MASK)); else ether_input(ifp, eh, adapter->fmp); #else em_receive_checksum(adapter, current_desc, adapter->fmp); if (current_desc->status & E1000_RXD_STAT_VP) VLAN_INPUT_TAG(ifp, adapter->fmp, (current_desc->special & E1000_RXD_SPC_VLAN_MASK), adapter->fmp = NULL); if (adapter->fmp != NULL) { EM_UNLOCK(adapter); (*ifp->if_input)(ifp, adapter->fmp); EM_LOCK(adapter); } #endif adapter->fmp = NULL; adapter->lmp = NULL; } } else { adapter->dropped_pkts++; em_get_buf(i, adapter, mp); if (adapter->fmp != NULL) m_freem(adapter->fmp); adapter->fmp = NULL; adapter->lmp = NULL; } /* Zero out the receive descriptors status */ current_desc->status = 0; /* Advance the E1000's Receive Queue #0 "Tail Pointer". */ E1000_WRITE_REG(&adapter->hw, RDT, i); /* Advance our pointers to the next descriptor */ if (++i == adapter->num_rx_desc) { i = 0; current_desc = adapter->rx_desc_base; } else current_desc++; } bus_dmamap_sync(adapter->rxdma.dma_tag, adapter->rxdma.dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); adapter->next_rx_desc_to_check = i; return; } /********************************************************************* * * Verify that the hardware indicated that the checksum is valid. * Inform the stack about the status of checksum so that stack * doesn't spend time verifying the checksum. 
* *********************************************************************/ static void em_receive_checksum(struct adapter *adapter, struct em_rx_desc *rx_desc, struct mbuf *mp) { /* 82543 or newer only */ if ((adapter->hw.mac_type < em_82543) || /* Ignore Checksum bit is set */ (rx_desc->status & E1000_RXD_STAT_IXSM)) { mp->m_pkthdr.csum_flags = 0; return; } if (rx_desc->status & E1000_RXD_STAT_IPCS) { /* Did it pass? */ if (!(rx_desc->errors & E1000_RXD_ERR_IPE)) { /* IP Checksum Good */ mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED; mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; } else { mp->m_pkthdr.csum_flags = 0; } } if (rx_desc->status & E1000_RXD_STAT_TCPCS) { /* Did it pass? */ if (!(rx_desc->errors & E1000_RXD_ERR_TCPE)) { mp->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); mp->m_pkthdr.csum_data = htons(0xffff); } } return; } static void em_enable_vlans(struct adapter *adapter) { uint32_t ctrl; E1000_WRITE_REG(&adapter->hw, VET, ETHERTYPE_VLAN); ctrl = E1000_READ_REG(&adapter->hw, CTRL); ctrl |= E1000_CTRL_VME; E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); return; } static void em_disable_vlans(struct adapter *adapter) { uint32_t ctrl; ctrl = E1000_READ_REG(&adapter->hw, CTRL); ctrl &= ~E1000_CTRL_VME; E1000_WRITE_REG(&adapter->hw, CTRL, ctrl); return; } static void em_enable_intr(struct adapter * adapter) { E1000_WRITE_REG(&adapter->hw, IMS, (IMS_ENABLE_MASK)); return; } static void em_disable_intr(struct adapter *adapter) { /* * The first version of 82542 had an errata where when link was forced it * would stay up even up even if the cable was disconnected. Sequence errors * were used to detect the disconnect and then the driver would unforce the link. * This code in the in the ISR. For this to work correctly the Sequence error * interrupt had to be enabled all the time. 
*/ if (adapter->hw.mac_type == em_82542_rev2_0) E1000_WRITE_REG(&adapter->hw, IMC, (0xffffffff & ~E1000_IMC_RXSEQ)); else E1000_WRITE_REG(&adapter->hw, IMC, 0xffffffff); return; } static int em_is_valid_ether_addr(u_int8_t *addr) { char zero_addr[6] = { 0, 0, 0, 0, 0, 0 }; if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) { return (FALSE); } return(TRUE); } void em_write_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value) { pci_write_config(((struct em_osdep *)hw->back)->dev, reg, *value, 2); } void em_read_pci_cfg(struct em_hw *hw, uint32_t reg, uint16_t *value) { *value = pci_read_config(((struct em_osdep *)hw->back)->dev, reg, 2); return; } void em_pci_set_mwi(struct em_hw *hw) { pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND, (hw->pci_cmd_word | CMD_MEM_WRT_INVALIDATE), 2); return; } void em_pci_clear_mwi(struct em_hw *hw) { pci_write_config(((struct em_osdep *)hw->back)->dev, PCIR_COMMAND, (hw->pci_cmd_word & ~CMD_MEM_WRT_INVALIDATE), 2); return; } uint32_t em_io_read(struct em_hw *hw, unsigned long port) { return(inl(port)); } void em_io_write(struct em_hw *hw, unsigned long port, uint32_t value) { outl(port, value); return; } /********************************************************************* * 82544 Coexistence issue workaround. * There are 2 issues. * 1. Transmit Hang issue. * To detect this issue, following equation can be used... * SIZE[3:0] + ADDR[2:0] = SUM[3:0]. * If SUM[3:0] is in between 1 to 4, we will have this issue. * * 2. DAC issue. * To detect this issue, following equation can be used... * SIZE[3:0] + ADDR[2:0] = SUM[3:0]. * If SUM[3:0] is in between 9 to c, we will have this issue. 
*
 *
 *  WORKAROUND:
 *   Make sure we do not have ending address as 1,2,3,4(Hang) or 9,a,b,c (DAC)
 *
 *** *********************************************************************/
static u_int32_t
em_fill_descriptors (u_int64_t address,
		     u_int32_t length,
		     PDESC_ARRAY desc_array)
{
	u_int32_t safe_terminator;

	/*
	 * Buffers of 4 bytes or less cannot trigger either erratum;
	 * hand them back unchanged in a single descriptor.
	 */
	if (length <= 4) {
		desc_array->descriptor[0].address = address;
		desc_array->descriptor[0].length = length;
		desc_array->elements = 1;
		return (desc_array->elements);
	}

	/* Low nibble of the ending address: SIZE[3:0] + ADDR[2:0]. */
	safe_terminator =
	    (u_int32_t)((((u_int32_t)address & 0x7) + (length & 0xF)) & 0xF);

	/* Terminators of 0, 5-8 and d-f are safe: one descriptor suffices. */
	if (safe_terminator == 0 ||
	    (safe_terminator > 4 && safe_terminator < 9) ||
	    (safe_terminator > 0xC && safe_terminator <= 0xF)) {
		desc_array->descriptor[0].address = address;
		desc_array->descriptor[0].length = length;
		desc_array->elements = 1;
		return (desc_array->elements);
	}

	/* Unsafe: split off the final 4 bytes into a second descriptor. */
	desc_array->descriptor[0].address = address;
	desc_array->descriptor[0].length = length - 4;
	desc_array->descriptor[1].address = address + (length - 4);
	desc_array->descriptor[1].length = 4;
	desc_array->elements = 2;
	return (desc_array->elements);
}

/**********************************************************************
 *
 *  Update the board statistics counters.
* **********************************************************************/ static void em_update_stats_counters(struct adapter *adapter) { struct ifnet *ifp; if(adapter->hw.media_type == em_media_type_copper || (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) { adapter->stats.symerrs += E1000_READ_REG(&adapter->hw, SYMERRS); adapter->stats.sec += E1000_READ_REG(&adapter->hw, SEC); } adapter->stats.crcerrs += E1000_READ_REG(&adapter->hw, CRCERRS); adapter->stats.mpc += E1000_READ_REG(&adapter->hw, MPC); adapter->stats.scc += E1000_READ_REG(&adapter->hw, SCC); adapter->stats.ecol += E1000_READ_REG(&adapter->hw, ECOL); adapter->stats.mcc += E1000_READ_REG(&adapter->hw, MCC); adapter->stats.latecol += E1000_READ_REG(&adapter->hw, LATECOL); adapter->stats.colc += E1000_READ_REG(&adapter->hw, COLC); adapter->stats.dc += E1000_READ_REG(&adapter->hw, DC); adapter->stats.rlec += E1000_READ_REG(&adapter->hw, RLEC); adapter->stats.xonrxc += E1000_READ_REG(&adapter->hw, XONRXC); adapter->stats.xontxc += E1000_READ_REG(&adapter->hw, XONTXC); adapter->stats.xoffrxc += E1000_READ_REG(&adapter->hw, XOFFRXC); adapter->stats.xofftxc += E1000_READ_REG(&adapter->hw, XOFFTXC); adapter->stats.fcruc += E1000_READ_REG(&adapter->hw, FCRUC); adapter->stats.prc64 += E1000_READ_REG(&adapter->hw, PRC64); adapter->stats.prc127 += E1000_READ_REG(&adapter->hw, PRC127); adapter->stats.prc255 += E1000_READ_REG(&adapter->hw, PRC255); adapter->stats.prc511 += E1000_READ_REG(&adapter->hw, PRC511); adapter->stats.prc1023 += E1000_READ_REG(&adapter->hw, PRC1023); adapter->stats.prc1522 += E1000_READ_REG(&adapter->hw, PRC1522); adapter->stats.gprc += E1000_READ_REG(&adapter->hw, GPRC); adapter->stats.bprc += E1000_READ_REG(&adapter->hw, BPRC); adapter->stats.mprc += E1000_READ_REG(&adapter->hw, MPRC); adapter->stats.gptc += E1000_READ_REG(&adapter->hw, GPTC); /* For the 64-bit byte counters the low dword must be read first. 
*/ /* Both registers clear on the read of the high dword */ adapter->stats.gorcl += E1000_READ_REG(&adapter->hw, GORCL); adapter->stats.gorch += E1000_READ_REG(&adapter->hw, GORCH); adapter->stats.gotcl += E1000_READ_REG(&adapter->hw, GOTCL); adapter->stats.gotch += E1000_READ_REG(&adapter->hw, GOTCH); adapter->stats.rnbc += E1000_READ_REG(&adapter->hw, RNBC); adapter->stats.ruc += E1000_READ_REG(&adapter->hw, RUC); adapter->stats.rfc += E1000_READ_REG(&adapter->hw, RFC); adapter->stats.roc += E1000_READ_REG(&adapter->hw, ROC); adapter->stats.rjc += E1000_READ_REG(&adapter->hw, RJC); adapter->stats.torl += E1000_READ_REG(&adapter->hw, TORL); adapter->stats.torh += E1000_READ_REG(&adapter->hw, TORH); adapter->stats.totl += E1000_READ_REG(&adapter->hw, TOTL); adapter->stats.toth += E1000_READ_REG(&adapter->hw, TOTH); adapter->stats.tpr += E1000_READ_REG(&adapter->hw, TPR); adapter->stats.tpt += E1000_READ_REG(&adapter->hw, TPT); adapter->stats.ptc64 += E1000_READ_REG(&adapter->hw, PTC64); adapter->stats.ptc127 += E1000_READ_REG(&adapter->hw, PTC127); adapter->stats.ptc255 += E1000_READ_REG(&adapter->hw, PTC255); adapter->stats.ptc511 += E1000_READ_REG(&adapter->hw, PTC511); adapter->stats.ptc1023 += E1000_READ_REG(&adapter->hw, PTC1023); adapter->stats.ptc1522 += E1000_READ_REG(&adapter->hw, PTC1522); adapter->stats.mptc += E1000_READ_REG(&adapter->hw, MPTC); adapter->stats.bptc += E1000_READ_REG(&adapter->hw, BPTC); if (adapter->hw.mac_type >= em_82543) { adapter->stats.algnerrc += E1000_READ_REG(&adapter->hw, ALGNERRC); adapter->stats.rxerrc += E1000_READ_REG(&adapter->hw, RXERRC); adapter->stats.tncrs += E1000_READ_REG(&adapter->hw, TNCRS); adapter->stats.cexterr += E1000_READ_REG(&adapter->hw, CEXTERR); adapter->stats.tsctc += E1000_READ_REG(&adapter->hw, TSCTC); adapter->stats.tsctfc += E1000_READ_REG(&adapter->hw, TSCTFC); } ifp = adapter->ifp; /* Fill out the OS statistics structure */ ifp->if_ibytes = adapter->stats.gorcl; ifp->if_obytes = 
adapter->stats.gotcl; ifp->if_imcasts = adapter->stats.mprc; ifp->if_collisions = adapter->stats.colc; /* Rx Errors */ ifp->if_ierrors = adapter->dropped_pkts + adapter->stats.rxerrc + adapter->stats.crcerrs + adapter->stats.algnerrc + adapter->stats.rlec + adapter->stats.mpc + adapter->stats.cexterr; /* Tx Errors */ ifp->if_oerrors = adapter->stats.ecol + adapter->stats.latecol; } /********************************************************************** * * This routine is called only when em_display_debug_stats is enabled. * This routine provides a way to take a look at important statistics * maintained by the driver and hardware. * **********************************************************************/ static void em_print_debug_info(struct adapter *adapter) { int unit = adapter->unit; uint8_t *hw_addr = adapter->hw.hw_addr; printf("em%d: Adapter hardware address = %p \n", unit, hw_addr); printf("em%d:CTRL = 0x%x\n", unit, E1000_READ_REG(&adapter->hw, CTRL)); printf("em%d:RCTL = 0x%x PS=(0x8402)\n", unit, E1000_READ_REG(&adapter->hw, RCTL)); printf("em%d:tx_int_delay = %d, tx_abs_int_delay = %d\n", unit, E1000_READ_REG(&adapter->hw, TIDV), E1000_READ_REG(&adapter->hw, TADV)); printf("em%d:rx_int_delay = %d, rx_abs_int_delay = %d\n", unit, E1000_READ_REG(&adapter->hw, RDTR), E1000_READ_REG(&adapter->hw, RADV)); printf("em%d: fifo workaround = %lld, fifo_reset = %lld\n", unit, (long long)adapter->tx_fifo_wrk_cnt, (long long)adapter->tx_fifo_reset_cnt); printf("em%d: hw tdh = %d, hw tdt = %d\n", unit, E1000_READ_REG(&adapter->hw, TDH), E1000_READ_REG(&adapter->hw, TDT)); printf("em%d: Num Tx descriptors avail = %d\n", unit, adapter->num_tx_desc_avail); printf("em%d: Tx Descriptors not avail1 = %ld\n", unit, adapter->no_tx_desc_avail1); printf("em%d: Tx Descriptors not avail2 = %ld\n", unit, adapter->no_tx_desc_avail2); printf("em%d: Std mbuf failed = %ld\n", unit, adapter->mbuf_alloc_failed); printf("em%d: Std mbuf cluster failed = %ld\n", unit, 
adapter->mbuf_cluster_failed); printf("em%d: Driver dropped packets = %ld\n", unit, adapter->dropped_pkts); return; } static void em_print_hw_stats(struct adapter *adapter) { int unit = adapter->unit; printf("em%d: Excessive collisions = %lld\n", unit, (long long)adapter->stats.ecol); printf("em%d: Symbol errors = %lld\n", unit, (long long)adapter->stats.symerrs); printf("em%d: Sequence errors = %lld\n", unit, (long long)adapter->stats.sec); printf("em%d: Defer count = %lld\n", unit, (long long)adapter->stats.dc); printf("em%d: Missed Packets = %lld\n", unit, (long long)adapter->stats.mpc); printf("em%d: Receive No Buffers = %lld\n", unit, (long long)adapter->stats.rnbc); printf("em%d: Receive length errors = %lld\n", unit, (long long)adapter->stats.rlec); printf("em%d: Receive errors = %lld\n", unit, (long long)adapter->stats.rxerrc); printf("em%d: Crc errors = %lld\n", unit, (long long)adapter->stats.crcerrs); printf("em%d: Alignment errors = %lld\n", unit, (long long)adapter->stats.algnerrc); printf("em%d: Carrier extension errors = %lld\n", unit, (long long)adapter->stats.cexterr); printf("em%d: XON Rcvd = %lld\n", unit, (long long)adapter->stats.xonrxc); printf("em%d: XON Xmtd = %lld\n", unit, (long long)adapter->stats.xontxc); printf("em%d: XOFF Rcvd = %lld\n", unit, (long long)adapter->stats.xoffrxc); printf("em%d: XOFF Xmtd = %lld\n", unit, (long long)adapter->stats.xofftxc); printf("em%d: Good Packets Rcvd = %lld\n", unit, (long long)adapter->stats.gprc); printf("em%d: Good Packets Xmtd = %lld\n", unit, (long long)adapter->stats.gptc); return; } static int em_sysctl_debug_info(SYSCTL_HANDLER_ARGS) { int error; int result; struct adapter *adapter; result = -1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { adapter = (struct adapter *)arg1; em_print_debug_info(adapter); } return error; } static int em_sysctl_stats(SYSCTL_HANDLER_ARGS) { int error; int result; struct adapter *adapter; result = 
-1; error = sysctl_handle_int(oidp, &result, 0, req); if (error || !req->newptr) return (error); if (result == 1) { adapter = (struct adapter *)arg1; em_print_hw_stats(adapter); } return error; } static int em_sysctl_int_delay(SYSCTL_HANDLER_ARGS) { struct em_int_delay_info *info; struct adapter *adapter; u_int32_t regval; int error; int usecs; int ticks; int s; info = (struct em_int_delay_info *)arg1; adapter = info->adapter; usecs = info->value; error = sysctl_handle_int(oidp, &usecs, 0, req); if (error != 0 || req->newptr == NULL) return error; if (usecs < 0 || usecs > E1000_TICKS_TO_USECS(65535)) return EINVAL; info->value = usecs; ticks = E1000_USECS_TO_TICKS(usecs); s = splimp(); regval = E1000_READ_OFFSET(&adapter->hw, info->offset); regval = (regval & ~0xffff) | (ticks & 0xffff); /* Handle a few special cases. */ switch (info->offset) { case E1000_RDTR: case E1000_82542_RDTR: regval |= E1000_RDT_FPDB; break; case E1000_TIDV: case E1000_82542_TIDV: if (ticks == 0) { adapter->txd_cmd &= ~E1000_TXD_CMD_IDE; /* Don't write 0 into the TIDV register. */ regval++; } else adapter->txd_cmd |= E1000_TXD_CMD_IDE; break; } E1000_WRITE_OFFSET(&adapter->hw, info->offset, regval); splx(s); return 0; } static void em_add_int_delay_sysctl(struct adapter *adapter, const char *name, const char *description, struct em_int_delay_info *info, int offset, int value) { info->adapter = adapter; info->offset = offset; info->value = value; SYSCTL_ADD_PROC(device_get_sysctl_ctx(adapter->dev), SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, info, 0, em_sysctl_int_delay, "I", description); } Index: stable/6/sys/dev/ex/if_ex.c =================================================================== --- stable/6/sys/dev/ex/if_ex.c (revision 149421) +++ stable/6/sys/dev/ex/if_ex.c (revision 149422) @@ -1,1047 +1,1051 @@ /*- * Copyright (c) 1996, Javier Martín Rueda (jmrueda@diatel.upm.es) * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * * MAINTAINER: Matthew N. Dodd * */ #include __FBSDID("$FreeBSD$"); /* * Intel EtherExpress Pro/10, Pro/10+ Ethernet driver * * Revision history: * * dd-mmm-yyyy: Multicast support ported from NetBSD's if_iy driver. * 30-Oct-1996: first beta version. Inet and BPF supported, but no multicast. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef EXDEBUG # define Start_End 1 # define Rcvd_Pkts 2 # define Sent_Pkts 4 # define Status 8 static int debug_mask = 0; # define DODEBUG(level, action) if (level & debug_mask) action #else # define DODEBUG(level, action) #endif devclass_t ex_devclass; char irq2eemap[] = { -1, -1, 0, 1, -1, 2, -1, -1, -1, 0, 3, 4, -1, -1, -1, -1 }; u_char ee2irqmap[] = { 9, 3, 5, 10, 11, 0, 0, 0 }; char plus_irq2eemap[] = { -1, -1, -1, 0, 1, 2, -1, 3, -1, 4, 5, 6, 7, -1, -1, -1 }; u_char plus_ee2irqmap[] = { 3, 4, 5, 7, 9, 10, 11, 12 }; /* Network Interface Functions */ static void ex_init(void *); static void ex_start(struct ifnet *); static int ex_ioctl(struct ifnet *, u_long, caddr_t); static void ex_watchdog(struct ifnet *); /* ifmedia Functions */ static int ex_ifmedia_upd(struct ifnet *); static void ex_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int ex_get_media(struct ex_softc *); static void ex_reset(struct ex_softc *); static void ex_setmulti(struct ex_softc *); static void ex_tx_intr(struct ex_softc *); static void ex_rx_intr(struct ex_softc *); void ex_get_address(struct ex_softc *sc, u_char *enaddr) { uint16_t eaddr_tmp; eaddr_tmp = ex_eeprom_read(sc, EE_Eth_Addr_Lo); enaddr[5] = eaddr_tmp & 0xff; enaddr[4] = eaddr_tmp >> 8; eaddr_tmp = ex_eeprom_read(sc, EE_Eth_Addr_Mid); enaddr[3] = eaddr_tmp & 0xff; enaddr[2] = eaddr_tmp >> 8; eaddr_tmp = ex_eeprom_read(sc, EE_Eth_Addr_Hi); enaddr[1] = eaddr_tmp & 0xff; enaddr[0] = eaddr_tmp >> 8; return; } int ex_card_type(u_char *enaddr) { if ((enaddr[0] == 0x00) && (enaddr[1] == 0xA0) && (enaddr[2] == 0xC9)) return (CARD_TYPE_EX_10_PLUS); return (CARD_TYPE_EX_10); } /* * Caller is responsible for eventually calling * ex_release_resources() on failure. 
*/ int ex_alloc_resources(device_t dev) { struct ex_softc * sc = device_get_softc(dev); int error = 0; sc->ioport = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &sc->ioport_rid, RF_ACTIVE); if (!sc->ioport) { device_printf(dev, "No I/O space?!\n"); error = ENOMEM; goto bad; } sc->bst = rman_get_bustag(sc->ioport); sc->bsh = rman_get_bushandle(sc->ioport); sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE); if (!sc->irq) { device_printf(dev, "No IRQ?!\n"); error = ENOMEM; goto bad; } bad: return (error); } void ex_release_resources(device_t dev) { struct ex_softc * sc = device_get_softc(dev); if (sc->ih) { bus_teardown_intr(dev, sc->irq, sc->ih); sc->ih = NULL; } if (sc->ioport) { bus_release_resource(dev, SYS_RES_IOPORT, sc->ioport_rid, sc->ioport); sc->ioport = NULL; } if (sc->irq) { bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq); sc->irq = NULL; } return; } int ex_attach(device_t dev) { struct ex_softc * sc = device_get_softc(dev); struct ifnet * ifp; struct ifmedia * ifm; uint16_t temp; ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); return (ENOSPC); } /* work out which set of irq <-> internal tables to use */ if (ex_card_type(sc->enaddr) == CARD_TYPE_EX_10_PLUS) { sc->irq2ee = plus_irq2eemap; sc->ee2irq = plus_ee2irqmap; } else { sc->irq2ee = irq2eemap; sc->ee2irq = ee2irqmap; } sc->mem_size = CARD_RAM_SIZE; /* XXX This should be read from the card itself. */ /* * Initialize the ifnet structure. 
*/ ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST | IFF_NEEDSGIANT; ifp->if_start = ex_start; ifp->if_ioctl = ex_ioctl; ifp->if_watchdog = ex_watchdog; ifp->if_init = ex_init; ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; ifmedia_init(&sc->ifmedia, 0, ex_ifmedia_upd, ex_ifmedia_sts); temp = ex_eeprom_read(sc, EE_W5); if (temp & EE_W5_PORT_TPE) ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); if (temp & EE_W5_PORT_BNC) ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL); if (temp & EE_W5_PORT_AUI) ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_NONE, 0, NULL); ifmedia_set(&sc->ifmedia, ex_get_media(sc)); ifm = &sc->ifmedia; ifm->ifm_media = ifm->ifm_cur->ifm_media; ex_ifmedia_upd(ifp); /* * Attach the interface. */ ether_ifattach(ifp, sc->enaddr); return(0); } int ex_detach(device_t dev) { struct ex_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->ifp; ex_stop(sc); ifp->if_flags &= ~IFF_RUNNING; ether_ifdetach(ifp); if_free(ifp); ex_release_resources(dev); return (0); } static void ex_init(void *xsc) { struct ex_softc * sc = (struct ex_softc *) xsc; struct ifnet * ifp = sc->ifp; int s; int i; unsigned short temp_reg; DODEBUG(Start_End, printf("%s: ex_init: start\n", ifp->if_xname);); s = splimp(); ifp->if_timer = 0; /* * Load the ethernet address into the card. */ CSR_WRITE_1(sc, CMD_REG, Bank2_Sel); temp_reg = CSR_READ_1(sc, EEPROM_REG); if (temp_reg & Trnoff_Enable) { CSR_WRITE_1(sc, EEPROM_REG, temp_reg & ~Trnoff_Enable); } for (i = 0; i < ETHER_ADDR_LEN; i++) { CSR_WRITE_1(sc, I_ADDR_REG0 + i, IFP2ENADDR(sc->ifp)[i]); } /* * - Setup transmit chaining and discard bad received frames. * - Match broadcast. * - Clear test mode. * - Set receiving mode. * - Set IRQ number. 
*/ CSR_WRITE_1(sc, REG1, CSR_READ_1(sc, REG1) | Tx_Chn_Int_Md | Tx_Chn_ErStp | Disc_Bad_Fr); CSR_WRITE_1(sc, REG2, CSR_READ_1(sc, REG2) | No_SA_Ins | RX_CRC_InMem); CSR_WRITE_1(sc, REG3, CSR_READ_1(sc, REG3) & 0x3f /* XXX constants. */ ); CSR_WRITE_1(sc, CMD_REG, Bank1_Sel); CSR_WRITE_1(sc, INT_NO_REG, (CSR_READ_1(sc, INT_NO_REG) & 0xf8) | sc->irq2ee[sc->irq_no]); /* * Divide the available memory in the card into rcv and xmt buffers. * By default, I use the first 3/4 of the memory for the rcv buffer, * and the remaining 1/4 of the memory for the xmt buffer. */ sc->rx_mem_size = sc->mem_size * 3 / 4; sc->tx_mem_size = sc->mem_size - sc->rx_mem_size; sc->rx_lower_limit = 0x0000; sc->rx_upper_limit = sc->rx_mem_size - 2; sc->tx_lower_limit = sc->rx_mem_size; sc->tx_upper_limit = sc->mem_size - 2; CSR_WRITE_1(sc, RCV_LOWER_LIMIT_REG, sc->rx_lower_limit >> 8); CSR_WRITE_1(sc, RCV_UPPER_LIMIT_REG, sc->rx_upper_limit >> 8); CSR_WRITE_1(sc, XMT_LOWER_LIMIT_REG, sc->tx_lower_limit >> 8); CSR_WRITE_1(sc, XMT_UPPER_LIMIT_REG, sc->tx_upper_limit >> 8); /* * Enable receive and transmit interrupts, and clear any pending int. */ CSR_WRITE_1(sc, REG1, CSR_READ_1(sc, REG1) | TriST_INT); CSR_WRITE_1(sc, CMD_REG, Bank0_Sel); CSR_WRITE_1(sc, MASK_REG, All_Int & ~(Rx_Int | Tx_Int)); CSR_WRITE_1(sc, STATUS_REG, All_Int); /* * Initialize receive and transmit ring buffers. */ CSR_WRITE_2(sc, RCV_BAR, sc->rx_lower_limit); sc->rx_head = sc->rx_lower_limit; CSR_WRITE_2(sc, RCV_STOP_REG, sc->rx_upper_limit | 0xfe); CSR_WRITE_2(sc, XMT_BAR, sc->tx_lower_limit); sc->tx_head = sc->tx_tail = sc->tx_lower_limit; ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; DODEBUG(Status, printf("OIDLE init\n");); ex_setmulti(sc); /* * Final reset of the board, and enable operation. 
*/ CSR_WRITE_1(sc, CMD_REG, Sel_Reset_CMD); DELAY(2); CSR_WRITE_1(sc, CMD_REG, Rcv_Enable_CMD); ex_start(ifp); splx(s); DODEBUG(Start_End, printf("%s: ex_init: finish\n", ifp->if_xname);); } static void ex_start(struct ifnet *ifp) { struct ex_softc * sc = ifp->if_softc; int i, s, len, data_len, avail, dest, next; unsigned char tmp16[2]; struct mbuf * opkt; struct mbuf * m; DODEBUG(Start_End, printf("ex_start%d: start\n", unit);); s = splimp(); /* * Main loop: send outgoing packets to network card until there are no * more packets left, or the card cannot accept any more yet. */ while (((opkt = ifp->if_snd.ifq_head) != NULL) && !(ifp->if_flags & IFF_OACTIVE)) { /* * Ensure there is enough free transmit buffer space for * this packet, including its header. Note: the header * cannot wrap around the end of the transmit buffer and * must be kept together, so we allow space for twice the * length of the header, just in case. */ for (len = 0, m = opkt; m != NULL; m = m->m_next) { len += m->m_len; } data_len = len; DODEBUG(Sent_Pkts, printf("1. Sending packet with %d data bytes. ", data_len);); if (len & 1) { len += XMT_HEADER_LEN + 1; } else { len += XMT_HEADER_LEN; } if ((i = sc->tx_tail - sc->tx_head) >= 0) { avail = sc->tx_mem_size - i; } else { avail = -i; } DODEBUG(Sent_Pkts, printf("i=%d, avail=%d\n", i, avail);); if (avail >= len + XMT_HEADER_LEN) { IF_DEQUEUE(&ifp->if_snd, opkt); #ifdef EX_PSA_INTR /* * Disable rx and tx interrupts, to avoid corruption * of the host address register by interrupt service * routines. * XXX Is this necessary with splimp() enabled? */ CSR_WRITE_1(sc, MASK_REG, All_Int); #endif /* * Compute the start and end addresses of this * frame in the tx buffer. 
*/ dest = sc->tx_tail; next = dest + len; if (next > sc->tx_upper_limit) { if ((sc->tx_upper_limit + 2 - sc->tx_tail) <= XMT_HEADER_LEN) { dest = sc->tx_lower_limit; next = dest + len; } else { next = sc->tx_lower_limit + next - sc->tx_upper_limit - 2; } } /* * Build the packet frame in the card's ring buffer. */ DODEBUG(Sent_Pkts, printf("2. dest=%d, next=%d. ", dest, next);); CSR_WRITE_2(sc, HOST_ADDR_REG, dest); CSR_WRITE_2(sc, IO_PORT_REG, Transmit_CMD); CSR_WRITE_2(sc, IO_PORT_REG, 0); CSR_WRITE_2(sc, IO_PORT_REG, next); CSR_WRITE_2(sc, IO_PORT_REG, data_len); /* * Output the packet data to the card. Ensure all * transfers are 16-bit wide, even if individual * mbufs have odd length. */ for (m = opkt, i = 0; m != NULL; m = m->m_next) { DODEBUG(Sent_Pkts, printf("[%d]", m->m_len);); if (i) { tmp16[1] = *(mtod(m, caddr_t)); CSR_WRITE_MULTI_2(sc, IO_PORT_REG, (uint16_t *) tmp16, 1); } CSR_WRITE_MULTI_2(sc, IO_PORT_REG, (uint16_t *) (mtod(m, caddr_t) + i), (m->m_len - i) / 2); if ((i = (m->m_len - i) & 1) != 0) { tmp16[0] = *(mtod(m, caddr_t) + m->m_len - 1); } } if (i) CSR_WRITE_MULTI_2(sc, IO_PORT_REG, (uint16_t *) tmp16, 1); /* * If there were other frames chained, update the * chain in the last one. */ if (sc->tx_head != sc->tx_tail) { if (sc->tx_tail != dest) { CSR_WRITE_2(sc, HOST_ADDR_REG, sc->tx_last + XMT_Chain_Point); CSR_WRITE_2(sc, IO_PORT_REG, dest); } CSR_WRITE_2(sc, HOST_ADDR_REG, sc->tx_last + XMT_Byte_Count); i = CSR_READ_2(sc, IO_PORT_REG); CSR_WRITE_2(sc, HOST_ADDR_REG, sc->tx_last + XMT_Byte_Count); CSR_WRITE_2(sc, IO_PORT_REG, i | Ch_bit); } /* * Resume normal operation of the card: * - Make a dummy read to flush the DRAM write * pipeline. * - Enable receive and transmit interrupts. * - Send Transmit or Resume_XMT command, as * appropriate. 
*/ CSR_READ_2(sc, IO_PORT_REG); #ifdef EX_PSA_INTR CSR_WRITE_1(sc, MASK_REG, All_Int & ~(Rx_Int | Tx_Int)); #endif if (sc->tx_head == sc->tx_tail) { CSR_WRITE_2(sc, XMT_BAR, dest); CSR_WRITE_1(sc, CMD_REG, Transmit_CMD); sc->tx_head = dest; DODEBUG(Sent_Pkts, printf("Transmit\n");); } else { CSR_WRITE_1(sc, CMD_REG, Resume_XMT_List_CMD); DODEBUG(Sent_Pkts, printf("Resume\n");); } sc->tx_last = dest; sc->tx_tail = next; BPF_MTAP(ifp, opkt); ifp->if_timer = 2; ifp->if_opackets++; m_freem(opkt); } else { ifp->if_flags |= IFF_OACTIVE; DODEBUG(Status, printf("OACTIVE start\n");); } } splx(s); DODEBUG(Start_End, printf("ex_start%d: finish\n", unit);); } void ex_stop(struct ex_softc *sc) { DODEBUG(Start_End, printf("ex_stop%d: start\n", unit);); /* * Disable card operation: * - Disable the interrupt line. * - Flush transmission and disable reception. * - Mask and clear all interrupts. * - Reset the 82595. */ CSR_WRITE_1(sc, CMD_REG, Bank1_Sel); CSR_WRITE_1(sc, REG1, CSR_READ_1(sc, REG1) & ~TriST_INT); CSR_WRITE_1(sc, CMD_REG, Bank0_Sel); CSR_WRITE_1(sc, CMD_REG, Rcv_Stop); sc->tx_head = sc->tx_tail = sc->tx_lower_limit; sc->tx_last = 0; /* XXX I think these two lines are not necessary, because ex_init will always be called again to reinit the interface. 
*/ CSR_WRITE_1(sc, MASK_REG, All_Int); CSR_WRITE_1(sc, STATUS_REG, All_Int); CSR_WRITE_1(sc, CMD_REG, Reset_CMD); DELAY(200); DODEBUG(Start_End, printf("ex_stop%d: finish\n", unit);); return; } void ex_intr(void *arg) { struct ex_softc *sc = (struct ex_softc *)arg; struct ifnet *ifp = sc->ifp; int int_status, send_pkts; int loops = 100; DODEBUG(Start_End, printf("ex_intr%d: start\n", unit);); send_pkts = 0; while (loops-- > 0 && (int_status = CSR_READ_1(sc, STATUS_REG)) & (Tx_Int | Rx_Int)) { /* don't loop forever */ if (int_status == 0xff) break; if (int_status & Rx_Int) { CSR_WRITE_1(sc, STATUS_REG, Rx_Int); ex_rx_intr(sc); } else if (int_status & Tx_Int) { CSR_WRITE_1(sc, STATUS_REG, Tx_Int); ex_tx_intr(sc); send_pkts = 1; } } if (loops == 0) printf("100 loops are not enough\n"); /* * If any packet has been transmitted, and there are queued packets to * be sent, attempt to send more packets to the network card. */ if (send_pkts && (ifp->if_snd.ifq_head != NULL)) ex_start(ifp); DODEBUG(Start_End, printf("ex_intr%d: finish\n", unit);); return; } static void ex_tx_intr(struct ex_softc *sc) { struct ifnet * ifp = sc->ifp; int tx_status; DODEBUG(Start_End, printf("ex_tx_intr%d: start\n", unit);); /* * - Cancel the watchdog. * For all packets transmitted since last transmit interrupt: * - Advance chain pointer to next queued packet. * - Update statistics. */ ifp->if_timer = 0; while (sc->tx_head != sc->tx_tail) { CSR_WRITE_2(sc, HOST_ADDR_REG, sc->tx_head); if (! CSR_READ_2(sc, IO_PORT_REG) & Done_bit) break; tx_status = CSR_READ_2(sc, IO_PORT_REG); sc->tx_head = CSR_READ_2(sc, IO_PORT_REG); if (tx_status & TX_OK_bit) { ifp->if_opackets++; } else { ifp->if_oerrors++; } ifp->if_collisions += tx_status & No_Collisions_bits; } /* * The card should be ready to accept more packets now. 
*/ ifp->if_flags &= ~IFF_OACTIVE; DODEBUG(Status, printf("OIDLE tx_intr\n");); DODEBUG(Start_End, printf("ex_tx_intr%d: finish\n", unit);); return; } static void ex_rx_intr(struct ex_softc *sc) { struct ifnet * ifp = sc->ifp; int rx_status; int pkt_len; int QQQ; struct mbuf * m; struct mbuf * ipkt; struct ether_header * eh; DODEBUG(Start_End, printf("ex_rx_intr%d: start\n", unit);); /* * For all packets received since last receive interrupt: * - If packet ok, read it into a new mbuf and queue it to interface, * updating statistics. * - If packet bad, just discard it, and update statistics. * Finally, advance receive stop limit in card's memory to new location. */ CSR_WRITE_2(sc, HOST_ADDR_REG, sc->rx_head); while (CSR_READ_2(sc, IO_PORT_REG) == RCV_Done) { rx_status = CSR_READ_2(sc, IO_PORT_REG); sc->rx_head = CSR_READ_2(sc, IO_PORT_REG); QQQ = pkt_len = CSR_READ_2(sc, IO_PORT_REG); if (rx_status & RCV_OK_bit) { MGETHDR(m, M_DONTWAIT, MT_DATA); ipkt = m; if (ipkt == NULL) { ifp->if_iqdrops++; } else { ipkt->m_pkthdr.rcvif = ifp; ipkt->m_pkthdr.len = pkt_len; ipkt->m_len = MHLEN; while (pkt_len > 0) { if (pkt_len >= MINCLSIZE) { MCLGET(m, M_DONTWAIT); if (m->m_flags & M_EXT) { m->m_len = MCLBYTES; } else { m_freem(ipkt); ifp->if_iqdrops++; goto rx_another; } } m->m_len = min(m->m_len, pkt_len); /* * NOTE: I'm assuming that all mbufs allocated are of even length, * except for the last one in an odd-length packet. 
*/ CSR_READ_MULTI_2(sc, IO_PORT_REG, mtod(m, uint16_t *), m->m_len / 2); if (m->m_len & 1) { *(mtod(m, caddr_t) + m->m_len - 1) = CSR_READ_1(sc, IO_PORT_REG); } pkt_len -= m->m_len; if (pkt_len > 0) { MGET(m->m_next, M_DONTWAIT, MT_DATA); if (m->m_next == NULL) { m_freem(ipkt); ifp->if_iqdrops++; goto rx_another; } m = m->m_next; m->m_len = MLEN; } } eh = mtod(ipkt, struct ether_header *); #ifdef EXDEBUG if (debug_mask & Rcvd_Pkts) { if ((eh->ether_dhost[5] != 0xff) || (eh->ether_dhost[0] != 0xff)) { printf("Receive packet with %d data bytes: %6D -> ", QQQ, eh->ether_shost, ":"); printf("%6D\n", eh->ether_dhost, ":"); } /* QQQ */ } #endif (*ifp->if_input)(ifp, ipkt); ifp->if_ipackets++; } } else { ifp->if_ierrors++; } CSR_WRITE_2(sc, HOST_ADDR_REG, sc->rx_head); rx_another: ; } if (sc->rx_head < sc->rx_lower_limit + 2) CSR_WRITE_2(sc, RCV_STOP_REG, sc->rx_upper_limit); else CSR_WRITE_2(sc, RCV_STOP_REG, sc->rx_head - 2); DODEBUG(Start_End, printf("ex_rx_intr%d: finish\n", unit);); return; } static int ex_ioctl(register struct ifnet *ifp, u_long cmd, caddr_t data) { struct ex_softc * sc = ifp->if_softc; struct ifreq * ifr = (struct ifreq *)data; int s; int error = 0; DODEBUG(Start_End, printf("%s: ex_ioctl: start ", ifp->if_xname);); s = splimp(); switch(cmd) { case SIOCSIFADDR: case SIOCGIFADDR: case SIOCSIFMTU: error = ether_ioctl(ifp, cmd, data); break; case SIOCSIFFLAGS: DODEBUG(Start_End, printf("SIOCSIFFLAGS");); if ((ifp->if_flags & IFF_UP) == 0 && (ifp->if_flags & IFF_RUNNING)) { ifp->if_flags &= ~IFF_RUNNING; ex_stop(sc); } else { ex_init(sc); } break; #ifdef NODEF case SIOCGHWADDR: DODEBUG(Start_End, printf("SIOCGHWADDR");); bcopy((caddr_t)sc->sc_addr, (caddr_t)&ifr->ifr_data, sizeof(sc->sc_addr)); break; #endif case SIOCADDMULTI: case SIOCDELMULTI: ex_init(sc); error = 0; break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, cmd); break; default: DODEBUG(Start_End, printf("unknown");); error = EINVAL; } splx(s); 
DODEBUG(Start_End, printf("\n%s: ex_ioctl: finish\n", ifp->if_xname);); return(error); } static void ex_setmulti(struct ex_softc *sc) { struct ifnet *ifp; struct ifmultiaddr *maddr; uint16_t *addr; int count; int timeout, status; ifp = sc->ifp; count = 0; + IF_ADDR_LOCK(ifp); TAILQ_FOREACH(maddr, &ifp->if_multiaddrs, ifma_link) { if (maddr->ifma_addr->sa_family != AF_LINK) continue; count++; } + IF_ADDR_UNLOCK(ifp); if ((ifp->if_flags & IFF_PROMISC) || (ifp->if_flags & IFF_ALLMULTI) || count > 63) { /* Interface is in promiscuous mode or there are too many * multicast addresses for the card to handle */ CSR_WRITE_1(sc, CMD_REG, Bank2_Sel); CSR_WRITE_1(sc, REG2, CSR_READ_1(sc, REG2) | Promisc_Mode); CSR_WRITE_1(sc, REG3, CSR_READ_1(sc, REG3)); CSR_WRITE_1(sc, CMD_REG, Bank0_Sel); } else if ((ifp->if_flags & IFF_MULTICAST) && (count > 0)) { /* Program multicast addresses plus our MAC address * into the filter */ CSR_WRITE_1(sc, CMD_REG, Bank2_Sel); CSR_WRITE_1(sc, REG2, CSR_READ_1(sc, REG2) | Multi_IA); CSR_WRITE_1(sc, REG3, CSR_READ_1(sc, REG3)); CSR_WRITE_1(sc, CMD_REG, Bank0_Sel); /* Borrow space from TX buffer; this should be safe * as this is only called from ex_init */ CSR_WRITE_2(sc, HOST_ADDR_REG, sc->tx_lower_limit); CSR_WRITE_2(sc, IO_PORT_REG, MC_Setup_CMD); CSR_WRITE_2(sc, IO_PORT_REG, 0); CSR_WRITE_2(sc, IO_PORT_REG, 0); CSR_WRITE_2(sc, IO_PORT_REG, (count + 1) * 6); - + + IF_ADDR_LOCK(ifp); TAILQ_FOREACH(maddr, &ifp->if_multiaddrs, ifma_link) { if (maddr->ifma_addr->sa_family != AF_LINK) continue; addr = (uint16_t*)LLADDR((struct sockaddr_dl *) maddr->ifma_addr); CSR_WRITE_2(sc, IO_PORT_REG, *addr++); CSR_WRITE_2(sc, IO_PORT_REG, *addr++); CSR_WRITE_2(sc, IO_PORT_REG, *addr++); } + IF_ADDR_UNLOCK(ifp); /* Program our MAC address as well */ /* XXX: Is this necessary? 
The Linux driver does this * but the NetBSD driver does not */ addr = (uint16_t*)(&IFP2ENADDR(sc->ifp)); CSR_WRITE_2(sc, IO_PORT_REG, *addr++); CSR_WRITE_2(sc, IO_PORT_REG, *addr++); CSR_WRITE_2(sc, IO_PORT_REG, *addr++); CSR_READ_2(sc, IO_PORT_REG); CSR_WRITE_2(sc, XMT_BAR, sc->tx_lower_limit); CSR_WRITE_1(sc, CMD_REG, MC_Setup_CMD); sc->tx_head = sc->tx_lower_limit; sc->tx_tail = sc->tx_head + XMT_HEADER_LEN + (count + 1) * 6; for (timeout=0; timeout<100; timeout++) { DELAY(2); if ((CSR_READ_1(sc, STATUS_REG) & Exec_Int) == 0) continue; status = CSR_READ_1(sc, CMD_REG); CSR_WRITE_1(sc, STATUS_REG, Exec_Int); break; } sc->tx_head = sc->tx_tail; } else { /* No multicast or promiscuous mode */ CSR_WRITE_1(sc, CMD_REG, Bank2_Sel); CSR_WRITE_1(sc, REG2, CSR_READ_1(sc, REG2) & 0xDE); /* ~(Multi_IA | Promisc_Mode) */ CSR_WRITE_1(sc, REG3, CSR_READ_1(sc, REG3)); CSR_WRITE_1(sc, CMD_REG, Bank0_Sel); } } static void ex_reset(struct ex_softc *sc) { int s; DODEBUG(Start_End, printf("ex_reset%d: start\n", unit);); s = splimp(); ex_stop(sc); ex_init(sc); splx(s); DODEBUG(Start_End, printf("ex_reset%d: finish\n", unit);); return; } static void ex_watchdog(struct ifnet *ifp) { struct ex_softc * sc = ifp->if_softc; DODEBUG(Start_End, printf("%s: ex_watchdog: start\n", ifp->if_xname);); ifp->if_flags &= ~IFF_OACTIVE; DODEBUG(Status, printf("OIDLE watchdog\n");); ifp->if_oerrors++; ex_reset(sc); ex_start(ifp); DODEBUG(Start_End, printf("%s: ex_watchdog: finish\n", ifp->if_xname);); return; } static int ex_get_media(struct ex_softc *sc) { int current; int media; media = ex_eeprom_read(sc, EE_W5); CSR_WRITE_1(sc, CMD_REG, Bank2_Sel); current = CSR_READ_1(sc, REG3); CSR_WRITE_1(sc, CMD_REG, Bank0_Sel); if ((current & TPE_bit) && (media & EE_W5_PORT_TPE)) return(IFM_ETHER|IFM_10_T); if ((current & BNC_bit) && (media & EE_W5_PORT_BNC)) return(IFM_ETHER|IFM_10_2); if (media & EE_W5_PORT_AUI) return (IFM_ETHER|IFM_10_5); return (IFM_ETHER|IFM_AUTO); } static int ex_ifmedia_upd(ifp) struct 
ifnet * ifp; { struct ex_softc * sc = ifp->if_softc; if (IFM_TYPE(sc->ifmedia.ifm_media) != IFM_ETHER) return EINVAL; return (0); } static void ex_ifmedia_sts(ifp, ifmr) struct ifnet * ifp; struct ifmediareq * ifmr; { struct ex_softc * sc = ifp->if_softc; ifmr->ifm_active = ex_get_media(sc); ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; return; } u_short ex_eeprom_read(struct ex_softc *sc, int location) { int i; u_short data = 0; int read_cmd = location | EE_READ_CMD; short ctrl_val = EECS; CSR_WRITE_1(sc, CMD_REG, Bank2_Sel); CSR_WRITE_1(sc, EEPROM_REG, EECS); for (i = 8; i >= 0; i--) { short outval = (read_cmd & (1 << i)) ? ctrl_val | EEDI : ctrl_val; CSR_WRITE_1(sc, EEPROM_REG, outval); CSR_WRITE_1(sc, EEPROM_REG, outval | EESK); DELAY(3); CSR_WRITE_1(sc, EEPROM_REG, outval); DELAY(2); } CSR_WRITE_1(sc, EEPROM_REG, ctrl_val); for (i = 16; i > 0; i--) { CSR_WRITE_1(sc, EEPROM_REG, ctrl_val | EESK); DELAY(3); data = (data << 1) | ((CSR_READ_1(sc, EEPROM_REG) & EEDO) ? 1 : 0); CSR_WRITE_1(sc, EEPROM_REG, ctrl_val); DELAY(2); } ctrl_val &= ~EECS; CSR_WRITE_1(sc, EEPROM_REG, ctrl_val | EESK); DELAY(3); CSR_WRITE_1(sc, EEPROM_REG, ctrl_val); DELAY(2); CSR_WRITE_1(sc, CMD_REG, Bank0_Sel); return(data); } Index: stable/6/sys/dev/fe/if_fe.c =================================================================== --- stable/6/sys/dev/fe/if_fe.c (revision 149421) +++ stable/6/sys/dev/fe/if_fe.c (revision 149422) @@ -1,2237 +1,2239 @@ /*- * All Rights Reserved, Copyright (C) Fujitsu Limited 1995 * * This software may be used, modified, copied, distributed, and sold, in * both source and binary form provided that the above copyright, these * terms and the following disclaimer are retained. The name of the author * and/or the contributor may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND THE CONTRIBUTOR ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR THE CONTRIBUTOR BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * * Device driver for Fujitsu MB86960A/MB86965A based Ethernet cards. * Contributed by M. Sekiguchi. * * This version is intended to be a generic template for various * MB86960A/MB86965A based Ethernet cards. It currently supports * Fujitsu FMV-180 series for ISA and Allied-Telesis AT1700/RE2000 * series for ISA, as well as Fujitsu MBH10302 PC card. * There are some currently- * unused hooks embedded, which are primarily intended to support * other types of Ethernet cards, but the author is not sure whether * they are useful. * * This version also includes some alignments to support RE1000, * C-NET(98)P2 and so on. These cards are not for AT-compatibles, * but for NEC PC-98 bus -- a proprietary bus architecture available * only in Japan. Confusingly, it is different from the Microsoft's * PC98 architecture. :-{ * Further work for PC-98 version will be available as a part of * FreeBSD(98) project. * * This software is a derivative work of if_ed.c version 1.56 by David * Greenman available as a part of FreeBSD 2.0 RELEASE source distribution. * * The following lines are retained from the original if_ed.c: * * Copyright (C) 1993, David Greenman. 
This software may be used, modified, * copied, distributed, and sold, in both source and binary form provided * that the above copyright and these terms are retained. Under no * circumstances is the author responsible for the proper functioning * of this software, nor does the author assume any responsibility * for damages incurred with its use. */ /* * TODO: * o To support ISA PnP auto configuration for FMV-183/184. * o To support REX-9886/87(PC-98 only). * o To reconsider mbuf usage. * o To reconsider transmission buffer usage, including * transmission buffer size (currently 4KB x 2) and pros-and- * cons of multiple frame transmission. * o To test IPX codes. * o To test new-bus frontend. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Transmit just one packet per a "send" command to 86960. * This option is intended for performance test. An EXPERIMENTAL option. */ #ifndef FE_SINGLE_TRANSMISSION #define FE_SINGLE_TRANSMISSION 0 #endif /* * Maximum loops when interrupt. * This option prevents an infinite loop due to hardware failure. * (Some laptops make an infinite loop after PC-Card is ejected.) */ #ifndef FE_MAX_LOOP #define FE_MAX_LOOP 0x800 #endif /* * Device configuration flags. */ /* DLCR6 settings. */ #define FE_FLAGS_DLCR6_VALUE 0x007F /* Force DLCR6 override. */ #define FE_FLAGS_OVERRIDE_DLCR6 0x0080 devclass_t fe_devclass; /* * Special filter values. */ static struct fe_filter const fe_filter_nothing = { FE_FILTER_NOTHING }; static struct fe_filter const fe_filter_all = { FE_FILTER_ALL }; /* Standard driver entry points. These can be static. 
*/ static void fe_init (void *); static driver_intr_t fe_intr; static int fe_ioctl (struct ifnet *, u_long, caddr_t); static void fe_start (struct ifnet *); static void fe_watchdog (struct ifnet *); static int fe_medchange (struct ifnet *); static void fe_medstat (struct ifnet *, struct ifmediareq *); /* Local functions. Order of declaration is confused. FIXME. */ static int fe_get_packet ( struct fe_softc *, u_short ); static void fe_tint ( struct fe_softc *, u_char ); static void fe_rint ( struct fe_softc *, u_char ); static void fe_xmit ( struct fe_softc * ); static void fe_write_mbufs ( struct fe_softc *, struct mbuf * ); static void fe_setmode ( struct fe_softc * ); static void fe_loadmar ( struct fe_softc * ); #ifdef DIAGNOSTIC static void fe_emptybuffer ( struct fe_softc * ); #endif /* * Fe driver specific constants which relate to 86960/86965. */ /* Interrupt masks */ #define FE_TMASK ( FE_D2_COLL16 | FE_D2_TXDONE ) #define FE_RMASK ( FE_D3_OVRFLO | FE_D3_CRCERR \ | FE_D3_ALGERR | FE_D3_SRTPKT | FE_D3_PKTRDY ) /* Maximum number of iterations for a receive interrupt. */ #define FE_MAX_RECV_COUNT ( ( 65536 - 2048 * 2 ) / 64 ) /* * Maximum size of SRAM is 65536, * minimum size of transmission buffer in fe is 2x2KB, * and minimum amount of received packet including headers * added by the chip is 64 bytes. * Hence FE_MAX_RECV_COUNT is the upper limit for number * of packets in the receive buffer. */ /* * Miscellaneous definitions not directly related to hardware. */ /* The following line must be delete when "net/if_media.h" support it. */ #ifndef IFM_10_FL #define IFM_10_FL /* 13 */ IFM_10_5 #endif #if 0 /* Mapping between media bitmap (in fe_softc.mbitmap) and ifm_media. */ static int const bit2media [] = { IFM_HDX | IFM_ETHER | IFM_AUTO, IFM_HDX | IFM_ETHER | IFM_MANUAL, IFM_HDX | IFM_ETHER | IFM_10_T, IFM_HDX | IFM_ETHER | IFM_10_2, IFM_HDX | IFM_ETHER | IFM_10_5, IFM_HDX | IFM_ETHER | IFM_10_FL, IFM_FDX | IFM_ETHER | IFM_10_T, /* More can be come here... 
*/ 0 }; #else /* Mapping between media bitmap (in fe_softc.mbitmap) and ifm_media. */ static int const bit2media [] = { IFM_ETHER | IFM_AUTO, IFM_ETHER | IFM_MANUAL, IFM_ETHER | IFM_10_T, IFM_ETHER | IFM_10_2, IFM_ETHER | IFM_10_5, IFM_ETHER | IFM_10_FL, IFM_ETHER | IFM_10_T, /* More can be come here... */ 0 }; #endif /* * Check for specific bits in specific registers have specific values. * A common utility function called from various sub-probe routines. */ int fe_simple_probe (struct fe_softc const * sc, struct fe_simple_probe_struct const * sp) { struct fe_simple_probe_struct const *p; int8_t bits; for (p = sp; p->mask != 0; p++) { bits = fe_inb(sc, p->port); printf("port %d, mask %x, bits %x read %x\n", p->port, p->mask, p->bits, bits); if ((bits & p->mask) != p->bits) return 0; } return 1; } /* Test if a given 6 byte value is a valid Ethernet station (MAC) address. "Vendor" is an expected vendor code (first three bytes,) or a zero when nothing expected. */ int fe_valid_Ether_p (u_char const * addr, unsigned vendor) { #ifdef FE_DEBUG printf("fe?: validating %6D against %06x\n", addr, ":", vendor); #endif /* All zero is not allowed as a vendor code. */ if (addr[0] == 0 && addr[1] == 0 && addr[2] == 0) return 0; switch (vendor) { case 0x000000: /* Legal Ethernet address (stored in ROM) must have its Group and Local bits cleared. */ if ((addr[0] & 0x03) != 0) return 0; break; case 0x020000: /* Same as above, but a local address is allowed in this context. */ if (ETHER_IS_MULTICAST(addr)) return 0; break; default: /* Make sure the vendor part matches if one is given. */ if ( addr[0] != ((vendor >> 16) & 0xFF) || addr[1] != ((vendor >> 8) & 0xFF) || addr[2] != ((vendor ) & 0xFF)) return 0; break; } /* Host part must not be all-zeros nor all-ones. */ if (addr[3] == 0xFF && addr[4] == 0xFF && addr[5] == 0xFF) return 0; if (addr[3] == 0x00 && addr[4] == 0x00 && addr[5] == 0x00) return 0; /* Given addr looks like an Ethernet address. 
*/ return 1; } /* Fill our softc struct with default value. */ void fe_softc_defaults (struct fe_softc *sc) { /* Prepare for typical register prototypes. We assume a "typical" board has <32KB> of SRAM connected with a data lines. */ sc->proto_dlcr4 = FE_D4_LBC_DISABLE | FE_D4_CNTRL; sc->proto_dlcr5 = 0; sc->proto_dlcr6 = FE_D6_BUFSIZ_32KB | FE_D6_TXBSIZ_2x4KB | FE_D6_BBW_BYTE | FE_D6_SBW_WORD | FE_D6_SRAM_100ns; sc->proto_dlcr7 = FE_D7_BYTSWP_LH; sc->proto_bmpr13 = 0; /* Assume the probe process (to be done later) is stable. */ sc->stability = 0; /* A typical board needs no hooks. */ sc->init = NULL; sc->stop = NULL; /* Assume the board has no software-controllable media selection. */ sc->mbitmap = MB_HM; sc->defmedia = MB_HM; sc->msel = NULL; } /* Common error reporting routine used in probe routines for "soft configured IRQ"-type boards. */ void fe_irq_failure (char const *name, int unit, int irq, char const *list) { printf("fe%d: %s board is detected, but %s IRQ was given\n", unit, name, (irq == NO_IRQ ? "no" : "invalid")); if (list != NULL) { printf("fe%d: specify an IRQ from %s in kernel config\n", unit, list); } } /* * Hardware (vendor) specific hooks. */ /* * Generic media selection scheme for MB86965 based boards. */ void fe_msel_965 (struct fe_softc *sc) { u_char b13; /* Find the appropriate bits for BMPR13 tranceiver control. */ switch (IFM_SUBTYPE(sc->media.ifm_media)) { case IFM_AUTO: b13 = FE_B13_PORT_AUTO | FE_B13_TPTYPE_UTP; break; case IFM_10_T: b13 = FE_B13_PORT_TP | FE_B13_TPTYPE_UTP; break; default: b13 = FE_B13_PORT_AUI; break; } /* Write it into the register. It takes effect immediately. */ fe_outb(sc, FE_BMPR13, sc->proto_bmpr13 | b13); } /* * Fujitsu MB86965 JLI mode support routines. */ /* * Routines to read all bytes from the config EEPROM through MB86965A. * It is a MicroWire (3-wire) serial EEPROM with 6-bit address. * (93C06 or 93C46.) 
 */
static void
fe_strobe_eeprom_jli (struct fe_softc *sc, u_short bmpr16)
{
	/*
	 * We must guarantee 1us (or more) interval to access slow
	 * EEPROMs.  The following redundant code provides enough
	 * delay with ISA timing.  (Even if the bus clock is "tuned.")
	 * Some modification will be needed on faster busses.
	 */
	fe_outb(sc, bmpr16, FE_B16_SELECT);
	fe_outb(sc, bmpr16, FE_B16_SELECT | FE_B16_CLOCK);
	fe_outb(sc, bmpr16, FE_B16_SELECT | FE_B16_CLOCK);
	fe_outb(sc, bmpr16, FE_B16_SELECT);
}

/* Read the entire JLI-mode config EEPROM into *data (JLI_EEPROM_SIZE
   bytes), bit-banging the MicroWire protocol through BMPR16/BMPR17.  */
void
fe_read_eeprom_jli (struct fe_softc * sc, u_char * data)
{
	u_char n, val, bit;
	u_char save16, save17;

	/* Save the current value of the EEPROM interface registers.  */
	save16 = fe_inb(sc, FE_BMPR16);
	save17 = fe_inb(sc, FE_BMPR17);

	/* Read bytes from EEPROM; two bytes per an iteration.  */
	for (n = 0; n < JLI_EEPROM_SIZE / 2; n++) {

		/* Reset the EEPROM interface.  */
		fe_outb(sc, FE_BMPR16, 0x00);
		fe_outb(sc, FE_BMPR17, 0x00);

		/* Start EEPROM access.  */
		fe_outb(sc, FE_BMPR16, FE_B16_SELECT);
		fe_outb(sc, FE_BMPR17, FE_B17_DATA);
		fe_strobe_eeprom_jli(sc, FE_BMPR16);

		/* Pass the iteration count as well as a READ command.  */
		val = 0x80 | n;
		for (bit = 0x80; bit != 0x00; bit >>= 1) {
			fe_outb(sc, FE_BMPR17, (val & bit) ? FE_B17_DATA : 0);
			fe_strobe_eeprom_jli(sc, FE_BMPR16);
		}
		fe_outb(sc, FE_BMPR17, 0x00);

		/* Read a byte.  */
		val = 0;
		for (bit = 0x80; bit != 0x00; bit >>= 1) {
			fe_strobe_eeprom_jli(sc, FE_BMPR16);
			if (fe_inb(sc, FE_BMPR17) & FE_B17_DATA)
				val |= bit;
		}
		*data++ = val;

		/* Read one more byte.  */
		val = 0;
		for (bit = 0x80; bit != 0x00; bit >>= 1) {
			fe_strobe_eeprom_jli(sc, FE_BMPR16);
			if (fe_inb(sc, FE_BMPR17) & FE_B17_DATA)
				val |= bit;
		}
		*data++ = val;
	}

#if 0
	/* Reset the EEPROM interface, again.  */
	fe_outb(sc, FE_BMPR16, 0x00);
	fe_outb(sc, FE_BMPR17, 0x00);
#else
	/* Make sure to restore the original value of EEPROM interface
	   registers, since we are not yet sure we have MB86965A on
	   the address.  */
	fe_outb(sc, FE_BMPR17, save17);
	fe_outb(sc, FE_BMPR16, save16);
#endif

#if 1
	/* Report what we got.  */
	if (bootverbose) {
		int i;
		data -= JLI_EEPROM_SIZE;
		for (i = 0; i < JLI_EEPROM_SIZE; i += 16) {
			if_printf(sc->ifp,
			    "EEPROM(JLI):%3x: %16D\n", i, data + i, " ");
		}
	}
#endif
}

/* JLI-mode board initialization hook.  */
void
fe_init_jli (struct fe_softc * sc)
{
	/* "Reset" by writing into a magic location.  */
	DELAY(200);
	fe_outb(sc, 0x1E, fe_inb(sc, 0x1E));
	DELAY(300);
}

/*
 * SSi 78Q8377A support routines.
 */

/*
 * Routines to read all bytes from the config EEPROM through 78Q8377A.
 * It is a MicroWire (3-wire) serial EEPROM with 8-bit address.  (I.e.,
 * 93C56 or 93C66.)
 *
 * As I don't have SSi manuals, (hmm, an old song again!) I'm not exactly
 * sure the following code is correct...  It is just stolen from the
 * C-NET(98)P2 support routine in FreeBSD(98).
 */
void
fe_read_eeprom_ssi (struct fe_softc *sc, u_char *data)
{
	u_char val, bit;
	int n;
	u_char save6, save7, save12;

	/* Save the current value for the DLCR registers we are about
	   to destroy.  */
	save6 = fe_inb(sc, FE_DLCR6);
	save7 = fe_inb(sc, FE_DLCR7);

	/* Put the 78Q8377A into a state that we can access the EEPROM.  */
	fe_outb(sc, FE_DLCR6,
	    FE_D6_BBW_WORD | FE_D6_SBW_WORD | FE_D6_DLC_DISABLE);
	fe_outb(sc, FE_DLCR7,
	    FE_D7_BYTSWP_LH | FE_D7_RBS_BMPR | FE_D7_RDYPNS | FE_D7_POWER_UP);

	/* Save the current value for the BMPR12 register, too.  */
	save12 = fe_inb(sc, FE_DLCR12);

	/* Read bytes from EEPROM; two bytes per an iteration.  */
	for (n = 0; n < SSI_EEPROM_SIZE / 2; n++) {

		/* Start EEPROM access  */
		fe_outb(sc, FE_DLCR12, SSI_EEP);
		fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL);

		/* Send the following four bits to the EEPROM in the
		   specified order: a dummy bit, a start bit, and
		   command bits (10) for READ.  */
		fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL);
		fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | SSI_CLK);	/* 0 */
		fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | SSI_DAT);
		fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | SSI_CLK | SSI_DAT);	/* 1 */
		fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | SSI_DAT);
		fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | SSI_CLK | SSI_DAT);	/* 1 */
		fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL);
		fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | SSI_CLK);	/* 0 */

		/* Pass the iteration count to the chip.  */
		for (bit = 0x80; bit != 0x00; bit >>= 1) {
			val = ( n & bit ) ? SSI_DAT : 0;
			fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | val);
			fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | SSI_CLK | val);
		}

		/* Read a byte.  */
		val = 0;
		for (bit = 0x80; bit != 0x00; bit >>= 1) {
			fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL);
			fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | SSI_CLK);
			if (fe_inb(sc, FE_DLCR12) & SSI_DIN)
				val |= bit;
		}
		*data++ = val;

		/* Read one more byte.  */
		val = 0;
		for (bit = 0x80; bit != 0x00; bit >>= 1) {
			fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL);
			fe_outb(sc, FE_DLCR12, SSI_EEP | SSI_CSL | SSI_CLK);
			if (fe_inb(sc, FE_DLCR12) & SSI_DIN)
				val |= bit;
		}
		*data++ = val;

		fe_outb(sc, FE_DLCR12, SSI_EEP);
	}

	/* Reset the EEPROM interface.  (For now.)  */
	fe_outb(sc, FE_DLCR12, 0x00);

	/* Restore the saved register values, for the case that we
	   didn't have 78Q8377A at the given address.  */
	fe_outb(sc, FE_DLCR12, save12);
	fe_outb(sc, FE_DLCR7, save7);
	fe_outb(sc, FE_DLCR6, save6);

#if 1
	/* Report what we got.  */
	if (bootverbose) {
		int i;
		data -= SSI_EEPROM_SIZE;
		for (i = 0; i < SSI_EEPROM_SIZE; i += 16) {
			if_printf(sc->ifp,
			    "EEPROM(SSI):%3x: %16D\n", i, data + i, " ");
		}
	}
#endif
}

/*
 * TDK/LANX boards support routines.
 */

/* It is assumed that the CLK line is low and SDA is high (float) upon entry.
 */
/* One I2C-style bus phase: SDA level D, CLK level K, shifted to byte N.  */
#define LNX_PH(D,K,N) \
	((LNX_SDA_##D | LNX_CLK_##K) << N)
/* Four consecutive phases packed into one 32-bit "cycle" word.  */
#define LNX_CYCLE(D1,D2,D3,D4,K1,K2,K3,K4) \
	(LNX_PH(D1,K1,0)|LNX_PH(D2,K2,8)|LNX_PH(D3,K3,16)|LNX_PH(D4,K4,24))

#define LNX_CYCLE_START	LNX_CYCLE(HI,LO,LO,HI, HI,HI,LO,LO)
#define LNX_CYCLE_STOP	LNX_CYCLE(LO,LO,HI,HI, LO,HI,HI,LO)
#define LNX_CYCLE_HI	LNX_CYCLE(HI,HI,HI,HI, LO,HI,LO,LO)
#define LNX_CYCLE_LO	LNX_CYCLE(LO,LO,LO,HI, LO,HI,LO,LO)
#define LNX_CYCLE_INIT	LNX_CYCLE(LO,HI,HI,HI, LO,LO,LO,LO)

/* Clock one packed 4-phase cycle out to the EEPROM control port.  */
static void
fe_eeprom_cycle_lnx (struct fe_softc *sc, u_short reg20, u_long cycle)
{
	fe_outb(sc, reg20, (cycle      ) & 0xFF);
	DELAY(15);
	fe_outb(sc, reg20, (cycle >>  8) & 0xFF);
	DELAY(15);
	fe_outb(sc, reg20, (cycle >> 16) & 0xFF);
	DELAY(15);
	fe_outb(sc, reg20, (cycle >> 24) & 0xFF);
	DELAY(15);
}

/* Sample one data bit from the EEPROM while pulsing the clock line.  */
static u_char
fe_eeprom_receive_lnx (struct fe_softc *sc, u_short reg20)
{
	u_char dat;

	fe_outb(sc, reg20, LNX_CLK_HI | LNX_SDA_FL);
	DELAY(15);
	dat = fe_inb(sc, reg20);
	fe_outb(sc, reg20, LNX_CLK_LO | LNX_SDA_FL);
	DELAY(15);
	return (dat & LNX_SDA_IN);
}

/* Read the whole X24C01 config EEPROM (LNX_EEPROM_SIZE bytes) into
   *data; the buffer is zeroed when the EEPROM does not respond.  */
void
fe_read_eeprom_lnx (struct fe_softc *sc, u_char *data)
{
	int i;
	u_char n, bit, val;
	u_char save20;
	u_short reg20 = 0x14;

	save20 = fe_inb(sc, reg20);

	/* NOTE: DELAY() timing constants are approximately three
	   times longer (slower) than the required minimum.  This is
	   to guarantee a reliable operation under some tough
	   conditions...  Fortunately, this routine is only called
	   during the boot phase, so the speed is less important than
	   stability.  */

#if 1
	/* Reset the X24C01's internal state machine and put it into
	   the IDLE state.  We usually don't need this, but *if*
	   someone (e.g., probe routine of other driver) write some
	   garbage into the register at 0x14, synchronization will be
	   lost, and the normal EEPROM access protocol won't work.
	   Moreover, as there are no easy way to reset, we need a
	   _manoeuvre_ here.  (It even lacks a reset pin, so pushing
	   the RESET button on the PC doesn't help!)  */
	fe_eeprom_cycle_lnx(sc, reg20, LNX_CYCLE_INIT);
	for (i = 0; i < 10; i++)
		fe_eeprom_cycle_lnx(sc, reg20, LNX_CYCLE_START);
	fe_eeprom_cycle_lnx(sc, reg20, LNX_CYCLE_STOP);
	DELAY(10000);
#endif

	/* Issue a start condition.  */
	fe_eeprom_cycle_lnx(sc, reg20, LNX_CYCLE_START);

	/* Send seven bits of the starting address (zero, in this
	   case) and a command bit for READ.  */
	val = 0x01;
	for (bit = 0x80; bit != 0x00; bit >>= 1) {
		if (val & bit) {
			fe_eeprom_cycle_lnx(sc, reg20, LNX_CYCLE_HI);
		} else {
			fe_eeprom_cycle_lnx(sc, reg20, LNX_CYCLE_LO);
		}
	}

	/* Receive an ACK bit.  */
	if (fe_eeprom_receive_lnx(sc, reg20)) {
		/* ACK was not received.  EEPROM is not present (i.e.,
		   this board was not a TDK/LANX) or not working
		   properly.  */
		if (bootverbose) {
			if_printf(sc->ifp,
			    "no ACK received from EEPROM(LNX)\n");
		}
		/* Clear the given buffer to indicate we could not get
		   any info. and return.  */
		bzero(data, LNX_EEPROM_SIZE);
		goto RET;
	}

	/* Read bytes from EEPROM.  */
	for (n = 0; n < LNX_EEPROM_SIZE; n++) {

		/* Read a byte and store it into the buffer.  */
		val = 0x00;
		for (bit = 0x80; bit != 0x00; bit >>= 1) {
			if (fe_eeprom_receive_lnx(sc, reg20))
				val |= bit;
		}
		*data++ = val;

		/* Acknowledge if we have to read more.  */
		if (n < LNX_EEPROM_SIZE - 1) {
			fe_eeprom_cycle_lnx(sc, reg20, LNX_CYCLE_LO);
		}
	}

	/* Issue a STOP condition, de-activating the clock line.
	   It will be safer to keep the clock line low than to leave
	   it high.  */
	fe_eeprom_cycle_lnx(sc, reg20, LNX_CYCLE_STOP);

  RET:
	fe_outb(sc, reg20, save20);

#if 1
	/* Report what we got.  */
	if (bootverbose) {
		data -= LNX_EEPROM_SIZE;
		for (i = 0; i < LNX_EEPROM_SIZE; i += 16) {
			if_printf(sc->ifp,
			    "EEPROM(LNX):%3x: %16D\n", i, data + i, " ");
		}
	}
#endif
}

/* TDK/LANX board initialization hook.  */
void
fe_init_lnx (struct fe_softc * sc)
{
	/* Reset the 86960.  Do we need this?  FIXME.  */
	fe_outb(sc, 0x12, 0x06);
	DELAY(100);
	fe_outb(sc, 0x12, 0x07);
	DELAY(100);

	/* Setup IRQ control register on the ASIC.  */
	fe_outb(sc, 0x14, sc->priv_info);
}

/*
 * Ungermann-Bass boards support routine.
*/ void fe_init_ubn (struct fe_softc * sc) { /* Do we need this? FIXME. */ fe_outb(sc, FE_DLCR7, sc->proto_dlcr7 | FE_D7_RBS_BMPR | FE_D7_POWER_UP); fe_outb(sc, 0x18, 0x00); DELAY(200); /* Setup IRQ control register on the ASIC. */ fe_outb(sc, 0x14, sc->priv_info); } /* * Install interface into kernel networking data structures */ int fe_attach (device_t dev) { struct fe_softc *sc = device_get_softc(dev); struct ifnet *ifp; int flags = device_get_flags(dev); int b, error; ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not ifalloc\n"); return (ENOSPC); } error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET, fe_intr, sc, &sc->irq_handle); if (error) { fe_release_resource(dev); return ENXIO; } /* * Initialize ifnet structure */ ifp->if_softc = sc; if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_start = fe_start; ifp->if_ioctl = fe_ioctl; ifp->if_watchdog = fe_watchdog; ifp->if_init = fe_init; ifp->if_linkmib = &sc->mibdata; ifp->if_linkmiblen = sizeof (sc->mibdata); #if 0 /* I'm not sure... */ sc->mibdata.dot3Compliance = DOT3COMPLIANCE_COLLS; #endif /* * Set fixed interface flags. */ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_NEEDSGIANT; #if 1 /* * Set maximum size of output queue, if it has not been set. * It is done here as this driver may be started after the * system initialization (i.e., the interface is PCMCIA.) * * I'm not sure this is really necessary, but, even if it is, * it should be done somewhere else, e.g., in if_attach(), * since it must be a common workaround for all network drivers. * FIXME. */ if (ifp->if_snd.ifq_maxlen == 0) ifp->if_snd.ifq_maxlen = ifqmaxlen; #endif #if FE_SINGLE_TRANSMISSION /* Override txb config to allocate minimum. */ sc->proto_dlcr6 &= ~FE_D6_TXBSIZ sc->proto_dlcr6 |= FE_D6_TXBSIZ_2x2KB; #endif /* Modify hardware config if it is requested. 
 */
	if (flags & FE_FLAGS_OVERRIDE_DLCR6)
		sc->proto_dlcr6 = flags & FE_FLAGS_DLCR6_VALUE;

	/* Find TX buffer size, based on the hardware dependent proto.  */
	switch (sc->proto_dlcr6 & FE_D6_TXBSIZ) {
	  case FE_D6_TXBSIZ_2x2KB:
		sc->txb_size = 2048;
		break;
	  case FE_D6_TXBSIZ_2x4KB:
		sc->txb_size = 4096;
		break;
	  case FE_D6_TXBSIZ_2x8KB:
		sc->txb_size = 8192;
		break;
	  default:
		/* Oops, we can't work with single buffer configuration.  */
		if (bootverbose) {
			if_printf(sc->ifp, "strange TXBSIZ config; fixing\n");
		}
		sc->proto_dlcr6 &= ~FE_D6_TXBSIZ;
		sc->proto_dlcr6 |= FE_D6_TXBSIZ_2x2KB;
		sc->txb_size = 2048;
		break;
	}

	/* Initialize the if_media interface.  */
	ifmedia_init(&sc->media, 0, fe_medchange, fe_medstat);
	for (b = 0; bit2media[b] != 0; b++) {
		if (sc->mbitmap & (1 << b)) {
			ifmedia_add(&sc->media, bit2media[b], 0, NULL);
		}
	}
	for (b = 0; bit2media[b] != 0; b++) {
		if (sc->defmedia & (1 << b)) {
			ifmedia_set(&sc->media, bit2media[b]);
			break;
		}
	}
#if 0
	/* Turned off; this is called later, when the interface UPs.  */
	fe_medchange(sc);
#endif

	/* Attach and stop the interface.  */
	ether_ifattach(sc->ifp, sc->enaddr);
	fe_stop(sc);

	/* Print additional info when attached.  */
	device_printf(dev, "type %s%s\n", sc->typestr,
	    (sc->proto_dlcr4 & FE_D4_DSC) ? ", full duplex" : "");
	if (bootverbose) {
		int buf, txb, bbw, sbw, ram;

		buf = txb = bbw = sbw = ram = -1;
		switch ( sc->proto_dlcr6 & FE_D6_BUFSIZ ) {
		  case FE_D6_BUFSIZ_8KB:  buf =  8; break;
		  case FE_D6_BUFSIZ_16KB: buf = 16; break;
		  case FE_D6_BUFSIZ_32KB: buf = 32; break;
		  case FE_D6_BUFSIZ_64KB: buf = 64; break;
		}
		switch ( sc->proto_dlcr6 & FE_D6_TXBSIZ ) {
		  case FE_D6_TXBSIZ_2x2KB: txb = 2; break;
		  case FE_D6_TXBSIZ_2x4KB: txb = 4; break;
		  case FE_D6_TXBSIZ_2x8KB: txb = 8; break;
		}
		switch ( sc->proto_dlcr6 & FE_D6_BBW ) {
		  case FE_D6_BBW_BYTE: bbw =  8; break;
		  case FE_D6_BBW_WORD: bbw = 16; break;
		}
		switch ( sc->proto_dlcr6 & FE_D6_SBW ) {
		  case FE_D6_SBW_BYTE: sbw =  8; break;
		  case FE_D6_SBW_WORD: sbw = 16; break;
		}
		switch ( sc->proto_dlcr6 & FE_D6_SRAM ) {
		  case FE_D6_SRAM_100ns: ram = 100; break;
		  case FE_D6_SRAM_150ns: ram = 150; break;
		}
		device_printf(dev,
		    "SRAM %dKB %dbit %dns, TXB %dKBx2, %dbit I/O\n",
		    buf, bbw, ram, txb, sbw);
	}
	if (sc->stability & UNSTABLE_IRQ)
		device_printf(dev, "warning: IRQ number may be incorrect\n");
	if (sc->stability & UNSTABLE_MAC)
		device_printf(dev, "warning: above MAC address may be incorrect\n");
	if (sc->stability & UNSTABLE_TYPE)
		device_printf(dev, "warning: hardware type was not validated\n");

	return 0;
}

/* Allocate the I/O port range of the given size; fills port/iot/ioh
   fields in the softc.  Returns 0 on success, ENOENT otherwise.  */
int
fe_alloc_port(device_t dev, int size)
{
	struct fe_softc *sc = device_get_softc(dev);
	struct resource *res;
	int rid;

	rid = 0;
	res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid,
	    0ul, ~0ul, size, RF_ACTIVE);
	if (res) {
		sc->port_used = size;
		sc->port_res = res;
		sc->iot = rman_get_bustag(res);
		sc->ioh = rman_get_bushandle(res);
		return (0);
	}
	return (ENOENT);
}

/* Allocate the interrupt resource.  Returns 0 on success, ENOENT
   otherwise.  */
int
fe_alloc_irq(device_t dev, int flags)
{
	struct fe_softc *sc = device_get_softc(dev);
	struct resource *res;
	int rid;

	rid = 0;
	res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE | flags);
	if (res) {
		sc->irq_res = res;
		return (0);
	}
	return (ENOENT);
}

/* Release whichever of the port/IRQ resources were allocated.  */
void
fe_release_resource(device_t dev)
{
	struct fe_softc *sc = device_get_softc(dev);

	if (sc->port_res) {
		bus_release_resource(dev, SYS_RES_IOPORT, 0, sc->port_res);
		sc->port_res = NULL;
	}
	if (sc->irq_res) {
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res);
		sc->irq_res = NULL;
	}
}

/*
 * Reset interface, after some (hardware) trouble is detected.
 */
static void
fe_reset (struct fe_softc *sc)
{
	/* Record how many packets are lost by this accident.  */
	sc->ifp->if_oerrors += sc->txb_sched + sc->txb_count;
	sc->mibdata.dot3StatsInternalMacTransmitErrors++;

	/* Put the interface into known initial state.  */
	fe_stop(sc);
	if (sc->ifp->if_flags & IFF_UP)
		fe_init(sc);
}

/*
 * Stop everything on the interface.
 *
 * All buffered packets, both transmitting and receiving,
 * if any, will be lost by stopping the interface.
 */
void
fe_stop (struct fe_softc *sc)
{
	int s;

	s = splimp();

	/* Disable interrupts.  */
	fe_outb(sc, FE_DLCR2, 0x00);
	fe_outb(sc, FE_DLCR3, 0x00);

	/* Stop interface hardware.  */
	DELAY(200);
	fe_outb(sc, FE_DLCR6, sc->proto_dlcr6 | FE_D6_DLC_DISABLE);
	DELAY(200);

	/* Clear all interrupt status.  */
	fe_outb(sc, FE_DLCR0, 0xFF);
	fe_outb(sc, FE_DLCR1, 0xFF);

	/* Put the chip in stand-by mode.  */
	DELAY(200);
	fe_outb(sc, FE_DLCR7, sc->proto_dlcr7 | FE_D7_POWER_DOWN);
	DELAY(200);

	/* Reset transmitter variables and interface flags.  */
	sc->ifp->if_flags &= ~(IFF_OACTIVE | IFF_RUNNING);
	sc->ifp->if_timer = 0;
	sc->txb_free = sc->txb_size;
	sc->txb_count = 0;
	sc->txb_sched = 0;

	/* MAR loading can be delayed.  */
	sc->filter_change = 0;

	/* Call a device-specific hook.  */
	if (sc->stop)
		sc->stop(sc);

	(void) splx(s);
}

/*
 * Device timeout/watchdog routine.  Entered if the device neglects to
 * generate an interrupt after a transmit has been started on it.
 */
static void
fe_watchdog ( struct ifnet *ifp )
{
	struct fe_softc *sc = ifp->if_softc;

	/* A "debug" message.  */
	if_printf(ifp, "transmission timeout (%d+%d)%s\n",
	    sc->txb_sched, sc->txb_count,
	    (ifp->if_flags & IFF_UP) ? "" : " when down");
	if (sc->ifp->if_opackets == 0 && sc->ifp->if_ipackets == 0)
		if_printf(ifp, "wrong IRQ setting in config?\n");
	fe_reset(sc);
}

/*
 * Initialize device.
 */
static void
fe_init (void * xsc)
{
	struct fe_softc *sc = xsc;
	int s;

	/* Start initializing 86960.  */
	s = splimp();

	/* Call a hook before we start initializing the chip.  */
	if (sc->init)
		sc->init(sc);

	/*
	 * Make sure to disable the chip, also.
	 * This may also help re-programming the chip after
	 * hot insertion of PCMCIAs.
	 */
	DELAY(200);
	fe_outb(sc, FE_DLCR6, sc->proto_dlcr6 | FE_D6_DLC_DISABLE);
	DELAY(200);

	/* Power up the chip and select register bank for DLCRs.  */
	DELAY(200);
	fe_outb(sc, FE_DLCR7,
	    sc->proto_dlcr7 | FE_D7_RBS_DLCR | FE_D7_POWER_UP);
	DELAY(200);

	/* Feed the station address.  */
	fe_outblk(sc, FE_DLCR8, IFP2ENADDR(sc->ifp), ETHER_ADDR_LEN);

	/* Clear multicast address filter to receive nothing.  */
	fe_outb(sc, FE_DLCR7,
	    sc->proto_dlcr7 | FE_D7_RBS_MAR | FE_D7_POWER_UP);
	fe_outblk(sc, FE_MAR8, fe_filter_nothing.data, FE_FILTER_LEN);

	/* Select the BMPR bank for runtime register access.  */
	fe_outb(sc, FE_DLCR7,
	    sc->proto_dlcr7 | FE_D7_RBS_BMPR | FE_D7_POWER_UP);

	/* Initialize registers.  */
	fe_outb(sc, FE_DLCR0, 0xFF);	/* Clear all bits.  */
	fe_outb(sc, FE_DLCR1, 0xFF);	/* ditto.  */
	fe_outb(sc, FE_DLCR2, 0x00);
	fe_outb(sc, FE_DLCR3, 0x00);
	fe_outb(sc, FE_DLCR4, sc->proto_dlcr4);
	fe_outb(sc, FE_DLCR5, sc->proto_dlcr5);
	fe_outb(sc, FE_BMPR10, 0x00);
	fe_outb(sc, FE_BMPR11, FE_B11_CTRL_SKIP | FE_B11_MODE1);
	fe_outb(sc, FE_BMPR12, 0x00);
	fe_outb(sc, FE_BMPR13, sc->proto_bmpr13);
	fe_outb(sc, FE_BMPR14, 0x00);
	fe_outb(sc, FE_BMPR15, 0x00);

	/* Enable interrupts.  */
	fe_outb(sc, FE_DLCR2, FE_TMASK);
	fe_outb(sc, FE_DLCR3, FE_RMASK);

	/* Select requested media, just before enabling DLC.  */
	if (sc->msel)
		sc->msel(sc);

	/* Enable transmitter and receiver.  */
	DELAY(200);
	fe_outb(sc, FE_DLCR6, sc->proto_dlcr6 | FE_D6_DLC_ENABLE);
	DELAY(200);

#ifdef DIAGNOSTIC
	/*
	 * Make sure to empty the receive buffer.
	 *
	 * This may be redundant, but *if* the receive buffer were full
	 * at this point, then the driver would hang.  I have experienced
	 * some strange hang-up just after UP.  I hope the following
	 * code solve the problem.
	 *
	 * I have changed the order of hardware initialization.
	 * I think the receive buffer cannot have any packets at this
	 * point in this version.  The following code *must* be
	 * redundant now.  FIXME.
	 *
	 * I've heard a rumore that on some PC card implementation of
	 * 8696x, the receive buffer can have some data at this point.
	 * The following message helps discovering the fact.  FIXME.
	 */
	if (!(fe_inb(sc, FE_DLCR5) & FE_D5_BUFEMP)) {
		if_printf(sc->ifp,
		    "receive buffer has some data after reset\n");
		fe_emptybuffer(sc);
	}

	/* Do we need this here?  Actually, no.  I must be paranoia.  */
	fe_outb(sc, FE_DLCR0, 0xFF);	/* Clear all bits.  */
	fe_outb(sc, FE_DLCR1, 0xFF);	/* ditto.  */
#endif

	/* Set 'running' flag, because we are now running.  */
	sc->ifp->if_flags |= IFF_RUNNING;

	/*
	 * At this point, the interface is running properly,
	 * except that it receives *no* packets.  we then call
	 * fe_setmode() to tell the chip what packets to be
	 * received, based on the if_flags and multicast group
	 * list.  It completes the initialization process.
	 */
	fe_setmode(sc);

#if 0
	/* ...and attempt to start output queued packets.  */
	/* TURNED OFF, because the semi-auto media prober wants to
	   UP the interface keeping it idle.  The upper layer will
	   soon start the interface anyway, and there are no
	   significant delay.  */
	fe_start(sc->ifp);
#endif

	(void) splx(s);
}

/*
 * This routine actually starts the transmission on the interface
 */
static void
fe_xmit (struct fe_softc *sc)
{
	/*
	 * Set a timer just in case we never hear from the board again.
	 * We use longer timeout for multiple packet transmission.
	 * I'm not sure this timer value is appropriate.  FIXME.
	 */
	sc->ifp->if_timer = 1 + sc->txb_count;

	/* Update txb variables.  */
	sc->txb_sched = sc->txb_count;
	sc->txb_count = 0;
	sc->txb_free = sc->txb_size;
	sc->tx_excolls = 0;

	/* Start transmitter, passing packets in TX buffer.  */
	fe_outb(sc, FE_BMPR10, sc->txb_sched | FE_B10_START);
}

/*
 * Start output on interface.
 * We make two assumptions here:
 *  1) that the current priority is set to splimp _before_ this code
 *     is called *and* is returned to the appropriate priority after
 *     return
 *  2) that the IFF_OACTIVE flag is checked before this code is called
 *     (i.e. that the output part of the interface is idle)
 */
static void
fe_start (struct ifnet *ifp)
{
	struct fe_softc *sc = ifp->if_softc;
	struct mbuf *m;

#ifdef DIAGNOSTIC
	/* Just a sanity check.  */
	if ((sc->txb_count == 0) != (sc->txb_free == sc->txb_size)) {
		/*
		 * Txb_count and txb_free co-works to manage the
		 * transmission buffer.  Txb_count keeps track of the
		 * used potion of the buffer, while txb_free does unused
		 * potion.  So, as long as the driver runs properly,
		 * txb_count is zero if and only if txb_free is same
		 * as txb_size (which represents whole buffer.)
		 */
		if_printf(ifp, "inconsistent txb variables (%d, %d)\n",
		    sc->txb_count, sc->txb_free);
		/*
		 * So, what should I do, then?
		 *
		 * We now know txb_count and txb_free contradicts.  We
		 * cannot, however, tell which is wrong.  More
		 * over, we cannot peek 86960 transmission buffer or
		 * reset the transmission buffer.  (In fact, we can
		 * reset the entire interface.  I don't want to do it.)
		 *
		 * If txb_count is incorrect, leaving it as-is will cause
		 * sending of garbage after next interrupt.  We have to
		 * avoid it.  Hence, we reset the txb_count here.  If
		 * txb_free was incorrect, resetting txb_count just loose
		 * some packets.  We can live with it.
		 */
		sc->txb_count = 0;
	}
#endif

	/*
	 * First, see if there are buffered packets and an idle
	 * transmitter - should never happen at this point.
	 */
	if ((sc->txb_count > 0) && (sc->txb_sched == 0)) {
		if_printf(ifp, "transmitter idle with %d buffered packets\n",
		    sc->txb_count);
		fe_xmit(sc);
	}

	/*
	 * Stop accepting more transmission packets temporarily, when
	 * a filter change request is delayed.  Updating the MARs on
	 * 86960 flushes the transmission buffer, so it is delayed
	 * until all buffered transmission packets have been sent
	 * out.
	 */
	if (sc->filter_change) {
		/*
		 * Filter change request is delayed only when the DLC is
		 * working.  DLC soon raise an interrupt after finishing
		 * the work.
		 */
		goto indicate_active;
	}

	for (;;) {

		/*
		 * See if there is room to put another packet in the buffer.
		 * We *could* do better job by peeking the send queue to
		 * know the length of the next packet.  Current version just
		 * tests against the worst case (i.e., longest packet).  FIXME.
		 *
		 * When adding the packet-peek feature, don't forget adding a
		 * test on txb_count against QUEUEING_MAX.
		 * There is a little chance the packet count exceeds
		 * the limit.  Assume transmission buffer is 8KB (2x8KB
		 * configuration) and an application sends a bunch of small
		 * (i.e., minimum packet sized) packets rapidly.  An 8KB
		 * buffer can hold 130 blocks of 62 bytes long...
		 */
		if (sc->txb_free <
		    ETHER_MAX_LEN - ETHER_CRC_LEN + FE_DATA_LEN_LEN) {
			/* No room.  */
			goto indicate_active;
		}

#if FE_SINGLE_TRANSMISSION
		if (sc->txb_count > 0) {
			/* Just one packet per a transmission buffer.  */
			goto indicate_active;
		}
#endif

		/*
		 * Get the next mbuf chain for a packet to send.
		 */
		IF_DEQUEUE(&sc->ifp->if_snd, m);
		if (m == NULL) {
			/* No more packets to send.  */
			goto indicate_inactive;
		}

		/*
		 * Copy the mbuf chain into the transmission buffer.
		 * txb_* variables are updated as necessary.
		 */
		fe_write_mbufs(sc, m);

		/* Start transmitter if it's idle.  */
		if ((sc->txb_count > 0) && (sc->txb_sched == 0))
			fe_xmit(sc);

		/*
		 * Tap off here if there is a bpf listener,
		 * and the device is *not* in promiscuous mode.
* (86960 receives self-generated packets if * and only if it is in "receive everything" * mode.) */ if (!(sc->ifp->if_flags & IFF_PROMISC)) BPF_MTAP(sc->ifp, m); m_freem(m); } indicate_inactive: /* * We are using the !OACTIVE flag to indicate to * the outside world that we can accept an * additional packet rather than that the * transmitter is _actually_ active. Indeed, the * transmitter may be active, but if we haven't * filled all the buffers with data then we still * want to accept more. */ sc->ifp->if_flags &= ~IFF_OACTIVE; return; indicate_active: /* * The transmitter is active, and there are no room for * more outgoing packets in the transmission buffer. */ sc->ifp->if_flags |= IFF_OACTIVE; return; } /* * Drop (skip) a packet from receive buffer in 86960 memory. */ static void fe_droppacket (struct fe_softc * sc, int len) { int i; /* * 86960 manual says that we have to read 8 bytes from the buffer * before skip the packets and that there must be more than 8 bytes * remaining in the buffer when issue a skip command. * Remember, we have already read 4 bytes before come here. */ if (len > 12) { /* Read 4 more bytes, and skip the rest of the packet. */ if ((sc->proto_dlcr6 & FE_D6_SBW) == FE_D6_SBW_BYTE) { (void) fe_inb(sc, FE_BMPR8); (void) fe_inb(sc, FE_BMPR8); (void) fe_inb(sc, FE_BMPR8); (void) fe_inb(sc, FE_BMPR8); } else { (void) fe_inw(sc, FE_BMPR8); (void) fe_inw(sc, FE_BMPR8); } fe_outb(sc, FE_BMPR14, FE_B14_SKIP); } else { /* We should not come here unless receiving RUNTs. */ if ((sc->proto_dlcr6 & FE_D6_SBW) == FE_D6_SBW_BYTE) { for (i = 0; i < len; i++) (void) fe_inb(sc, FE_BMPR8); } else { for (i = 0; i < len; i += 2) (void) fe_inw(sc, FE_BMPR8); } } } #ifdef DIAGNOSTIC /* * Empty receiving buffer. */ static void fe_emptybuffer (struct fe_softc * sc) { int i; u_char saved_dlcr5; #ifdef FE_DEBUG if_printf(sc->ifp, "emptying receive buffer\n"); #endif /* * Stop receiving packets, temporarily. 
*/ saved_dlcr5 = fe_inb(sc, FE_DLCR5); fe_outb(sc, FE_DLCR5, sc->proto_dlcr5); DELAY(1300); /* * When we come here, the receive buffer management may * have been broken. So, we cannot use skip operation. * Just discard everything in the buffer. */ if ((sc->proto_dlcr6 & FE_D6_SBW) == FE_D6_SBW_BYTE) { for (i = 0; i < 65536; i++) { if (fe_inb(sc, FE_DLCR5) & FE_D5_BUFEMP) break; (void) fe_inb(sc, FE_BMPR8); } } else { for (i = 0; i < 65536; i += 2) { if (fe_inb(sc, FE_DLCR5) & FE_D5_BUFEMP) break; (void) fe_inw(sc, FE_BMPR8); } } /* * Double check. */ if (fe_inb(sc, FE_DLCR5) & FE_D5_BUFEMP) { if_printf(sc->ifp, "could not empty receive buffer\n"); /* Hmm. What should I do if this happens? FIXME. */ } /* * Restart receiving packets. */ fe_outb(sc, FE_DLCR5, saved_dlcr5); } #endif /* * Transmission interrupt handler * The control flow of this function looks silly. FIXME. */ static void fe_tint (struct fe_softc * sc, u_char tstat) { int left; int col; /* * Handle "excessive collision" interrupt. */ if (tstat & FE_D0_COLL16) { /* * Find how many packets (including this collided one) * are left unsent in transmission buffer. */ left = fe_inb(sc, FE_BMPR10); if_printf(sc->ifp, "excessive collision (%d/%d)\n", left, sc->txb_sched); /* * Clear the collision flag (in 86960) here * to avoid confusing statistics. */ fe_outb(sc, FE_DLCR0, FE_D0_COLLID); /* * Restart transmitter, skipping the * collided packet. * * We *must* skip the packet to keep network running * properly. Excessive collision error is an * indication of the network overload. If we * tried sending the same packet after excessive * collision, the network would be filled with * out-of-time packets. Packets belonging * to reliable transport (such as TCP) are resent * by some upper layer. */ fe_outb(sc, FE_BMPR11, FE_B11_CTRL_SKIP | FE_B11_MODE1); /* Update statistics. */ sc->tx_excolls++; } /* * Handle "transmission complete" interrupt. 
	 */
	if (tstat & FE_D0_TXDONE) {

		/*
		 * Add in total number of collisions on last
		 * transmission.  We also clear "collision occurred" flag
		 * here.
		 *
		 * 86960 has a design flaw on collision count on multiple
		 * packet transmission.  When we send two or more packets
		 * with one start command (that's what we do when the
		 * transmission queue is crowded), 86960 informs us number
		 * of collisions occurred on the last packet on the
		 * transmission only.  Number of collisions on previous
		 * packets are lost.  I have told that the fact is clearly
		 * stated in the Fujitsu document.
		 *
		 * I considered not to mind it seriously.  Collision
		 * count is not so important, anyway.  Any comments?  FIXME.
		 */
		if (fe_inb(sc, FE_DLCR0) & FE_D0_COLLID) {

			/* Clear collision flag.  */
			fe_outb(sc, FE_DLCR0, FE_D0_COLLID);

			/* Extract collision count from 86960.  */
			col = fe_inb(sc, FE_DLCR4);
			col = (col & FE_D4_COL) >> FE_D4_COL_SHIFT;
			if (col == 0) {
				/*
				 * Status register indicates collisions,
				 * while the collision count is zero.
				 * This can happen after multiple packet
				 * transmission, indicating that one or more
				 * previous packet(s) had been collided.
				 *
				 * Since the accurate number of collisions
				 * has been lost, we just guess it as 1;
				 * Am I too optimistic?  FIXME.
				 */
				col = 1;
			}
			sc->ifp->if_collisions += col;
			if (col == 1)
				sc->mibdata.dot3StatsSingleCollisionFrames++;
			else
				sc->mibdata.dot3StatsMultipleCollisionFrames++;
			sc->mibdata.dot3StatsCollFrequencies[col-1]++;
		}

		/*
		 * Update transmission statistics.
		 * Be sure to reflect number of excessive collisions.
		 */
		col = sc->tx_excolls;
		sc->ifp->if_opackets += sc->txb_sched - col;
		sc->ifp->if_oerrors += col;
		sc->ifp->if_collisions += col * 16;
		sc->mibdata.dot3StatsExcessiveCollisions += col;
		sc->mibdata.dot3StatsCollFrequencies[15] += col;
		sc->txb_sched = 0;

		/*
		 * The transmitter is no more active.
		 * Reset output active flag and watchdog timer.
		 */
		sc->ifp->if_flags &= ~IFF_OACTIVE;
		sc->ifp->if_timer = 0;

		/*
		 * If more data is ready to transmit in the buffer, start
		 * transmitting them.  Otherwise keep transmitter idle,
		 * even if more data is queued.  This gives receive
		 * process a slight priority.
		 */
		if (sc->txb_count > 0)
			fe_xmit(sc);
	}
}

/*
 * Ethernet interface receiver interrupt.
 */
static void
fe_rint (struct fe_softc * sc, u_char rstat)
{
	u_short len;
	u_char status;
	int i;

	/*
	 * Update statistics if this interrupt is caused by an error.
	 * Note that, when the system was not sufficiently fast, the
	 * receive interrupt might not be acknowledged immediately.  If
	 * one or more errornous frames were received before this routine
	 * was scheduled, they are ignored, and the following error stats
	 * give less than real values.
	 */
	if (rstat & (FE_D1_OVRFLO | FE_D1_CRCERR | FE_D1_ALGERR | FE_D1_SRTPKT)) {
		if (rstat & FE_D1_OVRFLO)
			sc->mibdata.dot3StatsInternalMacReceiveErrors++;
		if (rstat & FE_D1_CRCERR)
			sc->mibdata.dot3StatsFCSErrors++;
		if (rstat & FE_D1_ALGERR)
			sc->mibdata.dot3StatsAlignmentErrors++;
#if 0
		/* The reference MAC receiver defined in 802.3
		   silently ignores short frames (RUNTs) without
		   notifying upper layer.  RFC 1650 (dot3 MIB) is
		   based on the 802.3, and it has no stats entry for
		   RUNTs...  */
		if (rstat & FE_D1_SRTPKT)
			sc->mibdata.dot3StatsFrameTooShorts++; /* :-) */
#endif
		sc->ifp->if_ierrors++;
	}

	/*
	 * MB86960 has a flag indicating "receive queue empty."
	 * We just loop, checking the flag, to pull out all received
	 * packets.
	 *
	 * We limit the number of iterations to avoid infinite-loop.
	 * The upper bound is set to unrealistic high value.
	 */
	for (i = 0; i < FE_MAX_RECV_COUNT * 2; i++) {

		/* Stop the iteration if 86960 indicates no packets.  */
		if (fe_inb(sc, FE_DLCR5) & FE_D5_BUFEMP)
			return;

		/*
		 * Extract a receive status byte.
		 * As our 86960 is in 16 bit bus access mode, we have to
		 * use inw() to get the status byte.  The significant
		 * value is returned in lower 8 bits.
		 */
		if ((sc->proto_dlcr6 & FE_D6_SBW) == FE_D6_SBW_BYTE) {
			status = fe_inb(sc, FE_BMPR8);
			(void) fe_inb(sc, FE_BMPR8);
		} else {
			status = (u_char) fe_inw(sc, FE_BMPR8);
		}

		/*
		 * Extract the packet length.
		 * It is a sum of a header (14 bytes) and a payload.
		 * CRC has been stripped off by the 86960.
		 */
		if ((sc->proto_dlcr6 & FE_D6_SBW) == FE_D6_SBW_BYTE) {
			len = fe_inb(sc, FE_BMPR8);
			len |= (fe_inb(sc, FE_BMPR8) << 8);
		} else {
			len = fe_inw(sc, FE_BMPR8);
		}

		/*
		 * AS our 86960 is programed to ignore errored frame,
		 * we must not see any error indication in the
		 * receive buffer.  So, any error condition is a
		 * serious error, e.g., out-of-sync of the receive
		 * buffer pointers.
		 */
		if ((status & 0xF0) != 0x20 ||
		    len > ETHER_MAX_LEN - ETHER_CRC_LEN ||
		    len < ETHER_MIN_LEN - ETHER_CRC_LEN) {
			if_printf(sc->ifp, "RX buffer out-of-sync\n");
			sc->ifp->if_ierrors++;
			sc->mibdata.dot3StatsInternalMacReceiveErrors++;
			fe_reset(sc);
			return;
		}

		/*
		 * Go get a packet.
		 */
		if (fe_get_packet(sc, len) < 0) {
			/*
			 * Negative return from fe_get_packet()
			 * indicates no available mbuf.  We stop
			 * receiving packets, even if there are more
			 * in the buffer.  We hope we can get more
			 * mbuf next time.
			 */
			sc->ifp->if_ierrors++;
			sc->mibdata.dot3StatsMissedFrames++;
			fe_droppacket(sc, len);
			return;
		}

		/* Successfully received a packet.  Update stat.  */
		sc->ifp->if_ipackets++;
	}

	/* Maximum number of frames has been received.  Something
	   strange is happening here...  */
	if_printf(sc->ifp, "unusual receive flood\n");
	sc->mibdata.dot3StatsInternalMacReceiveErrors++;
	fe_reset(sc);
}

/*
 * Ethernet interface interrupt processor
 */
static void
fe_intr (void *arg)
{
	struct fe_softc *sc = arg;
	u_char tstat, rstat;
	int loop_count = FE_MAX_LOOP;

	/* Loop until there are no more new interrupt conditions.  */
	while (loop_count-- > 0) {
		/*
		 * Get interrupt conditions, masking unneeded flags.
		 */
		tstat = fe_inb(sc, FE_DLCR0) & FE_TMASK;
		rstat = fe_inb(sc, FE_DLCR1) & FE_RMASK;
		if (tstat == 0 && rstat == 0)
			return;

		/*
		 * Reset the conditions we are acknowledging.
		 */
		fe_outb(sc, FE_DLCR0, tstat);
		fe_outb(sc, FE_DLCR1, rstat);

		/*
		 * Handle transmitter interrupts.
		 */
		if (tstat)
			fe_tint(sc, tstat);

		/*
		 * Handle receiver interrupts
		 */
		if (rstat)
			fe_rint(sc, rstat);

		/*
		 * Update the multicast address filter if it is
		 * needed and possible.  We do it now, because
		 * we can make sure the transmission buffer is empty,
		 * and there is a good chance that the receive queue
		 * is empty.  It will minimize the possibility of
		 * packet loss.
		 */
		if (sc->filter_change &&
		    sc->txb_count == 0 && sc->txb_sched == 0) {
			fe_loadmar(sc);
			sc->ifp->if_flags &= ~IFF_OACTIVE;
		}

		/*
		 * If it looks like the transmitter can take more data,
		 * attempt to start output on the interface.  This is done
		 * after handling the receiver interrupt to give the
		 * receive operation priority.
		 *
		 * BTW, I'm not sure in what case the OACTIVE is on at
		 * this point.  Is the following test redundant?
		 *
		 * No.  This routine polls for both transmitter and
		 * receiver interrupts.  86960 can raise a receiver
		 * interrupt when the transmission buffer is full.
		 */
		if ((sc->ifp->if_flags & IFF_OACTIVE) == 0)
			fe_start(sc->ifp);
	}

	if_printf(sc->ifp, "too many loops\n");
}

/*
 * Process an ioctl request.  This code needs some work - it looks
 * pretty ugly.
 */
static int
fe_ioctl (struct ifnet * ifp, u_long command, caddr_t data)
{
	struct fe_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splimp();

	switch (command) {

	  case SIOCSIFFLAGS:
		/*
		 * Switch interface state between "running" and
		 * "stopped", reflecting the UP flag.
		 */
		if (sc->ifp->if_flags & IFF_UP) {
			if ((sc->ifp->if_flags & IFF_RUNNING) == 0)
				fe_init(sc);
		} else {
			if ((sc->ifp->if_flags & IFF_RUNNING) != 0)
				fe_stop(sc);
		}

		/*
		 * Promiscuous and/or multicast flags may have changed,
		 * so reprogram the multicast filter and/or receive mode.
*/
		fe_setmode(sc);

		/* Done. */
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		fe_setmode(sc);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Let if_media to handle these commands and to call us back. */
		error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
		break;

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	(void) splx(s);
	return (error);
}

/*
 * Retrieve packet from receive buffer and send to the next level up via
 * ether_input().
 * Returns 0 if success, -1 if error (i.e., mbuf allocation failure).
 */
static int
fe_get_packet (struct fe_softc * sc, u_short len)
{
	struct ifnet *ifp = sc->ifp;
	struct ether_header *eh;
	struct mbuf *m;

	/*
	 * NFS wants the data be aligned to the word (4 byte)
	 * boundary.  Ethernet header has 14 bytes.  There is a
	 * 2-byte gap.
	 */
#define NFS_MAGIC_OFFSET 2

	/*
	 * This function assumes that an Ethernet packet fits in an
	 * mbuf (with a cluster attached when necessary.)  On FreeBSD
	 * 2.0 for x86, which is the primary target of this driver, an
	 * mbuf cluster has 4096 bytes, and we are happy.  On ancient
	 * BSDs, such as vanilla 4.3 for 386, a cluster size was 1024,
	 * however.  If the following #error message were printed upon
	 * compile, you need to rewrite this function.
	 */
#if ( MCLBYTES < ETHER_MAX_LEN - ETHER_CRC_LEN + NFS_MAGIC_OFFSET )
#error "Too small MCLBYTES to use fe driver."
#endif

	/*
	 * Our strategy has one more problem.  There is a policy on
	 * mbuf cluster allocation.  It says that we must have at
	 * least MINCLSIZE (208 bytes on FreeBSD 2.0 for x86) to
	 * allocate a cluster.  For a packet of a size between
	 * (MHLEN - 2) to (MINCLSIZE - 2), our code violates the rule...
	 * On the other hand, the current code is short, simple,
	 * and fast, however.  It does no harmful thing, just waists
	 * some memory.  Any comments?  FIXME.
	 */

	/* Allocate an mbuf with packet header info. */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return -1;

	/* Attach a cluster if this packet doesn't fit in a normal mbuf. */
	if (len > MHLEN - NFS_MAGIC_OFFSET) {
		MCLGET(m, M_DONTWAIT);
		if (!(m->m_flags & M_EXT)) {
			m_freem(m);
			return -1;
		}
	}

	/* Initialize packet header info. */
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = len;

	/* Set the length of this packet. */
	m->m_len = len;

	/* The following silliness is to make NFS happy */
	m->m_data += NFS_MAGIC_OFFSET;

	/* Get (actually just point to) the header part. */
	eh = mtod(m, struct ether_header *);

	/* Get a packet. */
	if ((sc->proto_dlcr6 & FE_D6_SBW) == FE_D6_SBW_BYTE) {
		fe_insb(sc, FE_BMPR8, (u_int8_t *)eh, len);
	} else {
		fe_insw(sc, FE_BMPR8, (u_int16_t *)eh, (len + 1) >> 1);
	}

	/* Feed the packet to upper layer. */
	(*ifp->if_input)(ifp, m);
	return 0;
}

/*
 * Write an mbuf chain to the transmission buffer memory using 16 bit PIO.
 * Returns number of bytes actually written, including length word.
 *
 * If an mbuf chain is too long for an Ethernet frame, it is not sent.
 * Packets shorter than Ethernet minimum are legal, and we pad them
 * before sending out.  An exception is "partial" packets which are
 * shorter than mandatory Ethernet header.
 */
static void
fe_write_mbufs (struct fe_softc *sc, struct mbuf *m)
{
	u_short length, len;
	struct mbuf *mp;
	u_char *data;
	u_short savebyte;	/* WARNING: Architecture dependent! */
#define NO_PENDING_BYTE 0xFFFF

	static u_char padding [ETHER_MIN_LEN - ETHER_CRC_LEN - ETHER_HDR_LEN];

#ifdef DIAGNOSTIC
	/* First, count up the total number of bytes to copy */
	length = 0;
	for (mp = m; mp != NULL; mp = mp->m_next)
		length += mp->m_len;

	/* Check if this matches the one in the packet header. */
	if (length != m->m_pkthdr.len) {
		if_printf(sc->ifp, "packet length mismatch? (%d/%d)\n",
		    length, m->m_pkthdr.len);
	}
#else
	/* Just use the length value in the packet header. */
	length = m->m_pkthdr.len;
#endif

#ifdef DIAGNOSTIC
	/*
	 * Should never send big packets.
If such a packet is passed,
	 * it should be a bug of upper layer.  We just ignore it.
	 * ... Partial (too short) packets, neither.
	 */
	if (length < ETHER_HDR_LEN ||
	    length > ETHER_MAX_LEN - ETHER_CRC_LEN) {
		if_printf(sc->ifp,
		    "got an out-of-spec packet (%u bytes) to send\n", length);
		sc->ifp->if_oerrors++;
		sc->mibdata.dot3StatsInternalMacTransmitErrors++;
		return;
	}
#endif

	/*
	 * Put the length word for this frame.
	 * Does 86960 accept odd length?  -- Yes.
	 * Do we need to pad the length to minimum size by ourselves?
	 * -- Generally yes.  But for (or will be) the last
	 * packet in the transmission buffer, we can skip the
	 * padding process.  It may gain performance slightly.  FIXME.
	 */
	if ((sc->proto_dlcr6 & FE_D6_SBW) == FE_D6_SBW_BYTE) {
		len = max(length, ETHER_MIN_LEN - ETHER_CRC_LEN);
		fe_outb(sc, FE_BMPR8, len & 0x00ff);
		fe_outb(sc, FE_BMPR8, (len & 0xff00) >> 8);
	} else {
		fe_outw(sc, FE_BMPR8,
		    max(length, ETHER_MIN_LEN - ETHER_CRC_LEN));
	}

	/*
	 * Update buffer status now.
	 * Truncate the length up to an even number, since we use outw().
	 */
	if ((sc->proto_dlcr6 & FE_D6_SBW) != FE_D6_SBW_BYTE) {
		length = (length + 1) & ~1;
	}
	sc->txb_free -= FE_DATA_LEN_LEN +
	    max(length, ETHER_MIN_LEN - ETHER_CRC_LEN);
	sc->txb_count++;

	/*
	 * Transfer the data from mbuf chain to the transmission buffer.
	 * MB86960 seems to require that data be transferred as words, and
	 * only words.  So that we require some extra code to patch
	 * over odd-length mbufs.
	 */
	if ((sc->proto_dlcr6 & FE_D6_SBW) == FE_D6_SBW_BYTE) {
		/* 8-bit cards are easy. */
		for (mp = m; mp != 0; mp = mp->m_next) {
			if (mp->m_len)
				fe_outsb(sc, FE_BMPR8, mtod(mp, caddr_t),
				    mp->m_len);
		}
	} else {
		/* 16-bit cards are a pain. */
		savebyte = NO_PENDING_BYTE;
		for (mp = m; mp != 0; mp = mp->m_next) {
			/* Ignore empty mbuf. */
			len = mp->m_len;
			if (len == 0)
				continue;

			/* Find the actual data to send. */
			data = mtod(mp, caddr_t);

			/* Finish the last byte. */
			if (savebyte != NO_PENDING_BYTE) {
				fe_outw(sc, FE_BMPR8, savebyte | (*data << 8));
				data++;
				len--;
				savebyte = NO_PENDING_BYTE;
			}

			/* output contiguous words */
			if (len > 1) {
				fe_outsw(sc, FE_BMPR8, (u_int16_t *)data,
				    len >> 1);
				data += len & ~1;
				len &= 1;
			}

			/* Save a remaining byte, if there is one. */
			if (len > 0)
				savebyte = *data;
		}

		/* Spit the last byte, if the length is odd. */
		if (savebyte != NO_PENDING_BYTE)
			fe_outw(sc, FE_BMPR8, savebyte);
	}

	/* Pad to the Ethernet minimum length, if the packet is too short. */
	if (length < ETHER_MIN_LEN - ETHER_CRC_LEN) {
		if ((sc->proto_dlcr6 & FE_D6_SBW) == FE_D6_SBW_BYTE) {
			fe_outsb(sc, FE_BMPR8, padding,
			    ETHER_MIN_LEN - ETHER_CRC_LEN - length);
		} else {
			fe_outsw(sc, FE_BMPR8, (u_int16_t *)padding,
			    (ETHER_MIN_LEN - ETHER_CRC_LEN - length) >> 1);
		}
	}
}

/*
 * Compute the multicast address filter from the
 * list of multicast addresses we need to listen to.
 */
static struct fe_filter
fe_mcaf ( struct fe_softc *sc )
{
	int index;
	struct fe_filter filter;
	struct ifmultiaddr *ifma;

	filter = fe_filter_nothing;
+	IF_ADDR_LOCK(sc->ifp);
	TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		index = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
#ifdef FE_DEBUG
		if_printf(sc->ifp, "hash(%6D) == %d\n",
			enm->enm_addrlo , ":", index);
#endif

		filter.data[index >> 3] |= 1 << (index & 7);
	}
+	IF_ADDR_UNLOCK(sc->ifp);
	return ( filter );
}

/*
 * Calculate a new "multicast packet filter" and put the 86960
 * receiver in appropriate mode.
 */
static void
fe_setmode (struct fe_softc *sc)
{
	int flags = sc->ifp->if_flags;

	/*
	 * If the interface is not running, we postpone the update
	 * process for receive modes and multicast address filter
	 * until the interface is restarted.  It reduces some
	 * complicated job on maintaining chip states.  (Earlier versions
	 * of this driver had a bug on that point...)
	 *
	 * To complete the trick, fe_init() calls fe_setmode() after
	 * restarting the interface.
	 */
	if (!(flags & IFF_RUNNING))
		return;

	/*
	 * Promiscuous mode is handled separately.
	 */
	if (flags & IFF_PROMISC) {
		/*
		 * Program 86960 to receive all packets on the segment
		 * including those directed to other stations.
		 * Multicast filter stored in MARs are ignored
		 * under this setting, so we don't need to update it.
		 *
		 * Promiscuous mode in FreeBSD 2 is used solely by
		 * BPF, and BPF only listens to valid (no error) packets.
		 * So, we ignore erroneous ones even in this mode.
		 * (Older versions of fe driver mistook the point.)
		 */
		fe_outb(sc, FE_DLCR5,
		    sc->proto_dlcr5 | FE_D5_AFM0 | FE_D5_AFM1);
		sc->filter_change = 0;
		return;
	}

	/*
	 * Turn the chip to the normal (non-promiscuous) mode.
	 */
	fe_outb(sc, FE_DLCR5, sc->proto_dlcr5 | FE_D5_AFM1);

	/*
	 * Find the new multicast filter value.
	 */
	if (flags & IFF_ALLMULTI)
		sc->filter = fe_filter_all;
	else
		sc->filter = fe_mcaf(sc);
	sc->filter_change = 1;

	/*
	 * We have to update the multicast filter in the 86960, A.S.A.P.
	 *
	 * Note that the DLC (Data Link Control unit, i.e. transmitter
	 * and receiver) must be stopped when feeding the filter, and
	 * DLC trashes all packets in both transmission and receive
	 * buffers when stopped.
	 *
	 * To reduce the packet loss, we delay the filter update
	 * process until buffers are empty.
	 */
	if (sc->txb_sched == 0 && sc->txb_count == 0 &&
	    !(fe_inb(sc, FE_DLCR1) & FE_D1_PKTRDY)) {
		/*
		 * Buffers are (apparently) empty.  Load
		 * the new filter value into MARs now.
		 */
		fe_loadmar(sc);
	} else {
		/*
		 * Buffers are not empty.  Mark that we have to update
		 * the MARs.  The new filter will be loaded by feintr()
		 * later.
		 */
	}
}

/*
 * Load a new multicast address filter into MARs.
 *
 * The caller must have splimp'ed before fe_loadmar.
 * This function starts the DLC upon return.  So it can be called only
 * when the chip is working, i.e., from the driver's point of view, when
 * a device is RUNNING.  (I mistook the point in previous versions.)
*/
static void
fe_loadmar (struct fe_softc * sc)
{
	/*
	 * NOTE(review): the DELAY(200) pairs bracket the DLC stop/start;
	 * the chip's buffers are trashed while the DLC is stopped (see
	 * the comment in fe_setmode), so do not reorder these register
	 * accesses.
	 */

	/* Stop the DLC (transmitter and receiver). */
	DELAY(200);
	fe_outb(sc, FE_DLCR6, sc->proto_dlcr6 | FE_D6_DLC_DISABLE);
	DELAY(200);

	/* Select register bank 1 for MARs. */
	fe_outb(sc, FE_DLCR7,
	    sc->proto_dlcr7 | FE_D7_RBS_MAR | FE_D7_POWER_UP);

	/* Copy filter value into the registers. */
	fe_outblk(sc, FE_MAR8, sc->filter.data, FE_FILTER_LEN);

	/* Restore the bank selection for BMPRs (i.e., runtime registers). */
	fe_outb(sc, FE_DLCR7,
	    sc->proto_dlcr7 | FE_D7_RBS_BMPR | FE_D7_POWER_UP);

	/* Restart the DLC. */
	DELAY(200);
	fe_outb(sc, FE_DLCR6, sc->proto_dlcr6 | FE_D6_DLC_ENABLE);
	DELAY(200);

	/* We have just updated the filter. */
	sc->filter_change = 0;
}

/* Change the media selection. */
static int
fe_medchange (struct ifnet *ifp)
{
	struct fe_softc *sc = (struct fe_softc *)ifp->if_softc;

#ifdef DIAGNOSTIC
	/* If_media should not pass any request for a media which this
	   interface doesn't support. */
	int b;

	for (b = 0; bit2media[b] != 0; b++) {
		if (bit2media[b] == sc->media.ifm_media)
			break;
	}
	if (((1 << b) & sc->mbitmap) == 0) {
		if_printf(sc->ifp,
		    "got an unsupported media request (0x%x)\n",
		    sc->media.ifm_media);
		return EINVAL;
	}
#endif

	/* We don't actually change media when the interface is down.
	   fe_init() will do the job, instead.  Should we also wait
	   until the transmission buffer being empty?  Changing the
	   media when we are sending a frame will cause two garbages
	   on wires, one on old media and another on new.  FIXME */
	if (sc->ifp->if_flags & IFF_UP) {
		if (sc->msel)
			sc->msel(sc);
	}

	return 0;
}

/* I don't know how I can support media status callback... FIXME.
*/ static void fe_medstat (struct ifnet *ifp, struct ifmediareq *ifmr) { (void)ifp; (void)ifmr; } Index: stable/6/sys/dev/fxp/if_fxp.c =================================================================== --- stable/6/sys/dev/fxp/if_fxp.c (revision 149421) +++ stable/6/sys/dev/fxp/if_fxp.c (revision 149422) @@ -1,2722 +1,2724 @@ /*- * Copyright (c) 1995, David Greenman * Copyright (c) 2001 Jonathan Lemon * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include __FBSDID("$FreeBSD$"); /* * Intel EtherExpress Pro/100B PCI Fast Ethernet driver */ #include #include #include #include /* #include */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for DELAY */ #include #include #ifdef FXP_IP_CSUM_WAR #include #include #include #include #endif #include #include /* for PCIM_CMD_xxx */ #include #include #include #include #include MODULE_DEPEND(fxp, pci, 1, 1, 1); MODULE_DEPEND(fxp, ether, 1, 1, 1); MODULE_DEPEND(fxp, miibus, 1, 1, 1); #include "miibus_if.h" /* * NOTE! On the Alpha, we have an alignment constraint. The * card DMAs the packet immediately following the RFA. However, * the first thing in the packet is a 14-byte Ethernet header. * This means that the packet is misaligned. To compensate, * we actually offset the RFA 2 bytes into the cluster. This * alignes the packet after the Ethernet header at a 32-bit * boundary. HOWEVER! This means that the RFA is misaligned! */ #define RFA_ALIGNMENT_FUDGE 2 /* * Set initial transmit threshold at 64 (512 bytes). This is * increased by 64 (512 bytes) at a time, to maximum of 192 * (1536 bytes), if an underrun occurs. */ static int tx_threshold = 64; /* * The configuration byte map has several undefined fields which * must be one or must be zero. Set up a template for these bits * only, (assuming a 82557 chip) leaving the actual configuration * to fxp_init. * * See struct fxp_cb_config for the bit definitions. 
*/ static u_char fxp_cb_config_template[] = { 0x0, 0x0, /* cb_status */ 0x0, 0x0, /* cb_command */ 0x0, 0x0, 0x0, 0x0, /* link_addr */ 0x0, /* 0 */ 0x0, /* 1 */ 0x0, /* 2 */ 0x0, /* 3 */ 0x0, /* 4 */ 0x0, /* 5 */ 0x32, /* 6 */ 0x0, /* 7 */ 0x0, /* 8 */ 0x0, /* 9 */ 0x6, /* 10 */ 0x0, /* 11 */ 0x0, /* 12 */ 0x0, /* 13 */ 0xf2, /* 14 */ 0x48, /* 15 */ 0x0, /* 16 */ 0x40, /* 17 */ 0xf0, /* 18 */ 0x0, /* 19 */ 0x3f, /* 20 */ 0x5 /* 21 */ }; struct fxp_ident { uint16_t devid; int16_t revid; /* -1 matches anything */ char *name; }; /* * Claim various Intel PCI device identifiers for this driver. The * sub-vendor and sub-device field are extensively used to identify * particular variants, but we don't currently differentiate between * them. */ static struct fxp_ident fxp_ident_table[] = { { 0x1029, -1, "Intel 82559 PCI/CardBus Pro/100" }, { 0x1030, -1, "Intel 82559 Pro/100 Ethernet" }, { 0x1031, -1, "Intel 82801CAM (ICH3) Pro/100 VE Ethernet" }, { 0x1032, -1, "Intel 82801CAM (ICH3) Pro/100 VE Ethernet" }, { 0x1033, -1, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" }, { 0x1034, -1, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" }, { 0x1035, -1, "Intel 82801CAM (ICH3) Pro/100 Ethernet" }, { 0x1036, -1, "Intel 82801CAM (ICH3) Pro/100 Ethernet" }, { 0x1037, -1, "Intel 82801CAM (ICH3) Pro/100 Ethernet" }, { 0x1038, -1, "Intel 82801CAM (ICH3) Pro/100 VM Ethernet" }, { 0x1039, -1, "Intel 82801DB (ICH4) Pro/100 VE Ethernet" }, { 0x103A, -1, "Intel 82801DB (ICH4) Pro/100 Ethernet" }, { 0x103B, -1, "Intel 82801DB (ICH4) Pro/100 VM Ethernet" }, { 0x103C, -1, "Intel 82801DB (ICH4) Pro/100 Ethernet" }, { 0x103D, -1, "Intel 82801DB (ICH4) Pro/100 VE Ethernet" }, { 0x103E, -1, "Intel 82801DB (ICH4) Pro/100 VM Ethernet" }, { 0x1050, -1, "Intel 82801BA (D865) Pro/100 VE Ethernet" }, { 0x1051, -1, "Intel 82562ET (ICH5/ICH5R) Pro/100 VE Ethernet" }, { 0x1059, -1, "Intel 82551QM Pro/100 M Mobile Connection" }, { 0x1064, -1, "Intel 82562EZ (ICH6)" }, { 0x1068, -1, "Intel 82801FBM (ICH6-M) Pro/100 
VE Ethernet" }, { 0x1209, -1, "Intel 82559ER Embedded 10/100 Ethernet" }, { 0x1229, 0x01, "Intel 82557 Pro/100 Ethernet" }, { 0x1229, 0x02, "Intel 82557 Pro/100 Ethernet" }, { 0x1229, 0x03, "Intel 82557 Pro/100 Ethernet" }, { 0x1229, 0x04, "Intel 82558 Pro/100 Ethernet" }, { 0x1229, 0x05, "Intel 82558 Pro/100 Ethernet" }, { 0x1229, 0x06, "Intel 82559 Pro/100 Ethernet" }, { 0x1229, 0x07, "Intel 82559 Pro/100 Ethernet" }, { 0x1229, 0x08, "Intel 82559 Pro/100 Ethernet" }, { 0x1229, 0x09, "Intel 82559ER Pro/100 Ethernet" }, { 0x1229, 0x0c, "Intel 82550 Pro/100 Ethernet" }, { 0x1229, 0x0d, "Intel 82550 Pro/100 Ethernet" }, { 0x1229, 0x0e, "Intel 82550 Pro/100 Ethernet" }, { 0x1229, 0x0f, "Intel 82551 Pro/100 Ethernet" }, { 0x1229, 0x10, "Intel 82551 Pro/100 Ethernet" }, { 0x1229, -1, "Intel 82557/8/9 Pro/100 Ethernet" }, { 0x2449, -1, "Intel 82801BA/CAM (ICH2/3) Pro/100 Ethernet" }, { 0x27dc, -1, "Intel 82801GB (ICH7) 10/100 Ethernet" }, { 0, -1, NULL }, }; #ifdef FXP_IP_CSUM_WAR #define FXP_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) #else #define FXP_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) #endif static int fxp_probe(device_t dev); static int fxp_attach(device_t dev); static int fxp_detach(device_t dev); static int fxp_shutdown(device_t dev); static int fxp_suspend(device_t dev); static int fxp_resume(device_t dev); static void fxp_intr(void *xsc); static void fxp_intr_body(struct fxp_softc *sc, struct ifnet *ifp, uint8_t statack, int count); static void fxp_init(void *xsc); static void fxp_init_body(struct fxp_softc *sc); static void fxp_tick(void *xsc); static void fxp_start(struct ifnet *ifp); static void fxp_start_body(struct ifnet *ifp); static int fxp_encap(struct fxp_softc *sc, struct mbuf *m_head); static void fxp_stop(struct fxp_softc *sc); static void fxp_release(struct fxp_softc *sc); static int fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data); static void fxp_watchdog(struct ifnet *ifp); static int fxp_add_rfabuf(struct fxp_softc *sc, struct fxp_rx 
*rxp); static int fxp_mc_addrs(struct fxp_softc *sc); static void fxp_mc_setup(struct fxp_softc *sc); static uint16_t fxp_eeprom_getword(struct fxp_softc *sc, int offset, int autosize); static void fxp_eeprom_putword(struct fxp_softc *sc, int offset, uint16_t data); static void fxp_autosize_eeprom(struct fxp_softc *sc); static void fxp_read_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words); static void fxp_write_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words); static int fxp_ifmedia_upd(struct ifnet *ifp); static void fxp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr); static int fxp_serial_ifmedia_upd(struct ifnet *ifp); static void fxp_serial_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr); static volatile int fxp_miibus_readreg(device_t dev, int phy, int reg); static void fxp_miibus_writereg(device_t dev, int phy, int reg, int value); static void fxp_load_ucode(struct fxp_softc *sc); static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high); static int sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS); static int sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS); static void fxp_scb_wait(struct fxp_softc *sc); static void fxp_scb_cmd(struct fxp_softc *sc, int cmd); static void fxp_dma_wait(struct fxp_softc *sc, volatile uint16_t *status, bus_dma_tag_t dmat, bus_dmamap_t map); static device_method_t fxp_methods[] = { /* Device interface */ DEVMETHOD(device_probe, fxp_probe), DEVMETHOD(device_attach, fxp_attach), DEVMETHOD(device_detach, fxp_detach), DEVMETHOD(device_shutdown, fxp_shutdown), DEVMETHOD(device_suspend, fxp_suspend), DEVMETHOD(device_resume, fxp_resume), /* MII interface */ DEVMETHOD(miibus_readreg, fxp_miibus_readreg), DEVMETHOD(miibus_writereg, fxp_miibus_writereg), { 0, 0 } }; static driver_t fxp_driver = { "fxp", fxp_methods, sizeof(struct fxp_softc), }; static devclass_t fxp_devclass; DRIVER_MODULE(fxp, pci, fxp_driver, fxp_devclass, 0, 0); DRIVER_MODULE(fxp, cardbus, fxp_driver, 
fxp_devclass, 0, 0); DRIVER_MODULE(miibus, fxp, miibus_driver, miibus_devclass, 0, 0); /* * Wait for the previous command to be accepted (but not necessarily * completed). */ static void fxp_scb_wait(struct fxp_softc *sc) { int i = 10000; while (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) && --i) DELAY(2); if (i == 0) device_printf(sc->dev, "SCB timeout: 0x%x 0x%x 0x%x 0x%x\n", CSR_READ_1(sc, FXP_CSR_SCB_COMMAND), CSR_READ_1(sc, FXP_CSR_SCB_STATACK), CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS), CSR_READ_2(sc, FXP_CSR_FLOWCONTROL)); } static void fxp_scb_cmd(struct fxp_softc *sc, int cmd) { if (cmd == FXP_SCB_COMMAND_CU_RESUME && sc->cu_resume_bug) { CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, FXP_CB_COMMAND_NOP); fxp_scb_wait(sc); } CSR_WRITE_1(sc, FXP_CSR_SCB_COMMAND, cmd); } static void fxp_dma_wait(struct fxp_softc *sc, volatile uint16_t *status, bus_dma_tag_t dmat, bus_dmamap_t map) { int i = 10000; bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD); while (!(le16toh(*status) & FXP_CB_STATUS_C) && --i) { DELAY(2); bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD); } if (i == 0) device_printf(sc->dev, "DMA timeout\n"); } /* * Return identification string if this device is ours. 
*/ static int fxp_probe(device_t dev) { uint16_t devid; uint8_t revid; struct fxp_ident *ident; if (pci_get_vendor(dev) == FXP_VENDORID_INTEL) { devid = pci_get_device(dev); revid = pci_get_revid(dev); for (ident = fxp_ident_table; ident->name != NULL; ident++) { if (ident->devid == devid && (ident->revid == revid || ident->revid == -1)) { device_set_desc(dev, ident->name); return (BUS_PROBE_DEFAULT); } } } return (ENXIO); } static void fxp_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { uint32_t *addr; if (error) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); addr = arg; *addr = segs->ds_addr; } static int fxp_attach(device_t dev) { struct fxp_softc *sc; struct fxp_cb_tx *tcbp; struct fxp_tx *txp; struct fxp_rx *rxp; struct ifnet *ifp; uint32_t val; uint16_t data, myea[ETHER_ADDR_LEN / 2]; u_char eaddr[ETHER_ADDR_LEN]; int i, rid, m1, m2, prefer_iomap; int error, s; error = 0; sc = device_get_softc(dev); sc->dev = dev; callout_init(&sc->stat_ch, CALLOUT_MPSAFE); mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); ifmedia_init(&sc->sc_media, 0, fxp_serial_ifmedia_upd, fxp_serial_ifmedia_sts); s = splimp(); ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } /* * Enable bus mastering. */ pci_enable_busmaster(dev); val = pci_read_config(dev, PCIR_COMMAND, 2); /* * Figure out which we should try first - memory mapping or i/o mapping? * We default to memory mapping. Then we accept an override from the * command line. Then we check to see which one is enabled. */ m1 = PCIM_CMD_MEMEN; m2 = PCIM_CMD_PORTEN; prefer_iomap = 0; if (resource_int_value(device_get_name(dev), device_get_unit(dev), "prefer_iomap", &prefer_iomap) == 0 && prefer_iomap != 0) { m1 = PCIM_CMD_PORTEN; m2 = PCIM_CMD_MEMEN; } sc->rtp = (m1 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; sc->rgd = (m1 == PCIM_CMD_MEMEN)? 
FXP_PCI_MMBA : FXP_PCI_IOBA; sc->mem = bus_alloc_resource_any(dev, sc->rtp, &sc->rgd, RF_ACTIVE); if (sc->mem == NULL) { sc->rtp = (m2 == PCIM_CMD_MEMEN)? SYS_RES_MEMORY : SYS_RES_IOPORT; sc->rgd = (m2 == PCIM_CMD_MEMEN)? FXP_PCI_MMBA : FXP_PCI_IOBA; sc->mem = bus_alloc_resource_any(dev, sc->rtp, &sc->rgd, RF_ACTIVE); } if (!sc->mem) { error = ENXIO; goto fail; } if (bootverbose) { device_printf(dev, "using %s space register mapping\n", sc->rtp == SYS_RES_MEMORY? "memory" : "I/O"); } sc->sc_st = rman_get_bustag(sc->mem); sc->sc_sh = rman_get_bushandle(sc->mem); /* * Allocate our interrupt. */ rid = 0; sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->irq == NULL) { device_printf(dev, "could not map interrupt\n"); error = ENXIO; goto fail; } /* * Reset to a stable state. */ CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET); DELAY(10); /* * Find out how large of an SEEPROM we have. */ fxp_autosize_eeprom(sc); /* * Find out the chip revision; lump all 82557 revs together. */ fxp_read_eeprom(sc, &data, 5, 1); if ((data >> 8) == 1) sc->revision = FXP_REV_82557; else sc->revision = pci_get_revid(dev); /* * Determine whether we must use the 503 serial interface. 
*/ fxp_read_eeprom(sc, &data, 6, 1); if (sc->revision == FXP_REV_82557 && (data & FXP_PHY_DEVICE_MASK) != 0 && (data & FXP_PHY_SERIAL_ONLY)) sc->flags |= FXP_FLAG_SERIAL_MEDIA; SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "int_delay", CTLTYPE_INT | CTLFLAG_RW, &sc->tunable_int_delay, 0, sysctl_hw_fxp_int_delay, "I", "FXP driver receive interrupt microcode bundling delay"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "bundle_max", CTLTYPE_INT | CTLFLAG_RW, &sc->tunable_bundle_max, 0, sysctl_hw_fxp_bundle_max, "I", "FXP driver receive interrupt microcode bundle size limit"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rnr", CTLFLAG_RD, &sc->rnr, 0, "FXP RNR events"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "noflow", CTLFLAG_RW, &sc->tunable_noflow, 0, "FXP flow control disabled"); /* * Pull in device tunables. */ sc->tunable_int_delay = TUNABLE_INT_DELAY; sc->tunable_bundle_max = TUNABLE_BUNDLE_MAX; sc->tunable_noflow = 1; (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "int_delay", &sc->tunable_int_delay); (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "bundle_max", &sc->tunable_bundle_max); (void) resource_int_value(device_get_name(dev), device_get_unit(dev), "noflow", &sc->tunable_noflow); sc->rnr = 0; /* * Enable workarounds for certain chip revision deficiencies. * * Systems based on the ICH2/ICH2-M chip from Intel, and possibly * some systems based a normal 82559 design, have a defect where * the chip can cause a PCI protocol violation if it receives * a CU_RESUME command when it is entering the IDLE state. The * workaround is to disable Dynamic Standby Mode, so the chip never * deasserts CLKRUN#, and always remains in an active state. 
* * See Intel 82801BA/82801BAM Specification Update, Errata #30. */ i = pci_get_device(dev); if (i == 0x2449 || (i > 0x1030 && i < 0x1039) || sc->revision >= FXP_REV_82559_A0) { fxp_read_eeprom(sc, &data, 10, 1); if (data & 0x02) { /* STB enable */ uint16_t cksum; int i; device_printf(dev, "Disabling dynamic standby mode in EEPROM\n"); data &= ~0x02; fxp_write_eeprom(sc, &data, 10, 1); device_printf(dev, "New EEPROM ID: 0x%x\n", data); cksum = 0; for (i = 0; i < (1 << sc->eeprom_size) - 1; i++) { fxp_read_eeprom(sc, &data, i, 1); cksum += data; } i = (1 << sc->eeprom_size) - 1; cksum = 0xBABA - cksum; fxp_read_eeprom(sc, &data, i, 1); fxp_write_eeprom(sc, &cksum, i, 1); device_printf(dev, "EEPROM checksum @ 0x%x: 0x%x -> 0x%x\n", i, data, cksum); #if 1 /* * If the user elects to continue, try the software * workaround, as it is better than nothing. */ sc->flags |= FXP_FLAG_CU_RESUME_BUG; #endif } } /* * If we are not a 82557 chip, we can enable extended features. */ if (sc->revision != FXP_REV_82557) { /* * If MWI is enabled in the PCI configuration, and there * is a valid cacheline size (8 or 16 dwords), then tell * the board to turn on MWI. */ if (val & PCIM_CMD_MWRICEN && pci_read_config(dev, PCIR_CACHELNSZ, 1) != 0) sc->flags |= FXP_FLAG_MWI_ENABLE; /* turn on the extended TxCB feature */ sc->flags |= FXP_FLAG_EXT_TXCB; /* enable reception of long frames for VLAN */ sc->flags |= FXP_FLAG_LONG_PKT_EN; } else { /* a hack to get long VLAN frames on a 82557 */ sc->flags |= FXP_FLAG_SAVE_BAD; } /* * Enable use of extended RFDs and TCBs for 82550 * and later chips. Note: we need extended TXCB support * too, but that's already enabled by the code above. * Be careful to do this only on the right devices. 
*/ if (sc->revision == FXP_REV_82550 || sc->revision == FXP_REV_82550_C || sc->revision == FXP_REV_82551_E || sc->revision == FXP_REV_82551_F || sc->revision == FXP_REV_82551_10) { sc->rfa_size = sizeof (struct fxp_rfa); sc->tx_cmd = FXP_CB_COMMAND_IPCBXMIT; sc->flags |= FXP_FLAG_EXT_RFA; } else { sc->rfa_size = sizeof (struct fxp_rfa) - FXP_RFAX_LEN; sc->tx_cmd = FXP_CB_COMMAND_XMIT; } /* * Allocate DMA tags and DMA safe memory. */ sc->maxtxseg = FXP_NTXSEG; if (sc->flags & FXP_FLAG_EXT_RFA) sc->maxtxseg--; error = bus_dma_tag_create(NULL, 2, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * sc->maxtxseg, sc->maxtxseg, MCLBYTES, 0, busdma_lock_mutex, &Giant, &sc->fxp_mtag); if (error) { device_printf(dev, "could not allocate dma tag\n"); goto fail; } error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct fxp_stats), 1, sizeof(struct fxp_stats), 0, busdma_lock_mutex, &Giant, &sc->fxp_stag); if (error) { device_printf(dev, "could not allocate dma tag\n"); goto fail; } error = bus_dmamem_alloc(sc->fxp_stag, (void **)&sc->fxp_stats, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->fxp_smap); if (error) goto fail; error = bus_dmamap_load(sc->fxp_stag, sc->fxp_smap, sc->fxp_stats, sizeof(struct fxp_stats), fxp_dma_map_addr, &sc->stats_addr, 0); if (error) { device_printf(dev, "could not map the stats buffer\n"); goto fail; } error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, FXP_TXCB_SZ, 1, FXP_TXCB_SZ, 0, busdma_lock_mutex, &Giant, &sc->cbl_tag); if (error) { device_printf(dev, "could not allocate dma tag\n"); goto fail; } error = bus_dmamem_alloc(sc->cbl_tag, (void **)&sc->fxp_desc.cbl_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->cbl_map); if (error) goto fail; error = bus_dmamap_load(sc->cbl_tag, sc->cbl_map, sc->fxp_desc.cbl_list, FXP_TXCB_SZ, fxp_dma_map_addr, &sc->fxp_desc.cbl_addr, 0); if (error) { device_printf(dev, "could not map DMA memory\n"); goto fail; } 
error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct fxp_cb_mcs), 1, sizeof(struct fxp_cb_mcs), 0, busdma_lock_mutex, &Giant, &sc->mcs_tag); if (error) { device_printf(dev, "could not allocate dma tag\n"); goto fail; } error = bus_dmamem_alloc(sc->mcs_tag, (void **)&sc->mcsp, BUS_DMA_NOWAIT, &sc->mcs_map); if (error) goto fail; error = bus_dmamap_load(sc->mcs_tag, sc->mcs_map, sc->mcsp, sizeof(struct fxp_cb_mcs), fxp_dma_map_addr, &sc->mcs_addr, 0); if (error) { device_printf(dev, "can't map the multicast setup command\n"); goto fail; } /* * Pre-allocate the TX DMA maps and setup the pointers to * the TX command blocks. */ txp = sc->fxp_desc.tx_list; tcbp = sc->fxp_desc.cbl_list; for (i = 0; i < FXP_NTXCB; i++) { txp[i].tx_cb = tcbp + i; error = bus_dmamap_create(sc->fxp_mtag, 0, &txp[i].tx_map); if (error) { device_printf(dev, "can't create DMA map for TX\n"); goto fail; } } error = bus_dmamap_create(sc->fxp_mtag, 0, &sc->spare_map); if (error) { device_printf(dev, "can't create spare DMA map\n"); goto fail; } /* * Pre-allocate our receive buffers. */ sc->fxp_desc.rx_head = sc->fxp_desc.rx_tail = NULL; for (i = 0; i < FXP_NRFABUFS; i++) { rxp = &sc->fxp_desc.rx_list[i]; error = bus_dmamap_create(sc->fxp_mtag, 0, &rxp->rx_map); if (error) { device_printf(dev, "can't create DMA map for RX\n"); goto fail; } if (fxp_add_rfabuf(sc, rxp) != 0) { error = ENOMEM; goto fail; } } /* * Read MAC address. */ fxp_read_eeprom(sc, myea, 0, 3); eaddr[0] = myea[0] & 0xff; eaddr[1] = myea[0] >> 8; eaddr[2] = myea[1] & 0xff; eaddr[3] = myea[1] >> 8; eaddr[4] = myea[2] & 0xff; eaddr[5] = myea[2] >> 8; if (bootverbose) { device_printf(dev, "PCI IDs: %04x %04x %04x %04x %04x\n", pci_get_vendor(dev), pci_get_device(dev), pci_get_subvendor(dev), pci_get_subdevice(dev), pci_get_revid(dev)); fxp_read_eeprom(sc, &data, 10, 1); device_printf(dev, "Dynamic Standby mode is %s\n", data & 0x02 ? 
"enabled" : "disabled"); } /* * If this is only a 10Mbps device, then there is no MII, and * the PHY will use a serial interface instead. * * The Seeq 80c24 AutoDUPLEX(tm) Ethernet Interface Adapter * doesn't have a programming interface of any sort. The * media is sensed automatically based on how the link partner * is configured. This is, in essence, manual configuration. */ if (sc->flags & FXP_FLAG_SERIAL_MEDIA) { ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL); ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL); } else { if (mii_phy_probe(dev, &sc->miibus, fxp_ifmedia_upd, fxp_ifmedia_sts)) { device_printf(dev, "MII without any PHY!\n"); error = ENXIO; goto fail; } } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_baudrate = 100000000; ifp->if_init = fxp_init; ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = fxp_ioctl; ifp->if_start = fxp_start; ifp->if_watchdog = fxp_watchdog; ifp->if_capabilities = ifp->if_capenable = 0; /* Enable checksum offload for 82550 or better chips */ if (sc->flags & FXP_FLAG_EXT_RFA) { ifp->if_hwassist = FXP_CSUM_FEATURES; ifp->if_capabilities |= IFCAP_HWCSUM; ifp->if_capenable |= IFCAP_HWCSUM; } #ifdef DEVICE_POLLING /* Inform the world we support polling. */ ifp->if_capabilities |= IFCAP_POLLING; ifp->if_capenable |= IFCAP_POLLING; #endif /* * Attach the interface. */ ether_ifattach(ifp, eaddr); /* * Tell the upper layer(s) we support long frames. * Must appear after the call to ether_ifattach() because * ether_ifattach() sets ifi_hdrlen to the default value. */ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_VLAN_MTU; ifp->if_capenable |= IFCAP_VLAN_MTU; /* the hw bits already set */ /* * Let the system queue as many packets as we have available * TX descriptors. 
*/ IFQ_SET_MAXLEN(&ifp->if_snd, FXP_NTXCB - 1); ifp->if_snd.ifq_drv_maxlen = FXP_NTXCB - 1; IFQ_SET_READY(&ifp->if_snd); /* * Hook our interrupt after all initialization is complete. */ error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, fxp_intr, sc, &sc->ih); if (error) { device_printf(dev, "could not setup irq\n"); ether_ifdetach(sc->ifp); goto fail; } fail: splx(s); if (error) fxp_release(sc); return (error); } /* * Release all resources. The softc lock should not be held and the * interrupt should already be torn down. */ static void fxp_release(struct fxp_softc *sc) { struct fxp_rx *rxp; struct fxp_tx *txp; int i; FXP_LOCK_ASSERT(sc, MA_NOTOWNED); KASSERT(sc->ih == NULL, ("fxp_release() called with intr handle still active")); if (sc->miibus) device_delete_child(sc->dev, sc->miibus); bus_generic_detach(sc->dev); ifmedia_removeall(&sc->sc_media); if (sc->fxp_desc.cbl_list) { bus_dmamap_unload(sc->cbl_tag, sc->cbl_map); bus_dmamem_free(sc->cbl_tag, sc->fxp_desc.cbl_list, sc->cbl_map); } if (sc->fxp_stats) { bus_dmamap_unload(sc->fxp_stag, sc->fxp_smap); bus_dmamem_free(sc->fxp_stag, sc->fxp_stats, sc->fxp_smap); } if (sc->mcsp) { bus_dmamap_unload(sc->mcs_tag, sc->mcs_map); bus_dmamem_free(sc->mcs_tag, sc->mcsp, sc->mcs_map); } if (sc->irq) bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->irq); if (sc->mem) bus_release_resource(sc->dev, sc->rtp, sc->rgd, sc->mem); if (sc->fxp_mtag) { for (i = 0; i < FXP_NRFABUFS; i++) { rxp = &sc->fxp_desc.rx_list[i]; if (rxp->rx_mbuf != NULL) { bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map); m_freem(rxp->rx_mbuf); } bus_dmamap_destroy(sc->fxp_mtag, rxp->rx_map); } bus_dmamap_destroy(sc->fxp_mtag, sc->spare_map); for (i = 0; i < FXP_NTXCB; i++) { txp = &sc->fxp_desc.tx_list[i]; if (txp->tx_mbuf != NULL) { bus_dmamap_sync(sc->fxp_mtag, txp->tx_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->fxp_mtag, txp->tx_map); m_freem(txp->tx_mbuf); } 
			bus_dmamap_destroy(sc->fxp_mtag, txp->tx_map);
		}
		bus_dma_tag_destroy(sc->fxp_mtag);
	}
	/* Tags are destroyed only after all maps/memory using them are gone. */
	if (sc->fxp_stag)
		bus_dma_tag_destroy(sc->fxp_stag);
	if (sc->cbl_tag)
		bus_dma_tag_destroy(sc->cbl_tag);
	if (sc->mcs_tag)
		bus_dma_tag_destroy(sc->mcs_tag);
	if (sc->ifp)
		if_free(sc->ifp);
	mtx_destroy(&sc->sc_mtx);
}

/*
 * Detach interface.
 *
 * Teardown order matters: the interface is unhooked from the network
 * stack and the chip quiesced while holding the softc lock, the
 * interrupt is torn down after dropping the lock (bus_teardown_intr()
 * may sleep waiting for a running handler), and only then are the
 * DMA/bus resources released via fxp_release().
 */
static int
fxp_detach(device_t dev)
{
	struct fxp_softc *sc = device_get_softc(dev);
	int s;

	FXP_LOCK(sc);
	s = splimp();

	sc->suspended = 1;	/* Do same thing as we do for suspend */
	/*
	 * Close down routes etc.
	 */
	ether_ifdetach(sc->ifp);

	/*
	 * Stop DMA and drop transmit queue, but disable interrupts first.
	 */
	CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
	fxp_stop(sc);
	FXP_UNLOCK(sc);

	/*
	 * Unhook interrupt before dropping lock. This is to prevent
	 * races with fxp_intr().
	 */
	bus_teardown_intr(sc->dev, sc->irq, sc->ih);
	sc->ih = NULL;
	splx(s);

	/* Release our allocated resources. */
	fxp_release(sc);
	return (0);
}

/*
 * Device shutdown routine. Called at system shutdown after sync. The
 * main purpose of this routine is to shut off receiver DMA so that
 * kernel memory doesn't get clobbered during warmboot.
 */
static int
fxp_shutdown(device_t dev)
{
	/*
	 * Make sure that DMA is disabled prior to reboot. Not doing
	 * so could allow DMA to corrupt kernel memory during the
	 * reboot before the driver initializes.
	 */
	fxp_stop((struct fxp_softc *) device_get_softc(dev));
	return (0);
}

/*
 * Device suspend routine. Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
fxp_suspend(device_t dev)
{
	struct fxp_softc *sc = device_get_softc(dev);
	int s;

	FXP_LOCK(sc);
	s = splimp();

	fxp_stop(sc);

	/* Flag checked by fxp_intr() so a late interrupt is ignored. */
	sc->suspended = 1;

	FXP_UNLOCK(sc);
	splx(s);
	return (0);
}

/*
 * Device resume routine. re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
fxp_resume(device_t dev)
{
	struct fxp_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->ifp;
	uint16_t pci_command;
	int s;

	FXP_LOCK(sc);
	s = splimp();

	/* reenable busmastering */
	pci_command = pci_read_config(dev, PCIR_COMMAND, 2);
	pci_command |= (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN);
	pci_write_config(dev, PCIR_COMMAND, pci_command, 2);

	/* Selective reset returns the chip to a known state without
	 * touching the PCI configuration just restored above. */
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SELECTIVE_RESET);
	DELAY(10);

	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		fxp_init_body(sc);

	sc->suspended = 0;

	FXP_UNLOCK(sc);
	splx(s);
	return (0);
}

/*
 * Clock 'length' bits of 'data' out to the serial EEPROM, MSB first.
 * Each bit is presented on EEDI and latched by pulsing EESK while
 * chip select (EECS) is held asserted by the caller.
 */
static void
fxp_eeprom_shiftin(struct fxp_softc *sc, int data, int length)
{
	uint16_t reg;
	int x;

	/*
	 * Shift in data.
	 */
	for (x = 1 << (length - 1); x; x >>= 1) {
		if (data & x)
			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
		else
			reg = FXP_EEPROM_EECS;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
	}
}

/*
 * Read from the serial EEPROM. Basically, you manually shift in
 * the read opcode (one bit at a time) and then shift in the address,
 * and then you shift out the data (all of this one bit at a time).
 * The word size is 16 bits, so you have to provide the address for
 * every 16 bits of data.
 */
static uint16_t
fxp_eeprom_getword(struct fxp_softc *sc, int offset, int autosize)
{
	uint16_t reg, data;
	int x;

	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	/*
	 * Shift in read opcode.
	 */
	fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_READ, 3);
	/*
	 * Shift in address.
	 */
	data = 0;
	for (x = 1 << (sc->eeprom_size - 1); x; x >>= 1) {
		if (offset & x)
			reg = FXP_EEPROM_EECS | FXP_EEPROM_EEDI;
		else
			reg = FXP_EEPROM_EECS;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
		DELAY(1);
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
		reg = CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO;
		data++;
		/*
		 * When autosizing, the EEPROM drives EEDO to the `dummy
		 * zero' bit once enough address bits have been clocked in;
		 * the number of bits shifted so far is then the true
		 * address width.  See the comment above
		 * fxp_autosize_eeprom() for the full story.
		 */
		if (autosize && reg == 0) {
			sc->eeprom_size = data;
			break;
		}
	}
	/*
	 * Shift out data.
	 */
	data = 0;
	reg = FXP_EEPROM_EECS;
	for (x = 1 << 15; x; x >>= 1) {
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg | FXP_EEPROM_EESK);
		DELAY(1);
		if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
			data |= x;
		CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, reg);
		DELAY(1);
	}
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);

	return (data);
}

/*
 * Write one 16-bit word to the serial EEPROM at 'offset'.  The
 * sequence is: erase/write enable, the write opcode + address + data,
 * a busy-wait for the part to finish programming (bounded poll of
 * EEDO), and finally erase/write disable.
 */
static void
fxp_eeprom_putword(struct fxp_softc *sc, int offset, uint16_t data)
{
	int i;

	/*
	 * Erase/write enable.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, 0x4, 3);
	fxp_eeprom_shiftin(sc, 0x03 << (sc->eeprom_size - 2), sc->eeprom_size);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Shift in write opcode, address, data.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, FXP_EEPROM_OPC_WRITE, 3);
	fxp_eeprom_shiftin(sc, offset, sc->eeprom_size);
	fxp_eeprom_shiftin(sc, data, 16);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Wait for EEPROM to finish up.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	DELAY(1);
	for (i = 0; i < 1000; i++) {
		if (CSR_READ_2(sc, FXP_CSR_EEPROMCONTROL) & FXP_EEPROM_EEDO)
			break;
		DELAY(50);
	}
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
	/*
	 * Erase/write disable.
	 */
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, FXP_EEPROM_EECS);
	fxp_eeprom_shiftin(sc, 0x4, 3);
	fxp_eeprom_shiftin(sc, 0, sc->eeprom_size);
	CSR_WRITE_2(sc, FXP_CSR_EEPROMCONTROL, 0);
	DELAY(1);
}

/*
 * From NetBSD:
 *
 * Figure out EEPROM size.
 *
 * 559's can have either 64-word or 256-word EEPROMs, the 558
 * datasheet only talks about 64-word EEPROMs, and the 557 datasheet
 * talks about the existence of 16 to 256 word EEPROMs.
 *
 * The only known sizes are 64 and 256, where the 256 version is used
 * by CardBus cards to store CIS information.
 *
 * The address is shifted in msb-to-lsb, and after the last
 * address-bit the EEPROM is supposed to output a `dummy zero' bit,
 * after which follows the actual data. We try to detect this zero, by
 * probing the data-out bit in the EEPROM control register just after
 * having shifted in a bit. If the bit is zero, we assume we've
 * shifted enough address bits. The data-out should be tri-state,
 * before this, which should translate to a logical one.
 */
static void
fxp_autosize_eeprom(struct fxp_softc *sc)
{
	/* guess maximum size of 256 words */
	sc->eeprom_size = 8;

	/* autosize: the probe read corrects sc->eeprom_size in place */
	(void) fxp_eeprom_getword(sc, 0, 1);
}

/*
 * Read 'words' consecutive 16-bit words from the EEPROM starting at
 * word offset 'offset' into 'data'.
 */
static void
fxp_read_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
{
	int i;

	for (i = 0; i < words; i++)
		data[i] = fxp_eeprom_getword(sc, offset + i, 0);
}

/*
 * Write 'words' consecutive 16-bit words from 'data' to the EEPROM
 * starting at word offset 'offset'.
 */
static void
fxp_write_eeprom(struct fxp_softc *sc, u_short *data, int offset, int words)
{
	int i;

	for (i = 0; i < words; i++)
		fxp_eeprom_putword(sc, offset + i, data[i]);
}

/*
 * Grab the softc lock and call the real fxp_start_body() routine
 */
static void
fxp_start(struct ifnet *ifp)
{
	struct fxp_softc *sc = ifp->if_softc;

	FXP_LOCK(sc);
	fxp_start_body(ifp);
	FXP_UNLOCK(sc);
}

/*
 * Start packet transmission on the interface.
 * This routine must be called with the softc lock held, and is an
 * internal entry point only.
 */
static void
fxp_start_body(struct ifnet *ifp)
{
	struct fxp_softc *sc = ifp->if_softc;
	struct mbuf *mb_head;
	int error, txqueued;

	FXP_LOCK_ASSERT(sc, MA_OWNED);

	/*
	 * See if we need to suspend xmit until the multicast filter
	 * has been reprogrammed (which can only be done at the head
	 * of the command chain).
*/ if (sc->need_mcsetup) return; /* * We're finished if there is nothing more to add to the list or if * we're all filled up with buffers to transmit. * NOTE: One TxCB is reserved to guarantee that fxp_mc_setup() can add * a NOP command when needed. */ txqueued = 0; while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->tx_queued < FXP_NTXCB - 1) { /* * Grab a packet to transmit. */ IFQ_DRV_DEQUEUE(&ifp->if_snd, mb_head); if (mb_head == NULL) break; error = fxp_encap(sc, mb_head); if (error) break; txqueued = 1; } bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE); /* * We're finished. If we added to the list, issue a RESUME to get DMA * going again if suspended. */ if (txqueued) { fxp_scb_wait(sc); fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME); } } static int fxp_encap(struct fxp_softc *sc, struct mbuf *m_head) { struct ifnet *ifp; struct mbuf *m; struct fxp_tx *txp; struct fxp_cb_tx *cbp; bus_dma_segment_t segs[FXP_NTXSEG]; int chainlen, error, i, nseg; FXP_LOCK_ASSERT(sc, MA_OWNED); ifp = sc->ifp; /* * Get pointer to next available tx desc. */ txp = sc->fxp_desc.tx_last->tx_next; /* * A note in Appendix B of the Intel 8255x 10/100 Mbps * Ethernet Controller Family Open Source Software * Developer Manual says: * Using software parsing is only allowed with legal * TCP/IP or UDP/IP packets. * ... * For all other datagrams, hardware parsing must * be used. * Software parsing appears to truncate ICMP and * fragmented UDP packets that contain one to three * bytes in the second (and final) mbuf of the packet. */ if (sc->flags & FXP_FLAG_EXT_RFA) txp->tx_cb->ipcb_ip_activation_high = FXP_IPCB_HARDWAREPARSING_ENABLE; /* * Deal with TCP/IP checksum offload. Note that * in order for TCP checksum offload to work, * the pseudo header checksum must have already * been computed and stored in the checksum field * in the TCP header. The stack should have * already done this for us. 
*/ if (m_head->m_pkthdr.csum_flags) { if (m_head->m_pkthdr.csum_flags & CSUM_DELAY_DATA) { txp->tx_cb->ipcb_ip_schedule = FXP_IPCB_TCPUDP_CHECKSUM_ENABLE; if (m_head->m_pkthdr.csum_flags & CSUM_TCP) txp->tx_cb->ipcb_ip_schedule |= FXP_IPCB_TCP_PACKET; } #ifdef FXP_IP_CSUM_WAR /* * XXX The 82550 chip appears to have trouble * dealing with IP header checksums in very small * datagrams, namely fragments from 1 to 3 bytes * in size. For example, say you want to transmit * a UDP packet of 1473 bytes. The packet will be * fragmented over two IP datagrams, the latter * containing only one byte of data. The 82550 will * botch the header checksum on the 1-byte fragment. * As long as the datagram contains 4 or more bytes * of data, you're ok. * * The following code attempts to work around this * problem: if the datagram is less than 38 bytes * in size (14 bytes ether header, 20 bytes IP header, * plus 4 bytes of data), we punt and compute the IP * header checksum by hand. This workaround doesn't * work very well, however, since it can be fooled * by things like VLAN tags and IP options that make * the header sizes/offsets vary. */ if (m_head->m_pkthdr.csum_flags & CSUM_IP) { if (m_head->m_pkthdr.len < 38) { struct ip *ip; m_head->m_data += ETHER_HDR_LEN; ip = mtod(mb_head, struct ip *); ip->ip_sum = in_cksum(mb_head, ip->ip_hl << 2); m_head->m_data -= ETHER_HDR_LEN; } else { txp->tx_cb->ipcb_ip_activation_high = FXP_IPCB_HARDWAREPARSING_ENABLE; txp->tx_cb->ipcb_ip_schedule |= FXP_IPCB_IP_CHECKSUM_ENABLE; } } #endif } chainlen = 0; for (m = m_head; m != NULL && chainlen <= sc->maxtxseg; m = m->m_next) chainlen++; if (chainlen > sc->maxtxseg) { struct mbuf *mn; /* * We ran out of segments. We have to recopy this * mbuf chain first. Bail out if we can't get the * new buffers. 
*/ mn = m_defrag(m_head, M_DONTWAIT); if (mn == NULL) { m_freem(m_head); return (-1); } else { m_head = mn; } } /* * Go through each of the mbufs in the chain and initialize * the transmit buffer descriptors with the physical address * and size of the mbuf. */ error = bus_dmamap_load_mbuf_sg(sc->fxp_mtag, txp->tx_map, m_head, segs, &nseg, 0); if (error) { device_printf(sc->dev, "can't map mbuf (error %d)\n", error); m_freem(m_head); return (-1); } KASSERT(nseg <= sc->maxtxseg, ("too many DMA segments")); cbp = txp->tx_cb; for (i = 0; i < nseg; i++) { KASSERT(segs[i].ds_len <= MCLBYTES, ("segment size too large")); /* * If this is an 82550/82551, then we're using extended * TxCBs _and_ we're using checksum offload. This means * that the TxCB is really an IPCB. One major difference * between the two is that with plain extended TxCBs, * the bottom half of the TxCB contains two entries from * the TBD array, whereas IPCBs contain just one entry: * one entry (8 bytes) has been sacrificed for the TCP/IP * checksum offload control bits. So to make things work * right, we have to start filling in the TBD array * starting from a different place depending on whether * the chip is an 82550/82551 or not. */ if (sc->flags & FXP_FLAG_EXT_RFA) { cbp->tbd[i + 1].tb_addr = htole32(segs[i].ds_addr); cbp->tbd[i + 1].tb_size = htole32(segs[i].ds_len); } else { cbp->tbd[i].tb_addr = htole32(segs[i].ds_addr); cbp->tbd[i].tb_size = htole32(segs[i].ds_len); } } cbp->tbd_number = nseg; bus_dmamap_sync(sc->fxp_mtag, txp->tx_map, BUS_DMASYNC_PREWRITE); txp->tx_mbuf = m_head; txp->tx_cb->cb_status = 0; txp->tx_cb->byte_count = 0; if (sc->tx_queued != FXP_CXINT_THRESH - 1) { txp->tx_cb->cb_command = htole16(sc->tx_cmd | FXP_CB_COMMAND_SF | FXP_CB_COMMAND_S); } else { txp->tx_cb->cb_command = htole16(sc->tx_cmd | FXP_CB_COMMAND_SF | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I); /* * Set a 5 second timer just in case we don't hear * from the card again. 
*/ ifp->if_timer = 5; } txp->tx_cb->tx_threshold = tx_threshold; /* * Advance the end of list forward. */ #ifdef __alpha__ /* * On platforms which can't access memory in 16-bit * granularities, we must prevent the card from DMA'ing * up the status while we update the command field. * This could cause us to overwrite the completion status. * XXX This is probably bogus and we're _not_ looking * for atomicity here. */ atomic_clear_16(&sc->fxp_desc.tx_last->tx_cb->cb_command, htole16(FXP_CB_COMMAND_S)); #else sc->fxp_desc.tx_last->tx_cb->cb_command &= htole16(~FXP_CB_COMMAND_S); #endif /*__alpha__*/ sc->fxp_desc.tx_last = txp; /* * Advance the beginning of the list forward if there are * no other packets queued (when nothing is queued, tx_first * sits on the last TxCB that was sent out). */ if (sc->tx_queued == 0) sc->fxp_desc.tx_first = txp; sc->tx_queued++; /* * Pass packet to bpf if there is a listener. */ BPF_MTAP(ifp, m_head); return (0); } #ifdef DEVICE_POLLING static poll_handler_t fxp_poll; static void fxp_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct fxp_softc *sc = ifp->if_softc; uint8_t statack; FXP_LOCK(sc); if (!(ifp->if_capenable & IFCAP_POLLING)) { ether_poll_deregister(ifp); cmd = POLL_DEREGISTER; } if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */ CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0); FXP_UNLOCK(sc); return; } statack = FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA | FXP_SCB_STATACK_FR; if (cmd == POLL_AND_CHECK_STATUS) { uint8_t tmp; tmp = CSR_READ_1(sc, FXP_CSR_SCB_STATACK); if (tmp == 0xff || tmp == 0) { FXP_UNLOCK(sc); return; /* nothing to do */ } tmp &= ~statack; /* ack what we can */ if (tmp != 0) CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, tmp); statack |= tmp; } fxp_intr_body(sc, ifp, statack, count); FXP_UNLOCK(sc); } #endif /* DEVICE_POLLING */ /* * Process interface interrupts. 
 */
static void
fxp_intr(void *xsc)
{
	struct fxp_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	uint8_t statack;

	FXP_LOCK(sc);
	/* A suspended (or detaching) device may still raise a stale IRQ. */
	if (sc->suspended) {
		FXP_UNLOCK(sc);
		return;
	}

#ifdef DEVICE_POLLING
	if (ifp->if_flags & IFF_POLLING) {
		FXP_UNLOCK(sc);
		return;
	}
	if ((ifp->if_capenable & IFCAP_POLLING) &&
	    ether_poll_register(fxp_poll, ifp)) {
		/* disable interrupts */
		CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE);
		FXP_UNLOCK(sc);
		fxp_poll(ifp, 0, 1);
		return;
	}
#endif
	/* Loop until the chip reports no further pending status. */
	while ((statack = CSR_READ_1(sc, FXP_CSR_SCB_STATACK)) != 0) {
		/*
		 * It should not be possible to have all bits set; the
		 * FXP_SCB_INTR_SWI bit always returns 0 on a read.  If
		 * all bits are set, this may indicate that the card has
		 * been physically ejected, so ignore it.
		 */
		if (statack == 0xff) {
			FXP_UNLOCK(sc);
			return;
		}

		/*
		 * First ACK all the interrupts in this pass.
		 */
		CSR_WRITE_1(sc, FXP_CSR_SCB_STATACK, statack);
		fxp_intr_body(sc, ifp, statack, -1);
	}
	FXP_UNLOCK(sc);
}

/*
 * Reclaim completed transmit descriptors: walk from tx_first, freeing
 * the mbuf and unloading the DMA map of every TxCB the chip has marked
 * complete (FXP_CB_STATUS_C), and leave tx_first at the first
 * still-pending TxCB.  Called with the softc lock held.
 */
static void
fxp_txeof(struct fxp_softc *sc)
{
	struct fxp_tx *txp;

	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREREAD);
	for (txp = sc->fxp_desc.tx_first; sc->tx_queued &&
	    (le16toh(txp->tx_cb->cb_status) & FXP_CB_STATUS_C) != 0;
	    txp = txp->tx_next) {
		if (txp->tx_mbuf != NULL) {
			bus_dmamap_sync(sc->fxp_mtag, txp->tx_map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->fxp_mtag, txp->tx_map);
			m_freem(txp->tx_mbuf);
			txp->tx_mbuf = NULL;
			/* clear this to reset csum offload bits */
			txp->tx_cb->tbd[0].tb_addr = 0;
		}
		sc->tx_queued--;
	}
	sc->fxp_desc.tx_first = txp;
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
}

/*
 * Shared interrupt/poll work loop.  'statack' is the already-acked
 * status; 'count' bounds RX processing when polling (-1 = unbounded).
 * Called with the softc lock held; the lock is dropped around
 * if_input() (see comment below).
 */
static void
fxp_intr_body(struct fxp_softc *sc, struct ifnet *ifp, uint8_t statack,
    int count)
{
	struct mbuf *m;
	struct fxp_rx *rxp;
	struct fxp_rfa *rfa;
	int rnr = (statack & FXP_SCB_STATACK_RNR) ? 1 : 0;
	int fxp_rc = 0;

	FXP_LOCK_ASSERT(sc, MA_OWNED);
	if (rnr)
		sc->rnr++;

#ifdef DEVICE_POLLING
	/* Pick up a deferred RNR condition if `count' ran out last time.
	 */
	if (sc->flags & FXP_FLAG_DEFERRED_RNR) {
		sc->flags &= ~FXP_FLAG_DEFERRED_RNR;
		rnr = 1;
	}
#endif

	/*
	 * Free any finished transmit mbuf chains.
	 *
	 * Handle the CNA event likt a CXTNO event. It used to
	 * be that this event (control unit not ready) was not
	 * encountered, but it is now with the SMPng modifications.
	 * The exact sequence of events that occur when the interface
	 * is brought up are different now, and if this event
	 * goes unhandled, the configuration/rxfilter setup sequence
	 * can stall for several seconds. The result is that no
	 * packets go out onto the wire for about 5 to 10 seconds
	 * after the interface is ifconfig'ed for the first time.
	 */
	if (statack & (FXP_SCB_STATACK_CXTNO | FXP_SCB_STATACK_CNA)) {
		fxp_txeof(sc);

		ifp->if_timer = 0;
		if (sc->tx_queued == 0) {
			if (sc->need_mcsetup)
				fxp_mc_setup(sc);
		}
		/*
		 * Try to start more packets transmitting.
		 */
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			fxp_start_body(ifp);
	}

	/*
	 * Just return if nothing happened on the receive side.
	 */
	if (!rnr && (statack & FXP_SCB_STATACK_FR) == 0)
		return;

	/*
	 * Process receiver interrupts. If a no-resource (RNR)
	 * condition exists, get whatever packets we can and
	 * re-start the receiver.
	 *
	 * When using polling, we do not process the list to completion,
	 * so when we get an RNR interrupt we must defer the restart
	 * until we hit the last buffer with the C bit set.
	 * If we run out of cycles and rfa_headm has the C bit set,
	 * record the pending RNR in the FXP_FLAG_DEFERRED_RNR flag so
	 * that the info will be used in the subsequent polling cycle.
	 */
	for (;;) {
		rxp = sc->fxp_desc.rx_head;
		m = rxp->rx_mbuf;
		rfa = (struct fxp_rfa *)(m->m_ext.ext_buf +
		    RFA_ALIGNMENT_FUDGE);
		bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map,
		    BUS_DMASYNC_POSTREAD);

#ifdef DEVICE_POLLING /* loop at most count times if count >=0 */
		if (count >= 0 && count-- == 0) {
			if (rnr) {
				/* Defer RNR processing until the next time.
				 */
				sc->flags |= FXP_FLAG_DEFERRED_RNR;
				rnr = 0;
			}
			break;
		}
#endif /* DEVICE_POLLING */

		/* Stop at the first descriptor the chip has not completed. */
		if ((le16toh(rfa->rfa_status) & FXP_RFA_STATUS_C) == 0)
			break;

		/*
		 * Advance head forward.
		 */
		sc->fxp_desc.rx_head = rxp->rx_next;

		/*
		 * Add a new buffer to the receive chain.
		 * If this fails, the old buffer is recycled
		 * instead.
		 */
		fxp_rc = fxp_add_rfabuf(sc, rxp);
		if (fxp_rc == 0) {
			int total_len;

			/*
			 * Fetch packet length (the top 2 bits of
			 * actual_size are flags set by the controller
			 * upon completion), and drop the packet in case
			 * of bogus length or CRC errors.
			 */
			total_len = le16toh(rfa->actual_size) & 0x3fff;
			if (total_len < sizeof(struct ether_header) ||
			    total_len > MCLBYTES - RFA_ALIGNMENT_FUDGE -
				sc->rfa_size ||
			    le16toh(rfa->rfa_status) & FXP_RFA_STATUS_CRC) {
				m_freem(m);
				continue;
			}

                        /* Do IP checksum checking. */
			if (le16toh(rfa->rfa_status) & FXP_RFA_STATUS_PARSE) {
				if (rfa->rfax_csum_sts &
				    FXP_RFDX_CS_IP_CSUM_BIT_VALID)
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_CHECKED;
				if (rfa->rfax_csum_sts &
				    FXP_RFDX_CS_IP_CSUM_VALID)
					m->m_pkthdr.csum_flags |=
					    CSUM_IP_VALID;
				if ((rfa->rfax_csum_sts &
				    FXP_RFDX_CS_TCPUDP_CSUM_BIT_VALID) &&
				    (rfa->rfax_csum_sts &
				    FXP_RFDX_CS_TCPUDP_CSUM_VALID)) {
					m->m_pkthdr.csum_flags |=
					    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
					m->m_pkthdr.csum_data = 0xffff;
				}
			}

			m->m_pkthdr.len = m->m_len = total_len;
			m->m_pkthdr.rcvif = ifp;

			/*
			 * Drop locks before calling if_input() since it
			 * may re-enter fxp_start() in the netisr case.
			 * This would result in a lock reversal. Better
			 * performance might be obtained by chaining all
			 * packets received, dropping the lock, and then
			 * calling if_input() on each one.
			 */
			FXP_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			FXP_LOCK(sc);
		} else if (fxp_rc == ENOBUFS) {
			rnr = 0;
			break;
		}
	}
	if (rnr) {
		/* Restart the receive unit at the new list head. */
		fxp_scb_wait(sc);
		CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL,
		    sc->fxp_desc.rx_head->rx_addr);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START);
	}
}

/*
 * Update packet in/out/collision statistics.
 The i82557 doesn't
 * allow you to access these counters without doing a fairly
 * expensive DMA to get _all_ of the statistics it maintains, so
 * we do this operation here only once per second. The statistics
 * counters in the kernel are updated from the previous dump-stats
 * DMA and then a new dump-stats DMA is started. The on-chip
 * counters are zeroed when the DMA completes. If we can't start
 * the DMA immediately, we don't wait - we just prepare to read
 * them again next time.
 */
static void
fxp_tick(void *xsc)
{
	struct fxp_softc *sc = xsc;
	struct ifnet *ifp = sc->ifp;
	struct fxp_stats *sp = sc->fxp_stats;
	int s;

	FXP_LOCK(sc);
	s = splimp();
	bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap, BUS_DMASYNC_POSTREAD);
	ifp->if_opackets += le32toh(sp->tx_good);
	ifp->if_collisions += le32toh(sp->tx_total_collisions);
	if (sp->rx_good) {
		ifp->if_ipackets += le32toh(sp->rx_good);
		sc->rx_idle_secs = 0;
	} else {
		/*
		 * Receiver's been idle for another second.
		 */
		sc->rx_idle_secs++;
	}
	ifp->if_ierrors +=
	    le32toh(sp->rx_crc_errors) +
	    le32toh(sp->rx_alignment_errors) +
	    le32toh(sp->rx_rnr_errors) +
	    le32toh(sp->rx_overrun_errors);
	/*
	 * If any transmit underruns occured, bump up the transmit
	 * threshold by another 512 bytes (64 * 8).
	 */
	if (sp->tx_underruns) {
		ifp->if_oerrors += le32toh(sp->tx_underruns);
		if (tx_threshold < 192)
			tx_threshold += 64;
	}

	/*
	 * Release any xmit buffers that have completed DMA. This isn't
	 * strictly necessary to do here, but it's advantagous for mbufs
	 * with external storage to be released in a timely manner rather
	 * than being defered for a potentially long time. This limits
	 * the delay to a maximum of one second.
	 */
	fxp_txeof(sc);

	/*
	 * If we haven't received any packets in FXP_MAC_RX_IDLE seconds,
	 * then assume the receiver has locked up and attempt to clear
	 * the condition by reprogramming the multicast filter. This is
	 * a work-around for a bug in the 82557 where the receiver locks
	 * up if it gets certain types of garbage in the syncronization
	 * bits prior to the packet header. This bug is supposed to only
	 * occur in 10Mbps mode, but has been seen to occur in 100Mbps
	 * mode as well (perhaps due to a 10/100 speed transition).
	 */
	if (sc->rx_idle_secs > FXP_MAX_RX_IDLE) {
		sc->rx_idle_secs = 0;
		fxp_mc_setup(sc);
	}
	/*
	 * If there is no pending command, start another stats
	 * dump. Otherwise punt for now.
	 */
	if (CSR_READ_1(sc, FXP_CSR_SCB_COMMAND) == 0) {
		/*
		 * Start another stats dump.
		 */
		bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap,
		    BUS_DMASYNC_PREREAD);
		fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMPRESET);
	} else {
		/*
		 * A previous command is still waiting to be accepted.
		 * Just zero our copy of the stats and wait for the
		 * next timer event to update them.
		 */
		sp->tx_good = 0;
		sp->tx_underruns = 0;
		sp->tx_total_collisions = 0;

		sp->rx_good = 0;
		sp->rx_crc_errors = 0;
		sp->rx_alignment_errors = 0;
		sp->rx_rnr_errors = 0;
		sp->rx_overrun_errors = 0;
	}
	if (sc->miibus != NULL)
		mii_tick(device_get_softc(sc->miibus));

	/*
	 * Schedule another timeout one second from now.
	 */
	callout_reset(&sc->stat_ch, hz, fxp_tick, sc);
	FXP_UNLOCK(sc);
	splx(s);
}

/*
 * Stop the interface. Cancels the statistics updater and resets
 * the interface.
 */
static void
fxp_stop(struct fxp_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct fxp_tx *txp;
	int i;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

#ifdef DEVICE_POLLING
	ether_poll_deregister(ifp);
#endif
	/*
	 * Cancel stats updater.
	 */
	callout_stop(&sc->stat_ch);

	/*
	 * Issue software reset, which also unloads the microcode.
	 */
	sc->flags &= ~FXP_FLAG_UCODE;
	CSR_WRITE_4(sc, FXP_CSR_PORT, FXP_PORT_SOFTWARE_RESET);
	DELAY(50);

	/*
	 * Release any xmit buffers.
	 */
	txp = sc->fxp_desc.tx_list;
	if (txp != NULL) {
		for (i = 0; i < FXP_NTXCB; i++) {
 			if (txp[i].tx_mbuf != NULL) {
				bus_dmamap_sync(sc->fxp_mtag, txp[i].tx_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(sc->fxp_mtag, txp[i].tx_map);
				m_freem(txp[i].tx_mbuf);
				txp[i].tx_mbuf = NULL;
				/* clear this to reset csum offload bits */
				txp[i].tx_cb->tbd[0].tb_addr = 0;
			}
		}
	}
	bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE);
	sc->tx_queued = 0;
}

/*
 * Watchdog/transmission transmit timeout handler. Called when a
 * transmission is started on the interface, but no interrupt is
 * received before the timeout. This usually indicates that the
 * card has wedged for some reason.
 */
static void
fxp_watchdog(struct ifnet *ifp)
{
	struct fxp_softc *sc = ifp->if_softc;

	FXP_LOCK(sc);
	device_printf(sc->dev, "device timeout\n");
	ifp->if_oerrors++;

	/* Full reinit is the recovery path for a wedged chip. */
	fxp_init_body(sc);
	FXP_UNLOCK(sc);
}

/*
 * Acquire locks and then call the real initialization function. This
 * is necessary because ether_ioctl() calls if_init() and this would
 * result in mutex recursion if the mutex was held.
 */
static void
fxp_init(void *xsc)
{
	struct fxp_softc *sc = xsc;

	FXP_LOCK(sc);
	fxp_init_body(sc);
	FXP_UNLOCK(sc);
}

/*
 * Perform device initialization. This routine must be called with the
 * softc lock held.
 */
static void
fxp_init_body(struct fxp_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct fxp_cb_config *cbp;
	struct fxp_cb_ias *cb_ias;
	struct fxp_cb_tx *tcbp;
	struct fxp_tx *txp;
	struct fxp_cb_mcs *mcsp;
	int i, prm, s;

	FXP_LOCK_ASSERT(sc, MA_OWNED);
	s = splimp();
	/*
	 * Cancel any pending I/O
	 */
	fxp_stop(sc);

	prm = (ifp->if_flags & IFF_PROMISC) ? 1 : 0;

	/*
	 * Initialize base of CBL and RFA memory. Loading with zero
	 * sets it up for regular linear addressing.
	 */
	CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, 0);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_BASE);

	fxp_scb_wait(sc);
	fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_BASE);

	/*
	 * Initialize base of dump-stats buffer.
*/ fxp_scb_wait(sc); bus_dmamap_sync(sc->fxp_stag, sc->fxp_smap, BUS_DMASYNC_PREREAD); CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->stats_addr); fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_DUMP_ADR); /* * Attempt to load microcode if requested. */ if (ifp->if_flags & IFF_LINK0 && (sc->flags & FXP_FLAG_UCODE) == 0) fxp_load_ucode(sc); /* * Initialize the multicast address list. */ if (fxp_mc_addrs(sc)) { mcsp = sc->mcsp; mcsp->cb_status = 0; mcsp->cb_command = htole16(FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_EL); mcsp->link_addr = 0xffffffff; /* * Start the multicast setup command. */ fxp_scb_wait(sc); bus_dmamap_sync(sc->mcs_tag, sc->mcs_map, BUS_DMASYNC_PREWRITE); CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->mcs_addr); fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); /* ...and wait for it to complete. */ fxp_dma_wait(sc, &mcsp->cb_status, sc->mcs_tag, sc->mcs_map); bus_dmamap_sync(sc->mcs_tag, sc->mcs_map, BUS_DMASYNC_POSTWRITE); } /* * We temporarily use memory that contains the TxCB list to * construct the config CB. The TxCB list memory is rebuilt * later. */ cbp = (struct fxp_cb_config *)sc->fxp_desc.cbl_list; /* * This bcopy is kind of disgusting, but there are a bunch of must be * zero and must be one bits in this structure and this is the easiest * way to initialize them all to proper values. */ bcopy(fxp_cb_config_template, cbp, sizeof(fxp_cb_config_template)); cbp->cb_status = 0; cbp->cb_command = htole16(FXP_CB_COMMAND_CONFIG | FXP_CB_COMMAND_EL); cbp->link_addr = 0xffffffff; /* (no) next command */ cbp->byte_count = sc->flags & FXP_FLAG_EXT_RFA ? 32 : 22; cbp->rx_fifo_limit = 8; /* rx fifo threshold (32 bytes) */ cbp->tx_fifo_limit = 0; /* tx fifo threshold (0 bytes) */ cbp->adaptive_ifs = 0; /* (no) adaptive interframe spacing */ cbp->mwi_enable = sc->flags & FXP_FLAG_MWI_ENABLE ? 1 : 0; cbp->type_enable = 0; /* actually reserved */ cbp->read_align_en = sc->flags & FXP_FLAG_READ_ALIGN ? 1 : 0; cbp->end_wr_on_cl = sc->flags & FXP_FLAG_WRITE_ALIGN ? 
1 : 0; cbp->rx_dma_bytecount = 0; /* (no) rx DMA max */ cbp->tx_dma_bytecount = 0; /* (no) tx DMA max */ cbp->dma_mbce = 0; /* (disable) dma max counters */ cbp->late_scb = 0; /* (don't) defer SCB update */ cbp->direct_dma_dis = 1; /* disable direct rcv dma mode */ cbp->tno_int_or_tco_en =0; /* (disable) tx not okay interrupt */ cbp->ci_int = 1; /* interrupt on CU idle */ cbp->ext_txcb_dis = sc->flags & FXP_FLAG_EXT_TXCB ? 0 : 1; cbp->ext_stats_dis = 1; /* disable extended counters */ cbp->keep_overrun_rx = 0; /* don't pass overrun frames to host */ cbp->save_bf = sc->flags & FXP_FLAG_SAVE_BAD ? 1 : prm; cbp->disc_short_rx = !prm; /* discard short packets */ cbp->underrun_retry = 1; /* retry mode (once) on DMA underrun */ cbp->two_frames = 0; /* do not limit FIFO to 2 frames */ cbp->dyn_tbd = 0; /* (no) dynamic TBD mode */ cbp->ext_rfa = sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0; cbp->mediatype = sc->flags & FXP_FLAG_SERIAL_MEDIA ? 0 : 1; cbp->csma_dis = 0; /* (don't) disable link */ cbp->tcp_udp_cksum = 0; /* (don't) enable checksum */ cbp->vlan_tco = 0; /* (don't) enable vlan wakeup */ cbp->link_wake_en = 0; /* (don't) assert PME# on link change */ cbp->arp_wake_en = 0; /* (don't) assert PME# on arp */ cbp->mc_wake_en = 0; /* (don't) enable PME# on mcmatch */ cbp->nsai = 1; /* (don't) disable source addr insert */ cbp->preamble_length = 2; /* (7 byte) preamble */ cbp->loopback = 0; /* (don't) loopback */ cbp->linear_priority = 0; /* (normal CSMA/CD operation) */ cbp->linear_pri_mode = 0; /* (wait after xmit only) */ cbp->interfrm_spacing = 6; /* (96 bits of) interframe spacing */ cbp->promiscuous = prm; /* promiscuous mode */ cbp->bcast_disable = 0; /* (don't) disable broadcasts */ cbp->wait_after_win = 0; /* (don't) enable modified backoff alg*/ cbp->ignore_ul = 0; /* consider U/L bit in IA matching */ cbp->crc16_en = 0; /* (don't) enable crc-16 algorithm */ cbp->crscdt = sc->flags & FXP_FLAG_SERIAL_MEDIA ? 
1 : 0; cbp->stripping = !prm; /* truncate rx packet to byte count */ cbp->padding = 1; /* (do) pad short tx packets */ cbp->rcv_crc_xfer = 0; /* (don't) xfer CRC to host */ cbp->long_rx_en = sc->flags & FXP_FLAG_LONG_PKT_EN ? 1 : 0; cbp->ia_wake_en = 0; /* (don't) wake up on address match */ cbp->magic_pkt_dis = 0; /* (don't) disable magic packet */ /* must set wake_en in PMCSR also */ cbp->force_fdx = 0; /* (don't) force full duplex */ cbp->fdx_pin_en = 1; /* (enable) FDX# pin */ cbp->multi_ia = 0; /* (don't) accept multiple IAs */ cbp->mc_all = sc->flags & FXP_FLAG_ALL_MCAST ? 1 : 0; cbp->gamla_rx = sc->flags & FXP_FLAG_EXT_RFA ? 1 : 0; if (sc->tunable_noflow || sc->revision == FXP_REV_82557) { /* * The 82557 has no hardware flow control, the values * below are the defaults for the chip. */ cbp->fc_delay_lsb = 0; cbp->fc_delay_msb = 0x40; cbp->pri_fc_thresh = 3; cbp->tx_fc_dis = 0; cbp->rx_fc_restop = 0; cbp->rx_fc_restart = 0; cbp->fc_filter = 0; cbp->pri_fc_loc = 1; } else { cbp->fc_delay_lsb = 0x1f; cbp->fc_delay_msb = 0x01; cbp->pri_fc_thresh = 3; cbp->tx_fc_dis = 0; /* enable transmit FC */ cbp->rx_fc_restop = 1; /* enable FC restop frames */ cbp->rx_fc_restart = 1; /* enable FC restart frames */ cbp->fc_filter = !prm; /* drop FC frames to host */ cbp->pri_fc_loc = 1; /* FC pri location (byte31) */ } /* * Start the config command/DMA. */ fxp_scb_wait(sc); bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE); CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr); fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); /* ...and wait for it to complete. */ fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map); bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE); /* * Now initialize the station address. Temporarily use the TxCB * memory area like we did above for the config CB. 
*/ cb_ias = (struct fxp_cb_ias *)sc->fxp_desc.cbl_list; cb_ias->cb_status = 0; cb_ias->cb_command = htole16(FXP_CB_COMMAND_IAS | FXP_CB_COMMAND_EL); cb_ias->link_addr = 0xffffffff; bcopy(IFP2ENADDR(sc->ifp), cb_ias->macaddr, sizeof(IFP2ENADDR(sc->ifp))); /* * Start the IAS (Individual Address Setup) command/DMA. */ fxp_scb_wait(sc); bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE); fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); /* ...and wait for it to complete. */ fxp_dma_wait(sc, &cb_ias->cb_status, sc->cbl_tag, sc->cbl_map); bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE); /* * Initialize transmit control block (TxCB) list. */ txp = sc->fxp_desc.tx_list; tcbp = sc->fxp_desc.cbl_list; bzero(tcbp, FXP_TXCB_SZ); for (i = 0; i < FXP_NTXCB; i++) { txp[i].tx_mbuf = NULL; tcbp[i].cb_status = htole16(FXP_CB_STATUS_C | FXP_CB_STATUS_OK); tcbp[i].cb_command = htole16(FXP_CB_COMMAND_NOP); tcbp[i].link_addr = htole32(sc->fxp_desc.cbl_addr + (((i + 1) & FXP_TXCB_MASK) * sizeof(struct fxp_cb_tx))); if (sc->flags & FXP_FLAG_EXT_TXCB) tcbp[i].tbd_array_addr = htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[2])); else tcbp[i].tbd_array_addr = htole32(FXP_TXCB_DMA_ADDR(sc, &tcbp[i].tbd[0])); txp[i].tx_next = &txp[(i + 1) & FXP_TXCB_MASK]; } /* * Set the suspend flag on the first TxCB and start the control * unit. It will execute the NOP and then suspend. */ tcbp->cb_command = htole16(FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S); bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE); sc->fxp_desc.tx_first = sc->fxp_desc.tx_last = txp; sc->tx_queued = 1; fxp_scb_wait(sc); fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); /* * Initialize receiver buffer area - RFA. */ fxp_scb_wait(sc); CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.rx_head->rx_addr); fxp_scb_cmd(sc, FXP_SCB_COMMAND_RU_START); /* * Set current media. 
*/ if (sc->miibus != NULL) mii_mediachg(device_get_softc(sc->miibus)); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; /* * Enable interrupts. */ #ifdef DEVICE_POLLING /* * ... but only do that if we are not polling. And because (presumably) * the default is interrupts on, we need to disable them explicitly! */ if ( ifp->if_flags & IFF_POLLING ) CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, FXP_SCB_INTR_DISABLE); else #endif /* DEVICE_POLLING */ CSR_WRITE_1(sc, FXP_CSR_SCB_INTRCNTL, 0); /* * Start stats updater. */ callout_reset(&sc->stat_ch, hz, fxp_tick, sc); splx(s); } static int fxp_serial_ifmedia_upd(struct ifnet *ifp) { return (0); } static void fxp_serial_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { ifmr->ifm_active = IFM_ETHER|IFM_MANUAL; } /* * Change media according to request. */ static int fxp_ifmedia_upd(struct ifnet *ifp) { struct fxp_softc *sc = ifp->if_softc; struct mii_data *mii; mii = device_get_softc(sc->miibus); mii_mediachg(mii); return (0); } /* * Notify the world which media we're using. */ static void fxp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct fxp_softc *sc = ifp->if_softc; struct mii_data *mii; mii = device_get_softc(sc->miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; if (ifmr->ifm_status & IFM_10_T && sc->flags & FXP_FLAG_CU_RESUME_BUG) sc->cu_resume_bug = 1; else sc->cu_resume_bug = 0; } /* * Add a buffer to the end of the RFA buffer list. * Return 0 if successful, 1 for failure. A failure results in * adding the 'oldm' (if non-NULL) on to the end of the list - * tossing out its old contents and recycling it. * The RFA struct is stuck at the beginning of mbuf cluster and the * data pointer is fixed up to point just past it. 
*/ static int fxp_add_rfabuf(struct fxp_softc *sc, struct fxp_rx *rxp) { struct mbuf *m; struct fxp_rfa *rfa, *p_rfa; struct fxp_rx *p_rx; bus_dmamap_t tmp_map; int error; m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); /* * Move the data pointer up so that the incoming data packet * will be 32-bit aligned. */ m->m_data += RFA_ALIGNMENT_FUDGE; /* * Get a pointer to the base of the mbuf cluster and move * data start past it. */ rfa = mtod(m, struct fxp_rfa *); m->m_data += sc->rfa_size; rfa->size = htole16(MCLBYTES - sc->rfa_size - RFA_ALIGNMENT_FUDGE); rfa->rfa_status = 0; rfa->rfa_control = htole16(FXP_RFA_CONTROL_EL); rfa->actual_size = 0; /* * Initialize the rest of the RFA. Note that since the RFA * is misaligned, we cannot store values directly. We're thus * using the le32enc() function which handles endianness and * is also alignment-safe. */ le32enc(&rfa->link_addr, 0xffffffff); le32enc(&rfa->rbd_addr, 0xffffffff); /* Map the RFA into DMA memory. */ error = bus_dmamap_load(sc->fxp_mtag, sc->spare_map, rfa, MCLBYTES - RFA_ALIGNMENT_FUDGE, fxp_dma_map_addr, &rxp->rx_addr, 0); if (error) { m_freem(m); return (error); } bus_dmamap_unload(sc->fxp_mtag, rxp->rx_map); tmp_map = sc->spare_map; sc->spare_map = rxp->rx_map; rxp->rx_map = tmp_map; rxp->rx_mbuf = m; bus_dmamap_sync(sc->fxp_mtag, rxp->rx_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* * If there are other buffers already on the list, attach this * one to the end by fixing up the tail to point to this one. 
*/ if (sc->fxp_desc.rx_head != NULL) { p_rx = sc->fxp_desc.rx_tail; p_rfa = (struct fxp_rfa *) (p_rx->rx_mbuf->m_ext.ext_buf + RFA_ALIGNMENT_FUDGE); p_rx->rx_next = rxp; le32enc(&p_rfa->link_addr, rxp->rx_addr); p_rfa->rfa_control = 0; bus_dmamap_sync(sc->fxp_mtag, p_rx->rx_map, BUS_DMASYNC_PREWRITE); } else { rxp->rx_next = NULL; sc->fxp_desc.rx_head = rxp; } sc->fxp_desc.rx_tail = rxp; return (0); } static volatile int fxp_miibus_readreg(device_t dev, int phy, int reg) { struct fxp_softc *sc = device_get_softc(dev); int count = 10000; int value; CSR_WRITE_4(sc, FXP_CSR_MDICONTROL, (FXP_MDI_READ << 26) | (reg << 16) | (phy << 21)); while (((value = CSR_READ_4(sc, FXP_CSR_MDICONTROL)) & 0x10000000) == 0 && count--) DELAY(10); if (count <= 0) device_printf(dev, "fxp_miibus_readreg: timed out\n"); return (value & 0xffff); } static void fxp_miibus_writereg(device_t dev, int phy, int reg, int value) { struct fxp_softc *sc = device_get_softc(dev); int count = 10000; CSR_WRITE_4(sc, FXP_CSR_MDICONTROL, (FXP_MDI_WRITE << 26) | (reg << 16) | (phy << 21) | (value & 0xffff)); while ((CSR_READ_4(sc, FXP_CSR_MDICONTROL) & 0x10000000) == 0 && count--) DELAY(10); if (count <= 0) device_printf(dev, "fxp_miibus_writereg: timed out\n"); } static int fxp_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct fxp_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; struct mii_data *mii; int flag, mask, s, error = 0; /* * Detaching causes us to call ioctl with the mutex owned. Preclude * that by saying we're busy if the lock is already held. */ if (FXP_LOCKED(sc)) return (EBUSY); FXP_LOCK(sc); s = splimp(); switch (command) { case SIOCSIFFLAGS: if (ifp->if_flags & IFF_ALLMULTI) sc->flags |= FXP_FLAG_ALL_MCAST; else sc->flags &= ~FXP_FLAG_ALL_MCAST; /* * If interface is marked up and not running, then start it. * If it is marked down and running, stop it. * XXX If it's up then re-initialize it. This is so flags * such as IFF_PROMISC are handled. 
*/ if (ifp->if_flags & IFF_UP) { fxp_init_body(sc); } else { if (ifp->if_flags & IFF_RUNNING) fxp_stop(sc); } break; case SIOCADDMULTI: case SIOCDELMULTI: if (ifp->if_flags & IFF_ALLMULTI) sc->flags |= FXP_FLAG_ALL_MCAST; else sc->flags &= ~FXP_FLAG_ALL_MCAST; /* * Multicast list has changed; set the hardware filter * accordingly. */ if ((sc->flags & FXP_FLAG_ALL_MCAST) == 0) fxp_mc_setup(sc); /* * fxp_mc_setup() can set FXP_FLAG_ALL_MCAST, so check it * again rather than else {}. */ if (sc->flags & FXP_FLAG_ALL_MCAST) fxp_init_body(sc); error = 0; break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: if (sc->miibus != NULL) { mii = device_get_softc(sc->miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); } else { error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, command); } break; case SIOCSIFCAP: mask = ifp->if_capenable ^ ifr->ifr_reqcap; if (mask & IFCAP_POLLING) ifp->if_capenable ^= IFCAP_POLLING; if (mask & IFCAP_VLAN_MTU) { ifp->if_capenable ^= IFCAP_VLAN_MTU; if (sc->revision != FXP_REV_82557) flag = FXP_FLAG_LONG_PKT_EN; else /* a hack to get long frames on the old chip */ flag = FXP_FLAG_SAVE_BAD; sc->flags ^= flag; if (ifp->if_flags & IFF_UP) fxp_init_body(sc); } break; default: /* * ether_ioctl() will eventually call fxp_start() which * will result in mutex recursion so drop it first. */ FXP_UNLOCK(sc); error = ether_ioctl(ifp, command, data); } if (FXP_LOCKED(sc)) FXP_UNLOCK(sc); splx(s); return (error); } /* * Fill in the multicast address list and return number of entries. 
*/ static int fxp_mc_addrs(struct fxp_softc *sc) { struct fxp_cb_mcs *mcsp = sc->mcsp; struct ifnet *ifp = sc->ifp; struct ifmultiaddr *ifma; int nmcasts; nmcasts = 0; if ((sc->flags & FXP_FLAG_ALL_MCAST) == 0) { + IF_ADDR_LOCK(ifp); #if __FreeBSD_version < 500000 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { #else TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { #endif if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (nmcasts >= MAXMCADDR) { sc->flags |= FXP_FLAG_ALL_MCAST; nmcasts = 0; break; } bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), &sc->mcsp->mc_addr[nmcasts][0], ETHER_ADDR_LEN); nmcasts++; } + IF_ADDR_UNLOCK(ifp); } mcsp->mc_cnt = htole16(nmcasts * ETHER_ADDR_LEN); return (nmcasts); } /* * Program the multicast filter. * * We have an artificial restriction that the multicast setup command * must be the first command in the chain, so we take steps to ensure * this. By requiring this, it allows us to keep up the performance of * the pre-initialized command ring (esp. link pointers) by not actually * inserting the mcsetup command in the ring - i.e. its link pointer * points to the TxCB ring, but the mcsetup descriptor itself is not part * of it. We then can do 'CU_START' on the mcsetup descriptor and have it * lead into the regular TxCB ring when it completes. * * This function must be called at splimp. */ static void fxp_mc_setup(struct fxp_softc *sc) { struct fxp_cb_mcs *mcsp = sc->mcsp; struct ifnet *ifp = sc->ifp; struct fxp_tx *txp; int count; FXP_LOCK_ASSERT(sc, MA_OWNED); /* * If there are queued commands, we must wait until they are all * completed. If we are already waiting, then add a NOP command * with interrupt option so that we're notified when all commands * have been completed - fxp_start() ensures that no additional * TX commands will be added when need_mcsetup is true. */ if (sc->tx_queued) { /* * need_mcsetup will be true if we are already waiting for the * NOP command to be completed (see below). 
In this case, bail. */ if (sc->need_mcsetup) return; sc->need_mcsetup = 1; /* * Add a NOP command with interrupt so that we are notified * when all TX commands have been processed. */ txp = sc->fxp_desc.tx_last->tx_next; txp->tx_mbuf = NULL; txp->tx_cb->cb_status = 0; txp->tx_cb->cb_command = htole16(FXP_CB_COMMAND_NOP | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I); /* * Advance the end of list forward. */ sc->fxp_desc.tx_last->tx_cb->cb_command &= htole16(~FXP_CB_COMMAND_S); bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE); sc->fxp_desc.tx_last = txp; sc->tx_queued++; /* * Issue a resume in case the CU has just suspended. */ fxp_scb_wait(sc); fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_RESUME); /* * Set a 5 second timer just in case we don't hear from the * card again. */ ifp->if_timer = 5; return; } sc->need_mcsetup = 0; /* * Initialize multicast setup descriptor. */ mcsp->cb_status = 0; mcsp->cb_command = htole16(FXP_CB_COMMAND_MCAS | FXP_CB_COMMAND_S | FXP_CB_COMMAND_I); mcsp->link_addr = htole32(sc->fxp_desc.cbl_addr); txp = &sc->fxp_desc.mcs_tx; txp->tx_mbuf = NULL; txp->tx_cb = (struct fxp_cb_tx *)sc->mcsp; txp->tx_next = sc->fxp_desc.tx_list; (void) fxp_mc_addrs(sc); sc->fxp_desc.tx_first = sc->fxp_desc.tx_last = txp; sc->tx_queued = 1; /* * Wait until command unit is not active. This should never * be the case when nothing is queued, but make sure anyway. */ count = 100; while ((CSR_READ_1(sc, FXP_CSR_SCB_RUSCUS) >> 6) == FXP_SCB_CUS_ACTIVE && --count) DELAY(10); if (count == 0) { device_printf(sc->dev, "command queue timeout\n"); return; } /* * Start the multicast setup command. 
*/ fxp_scb_wait(sc); bus_dmamap_sync(sc->mcs_tag, sc->mcs_map, BUS_DMASYNC_PREWRITE); CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->mcs_addr); fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); ifp->if_timer = 2; return; } static uint32_t fxp_ucode_d101a[] = D101_A_RCVBUNDLE_UCODE; static uint32_t fxp_ucode_d101b0[] = D101_B0_RCVBUNDLE_UCODE; static uint32_t fxp_ucode_d101ma[] = D101M_B_RCVBUNDLE_UCODE; static uint32_t fxp_ucode_d101s[] = D101S_RCVBUNDLE_UCODE; static uint32_t fxp_ucode_d102[] = D102_B_RCVBUNDLE_UCODE; static uint32_t fxp_ucode_d102c[] = D102_C_RCVBUNDLE_UCODE; static uint32_t fxp_ucode_d102e[] = D102_E_RCVBUNDLE_UCODE; #define UCODE(x) x, sizeof(x)/sizeof(uint32_t) struct ucode { uint32_t revision; uint32_t *ucode; int length; u_short int_delay_offset; u_short bundle_max_offset; } ucode_table[] = { { FXP_REV_82558_A4, UCODE(fxp_ucode_d101a), D101_CPUSAVER_DWORD, 0 }, { FXP_REV_82558_B0, UCODE(fxp_ucode_d101b0), D101_CPUSAVER_DWORD, 0 }, { FXP_REV_82559_A0, UCODE(fxp_ucode_d101ma), D101M_CPUSAVER_DWORD, D101M_CPUSAVER_BUNDLE_MAX_DWORD }, { FXP_REV_82559S_A, UCODE(fxp_ucode_d101s), D101S_CPUSAVER_DWORD, D101S_CPUSAVER_BUNDLE_MAX_DWORD }, { FXP_REV_82550, UCODE(fxp_ucode_d102), D102_B_CPUSAVER_DWORD, D102_B_CPUSAVER_BUNDLE_MAX_DWORD }, { FXP_REV_82550_C, UCODE(fxp_ucode_d102c), D102_C_CPUSAVER_DWORD, D102_C_CPUSAVER_BUNDLE_MAX_DWORD }, { FXP_REV_82551_F, UCODE(fxp_ucode_d102e), D102_E_CPUSAVER_DWORD, D102_E_CPUSAVER_BUNDLE_MAX_DWORD }, { 0, NULL, 0, 0, 0 } }; static void fxp_load_ucode(struct fxp_softc *sc) { struct ucode *uc; struct fxp_cb_ucode *cbp; int i; for (uc = ucode_table; uc->ucode != NULL; uc++) if (sc->revision == uc->revision) break; if (uc->ucode == NULL) return; cbp = (struct fxp_cb_ucode *)sc->fxp_desc.cbl_list; cbp->cb_status = 0; cbp->cb_command = htole16(FXP_CB_COMMAND_UCODE | FXP_CB_COMMAND_EL); cbp->link_addr = 0xffffffff; /* (no) next command */ for (i = 0; i < uc->length; i++) cbp->ucode[i] = htole32(uc->ucode[i]); if (uc->int_delay_offset) 
*(uint16_t *)&cbp->ucode[uc->int_delay_offset] = htole16(sc->tunable_int_delay + sc->tunable_int_delay / 2); if (uc->bundle_max_offset) *(uint16_t *)&cbp->ucode[uc->bundle_max_offset] = htole16(sc->tunable_bundle_max); /* * Download the ucode to the chip. */ fxp_scb_wait(sc); bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_PREWRITE); CSR_WRITE_4(sc, FXP_CSR_SCB_GENERAL, sc->fxp_desc.cbl_addr); fxp_scb_cmd(sc, FXP_SCB_COMMAND_CU_START); /* ...and wait for it to complete. */ fxp_dma_wait(sc, &cbp->cb_status, sc->cbl_tag, sc->cbl_map); bus_dmamap_sync(sc->cbl_tag, sc->cbl_map, BUS_DMASYNC_POSTWRITE); device_printf(sc->dev, "Microcode loaded, int_delay: %d usec bundle_max: %d\n", sc->tunable_int_delay, uc->bundle_max_offset == 0 ? 0 : sc->tunable_bundle_max); sc->flags |= FXP_FLAG_UCODE; } static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) { int error, value; value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if (error || !req->newptr) return (error); if (value < low || value > high) return (EINVAL); *(int *)arg1 = value; return (0); } /* * Interrupt delay is expressed in microseconds, a multiplier is used * to convert this to the appropriate clock ticks before using. */ static int sysctl_hw_fxp_int_delay(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, 300, 3000)); } static int sysctl_hw_fxp_bundle_max(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, 1, 0xffff)); } Index: stable/6/sys/dev/gem/if_gem.c =================================================================== --- stable/6/sys/dev/gem/if_gem.c (revision 149421) +++ stable/6/sys/dev/gem/if_gem.c (revision 149422) @@ -1,1922 +1,1924 @@ /*- * Copyright (C) 2001 Eduardo Horvath. * Copyright (c) 2001-2003 Thomas Moestl * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp */ #include __FBSDID("$FreeBSD$"); /* * Driver for Sun GEM ethernet controllers. 
*/ #if 0 #define GEM_DEBUG #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define TRIES 10000 static void gem_start(struct ifnet *); static void gem_stop(struct ifnet *, int); static int gem_ioctl(struct ifnet *, u_long, caddr_t); static void gem_cddma_callback(void *, bus_dma_segment_t *, int, int); static void gem_rxdma_callback(void *, bus_dma_segment_t *, int, bus_size_t, int); static void gem_txdma_callback(void *, bus_dma_segment_t *, int, bus_size_t, int); static void gem_tick(void *); static void gem_watchdog(struct ifnet *); static void gem_init(void *); static void gem_init_regs(struct gem_softc *sc); static int gem_ringsize(int sz); static int gem_meminit(struct gem_softc *); static int gem_load_txmbuf(struct gem_softc *, struct mbuf *); static void gem_mifinit(struct gem_softc *); static int gem_bitwait(struct gem_softc *sc, bus_addr_t r, u_int32_t clr, u_int32_t set); static int gem_reset_rx(struct gem_softc *); static int gem_reset_tx(struct gem_softc *); static int gem_disable_rx(struct gem_softc *); static int gem_disable_tx(struct gem_softc *); static void gem_rxdrain(struct gem_softc *); static int gem_add_rxbuf(struct gem_softc *, int); static void gem_setladrf(struct gem_softc *); struct mbuf *gem_get(struct gem_softc *, int, int); static void gem_eint(struct gem_softc *, u_int); static void gem_rint(struct gem_softc *); #if 0 static void gem_rint_timeout(void *); #endif static void gem_tint(struct gem_softc *); #ifdef notyet static void gem_power(int, void *); #endif devclass_t gem_devclass; DRIVER_MODULE(miibus, gem, miibus_driver, miibus_devclass, 0, 0); MODULE_DEPEND(gem, miibus, 1, 1, 1); #ifdef GEM_DEBUG #include #define KTR_GEM KTR_CT2 #endif #define GEM_NSEGS GEM_NTXDESC /* * gem_attach: * * Attach a Gem interface to the system. 
*/ int gem_attach(sc) struct gem_softc *sc; { struct ifnet *ifp; struct mii_softc *child; int i, error; u_int32_t v; ifp = sc->sc_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) return (ENOSPC); /* Make sure the chip is stopped. */ ifp->if_softc = sc; gem_reset(sc); error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, GEM_NSEGS, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->sc_pdmatag); if (error) goto fail_ifnet; error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MAXBSIZE, 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag); if (error) goto fail_ptag; error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, GEM_TD_BUFSIZE, GEM_NTXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag); if (error) goto fail_rtag; error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct gem_control_data), 1, sizeof(struct gem_control_data), BUS_DMA_ALLOCNOW, busdma_lock_mutex, &Giant, &sc->sc_cdmatag); if (error) goto fail_ttag; /* * Allocate the control data structures, and create and load the * DMA map for it. */ if ((error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_control_data, 0, &sc->sc_cddmamap))) { device_printf(sc->sc_dev, "unable to allocate control data," " error = %d\n", error); goto fail_ctag; } sc->sc_cddma = 0; if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap, sc->sc_control_data, sizeof(struct gem_control_data), gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) { device_printf(sc->sc_dev, "unable to load control data DMA " "map, error = %d\n", error); goto fail_cmem; } /* * Initialize the transmit job descriptors. */ STAILQ_INIT(&sc->sc_txfreeq); STAILQ_INIT(&sc->sc_txdirtyq); /* * Create the transmit buffer DMA maps. 
*/ error = ENOMEM; for (i = 0; i < GEM_TXQUEUELEN; i++) { struct gem_txsoft *txs; txs = &sc->sc_txsoft[i]; txs->txs_mbuf = NULL; txs->txs_ndescs = 0; if ((error = bus_dmamap_create(sc->sc_tdmatag, 0, &txs->txs_dmamap)) != 0) { device_printf(sc->sc_dev, "unable to create tx DMA map " "%d, error = %d\n", i, error); goto fail_txd; } STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); } /* * Create the receive buffer DMA maps. */ for (i = 0; i < GEM_NRXDESC; i++) { if ((error = bus_dmamap_create(sc->sc_rdmatag, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { device_printf(sc->sc_dev, "unable to create rx DMA map " "%d, error = %d\n", i, error); goto fail_rxd; } sc->sc_rxsoft[i].rxs_mbuf = NULL; } gem_mifinit(sc); if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, gem_mediachange, gem_mediastatus)) != 0) { device_printf(sc->sc_dev, "phy probe failed: %d\n", error); goto fail_rxd; } sc->sc_mii = device_get_softc(sc->sc_miibus); /* * From this point forward, the attachment cannot fail. A failure * before this point releases all resources that may have been * allocated. */ /* Get RX FIFO size */ sc->sc_rxfifosize = 64 * bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_FIFO_SIZE); /* Get TX FIFO size */ v = bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_FIFO_SIZE); device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n", sc->sc_rxfifosize / 1024, v / 16); /* Initialize ifnet structure. */ ifp->if_softc = sc; if_initname(ifp, device_get_name(sc->sc_dev), device_get_unit(sc->sc_dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_NEEDSGIANT; ifp->if_start = gem_start; ifp->if_ioctl = gem_ioctl; ifp->if_watchdog = gem_watchdog; ifp->if_init = gem_init; ifp->if_snd.ifq_maxlen = GEM_TXQUEUELEN; /* * Walk along the list of attached MII devices and * establish an `MII instance' to `phy number' * mapping. We'll use this mapping in media change * requests to determine which phy to use to program * the MIF configuration register. 
*/ for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL; child = LIST_NEXT(child, mii_list)) { /* * Note: we support just two PHYs: the built-in * internal device and an external on the MII * connector. */ if (child->mii_phy > 1 || child->mii_inst > 1) { device_printf(sc->sc_dev, "cannot accomodate " "MII device %s at phy %d, instance %d\n", device_get_name(child->mii_dev), child->mii_phy, child->mii_inst); continue; } sc->sc_phys[child->mii_inst] = child->mii_phy; } /* * Now select and activate the PHY we will use. * * The order of preference is External (MDI1), * Internal (MDI0), Serial Link (no MII). */ if (sc->sc_phys[1]) { #ifdef GEM_DEBUG printf("using external phy\n"); #endif sc->sc_mif_config |= GEM_MIF_CONFIG_PHY_SEL; } else { #ifdef GEM_DEBUG printf("using internal phy\n"); #endif sc->sc_mif_config &= ~GEM_MIF_CONFIG_PHY_SEL; } bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_MIF_CONFIG, sc->sc_mif_config); /* Attach the interface. */ ether_ifattach(ifp, sc->sc_enaddr); #if notyet /* * Add a suspend hook to make sure we come back up after a * resume. */ sc->sc_powerhook = powerhook_establish(gem_power, sc); if (sc->sc_powerhook == NULL) device_printf(sc->sc_dev, "WARNING: unable to establish power " "hook\n"); #endif callout_init(&sc->sc_tick_ch, 0); callout_init(&sc->sc_rx_ch, 0); return (0); /* * Free any resources we've allocated during the failed attach * attempt. Do this in reverse order and fall through. 
*/ fail_rxd: for (i = 0; i < GEM_NRXDESC; i++) { if (sc->sc_rxsoft[i].rxs_dmamap != NULL) bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rxsoft[i].rxs_dmamap); } fail_txd: for (i = 0; i < GEM_TXQUEUELEN; i++) { if (sc->sc_txsoft[i].txs_dmamap != NULL) bus_dmamap_destroy(sc->sc_tdmatag, sc->sc_txsoft[i].txs_dmamap); } bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); fail_cmem: bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, sc->sc_cddmamap); fail_ctag: bus_dma_tag_destroy(sc->sc_cdmatag); fail_ttag: bus_dma_tag_destroy(sc->sc_tdmatag); fail_rtag: bus_dma_tag_destroy(sc->sc_rdmatag); fail_ptag: bus_dma_tag_destroy(sc->sc_pdmatag); fail_ifnet: if_free(ifp); return (error); } void gem_detach(sc) struct gem_softc *sc; { struct ifnet *ifp = sc->sc_ifp; int i; gem_stop(ifp, 1); ether_ifdetach(ifp); if_free(ifp); device_delete_child(sc->sc_dev, sc->sc_miibus); for (i = 0; i < GEM_NRXDESC; i++) { if (sc->sc_rxsoft[i].rxs_dmamap != NULL) bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rxsoft[i].rxs_dmamap); } for (i = 0; i < GEM_TXQUEUELEN; i++) { if (sc->sc_txsoft[i].txs_dmamap != NULL) bus_dmamap_destroy(sc->sc_tdmatag, sc->sc_txsoft[i].txs_dmamap); } GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); GEM_CDSYNC(sc, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, sc->sc_cddmamap); bus_dma_tag_destroy(sc->sc_cdmatag); bus_dma_tag_destroy(sc->sc_tdmatag); bus_dma_tag_destroy(sc->sc_rdmatag); bus_dma_tag_destroy(sc->sc_pdmatag); } void gem_suspend(sc) struct gem_softc *sc; { struct ifnet *ifp = sc->sc_ifp; gem_stop(ifp, 0); } void gem_resume(sc) struct gem_softc *sc; { struct ifnet *ifp = sc->sc_ifp; if (ifp->if_flags & IFF_UP) gem_init(ifp); } static void gem_cddma_callback(xsc, segs, nsegs, error) void *xsc; bus_dma_segment_t *segs; int nsegs; int error; { struct gem_softc *sc = (struct gem_softc *)xsc; if (error != 0) return; if (nsegs != 1) { /* can't happen... 
*/ panic("gem_cddma_callback: bad control buffer segment count"); } sc->sc_cddma = segs[0].ds_addr; } static void gem_rxdma_callback(xsc, segs, nsegs, totsz, error) void *xsc; bus_dma_segment_t *segs; int nsegs; bus_size_t totsz; int error; { struct gem_rxsoft *rxs = (struct gem_rxsoft *)xsc; if (error != 0) return; KASSERT(nsegs == 1, ("gem_rxdma_callback: bad dma segment count")); rxs->rxs_paddr = segs[0].ds_addr; } static void gem_txdma_callback(xsc, segs, nsegs, totsz, error) void *xsc; bus_dma_segment_t *segs; int nsegs; bus_size_t totsz; int error; { struct gem_txdma *txd = (struct gem_txdma *)xsc; struct gem_softc *sc = txd->txd_sc; struct gem_txsoft *txs = txd->txd_txs; bus_size_t len = 0; uint64_t flags = 0; int seg, nexttx; if (error != 0) return; /* * Ensure we have enough descriptors free to describe * the packet. Note, we always reserve one descriptor * at the end of the ring as a termination point, to * prevent wrap-around. */ if (nsegs > sc->sc_txfree - 1) { txs->txs_ndescs = -1; return; } txs->txs_ndescs = nsegs; nexttx = txs->txs_firstdesc; /* * Initialize the transmit descriptors. 
*/ for (seg = 0; seg < nsegs; seg++, nexttx = GEM_NEXTTX(nexttx)) { #ifdef GEM_DEBUG CTR5(KTR_GEM, "txdma_cb: mapping seg %d (txd %d), len " "%lx, addr %#lx (%#lx)", seg, nexttx, segs[seg].ds_len, segs[seg].ds_addr, GEM_DMA_WRITE(sc, segs[seg].ds_addr)); #endif if (segs[seg].ds_len == 0) continue; sc->sc_txdescs[nexttx].gd_addr = GEM_DMA_WRITE(sc, segs[seg].ds_addr); KASSERT(segs[seg].ds_len < GEM_TD_BUFSIZE, ("gem_txdma_callback: segment size too large!")); flags = segs[seg].ds_len & GEM_TD_BUFSIZE; if (len == 0) { #ifdef GEM_DEBUG CTR2(KTR_GEM, "txdma_cb: start of packet at seg %d, " "tx %d", seg, nexttx); #endif flags |= GEM_TD_START_OF_PACKET; if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) { sc->sc_txwin = 0; flags |= GEM_TD_INTERRUPT_ME; } } if (len + segs[seg].ds_len == totsz) { #ifdef GEM_DEBUG CTR2(KTR_GEM, "txdma_cb: end of packet at seg %d, " "tx %d", seg, nexttx); #endif flags |= GEM_TD_END_OF_PACKET; } sc->sc_txdescs[nexttx].gd_flags = GEM_DMA_WRITE(sc, flags); txs->txs_lastdesc = nexttx; len += segs[seg].ds_len; } KASSERT((flags & GEM_TD_END_OF_PACKET) != 0, ("gem_txdma_callback: missed end of packet!")); } static void gem_tick(arg) void *arg; { struct gem_softc *sc = arg; int s; s = splnet(); mii_tick(sc->sc_mii); splx(s); callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); } static int gem_bitwait(sc, r, clr, set) struct gem_softc *sc; bus_addr_t r; u_int32_t clr; u_int32_t set; { int i; u_int32_t reg; for (i = TRIES; i--; DELAY(100)) { reg = bus_space_read_4(sc->sc_bustag, sc->sc_h, r); if ((r & clr) == 0 && (r & set) == set) return (1); } return (0); } void gem_reset(sc) struct gem_softc *sc; { bus_space_tag_t t = sc->sc_bustag; bus_space_handle_t h = sc->sc_h; int s; s = splnet(); #ifdef GEM_DEBUG CTR1(KTR_GEM, "%s: gem_reset", device_get_name(sc->sc_dev)); #endif gem_reset_rx(sc); gem_reset_tx(sc); /* Do a full reset */ bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX); if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) 
device_printf(sc->sc_dev, "cannot reset device\n"); splx(s); } /* * gem_rxdrain: * * Drain the receive queue. */ static void gem_rxdrain(sc) struct gem_softc *sc; { struct gem_rxsoft *rxs; int i; for (i = 0; i < GEM_NRXDESC; i++) { rxs = &sc->sc_rxsoft[i]; if (rxs->rxs_mbuf != NULL) { bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap); m_freem(rxs->rxs_mbuf); rxs->rxs_mbuf = NULL; } } } /* * Reset the whole thing. */ static void gem_stop(ifp, disable) struct ifnet *ifp; int disable; { struct gem_softc *sc = (struct gem_softc *)ifp->if_softc; struct gem_txsoft *txs; #ifdef GEM_DEBUG CTR1(KTR_GEM, "%s: gem_stop", device_get_name(sc->sc_dev)); #endif callout_stop(&sc->sc_tick_ch); /* XXX - Should we reset these instead? */ gem_disable_tx(sc); gem_disable_rx(sc); /* * Release any queued transmit buffers. */ while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); if (txs->txs_ndescs != 0) { bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); if (txs->txs_mbuf != NULL) { m_freem(txs->txs_mbuf); txs->txs_mbuf = NULL; } } STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); } if (disable) gem_rxdrain(sc); /* * Mark the interface down and cancel the watchdog timer. */ ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); ifp->if_timer = 0; } /* * Reset the receiver */ int gem_reset_rx(sc) struct gem_softc *sc; { bus_space_tag_t t = sc->sc_bustag; bus_space_handle_t h = sc->sc_h; /* * Resetting while DMA is in progress can cause a bus hang, so we * disable DMA first. */ gem_disable_rx(sc); bus_space_write_4(t, h, GEM_RX_CONFIG, 0); /* Wait till it finishes */ if (!gem_bitwait(sc, GEM_RX_CONFIG, 1, 0)) device_printf(sc->sc_dev, "cannot disable read dma\n"); /* Wait 5ms extra. 
*/ DELAY(5000); /* Finally, reset the ERX */ bus_space_write_4(t, h, GEM_RESET, GEM_RESET_RX); /* Wait till it finishes */ if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) { device_printf(sc->sc_dev, "cannot reset receiver\n"); return (1); } return (0); } /* * Reset the transmitter */ static int gem_reset_tx(sc) struct gem_softc *sc; { bus_space_tag_t t = sc->sc_bustag; bus_space_handle_t h = sc->sc_h; int i; /* * Resetting while DMA is in progress can cause a bus hang, so we * disable DMA first. */ gem_disable_tx(sc); bus_space_write_4(t, h, GEM_TX_CONFIG, 0); /* Wait till it finishes */ if (!gem_bitwait(sc, GEM_TX_CONFIG, 1, 0)) device_printf(sc->sc_dev, "cannot disable read dma\n"); /* Wait 5ms extra. */ DELAY(5000); /* Finally, reset the ETX */ bus_space_write_4(t, h, GEM_RESET, GEM_RESET_TX); /* Wait till it finishes */ for (i = TRIES; i--; DELAY(100)) if ((bus_space_read_4(t, h, GEM_RESET) & GEM_RESET_TX) == 0) break; if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) { device_printf(sc->sc_dev, "cannot reset receiver\n"); return (1); } return (0); } /* * disable receiver. */ static int gem_disable_rx(sc) struct gem_softc *sc; { bus_space_tag_t t = sc->sc_bustag; bus_space_handle_t h = sc->sc_h; u_int32_t cfg; /* Flip the enable bit */ cfg = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); cfg &= ~GEM_MAC_RX_ENABLE; bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, cfg); /* Wait for it to finish */ return (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0)); } /* * disable transmitter. */ static int gem_disable_tx(sc) struct gem_softc *sc; { bus_space_tag_t t = sc->sc_bustag; bus_space_handle_t h = sc->sc_h; u_int32_t cfg; /* Flip the enable bit */ cfg = bus_space_read_4(t, h, GEM_MAC_TX_CONFIG); cfg &= ~GEM_MAC_TX_ENABLE; bus_space_write_4(t, h, GEM_MAC_TX_CONFIG, cfg); /* Wait for it to finish */ return (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0)); } /* * Initialize interface. 
*/ static int gem_meminit(sc) struct gem_softc *sc; { struct gem_rxsoft *rxs; int i, error; /* * Initialize the transmit descriptor ring. */ memset((void *)sc->sc_txdescs, 0, sizeof(sc->sc_txdescs)); for (i = 0; i < GEM_NTXDESC; i++) { sc->sc_txdescs[i].gd_flags = 0; sc->sc_txdescs[i].gd_addr = 0; } sc->sc_txfree = GEM_MAXTXFREE; sc->sc_txnext = 0; sc->sc_txwin = 0; /* * Initialize the receive descriptor and receive job * descriptor rings. */ for (i = 0; i < GEM_NRXDESC; i++) { rxs = &sc->sc_rxsoft[i]; if (rxs->rxs_mbuf == NULL) { if ((error = gem_add_rxbuf(sc, i)) != 0) { device_printf(sc->sc_dev, "unable to " "allocate or map rx buffer %d, error = " "%d\n", i, error); /* * XXX Should attempt to run with fewer receive * XXX buffers instead of just failing. */ gem_rxdrain(sc); return (1); } } else GEM_INIT_RXDESC(sc, i); } sc->sc_rxptr = 0; GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD); return (0); } static int gem_ringsize(sz) int sz; { int v = 0; switch (sz) { case 32: v = GEM_RING_SZ_32; break; case 64: v = GEM_RING_SZ_64; break; case 128: v = GEM_RING_SZ_128; break; case 256: v = GEM_RING_SZ_256; break; case 512: v = GEM_RING_SZ_512; break; case 1024: v = GEM_RING_SZ_1024; break; case 2048: v = GEM_RING_SZ_2048; break; case 4096: v = GEM_RING_SZ_4096; break; case 8192: v = GEM_RING_SZ_8192; break; default: printf("gem: invalid Receive Descriptor ring size\n"); break; } return (v); } /* * Initialization of interface; set up initialization block * and transmit/receive descriptor rings. */ static void gem_init(xsc) void *xsc; { struct gem_softc *sc = (struct gem_softc *)xsc; struct ifnet *ifp = sc->sc_ifp; bus_space_tag_t t = sc->sc_bustag; bus_space_handle_t h = sc->sc_h; int s; u_int32_t v; s = splnet(); #ifdef GEM_DEBUG CTR1(KTR_GEM, "%s: gem_init: calling stop", device_get_name(sc->sc_dev)); #endif /* * Initialization sequence. 
The numbered steps below correspond * to the sequence outlined in section 6.3.5.1 in the Ethernet * Channel Engine manual (part of the PCIO manual). * See also the STP2002-STQ document from Sun Microsystems. */ /* step 1 & 2. Reset the Ethernet Channel */ gem_stop(sc->sc_ifp, 0); gem_reset(sc); #ifdef GEM_DEBUG CTR1(KTR_GEM, "%s: gem_init: restarting", device_get_name(sc->sc_dev)); #endif /* Re-initialize the MIF */ gem_mifinit(sc); /* step 3. Setup data structures in host memory */ gem_meminit(sc); /* step 4. TX MAC registers & counters */ gem_init_regs(sc); /* XXX: VLAN code from NetBSD temporarily removed. */ bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME, (ETHER_MAX_LEN + sizeof(struct ether_header)) | (0x2000<<16)); /* step 5. RX MAC registers & counters */ gem_setladrf(sc); /* step 6 & 7. Program Descriptor Ring Base Addresses */ /* NOTE: we use only 32-bit DMA addresses here. */ bus_space_write_4(t, h, GEM_TX_RING_PTR_HI, 0); bus_space_write_4(t, h, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0)); bus_space_write_4(t, h, GEM_RX_RING_PTR_HI, 0); bus_space_write_4(t, h, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0)); #ifdef GEM_DEBUG CTR3(KTR_GEM, "loading rx ring %lx, tx ring %lx, cddma %lx", GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma); #endif /* step 8. Global Configuration & Interrupt Mask */ bus_space_write_4(t, h, GEM_INTMASK, ~(GEM_INTR_TX_INTME| GEM_INTR_TX_EMPTY| GEM_INTR_RX_DONE|GEM_INTR_RX_NOBUF| GEM_INTR_RX_TAG_ERR|GEM_INTR_PCS| GEM_INTR_MAC_CONTROL|GEM_INTR_MIF| GEM_INTR_BERR)); bus_space_write_4(t, h, GEM_MAC_RX_MASK, GEM_MAC_RX_DONE|GEM_MAC_RX_FRAME_CNT); bus_space_write_4(t, h, GEM_MAC_TX_MASK, 0xffff); /* XXXX */ bus_space_write_4(t, h, GEM_MAC_CONTROL_MASK, 0); /* XXXX */ /* step 9. ETX Configuration: use mostly default values */ /* Enable DMA */ v = gem_ringsize(GEM_NTXDESC /*XXX*/); bus_space_write_4(t, h, GEM_TX_CONFIG, v|GEM_TX_CONFIG_TXDMA_EN| ((0x400<<10)&GEM_TX_CONFIG_TXFIFO_TH)); /* step 10. 
ERX Configuration */ /* Encode Receive Descriptor ring size: four possible values */ v = gem_ringsize(GEM_NRXDESC /*XXX*/); /* Enable DMA */ bus_space_write_4(t, h, GEM_RX_CONFIG, v|(GEM_THRSH_1024<sc_rxfifosize / 256) | ( (sc->sc_rxfifosize / 256) << 12)); bus_space_write_4(t, h, GEM_RX_BLANKING, (6<<12)|6); /* step 11. Configure Media */ mii_mediachg(sc->sc_mii); /* step 12. RX_MAC Configuration Register */ v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); v |= GEM_MAC_RX_ENABLE; bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v); /* step 14. Issue Transmit Pending command */ /* step 15. Give the reciever a swift kick */ bus_space_write_4(t, h, GEM_RX_KICK, GEM_NRXDESC-4); /* Start the one second timer. */ callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; ifp->if_timer = 0; sc->sc_ifflags = ifp->if_flags; splx(s); } static int gem_load_txmbuf(sc, m0) struct gem_softc *sc; struct mbuf *m0; { struct gem_txdma txd; struct gem_txsoft *txs; int error; /* Get a work queue entry. */ if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) { /* Ran out of descriptors. */ return (-1); } txd.txd_sc = sc; txd.txd_txs = txs; txs->txs_mbuf = m0; txs->txs_firstdesc = sc->sc_txnext; error = bus_dmamap_load_mbuf(sc->sc_tdmatag, txs->txs_dmamap, m0, gem_txdma_callback, &txd, BUS_DMA_NOWAIT); if (error != 0) goto fail; if (txs->txs_ndescs == -1) { error = -1; goto fail; } /* Sync the DMA map. 
*/ bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, BUS_DMASYNC_PREWRITE); #ifdef GEM_DEBUG CTR3(KTR_GEM, "load_mbuf: setting firstdesc=%d, lastdesc=%d, " "ndescs=%d", txs->txs_firstdesc, txs->txs_lastdesc, txs->txs_ndescs); #endif STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q); STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q); sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc); sc->sc_txfree -= txs->txs_ndescs; return (0); fail: #ifdef GEM_DEBUG CTR1(KTR_GEM, "gem_load_txmbuf failed (%d)", error); #endif bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); return (error); } static void gem_init_regs(sc) struct gem_softc *sc; { bus_space_tag_t t = sc->sc_bustag; bus_space_handle_t h = sc->sc_h; const u_char *laddr = IFP2ENADDR(sc->sc_ifp); u_int32_t v; /* These regs are not cleared on reset */ if (!sc->sc_inited) { /* Wooo. Magic values. */ bus_space_write_4(t, h, GEM_MAC_IPG0, 0); bus_space_write_4(t, h, GEM_MAC_IPG1, 8); bus_space_write_4(t, h, GEM_MAC_IPG2, 4); bus_space_write_4(t, h, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN); /* Max frame and max burst size */ bus_space_write_4(t, h, GEM_MAC_MAC_MAX_FRAME, ETHER_MAX_LEN | (0x2000<<16)); bus_space_write_4(t, h, GEM_MAC_PREAMBLE_LEN, 0x7); bus_space_write_4(t, h, GEM_MAC_JAM_SIZE, 0x4); bus_space_write_4(t, h, GEM_MAC_ATTEMPT_LIMIT, 0x10); /* Dunno.... 
*/ bus_space_write_4(t, h, GEM_MAC_CONTROL_TYPE, 0x8088); bus_space_write_4(t, h, GEM_MAC_RANDOM_SEED, ((laddr[5]<<8)|laddr[4])&0x3ff); /* Secondary MAC addr set to 0:0:0:0:0:0 */ bus_space_write_4(t, h, GEM_MAC_ADDR3, 0); bus_space_write_4(t, h, GEM_MAC_ADDR4, 0); bus_space_write_4(t, h, GEM_MAC_ADDR5, 0); /* MAC control addr set to 01:80:c2:00:00:01 */ bus_space_write_4(t, h, GEM_MAC_ADDR6, 0x0001); bus_space_write_4(t, h, GEM_MAC_ADDR7, 0xc200); bus_space_write_4(t, h, GEM_MAC_ADDR8, 0x0180); /* MAC filter addr set to 0:0:0:0:0:0 */ bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER0, 0); bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER1, 0); bus_space_write_4(t, h, GEM_MAC_ADDR_FILTER2, 0); bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK1_2, 0); bus_space_write_4(t, h, GEM_MAC_ADR_FLT_MASK0, 0); sc->sc_inited = 1; } /* Counters need to be zeroed */ bus_space_write_4(t, h, GEM_MAC_NORM_COLL_CNT, 0); bus_space_write_4(t, h, GEM_MAC_FIRST_COLL_CNT, 0); bus_space_write_4(t, h, GEM_MAC_EXCESS_COLL_CNT, 0); bus_space_write_4(t, h, GEM_MAC_LATE_COLL_CNT, 0); bus_space_write_4(t, h, GEM_MAC_DEFER_TMR_CNT, 0); bus_space_write_4(t, h, GEM_MAC_PEAK_ATTEMPTS, 0); bus_space_write_4(t, h, GEM_MAC_RX_FRAME_COUNT, 0); bus_space_write_4(t, h, GEM_MAC_RX_LEN_ERR_CNT, 0); bus_space_write_4(t, h, GEM_MAC_RX_ALIGN_ERR, 0); bus_space_write_4(t, h, GEM_MAC_RX_CRC_ERR_CNT, 0); bus_space_write_4(t, h, GEM_MAC_RX_CODE_VIOL, 0); /* Un-pause stuff */ #if 0 bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0); #else bus_space_write_4(t, h, GEM_MAC_SEND_PAUSE_CMD, 0); #endif /* * Set the station address. */ bus_space_write_4(t, h, GEM_MAC_ADDR0, (laddr[4]<<8)|laddr[5]); bus_space_write_4(t, h, GEM_MAC_ADDR1, (laddr[2]<<8)|laddr[3]); bus_space_write_4(t, h, GEM_MAC_ADDR2, (laddr[0]<<8)|laddr[1]); /* * Enable MII outputs. Enable GMII if there is a gigabit PHY. 
*/ sc->sc_mif_config = bus_space_read_4(t, h, GEM_MIF_CONFIG); v = GEM_MAC_XIF_TX_MII_ENA; if (sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) { v |= GEM_MAC_XIF_FDPLX_LED; if (sc->sc_flags & GEM_GIGABIT) v |= GEM_MAC_XIF_GMII_MODE; } bus_space_write_4(t, h, GEM_MAC_XIF_CONFIG, v); } static void gem_start(ifp) struct ifnet *ifp; { struct gem_softc *sc = (struct gem_softc *)ifp->if_softc; struct mbuf *m0 = NULL; int firsttx, ntx = 0, ofree, txmfail; if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) return; /* * Remember the previous number of free descriptors and * the first descriptor we'll use. */ ofree = sc->sc_txfree; firsttx = sc->sc_txnext; #ifdef GEM_DEBUG CTR3(KTR_GEM, "%s: gem_start: txfree %d, txnext %d", device_get_name(sc->sc_dev), ofree, firsttx); #endif /* * Loop through the send queue, setting up transmit descriptors * until we drain the queue, or use up all available transmit * descriptors. */ txmfail = 0; do { /* * Grab a packet off the queue. */ IF_DEQUEUE(&ifp->if_snd, m0); if (m0 == NULL) break; txmfail = gem_load_txmbuf(sc, m0); if (txmfail > 0) { /* Drop the mbuf and complain. */ printf("gem_start: error %d while loading mbuf dma " "map\n", txmfail); continue; } /* Not enough descriptors. */ if (txmfail == -1) { if (sc->sc_txfree == GEM_MAXTXFREE) panic("gem_start: mbuf chain too long!"); IF_PREPEND(&ifp->if_snd, m0); break; } ntx++; /* Kick the transmitter. */ #ifdef GEM_DEBUG CTR2(KTR_GEM, "%s: gem_start: kicking tx %d", device_get_name(sc->sc_dev), sc->sc_txnext); #endif bus_space_write_4(sc->sc_bustag, sc->sc_h, GEM_TX_KICK, sc->sc_txnext); if (ifp->if_bpf != NULL) bpf_mtap(ifp->if_bpf, m0); } while (1); if (txmfail == -1 || sc->sc_txfree == 0) { /* No more slots left; notify upper layer. 
*/ ifp->if_flags |= IFF_OACTIVE; } if (ntx > 0) { GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); #ifdef GEM_DEBUG CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d", device_get_name(sc->sc_dev), firsttx); #endif /* Set a watchdog timer in case the chip flakes out. */ ifp->if_timer = 5; #ifdef GEM_DEBUG CTR2(KTR_GEM, "%s: gem_start: watchdog %d", device_get_name(sc->sc_dev), ifp->if_timer); #endif } } /* * Transmit interrupt. */ static void gem_tint(sc) struct gem_softc *sc; { struct ifnet *ifp = sc->sc_ifp; bus_space_tag_t t = sc->sc_bustag; bus_space_handle_t mac = sc->sc_h; struct gem_txsoft *txs; int txlast; int progress = 0; #ifdef GEM_DEBUG CTR1(KTR_GEM, "%s: gem_tint", device_get_name(sc->sc_dev)); #endif /* * Unload collision counters */ ifp->if_collisions += bus_space_read_4(t, mac, GEM_MAC_NORM_COLL_CNT) + bus_space_read_4(t, mac, GEM_MAC_FIRST_COLL_CNT) + bus_space_read_4(t, mac, GEM_MAC_EXCESS_COLL_CNT) + bus_space_read_4(t, mac, GEM_MAC_LATE_COLL_CNT); /* * then clear the hardware counters. */ bus_space_write_4(t, mac, GEM_MAC_NORM_COLL_CNT, 0); bus_space_write_4(t, mac, GEM_MAC_FIRST_COLL_CNT, 0); bus_space_write_4(t, mac, GEM_MAC_EXCESS_COLL_CNT, 0); bus_space_write_4(t, mac, GEM_MAC_LATE_COLL_CNT, 0); /* * Go through our Tx list and free mbufs for those * frames that have been transmitted. */ GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { #ifdef GEM_DEBUG if (ifp->if_flags & IFF_DEBUG) { int i; printf(" txsoft %p transmit chain:\n", txs); for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) { printf("descriptor %d: ", i); printf("gd_flags: 0x%016llx\t", (long long) GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_flags)); printf("gd_addr: 0x%016llx\n", (long long) GEM_DMA_READ(sc, sc->sc_txdescs[i].gd_addr)); if (i == txs->txs_lastdesc) break; } } #endif /* * In theory, we could harveast some descriptors before * the ring is empty, but that's a bit complicated. * * GEM_TX_COMPLETION points to the last descriptor * processed +1. 
*/ txlast = bus_space_read_4(t, mac, GEM_TX_COMPLETION); #ifdef GEM_DEBUG CTR3(KTR_GEM, "gem_tint: txs->txs_firstdesc = %d, " "txs->txs_lastdesc = %d, txlast = %d", txs->txs_firstdesc, txs->txs_lastdesc, txlast); #endif if (txs->txs_firstdesc <= txs->txs_lastdesc) { if ((txlast >= txs->txs_firstdesc) && (txlast <= txs->txs_lastdesc)) break; } else { /* Ick -- this command wraps */ if ((txlast >= txs->txs_firstdesc) || (txlast <= txs->txs_lastdesc)) break; } #ifdef GEM_DEBUG CTR0(KTR_GEM, "gem_tint: releasing a desc"); #endif STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); sc->sc_txfree += txs->txs_ndescs; bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); if (txs->txs_mbuf != NULL) { m_freem(txs->txs_mbuf); txs->txs_mbuf = NULL; } STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); ifp->if_opackets++; progress = 1; } #ifdef GEM_DEBUG CTR3(KTR_GEM, "gem_tint: GEM_TX_STATE_MACHINE %x " "GEM_TX_DATA_PTR %llx " "GEM_TX_COMPLETION %x", bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_STATE_MACHINE), ((long long) bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_DATA_PTR_HI) << 32) | bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_DATA_PTR_LO), bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_COMPLETION)); #endif if (progress) { if (sc->sc_txfree == GEM_NTXDESC - 1) sc->sc_txwin = 0; /* Freed some descriptors, so reset IFF_OACTIVE and restart. */ ifp->if_flags &= ~IFF_OACTIVE; gem_start(ifp); if (STAILQ_EMPTY(&sc->sc_txdirtyq)) ifp->if_timer = 0; } #ifdef GEM_DEBUG CTR2(KTR_GEM, "%s: gem_tint: watchdog %d", device_get_name(sc->sc_dev), ifp->if_timer); #endif } #if 0 static void gem_rint_timeout(arg) void *arg; { gem_rint((struct gem_softc *)arg); } #endif /* * Receive interrupt. 
*/ static void gem_rint(sc) struct gem_softc *sc; { struct ifnet *ifp = sc->sc_ifp; bus_space_tag_t t = sc->sc_bustag; bus_space_handle_t h = sc->sc_h; struct gem_rxsoft *rxs; struct mbuf *m; u_int64_t rxstat; u_int32_t rxcomp; int i, len, progress = 0; callout_stop(&sc->sc_rx_ch); #ifdef GEM_DEBUG CTR1(KTR_GEM, "%s: gem_rint", device_get_name(sc->sc_dev)); #endif /* * Read the completion register once. This limits * how long the following loop can execute. */ rxcomp = bus_space_read_4(t, h, GEM_RX_COMPLETION); #ifdef GEM_DEBUG CTR2(KTR_GEM, "gem_rint: sc->rxptr %d, complete %d", sc->sc_rxptr, rxcomp); #endif GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); for (i = sc->sc_rxptr; i != rxcomp; i = GEM_NEXTRX(i)) { rxs = &sc->sc_rxsoft[i]; rxstat = GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags); if (rxstat & GEM_RD_OWN) { #if 0 /* XXX: In case of emergency, re-enable this. */ /* * The descriptor is still marked as owned, although * it is supposed to have completed. This has been * observed on some machines. Just exiting here * might leave the packet sitting around until another * one arrives to trigger a new interrupt, which is * generally undesirable, so set up a timeout. */ callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS, gem_rint_timeout, sc); #endif break; } progress++; ifp->if_ipackets++; if (rxstat & GEM_RD_BAD_CRC) { ifp->if_ierrors++; device_printf(sc->sc_dev, "receive error: CRC error\n"); GEM_INIT_RXDESC(sc, i); continue; } #ifdef GEM_DEBUG if (ifp->if_flags & IFF_DEBUG) { printf(" rxsoft %p descriptor %d: ", rxs, i); printf("gd_flags: 0x%016llx\t", (long long) GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_flags)); printf("gd_addr: 0x%016llx\n", (long long) GEM_DMA_READ(sc, sc->sc_rxdescs[i].gd_addr)); } #endif /* * No errors; receive the packet. Note the Gem * includes the CRC with every packet. */ len = GEM_RD_BUFLEN(rxstat); /* * Allocate a new mbuf cluster. 
If that fails, we are * out of memory, and must drop the packet and recycle * the buffer that's already attached to this descriptor. */ m = rxs->rxs_mbuf; if (gem_add_rxbuf(sc, i) != 0) { ifp->if_ierrors++; GEM_INIT_RXDESC(sc, i); continue; } m->m_data += 2; /* We're already off by two */ m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = len - ETHER_CRC_LEN; /* Pass it on. */ (*ifp->if_input)(ifp, m); } if (progress) { GEM_CDSYNC(sc, BUS_DMASYNC_PREWRITE); /* Update the receive pointer. */ if (i == sc->sc_rxptr) { device_printf(sc->sc_dev, "rint: ring wrap\n"); } sc->sc_rxptr = i; bus_space_write_4(t, h, GEM_RX_KICK, GEM_PREVRX(i)); } #ifdef GEM_DEBUG CTR2(KTR_GEM, "gem_rint: done sc->rxptr %d, complete %d", sc->sc_rxptr, bus_space_read_4(t, h, GEM_RX_COMPLETION)); #endif } /* * gem_add_rxbuf: * * Add a receive buffer to the indicated descriptor. */ static int gem_add_rxbuf(sc, idx) struct gem_softc *sc; int idx; { struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx]; struct mbuf *m; int error; m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; #ifdef GEM_DEBUG /* bzero the packet to check dma */ memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size); #endif if (rxs->rxs_mbuf != NULL) { bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap); } rxs->rxs_mbuf = m; error = bus_dmamap_load_mbuf(sc->sc_rdmatag, rxs->rxs_dmamap, m, gem_rxdma_callback, rxs, BUS_DMA_NOWAIT); if (error != 0 || rxs->rxs_paddr == 0) { device_printf(sc->sc_dev, "can't load rx DMA map %d, error = " "%d\n", idx, error); panic("gem_add_rxbuf"); /* XXX */ } bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD); GEM_INIT_RXDESC(sc, idx); return (0); } static void gem_eint(sc, status) struct gem_softc *sc; u_int status; { if ((status & GEM_INTR_MIF) != 0) { device_printf(sc->sc_dev, "XXXlink status changed\n"); return; } device_printf(sc->sc_dev, 
"status=%x\n", status); } void gem_intr(v) void *v; { struct gem_softc *sc = (struct gem_softc *)v; bus_space_tag_t t = sc->sc_bustag; bus_space_handle_t seb = sc->sc_h; u_int32_t status; status = bus_space_read_4(t, seb, GEM_STATUS); #ifdef GEM_DEBUG CTR3(KTR_GEM, "%s: gem_intr: cplt %x, status %x", device_get_name(sc->sc_dev), (status>>19), (u_int)status); #endif if ((status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_BERR)) != 0) gem_eint(sc, status); if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0) gem_tint(sc); if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0) gem_rint(sc); /* We should eventually do more than just print out error stats. */ if (status & GEM_INTR_TX_MAC) { int txstat = bus_space_read_4(t, seb, GEM_MAC_TX_STATUS); if (txstat & ~GEM_MAC_TX_XMIT_DONE) device_printf(sc->sc_dev, "MAC tx fault, status %x\n", txstat); if (txstat & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) gem_init(sc); } if (status & GEM_INTR_RX_MAC) { int rxstat = bus_space_read_4(t, seb, GEM_MAC_RX_STATUS); if (rxstat & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT)) device_printf(sc->sc_dev, "MAC rx fault, status %x\n", rxstat); if ((rxstat & GEM_MAC_RX_OVERFLOW) != 0) gem_init(sc); } } static void gem_watchdog(ifp) struct ifnet *ifp; { struct gem_softc *sc = ifp->if_softc; #ifdef GEM_DEBUG CTR3(KTR_GEM, "gem_watchdog: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x " "GEM_MAC_RX_CONFIG %x", bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_RX_CONFIG), bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_STATUS), bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_RX_CONFIG)); CTR3(KTR_GEM, "gem_watchdog: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x " "GEM_MAC_TX_CONFIG %x", bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_TX_CONFIG), bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_STATUS), bus_space_read_4(sc->sc_bustag, sc->sc_h, GEM_MAC_TX_CONFIG)); #endif device_printf(sc->sc_dev, "device timeout\n"); ++ifp->if_oerrors; /* Try to get more packets going. 
*/ gem_start(ifp); } /* * Initialize the MII Management Interface */ static void gem_mifinit(sc) struct gem_softc *sc; { bus_space_tag_t t = sc->sc_bustag; bus_space_handle_t mif = sc->sc_h; /* Configure the MIF in frame mode */ sc->sc_mif_config = bus_space_read_4(t, mif, GEM_MIF_CONFIG); sc->sc_mif_config &= ~GEM_MIF_CONFIG_BB_ENA; bus_space_write_4(t, mif, GEM_MIF_CONFIG, sc->sc_mif_config); } /* * MII interface * * The GEM MII interface supports at least three different operating modes: * * Bitbang mode is implemented using data, clock and output enable registers. * * Frame mode is implemented by loading a complete frame into the frame * register and polling the valid bit for completion. * * Polling mode uses the frame register but completion is indicated by * an interrupt. * */ int gem_mii_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct gem_softc *sc = device_get_softc(dev); bus_space_tag_t t = sc->sc_bustag; bus_space_handle_t mif = sc->sc_h; int n; u_int32_t v; #ifdef GEM_DEBUG_PHY printf("gem_mii_readreg: phy %d reg %d\n", phy, reg); #endif #if 0 /* Select the desired PHY in the MIF configuration register */ v = bus_space_read_4(t, mif, GEM_MIF_CONFIG); /* Clear PHY select bit */ v &= ~GEM_MIF_CONFIG_PHY_SEL; if (phy == GEM_PHYAD_EXTERNAL) /* Set PHY select bit to get at external device */ v |= GEM_MIF_CONFIG_PHY_SEL; bus_space_write_4(t, mif, GEM_MIF_CONFIG, v); #endif /* Construct the frame command */ v = (reg << GEM_MIF_REG_SHIFT) | (phy << GEM_MIF_PHY_SHIFT) | GEM_MIF_FRAME_READ; bus_space_write_4(t, mif, GEM_MIF_FRAME, v); for (n = 0; n < 100; n++) { DELAY(1); v = bus_space_read_4(t, mif, GEM_MIF_FRAME); if (v & GEM_MIF_FRAME_TA0) return (v & GEM_MIF_FRAME_DATA); } device_printf(sc->sc_dev, "mii_read timeout\n"); return (0); } int gem_mii_writereg(dev, phy, reg, val) device_t dev; int phy, reg, val; { struct gem_softc *sc = device_get_softc(dev); bus_space_tag_t t = sc->sc_bustag; bus_space_handle_t mif = sc->sc_h; int n; u_int32_t v; 
#ifdef GEM_DEBUG_PHY printf("gem_mii_writereg: phy %d reg %d val %x\n", phy, reg, val); #endif #if 0 /* Select the desired PHY in the MIF configuration register */ v = bus_space_read_4(t, mif, GEM_MIF_CONFIG); /* Clear PHY select bit */ v &= ~GEM_MIF_CONFIG_PHY_SEL; if (phy == GEM_PHYAD_EXTERNAL) /* Set PHY select bit to get at external device */ v |= GEM_MIF_CONFIG_PHY_SEL; bus_space_write_4(t, mif, GEM_MIF_CONFIG, v); #endif /* Construct the frame command */ v = GEM_MIF_FRAME_WRITE | (phy << GEM_MIF_PHY_SHIFT) | (reg << GEM_MIF_REG_SHIFT) | (val & GEM_MIF_FRAME_DATA); bus_space_write_4(t, mif, GEM_MIF_FRAME, v); for (n = 0; n < 100; n++) { DELAY(1); v = bus_space_read_4(t, mif, GEM_MIF_FRAME); if (v & GEM_MIF_FRAME_TA0) return (1); } device_printf(sc->sc_dev, "mii_write timeout\n"); return (0); } void gem_mii_statchg(dev) device_t dev; { struct gem_softc *sc = device_get_softc(dev); #ifdef GEM_DEBUG int instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media); #endif bus_space_tag_t t = sc->sc_bustag; bus_space_handle_t mac = sc->sc_h; u_int32_t v; #ifdef GEM_DEBUG if (sc->sc_debug) printf("gem_mii_statchg: status change: phy = %d\n", sc->sc_phys[instance]); #endif /* Set tx full duplex options */ bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, 0); DELAY(10000); /* reg must be cleared and delay before changing. 
*/ v = GEM_MAC_TX_ENA_IPG0|GEM_MAC_TX_NGU|GEM_MAC_TX_NGU_LIMIT| GEM_MAC_TX_ENABLE; if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) { v |= GEM_MAC_TX_IGN_CARRIER|GEM_MAC_TX_IGN_COLLIS; } bus_space_write_4(t, mac, GEM_MAC_TX_CONFIG, v); /* XIF Configuration */ /* We should really calculate all this rather than rely on defaults */ v = bus_space_read_4(t, mac, GEM_MAC_XIF_CONFIG); v = GEM_MAC_XIF_LINK_LED; v |= GEM_MAC_XIF_TX_MII_ENA; /* If an external transceiver is connected, enable its MII drivers */ sc->sc_mif_config = bus_space_read_4(t, mac, GEM_MIF_CONFIG); if ((sc->sc_mif_config & GEM_MIF_CONFIG_MDI1) != 0) { /* External MII needs echo disable if half duplex. */ if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) /* turn on full duplex LED */ v |= GEM_MAC_XIF_FDPLX_LED; else /* half duplex -- disable echo */ v |= GEM_MAC_XIF_ECHO_DISABL; if (IFM_SUBTYPE(sc->sc_mii->mii_media_active) == IFM_1000_T) v |= GEM_MAC_XIF_GMII_MODE; else v &= ~GEM_MAC_XIF_GMII_MODE; } else { /* Internal MII needs buf enable */ v |= GEM_MAC_XIF_MII_BUF_ENA; } bus_space_write_4(t, mac, GEM_MAC_XIF_CONFIG, v); } int gem_mediachange(ifp) struct ifnet *ifp; { struct gem_softc *sc = ifp->if_softc; /* XXX Add support for serial media. */ return (mii_mediachg(sc->sc_mii)); } void gem_mediastatus(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct gem_softc *sc = ifp->if_softc; if ((ifp->if_flags & IFF_UP) == 0) return; mii_pollstat(sc->sc_mii); ifmr->ifm_active = sc->sc_mii->mii_media_active; ifmr->ifm_status = sc->sc_mii->mii_media_status; } /* * Process an ioctl request. 
*/ static int gem_ioctl(ifp, cmd, data) struct ifnet *ifp; u_long cmd; caddr_t data; { struct gem_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; int s, error = 0; switch (cmd) { case SIOCSIFADDR: case SIOCGIFADDR: case SIOCSIFMTU: error = ether_ioctl(ifp, cmd, data); break; case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if ((sc->sc_ifflags ^ ifp->if_flags) == IFF_PROMISC) gem_setladrf(sc); else gem_init(sc); } else { if (ifp->if_flags & IFF_RUNNING) gem_stop(ifp, 0); } sc->sc_ifflags = ifp->if_flags; error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: gem_setladrf(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd); break; default: error = ENOTTY; break; } /* Try to get things going again */ if (ifp->if_flags & IFF_UP) gem_start(ifp); splx(s); return (error); } /* * Set up the logical address filter. */ static void gem_setladrf(sc) struct gem_softc *sc; { struct ifnet *ifp = sc->sc_ifp; struct ifmultiaddr *inm; bus_space_tag_t t = sc->sc_bustag; bus_space_handle_t h = sc->sc_h; u_int32_t crc; u_int32_t hash[16]; u_int32_t v; int i; /* Get current RX configuration */ v = bus_space_read_4(t, h, GEM_MAC_RX_CONFIG); /* * Turn off promiscuous mode, promiscuous group mode (all multicast), * and hash filter. Depending on the case, the right bit will be * enabled. */ v &= ~(GEM_MAC_RX_PROMISCUOUS|GEM_MAC_RX_HASH_FILTER| GEM_MAC_RX_PROMISC_GRP); if ((ifp->if_flags & IFF_PROMISC) != 0) { /* Turn on promiscuous mode */ v |= GEM_MAC_RX_PROMISCUOUS; goto chipit; } if ((ifp->if_flags & IFF_ALLMULTI) != 0) { hash[3] = hash[2] = hash[1] = hash[0] = 0xffff; ifp->if_flags |= IFF_ALLMULTI; v |= GEM_MAC_RX_PROMISC_GRP; goto chipit; } /* * Set up multicast address filter by passing all multicast addresses * through a crc generator, and then using the high order 8 bits as an * index into the 256 bit logical address filter. 
The high order 4 * bits selects the word, while the other 4 bits select the bit within * the word (where bit 0 is the MSB). */ /* Clear hash table */ memset(hash, 0, sizeof(hash)); + IF_ADDR_LOCK(ifp); TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) { if (inm->ifma_addr->sa_family != AF_LINK) continue; crc = ether_crc32_le(LLADDR((struct sockaddr_dl *) inm->ifma_addr), ETHER_ADDR_LEN); /* Just want the 8 most significant bits. */ crc >>= 24; /* Set the corresponding bit in the filter. */ hash[crc >> 4] |= 1 << (15 - (crc & 15)); } + IF_ADDR_UNLOCK(ifp); v |= GEM_MAC_RX_HASH_FILTER; ifp->if_flags &= ~IFF_ALLMULTI; /* Now load the hash table into the chip (if we are using it) */ for (i = 0; i < 16; i++) { bus_space_write_4(t, h, GEM_MAC_HASH0 + i * (GEM_MAC_HASH1-GEM_MAC_HASH0), hash[i]); } chipit: bus_space_write_4(t, h, GEM_MAC_RX_CONFIG, v); } Index: stable/6/sys/dev/hme/if_hme.c =================================================================== --- stable/6/sys/dev/hme/if_hme.c (revision 149421) +++ stable/6/sys/dev/hme/if_hme.c (revision 149422) @@ -1,1706 +1,1708 @@ /*- * Copyright (c) 1999 The NetBSD Foundation, Inc. * Copyright (c) 2001-2003 Thomas Moestl . * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Paul Kranenburg. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the NetBSD * Foundation, Inc. 
and its contributors. * 4. Neither the name of The NetBSD Foundation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * from: NetBSD: hme.c,v 1.29 2002/05/05 03:02:38 thorpej Exp */ #include __FBSDID("$FreeBSD$"); /* * HME Ethernet module driver. * * The HME is e.g. part of the PCIO PCI multi function device. * It supports TX gathering and TX and RX checksum offloading. * RX buffers must be aligned at a programmable offset modulo 16. We choose 2 * for this offset: mbuf clusters are usually on about 2^11 boundaries, 2 bytes * are skipped to make sure the header after the ethernet header is aligned on a * natural boundary, so this ensures minimal wastage in the most common case. * * Also, apparently, the buffers must extend to a DMA burst boundary beyond the * maximum packet size (this is not verified). Buffers starting on odd * boundaries must be mapped so that the burst can start on a natural boundary. * * STP2002QFP-UG says that Ethernet hardware supports TCP checksum offloading. * In reality, we can do the same technique for UDP datagram too. 
However, * the hardware doesn't compensate the checksum for UDP datagram which can yield * to 0x0. As a safe guard, UDP checksum offload is disabled by default. It * can be reactivated by setting special link option link0 with ifconfig(8). */ #define HME_CSUM_FEATURES (CSUM_TCP) #define HMEDEBUG #define KTR_HME KTR_CT2 /* XXX */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static void hme_start(struct ifnet *); static void hme_start_locked(struct ifnet *); static void hme_stop(struct hme_softc *); static int hme_ioctl(struct ifnet *, u_long, caddr_t); static void hme_tick(void *); static void hme_watchdog(struct ifnet *); static void hme_init(void *); static void hme_init_locked(struct hme_softc *); static int hme_add_rxbuf(struct hme_softc *, unsigned int, int); static int hme_meminit(struct hme_softc *); static int hme_mac_bitflip(struct hme_softc *, u_int32_t, u_int32_t, u_int32_t, u_int32_t); static void hme_mifinit(struct hme_softc *); static void hme_reset(struct hme_softc *); static void hme_setladrf(struct hme_softc *, int); static int hme_mediachange(struct ifnet *); static void hme_mediastatus(struct ifnet *, struct ifmediareq *); static int hme_load_txmbuf(struct hme_softc *, struct mbuf *); static void hme_read(struct hme_softc *, int, int, u_int32_t); static void hme_eint(struct hme_softc *, u_int); static void hme_rint(struct hme_softc *); static void hme_tint(struct hme_softc *); static void hme_txcksum(struct mbuf *, u_int32_t *); static void hme_rxcksum(struct mbuf *, u_int32_t); static void hme_cdma_callback(void *, bus_dma_segment_t *, int, int); static void hme_txdma_callback(void *, bus_dma_segment_t *, int, bus_size_t, int); devclass_t hme_devclass; static int hme_nerr; DRIVER_MODULE(miibus, hme, miibus_driver, miibus_devclass, 0, 
0); MODULE_DEPEND(hme, miibus, 1, 1, 1); #define HME_SPC_READ_4(spc, sc, offs) \ bus_space_read_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \ (offs)) #define HME_SPC_WRITE_4(spc, sc, offs, v) \ bus_space_write_4((sc)->sc_ ## spc ## t, (sc)->sc_ ## spc ## h, \ (offs), (v)) #define HME_SEB_READ_4(sc, offs) HME_SPC_READ_4(seb, (sc), (offs)) #define HME_SEB_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(seb, (sc), (offs), (v)) #define HME_ERX_READ_4(sc, offs) HME_SPC_READ_4(erx, (sc), (offs)) #define HME_ERX_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(erx, (sc), (offs), (v)) #define HME_ETX_READ_4(sc, offs) HME_SPC_READ_4(etx, (sc), (offs)) #define HME_ETX_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(etx, (sc), (offs), (v)) #define HME_MAC_READ_4(sc, offs) HME_SPC_READ_4(mac, (sc), (offs)) #define HME_MAC_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(mac, (sc), (offs), (v)) #define HME_MIF_READ_4(sc, offs) HME_SPC_READ_4(mif, (sc), (offs)) #define HME_MIF_WRITE_4(sc, offs, v) HME_SPC_WRITE_4(mif, (sc), (offs), (v)) #define HME_MAXERR 5 #define HME_WHINE(dev, ...) do { \ if (hme_nerr++ < HME_MAXERR) \ device_printf(dev, __VA_ARGS__); \ if (hme_nerr == HME_MAXERR) { \ device_printf(dev, "too may errors; not reporting any " \ "more\n"); \ } \ } while(0) /* Support oversized VLAN frames. */ #define HME_MAX_FRAMESIZE (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) int hme_config(struct hme_softc *sc) { struct ifnet *ifp; struct mii_softc *child; bus_size_t size; int error, rdesc, tdesc, i; ifp = sc->sc_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) return (ENOSPC); /* * HME common initialization. 
* * * hme_softc fields that must be initialized by the front-end: * * the DMA bus tag: * sc_dmatag * * the bus handles, tags and offsets (split for SBus compatibility): * sc_seb{t,h,o} (Shared Ethernet Block registers) * sc_erx{t,h,o} (Receiver Unit registers) * sc_etx{t,h,o} (Transmitter Unit registers) * sc_mac{t,h,o} (MAC registers) * sc_mif{t,h,o} (Management Interface registers) * * the maximum bus burst size: * sc_burst * */ HME_LOCK_ASSERT(sc, MA_NOTOWNED); /* Make sure the chip is stopped. */ HME_LOCK(sc); hme_stop(sc); HME_UNLOCK(sc); /* * Allocate DMA capable memory * Buffer descriptors must be aligned on a 2048 byte boundary; * take this into account when calculating the size. Note that * the maximum number of descriptors (256) occupies 2048 bytes, * so we allocate that much regardless of HME_N*DESC. */ size = 4096; error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, HME_NTXDESC + HME_NRXDESC + 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->sc_pdmatag); if (error) goto fail_ifnet; error = bus_dma_tag_create(sc->sc_pdmatag, 2048, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 1, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, busdma_lock_mutex, &Giant, &sc->sc_cdmatag); if (error) goto fail_ptag; error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, HME_NRXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag); if (error) goto fail_ctag; error = bus_dma_tag_create(sc->sc_pdmatag, max(0x10, sc->sc_burst), 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, HME_NTXDESC, BUS_SPACE_MAXSIZE_32BIT, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag); if (error) goto fail_rtag; /* Allocate control/TX DMA buffer */ error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_rb.rb_membase, 0, &sc->sc_cdmamap); if (error != 0) { device_printf(sc->sc_dev, "DMA buffer alloc error %d\n",
error); goto fail_ttag; } /* Load the buffer */ sc->sc_rb.rb_dmabase = 0; if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cdmamap, sc->sc_rb.rb_membase, size, hme_cdma_callback, sc, 0)) != 0 || sc->sc_rb.rb_dmabase == 0) { device_printf(sc->sc_dev, "DMA buffer map load error %d\n", error); goto fail_free; } CTR2(KTR_HME, "hme_config: dma va %p, pa %#lx", sc->sc_rb.rb_membase, sc->sc_rb.rb_dmabase); /* * Prepare the RX descriptors. rdesc serves as marker for the last * processed descriptor and may be used later on. */ for (rdesc = 0; rdesc < HME_NRXDESC; rdesc++) { sc->sc_rb.rb_rxdesc[rdesc].hrx_m = NULL; error = bus_dmamap_create(sc->sc_rdmatag, 0, &sc->sc_rb.rb_rxdesc[rdesc].hrx_dmamap); if (error != 0) goto fail_rxdesc; } error = bus_dmamap_create(sc->sc_rdmatag, 0, &sc->sc_rb.rb_spare_dmamap); if (error != 0) goto fail_rxdesc; /* Same for the TX descs. */ for (tdesc = 0; tdesc < HME_NTXQ; tdesc++) { sc->sc_rb.rb_txdesc[tdesc].htx_m = NULL; error = bus_dmamap_create(sc->sc_tdmatag, 0, &sc->sc_rb.rb_txdesc[tdesc].htx_dmamap); if (error != 0) goto fail_txdesc; } sc->sc_csum_features = HME_CSUM_FEATURES; /* Initialize ifnet structure. */ ifp->if_softc = sc; if_initname(ifp, device_get_name(sc->sc_dev), device_get_unit(sc->sc_dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_start = hme_start; ifp->if_ioctl = hme_ioctl; ifp->if_init = hme_init; ifp->if_watchdog = hme_watchdog; IFQ_SET_MAXLEN(&ifp->if_snd, HME_NTXQ); ifp->if_snd.ifq_drv_maxlen = HME_NTXQ; IFQ_SET_READY(&ifp->if_snd); HME_LOCK(sc); hme_mifinit(sc); HME_UNLOCK(sc); if ((error = mii_phy_probe(sc->sc_dev, &sc->sc_miibus, hme_mediachange, hme_mediastatus)) != 0) { device_printf(sc->sc_dev, "phy probe failed: %d\n", error); goto fail_rxdesc; } sc->sc_mii = device_get_softc(sc->sc_miibus); /* * Walk along the list of attached MII devices and * establish an `MII instance' to `phy number' * mapping. 
We'll use this mapping in media change * requests to determine which phy to use to program * the MIF configuration register. */ for (child = LIST_FIRST(&sc->sc_mii->mii_phys); child != NULL; child = LIST_NEXT(child, mii_list)) { /* * Note: we support just two PHYs: the built-in * internal device and an external on the MII * connector. */ if (child->mii_phy > 1 || child->mii_inst > 1) { device_printf(sc->sc_dev, "cannot accommodate " "MII device %s at phy %d, instance %d\n", device_get_name(child->mii_dev), child->mii_phy, child->mii_inst); continue; } sc->sc_phys[child->mii_inst] = child->mii_phy; } /* Attach the interface. */ ether_ifattach(ifp, sc->sc_enaddr); /* * Tell the upper layer(s) we support long frames/checksum offloads. */ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_HWCSUM; ifp->if_hwassist |= sc->sc_csum_features; ifp->if_capenable |= IFCAP_VLAN_MTU | IFCAP_HWCSUM; callout_init(&sc->sc_tick_ch, CALLOUT_MPSAFE); return (0); fail_txdesc: for (i = 0; i < tdesc; i++) { bus_dmamap_destroy(sc->sc_tdmatag, sc->sc_rb.rb_txdesc[i].htx_dmamap); } bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap); fail_rxdesc: for (i = 0; i < rdesc; i++) { bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_rxdesc[i].hrx_dmamap); } bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap); fail_free: bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap); fail_ttag: bus_dma_tag_destroy(sc->sc_tdmatag); fail_rtag: bus_dma_tag_destroy(sc->sc_rdmatag); fail_ctag: bus_dma_tag_destroy(sc->sc_cdmatag); fail_ptag: bus_dma_tag_destroy(sc->sc_pdmatag); fail_ifnet: if_free(ifp); return (error); } void hme_detach(struct hme_softc *sc) { struct ifnet *ifp = sc->sc_ifp; int i; HME_LOCK_ASSERT(sc, MA_NOTOWNED); ether_ifdetach(ifp); if_free(ifp); HME_LOCK(sc); hme_stop(sc); HME_UNLOCK(sc); device_delete_child(sc->sc_dev, sc->sc_miibus); for (i = 0; i < HME_NTXQ; i++) { bus_dmamap_destroy(sc->sc_tdmatag, 
sc->sc_rb.rb_txdesc[i].htx_dmamap); } bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap); for (i = 0; i < HME_NRXDESC; i++) { bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rb.rb_rxdesc[i].hrx_dmamap); } bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cdmamap); bus_dmamem_free(sc->sc_cdmatag, sc->sc_rb.rb_membase, sc->sc_cdmamap); bus_dma_tag_destroy(sc->sc_tdmatag); bus_dma_tag_destroy(sc->sc_rdmatag); bus_dma_tag_destroy(sc->sc_cdmatag); bus_dma_tag_destroy(sc->sc_pdmatag); } void hme_suspend(struct hme_softc *sc) { HME_LOCK(sc); hme_stop(sc); HME_UNLOCK(sc); } void hme_resume(struct hme_softc *sc) { struct ifnet *ifp = sc->sc_ifp; HME_LOCK(sc); if ((ifp->if_flags & IFF_UP) != 0) hme_init_locked(sc); HME_UNLOCK(sc); } static void hme_cdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) { struct hme_softc *sc = (struct hme_softc *)xsc; if (error != 0) return; KASSERT(nsegs == 1, ("hme_cdma_callback: bad dma segment count")); sc->sc_rb.rb_dmabase = segs[0].ds_addr; } static void hme_tick(void *arg) { struct hme_softc *sc = arg; int s; s = splnet(); mii_tick(sc->sc_mii); splx(s); callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc); } static void hme_reset(struct hme_softc *sc) { int s; HME_LOCK(sc); s = splnet(); hme_init_locked(sc); splx(s); HME_UNLOCK(sc); } static void hme_stop(struct hme_softc *sc) { u_int32_t v; int n; callout_stop(&sc->sc_tick_ch); /* Reset transmitter and receiver */ HME_SEB_WRITE_4(sc, HME_SEBI_RESET, HME_SEB_RESET_ETX | HME_SEB_RESET_ERX); for (n = 0; n < 20; n++) { v = HME_SEB_READ_4(sc, HME_SEBI_RESET); if ((v & (HME_SEB_RESET_ETX | HME_SEB_RESET_ERX)) == 0) return; DELAY(20); } device_printf(sc->sc_dev, "hme_stop: reset failed\n"); } /* * Discard the contents of an mbuf in the RX ring, freeing the buffer in the * ring for subsequent use. 
*/ static __inline void hme_discard_rxbuf(struct hme_softc *sc, int ix) { /* * Dropped a packet, reinitialize the descriptor and turn the * ownership back to the hardware. */ HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ix, HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, &sc->sc_rb.rb_rxdesc[ix]))); } static int hme_add_rxbuf(struct hme_softc *sc, unsigned int ri, int keepold) { struct hme_rxdesc *rd; struct mbuf *m; bus_dma_segment_t segs[1]; bus_dmamap_t map; uintptr_t b; int a, unmap, nsegs; rd = &sc->sc_rb.rb_rxdesc[ri]; unmap = rd->hrx_m != NULL; if (unmap && keepold) { /* * Reinitialize the descriptor flags, as they may have been * altered by the hardware. */ hme_discard_rxbuf(sc, ri); return (0); } if ((m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR)) == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; b = mtod(m, uintptr_t); /* * Required alignment boundary. At least 16 is needed, but since * the mapping must be done in a way that a burst can start on a * natural boundary we might need to extend this. */ a = max(HME_MINRXALIGN, sc->sc_burst); /* * Make sure the buffer is suitably aligned. The 2 byte offset is removed * when the mbuf is handed up. XXX: this ensures at least 16 byte * alignment of the header adjacent to the ethernet header, which * should be sufficient in all cases. Nevertheless, this second-guesses * ALIGN().
*/ m_adj(m, roundup2(b, a) - b); if (bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, sc->sc_rb.rb_spare_dmamap, m, segs, &nsegs, 0) != 0) { m_freem(m); return (ENOBUFS); } /* If nsegs is wrong then the stack is corrupt */ KASSERT(nsegs == 1, ("Too many segments returned!")); if (unmap) { bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->sc_rdmatag, rd->hrx_dmamap); } map = rd->hrx_dmamap; rd->hrx_dmamap = sc->sc_rb.rb_spare_dmamap; sc->sc_rb.rb_spare_dmamap = map; bus_dmamap_sync(sc->sc_rdmatag, rd->hrx_dmamap, BUS_DMASYNC_PREREAD); HME_XD_SETADDR(sc->sc_pci, sc->sc_rb.rb_rxd, ri, segs[0].ds_addr); rd->hrx_m = m; HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_rxd, ri, HME_XD_OWN | HME_XD_ENCODE_RSIZE(HME_DESC_RXLEN(sc, rd))); return (0); } static int hme_meminit(struct hme_softc *sc) { struct hme_ring *hr = &sc->sc_rb; struct hme_txdesc *td; bus_addr_t dma; caddr_t p; unsigned int i; int error; p = hr->rb_membase; dma = hr->rb_dmabase; /* * Allocate transmit descriptors */ hr->rb_txd = p; hr->rb_txddma = dma; p += HME_NTXDESC * HME_XD_SIZE; dma += HME_NTXDESC * HME_XD_SIZE; /* We have reserved descriptor space until the next 2048 byte boundary.*/ dma = (bus_addr_t)roundup((u_long)dma, 2048); p = (caddr_t)roundup((u_long)p, 2048); /* * Allocate receive descriptors */ hr->rb_rxd = p; hr->rb_rxddma = dma; p += HME_NRXDESC * HME_XD_SIZE; dma += HME_NRXDESC * HME_XD_SIZE; /* Again move forward to the next 2048 byte boundary.*/ dma = (bus_addr_t)roundup((u_long)dma, 2048); p = (caddr_t)roundup((u_long)p, 2048); /* * Initialize transmit buffer descriptors */ for (i = 0; i < HME_NTXDESC; i++) { HME_XD_SETADDR(sc->sc_pci, hr->rb_txd, i, 0); HME_XD_SETFLAGS(sc->sc_pci, hr->rb_txd, i, 0); } STAILQ_INIT(&sc->sc_rb.rb_txfreeq); STAILQ_INIT(&sc->sc_rb.rb_txbusyq); for (i = 0; i < HME_NTXQ; i++) { td = &sc->sc_rb.rb_txdesc[i]; if (td->htx_m != NULL) { m_freem(td->htx_m); bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap, BUS_DMASYNC_POSTWRITE); 
bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap); td->htx_m = NULL; } STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, td, htx_q); } /* * Initialize receive buffer descriptors */ for (i = 0; i < HME_NRXDESC; i++) { error = hme_add_rxbuf(sc, i, 1); if (error != 0) return (error); } bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREREAD); bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREWRITE); hr->rb_tdhead = hr->rb_tdtail = 0; hr->rb_td_nbusy = 0; hr->rb_rdtail = 0; CTR2(KTR_HME, "hme_meminit: tx ring va %p, pa %#lx", hr->rb_txd, hr->rb_txddma); CTR2(KTR_HME, "hme_meminit: rx ring va %p, pa %#lx", hr->rb_rxd, hr->rb_rxddma); CTR2(KTR_HME, "rx entry 1: flags %x, address %x", *(u_int32_t *)hr->rb_rxd, *(u_int32_t *)(hr->rb_rxd + 4)); CTR2(KTR_HME, "tx entry 1: flags %x, address %x", *(u_int32_t *)hr->rb_txd, *(u_int32_t *)(hr->rb_txd + 4)); return (0); } static int hme_mac_bitflip(struct hme_softc *sc, u_int32_t reg, u_int32_t val, u_int32_t clr, u_int32_t set) { int i = 0; val &= ~clr; val |= set; HME_MAC_WRITE_4(sc, reg, val); if (clr == 0 && set == 0) return (1); /* just write, no bits to wait for */ do { DELAY(100); i++; val = HME_MAC_READ_4(sc, reg); if (i > 40) { /* After 3.5ms, we should have been done. */ device_printf(sc->sc_dev, "timeout while writing to " "MAC configuration register\n"); return (0); } } while ((val & clr) != 0 && (val & set) != set); return (1); } /* * Initialization of interface; set up initialization block * and transmit/receive descriptor rings. */ static void hme_init(void *xsc) { struct hme_softc *sc = (struct hme_softc *)xsc; HME_LOCK(sc); hme_init_locked(sc); HME_UNLOCK(sc); } static void hme_init_locked(struct hme_softc *sc) { struct ifnet *ifp = sc->sc_ifp; u_int8_t *ea; u_int32_t n, v; HME_LOCK_ASSERT(sc, MA_OWNED); /* * Initialization sequence. The numbered steps below correspond * to the sequence outlined in section 6.3.5.1 in the Ethernet * Channel Engine manual (part of the PCIO manual). 
* See also the STP2002-STQ document from Sun Microsystems. */ /* step 1 & 2. Reset the Ethernet Channel */ hme_stop(sc); /* Re-initialize the MIF */ hme_mifinit(sc); #if 0 /* Mask all MIF interrupts, just in case */ HME_MIF_WRITE_4(sc, HME_MIFI_IMASK, 0xffff); #endif /* step 3. Setup data structures in host memory */ if (hme_meminit(sc) != 0) { device_printf(sc->sc_dev, "out of buffers; init aborted."); return; } /* step 4. TX MAC registers & counters */ HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0); HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0); HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0); HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0); HME_MAC_WRITE_4(sc, HME_MACI_TXSIZE, HME_MAX_FRAMESIZE); /* Load station MAC address */ ea = IFP2ENADDR(sc->sc_ifp); HME_MAC_WRITE_4(sc, HME_MACI_MACADDR0, (ea[0] << 8) | ea[1]); HME_MAC_WRITE_4(sc, HME_MACI_MACADDR1, (ea[2] << 8) | ea[3]); HME_MAC_WRITE_4(sc, HME_MACI_MACADDR2, (ea[4] << 8) | ea[5]); /* * Init seed for backoff * (source suggested by manual: low 10 bits of MAC address) */ v = ((ea[4] << 8) | ea[5]) & 0x3fff; HME_MAC_WRITE_4(sc, HME_MACI_RANDSEED, v); /* Note: Accepting power-on default for other MAC registers here.. */ /* step 5. RX MAC registers & counters */ hme_setladrf(sc, 0); /* step 6 & 7. Program Descriptor Ring Base Addresses */ HME_ETX_WRITE_4(sc, HME_ETXI_RING, sc->sc_rb.rb_txddma); /* Transmit Descriptor ring size: in increments of 16 */ HME_ETX_WRITE_4(sc, HME_ETXI_RSIZE, HME_NTXDESC / 16 - 1); HME_ERX_WRITE_4(sc, HME_ERXI_RING, sc->sc_rb.rb_rxddma); HME_MAC_WRITE_4(sc, HME_MACI_RXSIZE, HME_MAX_FRAMESIZE); /* step 8. 
Global Configuration & Interrupt Mask */ HME_SEB_WRITE_4(sc, HME_SEBI_IMASK, ~(/*HME_SEB_STAT_GOTFRAME | HME_SEB_STAT_SENTFRAME |*/ HME_SEB_STAT_HOSTTOTX | HME_SEB_STAT_RXTOHOST | HME_SEB_STAT_TXALL | HME_SEB_STAT_TXPERR | HME_SEB_STAT_RCNTEXP | HME_SEB_STAT_ALL_ERRORS )); switch (sc->sc_burst) { default: v = 0; break; case 16: v = HME_SEB_CFG_BURST16; break; case 32: v = HME_SEB_CFG_BURST32; break; case 64: v = HME_SEB_CFG_BURST64; break; } /* * Blindly setting 64bit transfers may hang PCI cards(Cheerio?). * Allowing 64bit transfers breaks TX checksum offload as well. * Don't know this comes from hardware bug or driver's DMAing * scheme. * * if (sc->sc_pci == 0) * v |= HME_SEB_CFG_64BIT; */ HME_SEB_WRITE_4(sc, HME_SEBI_CFG, v); /* step 9. ETX Configuration: use mostly default values */ /* Enable DMA */ v = HME_ETX_READ_4(sc, HME_ETXI_CFG); v |= HME_ETX_CFG_DMAENABLE; HME_ETX_WRITE_4(sc, HME_ETXI_CFG, v); /* step 10. ERX Configuration */ v = HME_ERX_READ_4(sc, HME_ERXI_CFG); /* Encode Receive Descriptor ring size: four possible values */ v &= ~HME_ERX_CFG_RINGSIZEMSK; switch (HME_NRXDESC) { case 32: v |= HME_ERX_CFG_RINGSIZE32; break; case 64: v |= HME_ERX_CFG_RINGSIZE64; break; case 128: v |= HME_ERX_CFG_RINGSIZE128; break; case 256: v |= HME_ERX_CFG_RINGSIZE256; break; default: printf("hme: invalid Receive Descriptor ring size\n"); break; } /* Enable DMA, fix RX first byte offset. */ v &= ~HME_ERX_CFG_FBO_MASK; v |= HME_ERX_CFG_DMAENABLE | (HME_RXOFFS << HME_ERX_CFG_FBO_SHIFT); /* RX TCP/UDP checksum offset */ n = (ETHER_HDR_LEN + sizeof(struct ip)) / 2; n = (n << HME_ERX_CFG_CSUMSTART_SHIFT) & HME_ERX_CFG_CSUMSTART_MASK; v |= n; CTR1(KTR_HME, "hme_init: programming ERX_CFG to %x", (u_int)v); HME_ERX_WRITE_4(sc, HME_ERXI_CFG, v); /* step 11. 
XIF Configuration */ v = HME_MAC_READ_4(sc, HME_MACI_XIF); v |= HME_MAC_XIF_OE; /* If an external transceiver is connected, enable its MII drivers */ if ((HME_MIF_READ_4(sc, HME_MIFI_CFG) & HME_MIF_CFG_MDI1) != 0) v |= HME_MAC_XIF_MIIENABLE; CTR1(KTR_HME, "hme_init: programming XIF to %x", (u_int)v); HME_MAC_WRITE_4(sc, HME_MACI_XIF, v); /* step 12. RX_MAC Configuration Register */ v = HME_MAC_READ_4(sc, HME_MACI_RXCFG); v |= HME_MAC_RXCFG_ENABLE; v &= ~(HME_MAC_RXCFG_DCRCS); CTR1(KTR_HME, "hme_init: programming RX_MAC to %x", (u_int)v); HME_MAC_WRITE_4(sc, HME_MACI_RXCFG, v); /* step 13. TX_MAC Configuration Register */ v = HME_MAC_READ_4(sc, HME_MACI_TXCFG); v |= (HME_MAC_TXCFG_ENABLE | HME_MAC_TXCFG_DGIVEUP); CTR1(KTR_HME, "hme_init: programming TX_MAC to %x", (u_int)v); HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v); /* step 14. Issue Transmit Pending command */ #ifdef HMEDEBUG /* Debug: double-check. */ CTR4(KTR_HME, "hme_init: tx ring %#x, rsz %#x, rx ring %#x, " "rxsize %#x", HME_ETX_READ_4(sc, HME_ETXI_RING), HME_ETX_READ_4(sc, HME_ETXI_RSIZE), HME_ERX_READ_4(sc, HME_ERXI_RING), HME_MAC_READ_4(sc, HME_MACI_RXSIZE)); CTR3(KTR_HME, "hme_init: intr mask %#x, erx cfg %#x, etx cfg %#x", HME_SEB_READ_4(sc, HME_SEBI_IMASK), HME_ERX_READ_4(sc, HME_ERXI_CFG), HME_ETX_READ_4(sc, HME_ETXI_CFG)); CTR2(KTR_HME, "hme_init: mac rxcfg %#x, maci txcfg %#x", HME_MAC_READ_4(sc, HME_MACI_RXCFG), HME_MAC_READ_4(sc, HME_MACI_TXCFG)); #endif /* Set the current media. */ /* * HME_UNLOCK(sc); * mii_mediachg(sc->sc_mii); * HME_LOCK(sc); */ /* Start the one second timer. */ callout_reset(&sc->sc_tick_ch, hz, hme_tick, sc); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; ifp->if_timer = 0; hme_start_locked(ifp); } struct hme_txdma_arg { struct hme_softc *hta_sc; struct hme_txdesc *hta_htx; int hta_ndescs; }; /* * XXX: this relies on the fact that segments returned by bus_dmamap_load_mbuf() * are readable from the nearest burst boundary on (i.e. 
potentially before * ds_addr) to the first boundary beyond the end. This is usually a safe * assumption to make, but is not documented. */ static void hme_txdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, bus_size_t totsz, int error) { struct hme_txdma_arg *ta = xsc; struct hme_txdesc *htx; bus_size_t len = 0; caddr_t txd; u_int32_t flags = 0; int i, tdhead, pci; if (error != 0) return; tdhead = ta->hta_sc->sc_rb.rb_tdhead; pci = ta->hta_sc->sc_pci; txd = ta->hta_sc->sc_rb.rb_txd; htx = ta->hta_htx; if (ta->hta_sc->sc_rb.rb_td_nbusy + nsegs >= HME_NTXDESC) { ta->hta_ndescs = -1; return; } ta->hta_ndescs = nsegs; for (i = 0; i < nsegs; i++) { if (segs[i].ds_len == 0) continue; /* Fill the ring entry. */ flags = HME_XD_ENCODE_TSIZE(segs[i].ds_len); if (len == 0) flags |= HME_XD_SOP; if (len + segs[i].ds_len == totsz) flags |= HME_XD_EOP; CTR5(KTR_HME, "hme_txdma_callback: seg %d/%d, ri %d, " "flags %#x, addr %#x", i + 1, nsegs, tdhead, (u_int)flags, (u_int)segs[i].ds_addr); HME_XD_SETFLAGS(pci, txd, tdhead, flags); HME_XD_SETADDR(pci, txd, tdhead, segs[i].ds_addr); ta->hta_sc->sc_rb.rb_td_nbusy++; htx->htx_lastdesc = tdhead; tdhead = (tdhead + 1) % HME_NTXDESC; len += segs[i].ds_len; } ta->hta_sc->sc_rb.rb_tdhead = tdhead; KASSERT((flags & HME_XD_EOP) != 0, ("hme_txdma_callback: missed end of packet!")); } /* TX TCP/UDP checksum */ static void hme_txcksum(struct mbuf *m, u_int32_t *cflags) { struct ip *ip; u_int32_t offset, offset2; caddr_t p; for(; m && m->m_len == 0; m = m->m_next) ; if (m == NULL || m->m_len < ETHER_HDR_LEN) { printf("hme_txcksum: m_len < ETHER_HDR_LEN\n"); return; /* checksum will be corrupted */ } if (m->m_len < ETHER_HDR_LEN + sizeof(u_int32_t)) { if (m->m_len != ETHER_HDR_LEN) { printf("hme_txcksum: m_len != ETHER_HDR_LEN\n"); return; /* checksum will be corrupted */ } /* XXX */ for(m = m->m_next; m && m->m_len == 0; m = m->m_next) ; if (m == NULL) return; /* checksum will be corrupted */ ip = mtod(m, struct ip *); } else { p = 
mtod(m, caddr_t); p += ETHER_HDR_LEN; ip = (struct ip *)p; } offset2 = m->m_pkthdr.csum_data; offset = (ip->ip_hl << 2) + ETHER_HDR_LEN; *cflags = offset << HME_XD_TXCKSUM_SSHIFT; *cflags |= ((offset + offset2) << HME_XD_TXCKSUM_OSHIFT); *cflags |= HME_XD_TXCKSUM; } /* * Routine to dma map an mbuf chain, set up the descriptor rings accordingly and * start the transmission. * Returns 0 on success, -1 if there were not enough free descriptors to map * the packet, or an errno otherwise. */ static int hme_load_txmbuf(struct hme_softc *sc, struct mbuf *m0) { struct hme_txdma_arg cba; struct hme_txdesc *td; int error, si, ri; u_int32_t flags, cflags = 0; si = sc->sc_rb.rb_tdhead; if ((td = STAILQ_FIRST(&sc->sc_rb.rb_txfreeq)) == NULL) return (-1); if ((m0->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) hme_txcksum(m0, &cflags); cba.hta_sc = sc; cba.hta_htx = td; if ((error = bus_dmamap_load_mbuf(sc->sc_tdmatag, td->htx_dmamap, m0, hme_txdma_callback, &cba, 0)) != 0) goto fail; if (cba.hta_ndescs == -1) { error = -1; goto fail; } bus_dmamap_sync(sc->sc_tdmatag, td->htx_dmamap, BUS_DMASYNC_PREWRITE); STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txfreeq, htx_q); STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txbusyq, td, htx_q); td->htx_m = m0; /* Turn descriptor ownership to the hme, back to forth. */ ri = sc->sc_rb.rb_tdhead; CTR2(KTR_HME, "hme_load_mbuf: next desc is %d (%#x)", ri, HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri)); do { ri = (ri + HME_NTXDESC - 1) % HME_NTXDESC; flags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri) | HME_XD_OWN | cflags; CTR3(KTR_HME, "hme_load_mbuf: activating ri %d, si %d (%#x)", ri, si, flags); HME_XD_SETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri, flags); } while (ri != si); /* start the transmission. */ HME_ETX_WRITE_4(sc, HME_ETXI_PENDING, HME_ETX_TP_DMAWAKEUP); return (0); fail: bus_dmamap_unload(sc->sc_tdmatag, td->htx_dmamap); return (error); } /* * Pass a packet to the higher levels. 
*/ static void hme_read(struct hme_softc *sc, int ix, int len, u_int32_t flags) { struct ifnet *ifp = sc->sc_ifp; struct mbuf *m; if (len <= sizeof(struct ether_header) || len > HME_MAX_FRAMESIZE) { #ifdef HMEDEBUG HME_WHINE(sc->sc_dev, "invalid packet size %d; dropping\n", len); #endif ifp->if_ierrors++; hme_discard_rxbuf(sc, ix); return; } m = sc->sc_rb.rb_rxdesc[ix].hrx_m; CTR1(KTR_HME, "hme_read: len %d", len); if (hme_add_rxbuf(sc, ix, 0) != 0) { /* * hme_add_rxbuf will leave the old buffer in the ring until * it is sure that a new buffer can be mapped. If it can not, * drop the packet, but leave the interface up. */ ifp->if_iqdrops++; hme_discard_rxbuf(sc, ix); return; } ifp->if_ipackets++; m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = len + HME_RXOFFS; m_adj(m, HME_RXOFFS); /* RX TCP/UDP checksum */ if (ifp->if_capenable & IFCAP_RXCSUM) hme_rxcksum(m, flags); /* Pass the packet up. */ HME_UNLOCK(sc); (*ifp->if_input)(ifp, m); HME_LOCK(sc); } static void hme_start(struct ifnet *ifp) { struct hme_softc *sc = ifp->if_softc; HME_LOCK(sc); hme_start_locked(ifp); HME_UNLOCK(sc); } static void hme_start_locked(struct ifnet *ifp) { struct hme_softc *sc = (struct hme_softc *)ifp->if_softc; struct mbuf *m; int error, enq = 0; if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) return; error = 0; for (;;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; error = hme_load_txmbuf(sc, m); if (error == -1) { ifp->if_flags |= IFF_OACTIVE; IFQ_DRV_PREPEND(&ifp->if_snd, m); break; } else if (error > 0) { printf("hme_start: error %d while loading mbuf\n", error); } else { enq = 1; BPF_MTAP(ifp, m); } } if (sc->sc_rb.rb_td_nbusy == HME_NTXDESC || error == -1) ifp->if_flags |= IFF_OACTIVE; /* Set watchdog timer if a packet was queued */ if (enq) { bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREWRITE); ifp->if_timer = 5; } } /* * Transmit interrupt. 
*/ static void hme_tint(struct hme_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct hme_txdesc *htx; unsigned int ri, txflags; /* * Unload collision counters */ ifp->if_collisions += HME_MAC_READ_4(sc, HME_MACI_NCCNT) + HME_MAC_READ_4(sc, HME_MACI_FCCNT) + HME_MAC_READ_4(sc, HME_MACI_EXCNT) + HME_MAC_READ_4(sc, HME_MACI_LTCNT); /* * then clear the hardware counters. */ HME_MAC_WRITE_4(sc, HME_MACI_NCCNT, 0); HME_MAC_WRITE_4(sc, HME_MACI_FCCNT, 0); HME_MAC_WRITE_4(sc, HME_MACI_EXCNT, 0); HME_MAC_WRITE_4(sc, HME_MACI_LTCNT, 0); htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq); bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD); /* Fetch current position in the transmit ring */ for (ri = sc->sc_rb.rb_tdtail;; ri = (ri + 1) % HME_NTXDESC) { if (sc->sc_rb.rb_td_nbusy <= 0) { CTR0(KTR_HME, "hme_tint: not busy!"); break; } txflags = HME_XD_GETFLAGS(sc->sc_pci, sc->sc_rb.rb_txd, ri); CTR2(KTR_HME, "hme_tint: index %d, flags %#x", ri, txflags); if ((txflags & HME_XD_OWN) != 0) break; CTR0(KTR_HME, "hme_tint: not owned"); --sc->sc_rb.rb_td_nbusy; ifp->if_flags &= ~IFF_OACTIVE; /* Complete packet transmitted? 
*/ if ((txflags & HME_XD_EOP) == 0) continue; KASSERT(htx->htx_lastdesc == ri, ("hme_tint: ring indices skewed: %d != %d!", htx->htx_lastdesc, ri)); bus_dmamap_sync(sc->sc_tdmatag, htx->htx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_tdmatag, htx->htx_dmamap); ifp->if_opackets++; m_freem(htx->htx_m); htx->htx_m = NULL; STAILQ_REMOVE_HEAD(&sc->sc_rb.rb_txbusyq, htx_q); STAILQ_INSERT_TAIL(&sc->sc_rb.rb_txfreeq, htx, htx_q); htx = STAILQ_FIRST(&sc->sc_rb.rb_txbusyq); } /* Turn off watchdog */ if (sc->sc_rb.rb_td_nbusy == 0) ifp->if_timer = 0; /* Update ring */ sc->sc_rb.rb_tdtail = ri; hme_start_locked(ifp); if (sc->sc_rb.rb_td_nbusy == 0) ifp->if_timer = 0; } /* * RX TCP/UDP checksum */ static void hme_rxcksum(struct mbuf *m, u_int32_t flags) { struct ether_header *eh; struct ip *ip; struct udphdr *uh; int32_t hlen, len, pktlen; u_int16_t cksum, *opts; u_int32_t temp32; pktlen = m->m_pkthdr.len; if (pktlen < sizeof(struct ether_header) + sizeof(struct ip)) return; eh = mtod(m, struct ether_header *); if (eh->ether_type != htons(ETHERTYPE_IP)) return; ip = (struct ip *)(eh + 1); if (ip->ip_v != IPVERSION) return; hlen = ip->ip_hl << 2; pktlen -= sizeof(struct ether_header); if (hlen < sizeof(struct ip)) return; if (ntohs(ip->ip_len) < hlen) return; if (ntohs(ip->ip_len) != pktlen) return; if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) return; /* can't handle fragmented packet */ switch (ip->ip_p) { case IPPROTO_TCP: if (pktlen < (hlen + sizeof(struct tcphdr))) return; break; case IPPROTO_UDP: if (pktlen < (hlen + sizeof(struct udphdr))) return; uh = (struct udphdr *)((caddr_t)ip + hlen); if (uh->uh_sum == 0) return; /* no checksum */ break; default: return; } cksum = ~(flags & HME_XD_RXCKSUM); /* checksum fixup for IP options */ len = hlen - sizeof(struct ip); if (len > 0) { opts = (u_int16_t *)(ip + 1); for (; len > 0; len -= sizeof(u_int16_t), opts++) { temp32 = cksum - *opts; temp32 = (temp32 >> 16) + (temp32 & 65535); cksum = temp32 & 65535; } } 
m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; m->m_pkthdr.csum_data = cksum; } /* * Receive interrupt. */ static void hme_rint(struct hme_softc *sc) { caddr_t xdr = sc->sc_rb.rb_rxd; struct ifnet *ifp = sc->sc_ifp; unsigned int ri, len; int progress = 0; u_int32_t flags; /* * Process all buffers with valid data. */ bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_POSTREAD); for (ri = sc->sc_rb.rb_rdtail;; ri = (ri + 1) % HME_NRXDESC) { flags = HME_XD_GETFLAGS(sc->sc_pci, xdr, ri); CTR2(KTR_HME, "hme_rint: index %d, flags %#x", ri, flags); if ((flags & HME_XD_OWN) != 0) break; progress++; if ((flags & HME_XD_OFL) != 0) { device_printf(sc->sc_dev, "buffer overflow, ri=%d; " "flags=0x%x\n", ri, flags); ifp->if_ierrors++; hme_discard_rxbuf(sc, ri); } else { len = HME_XD_DECODE_RSIZE(flags); hme_read(sc, ri, len, flags); } } if (progress) { bus_dmamap_sync(sc->sc_cdmatag, sc->sc_cdmamap, BUS_DMASYNC_PREWRITE); } sc->sc_rb.rb_rdtail = ri; } static void hme_eint(struct hme_softc *sc, u_int status) { if ((status & HME_SEB_STAT_MIFIRQ) != 0) { device_printf(sc->sc_dev, "XXXlink status changed\n"); return; } HME_WHINE(sc->sc_dev, "error signaled, status=%#x\n", status); } void hme_intr(void *v) { struct hme_softc *sc = (struct hme_softc *)v; u_int32_t status; HME_LOCK(sc); status = HME_SEB_READ_4(sc, HME_SEBI_STAT); CTR1(KTR_HME, "hme_intr: status %#x", (u_int)status); if ((status & HME_SEB_STAT_ALL_ERRORS) != 0) hme_eint(sc, status); if ((status & (HME_SEB_STAT_TXALL | HME_SEB_STAT_HOSTTOTX)) != 0) hme_tint(sc); if ((status & HME_SEB_STAT_RXTOHOST) != 0) hme_rint(sc); HME_UNLOCK(sc); } static void hme_watchdog(struct ifnet *ifp) { struct hme_softc *sc = ifp->if_softc; #ifdef HMEDEBUG u_int32_t status; #endif HME_LOCK(sc); #ifdef HMEDEBUG status = HME_SEB_READ_4(sc, HME_SEBI_STAT); CTR1(KTR_HME, "hme_watchdog: status %x", (u_int)status); #endif device_printf(sc->sc_dev, "device timeout\n"); ++ifp->if_oerrors; HME_UNLOCK(sc); hme_reset(sc); } /* * Initialize the MII 
Management Interface */ static void hme_mifinit(struct hme_softc *sc) { u_int32_t v; HME_LOCK_ASSERT(sc, MA_OWNED); /* Configure the MIF in frame mode */ v = HME_MIF_READ_4(sc, HME_MIFI_CFG); v &= ~HME_MIF_CFG_BBMODE; HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v); } /* * MII interface */ int hme_mii_readreg(device_t dev, int phy, int reg) { struct hme_softc *sc = device_get_softc(dev); int n; u_int32_t v; HME_LOCK(sc); /* Select the desired PHY in the MIF configuration register */ v = HME_MIF_READ_4(sc, HME_MIFI_CFG); /* Clear PHY select bit */ v &= ~HME_MIF_CFG_PHY; if (phy == HME_PHYAD_EXTERNAL) /* Set PHY select bit to get at external device */ v |= HME_MIF_CFG_PHY; HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v); /* Construct the frame command */ v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) | HME_MIF_FO_TAMSB | (MII_COMMAND_READ << HME_MIF_FO_OPC_SHIFT) | (phy << HME_MIF_FO_PHYAD_SHIFT) | (reg << HME_MIF_FO_REGAD_SHIFT); HME_MIF_WRITE_4(sc, HME_MIFI_FO, v); for (n = 0; n < 100; n++) { DELAY(1); v = HME_MIF_READ_4(sc, HME_MIFI_FO); if (v & HME_MIF_FO_TALSB) { HME_UNLOCK(sc); return (v & HME_MIF_FO_DATA); } } device_printf(sc->sc_dev, "mii_read timeout\n"); HME_UNLOCK(sc); return (0); } int hme_mii_writereg(device_t dev, int phy, int reg, int val) { struct hme_softc *sc = device_get_softc(dev); int n; u_int32_t v; HME_LOCK(sc); /* Select the desired PHY in the MIF configuration register */ v = HME_MIF_READ_4(sc, HME_MIFI_CFG); /* Clear PHY select bit */ v &= ~HME_MIF_CFG_PHY; if (phy == HME_PHYAD_EXTERNAL) /* Set PHY select bit to get at external device */ v |= HME_MIF_CFG_PHY; HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v); /* Construct the frame command */ v = (MII_COMMAND_START << HME_MIF_FO_ST_SHIFT) | HME_MIF_FO_TAMSB | (MII_COMMAND_WRITE << HME_MIF_FO_OPC_SHIFT) | (phy << HME_MIF_FO_PHYAD_SHIFT) | (reg << HME_MIF_FO_REGAD_SHIFT) | (val & HME_MIF_FO_DATA); HME_MIF_WRITE_4(sc, HME_MIFI_FO, v); for (n = 0; n < 100; n++) { DELAY(1); v = HME_MIF_READ_4(sc, HME_MIFI_FO); if (v & 
HME_MIF_FO_TALSB) { HME_UNLOCK(sc); return (1); } } device_printf(sc->sc_dev, "mii_write timeout\n"); HME_UNLOCK(sc); return (0); } void hme_mii_statchg(device_t dev) { struct hme_softc *sc = device_get_softc(dev); int instance; int phy; u_int32_t v; HME_LOCK(sc); instance = IFM_INST(sc->sc_mii->mii_media.ifm_cur->ifm_media); phy = sc->sc_phys[instance]; #ifdef HMEDEBUG if (sc->sc_debug) printf("hme_mii_statchg: status change: phy = %d\n", phy); #endif /* Select the current PHY in the MIF configuration register */ v = HME_MIF_READ_4(sc, HME_MIFI_CFG); v &= ~HME_MIF_CFG_PHY; if (phy == HME_PHYAD_EXTERNAL) v |= HME_MIF_CFG_PHY; HME_MIF_WRITE_4(sc, HME_MIFI_CFG, v); /* Set the MAC Full Duplex bit appropriately */ v = HME_MAC_READ_4(sc, HME_MACI_TXCFG); if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, HME_MAC_TXCFG_ENABLE, 0)) { HME_UNLOCK(sc); return; } if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) v |= HME_MAC_TXCFG_FULLDPLX; else v &= ~HME_MAC_TXCFG_FULLDPLX; HME_MAC_WRITE_4(sc, HME_MACI_TXCFG, v); if (!hme_mac_bitflip(sc, HME_MACI_TXCFG, v, 0, HME_MAC_TXCFG_ENABLE)) { HME_UNLOCK(sc); return; } HME_UNLOCK(sc); } static int hme_mediachange(struct ifnet *ifp) { struct hme_softc *sc = ifp->if_softc; return (mii_mediachg(sc->sc_mii)); } static void hme_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) { struct hme_softc *sc = ifp->if_softc; HME_LOCK(sc); if ((ifp->if_flags & IFF_UP) == 0) { HME_UNLOCK(sc); return; } HME_UNLOCK(sc); mii_pollstat(sc->sc_mii); HME_LOCK(sc); ifmr->ifm_active = sc->sc_mii->mii_media_active; ifmr->ifm_status = sc->sc_mii->mii_media_status; HME_UNLOCK(sc); } /* * Process an ioctl request. 
*/ static int hme_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct hme_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; int s, error = 0; HME_LOCK(sc); s = splnet(); switch (cmd) { case SIOCSIFFLAGS: if ((ifp->if_flags & IFF_UP) == 0 && (ifp->if_flags & IFF_RUNNING) != 0) { /* * If interface is marked down and it is running, then * stop it. */ hme_stop(sc); ifp->if_flags &= ~IFF_RUNNING; } else if ((ifp->if_flags & IFF_UP) != 0 && (ifp->if_flags & IFF_RUNNING) == 0) { /* * If interface is marked up and it is stopped, then * start it. */ hme_init_locked(sc); } else if ((ifp->if_flags & IFF_UP) != 0) { /* * Reset the interface to pick up changes in any other * flags that affect hardware registers. */ hme_init_locked(sc); } if ((ifp->if_flags & IFF_LINK0) != 0) sc->sc_csum_features |= CSUM_UDP; else sc->sc_csum_features &= ~CSUM_UDP; if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) ifp->if_hwassist = sc->sc_csum_features; #ifdef HMEDEBUG sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0; #endif break; case SIOCADDMULTI: case SIOCDELMULTI: hme_setladrf(sc, 1); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: HME_UNLOCK(sc); error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd); HME_LOCK(sc); break; case SIOCSIFCAP: ifp->if_capenable = ifr->ifr_reqcap; if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) ifp->if_hwassist = sc->sc_csum_features; else ifp->if_hwassist = 0; break; default: HME_UNLOCK(sc); error = ether_ioctl(ifp, cmd, data); HME_LOCK(sc); break; } splx(s); HME_UNLOCK(sc); return (error); } /* * Set up the logical address filter. 
*/ static void hme_setladrf(struct hme_softc *sc, int reenable) { struct ifnet *ifp = sc->sc_ifp; struct ifmultiaddr *inm; u_int32_t crc; u_int32_t hash[4]; u_int32_t macc; HME_LOCK_ASSERT(sc, MA_OWNED); /* Clear hash table */ hash[3] = hash[2] = hash[1] = hash[0] = 0; /* Get current RX configuration */ macc = HME_MAC_READ_4(sc, HME_MACI_RXCFG); /* * Disable the receiver while changing it's state as the documentation * mandates. * We then must wait until the bit clears in the register. This should * take at most 3.5ms. */ if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, HME_MAC_RXCFG_ENABLE, 0)) return; /* Disable the hash filter before writing to the filter registers. */ if (!hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, HME_MAC_RXCFG_HENABLE, 0)) return; /* make RXMAC really SIMPLEX */ macc |= HME_MAC_RXCFG_ME; if (reenable) macc |= HME_MAC_RXCFG_ENABLE; else macc &= ~HME_MAC_RXCFG_ENABLE; if ((ifp->if_flags & IFF_PROMISC) != 0) { /* Turn on promiscuous mode; turn off the hash filter */ macc |= HME_MAC_RXCFG_PMISC; macc &= ~HME_MAC_RXCFG_HENABLE; ifp->if_flags |= IFF_ALLMULTI; goto chipit; } /* Turn off promiscuous mode; turn on the hash filter */ macc &= ~HME_MAC_RXCFG_PMISC; macc |= HME_MAC_RXCFG_HENABLE; /* * Set up multicast address filter by passing all multicast addresses * through a crc generator, and then using the high order 6 bits as an * index into the 64 bit logical address filter. The high order bit * selects the word, while the rest of the bits select the bit within * the word. */ + IF_ADDR_LOCK(sc->sc_ifp); TAILQ_FOREACH(inm, &sc->sc_ifp->if_multiaddrs, ifma_link) { if (inm->ifma_addr->sa_family != AF_LINK) continue; crc = ether_crc32_le(LLADDR((struct sockaddr_dl *) inm->ifma_addr), ETHER_ADDR_LEN); /* Just want the 6 most significant bits. */ crc >>= 26; /* Set the corresponding bit in the filter. 
*/ hash[crc >> 4] |= 1 << (crc & 0xf); } + IF_ADDR_UNLOCK(sc->sc_ifp); ifp->if_flags &= ~IFF_ALLMULTI; chipit: /* Now load the hash table into the chip */ HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB0, hash[0]); HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB1, hash[1]); HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB2, hash[2]); HME_MAC_WRITE_4(sc, HME_MACI_HASHTAB3, hash[3]); hme_mac_bitflip(sc, HME_MACI_RXCFG, macc, 0, macc & (HME_MAC_RXCFG_ENABLE | HME_MAC_RXCFG_HENABLE | HME_MAC_RXCFG_ME)); } Index: stable/6/sys/dev/ie/if_ie.c =================================================================== --- stable/6/sys/dev/ie/if_ie.c (revision 149421) +++ stable/6/sys/dev/ie/if_ie.c (revision 149422) @@ -1,1801 +1,1803 @@ /*- * Copyright (c) 1992, 1993, University of Vermont and State * Agricultural College. * Copyright (c) 1992, 1993, Garrett A. Wollman. * * Portions: * Copyright (c) 1990, 1991, William F. Jolitz * Copyright (c) 1990, The Regents of the University of California * * 3Com 3C507 support: * Copyright (c) 1993, 1994, Charles M. Hannum * * EtherExpress 16 support: * Copyright (c) 1993, 1994, 1995, Rodney W. Grimes * Copyright (c) 1997, Aaron C. Smith * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * Vermont and State Agricultural College and Garrett A. Wollman, by * William F. 
Jolitz, by the University of California, Berkeley, * Lawrence Berkeley Laboratory, and their contributors, by * Charles M. Hannum, by Rodney W. Grimes, and by Aaron C. Smith. * 4. Neither the names of the Universities nor the names of the authors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR AUTHORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * MAINTAINER: Matthew N. Dodd */ #include __FBSDID("$FreeBSD$"); /* * Intel 82586 Ethernet chip * Register, bit, and structure definitions. * * Written by GAW with reference to the Clarkson Packet Driver code for this * chip written by Russ Nelson and others. * * Intel EtherExpress 16 support from if_ix.c, written by Rodney W. Grimes. */ /* * The i82586 is a very versatile chip, found in many implementations. * Programming this chip is mostly the same, but certain details differ * from card to card. This driver is written so that different cards * can be automatically detected at run-time. */ /* * Mode of operation: * * We run the 82586 in a standard Ethernet mode. We keep NFRAMES * received frame descriptors around for the receiver to use, and * NRXBUFS associated receive buffer descriptors, both in a circular * list. 
Whenever a frame is received, we rotate both lists as * necessary. (The 586 treats both lists as a simple queue.) We also * keep a transmit command around so that packets can be sent off * quickly. * * We configure the adapter in AL-LOC = 1 mode, which means that the * Ethernet/802.3 MAC header is placed at the beginning of the receive * buffer rather than being split off into various fields in the RFD. * This also means that we must include this header in the transmit * buffer as well. * * By convention, all transmit commands, and only transmit commands, * shall have the I (IE_CMD_INTR) bit set in the command. This way, * when an interrupt arrives at ieintr(), it is immediately possible * to tell what precisely caused it. ANY OTHER command-sending routines * should run at splimp(), and should post an acknowledgement to every * interrupt they generate. * * The 82586 has a 24-bit address space internally, and the adaptor's * memory is located at the top of this region. However, the value * we are given in configuration is normally the *bottom* of the adaptor * RAM. So, we must go through a few gyrations to come up with a * kernel virtual address which represents the actual beginning of the * 586 address space. First, we autosize the RAM by running through * several possible sizes and trying to initialize the adapter under * the assumption that the selected size is correct. Then, knowing * the correct RAM size, we set up our pointers in the softc `iomem' * represents the computed base of the 586 address space. `iomembot' * represents the actual configured base of adapter RAM. Finally, * `iosize' represents the calculated size of 586 RAM. Then, when * laying out commands, we use the interval [iomembot, iomembot + * iosize); to make 24-pointers, we subtract iomem, and to make * 16-pointers, we subtract iomem and and with 0xffff. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef DEBUG #define IED_RINT 0x01 #define IED_TINT 0x02 #define IED_RNR 0x04 #define IED_CNA 0x08 #define IED_READFRAME 0x10 static int ie_debug = IED_RNR; #endif #define IE_BUF_LEN ETHER_MAX_LEN /* length of transmit buffer */ /* Forward declaration */ struct ie_softc; static void ieinit (void *); static void ie_stop (struct ie_softc *); static int ieioctl (struct ifnet *, u_long, caddr_t); static void iestart (struct ifnet *); static __inline void ee16_interrupt_enable (struct ie_softc *); static void ee16_eeprom_outbits (struct ie_softc *, int, int); static void ee16_eeprom_clock (struct ie_softc *, int); static u_short ee16_read_eeprom (struct ie_softc *, int); static int ee16_eeprom_inbits (struct ie_softc *); static void ee16_shutdown (void *, int); static __inline void ie_ack (struct ie_softc *, u_int); static void iereset (struct ie_softc *); static void ie_readframe (struct ie_softc *, int); static void ie_drop_packet_buffer (struct ie_softc *); static void find_ie_mem_size (struct ie_softc *); static void chan_attn_timeout (void *); static int command_and_wait (struct ie_softc *, int, void volatile *, int); static void run_tdr (struct ie_softc *, volatile struct ie_tdr_cmd *); static int ierint (struct ie_softc *); static int ietint (struct ie_softc *); static int iernr (struct ie_softc *); static void start_receiver (struct ie_softc *); static __inline int ieget (struct ie_softc *, struct mbuf **); static v_caddr_t setup_rfa (struct ie_softc *, v_caddr_t); static int mc_setup (struct ie_softc *); static void ie_mc_reset (struct ie_softc *); #ifdef DEBUG static void print_rbd (volatile struct ie_recv_buf_desc * rbd); static int in_ierint = 0; static int in_ietint = 0; #endif static const char *ie_hardware_names[] 
= { "None", "StarLAN 10", "EN100", "StarLAN Fiber", "3C507", "NI5210", "EtherExpress 16", "Unknown" }; /* * sizeof(iscp) == 1+1+2+4 == 8 * sizeof(scb) == 2+2+2+2+2+2+2+2 == 16 * NFRAMES * sizeof(rfd) == NFRAMES*(2+2+2+2+6+6+2+2) == NFRAMES*24 == 384 * sizeof(xmit_cmd) == 2+2+2+2+6+2 == 18 * sizeof(transmit buffer) == 1512 * sizeof(transmit buffer desc) == 8 * ----- * 1946 * * NRXBUFS * sizeof(rbd) == NRXBUFS*(2+2+4+2+2) == NRXBUFS*12 * NRXBUFS * IE_RBUF_SIZE == NRXBUFS*256 * * NRXBUFS should be (16384 - 1946) / (256 + 12) == 14438 / 268 == 53 * * With NRXBUFS == 48, this leaves us 1574 bytes for another command or * more buffers. Another transmit command would be 18+8+1512 == 1538 * ---just barely fits! * * Obviously all these would have to be reduced for smaller memory sizes. * With a larger memory, it would be possible to roughly double the number * of both transmit and receive buffers. */ #define NFRAMES 4 /* number of receive frames */ #define NRXBUFS 24 /* number of buffers to allocate */ #define IE_RBUF_SIZE 256 /* size of each buffer, MUST BE POWER OF TWO */ #define NTXBUFS 1 /* number of transmit commands */ #define IE_TBUF_SIZE ETHER_MAX_LEN /* size of transmit buffer */ #define MK_24(base, ptr) ((caddr_t)((uintptr_t)ptr - (uintptr_t)base)) #define MK_16(base, ptr) ((u_short)(uintptr_t)MK_24(base, ptr)) static void ee16_shutdown(void *xsc, int howto) { struct ie_softc *sc = (struct ie_softc *)xsc; ee16_reset_586(sc); outb(PORT(sc) + IEE16_ECTRL, IEE16_RESET_ASIC); outb(PORT(sc) + IEE16_ECTRL, 0); } /* * Taken almost exactly from Bill's if_is.c, then modified beyond recognition. */ int ie_attach(device_t dev) { struct ie_softc * sc; struct ifnet * ifp; size_t allocsize; int factor; sc = device_get_softc(dev); ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(sc->dev, "can not if_alloc()\n"); return (ENOSPC); } sc->dev = dev; sc->unit = device_get_unit(dev); /* * based on the amount of memory we have, allocate our tx and rx * resources. 
*/ factor = rman_get_size(sc->mem_res) / 8192; sc->nframes = factor * NFRAMES; sc->nrxbufs = factor * NRXBUFS; sc->ntxbufs = factor * NTXBUFS; /* * Since all of these guys are arrays of pointers, allocate as one * big chunk and dole out accordingly. */ allocsize = sizeof(void *) * (sc->nframes + (sc->nrxbufs * 2) + (sc->ntxbufs * 3)); sc->rframes = (volatile struct ie_recv_frame_desc **) malloc(allocsize, M_DEVBUF, M_NOWAIT); if (sc->rframes == NULL) { if_free(ifp); return (ENXIO); } sc->rbuffs = (volatile struct ie_recv_buf_desc **)&sc->rframes[sc->nframes]; sc->cbuffs = (volatile u_char **)&sc->rbuffs[sc->nrxbufs]; sc->xmit_cmds = (volatile struct ie_xmit_cmd **)&sc->cbuffs[sc->nrxbufs]; sc->xmit_buffs = (volatile struct ie_xmit_buf **)&sc->xmit_cmds[sc->ntxbufs]; sc->xmit_cbuffs = (volatile u_char **)&sc->xmit_buffs[sc->ntxbufs]; if (bootverbose) device_printf(sc->dev, "hardware type %s, revision %d\n", ie_hardware_names[sc->hard_type], sc->hard_vers + 1); ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_NEEDSGIANT; ifp->if_start = iestart; ifp->if_ioctl = ieioctl; ifp->if_init = ieinit; ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; if (sc->hard_type == IE_EE16) EVENTHANDLER_REGISTER(shutdown_post_sync, ee16_shutdown, sc, SHUTDOWN_PRI_DEFAULT); ether_ifattach(ifp, sc->enaddr); return (0); } static __inline void ie_ack(struct ie_softc *sc, u_int mask) { sc->scb->ie_command = sc->scb->ie_status & mask; (*sc->ie_chan_attn) (sc); } /* * What to do upon receipt of an interrupt. */ void ie_intr(void *xsc) { struct ie_softc *sc = (struct ie_softc *)xsc; u_short status; /* Clear the interrupt latch on the 3C507. */ if (sc->hard_type == IE_3C507 && (inb(PORT(sc) + IE507_CTRL) & EL_CTRL_INTL)) outb(PORT(sc) + IE507_ICTRL, 1); /* disable interrupts on the EE16. 
*/ if (sc->hard_type == IE_EE16) outb(PORT(sc) + IEE16_IRQ, sc->irq_encoded); status = sc->scb->ie_status; loop: /* Don't ack interrupts which we didn't receive */ ie_ack(sc, IE_ST_WHENCE & status); if (status & (IE_ST_RECV | IE_ST_RNR)) { #ifdef DEBUG in_ierint++; if (ie_debug & IED_RINT) printf("ie%d: rint\n", sc->unit); #endif ierint(sc); #ifdef DEBUG in_ierint--; #endif } if (status & IE_ST_DONE) { #ifdef DEBUG in_ietint++; if (ie_debug & IED_TINT) printf("ie%d: tint\n", sc->unit); #endif ietint(sc); #ifdef DEBUG in_ietint--; #endif } if (status & IE_ST_RNR) { #ifdef DEBUG if (ie_debug & IED_RNR) printf("ie%d: rnr\n", sc->unit); #endif iernr(sc); } #ifdef DEBUG if ((status & IE_ST_ALLDONE) && (ie_debug & IED_CNA)) printf("ie%d: cna\n", sc->unit); #endif if ((status = sc->scb->ie_status) & IE_ST_WHENCE) goto loop; /* Clear the interrupt latch on the 3C507. */ if (sc->hard_type == IE_3C507) outb(PORT(sc) + IE507_ICTRL, 1); /* enable interrupts on the EE16. */ if (sc->hard_type == IE_EE16) outb(PORT(sc) + IEE16_IRQ, sc->irq_encoded | IEE16_IRQ_ENABLE); } /* * Process a received-frame interrupt. */ static int ierint(struct ie_softc *sc) { int i, status; static int timesthru = 1024; i = sc->rfhead; while (1) { status = sc->rframes[i]->ie_fd_status; if ((status & IE_FD_COMPLETE) && (status & IE_FD_OK)) { sc->ifp->if_ipackets++; if (!--timesthru) { sc->ifp->if_ierrors += sc->scb->ie_err_crc + sc->scb->ie_err_align + sc->scb->ie_err_resource + sc->scb->ie_err_overrun; sc->scb->ie_err_crc = 0; sc->scb->ie_err_align = 0; sc->scb->ie_err_resource = 0; sc->scb->ie_err_overrun = 0; timesthru = 1024; } ie_readframe(sc, i); } else { if (status & IE_FD_RNR) { if (!(sc->scb->ie_status & IE_RU_READY)) { sc->rframes[0]->ie_fd_next = MK_16(MEM(sc), sc->rbuffs[0]); sc->scb->ie_recv_list = MK_16(MEM(sc), sc->rframes[0]); command_and_wait(sc, IE_RU_START, 0, 0); } } break; } i = (i + 1) % sc->nframes; } return (0); } /* * Process a command-complete interrupt. 
These are only generated by * the transmission of frames. This routine is deceptively simple, since * most of the real work is done by iestart(). */ static int ietint(struct ie_softc *sc) { int status; int i; sc->ifp->if_timer = 0; sc->ifp->if_flags &= ~IFF_OACTIVE; for (i = 0; i < sc->xmit_count; i++) { status = sc->xmit_cmds[i]->ie_xmit_status; if (status & IE_XS_LATECOLL) { printf("ie%d: late collision\n", sc->unit); sc->ifp->if_collisions++; sc->ifp->if_oerrors++; } else if (status & IE_XS_NOCARRIER) { printf("ie%d: no carrier\n", sc->unit); sc->ifp->if_oerrors++; } else if (status & IE_XS_LOSTCTS) { printf("ie%d: lost CTS\n", sc->unit); sc->ifp->if_oerrors++; } else if (status & IE_XS_UNDERRUN) { printf("ie%d: DMA underrun\n", sc->unit); sc->ifp->if_oerrors++; } else if (status & IE_XS_EXCMAX) { printf("ie%d: too many collisions\n", sc->unit); sc->ifp->if_collisions += 16; sc->ifp->if_oerrors++; } else { sc->ifp->if_opackets++; sc->ifp->if_collisions += status & IE_XS_MAXCOLL; } } sc->xmit_count = 0; /* * If multicast addresses were added or deleted while we were * transmitting, ie_mc_reset() set the want_mcsetup flag indicating * that we should do it. */ if (sc->want_mcsetup) { mc_setup(sc); sc->want_mcsetup = 0; } /* Wish I knew why this seems to be necessary... */ sc->xmit_cmds[0]->ie_xmit_status |= IE_STAT_COMPL; iestart(sc->ifp); return (0); /* shouldn't be necessary */ } /* * Process a receiver-not-ready interrupt. I believe that we get these * when there aren't enough buffers to go around. For now (FIXME), we * just restart the receiver, and hope everything's ok. */ static int iernr(struct ie_softc *sc) { #ifdef doesnt_work setup_rfa(sc, (v_caddr_t) sc->rframes[0]); sc->scb->ie_recv_list = MK_16(MEM(sc), sc->rframes[0]); command_and_wait(sc, IE_RU_START, 0, 0); #else /* This doesn't work either, but it doesn't hang either. 
*/ command_and_wait(sc, IE_RU_DISABLE, 0, 0); /* just in case */ setup_rfa(sc, (v_caddr_t) sc->rframes[0]); /* ignore cast-qual */ sc->scb->ie_recv_list = MK_16(MEM(sc), sc->rframes[0]); command_and_wait(sc, IE_RU_START, 0, 0); /* was ENABLE */ #endif ie_ack(sc, IE_ST_WHENCE); sc->ifp->if_ierrors++; return (0); } /* * Compare two Ether/802 addresses for equality, inlined and * unrolled for speed. I'd love to have an inline assembler * version of this... */ static __inline int ether_equal(u_char * one, u_char * two) { if (one[0] != two[0]) return (0); if (one[1] != two[1]) return (0); if (one[2] != two[2]) return (0); if (one[3] != two[3]) return (0); if (one[4] != two[4]) return (0); if (one[5] != two[5]) return (0); return 1; } /* * Determine quickly whether we should bother reading in this packet. * This depends on whether BPF and/or bridging is enabled, whether we * are receiving multicast address, and whether promiscuous mode is enabled. * We assume that if IFF_PROMISC is set, then *somebody* wants to see * all incoming packets. */ static __inline int check_eh(struct ie_softc *sc, struct ether_header *eh) { /* Optimize the common case: normal operation. We've received either a unicast with our dest or a multicast packet. */ if (sc->promisc == 0) { int i; /* If not multicast, it's definitely for us */ if ((eh->ether_dhost[0] & 1) == 0) return (1); /* Accept broadcasts (loose but fast check) */ if (eh->ether_dhost[0] == 0xff) return (1); /* Compare against our multicast addresses */ for (i = 0; i < sc->mcast_count; i++) { if (ether_equal(eh->ether_dhost, (u_char *)&sc->mcast_addrs[i])) return (1); } return (0); } /* Always accept packets when in promiscuous mode */ if ((sc->promisc & IFF_PROMISC) != 0) return (1); /* Always accept packets directed at us */ if (ether_equal(eh->ether_dhost, IFP2ENADDR(sc->ifp))) return (1); /* Must have IFF_ALLMULTI but not IFF_PROMISC set. The chip is actually in promiscuous mode, so discard unicast packets. 
*/ return((eh->ether_dhost[0] & 1) != 0); } /* * We want to isolate the bits that have meaning... This assumes that * IE_RBUF_SIZE is an even power of two. If somehow the act_len exceeds * the size of the buffer, then we are screwed anyway. */ static __inline int ie_buflen(struct ie_softc *sc, int head) { return (sc->rbuffs[head]->ie_rbd_actual & (IE_RBUF_SIZE | (IE_RBUF_SIZE - 1))); } static __inline int ie_packet_len(struct ie_softc *sc) { int i; int head = sc->rbhead; int acc = 0; do { if (!(sc->rbuffs[sc->rbhead]->ie_rbd_actual & IE_RBD_USED)) { #ifdef DEBUG print_rbd(sc->rbuffs[sc->rbhead]); #endif log(LOG_ERR, "ie%d: receive descriptors out of sync at %d\n", sc->unit, sc->rbhead); iereset(sc); return (-1); } i = sc->rbuffs[head]->ie_rbd_actual & IE_RBD_LAST; acc += ie_buflen(sc, head); head = (head + 1) % sc->nrxbufs; } while (!i); return (acc); } /* * Read data off the interface, and turn it into an mbuf chain. * * This code is DRAMATICALLY different from the previous version; this * version tries to allocate the entire mbuf chain up front, given the * length of the data available. This enables us to allocate mbuf * clusters in many situations where before we would have had a long * chain of partially-full mbufs. This should help to speed up the * operation considerably. (Provided that it works, of course.) */ static __inline int ieget(struct ie_softc *sc, struct mbuf **mp) { struct ether_header eh; struct mbuf *m, *top, **mymp; int offset; int totlen, resid; int thismboff; int head; totlen = ie_packet_len(sc); if (totlen <= 0) return (-1); /* * Snarf the Ethernet header. */ bcopy((caddr_t)sc->cbuffs[sc->rbhead], &eh, sizeof(struct ether_header)); /* ignore cast-qual warning here */ /* * As quickly as possible, check if this packet is for us. If not, * don't waste a single cycle copying the rest of the packet in. * This is only a consideration when FILTER is defined; i.e., when * we are either running BPF or doing multicasting. 
*/ if (!check_eh(sc, &eh)) { ie_drop_packet_buffer(sc); sc->ifp->if_ierrors--; /* just this case, it's not an * error */ return (-1); } MGETHDR(m, M_DONTWAIT, MT_DATA); if (!m) { ie_drop_packet_buffer(sc); /* XXXX if_ierrors++; */ return (-1); } *mp = m; m->m_pkthdr.rcvif = sc->ifp; m->m_len = MHLEN; resid = m->m_pkthdr.len = totlen; top = 0; mymp = ⊤ /* * This loop goes through and allocates mbufs for all the data we * will be copying in. It does not actually do the copying yet. */ do { /* while(resid > 0) */ /* * Try to allocate an mbuf to hold the data that we have. * If we already allocated one, just get another one and * stick it on the end (eventually). If we don't already * have one, try to allocate an mbuf cluster big enough to * hold the whole packet, if we think it's reasonable, or a * single mbuf which may or may not be big enough. Got that? */ if (top) { MGET(m, M_DONTWAIT, MT_DATA); if (!m) { m_freem(top); ie_drop_packet_buffer(sc); return (-1); } m->m_len = MLEN; } if (resid >= MINCLSIZE) { MCLGET(m, M_DONTWAIT); if (m->m_flags & M_EXT) m->m_len = min(resid, MCLBYTES); } else { if (resid < m->m_len) { if (!top && resid + max_linkhdr <= m->m_len) m->m_data += max_linkhdr; m->m_len = resid; } } resid -= m->m_len; *mymp = m; mymp = &m->m_next; } while (resid > 0); resid = totlen; /* remaining data */ offset = 0; /* packet offset */ thismboff = 0; /* offset in m */ m = top; /* current mbuf */ head = sc->rbhead; /* current rx buffer */ /* * Now we take the mbuf chain (hopefully only one mbuf most of the * time) and stuff the data into it. There are no possible failures * at or after this point. */ while (resid > 0) { /* while there's stuff left */ int thislen = ie_buflen(sc, head) - offset; /* * If too much data for the current mbuf, then fill the * current one up, go to the next one, and try again. 
*/ if (thislen > m->m_len - thismboff) { int newlen = m->m_len - thismboff; bcopy((v_caddr_t) (sc->cbuffs[head] + offset), mtod(m, caddr_t) +thismboff, (unsigned) newlen); /* ignore cast-qual warning */ m = m->m_next; thismboff = 0; /* new mbuf, so no offset */ offset += newlen; /* we are now this far into * the packet */ resid -= newlen; /* so there is this much left * to get */ continue; } /* * If there is more than enough space in the mbuf to hold * the contents of this buffer, copy everything in, advance * pointers, and so on. */ if (thislen < m->m_len - thismboff) { bcopy((v_caddr_t) (sc->cbuffs[head] + offset), mtod(m, caddr_t) +thismboff, (unsigned) thislen); thismboff += thislen; /* we are this far into the * mbuf */ resid -= thislen; /* and this much is left */ goto nextbuf; } /* * Otherwise, there is exactly enough space to put this * buffer's contents into the current mbuf. Do the * combination of the above actions. */ bcopy((v_caddr_t) (sc->cbuffs[head] + offset), mtod(m, caddr_t) + thismboff, (unsigned) thislen); m = m->m_next; thismboff = 0; /* new mbuf, start at the beginning */ resid -= thislen; /* and we are this far through */ /* * Advance all the pointers. We can get here from either of * the last two cases, but never the first. */ nextbuf: offset = 0; sc->rbuffs[head]->ie_rbd_actual = 0; sc->rbuffs[head]->ie_rbd_length |= IE_RBD_LAST; sc->rbhead = head = (head + 1) % sc->nrxbufs; sc->rbuffs[sc->rbtail]->ie_rbd_length &= ~IE_RBD_LAST; sc->rbtail = (sc->rbtail + 1) % sc->nrxbufs; } /* * Unless something changed strangely while we were doing the copy, * we have now copied everything in from the shared memory. This * means that we are done. */ return (0); } /* * Read frame NUM from unit UNIT (pre-cached as IE). * * This routine reads the RFD at NUM, and copies in the buffers from * the list of RBD, then rotates the RBD and RFD lists so that the receiver * doesn't start complaining. 
Trailers are DROPPED---there's no point * in wasting time on confusing code to deal with them. Hopefully, * this machine will never ARP for trailers anyway. */ static void ie_readframe(struct ie_softc *sc, int num/* frame number to read */) { struct ifnet *ifp = sc->ifp; struct ie_recv_frame_desc rfd; struct mbuf *m = 0; #ifdef DEBUG struct ether_header *eh; #endif bcopy((v_caddr_t) (sc->rframes[num]), &rfd, sizeof(struct ie_recv_frame_desc)); /* * Immediately advance the RFD list, since we we have copied ours * now. */ sc->rframes[num]->ie_fd_status = 0; sc->rframes[num]->ie_fd_last |= IE_FD_LAST; sc->rframes[sc->rftail]->ie_fd_last &= ~IE_FD_LAST; sc->rftail = (sc->rftail + 1) % sc->nframes; sc->rfhead = (sc->rfhead + 1) % sc->nframes; if (rfd.ie_fd_status & IE_FD_OK) { if (ieget(sc, &m)) { sc->ifp->if_ierrors++; /* this counts as an * error */ return; } } #ifdef DEBUG eh = mtod(m, struct ether_header *); if (ie_debug & IED_READFRAME) { printf("ie%d: frame from ether %6D type %x\n", sc->unit, eh->ether_shost, ":", (unsigned) eh->ether_type); } if (ntohs(eh->ether_type) > ETHERTYPE_TRAIL && ntohs(eh->ether_type) < (ETHERTYPE_TRAIL + ETHERTYPE_NTRAILER)) printf("received trailer!\n"); #endif if (!m) return; /* * Finally pass this packet up to higher layers. */ (*ifp->if_input)(ifp, m); } static void ie_drop_packet_buffer(struct ie_softc *sc) { int i; do { /* * This means we are somehow out of sync. So, we reset the * adapter. 
*/ if (!(sc->rbuffs[sc->rbhead]->ie_rbd_actual & IE_RBD_USED)) { #ifdef DEBUG print_rbd(sc->rbuffs[sc->rbhead]); #endif log(LOG_ERR, "ie%d: receive descriptors out of sync at %d\n", sc->unit, sc->rbhead); iereset(sc); return; } i = sc->rbuffs[sc->rbhead]->ie_rbd_actual & IE_RBD_LAST; sc->rbuffs[sc->rbhead]->ie_rbd_length |= IE_RBD_LAST; sc->rbuffs[sc->rbhead]->ie_rbd_actual = 0; sc->rbhead = (sc->rbhead + 1) % sc->nrxbufs; sc->rbuffs[sc->rbtail]->ie_rbd_length &= ~IE_RBD_LAST; sc->rbtail = (sc->rbtail + 1) % sc->nrxbufs; } while (!i); } /* * Start transmission on an interface. */ static void iestart(struct ifnet *ifp) { struct ie_softc *sc = ifp->if_softc; struct mbuf *m0, *m; volatile unsigned char *buffer; u_short len; /* * This is not really volatile, in this routine, but it makes gcc * happy. */ volatile u_short *bptr = &sc->scb->ie_command_list; if (!(ifp->if_flags & IFF_RUNNING)) return; if (ifp->if_flags & IFF_OACTIVE) return; do { IF_DEQUEUE(&sc->ifp->if_snd, m); if (!m) break; buffer = sc->xmit_cbuffs[sc->xmit_count]; len = 0; for (m0 = m; m && len < IE_BUF_LEN; m = m->m_next) { bcopy(mtod(m, caddr_t), buffer, m->m_len); buffer += m->m_len; len += m->m_len; } m_freem(m0); len = max(len, ETHER_MIN_LEN); /* * See if bpf is listening on this interface, let it see the * packet before we commit it to the wire. 
*/ BPF_TAP(sc->ifp, (void *)sc->xmit_cbuffs[sc->xmit_count], len); sc->xmit_buffs[sc->xmit_count]->ie_xmit_flags = IE_XMIT_LAST|len; sc->xmit_buffs[sc->xmit_count]->ie_xmit_next = 0xffff; sc->xmit_buffs[sc->xmit_count]->ie_xmit_buf = MK_24(sc->iomem, sc->xmit_cbuffs[sc->xmit_count]); sc->xmit_cmds[sc->xmit_count]->com.ie_cmd_cmd = IE_CMD_XMIT; sc->xmit_cmds[sc->xmit_count]->ie_xmit_status = 0; sc->xmit_cmds[sc->xmit_count]->ie_xmit_desc = MK_16(sc->iomem, sc->xmit_buffs[sc->xmit_count]); *bptr = MK_16(sc->iomem, sc->xmit_cmds[sc->xmit_count]); bptr = &sc->xmit_cmds[sc->xmit_count]->com.ie_cmd_link; sc->xmit_count++; } while (sc->xmit_count < sc->ntxbufs); /* * If we queued up anything for transmission, send it. */ if (sc->xmit_count) { sc->xmit_cmds[sc->xmit_count - 1]->com.ie_cmd_cmd |= IE_CMD_LAST | IE_CMD_INTR; /* * By passing the command pointer as a null, we tell * command_and_wait() to pretend that this isn't an action * command. I wish I understood what was happening here. */ command_and_wait(sc, IE_CU_START, 0, 0); ifp->if_flags |= IFF_OACTIVE; } return; } /* * Check to see if there's an 82586 out there. */ int check_ie_present(struct ie_softc *sc) { volatile struct ie_sys_conf_ptr *scp; volatile struct ie_int_sys_conf_ptr *iscp; volatile struct ie_sys_ctl_block *scb; u_long realbase; int s; s = splimp(); realbase = (uintptr_t) sc->iomembot + sc->iosize - (1 << 24); scp = (volatile struct ie_sys_conf_ptr *) (uintptr_t) (realbase + IE_SCP_ADDR); bzero((volatile char *) scp, sizeof *scp); /* * First we put the ISCP at the bottom of memory; this tests to make * sure that our idea of the size of memory is the same as the * controller's. This is NOT where the ISCP will be in normal * operation. 
*/ iscp = (volatile struct ie_int_sys_conf_ptr *) sc->iomembot; bzero((volatile char *)iscp, sizeof *iscp); scb = (volatile struct ie_sys_ctl_block *) sc->iomembot; bzero((volatile char *)scb, sizeof *scb); scp->ie_bus_use = sc->bus_use; /* 8-bit or 16-bit */ scp->ie_iscp_ptr = (caddr_t) (uintptr_t) ((volatile char *) iscp - (volatile char *) (uintptr_t) realbase); iscp->ie_busy = 1; iscp->ie_scb_offset = MK_16(realbase, scb) + 256; (*sc->ie_reset_586) (sc); (*sc->ie_chan_attn) (sc); DELAY(100); /* wait a while... */ if (iscp->ie_busy) { splx(s); return (0); } /* * Now relocate the ISCP to its real home, and reset the controller * again. */ iscp = (void *) Align((caddr_t) (uintptr_t) (realbase + IE_SCP_ADDR - sizeof(struct ie_int_sys_conf_ptr))); bzero((volatile char *) iscp, sizeof *iscp); /* ignore cast-qual */ scp->ie_iscp_ptr = (caddr_t) (uintptr_t) ((volatile char *) iscp - (volatile char *) (uintptr_t) realbase); iscp->ie_busy = 1; iscp->ie_scb_offset = MK_16(realbase, scb); (*sc->ie_reset_586) (sc); (*sc->ie_chan_attn) (sc); DELAY(100); if (iscp->ie_busy) { splx(s); return (0); } sc->iomem = (caddr_t) (uintptr_t) realbase; sc->iscp = iscp; sc->scb = scb; /* * Acknowledge any interrupts we may have caused... */ ie_ack(sc, IE_ST_WHENCE); splx(s); return (1); } /* * Divine the memory size of ie board UNIT. * Better hope there's nothing important hiding just below the ie card... 
*/ static void find_ie_mem_size(struct ie_softc *sc) { unsigned size; sc->iosize = 0; for (size = 65536; size >= 8192; size -= 8192) { if (check_ie_present(sc)) { return; } } return; } void el_reset_586(struct ie_softc *sc) { outb(PORT(sc) + IE507_CTRL, EL_CTRL_RESET); DELAY(100); outb(PORT(sc) + IE507_CTRL, EL_CTRL_NORMAL); DELAY(100); } void sl_reset_586(struct ie_softc *sc) { outb(PORT(sc) + IEATT_RESET, 0); } void ee16_reset_586(struct ie_softc *sc) { outb(PORT(sc) + IEE16_ECTRL, IEE16_RESET_586); DELAY(100); outb(PORT(sc) + IEE16_ECTRL, 0); DELAY(100); } void el_chan_attn(struct ie_softc *sc) { outb(PORT(sc) + IE507_ATTN, 1); } void sl_chan_attn(struct ie_softc *sc) { outb(PORT(sc) + IEATT_ATTN, 0); } void ee16_chan_attn(struct ie_softc *sc) { outb(PORT(sc) + IEE16_ATTN, 0); } u_short ee16_read_eeprom(struct ie_softc *sc, int location) { int ectrl, edata; ectrl = inb(sc->port + IEE16_ECTRL); ectrl &= IEE16_ECTRL_MASK; ectrl |= IEE16_ECTRL_EECS; outb(sc->port + IEE16_ECTRL, ectrl); ee16_eeprom_outbits(sc, IEE16_EEPROM_READ, IEE16_EEPROM_OPSIZE1); ee16_eeprom_outbits(sc, location, IEE16_EEPROM_ADDR_SIZE); edata = ee16_eeprom_inbits(sc); ectrl = inb(sc->port + IEE16_ECTRL); ectrl &= ~(IEE16_RESET_ASIC | IEE16_ECTRL_EEDI | IEE16_ECTRL_EECS); outb(sc->port + IEE16_ECTRL, ectrl); ee16_eeprom_clock(sc, 1); ee16_eeprom_clock(sc, 0); return edata; } static void ee16_eeprom_outbits(struct ie_softc *sc, int edata, int count) { int ectrl, i; ectrl = inb(sc->port + IEE16_ECTRL); ectrl &= ~IEE16_RESET_ASIC; for (i = count - 1; i >= 0; i--) { ectrl &= ~IEE16_ECTRL_EEDI; if (edata & (1 << i)) { ectrl |= IEE16_ECTRL_EEDI; } outb(sc->port + IEE16_ECTRL, ectrl); DELAY(1); /* eeprom data must be setup for 0.4 uSec */ ee16_eeprom_clock(sc, 1); ee16_eeprom_clock(sc, 0); } ectrl &= ~IEE16_ECTRL_EEDI; outb(sc->port + IEE16_ECTRL, ectrl); DELAY(1); /* eeprom data must be held for 0.4 uSec */ } static int ee16_eeprom_inbits(struct ie_softc *sc) { int ectrl, edata, i; ectrl = 
inb(sc->port + IEE16_ECTRL); ectrl &= ~IEE16_RESET_ASIC; for (edata = 0, i = 0; i < 16; i++) { edata = edata << 1; ee16_eeprom_clock(sc, 1); ectrl = inb(sc->port + IEE16_ECTRL); if (ectrl & IEE16_ECTRL_EEDO) { edata |= 1; } ee16_eeprom_clock(sc, 0); } return (edata); } static void ee16_eeprom_clock(struct ie_softc *sc, int state) { int ectrl; ectrl = inb(sc->port + IEE16_ECTRL); ectrl &= ~(IEE16_RESET_ASIC | IEE16_ECTRL_EESK); if (state) { ectrl |= IEE16_ECTRL_EESK; } outb(sc->port + IEE16_ECTRL, ectrl); DELAY(9); /* EESK must be stable for 8.38 uSec */ } static __inline void ee16_interrupt_enable(struct ie_softc *sc) { DELAY(100); outb(sc->port + IEE16_IRQ, sc->irq_encoded | IEE16_IRQ_ENABLE); DELAY(100); } void sl_read_ether(struct ie_softc *sc, unsigned char *addr) { int i; for (i = 0; i < 6; i++) addr[i] = inb(PORT(sc) + i); } static void iereset(struct ie_softc *sc) { int s = splimp(); printf("ie%d: reset\n", sc->unit); sc->ifp->if_flags &= ~IFF_UP; ieioctl(sc->ifp, SIOCSIFFLAGS, 0); /* * Stop i82586 dead in its tracks. */ if (command_and_wait(sc, IE_RU_ABORT | IE_CU_ABORT, 0, 0)) printf("ie%d: abort commands timed out\n", sc->unit); if (command_and_wait(sc, IE_RU_DISABLE | IE_CU_STOP, 0, 0)) printf("ie%d: disable commands timed out\n", sc->unit); #ifdef notdef if (!check_ie_present(sc)) panic("ie disappeared!"); #endif sc->ifp->if_flags |= IFF_UP; ieioctl(sc->ifp, SIOCSIFFLAGS, 0); splx(s); return; } /* * This is called if we time out. */ static void chan_attn_timeout(void *rock) { *(int *) rock = 1; } /* * Send a command to the controller and wait for it to either * complete or be accepted, depending on the command. If the * command pointer is null, then pretend that the command is * not an action command. If the command pointer is not null, * and the command is an action command, wait for * ((volatile struct ie_cmd_common *)pcmd)->ie_cmd_status & MASK * to become true. 
 */
static int
command_and_wait(struct ie_softc *sc, int cmd, volatile void *pcmd, int mask)
{
	/* View of the caller's command block so we can poll its status. */
	volatile struct ie_cmd_common *cc = pcmd;
	/* Set asynchronously by chan_attn_timeout() via the callout. */
	volatile int timedout = 0;
	struct callout_handle ch;

	/* Post the command word in the SCB for the 82586 to pick up. */
	sc->scb->ie_command = (u_short) cmd;

	if (IE_ACTION_COMMAND(cmd) && pcmd) {
		(*sc->ie_chan_attn) (sc);

		/*
		 * According to the packet driver, the minimum timeout
		 * should be .369 seconds, which we round up to .37.
		 */
		ch = timeout(chan_attn_timeout, (caddr_t)&timedout,
		    37 * hz / 100);	/* ignore cast-qual */

		/*
		 * Now spin-lock waiting for status.  This is not a very
		 * nice thing to do, but I haven't figured out how, or
		 * indeed if, we can put the process waiting for action to
		 * sleep.  (We may be getting called through some other
		 * timeout running in the kernel.)
		 */
		while (1) {
			if ((cc->ie_cmd_status & mask) || timedout)
				break;
		}

		/* Cancel the watchdog if the chip answered in time. */
		untimeout(chan_attn_timeout, (caddr_t)&timedout, ch);
		/* ignore cast-qual */

		/* Nonzero return == the command timed out. */
		return (timedout);
	} else {
		/*
		 * Otherwise, just wait for the command to be accepted.
		 */
		(*sc->ie_chan_attn) (sc);

		/* The chip clears ie_command once it has accepted it. */
		while (sc->scb->ie_command);	/* spin lock */

		return (0);
	}
}

/*
 * Run the time-domain reflectometer...
*/ static void run_tdr(struct ie_softc *sc, volatile struct ie_tdr_cmd *cmd) { int result; cmd->com.ie_cmd_status = 0; cmd->com.ie_cmd_cmd = IE_CMD_TDR | IE_CMD_LAST; cmd->com.ie_cmd_link = 0xffff; cmd->ie_tdr_time = 0; sc->scb->ie_command_list = MK_16(MEM(sc), cmd); cmd->ie_tdr_time = 0; if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL)) result = 0x2000; else result = cmd->ie_tdr_time; ie_ack(sc, IE_ST_WHENCE); if (result & IE_TDR_SUCCESS) return; if (result & IE_TDR_XCVR) { printf("ie%d: transceiver problem\n", sc->unit); } else if (result & IE_TDR_OPEN) { printf("ie%d: TDR detected an open %d clocks away\n", sc->unit, result & IE_TDR_TIME); } else if (result & IE_TDR_SHORT) { printf("ie%d: TDR detected a short %d clocks away\n", sc->unit, result & IE_TDR_TIME); } else { printf("ie%d: TDR returned unknown status %x\n", sc->unit, result); } } static void start_receiver(struct ie_softc *sc) { int s = splimp(); sc->scb->ie_recv_list = MK_16(MEM(sc), sc->rframes[0]); command_and_wait(sc, IE_RU_START, 0, 0); ie_ack(sc, IE_ST_WHENCE); splx(s); } /* * Here is a helper routine for iernr() and ieinit(). This sets up * the RFA. */ static v_caddr_t setup_rfa(struct ie_softc *sc, v_caddr_t ptr) { volatile struct ie_recv_frame_desc *rfd = (volatile void *)ptr; volatile struct ie_recv_buf_desc *rbd; int i; /* First lay them out */ for (i = 0; i < sc->nframes; i++) { sc->rframes[i] = rfd; bzero((volatile char *) rfd, sizeof *rfd); /* ignore cast-qual */ rfd++; } ptr = Alignvol(rfd); /* ignore cast-qual */ /* Now link them together */ for (i = 0; i < sc->nframes; i++) { sc->rframes[i]->ie_fd_next = MK_16(MEM(sc), sc->rframes[(i + 1) % sc->nframes]); } /* Finally, set the EOL bit on the last one. */ sc->rframes[sc->nframes - 1]->ie_fd_last |= IE_FD_LAST; /* * Now lay out some buffers for the incoming frames. Note that we * set aside a bit of slop in each buffer, to make sure that we have * enough space to hold a single frame in every buffer. 
*/ rbd = (volatile void *) ptr; for (i = 0; i < sc->nrxbufs; i++) { sc->rbuffs[i] = rbd; bzero((volatile char *)rbd, sizeof *rbd); ptr = Alignvol(ptr + sizeof *rbd); rbd->ie_rbd_length = IE_RBUF_SIZE; rbd->ie_rbd_buffer = MK_24(MEM(sc), ptr); sc->cbuffs[i] = (volatile void *) ptr; ptr += IE_RBUF_SIZE; rbd = (volatile void *) ptr; } /* Now link them together */ for (i = 0; i < sc->nrxbufs; i++) { sc->rbuffs[i]->ie_rbd_next = MK_16(MEM(sc), sc->rbuffs[(i + 1) % sc->nrxbufs]); } /* Tag EOF on the last one */ sc->rbuffs[sc->nrxbufs - 1]->ie_rbd_length |= IE_RBD_LAST; /* * We use the head and tail pointers on receive to keep track of the * order in which RFDs and RBDs are used. */ sc->rfhead = 0; sc->rftail = sc->nframes - 1; sc->rbhead = 0; sc->rbtail = sc->nrxbufs - 1; sc->scb->ie_recv_list = MK_16(MEM(sc), sc->rframes[0]); sc->rframes[0]->ie_fd_buf_desc = MK_16(MEM(sc), sc->rbuffs[0]); ptr = Alignvol(ptr); return (ptr); } /* * Run the multicast setup command. * Call at splimp(). */ static int mc_setup(struct ie_softc *sc) { volatile struct ie_mcast_cmd *cmd = (volatile void *)sc->xmit_cbuffs[0]; cmd->com.ie_cmd_status = 0; cmd->com.ie_cmd_cmd = IE_CMD_MCAST | IE_CMD_LAST; cmd->com.ie_cmd_link = 0xffff; /* ignore cast-qual */ bcopy((v_caddr_t) sc->mcast_addrs, (v_caddr_t) cmd->ie_mcast_addrs, sc->mcast_count * sizeof *sc->mcast_addrs); cmd->ie_mcast_bytes = sc->mcast_count * 6; /* grrr... */ sc->scb->ie_command_list = MK_16(MEM(sc), cmd); if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) || !(cmd->com.ie_cmd_status & IE_STAT_OK)) { printf("ie%d: multicast address setup command failed\n", sc->unit); return (0); } return (1); } /* * This routine takes the environment generated by check_ie_present() * and adds to it all the other structures we need to operate the adapter. * This includes executing the CONFIGURE, IA-SETUP, and MC-SETUP commands, * starting the receiver unit, and clearing interrupts. * * THIS ROUTINE MUST BE CALLED AT splimp() OR HIGHER. 
*/ static void ieinit(xsc) void *xsc; { struct ie_softc *sc = xsc; volatile struct ie_sys_ctl_block *scb = sc->scb; caddr_t ptr; int i; int unit = sc->unit; ptr = Alignvol((volatile char *) scb + sizeof *scb); /* * Send the configure command first. */ { volatile struct ie_config_cmd *cmd = (volatile void *) ptr; ie_setup_config(cmd, sc->promisc, sc->hard_type == IE_STARLAN10); cmd->com.ie_cmd_status = 0; cmd->com.ie_cmd_cmd = IE_CMD_CONFIG | IE_CMD_LAST; cmd->com.ie_cmd_link = 0xffff; scb->ie_command_list = MK_16(MEM(sc), cmd); if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) || !(cmd->com.ie_cmd_status & IE_STAT_OK)) { printf("ie%d: configure command failed\n", unit); return; } } /* * Now send the Individual Address Setup command. */ { volatile struct ie_iasetup_cmd *cmd = (volatile void *) ptr; cmd->com.ie_cmd_status = 0; cmd->com.ie_cmd_cmd = IE_CMD_IASETUP | IE_CMD_LAST; cmd->com.ie_cmd_link = 0xffff; bcopy((volatile char *)IFP2ENADDR(sc->ifp), (volatile char *)&cmd->ie_address, sizeof cmd->ie_address); scb->ie_command_list = MK_16(MEM(sc), cmd); if (command_and_wait(sc, IE_CU_START, cmd, IE_STAT_COMPL) || !(cmd->com.ie_cmd_status & IE_STAT_OK)) { printf("ie%d: individual address " "setup command failed\n", sc->unit); return; } } /* * Now run the time-domain reflectometer. */ run_tdr(sc, (volatile void *) ptr); /* * Acknowledge any interrupts we have generated thus far. */ ie_ack(sc, IE_ST_WHENCE); /* * Set up the RFA. */ ptr = setup_rfa(sc, ptr); /* * Finally, the transmit command and buffer are the last little bit * of work. 
*/ /* transmit command buffers */ for (i = 0; i < sc->ntxbufs; i++) { sc->xmit_cmds[i] = (volatile void *) ptr; ptr += sizeof *sc->xmit_cmds[i]; ptr = Alignvol(ptr); sc->xmit_buffs[i] = (volatile void *)ptr; ptr += sizeof *sc->xmit_buffs[i]; ptr = Alignvol(ptr); } /* transmit buffers */ for (i = 0; i < sc->ntxbufs - 1; i++) { sc->xmit_cbuffs[i] = (volatile void *)ptr; ptr += IE_BUF_LEN; ptr = Alignvol(ptr); } sc->xmit_cbuffs[sc->ntxbufs - 1] = (volatile void *) ptr; for (i = 1; i < sc->ntxbufs; i++) { bzero((v_caddr_t) sc->xmit_cmds[i], sizeof *sc->xmit_cmds[i]); bzero((v_caddr_t) sc->xmit_buffs[i], sizeof *sc->xmit_buffs[i]); } /* * This must be coordinated with iestart() and ietint(). */ sc->xmit_cmds[0]->ie_xmit_status = IE_STAT_COMPL; /* take the ee16 out of loopback */ if (sc->hard_type == IE_EE16) { u_int8_t bart_config; bart_config = inb(PORT(sc) + IEE16_CONFIG); bart_config &= ~IEE16_BART_LOOPBACK; /* inb doesn't get bit! */ bart_config |= IEE16_BART_MCS16_TEST; outb(PORT(sc) + IEE16_CONFIG, bart_config); ee16_interrupt_enable(sc); ee16_chan_attn(sc); } sc->ifp->if_flags |= IFF_RUNNING; /* tell higher levels * we're here */ sc->ifp->if_flags &= ~IFF_OACTIVE; start_receiver(sc); return; } static void ie_stop(struct ie_softc *sc) { command_and_wait(sc, IE_RU_DISABLE, 0, 0); } static int ieioctl(struct ifnet *ifp, u_long command, caddr_t data) { int s, error = 0; struct ie_softc *sc = ifp->if_softc; s = splimp(); switch (command) { case SIOCSIFFLAGS: /* * Note that this device doesn't have an "all multicast" * mode, so we must turn on promiscuous mode and do the * filtering manually. 
*/ if ((ifp->if_flags & IFF_UP) == 0 && (ifp->if_flags & IFF_RUNNING)) { ifp->if_flags &= ~IFF_RUNNING; ie_stop(sc); } else if ((ifp->if_flags & IFF_UP) && (ifp->if_flags & IFF_RUNNING) == 0) { sc->promisc = ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI); ieinit(sc); } else if (sc->promisc ^ (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI))) { sc->promisc = ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI); ieinit(sc); } break; case SIOCADDMULTI: case SIOCDELMULTI: /* * Update multicast listeners */ /* reset multicast filtering */ ie_mc_reset(sc); error = 0; break; default: error = ether_ioctl(ifp, command, data); break; } splx(s); return (error); } static void ie_mc_reset(struct ie_softc *sc) { struct ifmultiaddr *ifma; /* * Step through the list of addresses. */ sc->mcast_count = 0; + IF_ADDR_LOCK(sc->ifp); TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; /* XXX - this is broken... */ if (sc->mcast_count >= MAXMCAST) { sc->ifp->if_flags |= IFF_ALLMULTI; ieioctl(sc->ifp, SIOCSIFFLAGS, (void *) 0); goto setflag; } bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), &(sc->mcast_addrs[sc->mcast_count]), 6); sc->mcast_count++; } + IF_ADDR_UNLOCK(sc->ifp); setflag: sc->want_mcsetup = 1; } #ifdef DEBUG static void print_rbd(volatile struct ie_recv_buf_desc * rbd) { printf("RBD at %p:\n" "actual %04x, next %04x, buffer %p\n" "length %04x, mbz %04x\n", (volatile void *) rbd, rbd->ie_rbd_actual, rbd->ie_rbd_next, (void *) rbd->ie_rbd_buffer, rbd->ie_rbd_length, rbd->mbz); } #endif /* DEBUG */ int ie_alloc_resources (device_t dev) { struct ie_softc * sc; int error; error = 0; sc = device_get_softc(dev); sc->io_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &sc->io_rid, RF_ACTIVE); if (!sc->io_res) { device_printf(dev, "No I/O space?!\n"); error = ENOMEM; goto bad; } sc->io_bt = rman_get_bustag(sc->io_res); sc->io_bh = rman_get_bushandle(sc->io_res); sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 
&sc->mem_rid, RF_ACTIVE); if (!sc->mem_res) { device_printf(dev, "No Memory!\n"); error = ENOMEM; goto bad; } sc->mem_bt = rman_get_bustag(sc->mem_res); sc->mem_bh = rman_get_bushandle(sc->mem_res); sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE); if (!sc->irq_res) { device_printf(dev, "No IRQ!\n"); error = ENOMEM; goto bad; } sc->port = rman_get_start(sc->io_res); /* XXX hack */ sc->iomembot = rman_get_virtual(sc->mem_res); sc->iosize = rman_get_size(sc->mem_res); return (0); bad: return (error); } void ie_release_resources (device_t dev) { struct ie_softc * sc; sc = device_get_softc(dev); if (sc->irq_ih) bus_teardown_intr(dev, sc->irq_res, sc->irq_ih); if (sc->io_res) bus_release_resource(dev, SYS_RES_IOPORT, sc->io_rid, sc->io_res); if (sc->irq_res) bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); if (sc->mem_res) bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res); return; } int ie_detach (device_t dev) { struct ie_softc * sc; struct ifnet * ifp; sc = device_get_softc(dev); ifp = sc->ifp; if (sc->hard_type == IE_EE16) ee16_shutdown(sc, 0); ie_stop(sc); ifp->if_flags &= ~IFF_RUNNING; ether_ifdetach(ifp); if_free(ifp); ie_release_resources(dev); return (0); } Index: stable/6/sys/dev/if_ndis/if_ndis.c =================================================================== --- stable/6/sys/dev/if_ndis/if_ndis.c (revision 149421) +++ stable/6/sys/dev/if_ndis/if_ndis.c (revision 149422) @@ -1,3022 +1,3025 @@ /*- * Copyright (c) 2003 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
* * WPA support added by Arvind Srinivasan */ #include __FBSDID("$FreeBSD$"); #include "opt_bdg.h" #include #include #include #include #include #include #include #include #include #include #if __FreeBSD_version < 502113 #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(ndis, ether, 1, 1, 1); MODULE_DEPEND(ndis, wlan, 1, 1, 1); MODULE_DEPEND(ndis, ndisapi, 1, 1, 1); MODULE_VERSION(ndis, 1); int ndis_attach (device_t); int ndis_detach (device_t); int ndis_suspend (device_t); int ndis_resume (device_t); void ndis_shutdown (device_t); int ndisdrv_modevent (module_t, int, void *); static void ndis_txeof (ndis_handle, ndis_packet *, ndis_status); static void ndis_rxeof (ndis_handle, ndis_packet **, uint32_t); static void ndis_rxeof_eth (ndis_handle, ndis_handle, char *, void *, uint32_t, void *, uint32_t, uint32_t); static void ndis_rxeof_done (ndis_handle); static void ndis_rxeof_xfr (kdpc *, ndis_handle, void *, void *); static void ndis_rxeof_xfr_done (ndis_handle, ndis_packet *, uint32_t, uint32_t); static void ndis_linksts (ndis_handle, ndis_status, void *, uint32_t); static void ndis_linksts_done (ndis_handle); /* We need to wrap these functions for amd64. 
*/ static funcptr ndis_txeof_wrap; static funcptr ndis_rxeof_wrap; static funcptr ndis_rxeof_eth_wrap; static funcptr ndis_rxeof_done_wrap; static funcptr ndis_rxeof_xfr_wrap; static funcptr ndis_rxeof_xfr_done_wrap; static funcptr ndis_linksts_wrap; static funcptr ndis_linksts_done_wrap; static funcptr ndis_ticktask_wrap; static funcptr ndis_starttask_wrap; static funcptr ndis_resettask_wrap; static void ndis_intr (void *); static void ndis_tick (void *); static void ndis_ticktask (ndis_work_item *, void *); static void ndis_start (struct ifnet *); static void ndis_starttask (ndis_work_item *, void *); static void ndis_resettask (ndis_work_item *, void *); static int ndis_ioctl (struct ifnet *, u_long, caddr_t); static int ndis_wi_ioctl_get (struct ifnet *, u_long, caddr_t); static int ndis_wi_ioctl_set (struct ifnet *, u_long, caddr_t); static int ndis_80211_ioctl_get (struct ifnet *, u_long, caddr_t); static int ndis_80211_ioctl_set (struct ifnet *, u_long, caddr_t); static void ndis_init (void *); static void ndis_stop (struct ndis_softc *); static void ndis_watchdog (struct ifnet *); static int ndis_ifmedia_upd (struct ifnet *); static void ndis_ifmedia_sts (struct ifnet *, struct ifmediareq *); static int ndis_get_assoc (struct ndis_softc *, ndis_wlan_bssid_ex **); static int ndis_probe_offload (struct ndis_softc *); static int ndis_set_offload (struct ndis_softc *); static void ndis_getstate_80211 (struct ndis_softc *); static void ndis_setstate_80211 (struct ndis_softc *); static int ndis_add_key (struct ndis_softc *, struct ieee80211req_key *, int16_t); static void ndis_media_status (struct ifnet *, struct ifmediareq *); static void ndis_setmulti (struct ndis_softc *); static void ndis_map_sclist (void *, bus_dma_segment_t *, int, bus_size_t, int); static int ndisdrv_loaded = 0; /* * This routine should call windrv_load() once for each driver * image. 
 * This will do the relocation and dynalinking for the
 * image, and create a Windows driver object which will be
 * saved in our driver database.
 */
int
ndisdrv_modevent(mod, cmd, arg)
	module_t		mod;
	int			cmd;
	void			*arg;
{
	int			error = 0;

	switch (cmd) {
	case MOD_LOAD:
		/* Wrap the callbacks only once, on first load. */
		ndisdrv_loaded++;
		if (ndisdrv_loaded > 1)
			break;
		/*
		 * Create calling-convention thunks for every callback the
		 * Windows driver may invoke (needed on amd64 -- see the
		 * comment at the ndis_*_wrap declarations above).
		 */
		windrv_wrap((funcptr)ndis_rxeof, &ndis_rxeof_wrap,
		    3, WINDRV_WRAP_STDCALL);
		windrv_wrap((funcptr)ndis_rxeof_eth, &ndis_rxeof_eth_wrap,
		    8, WINDRV_WRAP_STDCALL);
		windrv_wrap((funcptr)ndis_rxeof_done, &ndis_rxeof_done_wrap,
		    1, WINDRV_WRAP_STDCALL);
		windrv_wrap((funcptr)ndis_rxeof_xfr, &ndis_rxeof_xfr_wrap,
		    4, WINDRV_WRAP_STDCALL);
		windrv_wrap((funcptr)ndis_rxeof_xfr_done,
		    &ndis_rxeof_xfr_done_wrap, 4, WINDRV_WRAP_STDCALL);
		windrv_wrap((funcptr)ndis_txeof, &ndis_txeof_wrap,
		    3, WINDRV_WRAP_STDCALL);
		windrv_wrap((funcptr)ndis_linksts, &ndis_linksts_wrap,
		    4, WINDRV_WRAP_STDCALL);
		windrv_wrap((funcptr)ndis_linksts_done,
		    &ndis_linksts_done_wrap, 1, WINDRV_WRAP_STDCALL);
		windrv_wrap((funcptr)ndis_ticktask, &ndis_ticktask_wrap,
		    2, WINDRV_WRAP_STDCALL);
		windrv_wrap((funcptr)ndis_starttask, &ndis_starttask_wrap,
		    2, WINDRV_WRAP_STDCALL);
		windrv_wrap((funcptr)ndis_resettask, &ndis_resettask_wrap,
		    2, WINDRV_WRAP_STDCALL);
		break;
	case MOD_UNLOAD:
		/* Tear the thunks down only when the last user unloads. */
		ndisdrv_loaded--;
		if (ndisdrv_loaded > 0)
			break;
		/* fallthrough */
	case MOD_SHUTDOWN:
		windrv_unwrap(ndis_rxeof_wrap);
		windrv_unwrap(ndis_rxeof_eth_wrap);
		windrv_unwrap(ndis_rxeof_done_wrap);
		windrv_unwrap(ndis_rxeof_xfr_wrap);
		windrv_unwrap(ndis_rxeof_xfr_done_wrap);
		windrv_unwrap(ndis_txeof_wrap);
		windrv_unwrap(ndis_linksts_wrap);
		windrv_unwrap(ndis_linksts_done_wrap);
		windrv_unwrap(ndis_ticktask_wrap);
		windrv_unwrap(ndis_starttask_wrap);
		windrv_unwrap(ndis_resettask_wrap);
		break;
	default:
		error = EINVAL;
		break;
	}

	return (error);
}

/*
 * Program the 64-bit multicast hash filter.
*/ static void ndis_setmulti(sc) struct ndis_softc *sc; { struct ifnet *ifp; struct ifmultiaddr *ifma; int len, mclistsz, error; uint8_t *mclist; ifp = sc->ifp; if (!NDIS_INITIALIZED(sc)) return; if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { sc->ndis_filter |= NDIS_PACKET_TYPE_ALL_MULTICAST; len = sizeof(sc->ndis_filter); error = ndis_set_info(sc, OID_GEN_CURRENT_PACKET_FILTER, &sc->ndis_filter, &len); if (error) device_printf (sc->ndis_dev, "set filter failed: %d\n", error); return; } if (TAILQ_EMPTY(&ifp->if_multiaddrs)) return; len = sizeof(mclistsz); ndis_get_info(sc, OID_802_3_MAXIMUM_LIST_SIZE, &mclistsz, &len); mclist = malloc(ETHER_ADDR_LEN * mclistsz, M_TEMP, M_NOWAIT|M_ZERO); if (mclist == NULL) { sc->ndis_filter |= NDIS_PACKET_TYPE_ALL_MULTICAST; goto out; } sc->ndis_filter |= NDIS_PACKET_TYPE_MULTICAST; len = 0; + IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), mclist + (ETHER_ADDR_LEN * len), ETHER_ADDR_LEN); len++; if (len > mclistsz) { + IF_ADDR_UNLOCK(ifp); sc->ndis_filter |= NDIS_PACKET_TYPE_ALL_MULTICAST; sc->ndis_filter &= ~NDIS_PACKET_TYPE_MULTICAST; goto out; } } + IF_ADDR_UNLOCK(ifp); len = len * ETHER_ADDR_LEN; error = ndis_set_info(sc, OID_802_3_MULTICAST_LIST, mclist, &len); if (error) { device_printf (sc->ndis_dev, "set mclist failed: %d\n", error); sc->ndis_filter |= NDIS_PACKET_TYPE_ALL_MULTICAST; sc->ndis_filter &= ~NDIS_PACKET_TYPE_MULTICAST; } out: free(mclist, M_TEMP); len = sizeof(sc->ndis_filter); error = ndis_set_info(sc, OID_GEN_CURRENT_PACKET_FILTER, &sc->ndis_filter, &len); if (error) device_printf (sc->ndis_dev, "set filter failed: %d\n", error); return; } static int ndis_set_offload(sc) struct ndis_softc *sc; { ndis_task_offload *nto; ndis_task_offload_hdr *ntoh; ndis_task_tcpip_csum *nttc; struct ifnet *ifp; int len, error; ifp = sc->ifp; if (!NDIS_INITIALIZED(sc)) 
return(EINVAL); /* See if there's anything to set. */ error = ndis_probe_offload(sc); if (error) return(error); if (sc->ndis_hwassist == 0 && ifp->if_capabilities == 0) return(0); len = sizeof(ndis_task_offload_hdr) + sizeof(ndis_task_offload) + sizeof(ndis_task_tcpip_csum); ntoh = malloc(len, M_TEMP, M_NOWAIT|M_ZERO); if (ntoh == NULL) return(ENOMEM); ntoh->ntoh_vers = NDIS_TASK_OFFLOAD_VERSION; ntoh->ntoh_len = sizeof(ndis_task_offload_hdr); ntoh->ntoh_offset_firsttask = sizeof(ndis_task_offload_hdr); ntoh->ntoh_encapfmt.nef_encaphdrlen = sizeof(struct ether_header); ntoh->ntoh_encapfmt.nef_encap = NDIS_ENCAP_IEEE802_3; ntoh->ntoh_encapfmt.nef_flags = NDIS_ENCAPFLAG_FIXEDHDRLEN; nto = (ndis_task_offload *)((char *)ntoh + ntoh->ntoh_offset_firsttask); nto->nto_vers = NDIS_TASK_OFFLOAD_VERSION; nto->nto_len = sizeof(ndis_task_offload); nto->nto_task = NDIS_TASK_TCPIP_CSUM; nto->nto_offset_nexttask = 0; nto->nto_taskbuflen = sizeof(ndis_task_tcpip_csum); nttc = (ndis_task_tcpip_csum *)nto->nto_taskbuf; if (ifp->if_capenable & IFCAP_TXCSUM) nttc->nttc_v4tx = sc->ndis_v4tx; if (ifp->if_capenable & IFCAP_RXCSUM) nttc->nttc_v4rx = sc->ndis_v4rx; error = ndis_set_info(sc, OID_TCP_TASK_OFFLOAD, ntoh, &len); free(ntoh, M_TEMP); return(error); } static int ndis_probe_offload(sc) struct ndis_softc *sc; { ndis_task_offload *nto; ndis_task_offload_hdr *ntoh; ndis_task_tcpip_csum *nttc = NULL; struct ifnet *ifp; int len, error, dummy; ifp = sc->ifp; len = sizeof(dummy); error = ndis_get_info(sc, OID_TCP_TASK_OFFLOAD, &dummy, &len); if (error != ENOSPC) return(error); ntoh = malloc(len, M_TEMP, M_NOWAIT|M_ZERO); if (ntoh == NULL) return(ENOMEM); ntoh->ntoh_vers = NDIS_TASK_OFFLOAD_VERSION; ntoh->ntoh_len = sizeof(ndis_task_offload_hdr); ntoh->ntoh_encapfmt.nef_encaphdrlen = sizeof(struct ether_header); ntoh->ntoh_encapfmt.nef_encap = NDIS_ENCAP_IEEE802_3; ntoh->ntoh_encapfmt.nef_flags = NDIS_ENCAPFLAG_FIXEDHDRLEN; error = ndis_get_info(sc, OID_TCP_TASK_OFFLOAD, ntoh, &len); if 
(error) { free(ntoh, M_TEMP); return(error); } if (ntoh->ntoh_vers != NDIS_TASK_OFFLOAD_VERSION) { free(ntoh, M_TEMP); return(EINVAL); } nto = (ndis_task_offload *)((char *)ntoh + ntoh->ntoh_offset_firsttask); while (1) { switch (nto->nto_task) { case NDIS_TASK_TCPIP_CSUM: nttc = (ndis_task_tcpip_csum *)nto->nto_taskbuf; break; /* Don't handle these yet. */ case NDIS_TASK_IPSEC: case NDIS_TASK_TCP_LARGESEND: default: break; } if (nto->nto_offset_nexttask == 0) break; nto = (ndis_task_offload *)((char *)nto + nto->nto_offset_nexttask); } if (nttc == NULL) { free(ntoh, M_TEMP); return(ENOENT); } sc->ndis_v4tx = nttc->nttc_v4tx; sc->ndis_v4rx = nttc->nttc_v4rx; if (nttc->nttc_v4tx & NDIS_TCPSUM_FLAGS_IP_CSUM) sc->ndis_hwassist |= CSUM_IP; if (nttc->nttc_v4tx & NDIS_TCPSUM_FLAGS_TCP_CSUM) sc->ndis_hwassist |= CSUM_TCP; if (nttc->nttc_v4tx & NDIS_TCPSUM_FLAGS_UDP_CSUM) sc->ndis_hwassist |= CSUM_UDP; if (sc->ndis_hwassist) ifp->if_capabilities |= IFCAP_TXCSUM; if (nttc->nttc_v4rx & NDIS_TCPSUM_FLAGS_IP_CSUM) ifp->if_capabilities |= IFCAP_RXCSUM; if (nttc->nttc_v4rx & NDIS_TCPSUM_FLAGS_TCP_CSUM) ifp->if_capabilities |= IFCAP_RXCSUM; if (nttc->nttc_v4rx & NDIS_TCPSUM_FLAGS_UDP_CSUM) ifp->if_capabilities |= IFCAP_RXCSUM; free(ntoh, M_TEMP); return(0); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ int ndis_attach(dev) device_t dev; { u_char eaddr[ETHER_ADDR_LEN]; struct ndis_softc *sc; driver_object *pdrv; device_object *pdo; struct ifnet *ifp = NULL; int error = 0, len; int i; sc = device_get_softc(dev); ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { error = ENOSPC; goto fail; } ifp->if_softc = sc; mtx_init(&sc->ndis_mtx, "ndis softc lock", MTX_NETWORK_LOCK, MTX_DEF); /* * Hook interrupt early, since calling the driver's * init routine may trigger an interrupt. Note that * we don't need to do any explicit interrupt setup * for USB. 
*/ if (sc->ndis_iftype == PCMCIABus || sc->ndis_iftype == PCIBus) { error = bus_setup_intr(dev, sc->ndis_irq, INTR_TYPE_NET | INTR_MPSAFE, ndis_intr, sc, &sc->ndis_intrhand); if (error) { device_printf(dev, "couldn't set up irq\n"); goto fail; } } if (sc->ndis_iftype == PCMCIABus) { error = ndis_alloc_amem(sc); if (error) { device_printf(dev, "failed to allocate " "attribute memory\n"); goto fail; } } #if __FreeBSD_version < 502113 sysctl_ctx_init(&sc->ndis_ctx); #endif /* Create sysctl registry nodes */ ndis_create_sysctls(sc); /* Find the PDO for this device instance. */ if (sc->ndis_iftype == PCIBus) pdrv = windrv_lookup(0, "PCI Bus"); else if (sc->ndis_iftype == PCMCIABus) pdrv = windrv_lookup(0, "PCCARD Bus"); else pdrv = windrv_lookup(0, "USB Bus"); pdo = windrv_find_pdo(pdrv, dev); /* * Create a new functional device object for this * device. This is what creates the miniport block * for this device instance. */ if (NdisAddDevice(sc->ndis_dobj, pdo) != STATUS_SUCCESS) { device_printf(dev, "failed to create FDO!\n"); error = ENXIO; goto fail; } /* Tell the user what version of the API the driver is using. */ device_printf(dev, "NDIS API version: %d.%d\n", sc->ndis_chars->nmc_version_major, sc->ndis_chars->nmc_version_minor); /* Do resource conversion. */ if (sc->ndis_iftype == PCMCIABus || sc->ndis_iftype == PCIBus) ndis_convert_res(sc); else sc->ndis_block->nmb_rlist = NULL; /* Install our RX and TX interrupt handlers. */ sc->ndis_block->nmb_senddone_func = ndis_txeof_wrap; sc->ndis_block->nmb_pktind_func = ndis_rxeof_wrap; sc->ndis_block->nmb_ethrxindicate_func = ndis_rxeof_eth_wrap; sc->ndis_block->nmb_ethrxdone_func = ndis_rxeof_done_wrap; sc->ndis_block->nmb_tdcond_func = ndis_rxeof_xfr_done_wrap; /* Call driver's init routine. */ if (ndis_init_nic(sc)) { device_printf (dev, "init handler failed\n"); error = ENXIO; goto fail; } /* * Get station address from the driver. 
*/ len = sizeof(eaddr); ndis_get_info(sc, OID_802_3_CURRENT_ADDRESS, &eaddr, &len); /* * Figure out if we're allowed to use multipacket sends * with this driver, and if so, how many. */ if (sc->ndis_chars->nmc_sendsingle_func && sc->ndis_chars->nmc_sendmulti_func == NULL) { sc->ndis_maxpkts = 1; } else { len = sizeof(sc->ndis_maxpkts); ndis_get_info(sc, OID_GEN_MAXIMUM_SEND_PACKETS, &sc->ndis_maxpkts, &len); } sc->ndis_txarray = malloc(sizeof(ndis_packet *) * sc->ndis_maxpkts, M_DEVBUF, M_NOWAIT|M_ZERO); /* Allocate a pool of ndis_packets for TX encapsulation. */ NdisAllocatePacketPool(&i, &sc->ndis_txpool, sc->ndis_maxpkts, PROTOCOL_RESERVED_SIZE_IN_PACKET); if (i != NDIS_STATUS_SUCCESS) { sc->ndis_txpool = NULL; device_printf(dev, "failed to allocate TX packet pool"); error = ENOMEM; goto fail; } sc->ndis_txpending = sc->ndis_maxpkts; sc->ndis_oidcnt = 0; /* Get supported oid list. */ ndis_get_supported_oids(sc, &sc->ndis_oids, &sc->ndis_oidcnt); /* If the NDIS module requested scatter/gather, init maps. */ if (sc->ndis_sc) ndis_init_dma(sc); /* * See if the OID_802_11_CONFIGURATION OID is * supported by this driver. If it is, then this an 802.11 * wireless driver, and we should set up media for wireless. */ for (i = 0; i < sc->ndis_oidcnt; i++) { if (sc->ndis_oids[i] == OID_802_11_CONFIGURATION) { sc->ndis_80211++; break; } } /* Check for task offload support. 
*/ ndis_probe_offload(sc); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = ndis_ioctl; ifp->if_start = ndis_start; ifp->if_watchdog = ndis_watchdog; ifp->if_init = ndis_init; ifp->if_baudrate = 10000000; #if __FreeBSD_version < 502114 ifp->if_snd.ifq_maxlen = 50; #else IFQ_SET_MAXLEN(&ifp->if_snd, 50); ifp->if_snd.ifq_drv_maxlen = 25; IFQ_SET_READY(&ifp->if_snd); #endif ifp->if_capenable = ifp->if_capabilities; ifp->if_hwassist = sc->ndis_hwassist; /* Do media setup */ if (sc->ndis_80211) { struct ieee80211com *ic = (void *)&sc->ic; ndis_80211_rates_ex rates; struct ndis_80211_nettype_list *ntl; uint32_t arg; int r; ic->ic_ifp = ifp; ic->ic_phytype = IEEE80211_T_DS; ic->ic_opmode = IEEE80211_M_STA; ic->ic_caps = IEEE80211_C_IBSS; ic->ic_state = IEEE80211_S_ASSOC; ic->ic_modecaps = (1<ntl_items; i++) { switch (ntl->ntl_type[i]) { case NDIS_80211_NETTYPE_11FH: case NDIS_80211_NETTYPE_11DS: ic->ic_modecaps |= (1<ic_modecaps |= (1<ic_modecaps |= (1<ic_sup_rates[x].rs_nrates; i++) { \ if (ic->ic_sup_rates[x].rs_rates[i] == (y)) \ break; \ } \ if (i == ic->ic_sup_rates[x].rs_nrates) { \ ic->ic_sup_rates[x].rs_rates[i] = (y); \ ic->ic_sup_rates[x].rs_nrates++; \ } \ } while (0) #define SETRATE(x, y) \ ic->ic_sup_rates[x].rs_rates[ic->ic_sup_rates[x].rs_nrates] = (y) #define INCRATE(x) \ ic->ic_sup_rates[x].rs_nrates++ ic->ic_curmode = IEEE80211_MODE_AUTO; if (ic->ic_modecaps & (1<ic_sup_rates[IEEE80211_MODE_11A].rs_nrates = 0; if (ic->ic_modecaps & (1<ic_sup_rates[IEEE80211_MODE_11B].rs_nrates = 0; if (ic->ic_modecaps & (1<ic_sup_rates[IEEE80211_MODE_11G].rs_nrates = 0; for (i = 0; i < len; i++) { switch (rates[i] & IEEE80211_RATE_VAL) { case 2: case 4: case 11: case 10: case 22: if (!(ic->ic_modecaps & (1<ic_modecaps |= (1<ic_sup_rates[IEEE80211_MODE_11B]. 
rs_nrates = 0; } SETRATE(IEEE80211_MODE_11B, rates[i]); INCRATE(IEEE80211_MODE_11B); break; default: if (ic->ic_modecaps & (1<ic_modecaps & (1<ic_modecaps & (1<ic_modecaps & (1<ic_modecaps & (1<ic_sup_rates[IEEE80211_MODE_11G].rs_nrates) chanflag |= IEEE80211_CHAN_G; if (i <= 14) chanflag |= IEEE80211_CHAN_B; if (ic->ic_sup_rates[IEEE80211_MODE_11A].rs_nrates && i > 14) chanflag = IEEE80211_CHAN_A; if (chanflag == 0) break; ic->ic_channels[i].ic_freq = ieee80211_ieee2mhz(i, chanflag); ic->ic_channels[i].ic_flags = chanflag; } i = sizeof(arg); r = ndis_get_info(sc, OID_802_11_WEP_STATUS, &arg, &i); if (arg != NDIS_80211_WEPSTAT_NOTSUPPORTED) ic->ic_caps |= IEEE80211_C_WEP; i = sizeof(arg); r = ndis_get_info(sc, OID_802_11_POWER_MODE, &arg, &i); if (r == 0) ic->ic_caps |= IEEE80211_C_PMGT; bcopy(eaddr, &ic->ic_myaddr, sizeof(eaddr)); ieee80211_ifattach(ic); ieee80211_media_init(ic, ieee80211_media_change, ndis_media_status); ic->ic_ibss_chan = IEEE80211_CHAN_ANYC; ic->ic_bss->ni_chan = ic->ic_ibss_chan; } else { ifmedia_init(&sc->ifmedia, IFM_IMASK, ndis_ifmedia_upd, ndis_ifmedia_sts); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_AUTO); ether_ifattach(ifp, eaddr); } /* Override the status handler so we can detect link changes. */ sc->ndis_block->nmb_status_func = ndis_linksts_wrap; sc->ndis_block->nmb_statusdone_func = ndis_linksts_done_wrap; /* Set up work item handlers. 
*/ NdisInitializeWorkItem(&sc->ndis_tickitem, (ndis_proc)ndis_ticktask_wrap, sc); NdisInitializeWorkItem(&sc->ndis_startitem, (ndis_proc)ndis_starttask_wrap, ifp); NdisInitializeWorkItem(&sc->ndis_resetitem, (ndis_proc)ndis_resettask_wrap, sc); KeInitializeDpc(&sc->ndis_rxdpc, ndis_rxeof_xfr_wrap, sc->ndis_block); fail: if (error) ndis_detach(dev); else /* We're done talking to the NIC for now; halt it. */ ndis_halt_nic(sc); return(error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. */ int ndis_detach(dev) device_t dev; { struct ndis_softc *sc; struct ifnet *ifp; driver_object *drv; sc = device_get_softc(dev); KASSERT(mtx_initialized(&sc->ndis_mtx), ("ndis mutex not initialized")); NDIS_LOCK(sc); ifp = sc->ifp; ifp->if_flags &= ~IFF_UP; if (device_is_attached(dev)) { NDIS_UNLOCK(sc); ndis_stop(sc); if (sc->ndis_80211) ieee80211_ifdetach(&sc->ic); else ether_ifdetach(ifp); } else NDIS_UNLOCK(sc); if (ifp != NULL) if_free(ifp); bus_generic_detach(dev); if (sc->ndis_intrhand) bus_teardown_intr(dev, sc->ndis_irq, sc->ndis_intrhand); if (sc->ndis_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ndis_irq); if (sc->ndis_res_io) bus_release_resource(dev, SYS_RES_IOPORT, sc->ndis_io_rid, sc->ndis_res_io); if (sc->ndis_res_mem) bus_release_resource(dev, SYS_RES_MEMORY, sc->ndis_mem_rid, sc->ndis_res_mem); if (sc->ndis_res_altmem) bus_release_resource(dev, SYS_RES_MEMORY, sc->ndis_altmem_rid, sc->ndis_res_altmem); if (sc->ndis_iftype == PCMCIABus) ndis_free_amem(sc); if (sc->ndis_sc) ndis_destroy_dma(sc); if (sc->ndis_txarray) free(sc->ndis_txarray, M_DEVBUF); if (!sc->ndis_80211) ifmedia_removeall(&sc->ifmedia); if (sc->ndis_txpool != NULL) NdisFreePacketPool(sc->ndis_txpool); ndis_unload_driver(sc); /* Destroy the PDO for this device. 
*/ if (sc->ndis_iftype == PCIBus) drv = windrv_lookup(0, "PCI Bus"); else if (sc->ndis_iftype == PCMCIABus) drv = windrv_lookup(0, "PCCARD Bus"); else drv = windrv_lookup(0, "USB Bus"); if (drv == NULL) panic("couldn't find driver object"); windrv_destroy_pdo(drv, dev); if (sc->ndis_iftype == PCIBus) bus_dma_tag_destroy(sc->ndis_parent_tag); #if __FreeBSD_version < 502113 sysctl_ctx_free(&sc->ndis_ctx); #endif mtx_destroy(&sc->ndis_mtx); return(0); } int ndis_suspend(dev) device_t dev; { struct ndis_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->ifp; #ifdef notdef if (NDIS_INITIALIZED(sc)) ndis_stop(sc); #endif return(0); } int ndis_resume(dev) device_t dev; { struct ndis_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->ifp; if (NDIS_INITIALIZED(sc)) ndis_init(sc); return(0); } /* * The following bunch of routines are here to support drivers that * use the NdisMEthIndicateReceive()/MiniportTransferData() mechanism. */ static void ndis_rxeof_eth(adapter, ctx, addr, hdr, hdrlen, lookahead, lookaheadlen, pktlen) ndis_handle adapter; ndis_handle ctx; char *addr; void *hdr; uint32_t hdrlen; void *lookahead; uint32_t lookaheadlen; uint32_t pktlen; { ndis_miniport_block *block; uint8_t irql; uint32_t status; ndis_buffer *b; ndis_packet *p; struct mbuf *m; ndis_ethpriv *priv; block = adapter; m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { NdisFreePacket(p); return; } /* Save the data provided to us so far. 
*/ m->m_len = lookaheadlen + hdrlen; m->m_pkthdr.len = pktlen + hdrlen; m->m_next = NULL; m_copyback(m, 0, hdrlen, hdr); m_copyback(m, hdrlen, lookaheadlen, lookahead); /* Now create a fake NDIS_PACKET to hold the data */ NdisAllocatePacket(&status, &p, block->nmb_rxpool); if (status != NDIS_STATUS_SUCCESS) { m_freem(m); return; } p->np_m0 = m; b = IoAllocateMdl(m->m_data, m->m_pkthdr.len, FALSE, FALSE, NULL); if (b == NULL) { NdisFreePacket(p); m_freem(m); return; } p->np_private.npp_head = p->np_private.npp_tail = b; p->np_private.npp_totlen = m->m_pkthdr.len; /* Save the packet RX context somewhere. */ priv = (ndis_ethpriv *)&p->np_protocolreserved; priv->nep_ctx = ctx; KeAcquireSpinLock(&block->nmb_lock, &irql); INSERT_LIST_TAIL((&block->nmb_packetlist), ((list_entry *)&p->u.np_clrsvd.np_miniport_rsvd)); KeReleaseSpinLock(&block->nmb_lock, irql); return; } static void ndis_rxeof_done(adapter) ndis_handle adapter; { struct ndis_softc *sc; ndis_miniport_block *block; block = adapter; /* Schedule transfer/RX of queued packets. */ sc = device_get_softc(block->nmb_physdeviceobj->do_devext); KeInsertQueueDpc(&sc->ndis_rxdpc, NULL, NULL); return; } /* * Runs at DISPATCH_LEVEL. 
*/ static void ndis_rxeof_xfr(dpc, adapter, sysarg1, sysarg2) kdpc *dpc; ndis_handle adapter; void *sysarg1; void *sysarg2; { ndis_miniport_block *block; struct ndis_softc *sc; ndis_packet *p; list_entry *l; uint32_t status; ndis_ethpriv *priv; struct ifnet *ifp; struct mbuf *m; block = adapter; sc = device_get_softc(block->nmb_physdeviceobj->do_devext); ifp = sc->ifp; KeAcquireSpinLockAtDpcLevel(&block->nmb_lock); l = block->nmb_packetlist.nle_flink; while(l != &block->nmb_packetlist) { REMOVE_LIST_HEAD((&block->nmb_packetlist)); p = CONTAINING_RECORD(l, ndis_packet, u.np_clrsvd.np_miniport_rsvd); priv = (ndis_ethpriv *)&p->np_protocolreserved; m = p->np_m0; p->np_softc = sc; p->np_m0 = NULL; KeReleaseSpinLockFromDpcLevel(&block->nmb_lock); status = MSCALL6(sc->ndis_chars->nmc_transferdata_func, p, &p->np_private.npp_totlen, block, priv->nep_ctx, m->m_len, m->m_pkthdr.len - m->m_len); KeAcquireSpinLockAtDpcLevel(&block->nmb_lock); /* * If status is NDIS_STATUS_PENDING, do nothing and * wait for a callback to the ndis_rxeof_xfr_done() * handler. 
*/ m->m_len = m->m_pkthdr.len; m->m_pkthdr.rcvif = ifp; if (status == NDIS_STATUS_SUCCESS) { IoFreeMdl(p->np_private.npp_head); NdisFreePacket(p); ifp->if_ipackets++; (*ifp->if_input)(ifp, m); } if (status == NDIS_STATUS_FAILURE) m_freem(m); /* Advance to next packet */ l = block->nmb_packetlist.nle_flink; } KeReleaseSpinLockFromDpcLevel(&block->nmb_lock); return; } static void ndis_rxeof_xfr_done(adapter, packet, status, len) ndis_handle adapter; ndis_packet *packet; uint32_t status; uint32_t len; { ndis_miniport_block *block; struct ndis_softc *sc; struct ifnet *ifp; struct mbuf *m; block = adapter; sc = device_get_softc(block->nmb_physdeviceobj->do_devext); ifp = sc->ifp; m = packet->np_m0; IoFreeMdl(packet->np_private.npp_head); NdisFreePacket(packet); if (status != NDIS_STATUS_SUCCESS) { m_freem(m); return; } m->m_len = m->m_pkthdr.len; m->m_pkthdr.rcvif = ifp; ifp->if_ipackets++; (*ifp->if_input)(ifp, m); return; } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. * * When handling received NDIS packets, the 'status' field in the * out-of-band portion of the ndis_packet has special meaning. In the * most common case, the underlying NDIS driver will set this field * to NDIS_STATUS_SUCCESS, which indicates that it's ok for us to * take posession of it. We then change the status field to * NDIS_STATUS_PENDING to tell the driver that we now own the packet, * and that we will return it at some point in the future via the * return packet handler. * * If the driver hands us a packet with a status of NDIS_STATUS_RESOURCES, * this means the driver is running out of packet/buffer resources and * wants to maintain ownership of the packet. In this case, we have to * copy the packet data into local storage and let the driver keep the * packet. 
*/ static void ndis_rxeof(adapter, packets, pktcnt) ndis_handle adapter; ndis_packet **packets; uint32_t pktcnt; { struct ndis_softc *sc; ndis_miniport_block *block; ndis_packet *p; uint32_t s; ndis_tcpip_csum *csum; struct ifnet *ifp; struct mbuf *m0, *m; int i; block = (ndis_miniport_block *)adapter; sc = device_get_softc(block->nmb_physdeviceobj->do_devext); ifp = sc->ifp; for (i = 0; i < pktcnt; i++) { p = packets[i]; /* Stash the softc here so ptom can use it. */ p->np_softc = sc; if (ndis_ptom(&m0, p)) { device_printf (sc->ndis_dev, "ptom failed\n"); if (p->np_oob.npo_status == NDIS_STATUS_SUCCESS) ndis_return_packet(sc, p); } else { if (p->np_oob.npo_status == NDIS_STATUS_RESOURCES) { m = m_dup(m0, M_DONTWAIT); /* * NOTE: we want to destroy the mbuf here, but * we don't actually want to return it to the * driver via the return packet handler. By * bumping np_refcnt, we can prevent the * ndis_return_packet() routine from actually * doing anything. */ p->np_refcnt++; m_freem(m0); if (m == NULL) ifp->if_ierrors++; else m0 = m; } else p->np_oob.npo_status = NDIS_STATUS_PENDING; m0->m_pkthdr.rcvif = ifp; ifp->if_ipackets++; /* Deal with checksum offload. */ if (ifp->if_capenable & IFCAP_RXCSUM && p->np_ext.npe_info[ndis_tcpipcsum_info] != NULL) { s = (uintptr_t) p->np_ext.npe_info[ndis_tcpipcsum_info]; csum = (ndis_tcpip_csum *)&s; if (csum->u.ntc_rxflags & NDIS_RXCSUM_IP_PASSED) m0->m_pkthdr.csum_flags |= CSUM_IP_CHECKED|CSUM_IP_VALID; if (csum->u.ntc_rxflags & (NDIS_RXCSUM_TCP_PASSED | NDIS_RXCSUM_UDP_PASSED)) { m0->m_pkthdr.csum_flags |= CSUM_DATA_VALID|CSUM_PSEUDO_HDR; m0->m_pkthdr.csum_data = 0xFFFF; } } (*ifp->if_input)(ifp, m0); } } return; } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. 
*/ static void ndis_txeof(adapter, packet, status) ndis_handle adapter; ndis_packet *packet; ndis_status status; { struct ndis_softc *sc; ndis_miniport_block *block; struct ifnet *ifp; int idx; struct mbuf *m; block = (ndis_miniport_block *)adapter; sc = device_get_softc(block->nmb_physdeviceobj->do_devext); ifp = sc->ifp; m = packet->np_m0; idx = packet->np_txidx; if (sc->ndis_sc) bus_dmamap_unload(sc->ndis_ttag, sc->ndis_tmaps[idx]); ndis_free_packet(packet); m_freem(m); NDIS_LOCK(sc); sc->ndis_txarray[idx] = NULL; sc->ndis_txpending++; if (status == NDIS_STATUS_SUCCESS) ifp->if_opackets++; else ifp->if_oerrors++; ifp->if_timer = 0; ifp->if_flags &= ~IFF_OACTIVE; NDIS_UNLOCK(sc); NdisScheduleWorkItem(&sc->ndis_startitem); return; } static void ndis_linksts(adapter, status, sbuf, slen) ndis_handle adapter; ndis_status status; void *sbuf; uint32_t slen; { ndis_miniport_block *block; struct ndis_softc *sc; block = adapter; sc = device_get_softc(block->nmb_physdeviceobj->do_devext); block->nmb_getstat = status; return; } static void ndis_linksts_done(adapter) ndis_handle adapter; { ndis_miniport_block *block; struct ndis_softc *sc; struct ifnet *ifp; block = adapter; sc = device_get_softc(block->nmb_physdeviceobj->do_devext); ifp = sc->ifp; if (!NDIS_INITIALIZED(sc)) return; switch (block->nmb_getstat) { case NDIS_STATUS_MEDIA_CONNECT: NdisScheduleWorkItem(&sc->ndis_tickitem); NdisScheduleWorkItem(&sc->ndis_startitem); break; case NDIS_STATUS_MEDIA_DISCONNECT: if (sc->ndis_link) NdisScheduleWorkItem(&sc->ndis_tickitem); break; default: break; } return; } static void ndis_intr(arg) void *arg; { struct ndis_softc *sc; struct ifnet *ifp; int is_our_intr = 0; int call_isr = 0; uint8_t irql; ndis_miniport_interrupt *intr; sc = arg; ifp = sc->ifp; intr = sc->ndis_block->nmb_interrupt; if (intr == NULL || sc->ndis_block->nmb_miniportadapterctx == NULL) return; KeAcquireSpinLock(&intr->ni_dpccountlock, &irql); if (sc->ndis_block->nmb_interrupt->ni_isrreq == TRUE) 
ndis_isr(sc, &is_our_intr, &call_isr); else { ndis_disable_intr(sc); call_isr = 1; } KeReleaseSpinLock(&intr->ni_dpccountlock, irql); if ((is_our_intr || call_isr)) IoRequestDpc(sc->ndis_block->nmb_deviceobj, NULL, sc); return; } static void ndis_tick(xsc) void *xsc; { struct ndis_softc *sc; mtx_unlock(&Giant); sc = xsc; NdisScheduleWorkItem(&sc->ndis_tickitem); sc->ndis_stat_ch = timeout(ndis_tick, sc, hz * sc->ndis_block->nmb_checkforhangsecs); mtx_lock(&Giant); return; } static void ndis_ticktask(w, xsc) ndis_work_item *w; void *xsc; { struct ndis_softc *sc; ndis_checkforhang_handler hangfunc; uint8_t rval; ndis_media_state linkstate; int error, len; sc = xsc; hangfunc = sc->ndis_chars->nmc_checkhang_func; if (hangfunc != NULL) { rval = MSCALL1(hangfunc, sc->ndis_block->nmb_miniportadapterctx); if (rval == TRUE) { ndis_reset_nic(sc); return; } } len = sizeof(linkstate); error = ndis_get_info(sc, OID_GEN_MEDIA_CONNECT_STATUS, (void *)&linkstate, &len); NDIS_LOCK(sc); if (sc->ndis_link == 0 && linkstate == nmc_connected) { device_printf(sc->ndis_dev, "link up\n"); sc->ndis_link = 1; NDIS_UNLOCK(sc); if (sc->ndis_80211) ndis_getstate_80211(sc); NDIS_LOCK(sc); #ifdef LINK_STATE_UP sc->ifp->if_link_state = LINK_STATE_UP; rt_ifmsg(sc->ifp); #endif /* LINK_STATE_UP */ } if (sc->ndis_link == 1 && linkstate == nmc_disconnected) { device_printf(sc->ndis_dev, "link down\n"); sc->ndis_link = 0; #ifdef LINK_STATE_DOWN sc->ifp->if_link_state = LINK_STATE_DOWN; rt_ifmsg(sc->ifp); #endif /* LINK_STATE_DOWN */ } NDIS_UNLOCK(sc); return; } static void ndis_map_sclist(arg, segs, nseg, mapsize, error) void *arg; bus_dma_segment_t *segs; int nseg; bus_size_t mapsize; int error; { struct ndis_sc_list *sclist; int i; if (error || arg == NULL) return; sclist = arg; sclist->nsl_frags = nseg; for (i = 0; i < nseg; i++) { sclist->nsl_elements[i].nse_addr.np_quad = segs[i].ds_addr; sclist->nsl_elements[i].nse_len = segs[i].ds_len; } return; } static void ndis_starttask(w, arg) 
ndis_work_item *w; void *arg; { struct ifnet *ifp; ifp = arg; #if __FreeBSD_version < 502114 if (ifp->if_snd.ifq_head != NULL) #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) #endif ndis_start(ifp); return; } /* * Main transmit routine. To make NDIS drivers happy, we need to * transform mbuf chains into NDIS packets and feed them to the * send packet routines. Most drivers allow you to send several * packets at once (up to the maxpkts limit). Unfortunately, rather * that accepting them in the form of a linked list, they expect * a contiguous array of pointers to packets. * * For those drivers which use the NDIS scatter/gather DMA mechanism, * we need to perform busdma work here. Those that use map registers * will do the mapping themselves on a buffer by buffer basis. */ static void ndis_start(ifp) struct ifnet *ifp; { struct ndis_softc *sc; struct mbuf *m = NULL; ndis_packet **p0 = NULL, *p = NULL; ndis_tcpip_csum *csum; int pcnt = 0, status; sc = ifp->if_softc; NDIS_LOCK(sc); if (!sc->ndis_link || ifp->if_flags & IFF_OACTIVE) { NDIS_UNLOCK(sc); return; } p0 = &sc->ndis_txarray[sc->ndis_txidx]; while(sc->ndis_txpending) { #if __FreeBSD_version < 502114 IF_DEQUEUE(&ifp->if_snd, m); #else IFQ_DRV_DEQUEUE(&ifp->if_snd, m); #endif if (m == NULL) break; NdisAllocatePacket(&status, &sc->ndis_txarray[sc->ndis_txidx], sc->ndis_txpool); if (status != NDIS_STATUS_SUCCESS) break; if (ndis_mtop(m, &sc->ndis_txarray[sc->ndis_txidx])) { #if __FreeBSD_version >= 502114 IFQ_DRV_PREPEND(&ifp->if_snd, m); #endif NDIS_UNLOCK(sc); #if __FreeBSD_version < 502114 IF_PREPEND(&ifp->if_snd, m); #endif return; } /* * Save pointer to original mbuf * so we can free it later. */ p = sc->ndis_txarray[sc->ndis_txidx]; p->np_txidx = sc->ndis_txidx; p->np_m0 = m; p->np_oob.npo_status = NDIS_STATUS_PENDING; /* * Do scatter/gather processing, if driver requested it. 
*/ if (sc->ndis_sc) { bus_dmamap_load_mbuf(sc->ndis_ttag, sc->ndis_tmaps[sc->ndis_txidx], m, ndis_map_sclist, &p->np_sclist, BUS_DMA_NOWAIT); bus_dmamap_sync(sc->ndis_ttag, sc->ndis_tmaps[sc->ndis_txidx], BUS_DMASYNC_PREREAD); p->np_ext.npe_info[ndis_sclist_info] = &p->np_sclist; } /* Handle checksum offload. */ if (ifp->if_capenable & IFCAP_TXCSUM && m->m_pkthdr.csum_flags) { csum = (ndis_tcpip_csum *) &p->np_ext.npe_info[ndis_tcpipcsum_info]; csum->u.ntc_txflags = NDIS_TXCSUM_DO_IPV4; if (m->m_pkthdr.csum_flags & CSUM_IP) csum->u.ntc_txflags |= NDIS_TXCSUM_DO_IP; if (m->m_pkthdr.csum_flags & CSUM_TCP) csum->u.ntc_txflags |= NDIS_TXCSUM_DO_TCP; if (m->m_pkthdr.csum_flags & CSUM_UDP) csum->u.ntc_txflags |= NDIS_TXCSUM_DO_UDP; p->np_private.npp_flags = NDIS_PROTOCOL_ID_TCP_IP; } NDIS_INC(sc); sc->ndis_txpending--; pcnt++; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, m); /* * The array that p0 points to must appear contiguous, * so we must not wrap past the end of sc->ndis_txarray[]. * If it looks like we're about to wrap, break out here * so the this batch of packets can be transmitted, then * wait for txeof to ask us to send the rest. */ if (sc->ndis_txidx == 0) break; } if (pcnt == 0) { NDIS_UNLOCK(sc); return; } if (sc->ndis_txpending == 0) ifp->if_flags |= IFF_OACTIVE; /* * Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; NDIS_UNLOCK(sc); if (sc->ndis_maxpkts == 1) ndis_send_packet(sc, p); else ndis_send_packets(sc, p0, pcnt); return; } static void ndis_init(xsc) void *xsc; { struct ndis_softc *sc = xsc; struct ifnet *ifp = sc->ifp; int i, error; /* * Avoid reintializing the link unnecessarily. * This should be dealt with in a better way by * fixing the upper layer modules so they don't * call ifp->if_init() quite as often. */ if (sc->ndis_link && sc->ndis_skip) return; /* * Cancel pending I/O and free all RX/TX buffers. 
*/ ndis_stop(sc); if (ndis_init_nic(sc)) return; /* Init our MAC address */ /* Program the packet filter */ sc->ndis_filter = NDIS_PACKET_TYPE_DIRECTED; if (ifp->if_flags & IFF_BROADCAST) sc->ndis_filter |= NDIS_PACKET_TYPE_BROADCAST; if (ifp->if_flags & IFF_PROMISC) sc->ndis_filter |= NDIS_PACKET_TYPE_PROMISCUOUS; i = sizeof(sc->ndis_filter); error = ndis_set_info(sc, OID_GEN_CURRENT_PACKET_FILTER, &sc->ndis_filter, &i); if (error) device_printf (sc->ndis_dev, "set filter failed: %d\n", error); /* * Program the multicast filter, if necessary. */ ndis_setmulti(sc); /* Setup task offload. */ ndis_set_offload(sc); /* Enable interrupts. */ ndis_enable_intr(sc); if (sc->ndis_80211) ndis_setstate_80211(sc); NDIS_LOCK(sc); sc->ndis_txidx = 0; sc->ndis_txpending = sc->ndis_maxpkts; sc->ndis_link = 0; ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; NDIS_UNLOCK(sc); /* * Some drivers don't set this value. The NDIS spec says * the default checkforhang timeout is "approximately 2 * seconds." We use 3 seconds, because it seems for some * drivers, exactly 2 seconds is too fast. */ if (sc->ndis_block->nmb_checkforhangsecs == 0) sc->ndis_block->nmb_checkforhangsecs = 3; sc->ndis_stat_ch = timeout(ndis_tick, sc, hz * sc->ndis_block->nmb_checkforhangsecs); return; } /* * Set media options. */ static int ndis_ifmedia_upd(ifp) struct ifnet *ifp; { struct ndis_softc *sc; sc = ifp->if_softc; if (NDIS_INITIALIZED(sc)) ndis_init(sc); return(0); } /* * Report current media status. 
*/ static void ndis_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct ndis_softc *sc; uint32_t media_info; ndis_media_state linkstate; int error, len; ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; sc = ifp->if_softc; if (!NDIS_INITIALIZED(sc)) return; len = sizeof(linkstate); error = ndis_get_info(sc, OID_GEN_MEDIA_CONNECT_STATUS, (void *)&linkstate, &len); len = sizeof(media_info); error = ndis_get_info(sc, OID_GEN_LINK_SPEED, (void *)&media_info, &len); if (linkstate == nmc_connected) ifmr->ifm_status |= IFM_ACTIVE; switch(media_info) { case 100000: ifmr->ifm_active |= IFM_10_T; break; case 1000000: ifmr->ifm_active |= IFM_100_TX; break; case 10000000: ifmr->ifm_active |= IFM_1000_T; break; default: device_printf(sc->ndis_dev, "unknown speed: %d\n", media_info); break; } return; } static void ndis_setstate_80211(sc) struct ndis_softc *sc; { struct ieee80211com *ic; ndis_80211_ssid ssid; ndis_80211_config config; ndis_80211_wep wep; int i, rval = 0, len; uint32_t arg; struct ifnet *ifp; ic = &sc->ic; ifp = sc->ifp; if (!NDIS_INITIALIZED(sc)) return; /* Set network infrastructure mode. 
*/ len = sizeof(arg); if (ic->ic_opmode == IEEE80211_M_IBSS) arg = NDIS_80211_NET_INFRA_IBSS; else arg = NDIS_80211_NET_INFRA_BSS; rval = ndis_set_info(sc, OID_802_11_INFRASTRUCTURE_MODE, &arg, &len); if (rval) device_printf (sc->ndis_dev, "set infra failed: %d\n", rval); /* Set WEP */ #ifdef IEEE80211_F_PRIVACY if (ic->ic_flags & IEEE80211_F_PRIVACY) { #else if (ic->ic_wep_mode >= IEEE80211_WEP_ON) { #endif for (i = 0; i < IEEE80211_WEP_NKID; i++) { if (ic->ic_nw_keys[i].wk_keylen) { bzero((char *)&wep, sizeof(wep)); wep.nw_keylen = ic->ic_nw_keys[i].wk_keylen; #ifdef notdef /* 5 and 13 are the only valid key lengths */ if (ic->ic_nw_keys[i].wk_keylen < 5) wep.nw_keylen = 5; else if (ic->ic_nw_keys[i].wk_keylen > 5 && ic->ic_nw_keys[i].wk_keylen < 13) wep.nw_keylen = 13; #endif wep.nw_keyidx = i; wep.nw_length = (sizeof(uint32_t) * 3) + wep.nw_keylen; if (i == ic->ic_def_txkey) wep.nw_keyidx |= NDIS_80211_WEPKEY_TX; bcopy(ic->ic_nw_keys[i].wk_key, wep.nw_keydata, wep.nw_length); len = sizeof(wep); rval = ndis_set_info(sc, OID_802_11_ADD_WEP, &wep, &len); if (rval) device_printf(sc->ndis_dev, "set wepkey failed: %d\n", rval); } } arg = NDIS_80211_WEPSTAT_ENABLED; len = sizeof(arg); rval = ndis_set_info(sc, OID_802_11_WEP_STATUS, &arg, &len); if (rval) device_printf(sc->ndis_dev, "enable WEP failed: %d\n", rval); #ifndef IEEE80211_F_WEPON #if 0 if (ic->ic_wep_mode != IEEE80211_WEP_8021X && ic->ic_wep_mode != IEEE80211_WEP_ON) arg = NDIS_80211_PRIVFILT_ACCEPTALL; else #endif #endif arg = NDIS_80211_PRIVFILT_8021XWEP; len = sizeof(arg); rval = ndis_set_info(sc, OID_802_11_PRIVACY_FILTER, &arg, &len); #ifdef IEEE80211_WEP_8021X /*IEEE80211_F_WEPON*/ /* Accept that we only have "shared" and 802.1x modes. 
*/ if (rval == 0) { if (arg == NDIS_80211_PRIVFILT_ACCEPTALL) ic->ic_wep_mode = IEEE80211_WEP_MIXED; else ic->ic_wep_mode = IEEE80211_WEP_8021X; } #endif arg = NDIS_80211_AUTHMODE_OPEN; } else { arg = NDIS_80211_WEPSTAT_DISABLED; len = sizeof(arg); ndis_set_info(sc, OID_802_11_WEP_STATUS, &arg, &len); arg = NDIS_80211_AUTHMODE_OPEN; } len = sizeof(arg); rval = ndis_set_info(sc, OID_802_11_AUTHENTICATION_MODE, &arg, &len); #ifdef notyet if (rval) device_printf (sc->ndis_dev, "set auth failed: %d\n", rval); #endif #ifdef notyet /* Set network type. */ arg = 0; switch (ic->ic_curmode) { case IEEE80211_MODE_11A: arg = NDIS_80211_NETTYPE_11OFDM5; break; case IEEE80211_MODE_11B: arg = NDIS_80211_NETTYPE_11DS; break; case IEEE80211_MODE_11G: arg = NDIS_80211_NETTYPE_11OFDM24; break; default: device_printf(sc->ndis_dev, "unknown mode: %d\n", ic->ic_curmode); } if (arg) { len = sizeof(arg); rval = ndis_set_info(sc, OID_802_11_NETWORK_TYPE_IN_USE, &arg, &len); if (rval) device_printf (sc->ndis_dev, "set nettype failed: %d\n", rval); } #endif len = sizeof(config); bzero((char *)&config, len); config.nc_length = len; config.nc_fhconfig.ncf_length = sizeof(ndis_80211_config_fh); rval = ndis_get_info(sc, OID_802_11_CONFIGURATION, &config, &len); /* * Some drivers expect us to initialize these values, so * provide some defaults. */ if (config.nc_beaconperiod == 0) config.nc_beaconperiod = 100; if (config.nc_atimwin == 0) config.nc_atimwin = 100; if (config.nc_fhconfig.ncf_dwelltime == 0) config.nc_fhconfig.ncf_dwelltime = 200; if (rval == 0 && ic->ic_ibss_chan != IEEE80211_CHAN_ANYC) { int chan, chanflag; chan = ieee80211_chan2ieee(ic, ic->ic_ibss_chan); chanflag = config.nc_dsconfig > 2500000 ? 
IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ; if (chan != ieee80211_mhz2ieee(config.nc_dsconfig / 1000, 0)) { config.nc_dsconfig = ic->ic_ibss_chan->ic_freq * 1000; ic->ic_bss->ni_chan = ic->ic_ibss_chan; len = sizeof(config); config.nc_length = len; config.nc_fhconfig.ncf_length = sizeof(ndis_80211_config_fh); rval = ndis_set_info(sc, OID_802_11_CONFIGURATION, &config, &len); if (rval) device_printf(sc->ndis_dev, "couldn't change " "DS config to %ukHz: %d\n", config.nc_dsconfig, rval); } } else if (rval) device_printf(sc->ndis_dev, "couldn't retrieve " "channel info: %d\n", rval); /* Set SSID -- always do this last. */ len = sizeof(ssid); bzero((char *)&ssid, len); ssid.ns_ssidlen = ic->ic_des_esslen; if (ssid.ns_ssidlen == 0) { ssid.ns_ssidlen = 1; } else bcopy(ic->ic_des_essid, ssid.ns_ssid, ssid.ns_ssidlen); rval = ndis_set_info(sc, OID_802_11_SSID, &ssid, &len); if (rval) device_printf (sc->ndis_dev, "set ssid failed: %d\n", rval); return; } static void ndis_media_status(struct ifnet *ifp, struct ifmediareq *imr) { struct ieee80211com *ic = &((struct ndis_softc *)ifp->if_softc)->ic; struct ieee80211_node *ni = NULL; imr->ifm_status = IFM_AVALID; imr->ifm_active = IFM_IEEE80211; if (ic->ic_state == IEEE80211_S_RUN) imr->ifm_status |= IFM_ACTIVE; imr->ifm_active |= IFM_AUTO; switch (ic->ic_opmode) { case IEEE80211_M_STA: ni = ic->ic_bss; /* calculate rate subtype */ imr->ifm_active |= ieee80211_rate2media(ic, ni->ni_rates.rs_rates[ni->ni_txrate], ic->ic_curmode); break; case IEEE80211_M_IBSS: ni = ic->ic_bss; /* calculate rate subtype */ imr->ifm_active |= ieee80211_rate2media(ic, ni->ni_rates.rs_rates[ni->ni_txrate], ic->ic_curmode); imr->ifm_active |= IFM_IEEE80211_ADHOC; break; case IEEE80211_M_AHDEMO: /* should not come here */ break; case IEEE80211_M_HOSTAP: imr->ifm_active |= IFM_IEEE80211_HOSTAP; break; case IEEE80211_M_MONITOR: imr->ifm_active |= IFM_IEEE80211_MONITOR; break; } switch (ic->ic_curmode) { case IEEE80211_MODE_11A: imr->ifm_active |= 
IFM_MAKEMODE(IFM_IEEE80211_11A); break; case IEEE80211_MODE_11B: imr->ifm_active |= IFM_MAKEMODE(IFM_IEEE80211_11B); break; case IEEE80211_MODE_11G: imr->ifm_active |= IFM_MAKEMODE(IFM_IEEE80211_11G); break; case IEEE80211_MODE_TURBO_A: imr->ifm_active |= IFM_MAKEMODE(IFM_IEEE80211_11A) | IFM_IEEE80211_TURBO; break; } } static int ndis_get_assoc(sc, assoc) struct ndis_softc *sc; ndis_wlan_bssid_ex **assoc; { ndis_80211_bssid_list_ex *bl; ndis_wlan_bssid_ex *bs; ndis_80211_macaddr bssid; int i, len, error; if (!sc->ndis_link) return(ENOENT); len = sizeof(bssid); error = ndis_get_info(sc, OID_802_11_BSSID, &bssid, &len); if (error) { device_printf(sc->ndis_dev, "failed to get bssid\n"); return(ENOENT); } len = 0; error = ndis_get_info(sc, OID_802_11_BSSID_LIST, NULL, &len); if (error != ENOSPC) { device_printf(sc->ndis_dev, "bssid_list failed\n"); return (error); } bl = malloc(len, M_TEMP, M_NOWAIT|M_ZERO); error = ndis_get_info(sc, OID_802_11_BSSID_LIST, bl, &len); if (error) { free(bl, M_TEMP); device_printf(sc->ndis_dev, "bssid_list failed\n"); return (error); } bs = (ndis_wlan_bssid_ex *)&bl->nblx_bssid[0]; for (i = 0; i < bl->nblx_items; i++) { if (bcmp(bs->nwbx_macaddr, bssid, sizeof(bssid)) == 0) { *assoc = malloc(bs->nwbx_len, M_TEMP, M_NOWAIT); if (*assoc == NULL) { free(bl, M_TEMP); return(ENOMEM); } bcopy((char *)bs, (char *)*assoc, bs->nwbx_len); free(bl, M_TEMP); return(0); } bs = (ndis_wlan_bssid_ex *)((char *)bs + bs->nwbx_len); } free(bl, M_TEMP); return(ENOENT); } static void ndis_getstate_80211(sc) struct ndis_softc *sc; { struct ieee80211com *ic; ndis_80211_ssid ssid; ndis_80211_config config; ndis_wlan_bssid_ex *bs; int rval, len, i = 0; uint32_t arg; struct ifnet *ifp; ic = &sc->ic; ifp = sc->ifp; if (!NDIS_INITIALIZED(sc)) return; if (sc->ndis_link) ic->ic_state = IEEE80211_S_RUN; else ic->ic_state = IEEE80211_S_ASSOC; /* * If we're associated, retrieve info on the current bssid. 
*/ if ((rval = ndis_get_assoc(sc, &bs)) == 0) { switch(bs->nwbx_nettype) { case NDIS_80211_NETTYPE_11FH: case NDIS_80211_NETTYPE_11DS: ic->ic_curmode = IEEE80211_MODE_11B; break; case NDIS_80211_NETTYPE_11OFDM5: ic->ic_curmode = IEEE80211_MODE_11A; break; case NDIS_80211_NETTYPE_11OFDM24: ic->ic_curmode = IEEE80211_MODE_11G; break; default: device_printf(sc->ndis_dev, "unknown nettype %d\n", arg); break; } IEEE80211_ADDR_COPY(ic->ic_bss->ni_bssid, bs->nwbx_macaddr); free(bs, M_TEMP); } else return; len = sizeof(ssid); bzero((char *)&ssid, len); rval = ndis_get_info(sc, OID_802_11_SSID, &ssid, &len); if (rval) device_printf (sc->ndis_dev, "get ssid failed: %d\n", rval); bcopy(ssid.ns_ssid, ic->ic_bss->ni_essid, ssid.ns_ssidlen); ic->ic_bss->ni_esslen = ssid.ns_ssidlen; len = sizeof(arg); rval = ndis_get_info(sc, OID_GEN_LINK_SPEED, &arg, &len); if (rval) device_printf (sc->ndis_dev, "get link speed failed: %d\n", rval); if (ic->ic_modecaps & (1<ic_bss->ni_rates = ic->ic_sup_rates[IEEE80211_MODE_11B]; for (i = 0; i < ic->ic_bss->ni_rates.rs_nrates; i++) { if ((ic->ic_bss->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL) == arg / 5000) break; } } if (i == ic->ic_bss->ni_rates.rs_nrates && ic->ic_modecaps & (1<ic_bss->ni_rates = ic->ic_sup_rates[IEEE80211_MODE_11G]; for (i = 0; i < ic->ic_bss->ni_rates.rs_nrates; i++) { if ((ic->ic_bss->ni_rates.rs_rates[i] & IEEE80211_RATE_VAL) == arg / 5000) break; } } if (i == ic->ic_bss->ni_rates.rs_nrates) device_printf(sc->ndis_dev, "no matching rate for: %d\n", arg / 5000); else ic->ic_bss->ni_txrate = i; if (ic->ic_caps & IEEE80211_C_PMGT) { len = sizeof(arg); rval = ndis_get_info(sc, OID_802_11_POWER_MODE, &arg, &len); if (rval) device_printf(sc->ndis_dev, "get power mode failed: %d\n", rval); if (arg == NDIS_80211_POWERMODE_CAM) ic->ic_flags &= ~IEEE80211_F_PMGTON; else ic->ic_flags |= IEEE80211_F_PMGTON; } len = sizeof(config); bzero((char *)&config, len); config.nc_length = len; config.nc_fhconfig.ncf_length = 
sizeof(ndis_80211_config_fh); rval = ndis_get_info(sc, OID_802_11_CONFIGURATION, &config, &len); if (rval == 0) { int chan; chan = ieee80211_mhz2ieee(config.nc_dsconfig / 1000, 0); if (chan < 0 || chan >= IEEE80211_CHAN_MAX) { if (ifp->if_flags & IFF_DEBUG) device_printf(sc->ndis_dev, "current channel " "(%uMHz) out of bounds\n", config.nc_dsconfig / 1000); ic->ic_bss->ni_chan = &ic->ic_channels[1]; } else ic->ic_bss->ni_chan = &ic->ic_channels[chan]; } else device_printf(sc->ndis_dev, "couldn't retrieve " "channel info: %d\n", rval); /* len = sizeof(arg); rval = ndis_get_info(sc, OID_802_11_WEP_STATUS, &arg, &len); if (rval) device_printf (sc->ndis_dev, "get wep status failed: %d\n", rval); if (arg == NDIS_80211_WEPSTAT_ENABLED) ic->ic_flags |= IEEE80211_F_WEPON; else ic->ic_flags &= ~IEEE80211_F_WEPON; */ return; } static int ndis_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct ndis_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; int i, error = 0; /*NDIS_LOCK(sc);*/ switch(command) { case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->ndis_if_flags & IFF_PROMISC)) { sc->ndis_filter |= NDIS_PACKET_TYPE_PROMISCUOUS; i = sizeof(sc->ndis_filter); error = ndis_set_info(sc, OID_GEN_CURRENT_PACKET_FILTER, &sc->ndis_filter, &i); } else if (ifp->if_flags & IFF_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->ndis_if_flags & IFF_PROMISC) { sc->ndis_filter &= ~NDIS_PACKET_TYPE_PROMISCUOUS; i = sizeof(sc->ndis_filter); error = ndis_set_info(sc, OID_GEN_CURRENT_PACKET_FILTER, &sc->ndis_filter, &i); } else ndis_init(sc); } else { if (ifp->if_flags & IFF_RUNNING) ndis_stop(sc); } sc->ndis_if_flags = ifp->if_flags; error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: ndis_setmulti(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: if (sc->ndis_80211) { error = ieee80211_ioctl(&sc->ic, command, data); if (error == ENETRESET) { 
ndis_setstate_80211(sc); /*ndis_init(sc);*/ error = 0; } } else error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); break; case SIOCSIFCAP: ifp->if_capenable = ifr->ifr_reqcap; if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist = sc->ndis_hwassist; else ifp->if_hwassist = 0; ndis_set_offload(sc); break; case SIOCG80211: if (!NDIS_INITIALIZED(sc)) goto do_80211; if (sc->ndis_80211) error = ndis_80211_ioctl_get(ifp, command, data); else error = ENOTTY; break; case SIOCS80211: if (!NDIS_INITIALIZED(sc)) goto do_80211; if (sc->ndis_80211) error = ndis_80211_ioctl_set(ifp, command, data); else error = ENOTTY; break; case SIOCGIFGENERIC: case SIOCSIFGENERIC: if (sc->ndis_80211 && NDIS_INITIALIZED(sc)) { if (command == SIOCGIFGENERIC) error = ndis_wi_ioctl_get(ifp, command, data); else error = ndis_wi_ioctl_set(ifp, command, data); } else error = ENOTTY; if (error != ENOTTY) break; default: do_80211: sc->ndis_skip = 1; if (sc->ndis_80211) { error = ieee80211_ioctl(&sc->ic, command, data); if (error == ENETRESET) { ndis_setstate_80211(sc); error = 0; } } else error = ether_ioctl(ifp, command, data); sc->ndis_skip = 0; break; } /*NDIS_UNLOCK(sc);*/ return(error); } static int ndis_wi_ioctl_get(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct wi_req wreq; struct ifreq *ifr; struct ndis_softc *sc; ndis_80211_bssid_list_ex *bl; ndis_wlan_bssid_ex *wb; struct wi_apinfo *api; int error, i, j, len, maxaps; sc = ifp->if_softc; ifr = (struct ifreq *)data; error = copyin(ifr->ifr_data, &wreq, sizeof(wreq)); if (error) return (error); switch (wreq.wi_type) { case WI_RID_READ_APS: len = 0; error = ndis_set_info(sc, OID_802_11_BSSID_LIST_SCAN, NULL, &len); if (error == 0) tsleep(&error, PPAUSE|PCATCH, "ssidscan", hz * 2); len = 0; error = ndis_get_info(sc, OID_802_11_BSSID_LIST, NULL, &len); if (error != ENOSPC) break; bl = malloc(len, M_DEVBUF, M_WAITOK|M_ZERO); error = ndis_get_info(sc, OID_802_11_BSSID_LIST, bl, &len); if (error) { free(bl, 
M_DEVBUF); break; } maxaps = (2 * wreq.wi_len - sizeof(int)) / sizeof(*api); maxaps = MIN(maxaps, bl->nblx_items); wreq.wi_len = (maxaps * sizeof(*api) + sizeof(int)) / 2; *(int *)&wreq.wi_val = maxaps; api = (struct wi_apinfo *)&((int *)&wreq.wi_val)[1]; wb = bl->nblx_bssid; while (maxaps--) { bzero(api, sizeof(*api)); bcopy(&wb->nwbx_macaddr, &api->bssid, sizeof(api->bssid)); api->namelen = wb->nwbx_ssid.ns_ssidlen; bcopy(&wb->nwbx_ssid.ns_ssid, &api->name, api->namelen); if (wb->nwbx_privacy) api->capinfo |= IEEE80211_CAPINFO_PRIVACY; /* XXX Where can we get noise information? */ api->signal = wb->nwbx_rssi + 149; /* XXX */ api->quality = api->signal; api->channel = ieee80211_mhz2ieee(wb->nwbx_config.nc_dsconfig / 1000, 0); /* In "auto" infrastructure mode, this is useless. */ if (wb->nwbx_netinfra == NDIS_80211_NET_INFRA_IBSS) api->capinfo |= IEEE80211_CAPINFO_IBSS; if (wb->nwbx_len > sizeof(ndis_wlan_bssid)) { j = sizeof(ndis_80211_rates_ex); /* handle other extended things */ } else j = sizeof(ndis_80211_rates); for (i = api->rate = 0; i < j; i++) api->rate = MAX(api->rate, 5 * (wb->nwbx_supportedrates[i] & 0x7f)); api++; wb = (ndis_wlan_bssid_ex *)((char *)wb + wb->nwbx_len); } free(bl, M_DEVBUF); error = copyout(&wreq, ifr->ifr_data, sizeof(wreq)); break; default: error = ENOTTY; break; } return (error); } static int ndis_wi_ioctl_set(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct wi_req wreq; struct ifreq *ifr; struct ndis_softc *sc; uint32_t foo; int error, len; error = suser(curthread); if (error) return (error); sc = ifp->if_softc; ifr = (struct ifreq *)data; error = copyin(ifr->ifr_data, &wreq, sizeof(wreq)); if (error) return (error); switch (wreq.wi_type) { case WI_RID_SCAN_APS: case WI_RID_SCAN_REQ: /* arguments ignored */ len = sizeof(foo); foo = 0; error = ndis_set_info(sc, OID_802_11_BSSID_LIST_SCAN, &foo, &len); break; default: error = ENOTTY; break; } return (error); } static int ndis_80211_ioctl_get(struct ifnet 
*ifp, u_long command, caddr_t data) { struct ndis_softc *sc; struct ieee80211req *ireq; ndis_80211_bssid_list_ex *bl; ndis_80211_ssid ssid; ndis_80211_macaddr bssid; ndis_wlan_bssid_ex *wb; struct ieee80211req_scan_result *sr, *bsr; int error, len, i, j; char *cp; uint8_t nodename[IEEE80211_NWID_LEN]; uint16_t nodename_u[IEEE80211_NWID_LEN + 1]; char *acode; sc = ifp->if_softc; ireq = (struct ieee80211req *) data; switch (ireq->i_type) { case IEEE80211_IOC_MLME: error = 0; break; case IEEE80211_IOC_BSSID: len = sizeof(bssid); bzero((char*)&bssid, len); error = ndis_get_info(sc, OID_802_11_BSSID, &bssid, &len); if (error) { device_printf(sc->ndis_dev, "failed to get bssid\n"); return(error); } ireq->i_len = len; error = copyout(&bssid, ireq->i_data, len); break; case IEEE80211_IOC_SSID: len = sizeof(ssid); bzero((char*)&ssid, len); error = ndis_get_info(sc, OID_802_11_SSID, &ssid, &len); if (error) { device_printf(sc->ndis_dev, "failed to get ssid: %d\n", error); return(error); } ireq->i_len = ssid.ns_ssidlen; error = copyout(&ssid.ns_ssid, ireq->i_data, ssid.ns_ssidlen); break; case IEEE80211_IOC_SCAN_RESULTS: len = 0; error = ndis_get_info(sc, OID_802_11_BSSID_LIST, NULL, &len); if (error != ENOSPC) break; bl = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); error = ndis_get_info(sc, OID_802_11_BSSID_LIST, bl, &len); if (error) { free(bl, M_DEVBUF); break; } sr = bsr = malloc(ireq->i_len, M_DEVBUF, M_WAITOK | M_ZERO); wb = bl->nblx_bssid; len = 0; for (i = 0; i < bl->nblx_items; i++) { /* * Check if we have enough space left for this ap */ j = roundup(sizeof(*sr) + wb->nwbx_ssid.ns_ssidlen + wb->nwbx_ielen - sizeof(struct ndis_80211_fixed_ies), sizeof(uint32_t)); if (len + j > ireq->i_len) break; bcopy(&wb->nwbx_macaddr, &sr->isr_bssid, sizeof(sr->isr_bssid)); if (wb->nwbx_privacy) sr->isr_capinfo |= IEEE80211_CAPINFO_PRIVACY; sr->isr_rssi = wb->nwbx_rssi + 200; sr->isr_freq = wb->nwbx_config.nc_dsconfig / 1000; sr->isr_intval = wb->nwbx_config.nc_beaconperiod; switch 
(wb->nwbx_netinfra) { case NDIS_80211_NET_INFRA_IBSS: sr->isr_capinfo |= IEEE80211_CAPINFO_IBSS; break; case NDIS_80211_NET_INFRA_BSS: sr->isr_capinfo |= IEEE80211_CAPINFO_ESS; break; } for (j = 0; j < sizeof(sr->isr_rates); j++) { /* XXX - check units */ if (wb->nwbx_supportedrates[j] == 0) break; sr->isr_rates[j] = wb->nwbx_supportedrates[j] & 0x7f; } sr->isr_nrates = j; sr->isr_ssid_len = wb->nwbx_ssid.ns_ssidlen; cp = (char *)sr + sizeof(*sr); bcopy(&wb->nwbx_ssid.ns_ssid, cp, sr->isr_ssid_len); cp += sr->isr_ssid_len; sr->isr_ie_len = wb->nwbx_ielen - sizeof(struct ndis_80211_fixed_ies); bcopy((char *)wb->nwbx_ies + sizeof(struct ndis_80211_fixed_ies), cp, sr->isr_ie_len); sr->isr_len = roundup(sizeof(*sr) + sr->isr_ssid_len + sr->isr_ie_len, sizeof(uint32_t)); len += sr->isr_len; sr = (struct ieee80211req_scan_result *)((char *)sr + sr->isr_len); wb = (ndis_wlan_bssid_ex *)((char *)wb + wb->nwbx_len); } ireq->i_len = len; error = copyout(bsr, ireq->i_data, len); free(bl, M_DEVBUF); free(bsr, M_DEVBUF); break; case IEEE80211_IOC_STATIONNAME: error = ndis_get_info(sc, OID_GEN_MACHINE_NAME, &nodename_u, &len); if (error) break; acode = nodename; bzero((char *)nodename, IEEE80211_NWID_LEN); ndis_unicode_to_ascii(nodename_u, len, &acode); ireq->i_len = len / 2 + 1; error = copyout(acode, ireq->i_data, ireq->i_len); break; default: error = ieee80211_ioctl(&sc->ic, command, data); } return(error); } static int ndis_add_key(sc, wk, i_len) struct ndis_softc *sc; struct ieee80211req_key *wk; int16_t i_len; { ndis_80211_key *rkey; ndis_80211_wep *wep; int len, error; uint32_t arg; /* infrastructure mode only supported for now */ len = sizeof(arg); arg = NDIS_80211_NET_INFRA_BSS; error = ndis_set_info(sc, OID_802_11_INFRASTRUCTURE_MODE, &arg, &len); if (error) { device_printf(sc->ndis_dev, "setting infrastructure mode failed\n"); return(error); } switch(wk->ik_type) { case IEEE80211_CIPHER_WEP: len = 12 + wk->ik_keylen; wep = malloc(len, M_TEMP, M_WAITOK | M_ZERO); 
if(!wep) return(ENOSPC); wep->nw_length = len; wep->nw_keyidx = wk->ik_keyix; wep->nw_keylen = wk->ik_keylen; if(wk->ik_flags & IEEE80211_KEY_XMIT) wep->nw_keyidx |= 1 << 31; device_printf(sc->ndis_dev, "setting wep key\n"); error = copyin(wk->ik_keydata, wep->nw_keydata, wk->ik_keylen); if(error) { device_printf(sc->ndis_dev, "copyin of wep key to kernel space failed\n"); free(wep, M_TEMP); break; } error = ndis_set_info(sc, OID_802_11_ADD_WEP, wep, &len); if(error) { device_printf(sc->ndis_dev, "setting wep key failed\n"); break; } free(wep, M_TEMP); /* set the authentication mode */ arg = NDIS_80211_AUTHMODE_OPEN; error = ndis_set_info(sc, OID_802_11_AUTHENTICATION_MODE, &arg, &len); if(error) { device_printf(sc->ndis_dev, "setting authentication mode failed\n"); } /* set the encryption */ len = sizeof(arg); arg = NDIS_80211_WEPSTAT_ENABLED; error = ndis_set_info(sc, OID_802_11_ENCRYPTION_STATUS, &arg, &len); if(error) { device_printf(sc->ndis_dev, "setting encryption status failed\n"); return(error); } break; case IEEE80211_CIPHER_TKIP: len = 12 + 6 + 6 + 8 + wk->ik_keylen; rkey = malloc(len, M_TEMP, M_WAITOK | M_ZERO); if(!rkey) return(ENOSPC); rkey->nk_len = len; error = copyin(wk->ik_macaddr, rkey->nk_bssid, IEEE80211_ADDR_LEN); if(error) { device_printf(sc->ndis_dev, "copyin of bssid to kernel space failed\n"); free(rkey, M_TEMP); break; } /* keyrsc needs to be fixed: need to do some shifting */ error = copyin(&(wk->ik_keyrsc), &(rkey->nk_keyrsc), sizeof(rkey->nk_keyrsc)); if(error) { device_printf(sc->ndis_dev, "copyin of keyrsc to kernel space failed\n"); free(rkey, M_TEMP); break; } /* key index - gets weird in NDIS */ rkey->nk_keyidx = wk->ik_keyix; if(wk->ik_flags & IEEE80211_KEY_XMIT) rkey->nk_keyidx |= 1 << 31; if((bcmp(rkey->nk_bssid, "\xff\xff\xff\xff\xff\xff", IEEE80211_ADDR_LEN) == 0) || (bcmp(rkey->nk_bssid, "\x0\x0\x0\x0\x0\x0", IEEE80211_ADDR_LEN) == 0)) { /* group key - nothing to do in ndis */ } else { /* pairwise key */ rkey->nk_keyidx |= 1 
<< 30; } /* need to set bit 29 based on keyrsc */ rkey->nk_keylen = wk->ik_keylen; if (wk->ik_type == IEEE80211_CIPHER_TKIP && wk->ik_keylen == 32) { /* * key data needs to be offset by 4 due * to mismatch between NDIS spec and BSD?? */ error = copyin(wk->ik_keydata, rkey->nk_keydata + 4, 16); if(error) { device_printf(sc->ndis_dev, "copyin of " "keydata(0) to kernel space failed\n"); free(rkey, M_TEMP); break; } error = copyin(wk->ik_keydata + 24, rkey->nk_keydata + 20, 8); if(error) { device_printf(sc->ndis_dev, "copyin of " "keydata(1) to kernel space failed\n"); free(rkey, M_TEMP); break; } error = copyin(wk->ik_keydata + 16, rkey->nk_keydata + 28, 8); if(error) { device_printf(sc->ndis_dev, "copyin of " "keydata(2) to kernel space failed\n"); free(rkey, M_TEMP); break; } } else { error = copyin(wk->ik_keydata, rkey->nk_keydata + 4, wk->ik_keylen); if(error) { device_printf(sc->ndis_dev, "copyin of " "keydata(CCMP) to kernel space failed\n"); free(rkey, M_TEMP); break; } } error = ndis_set_info(sc, OID_802_11_ADD_KEY, rkey, &len); break; case IEEE80211_CIPHER_AES_CCM: return(ENOTTY); default: return(ENOTTY); } return(error); } static int ndis_80211_ioctl_set(struct ifnet *ifp, u_long command, caddr_t data) { struct ndis_softc *sc; struct ieee80211req *ireq; int error, len, arg, ucnt; uint8_t nodename[IEEE80211_NWID_LEN]; uint16_t nodename_u[IEEE80211_NWID_LEN + 1]; uint16_t *ucode; struct ieee80211req_del_key *rk; struct ieee80211req_key *wk; unsigned char *wpa_ie; ndis_80211_ssid ssid; ndis_80211_remove_key rkey; sc = ifp->if_softc; ireq = (struct ieee80211req *) data; switch (ireq->i_type) { case IEEE80211_IOC_MLME: case IEEE80211_IOC_ROAMING: case IEEE80211_IOC_COUNTERMEASURES: case IEEE80211_IOC_DROPUNENCRYPTED: error = 0; break; case IEEE80211_IOC_PRIVACY: len = sizeof(arg); arg = NDIS_80211_PRIVFILT_8021XWEP; error = ndis_set_info(sc, OID_802_11_PRIVACY_FILTER, &arg, &len); if (error) { device_printf(sc->ndis_dev, "setting wep privacy filter failed\n"); 
error = 0; } break; case IEEE80211_IOC_WPA: /* nothing to do */ error = 0; break; case IEEE80211_IOC_OPTIE: wpa_ie = (char*)ireq->i_data; if (ireq->i_len < 14 || !wpa_ie) { /* cannot figure out anything */ arg = NDIS_80211_AUTHMODE_OPEN; error = ndis_set_info(sc, OID_802_11_AUTHENTICATION_MODE, &arg, &len); return(error); } if (wpa_ie[0] == IEEE80211_ELEMID_RSN) { error = ENOTTY; break; } else if (wpa_ie[0] == IEEE80211_ELEMID_VENDOR) { /* set the encryption based on multicast cipher */ if (!memcmp(wpa_ie + 8, "\x00\x50\xf2\x02", 4)) { len = sizeof(arg); arg = NDIS_80211_WEPSTAT_ENC2ENABLED; error = ndis_set_info(sc, OID_802_11_ENCRYPTION_STATUS, &arg, &len); if (error) { device_printf(sc->ndis_dev, "setting " "encryption status to " "ENC2 failed\n"); /* continue anyway */ } } } /* set the authentication mode */ ucnt = wpa_ie[12] + 256* wpa_ie[13]; /* 4 bytes per unicast cipher */ ucnt = 14 + 4*ucnt + 2; /* account for number of authsels */ if (ireq->i_len < ucnt) { arg = NDIS_80211_AUTHMODE_WPANONE; } else { if (!memcmp((void*)(&wpa_ie[ucnt]), "\x00\x50\xf2\x02", 4)) { arg = NDIS_80211_AUTHMODE_WPAPSK; } else if (!memcmp((void*)(&wpa_ie[ucnt]), "\x00\x50\xf2\x01", 4)) { arg = NDIS_80211_AUTHMODE_WPA; } else { arg = NDIS_80211_AUTHMODE_WPANONE; } } len = sizeof(arg); error = ndis_set_info(sc, OID_802_11_AUTHENTICATION_MODE, &arg, &len); if (error) { device_printf(sc->ndis_dev, "setting authentication mode to WPA-PSK failed\n"); break; } break; case IEEE80211_IOC_SSID: len = sizeof(ssid); bzero((char*)&ssid, len); ssid.ns_ssidlen = ireq->i_len; error = copyin(ireq->i_data, &(ssid.ns_ssid), ireq->i_len); if (error) break; error = ndis_set_info(sc, OID_802_11_SSID, &ssid, &len); if (error) { device_printf(sc->ndis_dev, "setting SSID to %s\n", ssid.ns_ssid); } break; case IEEE80211_IOC_DELKEY: len = sizeof(rkey); bzero((char*)&rkey, len); rk = (struct ieee80211req_del_key*)ireq->i_data; rkey.nk_len = len; rkey.nk_keyidx = rk->idk_keyix; error = copyin(rk->idk_macaddr, 
&(rkey.nk_bssid), sizeof(ndis_80211_macaddr)); if (error) break; error = ndis_set_info(sc, OID_802_11_REMOVE_KEY, &rkey, &len); if (error) device_printf(sc->ndis_dev, "deleting key\n"); break; case IEEE80211_IOC_WPAKEY: wk = (struct ieee80211req_key*)ireq->i_data; error = ndis_add_key(sc, wk, ireq->i_len); break; case IEEE80211_IOC_SCAN_REQ: len = 0; error = ndis_set_info(sc, OID_802_11_BSSID_LIST_SCAN, NULL, &len); tsleep(&error, PPAUSE|PCATCH, "ssidscan", hz * 2); rt_ieee80211msg(ifp, RTM_IEEE80211_SCAN, NULL, 0); break; case IEEE80211_IOC_STATIONNAME: error = suser(curthread); if (error) break; if (ireq->i_val != 0 || ireq->i_len > IEEE80211_NWID_LEN) { error = EINVAL; break; } bzero((char *)nodename, IEEE80211_NWID_LEN); error = copyin(ireq->i_data, nodename, ireq->i_len); if (error) break; ucode = nodename_u; ndis_ascii_to_unicode((char *)nodename, &ucode); len = ireq->i_len * 2; error = ndis_set_info(sc, OID_GEN_MACHINE_NAME, &nodename_u, &len); break; default: error = ieee80211_ioctl(&sc->ic, command, data); if (error == ENETRESET) { ndis_setstate_80211(sc); error = 0; } } return(error); } static void ndis_resettask(w, arg) ndis_work_item *w; void *arg; { struct ndis_softc *sc; sc = arg; ndis_reset_nic(sc); return; } static void ndis_watchdog(ifp) struct ifnet *ifp; { struct ndis_softc *sc; sc = ifp->if_softc; NDIS_LOCK(sc); ifp->if_oerrors++; device_printf(sc->ndis_dev, "watchdog timeout\n"); NDIS_UNLOCK(sc); NdisScheduleWorkItem(&sc->ndis_resetitem); NdisScheduleWorkItem(&sc->ndis_startitem); return; } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. 
 */
static void
ndis_stop(sc)
	struct ndis_softc	*sc;
{
	struct ifnet		*ifp;

	ifp = sc->ifp;
	/* Cancel the periodic ndis_tick callout before tearing down. */
	untimeout(ndis_tick, sc, sc->ndis_stat_ch);

	NDIS_LOCK(sc);
	ifp->if_timer = 0;
	sc->ndis_link = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	NDIS_UNLOCK(sc);

	/* Halt the NDIS miniport; done after dropping the softc lock. */
	ndis_halt_nic(sc);

	return;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
void
ndis_shutdown(dev)
	device_t		dev;
{
	struct ndis_softc	*sc;

	sc = device_get_softc(dev);
	ndis_stop(sc);

	return;
}
Index: stable/6/sys/dev/ixgb/if_ixgb.c
===================================================================
--- stable/6/sys/dev/ixgb/if_ixgb.c	(revision 149421)
+++ stable/6/sys/dev/ixgb/if_ixgb.c	(revision 149422)
@@ -1,2501 +1,2503 @@
/*******************************************************************************

  Copyright (c) 2001-2004, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ***************************************************************************/ /*$FreeBSD$*/ #include /********************************************************************* * Set this to one to display debug statistics *********************************************************************/ int ixgb_display_debug_stats = 0; /********************************************************************* * Linked list of board private structures for all NICs found *********************************************************************/ struct adapter *ixgb_adapter_list = NULL; /********************************************************************* * Driver version *********************************************************************/ char ixgb_driver_version[] = "1.0.6"; char ixgb_copyright[] = "Copyright (c) 2001-2004 Intel Corporation."; /********************************************************************* * PCI Device ID Table * * Used by probe to select devices to load on * Last field stores an index into ixgb_strings * Last entry must be all 0s * * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } *********************************************************************/ static ixgb_vendor_info_t ixgb_vendor_info_array[] = { /* Intel(R) PRO/10000 Network Connection */ {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX, PCI_ANY_ID, PCI_ANY_ID, 0}, {INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR, PCI_ANY_ID, PCI_ANY_ID, 0}, /* required last entry */ {0, 0, 0, 0, 0} }; 
/********************************************************************* * Table of branding strings for all supported NICs. *********************************************************************/ static char *ixgb_strings[] = { "Intel(R) PRO/10GbE Network Driver" }; /********************************************************************* * Function prototypes *********************************************************************/ static int ixgb_probe(device_t); static int ixgb_attach(device_t); static int ixgb_detach(device_t); static int ixgb_shutdown(device_t); static void ixgb_intr(void *); static void ixgb_start(struct ifnet *); static void ixgb_start_locked(struct ifnet *); static int ixgb_ioctl(struct ifnet *, IOCTL_CMD_TYPE, caddr_t); static void ixgb_watchdog(struct ifnet *); static void ixgb_init(void *); static void ixgb_init_locked(struct adapter *); static void ixgb_stop(void *); static void ixgb_media_status(struct ifnet *, struct ifmediareq *); static int ixgb_media_change(struct ifnet *); static void ixgb_identify_hardware(struct adapter *); static int ixgb_allocate_pci_resources(struct adapter *); static void ixgb_free_pci_resources(struct adapter *); static void ixgb_local_timer(void *); static int ixgb_hardware_init(struct adapter *); static void ixgb_setup_interface(device_t, struct adapter *); static int ixgb_setup_transmit_structures(struct adapter *); static void ixgb_initialize_transmit_unit(struct adapter *); static int ixgb_setup_receive_structures(struct adapter *); static void ixgb_initialize_receive_unit(struct adapter *); static void ixgb_enable_intr(struct adapter *); static void ixgb_disable_intr(struct adapter *); static void ixgb_free_transmit_structures(struct adapter *); static void ixgb_free_receive_structures(struct adapter *); static void ixgb_update_stats_counters(struct adapter *); static void ixgb_clean_transmit_interrupts(struct adapter *); static int ixgb_allocate_receive_structures(struct adapter *); static int 
ixgb_allocate_transmit_structures(struct adapter *); static void ixgb_process_receive_interrupts(struct adapter *, int); static void ixgb_receive_checksum(struct adapter *, struct ixgb_rx_desc * rx_desc, struct mbuf *); static void ixgb_transmit_checksum_setup(struct adapter *, struct mbuf *, u_int8_t *); static void ixgb_set_promisc(struct adapter *); static void ixgb_disable_promisc(struct adapter *); static void ixgb_set_multi(struct adapter *); static void ixgb_print_hw_stats(struct adapter *); static void ixgb_print_link_status(struct adapter *); static int ixgb_get_buf(int i, struct adapter *, struct mbuf *); static void ixgb_enable_vlans(struct adapter * adapter); static int ixgb_encap(struct adapter * adapter, struct mbuf * m_head); static int ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS); static int ixgb_dma_malloc(struct adapter *, bus_size_t, struct ixgb_dma_alloc *, int); static void ixgb_dma_free(struct adapter *, struct ixgb_dma_alloc *); /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ static device_method_t ixgb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ixgb_probe), DEVMETHOD(device_attach, ixgb_attach), DEVMETHOD(device_detach, ixgb_detach), DEVMETHOD(device_shutdown, ixgb_shutdown), {0, 0} }; static driver_t ixgb_driver = { "ixgb", ixgb_methods, sizeof(struct adapter), }; static devclass_t ixgb_devclass; DRIVER_MODULE(if_ixgb, pci, ixgb_driver, ixgb_devclass, 0, 0); MODULE_DEPEND(if_ixgb, pci, 1, 1, 1); MODULE_DEPEND(if_ixgb, ether, 1, 1, 1); /* some defines for controlling descriptor fetches in h/w */ #define RXDCTL_PTHRESH_DEFAULT 128 /* chip considers prefech below this */ #define RXDCTL_HTHRESH_DEFAULT 16 /* chip will only prefetch if tail is * pushed this many descriptors from * head */ #define RXDCTL_WTHRESH_DEFAULT 0 /* chip writes back at this many or RXT0 */ 
/*********************************************************************
 *  Device identification routine
 *
 *  ixgb_probe determines if the driver should be loaded on
 *  adapter based on PCI vendor/device id of the adapter.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgb_probe(device_t dev)
{
	ixgb_vendor_info_t *ent;

	u_int16_t       pci_vendor_id = 0;
	u_int16_t       pci_device_id = 0;
	u_int16_t       pci_subvendor_id = 0;
	u_int16_t       pci_subdevice_id = 0;
	char            adapter_name[60];

	INIT_DEBUGOUT("ixgb_probe: begin");

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != IXGB_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	/* Scan the table; PCI_ANY_ID wildcards the sub IDs. */
	ent = ixgb_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&
		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == PCI_ANY_ID)) &&
		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == PCI_ANY_ID))) {
			/*
			 * Use snprintf rather than sprintf so the
			 * description can never overrun adapter_name.
			 */
			snprintf(adapter_name, sizeof(adapter_name),
			    "%s, Version - %s",
			    ixgb_strings[ent->index],
			    ixgb_driver_version);
			device_set_desc_copy(dev, adapter_name);
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}

	return (ENXIO);
}

/*********************************************************************
 *  Device initialization routine
 *
 *  The attach entry point is called when the driver is being loaded.
 *  This routine identifies the type of hardware, allocates all resources
 *  and initializes the hardware.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
ixgb_attach(device_t dev)
{
	struct adapter *adapter;
	int             tsize, rsize;
	int             error = 0;

	printf("ixgb%d: %s\n", device_get_unit(dev), ixgb_copyright);
	INIT_DEBUGOUT("ixgb_attach: begin");

	/* Allocate, clear, and link in our adapter structure */
	if (!(adapter = device_get_softc(dev))) {
		printf("ixgb: adapter structure allocation failed\n");
		return (ENOMEM);
	}
	bzero(adapter, sizeof(struct adapter));
	adapter->dev = dev;
	adapter->osdep.dev = dev;
	adapter->unit = device_get_unit(dev);
	IXGB_LOCK_INIT(adapter, device_get_nameunit(dev));

	/* Prepend ourselves to the global doubly-linked adapter list. */
	if (ixgb_adapter_list != NULL)
		ixgb_adapter_list->prev = adapter;
	adapter->next = ixgb_adapter_list;
	ixgb_adapter_list = adapter;

	/* SYSCTL APIs */
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
	    OID_AUTO, "stats", CTLTYPE_INT | CTLFLAG_RW,
	    (void *)adapter, 0,
	    ixgb_sysctl_stats, "I", "Statistics");

	callout_init(&adapter->timer, CALLOUT_MPSAFE);

	/* Determine hardware revision */
	ixgb_identify_hardware(adapter);

	/* Parameters (to be read from user) */
	adapter->num_tx_desc = IXGB_MAX_TXD;
	adapter->num_rx_desc = IXGB_MAX_RXD;
	adapter->tx_int_delay = TIDV;
	adapter->rx_int_delay = RDTR;
	adapter->rx_buffer_len = IXGB_RXBUFFER_2048;

	/* Flow-control watermarks and behavior. */
	adapter->hw.fc.high_water = FCRTH;
	adapter->hw.fc.low_water = FCRTL;
	adapter->hw.fc.pause_time = FCPAUSE;
	adapter->hw.fc.send_xon = TRUE;
	adapter->hw.fc.type = FLOW_CONTROL;

	/* Set the max frame size assuming standard ethernet sized frames */
	adapter->hw.max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN;

	if (ixgb_allocate_pci_resources(adapter)) {
		printf("ixgb%d: Allocation of PCI resources failed\n",
		    adapter->unit);
		error = ENXIO;
		goto err_pci;
	}
	/* Descriptor rings must be sized to a 4K boundary. */
	tsize = IXGB_ROUNDUP(adapter->num_tx_desc *
	    sizeof(struct ixgb_tx_desc), 4096);

	/* Allocate Transmit Descriptor ring */
	if (ixgb_dma_malloc(adapter, tsize, &adapter->txdma,
	    BUS_DMA_NOWAIT)) {
		printf("ixgb%d: Unable to allocate TxDescriptor memory\n",
		    adapter->unit);
		error = ENOMEM;
		goto err_tx_desc;
	}
	adapter->tx_desc_base =
	    (struct ixgb_tx_desc *) adapter->txdma.dma_vaddr;

	rsize = IXGB_ROUNDUP(adapter->num_rx_desc *
	    sizeof(struct ixgb_rx_desc), 4096);

	/* Allocate Receive Descriptor ring */
	if (ixgb_dma_malloc(adapter, rsize, &adapter->rxdma,
	    BUS_DMA_NOWAIT)) {
		printf("ixgb%d: Unable to allocate rx_desc memory\n",
		    adapter->unit);
		error = ENOMEM;
		goto err_rx_desc;
	}
	adapter->rx_desc_base =
	    (struct ixgb_rx_desc *) adapter->rxdma.dma_vaddr;

	/* Initialize the hardware */
	if (ixgb_hardware_init(adapter)) {
		printf("ixgb%d: Unable to initialize the hardware\n",
		    adapter->unit);
		error = EIO;
		goto err_hw_init;
	}
	/* Setup OS specific network interface */
	ixgb_setup_interface(dev, adapter);

	/* Initialize statistics */
	ixgb_clear_hw_cntrs(&adapter->hw);
	ixgb_update_stats_counters(adapter);

	INIT_DEBUGOUT("ixgb_attach: end");
	return (0);

	/* Unwind in reverse order of acquisition (goto cleanup). */
err_hw_init:
	ixgb_dma_free(adapter, &adapter->rxdma);
err_rx_desc:
	ixgb_dma_free(adapter, &adapter->txdma);
err_tx_desc:
err_pci:
	ixgb_free_pci_resources(adapter);
	sysctl_ctx_free(&adapter->sysctl_ctx);
	return (error);
}

/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
* * return 0 on success, positive on failure *********************************************************************/ static int ixgb_detach(device_t dev) { struct adapter *adapter = device_get_softc(dev); struct ifnet *ifp = adapter->ifp; INIT_DEBUGOUT("ixgb_detach: begin"); IXGB_LOCK(adapter); adapter->in_detach = 1; ixgb_stop(adapter); IXGB_UNLOCK(adapter); #if __FreeBSD_version < 500000 ether_ifdetach(adapter->ifp, ETHER_BPF_SUPPORTED); #else ether_ifdetach(adapter->ifp); if_free(adapter->ifp); #endif ixgb_free_pci_resources(adapter); /* Free Transmit Descriptor ring */ if (adapter->tx_desc_base) { ixgb_dma_free(adapter, &adapter->txdma); adapter->tx_desc_base = NULL; } /* Free Receive Descriptor ring */ if (adapter->rx_desc_base) { ixgb_dma_free(adapter, &adapter->rxdma); adapter->rx_desc_base = NULL; } /* Remove from the adapter list */ if (ixgb_adapter_list == adapter) ixgb_adapter_list = adapter->next; if (adapter->next != NULL) adapter->next->prev = adapter->prev; if (adapter->prev != NULL) adapter->prev->next = adapter->next; ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); ifp->if_timer = 0; IXGB_LOCK_DESTROY(adapter); return (0); } /********************************************************************* * * Shutdown entry point * **********************************************************************/ static int ixgb_shutdown(device_t dev) { struct adapter *adapter = device_get_softc(dev); IXGB_LOCK(adapter); ixgb_stop(adapter); IXGB_UNLOCK(adapter); return (0); } /********************************************************************* * Transmit entry point * * ixgb_start is called by the stack to initiate a transmit. * The driver will remain in this routine as long as there are * packets to transmit and transmit resources are available. * In case resources are not available stack is notified and * the packet is requeued. 
**********************************************************************/ static void ixgb_start_locked(struct ifnet * ifp) { struct mbuf *m_head; struct adapter *adapter = ifp->if_softc; IXGB_LOCK_ASSERT(adapter); if (!adapter->link_active) return; while (ifp->if_snd.ifq_head != NULL) { IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; if (ixgb_encap(adapter, m_head)) { ifp->if_flags |= IFF_OACTIVE; IF_PREPEND(&ifp->if_snd, m_head); break; } /* Send a copy of the frame to the BPF listener */ #if __FreeBSD_version < 500000 if (ifp->if_bpf) bpf_mtap(ifp, m_head); #else BPF_MTAP(ifp, m_head); #endif /* Set timeout in case hardware has problems transmitting */ ifp->if_timer = IXGB_TX_TIMEOUT; } return; } static void ixgb_start(struct ifnet *ifp) { struct adapter *adapter = ifp->if_softc; IXGB_LOCK(adapter); ixgb_start_locked(ifp); IXGB_UNLOCK(adapter); return; } /********************************************************************* * Ioctl entry point * * ixgb_ioctl is called when the user wants to configure the * interface. 
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static int
ixgb_ioctl(struct ifnet * ifp, IOCTL_CMD_TYPE command, caddr_t data)
{
	int mask, error = 0;
	struct ifreq *ifr = (struct ifreq *) data;
	struct adapter *adapter = ifp->if_softc;

	/* Refuse configuration changes while the device is detaching. */
	if (adapter->in_detach)
		goto out;

	switch (command) {
	case SIOCSIFADDR:
	case SIOCGIFADDR:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFADDR (Get/Set Interface Addr)");
		ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");
		if (ifr->ifr_mtu > IXGB_MAX_JUMBO_FRAME_SIZE - ETHER_HDR_LEN) {
			error = EINVAL;
		} else {
			IXGB_LOCK(adapter);
			ifp->if_mtu = ifr->ifr_mtu;
			adapter->hw.max_frame_size =
			    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;

			/* Reinitialize so the new frame size takes effect. */
			ixgb_init_locked(adapter);
			IXGB_UNLOCK(adapter);
		}
		break;
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFFLAGS (Set Interface Flags)");
		IXGB_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if (!(ifp->if_flags & IFF_RUNNING)) {
				ixgb_init_locked(adapter);
			}
			/* Resync promiscuous state with current flags. */
			ixgb_disable_promisc(adapter);
			ixgb_set_promisc(adapter);
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				ixgb_stop(adapter);
			}
		}
		IXGB_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_flags & IFF_RUNNING) {
			/* Quiesce interrupts while rewriting the MC table. */
			IXGB_LOCK(adapter);
			ixgb_disable_intr(adapter);
			ixgb_set_multi(adapter);
			ixgb_enable_intr(adapter);
			IXGB_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			/* Toggle hardware checksum offload. */
			if (IFCAP_HWCSUM & ifp->if_capenable)
				ifp->if_capenable &= ~IFCAP_HWCSUM;
			else
				ifp->if_capenable |= IFCAP_HWCSUM;
			if (ifp->if_flags & IFF_RUNNING)
				ixgb_init(adapter);
		}
		break;
	default:
		IOCTL_DEBUGOUT1("ioctl received: UNKNOWN (0x%X)\n", (int)command);
		error = EINVAL;
	}

out:
	return (error);
}

/*********************************************************************
 *  Watchdog entry point
 *
 *  This routine is called whenever hardware quits transmitting.
 *
 **********************************************************************/

static void
ixgb_watchdog(struct ifnet * ifp)
{
	struct adapter *adapter;
	adapter = ifp->if_softc;

	/*
	 * If we are in this routine because of pause frames, then don't
	 * reset the hardware.
	 */
	if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_TXOFF) {
		ifp->if_timer = IXGB_TX_TIMEOUT;
		return;
	}
	printf("ixgb%d: watchdog timeout -- resetting\n", adapter->unit);

	/* Full reset: stop then reinitialize the adapter. */
	ifp->if_flags &= ~IFF_RUNNING;

	ixgb_stop(adapter);
	ixgb_init(adapter);

	ifp->if_oerrors++;

	return;
}

/*********************************************************************
 *  Init entry point
 *
 *  This routine is used in two ways. It is used by the stack as
 *  init entry point in network interface structure. It is also used
 *  by the driver as a hw/sw initialization routine to get to a
 *  consistent state.
 *
 *  return 0 on success, positive on failure
 **********************************************************************/

static void
ixgb_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp;

	INIT_DEBUGOUT("ixgb_init: begin");

	IXGB_LOCK_ASSERT(adapter);

	/* Stop first so we always start from a quiesced state. */
	ixgb_stop(adapter);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IFP2ENADDR(adapter->ifp), adapter->hw.curr_mac_addr,
	    IXGB_ETH_LENGTH_OF_ADDRESS);

	/* Initialize the hardware */
	if (ixgb_hardware_init(adapter)) {
		printf("ixgb%d: Unable to initialize the hardware\n",
		    adapter->unit);
		return;
	}
	ixgb_enable_vlans(adapter);

	/* Prepare transmit descriptors and buffers */
	if (ixgb_setup_transmit_structures(adapter)) {
		printf("ixgb%d: Could not setup transmit structures\n",
		    adapter->unit);
		ixgb_stop(adapter);
		return;
	}
	ixgb_initialize_transmit_unit(adapter);

	/* Setup Multicast table */
	ixgb_set_multi(adapter);

	/* Prepare receive descriptors and buffers */
	if (ixgb_setup_receive_structures(adapter)) {
		printf("ixgb%d: Could not setup receive structures\n",
		    adapter->unit);
		ixgb_stop(adapter);
		return;
	}
	ixgb_initialize_receive_unit(adapter);

	/* Don't loose promiscuous settings */
	ixgb_set_promisc(adapter);

	ifp = adapter->ifp;
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist = IXGB_CHECKSUM_FEATURES;
	else
		ifp->if_hwassist = 0;

	/* Enable jumbo frames */
	if (ifp->if_mtu > ETHERMTU) {
		uint32_t temp_reg;
		IXGB_WRITE_REG(&adapter->hw, MFS,
		    adapter->hw.max_frame_size << IXGB_MFS_SHIFT);
		temp_reg = IXGB_READ_REG(&adapter->hw, CTRL0);
		temp_reg |= IXGB_CTRL0_JFE;
		IXGB_WRITE_REG(&adapter->hw, CTRL0, temp_reg);
	}
	/* Start the periodic link/statistics timer (every 2 seconds). */
	callout_reset(&adapter->timer, 2 * hz, ixgb_local_timer, adapter);
	ixgb_clear_hw_cntrs(&adapter->hw);
#ifdef DEVICE_POLLING
	/*
	 * Only disable interrupts if we are polling, make sure they are on
	 * otherwise.
	 */
	if (ifp->if_flags & IFF_POLLING)
		ixgb_disable_intr(adapter);
	else
#endif				/* DEVICE_POLLING */
		ixgb_enable_intr(adapter);

	return;
}

static void
ixgb_init(void *arg)
{
	struct adapter *adapter = arg;

	/* Locked wrapper used as the ifnet if_init entry point. */
	IXGB_LOCK(adapter);
	ixgb_init_locked(adapter);
	IXGB_UNLOCK(adapter);
	return;
}

#ifdef DEVICE_POLLING
static poll_handler_t ixgb_poll;

static void
ixgb_poll_locked(struct ifnet * ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;
	u_int32_t reg_icr;

	IXGB_LOCK_ASSERT(adapter);

	/* If polling was disabled via capabilities, deregister ourselves. */
	if (!(ifp->if_capenable & IFCAP_POLLING)) {
		ether_poll_deregister(ifp);
		cmd = POLL_DEREGISTER;
	}
	if (cmd == POLL_DEREGISTER) {	/* final call, enable interrupts */
		ixgb_enable_intr(adapter);
		return;
	}
	if (cmd == POLL_AND_CHECK_STATUS) {
		reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
		/* Handle link-state changes detected while polling. */
		if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
			callout_stop(&adapter->timer);
			ixgb_check_for_link(&adapter->hw);
			ixgb_print_link_status(adapter);
			callout_reset(&adapter->timer, 2 * hz, ixgb_local_timer,
			    adapter);
		}
	}
	if (ifp->if_flags & IFF_RUNNING) {
		ixgb_process_receive_interrupts(adapter, count);
		ixgb_clean_transmit_interrupts(adapter);
	}
	if (ifp->if_flags & IFF_RUNNING &&
	    ifp->if_snd.ifq_head != NULL)
		ixgb_start_locked(ifp);
}

static void
ixgb_poll(struct ifnet * ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;

	IXGB_LOCK(adapter);
	ixgb_poll_locked(ifp, cmd, count);
	IXGB_UNLOCK(adapter);
}
#endif				/* DEVICE_POLLING */

/*********************************************************************
 *
 *  Interrupt Service routine
 *
 **********************************************************************/

static void
ixgb_intr(void *arg)
{
	u_int32_t loop_cnt = IXGB_MAX_INTR;
	u_int32_t reg_icr;
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	boolean_t rxdmt0 = FALSE;

	IXGB_LOCK(adapter);
	ifp = adapter->ifp;

#ifdef DEVICE_POLLING
	/* Polling mode owns the device; interrupts are spurious here. */
	if (ifp->if_flags & IFF_POLLING) {
		IXGB_UNLOCK(adapter);
		return;
	}
	if ((ifp->if_capenable & IFCAP_POLLING) &&
	    ether_poll_register(ixgb_poll, ifp)) {
		ixgb_disable_intr(adapter);
		ixgb_poll_locked(ifp, 0, 1);
		IXGB_UNLOCK(adapter);
		return;
	}
#endif				/* DEVICE_POLLING */

	/* Reading ICR tells us which events fired. */
	reg_icr = IXGB_READ_REG(&adapter->hw, ICR);
	if (reg_icr == 0) {
		/* Not our interrupt. */
		IXGB_UNLOCK(adapter);
		return;
	}
	if (reg_icr & IXGB_INT_RXDMT0)
		rxdmt0 = TRUE;

#ifdef _SV_
	if (reg_icr & IXGB_INT_RXDMT0)
		adapter->sv_stats.icr_rxdmt0++;
	if (reg_icr & IXGB_INT_RXO)
		adapter->sv_stats.icr_rxo++;
	if (reg_icr & IXGB_INT_RXT0)
		adapter->sv_stats.icr_rxt0++;
	if (reg_icr & IXGB_INT_TXDW)
		adapter->sv_stats.icr_TXDW++;
#endif				/* _SV_ */

	/* Link status change */
	if (reg_icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)) {
		callout_stop(&adapter->timer);
		ixgb_check_for_link(&adapter->hw);
		ixgb_print_link_status(adapter);
		callout_reset(&adapter->timer, 2 * hz, ixgb_local_timer,
		    adapter);
	}
	/* Service RX/TX up to IXGB_MAX_INTR passes per interrupt. */
	while (loop_cnt > 0) {
		if (ifp->if_flags & IFF_RUNNING) {
			ixgb_process_receive_interrupts(adapter, -1);
			ixgb_clean_transmit_interrupts(adapter);
		}
		loop_cnt--;
	}

	if (rxdmt0 && adapter->raidc) {
		/* Re-arm the RX-descriptor-minimum-threshold interrupt. */
		IXGB_WRITE_REG(&adapter->hw, IMC, IXGB_INT_RXDMT0);
		IXGB_WRITE_REG(&adapter->hw, IMS, IXGB_INT_RXDMT0);
	}
	if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
		ixgb_start_locked(ifp);

	IXGB_UNLOCK(adapter);
	return;
}

/*********************************************************************
 *
 *  Media Ioctl callback
 *
 *  This routine is called whenever the user queries the status of
 *  the interface using ifconfig.
* **********************************************************************/ static void ixgb_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) { struct adapter *adapter = ifp->if_softc; INIT_DEBUGOUT("ixgb_media_status: begin"); ixgb_check_for_link(&adapter->hw); ixgb_print_link_status(adapter); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!adapter->hw.link_up) return; ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= IFM_1000_SX | IFM_FDX; return; } /********************************************************************* * * Media Ioctl callback * * This routine is called when the user changes speed/duplex using * media/mediopt option with ifconfig. * **********************************************************************/ static int ixgb_media_change(struct ifnet * ifp) { struct adapter *adapter = ifp->if_softc; struct ifmedia *ifm = &adapter->media; INIT_DEBUGOUT("ixgb_media_change: begin"); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); return (0); } /********************************************************************* * * This routine maps the mbufs to tx descriptors. * * return 0 on success, positive on failure **********************************************************************/ static int ixgb_encap(struct adapter * adapter, struct mbuf * m_head) { u_int8_t txd_popts; int i, j, error, nsegs; #if __FreeBSD_version < 500000 struct ifvlan *ifv = NULL; #else struct m_tag *mtag; #endif bus_dma_segment_t segs[IXGB_MAX_SCATTER]; bus_dmamap_t map; struct ixgb_buffer *tx_buffer = NULL; struct ixgb_tx_desc *current_tx_desc = NULL; struct ifnet *ifp = adapter->ifp; /* * Force a cleanup if number of TX descriptors available hits the * threshold */ if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) { ixgb_clean_transmit_interrupts(adapter); } if (adapter->num_tx_desc_avail <= IXGB_TX_CLEANUP_THRESHOLD) { adapter->no_tx_desc_avail1++; return (ENOBUFS); } /* * Map the packet for DMA. 
*/ if (bus_dmamap_create(adapter->txtag, BUS_DMA_NOWAIT, &map)) { adapter->no_tx_map_avail++; return (ENOMEM); } error = bus_dmamap_load_mbuf_sg(adapter->txtag, map, m_head, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { adapter->no_tx_dma_setup++; printf("ixgb%d: ixgb_encap: bus_dmamap_load_mbuf failed; " "error %u\n", adapter->unit, error); bus_dmamap_destroy(adapter->txtag, map); return (error); } KASSERT(nsegs != 0, ("ixgb_encap: empty packet")); if (nsegs > adapter->num_tx_desc_avail) { adapter->no_tx_desc_avail2++; bus_dmamap_destroy(adapter->txtag, map); return (ENOBUFS); } if (ifp->if_hwassist > 0) { ixgb_transmit_checksum_setup(adapter, m_head, &txd_popts); } else txd_popts = 0; /* Find out if we are in vlan mode */ #if __FreeBSD_version < 500000 if ((m_head->m_flags & (M_PROTO1 | M_PKTHDR)) == (M_PROTO1 | M_PKTHDR) && m_head->m_pkthdr.rcvif != NULL && m_head->m_pkthdr.rcvif->if_type == IFT_L2VLAN) ifv = m_head->m_pkthdr.rcvif->if_softc; #else mtag = VLAN_OUTPUT_TAG(ifp, m_head); #endif i = adapter->next_avail_tx_desc; for (j = 0; j < nsegs; j++) { tx_buffer = &adapter->tx_buffer_area[i]; current_tx_desc = &adapter->tx_desc_base[i]; current_tx_desc->buff_addr = htole64(segs[j].ds_addr); current_tx_desc->cmd_type_len = (adapter->txd_cmd | segs[j].ds_len); current_tx_desc->popts = txd_popts; if (++i == adapter->num_tx_desc) i = 0; tx_buffer->m_head = NULL; } adapter->num_tx_desc_avail -= nsegs; adapter->next_avail_tx_desc = i; #if __FreeBSD_version < 500000 if (ifv != NULL) { /* Set the vlan id */ current_tx_desc->vlan = ifv->ifv_tag; #else if (mtag != NULL) { /* Set the vlan id */ current_tx_desc->vlan = VLAN_TAG_VALUE(mtag); #endif /* Tell hardware to add tag */ current_tx_desc->cmd_type_len |= IXGB_TX_DESC_CMD_VLE; } tx_buffer->m_head = m_head; tx_buffer->map = map; bus_dmamap_sync(adapter->txtag, map, BUS_DMASYNC_PREWRITE); /* * Last Descriptor of Packet needs End Of Packet (EOP) */ current_tx_desc->cmd_type_len |= (IXGB_TX_DESC_CMD_EOP); /* * Advance 
the Transmit Descriptor Tail (Tdt), this tells the E1000 * that this frame is available to transmit. */ IXGB_WRITE_REG(&adapter->hw, TDT, i); return (0); } static void ixgb_set_promisc(struct adapter * adapter) { u_int32_t reg_rctl; struct ifnet *ifp = adapter->ifp; reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL); if (ifp->if_flags & IFF_PROMISC) { reg_rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE); IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl); } else if (ifp->if_flags & IFF_ALLMULTI) { reg_rctl |= IXGB_RCTL_MPE; reg_rctl &= ~IXGB_RCTL_UPE; IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl); } return; } static void ixgb_disable_promisc(struct adapter * adapter) { u_int32_t reg_rctl; reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL); reg_rctl &= (~IXGB_RCTL_UPE); reg_rctl &= (~IXGB_RCTL_MPE); IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl); return; } /********************************************************************* * Multicast Update * * This routine is called whenever multicast address list is updated. * **********************************************************************/ static void ixgb_set_multi(struct adapter * adapter) { u_int32_t reg_rctl = 0; u_int8_t mta[MAX_NUM_MULTICAST_ADDRESSES * IXGB_ETH_LENGTH_OF_ADDRESS]; struct ifmultiaddr *ifma; int mcnt = 0; struct ifnet *ifp = adapter->ifp; IOCTL_DEBUGOUT("ixgb_set_multi: begin"); + IF_ADDR_LOCK(ifp); #if __FreeBSD_version < 500000 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { #else TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { #endif if (ifma->ifma_addr->sa_family != AF_LINK) continue; bcopy(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), &mta[mcnt * IXGB_ETH_LENGTH_OF_ADDRESS], IXGB_ETH_LENGTH_OF_ADDRESS); mcnt++; } + IF_ADDR_UNLOCK(ifp); if (mcnt > MAX_NUM_MULTICAST_ADDRESSES) { reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL); reg_rctl |= IXGB_RCTL_MPE; IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl); } else ixgb_mc_addr_list_update(&adapter->hw, mta, mcnt, 0); return; } 
/*********************************************************************
 *  Timer routine
 *
 *  This routine checks for link status and updates statistics.
 *
 **********************************************************************/

static void
ixgb_local_timer(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	ifp = adapter->ifp;

	IXGB_LOCK(adapter);

	ixgb_check_for_link(&adapter->hw);
	ixgb_print_link_status(adapter);
	ixgb_update_stats_counters(adapter);
	if (ixgb_display_debug_stats && ifp->if_flags & IFF_RUNNING) {
		ixgb_print_hw_stats(adapter);
	}
	/* Re-arm: this timer fires every two seconds. */
	callout_reset(&adapter->timer, 2 * hz, ixgb_local_timer, adapter);

	IXGB_UNLOCK(adapter);
	return;
}

static void
ixgb_print_link_status(struct adapter * adapter)
{
	/* Log only on link-state transitions, tracked via link_active. */
	if (adapter->hw.link_up) {
		if (!adapter->link_active) {
			printf("ixgb%d: Link is up %d Mbps %s \n",
			    adapter->unit,
			    10000,
			    "Full Duplex");
			adapter->link_active = 1;
		}
	} else {
		if (adapter->link_active) {
			printf("ixgb%d: Link is Down \n", adapter->unit);
			adapter->link_active = 0;
		}
	}

	return;
}

/*********************************************************************
 *
 *  This routine disables all traffic on the adapter by issuing a
 *  global reset on the MAC and deallocates TX/RX buffers.
 *
 **********************************************************************/

static void
ixgb_stop(void *arg)
{
	struct ifnet *ifp;
	struct adapter *adapter = arg;
	ifp = adapter->ifp;

	IXGB_LOCK_ASSERT(adapter);

	INIT_DEBUGOUT("ixgb_stop: begin\n");
	ixgb_disable_intr(adapter);
	/*
	 * Clear the stopped flag first so ixgb_adapter_stop() performs a
	 * full reset even if it believes the adapter is already stopped.
	 */
	adapter->hw.adapter_stopped = FALSE;
	ixgb_adapter_stop(&adapter->hw);
	callout_stop(&adapter->timer);
	ixgb_free_transmit_structures(adapter);
	ixgb_free_receive_structures(adapter);

	/* Tell the stack that the interface is no longer active */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	return;
}

/*********************************************************************
 *
 *  Determine hardware revision.
 *
 **********************************************************************/
static void
ixgb_identify_hardware(struct adapter * adapter)
{
	device_t dev = adapter->dev;

	/* Make sure our PCI config space has the necessary stuff set */
	adapter->hw.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
	if (!((adapter->hw.pci_cmd_word & PCIM_CMD_BUSMASTEREN) &&
	      (adapter->hw.pci_cmd_word & PCIM_CMD_MEMEN))) {
		printf("ixgb%d: Memory Access and/or Bus Master bits were not set!\n",
		    adapter->unit);
		adapter->hw.pci_cmd_word |=
		    (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN);
		pci_write_config(dev, PCIR_COMMAND, adapter->hw.pci_cmd_word, 2);
	}
	/* Save off the information about this board */
	adapter->hw.vendor_id = pci_get_vendor(dev);
	adapter->hw.device_id = pci_get_device(dev);
	adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1);
	adapter->hw.subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	adapter->hw.subsystem_id = pci_read_config(dev, PCIR_SUBDEV_0, 2);

	/* Set MacType, etc. based on this PCI info */
	switch (adapter->hw.device_id) {
	case IXGB_DEVICE_ID_82597EX:
	case IXGB_DEVICE_ID_82597EX_SR:
		adapter->hw.mac_type = ixgb_82597;
		break;
	default:
		/* Unsupported id: warn but continue (probe filtered already). */
		INIT_DEBUGOUT1("Unknown device if 0x%x", adapter->hw.device_id);
		printf("ixgb%d: unsupported device id 0x%x\n",
		    adapter->unit, adapter->hw.device_id);
	}

	return;
}

static int
ixgb_allocate_pci_resources(struct adapter * adapter)
{
	int rid;
	device_t dev = adapter->dev;

	/* Map the device registers (BAR at IXGB_MMBA). */
	rid = IXGB_MMBA;
	adapter->res_memory = bus_alloc_resource(dev, SYS_RES_MEMORY,
	    &rid, 0, ~0, 1,
	    RF_ACTIVE);
	if (!(adapter->res_memory)) {
		printf("ixgb%d: Unable to allocate bus resource: memory\n",
		    adapter->unit);
		return (ENXIO);
	}
	adapter->osdep.mem_bus_space_tag =
	    rman_get_bustag(adapter->res_memory);
	adapter->osdep.mem_bus_space_handle =
	    rman_get_bushandle(adapter->res_memory);
	adapter->hw.hw_addr = (uint8_t *) & adapter->osdep.mem_bus_space_handle;

	/* Allocate and hook the shared interrupt line. */
	rid = 0x0;
	adapter->res_interrupt = bus_alloc_resource(dev, SYS_RES_IRQ,
	    &rid, 0, ~0, 1,
	    RF_SHAREABLE | RF_ACTIVE);
	if (!(adapter->res_interrupt)) {
		printf("ixgb%d: Unable to allocate bus resource: interrupt\n",
		    adapter->unit);
		return (ENXIO);
	}
	if (bus_setup_intr(dev, adapter->res_interrupt,
	    INTR_TYPE_NET | INTR_MPSAFE,
	    (void (*) (void *))ixgb_intr, adapter,
	    &adapter->int_handler_tag)) {
		printf("ixgb%d: Error registering interrupt handler!\n",
		    adapter->unit);
		return (ENXIO);
	}
	adapter->hw.back = &adapter->osdep;

	return (0);
}

static void
ixgb_free_pci_resources(struct adapter * adapter)
{
	device_t dev = adapter->dev;

	/* Safe to call with partially-allocated resources (attach unwind). */
	if (adapter->res_interrupt != NULL) {
		bus_teardown_intr(dev, adapter->res_interrupt,
		    adapter->int_handler_tag);
		bus_release_resource(dev, SYS_RES_IRQ, 0,
		    adapter->res_interrupt);
	}
	if (adapter->res_memory != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, IXGB_MMBA,
		    adapter->res_memory);
	}
	if (adapter->res_ioport != NULL) {
		bus_release_resource(dev, SYS_RES_IOPORT, adapter->io_rid,
		    adapter->res_ioport);
	}
	return;
}

/*********************************************************************
 *
 *  Initialize the hardware to a configuration as specified by the
 *  adapter structure. The controller is reset, the EEPROM is
 *  verified, the MAC address is set, then the shared initialization
 *  routines are called.
 *
 **********************************************************************/
static int
ixgb_hardware_init(struct adapter * adapter)
{
	/* Issue a global reset */
	adapter->hw.adapter_stopped = FALSE;
	ixgb_adapter_stop(&adapter->hw);

	/* Make sure we have a good EEPROM before we read from it */
	if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
		printf("ixgb%d: The EEPROM Checksum Is Not Valid\n",
		    adapter->unit);
		return (EIO);
	}
	if (!ixgb_init_hw(&adapter->hw)) {
		printf("ixgb%d: Hardware Initialization Failed",
		    adapter->unit);
		return (EIO);
	}

	return (0);
}

/*********************************************************************
 *
 *  Setup networking device structure and register an interface.
 *
 **********************************************************************/
static void
ixgb_setup_interface(device_t dev, struct adapter * adapter)
{
	struct ifnet *ifp;
	INIT_DEBUGOUT("ixgb_setup_interface: begin");

	ifp = adapter->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s: can not if_alloc()\n", device_get_nameunit(dev));
#if __FreeBSD_version >= 502000
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
#else
	ifp->if_unit = adapter->unit;
	ifp->if_name = "ixgb";
#endif
	ifp->if_mtu = ETHERMTU;
	ifp->if_baudrate = 1000000000;
	ifp->if_init = ixgb_init;
	ifp->if_softc = adapter;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ixgb_ioctl;
	ifp->if_start = ixgb_start;
	ifp->if_watchdog = ixgb_watchdog;
	/* Send queue depth matches the TX ring size, minus one. */
	ifp->if_snd.ifq_maxlen = adapter->num_tx_desc - 1;

#if __FreeBSD_version < 500000
	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
#else
	ether_ifattach(ifp, adapter->hw.curr_mac_addr);
#endif

	ifp->if_capabilities = IFCAP_HWCSUM;

#ifdef DEVICE_POLLING
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/*
	 * Tell the upper layer(s) we support long frames.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);

#if __FreeBSD_version >= 500000
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
#endif

	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Specify the media types supported by this adapter and register
	 * callbacks to update media and link information
	 */
	ifmedia_init(&adapter->media, IFM_IMASK, ixgb_media_change,
	    ixgb_media_status);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX,
	    0, NULL);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX,
	    0, NULL);
	ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO);

	return;
}

/********************************************************************
 * Manage DMA'able memory.
 *******************************************************************/
static void
ixgb_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
{
	/* Callback for bus_dmamap_load(): records the bus address. */
	if (error)
		return;
	*(bus_addr_t *) arg = segs->ds_addr;
	return;
}

static int
ixgb_dma_malloc(struct adapter * adapter, bus_size_t size,
	struct ixgb_dma_alloc * dma, int mapflags)
{
	int r;

	/* Single physically-contiguous, page-aligned segment. */
	r = bus_dma_tag_create(NULL,	/* parent */
	    PAGE_SIZE, 0,	/* alignment, bounds */
	    BUS_SPACE_MAXADDR,	/* lowaddr */
	    BUS_SPACE_MAXADDR,	/* highaddr */
	    NULL, NULL,	/* filter, filterarg */
	    size,	/* maxsize */
	    1,	/* nsegments */
	    size,	/* maxsegsize */
	    BUS_DMA_ALLOCNOW,	/* flags */
#if __FreeBSD_version >= 502000
	    NULL,	/* lockfunc */
	    NULL,	/* lockfuncarg */
#endif
	    &dma->dma_tag);
	if (r != 0) {
		printf("ixgb%d: ixgb_dma_malloc: bus_dma_tag_create failed; "
		    "error %u\n", adapter->unit, r);
		goto fail_0;
	}
	r = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
	    BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		printf("ixgb%d: ixgb_dma_malloc: bus_dmamem_alloc failed; "
		    "error %u\n", adapter->unit, r);
		goto fail_1;
	}
	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
	    size,
	    ixgb_dmamap_cb,
	    &dma->dma_paddr,
	    mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		printf("ixgb%d: ixgb_dma_malloc: bus_dmamap_load failed; "
		    "error %u\n", adapter->unit, r);
		goto fail_2;
	}
	dma->dma_size = size;
	return (0);

	/* goto-based unwind: free in reverse order of acquisition. */
fail_2:
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
	bus_dma_tag_destroy(dma->dma_tag);
fail_0:
	dma->dma_map = NULL;
	dma->dma_tag = NULL;
	return (r);
}

static void
ixgb_dma_free(struct adapter * adapter, struct ixgb_dma_alloc * dma)
{
	/* Counterpart of ixgb_dma_malloc(); dma must have been loaded. */
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
}

/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire.
* **********************************************************************/ static int ixgb_allocate_transmit_structures(struct adapter * adapter) { if (!(adapter->tx_buffer_area = (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) * adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) { printf("ixgb%d: Unable to allocate tx_buffer memory\n", adapter->unit); return ENOMEM; } bzero(adapter->tx_buffer_area, sizeof(struct ixgb_buffer) * adapter->num_tx_desc); return 0; } /********************************************************************* * * Allocate and initialize transmit structures. * **********************************************************************/ static int ixgb_setup_transmit_structures(struct adapter * adapter) { /* * Setup DMA descriptor areas. */ if (bus_dma_tag_create(NULL, /* parent */ PAGE_SIZE, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES * IXGB_MAX_SCATTER, /* maxsize */ IXGB_MAX_SCATTER, /* nsegments */ MCLBYTES, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ #if __FreeBSD_version >= 502000 NULL, /* lockfunc */ NULL, /* lockfuncarg */ #endif &adapter->txtag)) { printf("ixgb%d: Unable to allocate TX DMA tag\n", adapter->unit); return (ENOMEM); } if (ixgb_allocate_transmit_structures(adapter)) return ENOMEM; bzero((void *)adapter->tx_desc_base, (sizeof(struct ixgb_tx_desc)) * adapter->num_tx_desc); adapter->next_avail_tx_desc = 0; adapter->oldest_used_tx_desc = 0; /* Set number of descriptors available */ adapter->num_tx_desc_avail = adapter->num_tx_desc; /* Set checksum context */ adapter->active_checksum_context = OFFLOAD_NONE; return 0; } /********************************************************************* * * Enable transmit unit. 
 *
 **********************************************************************/
static void
ixgb_initialize_transmit_unit(struct adapter * adapter)
{
	u_int32_t reg_tctl;
	u_int64_t tdba = adapter->txdma.dma_paddr;

	/* Setup the Base and Length of the Tx Descriptor Ring */
	IXGB_WRITE_REG(&adapter->hw, TDBAL,
	    (tdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(&adapter->hw, TDBAH, (tdba >> 32));
	IXGB_WRITE_REG(&adapter->hw, TDLEN,
	    adapter->num_tx_desc *
	    sizeof(struct ixgb_tx_desc));

	/* Setup the HW Tx Head and Tail descriptor pointers */
	IXGB_WRITE_REG(&adapter->hw, TDH, 0);
	IXGB_WRITE_REG(&adapter->hw, TDT, 0);

	HW_DEBUGOUT2("Base = %x, Length = %x\n",
	    IXGB_READ_REG(&adapter->hw, TDBAL),
	    IXGB_READ_REG(&adapter->hw, TDLEN));

	IXGB_WRITE_REG(&adapter->hw, TIDV, adapter->tx_int_delay);

	/* Program the Transmit Control Register */
	reg_tctl = IXGB_READ_REG(&adapter->hw, TCTL);
	/*
	 * NOTE(review): the value read above is immediately overwritten —
	 * the read is dead unless it has a hardware side effect; verify
	 * before removing.
	 */
	reg_tctl = IXGB_TCTL_TCE | IXGB_TCTL_TXEN | IXGB_TCTL_TPDE;
	IXGB_WRITE_REG(&adapter->hw, TCTL, reg_tctl);

	/* Setup Transmit Descriptor Settings for this adapter */
	adapter->txd_cmd = IXGB_TX_DESC_TYPE | IXGB_TX_DESC_CMD_RS;

	if (adapter->tx_int_delay > 0)
		adapter->txd_cmd |= IXGB_TX_DESC_CMD_IDE;
	return;
}

/*********************************************************************
 *
 *  Free all transmit related data structures.
 *
 **********************************************************************/
static void
ixgb_free_transmit_structures(struct adapter * adapter)
{
	struct ixgb_buffer *tx_buffer;
	int i;

	INIT_DEBUGOUT("free_transmit_structures: begin");

	if (adapter->tx_buffer_area != NULL) {
		/* Unload, destroy, and free any still-pending mbufs. */
		tx_buffer = adapter->tx_buffer_area;
		for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
			if (tx_buffer->m_head != NULL) {
				bus_dmamap_unload(adapter->txtag, tx_buffer->map);
				bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
				m_freem(tx_buffer->m_head);
			}
			tx_buffer->m_head = NULL;
		}
	}
	if (adapter->tx_buffer_area != NULL) {
		free(adapter->tx_buffer_area, M_DEVBUF);
		adapter->tx_buffer_area = NULL;
	}
	if (adapter->txtag != NULL) {
		bus_dma_tag_destroy(adapter->txtag);
		adapter->txtag = NULL;
	}
	return;
}

/*********************************************************************
 *
 *  The offload context needs to be set when we transfer the first
 *  packet of a particular protocol (TCP/UDP). We change the
 *  context only if the protocol type changes.
 *
 **********************************************************************/
static void
ixgb_transmit_checksum_setup(struct adapter * adapter,
    struct mbuf * mp,
    u_int8_t * txd_popts)
{
	struct ixgb_context_desc *TXD;
	struct ixgb_buffer *tx_buffer;
	int curr_txd;

	/* Decide whether a new checksum context descriptor is needed. */
	if (mp->m_pkthdr.csum_flags) {

		if (mp->m_pkthdr.csum_flags & CSUM_TCP) {
			*txd_popts = IXGB_TX_DESC_POPTS_TXSM;
			if (adapter->active_checksum_context == OFFLOAD_TCP_IP)
				return;
			else
				adapter->active_checksum_context = OFFLOAD_TCP_IP;
		} else if (mp->m_pkthdr.csum_flags & CSUM_UDP) {
			*txd_popts = IXGB_TX_DESC_POPTS_TXSM;
			if (adapter->active_checksum_context == OFFLOAD_UDP_IP)
				return;
			else
				adapter->active_checksum_context = OFFLOAD_UDP_IP;
		} else {
			*txd_popts = 0;
			return;
		}
	} else {
		*txd_popts = 0;
		return;
	}

	/*
	 * If we reach this point, the checksum offload context needs to be
	 * reset.
	 */
	curr_txd = adapter->next_avail_tx_desc;
	tx_buffer = &adapter->tx_buffer_area[curr_txd];
	/* A context descriptor occupies one slot in the TX ring. */
	TXD = (struct ixgb_context_desc *) & adapter->tx_desc_base[curr_txd];

	TXD->tucss = ENET_HEADER_SIZE + sizeof(struct ip);
	TXD->tucse = 0;

	TXD->mss = 0;

	if (adapter->active_checksum_context == OFFLOAD_TCP_IP) {
		TXD->tucso =
		    ENET_HEADER_SIZE + sizeof(struct ip) +
		    offsetof(struct tcphdr, th_sum);
	} else if (adapter->active_checksum_context == OFFLOAD_UDP_IP) {
		TXD->tucso =
		    ENET_HEADER_SIZE + sizeof(struct ip) +
		    offsetof(struct udphdr, uh_sum);
	}
	TXD->cmd_type_len = IXGB_CONTEXT_DESC_CMD_TCP | IXGB_TX_DESC_CMD_RS
	    | IXGB_CONTEXT_DESC_CMD_IDE;

	tx_buffer->m_head = NULL;

	if (++curr_txd == adapter->num_tx_desc)
		curr_txd = 0;

	adapter->num_tx_desc_avail--;
	adapter->next_avail_tx_desc = curr_txd;
	return;
}

/**********************************************************************
 *
 *  Examine each tx_buffer in the used queue. If the hardware is done
 *  processing the packet then free associated resources. The
 *  tx_buffer is put back on the free queue.
 **********************************************************************/
static void
ixgb_clean_transmit_interrupts(struct adapter * adapter)
{
	int i, num_avail;
	struct ixgb_buffer *tx_buffer;
	struct ixgb_tx_desc *tx_desc;

	IXGB_LOCK_ASSERT(adapter);

	/* Ring completely free: nothing to reclaim */
	if (adapter->num_tx_desc_avail == adapter->num_tx_desc)
		return;

#ifdef _SV_
	adapter->clean_tx_interrupts++;
#endif
	num_avail = adapter->num_tx_desc_avail;
	i = adapter->oldest_used_tx_desc;

	tx_buffer = &adapter->tx_buffer_area[i];
	tx_desc = &adapter->tx_desc_base[i];

	/* Walk forward while hardware has marked descriptors done (DD) */
	while (tx_desc->status & IXGB_TX_DESC_STATUS_DD) {

		tx_desc->status = 0;
		num_avail++;

		if (tx_buffer->m_head) {
			bus_dmamap_sync(adapter->txtag, tx_buffer->map,
					BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(adapter->txtag, tx_buffer->map);
			bus_dmamap_destroy(adapter->txtag, tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
		}
		if (++i == adapter->num_tx_desc)
			i = 0;

		tx_buffer = &adapter->tx_buffer_area[i];
		tx_desc = &adapter->tx_desc_base[i];
	}
	adapter->oldest_used_tx_desc = i;

	/*
	 * If we have enough room, clear IFF_OACTIVE to tell the stack that
	 * it is OK to send packets. If there are no pending descriptors,
	 * clear the timeout. Otherwise, if some descriptors have been freed,
	 * restart the timeout.
	 */
	if (num_avail > IXGB_TX_CLEANUP_THRESHOLD) {
		struct ifnet *ifp = adapter->ifp;

		ifp->if_flags &= ~IFF_OACTIVE;
		if (num_avail == adapter->num_tx_desc)
			ifp->if_timer = 0;
		else if (num_avail == adapter->num_tx_desc_avail)
			ifp->if_timer = IXGB_TX_TIMEOUT;
	}
	adapter->num_tx_desc_avail = num_avail;
	return;
}

/*********************************************************************
 *
 *  Get a buffer from system mbuf buffer pool.
 **********************************************************************/
/*
 * Attach a receive mbuf cluster to RX ring slot i and DMA-map it.
 * If nmp is NULL a fresh cluster is allocated; otherwise nmp is recycled
 * (its length/data pointer are reset).  Returns 0 on success, ENOBUFS if
 * no cluster could be allocated, or the bus_dmamap_load() error.
 */
static int
ixgb_get_buf(int i, struct adapter * adapter,
	     struct mbuf * nmp)
{
	register struct mbuf *mp = nmp;
	struct ixgb_buffer *rx_buffer;
	struct ifnet *ifp;
	bus_addr_t paddr;
	int error;

	ifp = adapter->ifp;

	if (mp == NULL) {
		mp = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (mp == NULL) {
			adapter->mbuf_alloc_failed++;
			return (ENOBUFS);
		}
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
	} else {
		/* Recycle the caller's mbuf: reset to a full, single cluster */
		mp->m_len = mp->m_pkthdr.len = MCLBYTES;
		mp->m_data = mp->m_ext.ext_buf;
		mp->m_next = NULL;
	}

	/* Align the IP header for standard-MTU frames */
	if (ifp->if_mtu <= ETHERMTU) {
		m_adj(mp, ETHER_ALIGN);
	}
	rx_buffer = &adapter->rx_buffer_area[i];

	/*
	 * Using memory from the mbuf cluster pool, invoke the bus_dma
	 * machinery to arrange the memory mapping.
	 */
	error = bus_dmamap_load(adapter->rxtag, rx_buffer->map,
				mtod(mp, void *), mp->m_len,
				ixgb_dmamap_cb, &paddr, 0);
	if (error) {
		m_free(mp);
		return (error);
	}
	rx_buffer->m_head = mp;
	/* Hand the cluster's bus address to the hardware descriptor */
	adapter->rx_desc_base[i].buff_addr = htole64(paddr);
	bus_dmamap_sync(adapter->rxtag, rx_buffer->map, BUS_DMASYNC_PREREAD);

	return (0);
}

/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures. Since we use one
 *  rx_buffer per received packet, the maximum number of rx_buffer's
 *  that we'll need is equal to the number of receive descriptors
 *  that we've allocated.
* **********************************************************************/ static int ixgb_allocate_receive_structures(struct adapter * adapter) { int i, error; struct ixgb_buffer *rx_buffer; if (!(adapter->rx_buffer_area = (struct ixgb_buffer *) malloc(sizeof(struct ixgb_buffer) * adapter->num_rx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) { printf("ixgb%d: Unable to allocate rx_buffer memory\n", adapter->unit); return (ENOMEM); } bzero(adapter->rx_buffer_area, sizeof(struct ixgb_buffer) * adapter->num_rx_desc); error = bus_dma_tag_create(NULL, /* parent */ PAGE_SIZE, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, /* maxsize */ 1, /* nsegments */ MCLBYTES, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ #if __FreeBSD_version >= 502000 NULL, /* lockfunc */ NULL, /* lockfuncarg */ #endif &adapter->rxtag); if (error != 0) { printf("ixgb%d: ixgb_allocate_receive_structures: " "bus_dma_tag_create failed; error %u\n", adapter->unit, error); goto fail_0; } rx_buffer = adapter->rx_buffer_area; for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) { error = bus_dmamap_create(adapter->rxtag, BUS_DMA_NOWAIT, &rx_buffer->map); if (error != 0) { printf("ixgb%d: ixgb_allocate_receive_structures: " "bus_dmamap_create failed; error %u\n", adapter->unit, error); goto fail_1; } } for (i = 0; i < adapter->num_rx_desc; i++) { if (ixgb_get_buf(i, adapter, NULL) == ENOBUFS) { adapter->rx_buffer_area[i].m_head = NULL; adapter->rx_desc_base[i].buff_addr = 0; return (ENOBUFS); } } return (0); fail_1: bus_dma_tag_destroy(adapter->rxtag); fail_0: adapter->rxtag = NULL; free(adapter->rx_buffer_area, M_DEVBUF); adapter->rx_buffer_area = NULL; return (error); } /********************************************************************* * * Allocate and initialize receive structures. 
 **********************************************************************/
/*
 * Zero the RX descriptor ring, allocate/attach receive buffers, and reset
 * the software ring indices.  Returns 0 or ENOMEM.
 */
static int
ixgb_setup_receive_structures(struct adapter * adapter)
{
	bzero((void *)adapter->rx_desc_base,
	      (sizeof(struct ixgb_rx_desc)) * adapter->num_rx_desc);

	if (ixgb_allocate_receive_structures(adapter))
		return ENOMEM;

	/* Setup our descriptor pointers */
	adapter->next_rx_desc_to_check = 0;
	adapter->next_rx_desc_to_use = 0;
	return (0);
}

/*********************************************************************
 *
 *  Enable receive unit.
 *
 **********************************************************************/
static void
ixgb_initialize_receive_unit(struct adapter * adapter)
{
	u_int32_t reg_rctl;
	u_int32_t reg_rxcsum;
	u_int32_t reg_rxdctl;
	struct ifnet *ifp;
	u_int64_t rdba = adapter->rxdma.dma_paddr;	/* bus address of RX ring */

	ifp = adapter->ifp;

	/*
	 * Make sure receives are disabled while setting up the descriptor
	 * ring
	 */
	reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
	IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl & ~IXGB_RCTL_RXEN);

	/* Set the Receive Delay Timer Register */
	IXGB_WRITE_REG(&adapter->hw, RDTR, adapter->rx_int_delay);

	/* Setup the Base and Length of the Rx Descriptor Ring */
	IXGB_WRITE_REG(&adapter->hw, RDBAL, (rdba & 0x00000000ffffffffULL));
	IXGB_WRITE_REG(&adapter->hw, RDBAH, (rdba >> 32));
	IXGB_WRITE_REG(&adapter->hw, RDLEN, adapter->num_rx_desc *
		       sizeof(struct ixgb_rx_desc));

	/* Setup the HW Rx Head and Tail Descriptor Pointers */
	IXGB_WRITE_REG(&adapter->hw, RDH, 0);
	IXGB_WRITE_REG(&adapter->hw, RDT, adapter->num_rx_desc - 1);

	/* Write-back / prefetch thresholds for the RX descriptor engine */
	reg_rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT
		| RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT
		| RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
	IXGB_WRITE_REG(&adapter->hw, RXDCTL, reg_rxdctl);

	/* Receive-adaptive interrupt delay control; always enabled here */
	adapter->raidc = 1;
	if (adapter->raidc) {
		uint32_t raidc;
		uint8_t poll_threshold;
#define IXGB_RAIDC_POLL_DEFAULT 120

		/* poll_threshold = (ring size / 16), clamped to 6 bits */
		poll_threshold = ((adapter->num_rx_desc - 1) >> 3);
		poll_threshold >>= 1;
		poll_threshold &= 0x3F;
		raidc = IXGB_RAIDC_EN | IXGB_RAIDC_RXT_GATE
			| (IXGB_RAIDC_POLL_DEFAULT << IXGB_RAIDC_POLL_SHIFT)
			| (adapter->rx_int_delay << IXGB_RAIDC_DELAY_SHIFT)
			| poll_threshold;
		IXGB_WRITE_REG(&adapter->hw, RAIDC, raidc);
	}
	/* Enable Receive Checksum Offload for TCP and UDP ? */
	if (ifp->if_capenable & IFCAP_RXCSUM) {
		reg_rxcsum = IXGB_READ_REG(&adapter->hw, RXCSUM);
		reg_rxcsum |= IXGB_RXCSUM_TUOFL;
		IXGB_WRITE_REG(&adapter->hw, RXCSUM, reg_rxcsum);
	}
	/* Setup the Receive Control Register */
	reg_rctl = IXGB_READ_REG(&adapter->hw, RCTL);
	reg_rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
	reg_rctl |= IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 | IXGB_RCTL_SECRC |
		IXGB_RCTL_CFF |
		(adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);

	/* Select the hardware buffer size matching our cluster size */
	switch (adapter->rx_buffer_len) {
	default:
	case IXGB_RXBUFFER_2048:
		reg_rctl |= IXGB_RCTL_BSIZE_2048;
		break;
	case IXGB_RXBUFFER_4096:
		reg_rctl |= IXGB_RCTL_BSIZE_4096;
		break;
	case IXGB_RXBUFFER_8192:
		reg_rctl |= IXGB_RCTL_BSIZE_8192;
		break;
	case IXGB_RXBUFFER_16384:
		reg_rctl |= IXGB_RCTL_BSIZE_16384;
		break;
	}

	reg_rctl |= IXGB_RCTL_RXEN;

	/* Enable Receives */
	IXGB_WRITE_REG(&adapter->hw, RCTL, reg_rctl);
	return;
}

/*********************************************************************
 *
 *  Free receive related data structures.
 **********************************************************************/
static void
ixgb_free_receive_structures(struct adapter * adapter)
{
	struct ixgb_buffer *rx_buffer;
	int i;

	INIT_DEBUGOUT("free_receive_structures: begin");

	/* Unload/destroy every per-slot DMA map and free attached mbufs */
	if (adapter->rx_buffer_area != NULL) {
		rx_buffer = adapter->rx_buffer_area;
		for (i = 0; i < adapter->num_rx_desc; i++, rx_buffer++) {
			if (rx_buffer->map != NULL) {
				bus_dmamap_unload(adapter->rxtag, rx_buffer->map);
				bus_dmamap_destroy(adapter->rxtag, rx_buffer->map);
			}
			if (rx_buffer->m_head != NULL)
				m_freem(rx_buffer->m_head);
			rx_buffer->m_head = NULL;
		}
	}
	if (adapter->rx_buffer_area != NULL) {
		free(adapter->rx_buffer_area, M_DEVBUF);
		adapter->rx_buffer_area = NULL;
	}
	if (adapter->rxtag != NULL) {
		bus_dma_tag_destroy(adapter->rxtag);
		adapter->rxtag = NULL;
	}
	return;
}

/*********************************************************************
 *
 *  This routine executes in interrupt context. It replenishes
 *  the mbufs in the descriptor and sends data which has been
 *  dma'ed into host memory to upper layer.
 *
 *  We loop at most count times if count is > 0, or until done if
 *  count < 0.
 *
 *********************************************************************/
static void
ixgb_process_receive_interrupts(struct adapter * adapter, int count)
{
	struct ifnet *ifp;
	struct mbuf *mp;
#if __FreeBSD_version < 500000
	struct ether_header *eh;
#endif
	int eop = 0;		/* set when current descriptor ends a frame */
	int len;
	u_int8_t accept_frame = 0;
	int i;
	int next_to_use = 0;
	int eop_desc;		/* index of the last EOP descriptor seen */
	/* Pointer to the receive descriptor being examined. */
	struct ixgb_rx_desc *current_desc;

	IXGB_LOCK_ASSERT(adapter);

	ifp = adapter->ifp;
	i = adapter->next_rx_desc_to_check;
	next_to_use = adapter->next_rx_desc_to_use;
	eop_desc = adapter->next_rx_desc_to_check;
	current_desc = &adapter->rx_desc_base[i];

	/* Nothing written back by the hardware yet */
	if (!((current_desc->status) & IXGB_RX_DESC_STATUS_DD)) {
#ifdef _SV_
		adapter->no_pkts_avail++;
#endif
		return;
	}
	while ((current_desc->status & IXGB_RX_DESC_STATUS_DD) && (count != 0)) {

		mp = adapter->rx_buffer_area[i].m_head;
		bus_dmamap_sync(adapter->rxtag, adapter->rx_buffer_area[i].map,
				BUS_DMASYNC_POSTREAD);
		accept_frame = 1;
		/* count is only decremented per completed frame, not fragment */
		if (current_desc->status & IXGB_RX_DESC_STATUS_EOP) {
			count--;
			eop = 1;
		} else {
			eop = 0;
		}
		len = current_desc->length;

		if (current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
			    IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
					    IXGB_RX_DESC_ERRORS_RXE)) {
			accept_frame = 0;
		}
		if (accept_frame) {
			/* Assign correct length to the current fragment */
			mp->m_len = len;

			if (adapter->fmp == NULL) {
				mp->m_pkthdr.len = len;
				adapter->fmp = mp;	/* Store the first mbuf */
				adapter->lmp = mp;
			} else {
				/* Chain mbuf's together */
				mp->m_flags &= ~M_PKTHDR;
				adapter->lmp->m_next = mp;
				adapter->lmp = adapter->lmp->m_next;
				adapter->fmp->m_pkthdr.len += len;
			}

			if (eop) {
				eop_desc = i;
				adapter->fmp->m_pkthdr.rcvif = ifp;

#if __FreeBSD_version < 500000
				eh = mtod(adapter->fmp, struct ether_header *);
				/* Remove ethernet header from mbuf */
				m_adj(adapter->fmp, sizeof(struct ether_header));
				ixgb_receive_checksum(adapter, current_desc,
						      adapter->fmp);
				if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
					VLAN_INPUT_TAG(eh, adapter->fmp,
						       current_desc->special);
				else
					ether_input(ifp, eh, adapter->fmp);
#else
				ixgb_receive_checksum(adapter, current_desc,
						      adapter->fmp);
				/* VLAN_INPUT_TAG consumes fmp (sets it NULL) */
				if (current_desc->status & IXGB_RX_DESC_STATUS_VP)
					VLAN_INPUT_TAG(ifp, adapter->fmp,
						       current_desc->special,
						       adapter->fmp = NULL);

				if (adapter->fmp != NULL) {
					/* Drop the lock across the stack call */
					IXGB_UNLOCK(adapter);
					(*ifp->if_input) (ifp, adapter->fmp);
					IXGB_LOCK(adapter);
				}
#endif
				adapter->fmp = NULL;
				adapter->lmp = NULL;
			}
			adapter->rx_buffer_area[i].m_head = NULL;
		} else {
			/* Bad fragment: drop the whole partially-built frame */
			adapter->dropped_pkts++;
			if (adapter->fmp != NULL)
				m_freem(adapter->fmp);
			adapter->fmp = NULL;
			adapter->lmp = NULL;
		}

		/* Zero out the receive descriptors status */
		current_desc->status = 0;

		/* Advance our pointers to the next descriptor */
		if (++i == adapter->num_rx_desc) {
			i = 0;
			current_desc = adapter->rx_desc_base;
		} else
			current_desc++;
	}
	adapter->next_rx_desc_to_check = i;

	/* Step back to the last descriptor actually processed */
	if (--i < 0)
		i = (adapter->num_rx_desc - 1);

	/*
	 * 82597EX: Workaround for redundant write back in receive descriptor ring (causes
	 * memory corruption). Avoid using and re-submitting the most recently received RX
	 * descriptor back to hardware.
	 *
	 * if(Last written back descriptor == EOP bit set descriptor)
	 *	then avoid re-submitting the most recently received RX descriptor
	 *	back to hardware.
	 * if(Last written back descriptor != EOP bit set descriptor)
	 *	then avoid re-submitting the most recently received RX descriptors
	 *	till last EOP bit set descriptor.
	 */
	if (eop_desc != i) {
		if (++eop_desc == adapter->num_rx_desc)
			eop_desc = 0;
		i = eop_desc;
	}
	/* Replenish the descriptors with new mbufs till last EOP bit set descriptor */
	while (next_to_use != i) {
		current_desc = &adapter->rx_desc_base[next_to_use];
		if ((current_desc->errors & (IXGB_RX_DESC_ERRORS_CE |
			    IXGB_RX_DESC_ERRORS_SE | IXGB_RX_DESC_ERRORS_P |
					     IXGB_RX_DESC_ERRORS_RXE))) {
			/* Errored slot: recycle its existing mbuf in place */
			mp = adapter->rx_buffer_area[next_to_use].m_head;
			ixgb_get_buf(next_to_use, adapter, mp);
		} else {
			if (ixgb_get_buf(next_to_use, adapter, NULL) == ENOBUFS)
				break;
		}
		/* Advance our pointers to the next descriptor */
		if (++next_to_use == adapter->num_rx_desc) {
			next_to_use = 0;
			current_desc = adapter->rx_desc_base;
		} else
			current_desc++;
	}
	adapter->next_rx_desc_to_use = next_to_use;
	/* Tail must point at the last valid descriptor, one behind next_to_use */
	if (--next_to_use < 0)
		next_to_use = (adapter->num_rx_desc - 1);
	/* Advance the IXGB's Receive Queue #0 "Tail Pointer" */
	IXGB_WRITE_REG(&adapter->hw, RDT, next_to_use);
	return;
}

/*********************************************************************
 *
 *  Verify that the hardware indicated that the checksum is valid.
 *  Inform the stack about the status of checksum so that stack
 *  doesn't spend time verifying the checksum.
 *
 *********************************************************************/
static void
ixgb_receive_checksum(struct adapter * adapter,
		      struct ixgb_rx_desc * rx_desc,
		      struct mbuf * mp)
{
	/* Hardware ignored the checksum: report nothing to the stack */
	if (rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) {
		mp->m_pkthdr.csum_flags = 0;
		return;
	}
	if (rx_desc->status & IXGB_RX_DESC_STATUS_IPCS) {
		/* Did it pass? */
		if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_IPE)) {
			/* IP Checksum Good */
			mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
			mp->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		} else {
			mp->m_pkthdr.csum_flags = 0;
		}
	}
	if (rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS) {
		/* Did it pass? */
		if (!(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE)) {
			mp->m_pkthdr.csum_flags |=
				(CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
			mp->m_pkthdr.csum_data = htons(0xffff);
		}
	}
	return;
}

/* Turn on hardware VLAN tag stripping/insertion (CTRL0.VME) */
static void
ixgb_enable_vlans(struct adapter * adapter)
{
	uint32_t ctrl;

	ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
	ctrl |= IXGB_CTRL0_VME;
	IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
	return;
}

/* Unmask the interrupt causes this driver services */
static void
ixgb_enable_intr(struct adapter * adapter)
{
	IXGB_WRITE_REG(&adapter->hw, IMS, (IXGB_INT_RXT0 | IXGB_INT_TXDW |
			    IXGB_INT_RXDMT0 | IXGB_INT_LSC | IXGB_INT_RXO));
	return;
}

/* Mask all interrupt causes */
static void
ixgb_disable_intr(struct adapter * adapter)
{
	IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
	return;
}

/* Write a 16-bit value into the device's PCI configuration space */
void
ixgb_write_pci_cfg(struct ixgb_hw * hw,
		   uint32_t reg,
		   uint16_t * value)
{
	pci_write_config(((struct ixgb_osdep *) hw->back)->dev, reg,
			 *value, 2);
}

/**********************************************************************
 *
 * Update the board statistics counters.
 *
 **********************************************************************/
static void
ixgb_update_stats_counters(struct adapter * adapter)
{
	struct ifnet *ifp;

	/*
	 * Accumulate the hardware counters into the softc.  Presumably the
	 * 82597EX counters are clear-on-read, which is why each register is
	 * added rather than assigned — confirm against the datasheet.
	 */
	adapter->stats.crcerrs += IXGB_READ_REG(&adapter->hw, CRCERRS);
	adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
	adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
	adapter->stats.gorcl += IXGB_READ_REG(&adapter->hw, GORCL);
	adapter->stats.gorch += IXGB_READ_REG(&adapter->hw, GORCH);
	adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
	adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
	adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
	adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
	adapter->stats.roc += IXGB_READ_REG(&adapter->hw, ROC);

	adapter->stats.mpc += IXGB_READ_REG(&adapter->hw, MPC);
	adapter->stats.dc += IXGB_READ_REG(&adapter->hw, DC);
	adapter->stats.rlec += IXGB_READ_REG(&adapter->hw, RLEC);
	adapter->stats.xonrxc += IXGB_READ_REG(&adapter->hw, XONRXC);
	adapter->stats.xontxc += IXGB_READ_REG(&adapter->hw, XONTXC);
	adapter->stats.xoffrxc += IXGB_READ_REG(&adapter->hw, XOFFRXC);
	adapter->stats.xofftxc += IXGB_READ_REG(&adapter->hw, XOFFTXC);
	adapter->stats.gptcl += IXGB_READ_REG(&adapter->hw, GPTCL);
	adapter->stats.gptch += IXGB_READ_REG(&adapter->hw, GPTCH);
	adapter->stats.gotcl += IXGB_READ_REG(&adapter->hw, GOTCL);
	adapter->stats.gotch += IXGB_READ_REG(&adapter->hw, GOTCH);
	adapter->stats.ruc += IXGB_READ_REG(&adapter->hw, RUC);
	adapter->stats.rfc += IXGB_READ_REG(&adapter->hw, RFC);
	adapter->stats.rjc += IXGB_READ_REG(&adapter->hw, RJC);
	adapter->stats.torl += IXGB_READ_REG(&adapter->hw, TORL);
	adapter->stats.torh += IXGB_READ_REG(&adapter->hw, TORH);
	adapter->stats.totl += IXGB_READ_REG(&adapter->hw, TOTL);
	adapter->stats.toth += IXGB_READ_REG(&adapter->hw, TOTH);
	adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
	adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
	adapter->stats.tptl += IXGB_READ_REG(&adapter->hw, TPTL);
	adapter->stats.tpth += IXGB_READ_REG(&adapter->hw, TPTH);
	adapter->stats.plt64c += IXGB_READ_REG(&adapter->hw, PLT64C);
	adapter->stats.mptcl += IXGB_READ_REG(&adapter->hw, MPTCL);
	adapter->stats.mptch += IXGB_READ_REG(&adapter->hw, MPTCH);
	adapter->stats.bptcl += IXGB_READ_REG(&adapter->hw, BPTCL);
	adapter->stats.bptch += IXGB_READ_REG(&adapter->hw, BPTCH);

	adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
	adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
	adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
	adapter->stats.vprch += IXGB_READ_REG(&adapter->hw, VPRCH);
	adapter->stats.jprcl += IXGB_READ_REG(&adapter->hw, JPRCL);
	adapter->stats.jprch += IXGB_READ_REG(&adapter->hw, JPRCH);
	adapter->stats.rnbc += IXGB_READ_REG(&adapter->hw, RNBC);
	adapter->stats.icbc += IXGB_READ_REG(&adapter->hw, ICBC);
	adapter->stats.ecbc += IXGB_READ_REG(&adapter->hw, ECBC);
	adapter->stats.uptcl += IXGB_READ_REG(&adapter->hw, UPTCL);
	adapter->stats.uptch += IXGB_READ_REG(&adapter->hw, UPTCH);
	adapter->stats.vptcl += IXGB_READ_REG(&adapter->hw, VPTCL);
	adapter->stats.vptch += IXGB_READ_REG(&adapter->hw, VPTCH);
	adapter->stats.jptcl += IXGB_READ_REG(&adapter->hw, JPTCL);
	adapter->stats.jptch += IXGB_READ_REG(&adapter->hw, JPTCH);
	adapter->stats.tsctc += IXGB_READ_REG(&adapter->hw, TSCTC);
	adapter->stats.tsctfc += IXGB_READ_REG(&adapter->hw, TSCTFC);
	adapter->stats.ibic += IXGB_READ_REG(&adapter->hw, IBIC);
	adapter->stats.lfc += IXGB_READ_REG(&adapter->hw, LFC);
	adapter->stats.pfrc += IXGB_READ_REG(&adapter->hw, PFRC);
	adapter->stats.pftc += IXGB_READ_REG(&adapter->hw, PFTC);
	adapter->stats.mcfrc += IXGB_READ_REG(&adapter->hw, MCFRC);

	ifp = adapter->ifp;

	/* Fill out the OS statistics structure */
	ifp->if_ipackets = adapter->stats.gprcl;
	ifp->if_opackets = adapter->stats.gptcl;
	ifp->if_ibytes = adapter->stats.gorcl;
	ifp->if_obytes = adapter->stats.gotcl;
	ifp->if_imcasts = adapter->stats.mprcl;
	ifp->if_collisions = 0;

	/* Rx Errors */
	ifp->if_ierrors =
		adapter->dropped_pkts +
		adapter->stats.crcerrs +
		adapter->stats.rnbc +
		adapter->stats.mpc +
		adapter->stats.rlec;
}

/**********************************************************************
 *
 *  This routine is called only when ixgb_display_debug_stats is enabled.
 *  This routine provides a way to take a look at important statistics
 *  maintained by the driver and hardware.
 **********************************************************************/
static void
ixgb_print_hw_stats(struct adapter * adapter)
{
	char buf_speed[100], buf_type[100];
	ixgb_bus_speed bus_speed;
	ixgb_bus_type bus_type;
	int unit = adapter->unit;

#ifdef _SV_
	printf("ixgb%d: Packets not Avail = %ld\n", unit,
	       adapter->no_pkts_avail);
	printf("ixgb%d: CleanTxInterrupts = %ld\n", unit,
	       adapter->clean_tx_interrupts);
	printf("ixgb%d: ICR RXDMT0 = %lld\n", unit,
	       (long long)adapter->sv_stats.icr_rxdmt0);
	printf("ixgb%d: ICR RXO = %lld\n", unit,
	       (long long)adapter->sv_stats.icr_rxo);
	printf("ixgb%d: ICR RXT0 = %lld\n", unit,
	       (long long)adapter->sv_stats.icr_rxt0);
	printf("ixgb%d: ICR TXDW = %lld\n", unit,
	       (long long)adapter->sv_stats.icr_TXDW);
#endif				/* _SV_ */

	bus_speed = adapter->hw.bus.speed;
	bus_type = adapter->hw.bus.type;
	/* Render enum values as human-readable strings */
	sprintf(buf_speed,
		bus_speed == ixgb_bus_speed_33 ? "33MHz" :
		bus_speed == ixgb_bus_speed_66 ? "66MHz" :
		bus_speed == ixgb_bus_speed_100 ? "100MHz" :
		bus_speed == ixgb_bus_speed_133 ? "133MHz" :
		"UNKNOWN");
	printf("ixgb%d: PCI_Bus_Speed = %s\n", unit, buf_speed);

	sprintf(buf_type,
		bus_type == ixgb_bus_type_pci ? "PCI" :
		bus_type == ixgb_bus_type_pcix ? "PCI-X" :
		"UNKNOWN");
	printf("ixgb%d: PCI_Bus_Type = %s\n", unit, buf_type);

	printf("ixgb%d: Tx Descriptors not Avail1 = %ld\n", unit,
	       adapter->no_tx_desc_avail1);
	printf("ixgb%d: Tx Descriptors not Avail2 = %ld\n", unit,
	       adapter->no_tx_desc_avail2);
	printf("ixgb%d: Std Mbuf Failed = %ld\n", unit,
	       adapter->mbuf_alloc_failed);
	printf("ixgb%d: Std Cluster Failed = %ld\n", unit,
	       adapter->mbuf_cluster_failed);

	printf("ixgb%d: Defer count = %lld\n", unit,
	       (long long)adapter->stats.dc);
	printf("ixgb%d: Missed Packets = %lld\n", unit,
	       (long long)adapter->stats.mpc);
	printf("ixgb%d: Receive No Buffers = %lld\n", unit,
	       (long long)adapter->stats.rnbc);
	printf("ixgb%d: Receive length errors = %lld\n", unit,
	       (long long)adapter->stats.rlec);
	printf("ixgb%d: Crc errors = %lld\n", unit,
	       (long long)adapter->stats.crcerrs);
	printf("ixgb%d: Driver dropped packets = %ld\n", unit,
	       adapter->dropped_pkts);

	printf("ixgb%d: XON Rcvd = %lld\n", unit,
	       (long long)adapter->stats.xonrxc);
	printf("ixgb%d: XON Xmtd = %lld\n", unit,
	       (long long)adapter->stats.xontxc);
	printf("ixgb%d: XOFF Rcvd = %lld\n", unit,
	       (long long)adapter->stats.xoffrxc);
	printf("ixgb%d: XOFF Xmtd = %lld\n", unit,
	       (long long)adapter->stats.xofftxc);

	printf("ixgb%d: Good Packets Rcvd = %lld\n", unit,
	       (long long)adapter->stats.gprcl);
	printf("ixgb%d: Good Packets Xmtd = %lld\n", unit,
	       (long long)adapter->stats.gptcl);

	printf("ixgb%d: Jumbo frames recvd = %lld\n", unit,
	       (long long)adapter->stats.jprcl);
	printf("ixgb%d: Jumbo frames Xmtd = %lld\n", unit,
	       (long long)adapter->stats.jptcl);

	return;
}

/*
 * sysctl handler: writing 1 to the node dumps the hardware statistics via
 * ixgb_print_hw_stats().  Reads return -1 and have no side effect.
 */
static int
ixgb_sysctl_stats(SYSCTL_HANDLER_ARGS)
{
	int error;
	int result;
	struct adapter *adapter;

	result = -1;
	error = sysctl_handle_int(oidp, &result, 0, req);

	/* Bail on error or when this was a read (no new value supplied) */
	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		adapter = (struct adapter *) arg1;
		ixgb_print_hw_stats(adapter);
	}
	return error;
}
Index: stable/6/sys/dev/lge/if_lge.c
===================================================================
---
stable/6/sys/dev/lge/if_lge.c (revision 149421) +++ stable/6/sys/dev/lge/if_lge.c (revision 149422) @@ -1,1572 +1,1574 @@ /*- * Copyright (c) 2001 Wind River Systems * Copyright (c) 1997, 1998, 1999, 2000, 2001 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Level 1 LXT1001 gigabit ethernet driver for FreeBSD. 
Public * documentation not available, but ask me nicely. * * The Level 1 chip is used on some D-Link, SMC and Addtron NICs. * It's a 64-bit PCI part that supports TCP/IP checksum offload, * VLAN tagging/insertion, GMII and TBI (1000baseX) ports. There * are three supported methods for data transfer between host and * NIC: programmed I/O, traditional scatter/gather DMA and Packet * Propulsion Technology (tm) DMA. The latter mechanism is a form * of double buffer DMA where the packet data is copied to a * pre-allocated DMA buffer who's physical address has been loaded * into a table at device initialization time. The rationale is that * the virtual to physical address translation needed for normal * scatter/gather DMA is more expensive than the data copy needed * for double buffering. This may be true in Windows NT and the like, * but it isn't true for us, at least on the x86 arch. This driver * uses the scatter/gather I/O method for both TX and RX. * * The LXT1001 only supports TCP/IP checksum offload on receive. * Also, the VLAN tagging is done using a 16-entry table which allows * the chip to perform hardware filtering based on VLAN tags. Sadly, * our vlan support doesn't currently play well with this kind of * hardware support. * * Special thanks to: * - Jeff James at Intel, for arranging to have the LXT1001 manual * released (at long last) * - Beny Chen at D-Link, for actually sending it to me * - Brad Short and Keith Alexis at SMC, for sending me sample * SMC9462SX and SMC9462TX adapters for testing * - Paul Saab at Y!, for not killing me (though it remains to be seen * if in fact he did me much of a favor) */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include /* for DELAY */ #include #include #include #include #include #include #include #include #define LGE_USEIOSPACE #include /* "controller miibus0" required. 
See GENERIC if you get errors here. */ #include "miibus_if.h" /* * Various supported device vendors/types and their names. */ static struct lge_type lge_devs[] = { { LGE_VENDORID, LGE_DEVICEID, "Level 1 Gigabit Ethernet" }, { 0, 0, NULL } }; static int lge_probe(device_t); static int lge_attach(device_t); static int lge_detach(device_t); static int lge_alloc_jumbo_mem(struct lge_softc *); static void lge_free_jumbo_mem(struct lge_softc *); static void *lge_jalloc(struct lge_softc *); static void lge_jfree(void *, void *); static int lge_newbuf(struct lge_softc *, struct lge_rx_desc *, struct mbuf *); static int lge_encap(struct lge_softc *, struct mbuf *, u_int32_t *); static void lge_rxeof(struct lge_softc *, int); static void lge_rxeoc(struct lge_softc *); static void lge_txeof(struct lge_softc *); static void lge_intr(void *); static void lge_tick(void *); static void lge_start(struct ifnet *); static int lge_ioctl(struct ifnet *, u_long, caddr_t); static void lge_init(void *); static void lge_stop(struct lge_softc *); static void lge_watchdog(struct ifnet *); static void lge_shutdown(device_t); static int lge_ifmedia_upd(struct ifnet *); static void lge_ifmedia_sts(struct ifnet *, struct ifmediareq *); static void lge_eeprom_getword(struct lge_softc *, int, u_int16_t *); static void lge_read_eeprom(struct lge_softc *, caddr_t, int, int, int); static int lge_miibus_readreg(device_t, int, int); static int lge_miibus_writereg(device_t, int, int, int); static void lge_miibus_statchg(device_t); static void lge_setmulti(struct lge_softc *); static void lge_reset(struct lge_softc *); static int lge_list_rx_init(struct lge_softc *); static int lge_list_tx_init(struct lge_softc *); #ifdef LGE_USEIOSPACE #define LGE_RES SYS_RES_IOPORT #define LGE_RID LGE_PCI_LOIO #else #define LGE_RES SYS_RES_MEMORY #define LGE_RID LGE_PCI_LOMEM #endif static device_method_t lge_methods[] = { /* Device interface */ DEVMETHOD(device_probe, lge_probe), DEVMETHOD(device_attach, lge_attach), 
DEVMETHOD(device_detach, lge_detach), DEVMETHOD(device_shutdown, lge_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, lge_miibus_readreg), DEVMETHOD(miibus_writereg, lge_miibus_writereg), DEVMETHOD(miibus_statchg, lge_miibus_statchg), { 0, 0 } }; static driver_t lge_driver = { "lge", lge_methods, sizeof(struct lge_softc) }; static devclass_t lge_devclass; DRIVER_MODULE(lge, pci, lge_driver, lge_devclass, 0, 0); DRIVER_MODULE(miibus, lge, miibus_driver, miibus_devclass, 0, 0); MODULE_DEPEND(lge, pci, 1, 1, 1); MODULE_DEPEND(lge, ether, 1, 1, 1); MODULE_DEPEND(lge, miibus, 1, 1, 1); #define LGE_SETBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, \ CSR_READ_4(sc, reg) | (x)) #define LGE_CLRBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, \ CSR_READ_4(sc, reg) & ~(x)) #define SIO_SET(x) \ CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) | x) #define SIO_CLR(x) \ CSR_WRITE_4(sc, LGE_MEAR, CSR_READ_4(sc, LGE_MEAR) & ~x) /* * Read a word of data stored in the EEPROM at address 'addr.' */ static void lge_eeprom_getword(sc, addr, dest) struct lge_softc *sc; int addr; u_int16_t *dest; { register int i; u_int32_t val; CSR_WRITE_4(sc, LGE_EECTL, LGE_EECTL_CMD_READ| LGE_EECTL_SINGLEACCESS|((addr >> 1) << 8)); for (i = 0; i < LGE_TIMEOUT; i++) if (!(CSR_READ_4(sc, LGE_EECTL) & LGE_EECTL_CMD_READ)) break; if (i == LGE_TIMEOUT) { printf("lge%d: EEPROM read timed out\n", sc->lge_unit); return; } val = CSR_READ_4(sc, LGE_EEDATA); if (addr & 1) *dest = (val >> 16) & 0xFFFF; else *dest = val & 0xFFFF; return; } /* * Read a sequence of words from the EEPROM. 
 */
/*
 * Read `cnt' consecutive 16-bit words from the EEPROM into `dest',
 * starting at word offset `off'.  If `swap' is non-zero each word is
 * byte-swapped (ntohs) before being stored.
 */
static void
lge_read_eeprom(sc, dest, off, cnt, swap)
	struct lge_softc	*sc;
	caddr_t			dest;
	int			off;
	int			cnt;
	int			swap;
{
	int			i;
	u_int16_t		word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		lge_eeprom_getword(sc, off + i, &word);
		/* Each word occupies two bytes of the destination buffer. */
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return;
}

/*
 * Read a PHY register over the GMII management interface.
 * Returns 0 on timeout (indistinguishable from a register reading 0).
 */
static int
lge_miibus_readreg(dev, phy, reg)
	device_t		dev;
	int			phy, reg;
{
	struct lge_softc	*sc;
	int			i;

	sc = device_get_softc(dev);

	/*
	 * If we have a non-PCS PHY, pretend that the internal
	 * autoneg stuff at PHY address 0 isn't there so that
	 * the miibus code will find only the GMII PHY.
	 */
	if (sc->lge_pcs == 0 && phy == 0)
		return(0);

	/* Issue the read command; poll until the controller is done. */
	CSR_WRITE_4(sc, LGE_GMIICTL, (phy << 8) | reg | LGE_GMIICMD_READ);

	for (i = 0; i < LGE_TIMEOUT; i++)
		if (!(CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY))
			break;

	if (i == LGE_TIMEOUT) {
		printf("lge%d: PHY read timed out\n", sc->lge_unit);
		return(0);
	}

	/* Result is delivered in the upper 16 bits of GMIICTL. */
	return(CSR_READ_4(sc, LGE_GMIICTL) >> 16);
}

/*
 * Write a PHY register over the GMII management interface.
 * Always returns 0, even on timeout (callers ignore the result).
 */
static int
lge_miibus_writereg(dev, phy, reg, data)
	device_t		dev;
	int			phy, reg, data;
{
	struct lge_softc	*sc;
	int			i;

	sc = device_get_softc(dev);

	CSR_WRITE_4(sc, LGE_GMIICTL,
	    (data << 16) | (phy << 8) | reg | LGE_GMIICMD_WRITE);

	for (i = 0; i < LGE_TIMEOUT; i++)
		if (!(CSR_READ_4(sc, LGE_GMIICTL) & LGE_GMIICTL_CMDBUSY))
			break;

	if (i == LGE_TIMEOUT) {
		printf("lge%d: PHY write timed out\n", sc->lge_unit);
		return(0);
	}

	return(0);
}

/*
 * MII status change callback: program the MAC's speed and duplex
 * bits to match whatever media the PHY negotiated.
 */
static void
lge_miibus_statchg(dev)
	device_t		dev;
{
	struct lge_softc	*sc;
	struct mii_data		*mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->lge_miibus);

	/* Clear the speed field, then set it from the active media. */
	LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_SPEED);
	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
	case IFM_1000_SX:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000);
		break;
	case IFM_100_TX:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_100);
		break;
	case IFM_10_T:
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_10);
		break;
	default:
		/*
		 * Choose something, even if it's wrong. Clearing
		 * all the bits will hose autoneg on the internal
		 * PHY.
		 */
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_SPEED_1000);
		break;
	}

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		LGE_SETBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX);
	} else {
		LGE_CLRBIT(sc, LGE_GMIIMODE, LGE_GMIIMODE_FDX);
	}

	return;
}

/*
 * Program the 64-bit multicast hash filter from the interface's
 * multicast address list.  ALLMULTI/PROMISC opens the filter fully.
 */
static void
lge_setmulti(sc)
	struct lge_softc	*sc;
{
	struct ifnet		*ifp;
	struct ifmultiaddr	*ifma;
	u_int32_t		h = 0, hashes[2] = { 0, 0 };

	ifp = sc->lge_ifp;

	/* Make sure multicast hash table is enabled. */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_MCAST);

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		CSR_WRITE_4(sc, LGE_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, LGE_MAR1, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, LGE_MAR0, 0);
	CSR_WRITE_4(sc, LGE_MAR1, 0);

	/* now program new ones */
	/* Lock added in this revision: protects the if_multiaddrs walk. */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/* Top 6 bits of the big-endian CRC select one of 64 bits. */
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}
	IF_ADDR_UNLOCK(ifp);

	CSR_WRITE_4(sc, LGE_MAR0, hashes[0]);
	CSR_WRITE_4(sc, LGE_MAR1, hashes[1]);

	return;
}

/*
 * Soft-reset the controller and wait for it to come back.
 */
static void
lge_reset(sc)
	struct lge_softc	*sc;
{
	register int		i;

	LGE_SETBIT(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0|LGE_MODE1_SOFTRST);

	/* Poll until the chip clears the soft-reset bit. */
	for (i = 0; i < LGE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, LGE_MODE1) & LGE_MODE1_SOFTRST))
			break;
	}

	if (i == LGE_TIMEOUT)
		printf("lge%d: reset never completed\n", sc->lge_unit);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	return;
}

/*
 * Probe for a Level 1 chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
lge_probe(dev)
	device_t		dev;
{
	struct lge_type		*t;

	t = lge_devs;

	/* Walk the NULL-terminated device table looking for a match. */
	while(t->lge_name != NULL) {
		if ((pci_get_vendor(dev) == t->lge_vid) &&
		    (pci_get_device(dev) == t->lge_did)) {
			device_set_desc(dev, t->lge_name);
			return(BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return(ENXIO);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
static int
lge_attach(dev)
	device_t		dev;
{
	int			s;
	u_char			eaddr[ETHER_ADDR_LEN];
	struct lge_softc	*sc;
	struct ifnet		*ifp;
	int			unit, error = 0, rid;

	s = splimp();

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	bzero(sc, sizeof(struct lge_softc));

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = LGE_RID;
	sc->lge_res = bus_alloc_resource_any(dev, LGE_RES, &rid, RF_ACTIVE);

	if (sc->lge_res == NULL) {
		printf("lge%d: couldn't map ports/memory\n", unit);
		error = ENXIO;
		goto fail;
	}

	sc->lge_btag = rman_get_bustag(sc->lge_res);
	sc->lge_bhandle = rman_get_bushandle(sc->lge_res);

	/* Allocate interrupt */
	rid = 0;
	sc->lge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->lge_irq == NULL) {
		printf("lge%d: couldn't map interrupt\n", unit);
		bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res);
		error = ENXIO;
		goto fail;
	}

	error = bus_setup_intr(dev, sc->lge_irq, INTR_TYPE_NET,
	    lge_intr, sc, &sc->lge_intrhand);

	if (error) {
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lge_irq);
		bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res);
		printf("lge%d: couldn't set up irq\n", unit);
		goto fail;
	}

	/* Reset the adapter. */
	lge_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 * Each call reads one 16-bit word (two bytes of the MAC address).
	 */
	lge_read_eeprom(sc, (caddr_t)&eaddr[0], LGE_EE_NODEADDR_0, 1, 0);
	lge_read_eeprom(sc, (caddr_t)&eaddr[2], LGE_EE_NODEADDR_1, 1, 0);
	lge_read_eeprom(sc, (caddr_t)&eaddr[4], LGE_EE_NODEADDR_2, 1, 0);

	sc->lge_unit = unit;
	callout_handle_init(&sc->lge_stat_ch);

	/* Descriptor lists must be physically contiguous for DMA. */
	sc->lge_ldata = contigmalloc(sizeof(struct lge_list_data), M_DEVBUF,
	    M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->lge_ldata == NULL) {
		printf("lge%d: no memory for list buffers!\n", unit);
		bus_teardown_intr(dev, sc->lge_irq, sc->lge_intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lge_irq);
		bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res);
		error = ENXIO;
		goto fail;
	}
	bzero(sc->lge_ldata, sizeof(struct lge_list_data));

	/* Try to allocate memory for jumbo buffers. */
	if (lge_alloc_jumbo_mem(sc)) {
		printf("lge%d: jumbo buffer allocation failed\n",
                    sc->lge_unit);
		contigfree(sc->lge_ldata,
		    sizeof(struct lge_list_data), M_DEVBUF);
		bus_teardown_intr(dev, sc->lge_irq, sc->lge_intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lge_irq);
		bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res);
		error = ENXIO;
		goto fail;
	}

	ifp = sc->lge_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		printf("lge%d: can not if_alloc()\n", sc->lge_unit);
		contigfree(sc->lge_ldata,
		    sizeof(struct lge_list_data), M_DEVBUF);
		lge_free_jumbo_mem(sc);
		bus_teardown_intr(dev, sc->lge_irq, sc->lge_intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lge_irq);
		bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res);
		error = ENOSPC;
		goto fail;
	}
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
	    IFF_NEEDSGIANT;
	ifp->if_ioctl = lge_ioctl;
	ifp->if_start = lge_start;
	ifp->if_watchdog = lge_watchdog;
	ifp->if_init = lge_init;
	ifp->if_baudrate = 1000000000;
	ifp->if_snd.ifq_maxlen = LGE_TX_LIST_CNT - 1;
	ifp->if_capabilities = IFCAP_RXCSUM;
	ifp->if_capenable = ifp->if_capabilities;

	/* Detect whether the internal PCS block is present. */
	if (CSR_READ_4(sc, LGE_GMIIMODE) & LGE_GMIIMODE_PCSENH)
		sc->lge_pcs = 1;
	else
		sc->lge_pcs = 0;

	/*
	 * Do MII setup.
	 */
	if (mii_phy_probe(dev, &sc->lge_miibus,
	    lge_ifmedia_upd, lge_ifmedia_sts)) {
		printf("lge%d: MII without any PHY!\n", sc->lge_unit);
		contigfree(sc->lge_ldata,
		    sizeof(struct lge_list_data), M_DEVBUF);
		lge_free_jumbo_mem(sc);
		bus_teardown_intr(dev, sc->lge_irq, sc->lge_intrhand);
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lge_irq);
		bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res);
		if_free(ifp);
		error = ENXIO;
		goto fail;
	}

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);
	/*
	 * NOTE(review): callout_handle_init() was already called above;
	 * this second call is redundant but harmless -- confirm before
	 * removing.
	 */
	callout_handle_init(&sc->lge_stat_ch);

fail:
	splx(s);
	return(error);
}

/*
 * Detach the interface: stop the chip, tear down the MI layer,
 * release bus resources and free DMA memory.
 */
static int
lge_detach(dev)
	device_t		dev;
{
	struct lge_softc	*sc;
	struct ifnet		*ifp;
	int			s;

	s = splimp();

	sc = device_get_softc(dev);
	ifp = sc->lge_ifp;

	lge_reset(sc);
	lge_stop(sc);
	ether_ifdetach(ifp);
	if_free(ifp);

	bus_generic_detach(dev);
	device_delete_child(dev, sc->lge_miibus);

	bus_teardown_intr(dev, sc->lge_irq, sc->lge_intrhand);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lge_irq);
	bus_release_resource(dev, LGE_RES, LGE_RID, sc->lge_res);

	contigfree(sc->lge_ldata, sizeof(struct lge_list_data), M_DEVBUF);
	lge_free_jumbo_mem(sc);

	splx(s);

	return(0);
}

/*
 * Initialize the transmit descriptors.
 */
static int
lge_list_tx_init(sc)
	struct lge_softc	*sc;
{
	struct lge_list_data	*ld;
	struct lge_ring_data	*cd;
	int			i;

	cd = &sc->lge_cdata;
	ld = sc->lge_ldata;
	/* Mark every TX slot empty and rewind producer/consumer. */
	for (i = 0; i < LGE_TX_LIST_CNT; i++) {
		ld->lge_tx_list[i].lge_mbuf = NULL;
		ld->lge_tx_list[i].lge_ctl = 0;
	}

	cd->lge_tx_prod = cd->lge_tx_cons = 0;

	return(0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
*/ static int lge_list_rx_init(sc) struct lge_softc *sc; { struct lge_list_data *ld; struct lge_ring_data *cd; int i; ld = sc->lge_ldata; cd = &sc->lge_cdata; cd->lge_rx_prod = cd->lge_rx_cons = 0; CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0); for (i = 0; i < LGE_RX_LIST_CNT; i++) { if (CSR_READ_1(sc, LGE_RXCMDFREE_8BIT) == 0) break; if (lge_newbuf(sc, &ld->lge_rx_list[i], NULL) == ENOBUFS) return(ENOBUFS); } /* Clear possible 'rx command queue empty' interrupt. */ CSR_READ_4(sc, LGE_ISR); return(0); } /* * Initialize an RX descriptor and attach an MBUF cluster. */ static int lge_newbuf(sc, c, m) struct lge_softc *sc; struct lge_rx_desc *c; struct mbuf *m; { struct mbuf *m_new = NULL; caddr_t *buf = NULL; if (m == NULL) { MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("lge%d: no memory for rx list " "-- packet dropped!\n", sc->lge_unit); return(ENOBUFS); } /* Allocate the jumbo buffer */ buf = lge_jalloc(sc); if (buf == NULL) { #ifdef LGE_VERBOSE printf("lge%d: jumbo allocation failed " "-- packet dropped!\n", sc->lge_unit); #endif m_freem(m_new); return(ENOBUFS); } /* Attach the buffer to the mbuf */ m_new->m_data = (void *)buf; m_new->m_len = m_new->m_pkthdr.len = LGE_JUMBO_FRAMELEN; MEXTADD(m_new, buf, LGE_JUMBO_FRAMELEN, lge_jfree, (struct lge_softc *)sc, 0, EXT_NET_DRV); } else { m_new = m; m_new->m_len = m_new->m_pkthdr.len = LGE_JUMBO_FRAMELEN; m_new->m_data = m_new->m_ext.ext_buf; } /* * Adjust alignment so packet payload begins on a * longword boundary. Mandatory for Alpha, useful on * x86 too. */ m_adj(m_new, ETHER_ALIGN); c->lge_mbuf = m_new; c->lge_fragptr_hi = 0; c->lge_fragptr_lo = vtophys(mtod(m_new, caddr_t)); c->lge_fraglen = m_new->m_len; c->lge_ctl = m_new->m_len | LGE_RXCTL_WANTINTR | LGE_FRAGCNT(1); c->lge_sts = 0; /* * Put this buffer in the RX command FIFO. To do this, * we just write the physical address of the descriptor * into the RX descriptor address registers. 
Note that * there are two registers, one high DWORD and one low * DWORD, which lets us specify a 64-bit address if * desired. We only use a 32-bit address for now. * Writing to the low DWORD register is what actually * causes the command to be issued, so we do that * last. */ CSR_WRITE_4(sc, LGE_RXDESC_ADDR_LO, vtophys(c)); LGE_INC(sc->lge_cdata.lge_rx_prod, LGE_RX_LIST_CNT); return(0); } static int lge_alloc_jumbo_mem(sc) struct lge_softc *sc; { caddr_t ptr; register int i; struct lge_jpool_entry *entry; /* Grab a big chunk o' storage. */ sc->lge_cdata.lge_jumbo_buf = contigmalloc(LGE_JMEM, M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc->lge_cdata.lge_jumbo_buf == NULL) { printf("lge%d: no memory for jumbo buffers!\n", sc->lge_unit); return(ENOBUFS); } SLIST_INIT(&sc->lge_jfree_listhead); SLIST_INIT(&sc->lge_jinuse_listhead); /* * Now divide it up into 9K pieces and save the addresses * in an array. */ ptr = sc->lge_cdata.lge_jumbo_buf; for (i = 0; i < LGE_JSLOTS; i++) { sc->lge_cdata.lge_jslots[i] = ptr; ptr += LGE_JLEN; entry = malloc(sizeof(struct lge_jpool_entry), M_DEVBUF, M_NOWAIT); if (entry == NULL) { printf("lge%d: no memory for jumbo " "buffer queue!\n", sc->lge_unit); return(ENOBUFS); } entry->slot = i; SLIST_INSERT_HEAD(&sc->lge_jfree_listhead, entry, jpool_entries); } return(0); } static void lge_free_jumbo_mem(sc) struct lge_softc *sc; { int i; struct lge_jpool_entry *entry; for (i = 0; i < LGE_JSLOTS; i++) { entry = SLIST_FIRST(&sc->lge_jfree_listhead); SLIST_REMOVE_HEAD(&sc->lge_jfree_listhead, jpool_entries); free(entry, M_DEVBUF); } contigfree(sc->lge_cdata.lge_jumbo_buf, LGE_JMEM, M_DEVBUF); return; } /* * Allocate a jumbo buffer. 
 */
static void *
lge_jalloc(sc)
	struct lge_softc	*sc;
{
	struct lge_jpool_entry   *entry;

	entry = SLIST_FIRST(&sc->lge_jfree_listhead);

	if (entry == NULL) {
#ifdef LGE_VERBOSE
		printf("lge%d: no free jumbo buffers\n", sc->lge_unit);
#endif
		return(NULL);
	}

	/* Move the bookkeeping entry from the free to the in-use list. */
	SLIST_REMOVE_HEAD(&sc->lge_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->lge_jinuse_listhead, entry, jpool_entries);
	return(sc->lge_cdata.lge_jslots[entry->slot]);
}

/*
 * Release a jumbo buffer.  Called by the mbuf system as the external
 * buffer free routine installed via MEXTADD (args == softc).
 */
static void
lge_jfree(buf, args)
	void			*buf;
	void			*args;
{
	struct lge_softc	*sc;
	int		        i;
	struct lge_jpool_entry   *entry;

	/* Extract the softc struct pointer. */
	sc = args;

	if (sc == NULL)
		panic("lge_jfree: can't find softc pointer!");

	/* calculate the slot this buffer belongs to */
	i = ((vm_offset_t)buf
	     - (vm_offset_t)sc->lge_cdata.lge_jumbo_buf) / LGE_JLEN;

	if ((i < 0) || (i >= LGE_JSLOTS))
		panic("lge_jfree: asked to free buffer that we don't manage!");

	/*
	 * NOTE(review): this takes an *arbitrary* entry off the in-use
	 * list and relabels it with slot i, rather than finding the entry
	 * that actually tracked this slot.  Works because entries are
	 * interchangeable tokens, but confirm before changing the scheme.
	 */
	entry = SLIST_FIRST(&sc->lge_jinuse_listhead);
	if (entry == NULL)
		panic("lge_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc->lge_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->lge_jfree_listhead, entry, jpool_entries);

	return;
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.  `cnt' is the number of frames the
 * chip reports as completed (from the ISR DMA count field).
 */
static void
lge_rxeof(sc, cnt)
	struct lge_softc	*sc;
	int			cnt;
{
        struct mbuf		*m;
        struct ifnet		*ifp;
	struct lge_rx_desc	*cur_rx;
	int			c, i, total_len = 0;
	u_int32_t		rxsts, rxctl;

	ifp = sc->lge_ifp;

	/* Find out how many frames were processed. */
	c = cnt;
	i = sc->lge_cdata.lge_rx_cons;

	/* Suck them in. */
	while(c) {
		struct mbuf		*m0 = NULL;

		cur_rx = &sc->lge_ldata->lge_rx_list[i];
		rxctl = cur_rx->lge_ctl;
		rxsts = cur_rx->lge_sts;
		m = cur_rx->lge_mbuf;
		cur_rx->lge_mbuf = NULL;
		total_len = LGE_RXBYTES(cur_rx);
		LGE_INC(i, LGE_RX_LIST_CNT);
		c--;

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (rxctl & LGE_RXCTL_ERRMASK) {
			ifp->if_ierrors++;
			lge_newbuf(sc, &LGE_RXTAIL(sc), m);
			continue;
		}

		if (lge_newbuf(sc, &LGE_RXTAIL(sc), NULL) == ENOBUFS) {
			/*
			 * No fresh buffer: copy the frame out so the old
			 * cluster can be recycled into the ring.
			 */
			m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN,
			    ifp, NULL);
			lge_newbuf(sc, &LGE_RXTAIL(sc), m);
			if (m0 == NULL) {
				printf("lge%d: no receive buffers "
				    "available -- packet dropped!\n",
				    sc->lge_unit);
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		} else {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
		}

		ifp->if_ipackets++;

		/* Do IP checksum checking. */
		if (rxsts & LGE_RXSTS_ISIP)
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
		/*
		 * NOTE(review): CSUM_IP_VALID is set whenever the error
		 * bit is clear, even for non-IP frames -- confirm this is
		 * intended (harmless because CSUM_IP_CHECKED is not set).
		 */
		if (!(rxsts & LGE_RXSTS_IPCSUMERR))
			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		if ((rxsts & LGE_RXSTS_ISTCP &&
		    !(rxsts & LGE_RXSTS_TCPCSUMERR)) ||
		    (rxsts & LGE_RXSTS_ISUDP &&
		    !(rxsts & LGE_RXSTS_UDPCSUMERR))) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}

		(*ifp->if_input)(ifp, m);
	}

	sc->lge_cdata.lge_rx_cons = i;

	return;
}

/*
 * RX command FIFO ran empty: restart the interface to repopulate
 * the ring (clearing IFF_RUNNING forces lge_init to do a full reinit).
 */
static void
lge_rxeoc(sc)
	struct lge_softc	*sc;
{
	struct ifnet		*ifp;

	ifp = sc->lge_ifp;
	ifp->if_flags &= ~IFF_RUNNING;
	lge_init(sc);
	return;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
lge_txeof(sc)
	struct lge_softc	*sc;
{
	struct lge_tx_desc	*cur_tx = NULL;
	struct ifnet		*ifp;
	u_int32_t		idx, txdone;

	ifp = sc->lge_ifp;

	/* Clear the timeout timer. */
	ifp->if_timer = 0;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->lge_cdata.lge_tx_cons;
	txdone = CSR_READ_1(sc, LGE_TXDMADONE_8BIT);

	while (idx != sc->lge_cdata.lge_tx_prod && txdone) {
		cur_tx = &sc->lge_ldata->lge_tx_list[idx];

		ifp->if_opackets++;
		if (cur_tx->lge_mbuf != NULL) {
			m_freem(cur_tx->lge_mbuf);
			cur_tx->lge_mbuf = NULL;
		}
		cur_tx->lge_ctl = 0;

		txdone--;
		LGE_INC(idx, LGE_TX_LIST_CNT);
		ifp->if_timer = 0;
	}

	sc->lge_cdata.lge_tx_cons = idx;

	/* Descriptors were reclaimed, so transmission may resume. */
	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;

	return;
}

/*
 * Once-a-second timer: harvest collision statistics and, while the
 * link is down, poll the PHY until it comes up.
 */
static void
lge_tick(xsc)
	void			*xsc;
{
	struct lge_softc	*sc;
	struct mii_data		*mii;
	struct ifnet		*ifp;
	int			s;

	s = splimp();

	sc = xsc;
	ifp = sc->lge_ifp;

	CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_SINGLE_COLL_PKTS);
	ifp->if_collisions += CSR_READ_4(sc, LGE_STATSVAL);
	CSR_WRITE_4(sc, LGE_STATSIDX, LGE_STATS_MULTI_COLL_PKTS);
	ifp->if_collisions += CSR_READ_4(sc, LGE_STATSVAL);

	if (!sc->lge_link) {
		mii = device_get_softc(sc->lge_miibus);
		mii_tick(mii);
		if (mii->mii_media_status & IFM_ACTIVE &&
		    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
			sc->lge_link++;
			if (bootverbose &&
			    (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX||
			    IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T))
				printf("lge%d: gigabit link up\n",
				    sc->lge_unit);
			/* Kick the transmitter now that the link is up. */
			if (ifp->if_snd.ifq_head != NULL)
				lge_start(ifp);
		}
	}

	/* Re-arm for the next second. */
	sc->lge_stat_ch = timeout(lge_tick, sc, hz);

	splx(s);

	return;
}

/*
 * Interrupt handler: dispatch TX/RX completions, FIFO-empty and PHY
 * events, then re-enable interrupts (reading ISR disabled them).
 */
static void
lge_intr(arg)
	void			*arg;
{
	struct lge_softc	*sc;
	struct ifnet		*ifp;
	u_int32_t		status;

	sc = arg;
	ifp = sc->lge_ifp;

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_UP)) {
		lge_stop(sc);
		return;
	}

	for (;;) {
		/*
		 * Reading the ISR register clears all interrupts, and
		 * clears the 'interrupts enabled' bit in the IMR
		 * register.
		 */
		status = CSR_READ_4(sc, LGE_ISR);

		if ((status & LGE_INTRS) == 0)
			break;

		if ((status & (LGE_ISR_TXCMDFIFO_EMPTY|LGE_ISR_TXDMA_DONE)))
			lge_txeof(sc);

		if (status & LGE_ISR_RXDMA_DONE)
			lge_rxeof(sc, LGE_RX_DMACNT(status));

		if (status & LGE_ISR_RXCMDFIFO_EMPTY)
			lge_rxeoc(sc);

		if (status & LGE_ISR_PHY_INTR) {
			/* Link changed: force re-detection via lge_tick. */
			sc->lge_link = 0;
			untimeout(lge_tick, sc, sc->lge_stat_ch);
			lge_tick(sc);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0|LGE_IMR_INTR_ENB);

	if (ifp->if_snd.ifq_head != NULL)
		lge_start(ifp);

	return;
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
lge_encap(sc, m_head, txidx)
	struct lge_softc	*sc;
	struct mbuf		*m_head;
	u_int32_t		*txidx;
{
	struct lge_frag		*f = NULL;
	struct lge_tx_desc	*cur_tx;
	struct mbuf		*m;
	int			frag = 0, tot_len = 0;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	cur_tx = &sc->lge_ldata->lge_tx_list[*txidx];
	frag = 0;

	/*
	 * NOTE(review): the loop never breaks early, so the `m != NULL'
	 * check below is dead code and `frag' is not bounded against the
	 * size of lge_frags[] -- a chain with more non-empty mbufs than
	 * fragment slots would overrun the descriptor.  Confirm the
	 * capacity of lge_frags[] vs. worst-case chain length.
	 */
	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			tot_len += m->m_len;
			f = &cur_tx->lge_frags[frag];
			f->lge_fraglen = m->m_len;
			f->lge_fragptr_lo = vtophys(mtod(m, vm_offset_t));
			f->lge_fragptr_hi = 0;
			frag++;
		}
	}

	if (m != NULL)
		return(ENOBUFS);

	cur_tx->lge_mbuf = m_head;
	cur_tx->lge_ctl = LGE_TXCTL_WANTINTR|LGE_FRAGCNT(frag)|tot_len;
	LGE_INC((*txidx), LGE_TX_LIST_CNT);

	/* Queue for transmit */
	CSR_WRITE_4(sc, LGE_TXDESC_ADDR_LO, vtophys(cur_tx));

	return(0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
static void
lge_start(ifp)
	struct ifnet		*ifp;
{
	struct lge_softc	*sc;
	struct mbuf		*m_head = NULL;
	u_int32_t		idx;

	sc = ifp->if_softc;

	/* Nothing can be sent until the link is up. */
	if (!sc->lge_link)
		return;

	idx = sc->lge_cdata.lge_tx_prod;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	/* Fill descriptors while slots and TX command FIFO space remain. */
	while(sc->lge_ldata->lge_tx_list[idx].lge_mbuf == NULL) {
		if (CSR_READ_1(sc, LGE_TXCMDFREE_8BIT) == 0)
			break;

		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (lge_encap(sc, m_head, &idx)) {
			/* Put the packet back and stall until TX drains. */
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	sc->lge_cdata.lge_tx_prod = idx;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	return;
}

/*
 * Bring the interface up: reset the chip, program the MAC address
 * and mode registers, populate the RX/TX rings, load the multicast
 * filter and enable interrupts.
 */
static void
lge_init(xsc)
	void			*xsc;
{
	struct lge_softc	*sc = xsc;
	struct ifnet		*ifp = sc->lge_ifp;
	struct mii_data		*mii;
	int			s;

	if (ifp->if_flags & IFF_RUNNING)
		return;

	s = splimp();

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	lge_stop(sc);
	lge_reset(sc);

	mii = device_get_softc(sc->lge_miibus);

	/* Set MAC address */
	CSR_WRITE_4(sc, LGE_PAR0, *(u_int32_t *)(&IFP2ENADDR(sc->lge_ifp)[0]));
	CSR_WRITE_4(sc, LGE_PAR1, *(u_int32_t *)(&IFP2ENADDR(sc->lge_ifp)[4]));

	/* Init circular RX list. */
	if (lge_list_rx_init(sc) == ENOBUFS) {
		printf("lge%d: initialization failed: no "
		    "memory for rx buffers\n", sc->lge_unit);
		lge_stop(sc);
		(void)splx(s);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	lge_list_tx_init(sc);

	/* Set initial value for MODE1 register. */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_UCAST|
	    LGE_MODE1_TX_CRC|LGE_MODE1_TXPAD|
	    LGE_MODE1_RX_FLOWCTL|LGE_MODE1_SETRST_CTL0|
	    LGE_MODE1_SETRST_CTL1|LGE_MODE1_SETRST_CTL2);

	/*
	 * If we want promiscuous mode, set the allframes bit.
	 * (Writes without the SETRST bit clear the named mode bit.)
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		CSR_WRITE_4(sc, LGE_MODE1,
		    LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_PROMISC);
	} else {
		CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_PROMISC);
	}

	/*
	 * Set the capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		CSR_WRITE_4(sc, LGE_MODE1,
		    LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_BCAST);
	} else {
		CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_BCAST);
	}

	/* Packet padding workaround? */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RMVPAD);

	/* No error frames */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ERRPKTS);

	/* Receive large frames */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_GIANTS);

	/* Workaround: disable RX/TX flow control */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_TX_FLOWCTL);
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_FLOWCTL);

	/* Make sure to strip CRC from received frames */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_CRC);

	/* Turn off magic packet mode */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_MPACK_ENB);

	/* Turn off all VLAN stuff */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_VLAN_RX|LGE_MODE1_VLAN_TX|
	    LGE_MODE1_VLAN_STRIP|LGE_MODE1_VLAN_INSERT);

	/* Workaround: FIFO overflow */
	CSR_WRITE_2(sc, LGE_RXFIFO_HIWAT, 0x3FFF);
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL1|LGE_IMR_RXFIFO_WAT);

	/*
	 * Load the multicast filter.
	 */
	lge_setmulti(sc);

	/*
	 * Enable hardware checksum validation for all received IPv4
	 * packets, do not reject packets with bad checksums.
	 */
	CSR_WRITE_4(sc, LGE_MODE2, LGE_MODE2_RX_IPCSUM|
	    LGE_MODE2_RX_TCPCSUM|LGE_MODE2_RX_UDPCSUM|
	    LGE_MODE2_RX_ERRCSUM);

	/*
	 * Enable the delivery of PHY interrupts based on
	 * link/speed/duplex status changes.
	 */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL0|LGE_MODE1_GMIIPOLL);

	/* Enable receiver and transmitter. */
	CSR_WRITE_4(sc, LGE_RXDESC_ADDR_HI, 0);
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_RX_ENB);

	CSR_WRITE_4(sc, LGE_TXDESC_ADDR_HI, 0);
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_SETRST_CTL1|LGE_MODE1_TX_ENB);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_SETRST_CTL0|
	    LGE_IMR_SETRST_CTL1|LGE_IMR_INTR_ENB|LGE_INTRS);

	lge_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	(void)splx(s);

	/* Start the once-a-second stats/link timer. */
	sc->lge_stat_ch = timeout(lge_tick, sc, hz);

	return;
}

/*
 * Set media options.
 */
static int
lge_ifmedia_upd(ifp)
	struct ifnet		*ifp;
{
	struct lge_softc	*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;

	mii = device_get_softc(sc->lge_miibus);
	/* Link state must be re-learned after a media change. */
	sc->lge_link = 0;
	if (mii->mii_instance) {
		struct mii_softc	*miisc;
		for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
		    miisc = LIST_NEXT(miisc, mii_list))
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return(0);
}

/*
 * Report current media status.
 */
static void
lge_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct lge_softc	*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;

	mii = device_get_softc(sc->lge_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

/*
 * Handle socket ioctls: MTU, flags (promiscuous toggling without a
 * full reinit where possible), multicast list and media requests.
 */
static int
lge_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct lge_softc	*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int			s, error = 0;

	s = splimp();

	switch(command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > LGE_JUMBO_MTU)
			error = EINVAL;
		else
			ifp->if_mtu = ifr->ifr_mtu;
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->lge_if_flags & IFF_PROMISC)) {
				/* Just turn promiscuous mode on. */
				CSR_WRITE_4(sc, LGE_MODE1,
				    LGE_MODE1_SETRST_CTL1|
				    LGE_MODE1_RX_PROMISC);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->lge_if_flags & IFF_PROMISC) {
				/* Just turn promiscuous mode off. */
				CSR_WRITE_4(sc, LGE_MODE1,
				    LGE_MODE1_RX_PROMISC);
			} else {
				/* Anything else requires a full reinit. */
				ifp->if_flags &= ~IFF_RUNNING;
				lge_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				lge_stop(sc);
		}
		sc->lge_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		lge_setmulti(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->lge_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	(void)splx(s);

	return(error);
}

/*
 * TX watchdog fired: the chip stopped making progress.  Count an
 * output error and fully reinitialize the hardware.
 */
static void
lge_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct lge_softc	*sc;

	sc = ifp->if_softc;

	ifp->if_oerrors++;
	printf("lge%d: watchdog timeout\n", sc->lge_unit);

	lge_stop(sc);
	lge_reset(sc);
	ifp->if_flags &= ~IFF_RUNNING;
	lge_init(sc);

	if (ifp->if_snd.ifq_head != NULL)
		lge_start(ifp);

	return;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
lge_stop(sc)
	struct lge_softc	*sc;
{
	register int		i;
	struct ifnet		*ifp;

	ifp = sc->lge_ifp;
	ifp->if_timer = 0;
	untimeout(lge_tick, sc, sc->lge_stat_ch);
	/* Mask interrupts (write without SETRST clears the enable bit). */
	CSR_WRITE_4(sc, LGE_IMR, LGE_IMR_INTR_ENB);

	/* Disable receiver and transmitter. */
	CSR_WRITE_4(sc, LGE_MODE1, LGE_MODE1_RX_ENB|LGE_MODE1_TX_ENB);
	sc->lge_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < LGE_RX_LIST_CNT; i++) {
		if (sc->lge_ldata->lge_rx_list[i].lge_mbuf != NULL) {
			m_freem(sc->lge_ldata->lge_rx_list[i].lge_mbuf);
			sc->lge_ldata->lge_rx_list[i].lge_mbuf = NULL;
		}
	}
	bzero((char *)&sc->lge_ldata->lge_rx_list,
		sizeof(sc->lge_ldata->lge_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < LGE_TX_LIST_CNT; i++) {
		if (sc->lge_ldata->lge_tx_list[i].lge_mbuf != NULL) {
			m_freem(sc->lge_ldata->lge_tx_list[i].lge_mbuf);
			sc->lge_ldata->lge_tx_list[i].lge_mbuf = NULL;
		}
	}
	bzero((char *)&sc->lge_ldata->lge_tx_list,
		sizeof(sc->lge_ldata->lge_tx_list));

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	return;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
*/ static void lge_shutdown(dev) device_t dev; { struct lge_softc *sc; sc = device_get_softc(dev); lge_reset(sc); lge_stop(sc); return; } Index: stable/6/sys/dev/lnc/if_lnc.c =================================================================== --- stable/6/sys/dev/lnc/if_lnc.c (revision 149421) +++ stable/6/sys/dev/lnc/if_lnc.c (revision 149422) @@ -1,1556 +1,1558 @@ /*- * Copyright (c) 1994-2000 * Paul Richards. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * verbatim and that no modifications are made prior to this * point in the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name Paul Richards may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY PAUL RICHARDS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL PAUL RICHARDS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /* #define DIAGNOSTIC #define DEBUG * * TODO ---- * * Check all the XXX comments -- some of them are just things I've left * unfinished rather than "difficult" problems that were hacked around. * * Check log settings. * * Check how all the arpcom flags get set and used. * * Re-inline and re-static all routines after debugging. * * Remember to assign iobase in SHMEM probe routines. * * Replace all occurences of LANCE-controller-card etc in prints by the name * strings of the appropriate type -- nifty window dressing * * Add DEPCA support -- mostly done. * */ #include "opt_inet.h" /* Some defines that should really be in generic locations */ #define FCS_LEN 4 #define MULTICAST_FILTER_LEN 8 #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include devclass_t lnc_devclass; static char const * const nic_ident[] = { "Unknown", "BICC", "NE2100", "DEPCA", "CNET98S", /* PC-98 */ }; static char const * const ic_ident[] = { "Unknown", "LANCE", "C-LANCE", "PCnet-ISA", "PCnet-ISA+", "PCnet-ISA II", "PCnet-32 VL-Bus", "PCnet-PCI", "PCnet-PCI II", "PCnet-FAST", "PCnet-FAST+", "PCnet-Home", }; static void lnc_setladrf(struct lnc_softc *sc); static void lnc_reset(struct lnc_softc *sc); static void lnc_free_mbufs(struct lnc_softc *sc); static __inline int alloc_mbuf_cluster(struct lnc_softc *sc, struct host_ring_entry *desc); static __inline struct mbuf *chain_mbufs(struct lnc_softc *sc, int start_of_packet, int pkt_len); static __inline struct mbuf *mbuf_packet(struct lnc_softc *sc, int start_of_packet, int pkt_len); static void lnc_rint(struct lnc_softc *sc); static void lnc_tint(struct lnc_softc *sc); static void lnc_init(void *); static __inline int mbuf_to_buffer(struct mbuf *m, char *buffer); static __inline struct mbuf *chain_to_cluster(struct mbuf *m); static void lnc_start(struct ifnet *ifp); 
static int lnc_ioctl(struct ifnet *ifp, u_long command, caddr_t data); static void lnc_watchdog(struct ifnet *ifp); #ifdef DEBUG void lnc_dump_state(struct lnc_softc *sc); void mbuf_dump_chain(struct mbuf *m); #endif u_short read_csr(struct lnc_softc *sc, u_short port) { lnc_outw(sc->rap, port); return (lnc_inw(sc->rdp)); } void write_csr(struct lnc_softc *sc, u_short port, u_short val) { lnc_outw(sc->rap, port); lnc_outw(sc->rdp, val); } static __inline void write_bcr(struct lnc_softc *sc, u_short port, u_short val) { lnc_outw(sc->rap, port); lnc_outw(sc->bdp, val); } static __inline u_short read_bcr(struct lnc_softc *sc, u_short port) { lnc_outw(sc->rap, port); return (lnc_inw(sc->bdp)); } int lance_probe(struct lnc_softc *sc) { write_csr(sc, CSR0, STOP); if ((lnc_inw(sc->rdp) & STOP) && ! (read_csr(sc, CSR3))) { /* * Check to see if it's a C-LANCE. For the LANCE the INEA bit * cannot be set while the STOP bit is. This restriction is * removed for the C-LANCE. */ write_csr(sc, CSR0, INEA); if (read_csr(sc, CSR0) & INEA) return (C_LANCE); else return (LANCE); } else return (UNKNOWN); } void lnc_release_resources(device_t dev) { lnc_softc_t *sc = device_get_softc(dev); if (sc->irqres) { bus_teardown_intr(dev, sc->irqres, sc->intrhand); bus_release_resource(dev, SYS_RES_IRQ, sc->irqrid, sc->irqres); } if (sc->portres) bus_release_resource(dev, SYS_RES_IOPORT, sc->portrid, sc->portres); if (sc->drqres) bus_release_resource(dev, SYS_RES_DRQ, sc->drqrid, sc->drqres); if (sc->dmat) { if (sc->dmamap) { bus_dmamap_unload(sc->dmat, sc->dmamap); bus_dmamem_free(sc->dmat, sc->recv_ring, sc->dmamap); } bus_dma_tag_destroy(sc->dmat); } } /* * Set up the logical address filter for multicast packets */ static __inline void lnc_setladrf(struct lnc_softc *sc) { struct ifnet *ifp = sc->ifp; struct ifmultiaddr *ifma; u_long index; int i; if (sc->flags & IFF_ALLMULTI) { for (i=0; i < MULTICAST_FILTER_LEN; i++) sc->init_block->ladrf[i] = 0xFF; return; } /* * For each multicast 
address, calculate a crc for that address and * then use the high order 6 bits of the crc as a hash code where * bits 3-5 select the byte of the address filter and bits 0-2 select * the bit within that byte. */ bzero(sc->init_block->ladrf, MULTICAST_FILTER_LEN); + IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; index = ether_crc32_le(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; sc->init_block->ladrf[index >> 3] |= 1 << (index & 7); } + IF_ADDR_UNLOCK(ifp); } void lnc_stop(struct lnc_softc *sc) { write_csr(sc, CSR0, STOP); } static void lnc_reset(struct lnc_softc *sc) { lnc_init(sc); } static void lnc_free_mbufs(struct lnc_softc *sc) { int i; /* * We rely on other routines to keep the buff.mbuf field valid. If * it's not NULL then we assume it points to an allocated mbuf. */ for (i = 0; i < NDESC(sc->nrdre); i++) if ((sc->recv_ring + i)->buff.mbuf) m_free((sc->recv_ring + i)->buff.mbuf); for (i = 0; i < NDESC(sc->ntdre); i++) if ((sc->trans_ring + i)->buff.mbuf) m_free((sc->trans_ring + i)->buff.mbuf); if (sc->mbuf_count) m_freem(sc->mbufs); } static __inline int alloc_mbuf_cluster(struct lnc_softc *sc, struct host_ring_entry *desc) { register struct mds *md = desc->md; struct mbuf *m=0; int addr; /* Try and get cluster off local cache */ if (sc->mbuf_count) { sc->mbuf_count--; m = sc->mbufs; sc->mbufs = m->m_next; /* XXX m->m_data = m->m_ext.ext_buf;*/ } else { MGET(m, M_DONTWAIT, MT_DATA); if (!m) return(1); MCLGET(m, M_DONTWAIT); if (!m->m_ext.ext_buf) { m_free(m); return(1); } } desc->buff.mbuf = m; addr = kvtop(m->m_data); md->md0 = addr; md->md1= ((addr >> 16) & 0xff) | OWN; md->md2 = -(short)(MCLBYTES - sizeof(struct pkthdr)); md->md3 = 0; return(0); } static __inline struct mbuf * chain_mbufs(struct lnc_softc *sc, int start_of_packet, int pkt_len) { struct mbuf *head, *m; struct host_ring_entry *desc; /* * Turn head into a pkthdr mbuf -- * assumes a pkthdr 
type mbuf was
	 * allocated to the descriptor
	 * originally.
	 */
	desc = sc->recv_ring + start_of_packet;
	head = desc->buff.mbuf;
	head->m_flags |= M_PKTHDR;
	bzero(&head->m_pkthdr, sizeof(head->m_pkthdr));

	m = head;
	do {
		m = desc->buff.mbuf;
		/* Each ring buffer holds at most one cluster's worth. */
		m->m_len = min((MCLBYTES - sizeof(struct pkthdr)), pkt_len);
		pkt_len -= m->m_len;
		/* Replace the buffer we are stealing with a fresh cluster. */
		if (alloc_mbuf_cluster(sc, desc))
			return((struct mbuf *)NULL);
		INC_MD_PTR(start_of_packet, sc->nrdre)
		desc = sc->recv_ring + start_of_packet;
		m->m_next = desc->buff.mbuf;
	} while (start_of_packet != sc->recv_next);
	m->m_next = 0;
	return(head);
}

/*
 * Copy a completed receive packet out of the ring buffers into a
 * freshly allocated mbuf chain, returning each ring buffer to the
 * LANCE as it is drained.  Returns NULL (0) if mbuf allocation fails.
 */
static __inline struct mbuf *
mbuf_packet(struct lnc_softc *sc, int start_of_packet, int pkt_len)
{
	struct host_ring_entry *start;
	struct mbuf *head, *m, *m_prev;
	char *data, *mbuf_data;
	short blen;
	int amount;

	/* Get a pkthdr mbuf for the start of packet */
	MGETHDR(head, M_DONTWAIT, MT_DATA);
	if (!head) {
		LNCSTATS(drop_packet)
		return(0);
	}

	m = head;
	m->m_len = 0;
	start = sc->recv_ring + start_of_packet;
	/*blen = -(start->md->md2);*/
	blen = RECVBUFSIZE;	/* XXX More PCnet-32 crap */
	data = start->buff.data;
	mbuf_data = m->m_data;

	while (start_of_packet != sc->recv_next) {
		/*
		 * If the data left fits in a single buffer then set
		 * blen to the size of the data left.
		 */
		if (pkt_len < blen)
			blen = pkt_len;

		/*
		 * amount is least of data in current ring buffer and
		 * amount of space left in current mbuf.
		 */
		amount = min(blen, M_TRAILINGSPACE(m));
		if (amount == 0) {
			/* mbuf must be empty */
			m_prev = m;
			MGET(m, M_DONTWAIT, MT_DATA);
			if (!m) {
				m_freem(head);
				return(0);
			}
			/* Use a cluster when enough data remains to justify it. */
			if (pkt_len >= MINCLSIZE)
				MCLGET(m, M_DONTWAIT);
			m->m_len = 0;
			m_prev->m_next = m;
			amount = min(blen, M_TRAILINGSPACE(m));
			mbuf_data = m->m_data;
		}
		bcopy(data, mbuf_data, amount);
		blen -= amount;
		pkt_len -= amount;
		m->m_len += amount;
		data += amount;
		mbuf_data += amount;

		if (blen == 0) {
			/* Ring buffer drained: hand it back to the LANCE. */
			start->md->md1 &= HADR;
			start->md->md1 |= OWN;
			start->md->md2 = -RECVBUFSIZE; /* XXX - shouldn't be necessary */
			INC_MD_PTR(start_of_packet, sc->nrdre)
			start = sc->recv_ring + start_of_packet;
			data = start->buff.data;
			/*blen = -(start->md->md2);*/
			blen = RECVBUFSIZE;	/* XXX More PCnet-32 crap */
		}
	}
	return(head);
}

/*
 * Receive interrupt handler: walk the receive ring from recv_next,
 * handing completed packets up the stack and returning errored
 * packets' buffers to the LANCE.
 */
static void
lnc_rint(struct lnc_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct host_ring_entry *next, *start;
	int start_of_packet;
	struct mbuf *head;
	struct ether_header *eh;
	int lookahead;
	int flags;
	int pkt_len;

	/*
	 * The LANCE will issue a RINT interrupt when the ownership of the
	 * last buffer of a receive packet has been relinquished by the LANCE.
	 * Therefore, it can be assumed that a complete packet can be found
	 * before hitting buffers that are still owned by the LANCE, if not
	 * then there is a bug in the driver that is causing the descriptors
	 * to get out of sync.
	 */

#ifdef DIAGNOSTIC
	if ((sc->recv_ring + sc->recv_next)->md->md1 & OWN) {
		log(LOG_ERR,
		    "%s: Receive interrupt with buffer still owned by controller -- Resetting\n",
		    ifp->if_xname);
		lnc_reset(sc);
		return;
	}
	if (!((sc->recv_ring + sc->recv_next)->md->md1 & STP)) {
		log(LOG_ERR,
		    "%s: Receive interrupt but not start of packet -- Resetting\n",
		    ifp->if_xname);
		lnc_reset(sc);
		return;
	}
#endif

	lookahead = 0;
	next = sc->recv_ring + sc->recv_next;
	while ((flags = next->md->md1) & STP) {

		/* Make a note of the start of the packet */
		start_of_packet = sc->recv_next;

		/*
		 * Find the end of the packet.
Even if not data chaining, * jabber packets can overrun into a second descriptor. * If there is no error, then the ENP flag is set in the last * descriptor of the packet. If there is an error then the ERR * flag will be set in the descriptor where the error occured. * Therefore, to find the last buffer of a packet we search for * either ERR or ENP. */ if (!(flags & (ENP | MDERR))) { do { INC_MD_PTR(sc->recv_next, sc->nrdre) next = sc->recv_ring + sc->recv_next; flags = next->md->md1; } while (!(flags & (STP | OWN | ENP | MDERR))); if (flags & STP) { log(LOG_ERR, "%s: Start of packet found before end of previous in receive ring -- Resetting\n", ifp->if_xname); lnc_reset(sc); return; } if (flags & OWN) { if (lookahead) { /* * Looked ahead into a packet still * being received */ sc->recv_next = start_of_packet; break; } else { log(LOG_ERR, "%s: End of received packet not found-- Resetting\n", ifp->if_xname); lnc_reset(sc); return; } } } pkt_len = (next->md->md3 & MCNT) - FCS_LEN; /* Move pointer onto start of next packet */ INC_MD_PTR(sc->recv_next, sc->nrdre) next = sc->recv_ring + sc->recv_next; if (flags & MDERR) { const char *if_xname = ifp->if_xname; if (flags & RBUFF) { LNCSTATS(rbuff) log(LOG_ERR, "%s: Receive buffer error\n", if_xname); } if (flags & OFLO) { /* OFLO only valid if ENP is not set */ if (!(flags & ENP)) { LNCSTATS(oflo) log(LOG_ERR, "%s: Receive overflow error \n", if_xname); } } else if (flags & ENP) { if ((ifp->if_flags & IFF_PROMISC)==0) { /* * FRAM and CRC are valid only if ENP * is set and OFLO is not. 
*/ if (flags & FRAM) { LNCSTATS(fram) log(LOG_ERR, "%s: Framing error\n", if_xname); /* * FRAM is only set if there's a CRC * error so avoid multiple messages */ } else if (flags & CRC) { LNCSTATS(crc) log(LOG_ERR, "%s: Receive CRC error\n", if_xname); } } } /* Drop packet */ LNCSTATS(rerr) ifp->if_ierrors++; while (start_of_packet != sc->recv_next) { start = sc->recv_ring + start_of_packet; start->md->md2 = -RECVBUFSIZE; /* XXX - shouldn't be necessary */ start->md->md1 &= HADR; start->md->md1 |= OWN; INC_MD_PTR(start_of_packet, sc->nrdre) } } else { /* Valid packet */ ifp->if_ipackets++; if (sc->nic.mem_mode == DMA_MBUF) head = chain_mbufs(sc, start_of_packet, pkt_len); else head = mbuf_packet(sc, start_of_packet, pkt_len); if (head) { /* * First mbuf in packet holds the * ethernet and packet headers */ head->m_pkthdr.rcvif = ifp; head->m_pkthdr.len = pkt_len ; eh = (struct ether_header *) head->m_data; /* * vmware ethernet hardware emulation loops * packets back to itself, violates IFF_SIMPLEX. * drop it if it is from myself. */ if (bcmp(eh->ether_shost, IFP2ENADDR(sc->ifp), ETHER_ADDR_LEN) == 0) { m_freem(head); } else { (*ifp->if_input)(ifp, head); } } else { log(LOG_ERR,"%s: Packet dropped, no mbufs\n",ifp->if_xname); LNCSTATS(drop_packet) } } lookahead++; } /* * At this point all completely received packets have been processed * so clear RINT since any packets that have arrived while we were in * here have been dealt with. */ lnc_outw(sc->rdp, RINT | INEA); } static void lnc_tint(struct lnc_softc *sc) { struct host_ring_entry *next, *start; int start_of_packet; int lookahead; /* * If the driver is reset in this routine then we return immediately to * the interrupt driver routine. Any interrupts that have occured * since the reset will be dealt with there. sc->trans_next * should point to the start of the first packet that was awaiting * transmission after the last transmit interrupt was dealt with. 
The * LANCE should have relinquished ownership of that descriptor before * the interrupt. Therefore, sc->trans_next should point to a * descriptor with STP set and OWN cleared. If not then the driver's * pointers are out of sync with the LANCE, which signifies a bug in * the driver. Therefore, the following two checks are really * diagnostic, since if the driver is working correctly they should * never happen. */ #ifdef DIAGNOSTIC if ((sc->trans_ring + sc->trans_next)->md->md1 & OWN) { log(LOG_ERR, "%s: Transmit interrupt with buffer still owned by controller -- Resetting\n", sc->ifp->if_xname); lnc_reset(sc); return; } #endif /* * The LANCE will write the status information for the packet it just * tried to transmit in one of two places. If the packet was * transmitted successfully then the status will be written into the * last descriptor of the packet. If the transmit failed then the * status will be written into the descriptor that was being accessed * when the error occured and all subsequent descriptors in that * packet will have been relinquished by the LANCE. * * At this point we know that sc->trans_next points to the start * of a packet that the LANCE has just finished trying to transmit. * We now search for a buffer with either ENP or ERR set. */ lookahead = 0; do { start_of_packet = sc->trans_next; next = sc->trans_ring + sc->trans_next; #ifdef DIAGNOSTIC if (!(next->md->md1 & STP)) { log(LOG_ERR, "%s: Transmit interrupt but not start of packet -- Resetting\n", sc->ifp->if_xname); lnc_reset(sc); return; } #endif /* * Find end of packet. 
*/ if (!(next->md->md1 & (ENP | MDERR))) { do { INC_MD_PTR(sc->trans_next, sc->ntdre) next = sc->trans_ring + sc->trans_next; } while (!(next->md->md1 & (STP | OWN | ENP | MDERR))); if (next->md->md1 & STP) { log(LOG_ERR, "%s: Start of packet found before end of previous in transmit ring -- Resetting\n", sc->ifp->if_xname); lnc_reset(sc); return; } if (next->md->md1 & OWN) { if (lookahead) { /* * Looked ahead into a packet still * being transmitted */ sc->trans_next = start_of_packet; break; } else { log(LOG_ERR, "%s: End of transmitted packet not found -- Resetting\n", sc->ifp->if_xname); lnc_reset(sc); return; } } } /* * Check for ERR first since other flags are irrelevant if an * error occurred. */ if (next->md->md1 & MDERR) { LNCSTATS(terr) sc->ifp->if_oerrors++; if (next->md->md3 & LCOL) { LNCSTATS(lcol) log(LOG_ERR, "%s: Transmit late collision -- Net error?\n", sc->ifp->if_xname); sc->ifp->if_collisions++; /* * Clear TBUFF since it's not valid when LCOL * set */ next->md->md3 &= ~TBUFF; } if (next->md->md3 & LCAR) { LNCSTATS(lcar) log(LOG_ERR, "%s: Loss of carrier during transmit -- Net error?\n", sc->ifp->if_xname); } if (next->md->md3 & RTRY) { LNCSTATS(rtry) log(LOG_ERR, "%s: Transmit of packet failed after 16 attempts -- TDR = %d\n", sc->ifp->if_xname, ((sc->trans_ring + sc->trans_next)->md->md3 & TDR)); sc->ifp->if_collisions += 16; /* * Clear TBUFF since it's not valid when RTRY * set */ next->md->md3 &= ~TBUFF; } /* * TBUFF is only valid if neither LCOL nor RTRY are set. * We need to check UFLO after LCOL and RTRY so that we * know whether or not TBUFF is valid. If either are * set then TBUFF will have been cleared above. A * UFLO error will turn off the transmitter so we * have to reset. 
* */ if (next->md->md3 & UFLO) { LNCSTATS(uflo) /* * If an UFLO has occured it's possibly due * to a TBUFF error */ if (next->md->md3 & TBUFF) { LNCSTATS(tbuff) log(LOG_ERR, "%s: Transmit buffer error -- Resetting\n", sc->ifp->if_xname); } else log(LOG_ERR, "%s: Transmit underflow error -- Resetting\n", sc->ifp->if_xname); lnc_reset(sc); return; } do { INC_MD_PTR(sc->trans_next, sc->ntdre) next = sc->trans_ring + sc->trans_next; } while (!(next->md->md1 & STP) && (sc->trans_next != sc->next_to_send)); } else { /* * Since we check for ERR first then if we get here * the packet was transmitted correctly. There may * still have been non-fatal errors though. * Don't bother checking for DEF, waste of time. */ sc->ifp->if_opackets++; if (next->md->md1 & MORE) { LNCSTATS(more) sc->ifp->if_collisions += 2; } /* * ONE is invalid if LCOL is set. If LCOL was set then * ERR would have also been set and we would have * returned from lnc_tint above. Therefore we can * assume if we arrive here that ONE is valid. * */ if (next->md->md1 & ONE) { LNCSTATS(one) sc->ifp->if_collisions++; } INC_MD_PTR(sc->trans_next, sc->ntdre) next = sc->trans_ring + sc->trans_next; } /* * Clear descriptors and free any mbufs. */ do { start = sc->trans_ring + start_of_packet; start->md->md1 &= HADR; if (sc->nic.mem_mode == DMA_MBUF) { /* Cache clusters on a local queue */ if ((start->buff.mbuf->m_flags & M_EXT) && (sc->mbuf_count < MBUF_CACHE_LIMIT)) { if (sc->mbuf_count) { start->buff.mbuf->m_next = sc->mbufs; sc->mbufs = start->buff.mbuf; } else sc->mbufs = start->buff.mbuf; sc->mbuf_count++; start->buff.mbuf = 0; } else { /* * XXX should this be m_freem()? */ m_free(start->buff.mbuf); start->buff.mbuf = NULL; } } sc->pending_transmits--; INC_MD_PTR(start_of_packet, sc->ntdre) }while (start_of_packet != sc->trans_next); /* * There's now at least one free descriptor * in the ring so indicate that we can accept * more packets again. 
	 */
		sc->ifp->if_flags &= ~IFF_OACTIVE;

		lookahead++;

	} while (sc->pending_transmits && !(next->md->md1 & OWN));

	/*
	 * Clear TINT since we've dealt with all
	 * the completed transmissions.
	 */
	lnc_outw(sc->rdp, TINT | INEA);
}

/*
 * Common attach code: allocate and fill in the ifnet, read the MAC
 * address out of the PROM and attach to the ethernet layer.  Returns
 * 1 on success, 0 if the ifnet could not be allocated.
 */
int
lnc_attach_common(device_t dev)
{
	lnc_softc_t *sc = device_get_softc(dev);
	int i;
	int skip;
	u_char eaddr[6];

	/* BICC and CNET98S boards space the PROM bytes two apart. */
	switch (sc->nic.ident) {
	case BICC:
	case CNET98S:
		skip = 2;
		break;
	default:
		skip = 1;
		break;
	}

	/* Set default mode */
	sc->nic.mode = NORMAL;

	/* Fill in arpcom structure entries */
	sc->ifp = if_alloc(IFT_ETHER);
	if (sc->ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		return (0);
	}
	sc->ifp->if_softc = sc;
	if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev));
	sc->ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
	    IFF_NEEDSGIANT;
	sc->ifp->if_timer = 0;
	sc->ifp->if_start = lnc_start;
	sc->ifp->if_ioctl = lnc_ioctl;
	sc->ifp->if_watchdog = lnc_watchdog;
	sc->ifp->if_init = lnc_init;
	IFQ_SET_MAXLEN(&sc->ifp->if_snd, IFQ_MAXLEN);
	sc->ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
	IFQ_SET_READY(&sc->ifp->if_snd);

	/* Extract MAC address from PROM */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		eaddr[i] = lnc_inb(i * skip);

	ether_ifattach(sc->ifp, eaddr);

	if (sc->nic.ic == LANCE || sc->nic.ic == C_LANCE)
		if_printf(sc->ifp, "%s (%s)\n",
		    nic_ident[sc->nic.ident], ic_ident[sc->nic.ic]);
	else
		if_printf(sc->ifp, "%s\n", ic_ident[sc->nic.ic]);

	return (1);
}

/*
 * Common detach code: detach from the ethernet layer, stop the
 * hardware and release bus resources.
 */
int
lnc_detach_common(device_t dev)
{
	lnc_softc_t *sc = device_get_softc(dev);
	int s = splimp();

	ether_ifdetach(sc->ifp);
	if_free(sc->ifp);
	lnc_stop(sc);
	lnc_release_resources(dev);

	splx(s);
	return (0);
}

/*
 * Initialise the controller: lay out the init block, descriptor rings
 * and buffers, load the init block address into CSR1/CSR2 and start
 * the chip.  Also re-run to apply mode changes (see lnc_ioctl).
 */
static void
lnc_init(xsc)
	void *xsc;
{
	struct lnc_softc *sc = xsc;
	int s, i;
	char *lnc_mem;

	/* Shut down interface */
	s = splimp();
	lnc_stop(sc);
	sc->ifp->if_flags |= IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; /* XXX??? */

	/*
	 * This sets up the memory area for the controller.
Memory is set up for * the initialisation block (12 words of contiguous memory starting * on a word boundary),the transmit and receive ring structures (each * entry is 4 words long and must start on a quadword boundary) and * the data buffers. * * The alignment tests are particularly paranoid. */ sc->recv_next = 0; sc->trans_ring = sc->recv_ring + NDESC(sc->nrdre); sc->trans_next = 0; if (sc->nic.mem_mode == SHMEM) lnc_mem = (char *)(uintptr_t)sc->nic.iobase; else lnc_mem = (char *) (sc->trans_ring + NDESC(sc->ntdre)); lnc_mem = (char *)(((long)lnc_mem + 1) & ~1); sc->init_block = (struct init_block *) ((long) lnc_mem & ~1); lnc_mem = (char *) (sc->init_block + 1); lnc_mem = (char *)(((long)lnc_mem + 7) & ~7); /* Initialise pointers to descriptor entries */ for (i = 0; i < NDESC(sc->nrdre); i++) { (sc->recv_ring + i)->md = (struct mds *) lnc_mem; lnc_mem += sizeof(struct mds); } for (i = 0; i < NDESC(sc->ntdre); i++) { (sc->trans_ring + i)->md = (struct mds *) lnc_mem; lnc_mem += sizeof(struct mds); } /* Initialise the remaining ring entries */ if (sc->nic.mem_mode == DMA_MBUF) { sc->mbufs = 0; sc->mbuf_count = 0; /* Free previously allocated mbufs */ if (sc->flags & LNC_INITIALISED) lnc_free_mbufs(sc); for (i = 0; i < NDESC(sc->nrdre); i++) { if (alloc_mbuf_cluster(sc, sc->recv_ring+i)) { log(LOG_ERR, "Initialisation failed -- no mbufs\n"); splx(s); return; } } for (i = 0; i < NDESC(sc->ntdre); i++) { (sc->trans_ring + i)->buff.mbuf = 0; (sc->trans_ring + i)->md->md0 = 0; (sc->trans_ring + i)->md->md1 = 0; (sc->trans_ring + i)->md->md2 = 0; (sc->trans_ring + i)->md->md3 = 0; } } else { for (i = 0; i < NDESC(sc->nrdre); i++) { (sc->recv_ring + i)->md->md0 = kvtop(lnc_mem); (sc->recv_ring + i)->md->md1 = ((kvtop(lnc_mem) >> 16) & 0xff) | OWN; (sc->recv_ring + i)->md->md2 = -RECVBUFSIZE; (sc->recv_ring + i)->md->md3 = 0; (sc->recv_ring + i)->buff.data = lnc_mem; lnc_mem += RECVBUFSIZE; } for (i = 0; i < NDESC(sc->ntdre); i++) { (sc->trans_ring + i)->md->md0 = 
kvtop(lnc_mem); (sc->trans_ring + i)->md->md1 = ((kvtop(lnc_mem) >> 16) & 0xff); (sc->trans_ring + i)->md->md2 = 0; (sc->trans_ring + i)->md->md3 = 0; (sc->trans_ring + i)->buff.data = lnc_mem; lnc_mem += TRANSBUFSIZE; } } sc->next_to_send = 0; /* Set up initialisation block */ sc->init_block->mode = sc->nic.mode; for (i = 0; i < ETHER_ADDR_LEN; i++) sc->init_block->padr[i] = IFP2ENADDR(sc->ifp)[i]; lnc_setladrf(sc); sc->init_block->rdra = kvtop(sc->recv_ring->md); sc->init_block->rlen = ((kvtop(sc->recv_ring->md) >> 16) & 0xff) | (sc->nrdre << 13); sc->init_block->tdra = kvtop(sc->trans_ring->md); sc->init_block->tlen = ((kvtop(sc->trans_ring->md) >> 16) & 0xff) | (sc->ntdre << 13); /* Set flags to show that the memory area is valid */ sc->flags |= LNC_INITIALISED; sc->pending_transmits = 0; /* Give the LANCE the physical address of the initialisation block */ if (sc->nic.ic == PCnet_Home) { u_short media; /* Set PHY_SEL to HomeRun */ media = read_bcr(sc, BCR49); media &= ~3; media |= 1; write_bcr(sc, BCR49, media); } write_csr(sc, CSR1, kvtop(sc->init_block)); write_csr(sc, CSR2, (kvtop(sc->init_block) >> 16) & 0xff); /* * Depending on which controller this is, CSR3 has different meanings. * For the Am7990 it controls DMA operations, for the Am79C960 it * controls interrupt masks and transmitter algorithms. In either * case, none of the flags are set. * */ write_csr(sc, CSR3, 0); /* Let's see if it starts */ /* printf("Enabling lnc interrupts\n"); sc->ifp->if_timer = 10; write_csr(sc, CSR0, INIT|INEA); */ /* * Now that the initialisation is complete there's no reason to * access anything except CSR0, so we leave RAP pointing there * so we can just access RDP from now on, saving an outw each * time. */ write_csr(sc, CSR0, INIT); for(i=0; i < 1000; i++) if (read_csr(sc, CSR0) & IDON) break; if (read_csr(sc, CSR0) & IDON) { /* * Enable interrupts, start the LANCE, mark the interface as * running and transmit any pending packets. 
*/ write_csr(sc, CSR0, STRT | INEA); sc->ifp->if_flags |= IFF_RUNNING; sc->ifp->if_flags &= ~IFF_OACTIVE; lnc_start(sc->ifp); } else log(LOG_ERR, "%s: Initialisation failed\n", sc->ifp->if_xname); splx(s); } /* * The interrupt flag (INTR) will be set and provided that the interrupt enable * flag (INEA) is also set, the interrupt pin will be driven low when any of * the following occur: * * 1) Completion of the initialisation routine (IDON). 2) The reception of a * packet (RINT). 3) The transmission of a packet (TINT). 4) A transmitter * timeout error (BABL). 5) A missed packet (MISS). 6) A memory error (MERR). * * The interrupt flag is cleared when all of the above conditions are cleared. * * If the driver is reset from this routine then it first checks to see if any * interrupts have ocurred since the reset and handles them before returning. * This is because the NIC may signify a pending interrupt in CSR0 using the * INTR flag even if a hardware interrupt is currently inhibited (at least I * think it does from reading the data sheets). We may as well deal with * these pending interrupts now rather than get the overhead of another * hardware interrupt immediately upon returning from the interrupt handler. * */ void lncintr(void *arg) { lnc_softc_t *sc = arg; u_short csr0; /* * INEA is the only bit that can be cleared by writing a 0 to it so * we have to include it in any writes that clear other flags. */ while ((csr0 = lnc_inw(sc->rdp)) & INTR) { /* * Clear interrupt flags early to avoid race conditions. The * controller can still set these flags even while we're in * this interrupt routine. If the flag is still set from the * event that caused this interrupt any new events will * be missed. 
		 */
		lnc_outw(sc->rdp, csr0);
		/*lnc_outw(sc->rdp, IDON | CERR | BABL | MISS | MERR | RINT | TINT | INEA);*/

#ifdef notyet
		if (csr0 & IDON) {
			printf("IDON\n");
			sc->ifp->if_timer = 0;
			write_csr(sc, CSR0, STRT | INEA);
			sc->ifp->if_flags |= IFF_RUNNING;
			sc->ifp->if_flags &= ~IFF_OACTIVE;
			lnc_start(sc->ifp);
			continue;
		}
#endif

		/* Log error conditions; only MERR forces a full reset. */
		if (csr0 & ERR) {
			if (csr0 & CERR) {
				log(LOG_ERR, "%s: Heartbeat error -- SQE test failed\n",
				    sc->ifp->if_xname);
				LNCSTATS(cerr)
			}
			if (csr0 & BABL) {
				log(LOG_ERR, "%s: Babble error - more than 1519 bytes transmitted\n",
				    sc->ifp->if_xname);
				LNCSTATS(babl)
				sc->ifp->if_oerrors++;
			}
			if (csr0 & MISS) {
				log(LOG_ERR, "%s: Missed packet -- no receive buffer\n",
				    sc->ifp->if_xname);
				LNCSTATS(miss)
				sc->ifp->if_ierrors++;
			}
			if (csr0 & MERR) {
				log(LOG_ERR, "%s: Memory error -- Resetting\n",
				    sc->ifp->if_xname);
				LNCSTATS(merr)
				lnc_reset(sc);
				continue;
			}
		}
		if (csr0 & RINT) {
			LNCSTATS(rint)
			lnc_rint(sc);
		}
		if (csr0 & TINT) {
			LNCSTATS(tint)
			sc->ifp->if_timer = 0;
			lnc_tint(sc);
		}

		/*
		 * If there's room in the transmit descriptor ring then queue
		 * some more transmit packets.
		 */
		if (!(sc->ifp->if_flags & IFF_OACTIVE))
			lnc_start(sc->ifp);
	}
}

/*
 * Copy an entire mbuf chain into a flat buffer.  Returns the total
 * number of bytes copied; the caller must ensure the buffer is large
 * enough.
 */
static __inline int
mbuf_to_buffer(struct mbuf *m, char *buffer)
{
	int len = 0;

	for ( ; m; m = m->m_next) {
		bcopy(mtod(m, caddr_t), buffer, m->m_len);
		buffer += m->m_len;
		len += m->m_len;
	}

	return(len);
}

/*
 * Compact an mbuf chain into a single cluster mbuf, freeing the
 * original chain on success.  Returns NULL (0) if allocation failed,
 * in which case the original chain is left untouched.
 */
static __inline struct mbuf *
chain_to_cluster(struct mbuf *m)
{
	struct mbuf *new;

	MGET(new, M_DONTWAIT, MT_DATA);
	if (new) {
		MCLGET(new, M_DONTWAIT);
		if (new->m_ext.ext_buf) {
			new->m_len = mbuf_to_buffer(m, new->m_data);
			m_freem(m);
			return(new);
		} else
			m_free(new);
	}
	return(0);
}

/*
 * IFF_OACTIVE and IFF_RUNNING are checked in ether_output so it's redundant
 * to check them again since we wouldn't have got here if they were not
 * appropriately set. This is also called from lnc_init and lncintr but the
 * flags should be ok at those points too.
*/ static void lnc_start(struct ifnet *ifp) { struct lnc_softc *sc = ifp->if_softc; struct host_ring_entry *desc; int tmp; int end_of_packet; struct mbuf *head, *m; int len, chunk; int addr; int no_entries_needed; do { IFQ_DRV_DEQUEUE(&sc->ifp->if_snd, head); if (!head) return; if (sc->nic.mem_mode == DMA_MBUF) { no_entries_needed = 0; for (m=head; m; m = m->m_next) no_entries_needed++; /* * We try and avoid bcopy as much as possible * but there are two cases when we use it. * * 1) If there are not enough free entries in the ring * to hold each mbuf in the chain then compact the * chain into a single cluster. * * 2) The Am7990 and Am79C90 must not have less than * 100 bytes in the first descriptor of a chained * packet so it's necessary to shuffle the mbuf * contents to ensure this. */ if (no_entries_needed > (NDESC(sc->ntdre) - sc->pending_transmits)) { if (!(head = chain_to_cluster(head))) { log(LOG_ERR, "%s: Couldn't get mbuf for transmit packet -- Resetting \n ",ifp->if_xname); lnc_reset(sc); return; } } else if ((sc->nic.ic == LANCE) || (sc->nic.ic == C_LANCE)) { if ((head->m_len < 100) && (head->m_next)) { len = 100 - head->m_len; if (M_TRAILINGSPACE(head) < len) { /* * Move data to start of data * area. We assume the first * mbuf has a packet header * and is not a cluster. */ bcopy((caddr_t)head->m_data, (caddr_t)head->m_pktdat, head->m_len); head->m_data = head->m_pktdat; } m = head->m_next; while (m && (len > 0)) { chunk = min(len, m->m_len); bcopy(mtod(m, caddr_t), mtod(head, caddr_t) + head->m_len, chunk); len -= chunk; head->m_len += chunk; m->m_len -= chunk; m->m_data += chunk; if (m->m_len <= 0) { m = m_free(m); head->m_next = m; } } } } tmp = sc->next_to_send; /* * On entering this loop we know that tmp points to a * descriptor with a clear OWN bit. 
*/ desc = sc->trans_ring + tmp; len = ETHER_MIN_LEN; for (m = head; m; m = m->m_next) { desc->buff.mbuf = m; addr = kvtop(m->m_data); desc->md->md0 = addr; desc->md->md1 = ((addr >> 16) & 0xff); desc->md->md3 = 0; desc->md->md2 = -m->m_len; sc->pending_transmits++; len -= m->m_len; INC_MD_PTR(tmp, sc->ntdre) desc = sc->trans_ring + tmp; } end_of_packet = tmp; DEC_MD_PTR(tmp, sc->ntdre) desc = sc->trans_ring + tmp; desc->md->md1 |= ENP; if (len > 0) desc->md->md2 -= len; /* * Set OWN bits in reverse order, otherwise the Lance * could start sending the packet before all the * buffers have been relinquished by the host. */ while (tmp != sc->next_to_send) { desc->md->md1 |= OWN; DEC_MD_PTR(tmp, sc->ntdre) desc = sc->trans_ring + tmp; } sc->next_to_send = end_of_packet; desc->md->md1 |= STP | OWN; } else { sc->pending_transmits++; desc = sc->trans_ring + sc->next_to_send; len = mbuf_to_buffer(head, desc->buff.data); desc->md->md3 = 0; desc->md->md2 = -max(len, ETHER_MIN_LEN - ETHER_CRC_LEN); desc->md->md1 |= OWN | STP | ENP; INC_MD_PTR(sc->next_to_send, sc->ntdre) } /* Force an immediate poll of the transmit ring */ lnc_outw(sc->rdp, TDMD | INEA); /* * Set a timer so if the buggy Am7990.h shuts * down we can wake it up. */ ifp->if_timer = 2; BPF_MTAP(sc->ifp, head); if (sc->nic.mem_mode != DMA_MBUF) m_freem(head); } while (sc->pending_transmits < NDESC(sc->ntdre)); /* * Transmit ring is full so set IFF_OACTIVE * since we can't buffer any more packets. 
*/ sc->ifp->if_flags |= IFF_OACTIVE; LNCSTATS(trans_ring_full) } static int lnc_ioctl(struct ifnet * ifp, u_long command, caddr_t data) { struct lnc_softc *sc = ifp->if_softc; int s, error = 0; s = splimp(); switch (command) { case SIOCSIFFLAGS: #ifdef DEBUG if (ifp->if_flags & IFF_DEBUG) sc->lnc_debug = 1; else sc->lnc_debug = 0; #endif if (ifp->if_flags & IFF_PROMISC) { if (!(sc->nic.mode & PROM)) { sc->nic.mode |= PROM; lnc_init(sc); } } else if (sc->nic.mode & PROM) { sc->nic.mode &= ~PROM; lnc_init(sc); } if ((ifp->if_flags & IFF_ALLMULTI) && !(sc->flags & LNC_ALLMULTI)) { sc->flags |= LNC_ALLMULTI; lnc_init(sc); } else if (!(ifp->if_flags & IFF_ALLMULTI) && (sc->flags & LNC_ALLMULTI)) { sc->flags &= ~LNC_ALLMULTI; lnc_init(sc); } if ((ifp->if_flags & IFF_UP) == 0 && (ifp->if_flags & IFF_RUNNING) != 0) { /* * If interface is marked down and it is running, * then stop it. */ lnc_stop(sc); ifp->if_flags &= ~IFF_RUNNING; } else if ((ifp->if_flags & IFF_UP) != 0 && (ifp->if_flags & IFF_RUNNING) == 0) { /* * If interface is marked up and it is stopped, then * start it. 
*/ lnc_init(sc); } break; case SIOCADDMULTI: case SIOCDELMULTI: lnc_init(sc); error = 0; break; default: error = ether_ioctl(ifp, command, data); break; } (void) splx(s); return error; } static void lnc_watchdog(struct ifnet *ifp) { log(LOG_ERR, "%s: Device timeout -- Resetting\n", ifp->if_xname); ifp->if_oerrors++; lnc_reset(ifp->if_softc); } #ifdef DEBUG void lnc_dump_state(struct lnc_softc *sc) { int i; printf("\nDriver/NIC [%s] state dump\n", sc->ifp->if_xname); printf("Memory access mode: %b\n", sc->nic.mem_mode, MEM_MODES); printf("Host memory\n"); printf("-----------\n"); printf("Receive ring: base = %p, next = %p\n", (void *)sc->recv_ring, (void *)(sc->recv_ring + sc->recv_next)); for (i = 0; i < NDESC(sc->nrdre); i++) printf("\t%d:%p md = %p buff = %p\n", i, (void *)(sc->recv_ring + i), (void *)(sc->recv_ring + i)->md, (void *)(sc->recv_ring + i)->buff.data); printf("Transmit ring: base = %p, next = %p\n", (void *)sc->trans_ring, (void *)(sc->trans_ring + sc->trans_next)); for (i = 0; i < NDESC(sc->ntdre); i++) printf("\t%d:%p md = %p buff = %p\n", i, (void *)(sc->trans_ring + i), (void *)(sc->trans_ring + i)->md, (void *)(sc->trans_ring + i)->buff.data); printf("Lance memory (may be on host(DMA) or card(SHMEM))\n"); printf("Init block = %p\n", (void *)sc->init_block); printf("\tmode = %b rlen:rdra = %x:%x tlen:tdra = %x:%x\n", sc->init_block->mode, INIT_MODE, sc->init_block->rlen, sc->init_block->rdra, sc->init_block->tlen, sc->init_block->tdra); printf("Receive descriptor ring\n"); for (i = 0; i < NDESC(sc->nrdre); i++) printf("\t%d buffer = 0x%x%x, BCNT = %d,\tMCNT = %u,\tflags = %b\n", i, ((sc->recv_ring + i)->md->md1 & HADR), (sc->recv_ring + i)->md->md0, -(short) (sc->recv_ring + i)->md->md2, (sc->recv_ring + i)->md->md3, (((sc->recv_ring + i)->md->md1 & ~HADR) >> 8), RECV_MD1); printf("Transmit descriptor ring\n"); for (i = 0; i < NDESC(sc->ntdre); i++) printf("\t%d buffer = 0x%x%x, BCNT = %d,\tflags = %b %b\n", i, ((sc->trans_ring + i)->md->md1 & 
HADR), (sc->trans_ring + i)->md->md0, -(short) (sc->trans_ring + i)->md->md2, ((sc->trans_ring + i)->md->md1 >> 8), TRANS_MD1, ((sc->trans_ring + i)->md->md3 >> 10), TRANS_MD3); printf("\nnext_to_send = %x\n", sc->next_to_send); printf("\n CSR0 = %b CSR1 = %x CSR2 = %x CSR3 = %x\n\n", read_csr(sc, CSR0), CSR0_FLAGS, read_csr(sc, CSR1), read_csr(sc, CSR2), read_csr(sc, CSR3)); /* Set RAP back to CSR0 */ lnc_outw(sc->rap, CSR0); } void mbuf_dump_chain(struct mbuf * m) { #define MBUF_FLAGS \ "\20\1M_EXT\2M_PKTHDR\3M_EOR\4UNKNOWN\5M_BCAST\6M_MCAST" if (!m) log(LOG_DEBUG, "m == NULL\n"); do { log(LOG_DEBUG, "m = %p\n", (void *)m); log(LOG_DEBUG, "m_hdr.mh_next = %p\n", (void *)m->m_hdr.mh_next); log(LOG_DEBUG, "m_hdr.mh_nextpkt = %p\n", (void *)m->m_hdr.mh_nextpkt); log(LOG_DEBUG, "m_hdr.mh_len = %d\n", m->m_hdr.mh_len); log(LOG_DEBUG, "m_hdr.mh_data = %p\n", (void *)m->m_hdr.mh_data); log(LOG_DEBUG, "m_hdr.mh_type = %d\n", m->m_hdr.mh_type); log(LOG_DEBUG, "m_hdr.mh_flags = %b\n", m->m_hdr.mh_flags, MBUF_FLAGS); if (!(m->m_hdr.mh_flags & (M_PKTHDR | M_EXT))) log(LOG_DEBUG, "M_dat.M_databuf = %p\n", (void *)m->M_dat.M_databuf); else { if (m->m_hdr.mh_flags & M_PKTHDR) { log(LOG_DEBUG, "M_dat.MH.MH_pkthdr.len = %d\n", m->M_dat.MH.MH_pkthdr.len); log(LOG_DEBUG, "M_dat.MH.MH_pkthdr.rcvif = %p\n", (void *)m->M_dat.MH.MH_pkthdr.rcvif); if (!(m->m_hdr.mh_flags & M_EXT)) log(LOG_DEBUG, "M_dat.MH.MH_dat.MH_databuf = %p\n", (void *)m->M_dat.MH.MH_dat.MH_databuf); } if (m->m_hdr.mh_flags & M_EXT) { log(LOG_DEBUG, "M_dat.MH.MH_dat.MH_ext.ext_buff %p\n", (void *)m->M_dat.MH.MH_dat.MH_ext.ext_buf); log(LOG_DEBUG, "M_dat.MH.MH_dat.MH_ext.ext_free %p\n", (void *)m->M_dat.MH.MH_dat.MH_ext.ext_free); log(LOG_DEBUG, "M_dat.MH.MH_dat.MH_ext.ext_size %d\n", m->M_dat.MH.MH_dat.MH_ext.ext_size); } } } while ((m = m->m_next) != NULL); } #endif Index: stable/6/sys/dev/my/if_my.c =================================================================== --- stable/6/sys/dev/my/if_my.c (revision 
149421) +++ stable/6/sys/dev/my/if_my.c (revision 149422) @@ -1,1840 +1,1842 @@ /*- * Written by: yen_cw@myson.com.tw * Copyright (c) 2002 Myson Technology Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * Myson fast ethernet PCI NIC driver, available at: http://www.myson.com.tw/ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #define NBPFILTER 1 #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include /* for DELAY */ #include #include #include #include #include #include #include #include #include "miibus_if.h" /* * #define MY_USEIOSPACE */ static int MY_USEIOSPACE = 1; #if (MY_USEIOSPACE) #define MY_RES SYS_RES_IOPORT #define MY_RID MY_PCI_LOIO #else #define MY_RES SYS_RES_MEMORY #define MY_RID MY_PCI_LOMEM #endif #include #ifndef lint static const char rcsid[] = "$Id: if_my.c,v 1.16 2003/04/15 06:37:25 mdodd Exp $"; #endif /* * Various supported device vendors/types and their names. */ struct my_type *my_info_tmp; static struct my_type my_devs[] = { {MYSONVENDORID, MTD800ID, "Myson MTD80X Based Fast Ethernet Card"}, {MYSONVENDORID, MTD803ID, "Myson MTD80X Based Fast Ethernet Card"}, {MYSONVENDORID, MTD891ID, "Myson MTD89X Based Giga Ethernet Card"}, {0, 0, NULL} }; /* * Various supported PHY vendors/types and their names. Note that this driver * will work with pretty much any MII-compliant PHY, so failure to positively * identify the chip is not a fatal error. 
*/ static struct my_type my_phys[] = { {MysonPHYID0, MysonPHYID0, ""}, {SeeqPHYID0, SeeqPHYID0, ""}, {AhdocPHYID0, AhdocPHYID0, ""}, {MarvellPHYID0, MarvellPHYID0, ""}, {LevelOnePHYID0, LevelOnePHYID0, ""}, {0, 0, ""} }; static int my_probe(device_t); static int my_attach(device_t); static int my_detach(device_t); static int my_newbuf(struct my_softc *, struct my_chain_onefrag *); static int my_encap(struct my_softc *, struct my_chain *, struct mbuf *); static void my_rxeof(struct my_softc *); static void my_txeof(struct my_softc *); static void my_txeoc(struct my_softc *); static void my_intr(void *); static void my_start(struct ifnet *); static int my_ioctl(struct ifnet *, u_long, caddr_t); static void my_init(void *); static void my_stop(struct my_softc *); static void my_watchdog(struct ifnet *); static void my_shutdown(device_t); static int my_ifmedia_upd(struct ifnet *); static void my_ifmedia_sts(struct ifnet *, struct ifmediareq *); static u_int16_t my_phy_readreg(struct my_softc *, int); static void my_phy_writereg(struct my_softc *, int, int); static void my_autoneg_xmit(struct my_softc *); static void my_autoneg_mii(struct my_softc *, int, int); static void my_setmode_mii(struct my_softc *, int); static void my_getmode_mii(struct my_softc *); static void my_setcfg(struct my_softc *, int); static void my_setmulti(struct my_softc *); static void my_reset(struct my_softc *); static int my_list_rx_init(struct my_softc *); static int my_list_tx_init(struct my_softc *); static long my_send_cmd_to_phy(struct my_softc *, int, int); #define MY_SETBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x)) #define MY_CLRBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x)) static device_method_t my_methods[] = { /* Device interface */ DEVMETHOD(device_probe, my_probe), DEVMETHOD(device_attach, my_attach), DEVMETHOD(device_detach, my_detach), DEVMETHOD(device_shutdown, my_shutdown), {0, 0} }; static driver_t my_driver = { "my", my_methods, 
sizeof(struct my_softc) }; static devclass_t my_devclass; DRIVER_MODULE(my, pci, my_driver, my_devclass, 0, 0); MODULE_DEPEND(my, pci, 1, 1, 1); MODULE_DEPEND(my, ether, 1, 1, 1); static long my_send_cmd_to_phy(struct my_softc * sc, int opcode, int regad) { long miir; int i; int mask, data; MY_LOCK(sc); /* enable MII output */ miir = CSR_READ_4(sc, MY_MANAGEMENT); miir &= 0xfffffff0; miir |= MY_MASK_MIIR_MII_WRITE + MY_MASK_MIIR_MII_MDO; /* send 32 1's preamble */ for (i = 0; i < 32; i++) { /* low MDC; MDO is already high (miir) */ miir &= ~MY_MASK_MIIR_MII_MDC; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); /* high MDC */ miir |= MY_MASK_MIIR_MII_MDC; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); } /* calculate ST+OP+PHYAD+REGAD+TA */ data = opcode | (sc->my_phy_addr << 7) | (regad << 2); /* sent out */ mask = 0x8000; while (mask) { /* low MDC, prepare MDO */ miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO); if (mask & data) miir |= MY_MASK_MIIR_MII_MDO; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); /* high MDC */ miir |= MY_MASK_MIIR_MII_MDC; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); DELAY(30); /* next */ mask >>= 1; if (mask == 0x2 && opcode == MY_OP_READ) miir &= ~MY_MASK_MIIR_MII_WRITE; } MY_UNLOCK(sc); return miir; } static u_int16_t my_phy_readreg(struct my_softc * sc, int reg) { long miir; int mask, data; MY_LOCK(sc); if (sc->my_info->my_did == MTD803ID) data = CSR_READ_2(sc, MY_PHYBASE + reg * 2); else { miir = my_send_cmd_to_phy(sc, MY_OP_READ, reg); /* read data */ mask = 0x8000; data = 0; while (mask) { /* low MDC */ miir &= ~MY_MASK_MIIR_MII_MDC; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); /* read MDI */ miir = CSR_READ_4(sc, MY_MANAGEMENT); if (miir & MY_MASK_MIIR_MII_MDI) data |= mask; /* high MDC, and wait */ miir |= MY_MASK_MIIR_MII_MDC; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); DELAY(30); /* next */ mask >>= 1; } /* low MDC */ miir &= ~MY_MASK_MIIR_MII_MDC; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); } MY_UNLOCK(sc); return (u_int16_t) data; } static void my_phy_writereg(struct 
my_softc * sc, int reg, int data) { long miir; int mask; MY_LOCK(sc); if (sc->my_info->my_did == MTD803ID) CSR_WRITE_2(sc, MY_PHYBASE + reg * 2, data); else { miir = my_send_cmd_to_phy(sc, MY_OP_WRITE, reg); /* write data */ mask = 0x8000; while (mask) { /* low MDC, prepare MDO */ miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO); if (mask & data) miir |= MY_MASK_MIIR_MII_MDO; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); DELAY(1); /* high MDC */ miir |= MY_MASK_MIIR_MII_MDC; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); DELAY(1); /* next */ mask >>= 1; } /* low MDC */ miir &= ~MY_MASK_MIIR_MII_MDC; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); } MY_UNLOCK(sc); return; } /* * Program the 64-bit multicast hash filter. */ static void my_setmulti(struct my_softc * sc) { struct ifnet *ifp; int h = 0; u_int32_t hashes[2] = {0, 0}; struct ifmultiaddr *ifma; u_int32_t rxfilt; int mcnt = 0; MY_LOCK(sc); ifp = sc->my_ifp; rxfilt = CSR_READ_4(sc, MY_TCRRCR); if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { rxfilt |= MY_AM; CSR_WRITE_4(sc, MY_TCRRCR, rxfilt); CSR_WRITE_4(sc, MY_MAR0, 0xFFFFFFFF); CSR_WRITE_4(sc, MY_MAR1, 0xFFFFFFFF); MY_UNLOCK(sc); return; } /* first, zot all the existing hash bits */ CSR_WRITE_4(sc, MY_MAR0, 0); CSR_WRITE_4(sc, MY_MAR1, 0); /* now program new ones */ + IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = ~ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); mcnt++; } + IF_ADDR_UNLOCK(ifp); if (mcnt) rxfilt |= MY_AM; else rxfilt &= ~MY_AM; CSR_WRITE_4(sc, MY_MAR0, hashes[0]); CSR_WRITE_4(sc, MY_MAR1, hashes[1]); CSR_WRITE_4(sc, MY_TCRRCR, rxfilt); MY_UNLOCK(sc); return; } /* * Initiate an autonegotiation session. 
*/ static void my_autoneg_xmit(struct my_softc * sc) { u_int16_t phy_sts = 0; MY_LOCK(sc); my_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET); DELAY(500); while (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_RESET); phy_sts = my_phy_readreg(sc, PHY_BMCR); phy_sts |= PHY_BMCR_AUTONEGENBL | PHY_BMCR_AUTONEGRSTR; my_phy_writereg(sc, PHY_BMCR, phy_sts); MY_UNLOCK(sc); return; } /* * Invoke autonegotiation on a PHY. */ static void my_autoneg_mii(struct my_softc * sc, int flag, int verbose) { u_int16_t phy_sts = 0, media, advert, ability; u_int16_t ability2 = 0; struct ifnet *ifp; struct ifmedia *ifm; MY_LOCK(sc); ifm = &sc->ifmedia; ifp = sc->my_ifp; ifm->ifm_media = IFM_ETHER | IFM_AUTO; #ifndef FORCE_AUTONEG_TFOUR /* * First, see if autoneg is supported. If not, there's no point in * continuing. */ phy_sts = my_phy_readreg(sc, PHY_BMSR); if (!(phy_sts & PHY_BMSR_CANAUTONEG)) { if (verbose) printf("my%d: autonegotiation not supported\n", sc->my_unit); ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX; MY_UNLOCK(sc); return; } #endif switch (flag) { case MY_FLAG_FORCEDELAY: /* * XXX Never use this option anywhere but in the probe * routine: making the kernel stop dead in its tracks for * three whole seconds after we've gone multi-user is really * bad manners. */ my_autoneg_xmit(sc); DELAY(5000000); break; case MY_FLAG_SCHEDDELAY: /* * Wait for the transmitter to go idle before starting an * autoneg session, otherwise my_start() may clobber our * timeout, and we don't want to allow transmission during an * autoneg session since that can screw it up. 
*/ if (sc->my_cdata.my_tx_head != NULL) { sc->my_want_auto = 1; MY_UNLOCK(sc); return; } my_autoneg_xmit(sc); ifp->if_timer = 5; sc->my_autoneg = 1; sc->my_want_auto = 0; MY_UNLOCK(sc); return; case MY_FLAG_DELAYTIMEO: ifp->if_timer = 0; sc->my_autoneg = 0; break; default: printf("my%d: invalid autoneg flag: %d\n", sc->my_unit, flag); MY_UNLOCK(sc); return; } if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) { if (verbose) printf("my%d: autoneg complete, ", sc->my_unit); phy_sts = my_phy_readreg(sc, PHY_BMSR); } else { if (verbose) printf("my%d: autoneg not complete, ", sc->my_unit); } media = my_phy_readreg(sc, PHY_BMCR); /* Link is good. Report modes and set duplex mode. */ if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) { if (verbose) printf("my%d: link status good. ", sc->my_unit); advert = my_phy_readreg(sc, PHY_ANAR); ability = my_phy_readreg(sc, PHY_LPAR); if ((sc->my_pinfo->my_vid == MarvellPHYID0) || (sc->my_pinfo->my_vid == LevelOnePHYID0)) { ability2 = my_phy_readreg(sc, PHY_1000SR); if (ability2 & PHY_1000SR_1000BTXFULL) { advert = 0; ability = 0; /* * this version did not support 1000M, * ifm->ifm_media = * IFM_ETHER|IFM_1000_T|IFM_FDX; */ ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX; media &= ~PHY_BMCR_SPEEDSEL; media |= PHY_BMCR_1000; media |= PHY_BMCR_DUPLEX; printf("(full-duplex, 1000Mbps)\n"); } else if (ability2 & PHY_1000SR_1000BTXHALF) { advert = 0; ability = 0; /* * this version did not support 1000M, * ifm->ifm_media = IFM_ETHER|IFM_1000_T; */ ifm->ifm_media = IFM_ETHER | IFM_100_TX; media &= ~PHY_BMCR_SPEEDSEL; media &= ~PHY_BMCR_DUPLEX; media |= PHY_BMCR_1000; printf("(half-duplex, 1000Mbps)\n"); } } if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) { ifm->ifm_media = IFM_ETHER | IFM_100_T4; media |= PHY_BMCR_SPEEDSEL; media &= ~PHY_BMCR_DUPLEX; printf("(100baseT4)\n"); } else if (advert & PHY_ANAR_100BTXFULL && ability & PHY_ANAR_100BTXFULL) { ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX; media |= 
PHY_BMCR_SPEEDSEL; media |= PHY_BMCR_DUPLEX; printf("(full-duplex, 100Mbps)\n"); } else if (advert & PHY_ANAR_100BTXHALF && ability & PHY_ANAR_100BTXHALF) { ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX; media |= PHY_BMCR_SPEEDSEL; media &= ~PHY_BMCR_DUPLEX; printf("(half-duplex, 100Mbps)\n"); } else if (advert & PHY_ANAR_10BTFULL && ability & PHY_ANAR_10BTFULL) { ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX; media &= ~PHY_BMCR_SPEEDSEL; media |= PHY_BMCR_DUPLEX; printf("(full-duplex, 10Mbps)\n"); } else if (advert) { ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX; media &= ~PHY_BMCR_SPEEDSEL; media &= ~PHY_BMCR_DUPLEX; printf("(half-duplex, 10Mbps)\n"); } media &= ~PHY_BMCR_AUTONEGENBL; /* Set ASIC's duplex mode to match the PHY. */ my_phy_writereg(sc, PHY_BMCR, media); my_setcfg(sc, media); } else { if (verbose) printf("my%d: no carrier\n", sc->my_unit); } my_init(sc); if (sc->my_tx_pend) { sc->my_autoneg = 0; sc->my_tx_pend = 0; my_start(ifp); } MY_UNLOCK(sc); return; } /* * To get PHY ability. 
*/ static void my_getmode_mii(struct my_softc * sc) { u_int16_t bmsr; struct ifnet *ifp; MY_LOCK(sc); ifp = sc->my_ifp; bmsr = my_phy_readreg(sc, PHY_BMSR); if (bootverbose) printf("my%d: PHY status word: %x\n", sc->my_unit, bmsr); /* fallback */ sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX; if (bmsr & PHY_BMSR_10BTHALF) { if (bootverbose) printf("my%d: 10Mbps half-duplex mode supported\n", sc->my_unit); ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T, 0, NULL); } if (bmsr & PHY_BMSR_10BTFULL) { if (bootverbose) printf("my%d: 10Mbps full-duplex mode supported\n", sc->my_unit); ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL); sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX; } if (bmsr & PHY_BMSR_100BTXHALF) { if (bootverbose) printf("my%d: 100Mbps half-duplex mode supported\n", sc->my_unit); ifp->if_baudrate = 100000000; ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX, 0, NULL); sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX; } if (bmsr & PHY_BMSR_100BTXFULL) { if (bootverbose) printf("my%d: 100Mbps full-duplex mode supported\n", sc->my_unit); ifp->if_baudrate = 100000000; ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX; } /* Some also support 100BaseT4. 
*/ if (bmsr & PHY_BMSR_100BT4) { if (bootverbose) printf("my%d: 100baseT4 mode supported\n", sc->my_unit); ifp->if_baudrate = 100000000; ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_T4, 0, NULL); sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_T4; #ifdef FORCE_AUTONEG_TFOUR if (bootverbose) printf("my%d: forcing on autoneg support for BT4\n", sc->my_unit); ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0 NULL): sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO; #endif } #if 0 /* this version did not support 1000M, */ if (sc->my_pinfo->my_vid == MarvellPHYID0) { if (bootverbose) printf("my%d: 1000Mbps half-duplex mode supported\n", sc->my_unit); ifp->if_baudrate = 1000000000; ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_HDX, 0, NULL); if (bootverbose) printf("my%d: 1000Mbps full-duplex mode supported\n", sc->my_unit); ifp->if_baudrate = 1000000000; ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); sc->ifmedia.ifm_media = IFM_ETHER | IFM_1000_T | IFM_FDX; } #endif if (bmsr & PHY_BMSR_CANAUTONEG) { if (bootverbose) printf("my%d: autoneg supported\n", sc->my_unit); ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL); sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO; } MY_UNLOCK(sc); return; } /* * Set speed and duplex mode. */ static void my_setmode_mii(struct my_softc * sc, int media) { u_int16_t bmcr; struct ifnet *ifp; MY_LOCK(sc); ifp = sc->my_ifp; /* * If an autoneg session is in progress, stop it. 
*/ if (sc->my_autoneg) { printf("my%d: canceling autoneg session\n", sc->my_unit); ifp->if_timer = sc->my_autoneg = sc->my_want_auto = 0; bmcr = my_phy_readreg(sc, PHY_BMCR); bmcr &= ~PHY_BMCR_AUTONEGENBL; my_phy_writereg(sc, PHY_BMCR, bmcr); } printf("my%d: selecting MII, ", sc->my_unit); bmcr = my_phy_readreg(sc, PHY_BMCR); bmcr &= ~(PHY_BMCR_AUTONEGENBL | PHY_BMCR_SPEEDSEL | PHY_BMCR_1000 | PHY_BMCR_DUPLEX | PHY_BMCR_LOOPBK); #if 0 /* this version did not support 1000M, */ if (IFM_SUBTYPE(media) == IFM_1000_T) { printf("1000Mbps/T4, half-duplex\n"); bmcr &= ~PHY_BMCR_SPEEDSEL; bmcr &= ~PHY_BMCR_DUPLEX; bmcr |= PHY_BMCR_1000; } #endif if (IFM_SUBTYPE(media) == IFM_100_T4) { printf("100Mbps/T4, half-duplex\n"); bmcr |= PHY_BMCR_SPEEDSEL; bmcr &= ~PHY_BMCR_DUPLEX; } if (IFM_SUBTYPE(media) == IFM_100_TX) { printf("100Mbps, "); bmcr |= PHY_BMCR_SPEEDSEL; } if (IFM_SUBTYPE(media) == IFM_10_T) { printf("10Mbps, "); bmcr &= ~PHY_BMCR_SPEEDSEL; } if ((media & IFM_GMASK) == IFM_FDX) { printf("full duplex\n"); bmcr |= PHY_BMCR_DUPLEX; } else { printf("half duplex\n"); bmcr &= ~PHY_BMCR_DUPLEX; } my_phy_writereg(sc, PHY_BMCR, bmcr); my_setcfg(sc, bmcr); MY_UNLOCK(sc); return; } /* * The Myson manual states that in order to fiddle with the 'full-duplex' and * '100Mbps' bits in the netconfig register, we first have to put the * transmit and/or receive logic in the idle state. 
*/ static void my_setcfg(struct my_softc * sc, int bmcr) { int i, restart = 0; MY_LOCK(sc); if (CSR_READ_4(sc, MY_TCRRCR) & (MY_TE | MY_RE)) { restart = 1; MY_CLRBIT(sc, MY_TCRRCR, (MY_TE | MY_RE)); for (i = 0; i < MY_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_4(sc, MY_TCRRCR) & (MY_TXRUN | MY_RXRUN))) break; } if (i == MY_TIMEOUT) printf("my%d: failed to force tx and rx to idle \n", sc->my_unit); } MY_CLRBIT(sc, MY_TCRRCR, MY_PS1000); MY_CLRBIT(sc, MY_TCRRCR, MY_PS10); if (bmcr & PHY_BMCR_1000) MY_SETBIT(sc, MY_TCRRCR, MY_PS1000); else if (!(bmcr & PHY_BMCR_SPEEDSEL)) MY_SETBIT(sc, MY_TCRRCR, MY_PS10); if (bmcr & PHY_BMCR_DUPLEX) MY_SETBIT(sc, MY_TCRRCR, MY_FD); else MY_CLRBIT(sc, MY_TCRRCR, MY_FD); if (restart) MY_SETBIT(sc, MY_TCRRCR, MY_TE | MY_RE); MY_UNLOCK(sc); return; } static void my_reset(struct my_softc * sc) { register int i; MY_LOCK(sc); MY_SETBIT(sc, MY_BCR, MY_SWR); for (i = 0; i < MY_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_4(sc, MY_BCR) & MY_SWR)) break; } if (i == MY_TIMEOUT) printf("m0x%d: reset never completed!\n", sc->my_unit); /* Wait a little while for the chip to get its brains in order. */ DELAY(1000); MY_UNLOCK(sc); return; } /* * Probe for a Myson chip. Check the PCI vendor and device IDs against our * list and return a device name if we find a match. */ static int my_probe(device_t dev) { struct my_type *t; t = my_devs; while (t->my_name != NULL) { if ((pci_get_vendor(dev) == t->my_vid) && (pci_get_device(dev) == t->my_did)) { device_set_desc(dev, t->my_name); my_info_tmp = t; return (BUS_PROBE_DEFAULT); } t++; } return (ENXIO); } /* * Attach the interface. Allocate softc structures, do ifmedia setup and * ethernet/BPF attach. 
*/ static int my_attach(device_t dev) { int s, i; u_char eaddr[ETHER_ADDR_LEN]; u_int32_t command, iobase; struct my_softc *sc; struct ifnet *ifp; int media = IFM_ETHER | IFM_100_TX | IFM_FDX; unsigned int round; caddr_t roundptr; struct my_type *p; u_int16_t phy_vid, phy_did, phy_sts = 0; int rid, unit, error = 0; s = splimp(); sc = device_get_softc(dev); unit = device_get_unit(dev); bzero(sc, sizeof(struct my_softc)); mtx_init(&sc->my_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); MY_LOCK(sc); /* * Map control/status registers. */ #if 0 command = pci_read_config(dev, PCI_COMMAND_STATUS_REG, 4); command |= (PCIM_CMD_PORTEN | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); pci_write_config(dev, PCI_COMMAND_STATUS_REG, command & 0x000000ff, 4); command = pci_read_config(dev, PCI_COMMAND_STATUS_REG, 4); #endif command = pci_read_config(dev, PCIR_COMMAND, 4); command |= (PCIM_CMD_PORTEN | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); pci_write_config(dev, PCIR_COMMAND, command & 0x000000ff, 4); command = pci_read_config(dev, PCIR_COMMAND, 4); if (my_info_tmp->my_did == MTD800ID) { iobase = pci_read_config(dev, MY_PCI_LOIO, 4); if (iobase & 0x300) MY_USEIOSPACE = 0; } if (MY_USEIOSPACE) { if (!(command & PCIM_CMD_PORTEN)) { printf("my%d: failed to enable I/O ports!\n", unit); free(sc, M_DEVBUF); error = ENXIO; goto fail; } #if 0 if (!pci_map_port(config_id, MY_PCI_LOIO, (u_int16_t *) & (sc->my_bhandle))) { printf("my%d: couldn't map ports\n", unit); error = ENXIO; goto fail; } sc->my_btag = I386_BUS_SPACE_IO; #endif } else { if (!(command & PCIM_CMD_MEMEN)) { printf("my%d: failed to enable memory mapping!\n", unit); error = ENXIO; goto fail; } #if 0 if (!pci_map_mem(config_id, MY_PCI_LOMEM, &vbase, &pbase)) { printf ("my%d: couldn't map memory\n", unit); error = ENXIO; goto fail; } sc->my_btag = I386_BUS_SPACE_MEM; sc->my_bhandle = vbase; #endif } rid = MY_RID; sc->my_res = bus_alloc_resource_any(dev, MY_RES, &rid, RF_ACTIVE); if (sc->my_res == NULL) { 
printf("my%d: couldn't map ports/memory\n", unit); error = ENXIO; goto fail; } sc->my_btag = rman_get_bustag(sc->my_res); sc->my_bhandle = rman_get_bushandle(sc->my_res); rid = 0; sc->my_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->my_irq == NULL) { printf("my%d: couldn't map interrupt\n", unit); bus_release_resource(dev, MY_RES, MY_RID, sc->my_res); error = ENXIO; goto fail; } error = bus_setup_intr(dev, sc->my_irq, INTR_TYPE_NET, my_intr, sc, &sc->my_intrhand); if (error) { bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq); bus_release_resource(dev, MY_RES, MY_RID, sc->my_res); printf("my%d: couldn't set up irq\n", unit); goto fail; } callout_handle_init(&sc->my_stat_ch); sc->my_info = my_info_tmp; /* Reset the adapter. */ my_reset(sc); /* * Get station address */ for (i = 0; i < ETHER_ADDR_LEN; ++i) eaddr[i] = CSR_READ_1(sc, MY_PAR0 + i); sc->my_unit = unit; sc->my_ldata_ptr = malloc(sizeof(struct my_list_data) + 8, M_DEVBUF, M_NOWAIT); if (sc->my_ldata_ptr == NULL) { free(sc, M_DEVBUF); printf("my%d: no memory for list buffers!\n", unit); error = ENXIO; goto fail; } sc->my_ldata = (struct my_list_data *) sc->my_ldata_ptr; round = (uintptr_t)sc->my_ldata_ptr & 0xF; roundptr = sc->my_ldata_ptr; for (i = 0; i < 8; i++) { if (round % 8) { round++; roundptr++; } else break; } sc->my_ldata = (struct my_list_data *) roundptr; bzero(sc->my_ldata, sizeof(struct my_list_data)); ifp = sc->my_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = my_ioctl; ifp->if_start = my_start; ifp->if_watchdog = my_watchdog; ifp->if_init = my_init; ifp->if_baudrate = 10000000; ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; if (sc->my_info->my_did == MTD803ID) sc->my_pinfo = my_phys; else { if (bootverbose) 
printf("my%d: probing for a PHY\n", sc->my_unit); for (i = MY_PHYADDR_MIN; i < MY_PHYADDR_MAX + 1; i++) { if (bootverbose) printf("my%d: checking address: %d\n", sc->my_unit, i); sc->my_phy_addr = i; phy_sts = my_phy_readreg(sc, PHY_BMSR); if ((phy_sts != 0) && (phy_sts != 0xffff)) break; else phy_sts = 0; } if (phy_sts) { phy_vid = my_phy_readreg(sc, PHY_VENID); phy_did = my_phy_readreg(sc, PHY_DEVID); if (bootverbose) { printf("my%d: found PHY at address %d, ", sc->my_unit, sc->my_phy_addr); printf("vendor id: %x device id: %x\n", phy_vid, phy_did); } p = my_phys; while (p->my_vid) { if (phy_vid == p->my_vid) { sc->my_pinfo = p; break; } p++; } if (sc->my_pinfo == NULL) sc->my_pinfo = &my_phys[PHY_UNKNOWN]; if (bootverbose) printf("my%d: PHY type: %s\n", sc->my_unit, sc->my_pinfo->my_name); } else { printf("my%d: MII without any phy!\n", sc->my_unit); error = ENXIO; goto fail; } } /* Do ifmedia setup. */ ifmedia_init(&sc->ifmedia, 0, my_ifmedia_upd, my_ifmedia_sts); my_getmode_mii(sc); my_autoneg_mii(sc, MY_FLAG_FORCEDELAY, 1); media = sc->ifmedia.ifm_media; my_stop(sc); ifmedia_set(&sc->ifmedia, media); ether_ifattach(ifp, eaddr); #if 0 at_shutdown(my_shutdown, sc, SHUTDOWN_POST_SYNC); shutdownhook_establish(my_shutdown, sc); #endif MY_UNLOCK(sc); return (0); fail: MY_UNLOCK(sc); mtx_destroy(&sc->my_mtx); if (sc->my_ldata_ptr != NULL) free(sc->my_ldata_ptr, M_DEVBUF); splx(s); return (error); } static int my_detach(device_t dev) { struct my_softc *sc; struct ifnet *ifp; int s; s = splimp(); sc = device_get_softc(dev); MY_LOCK(sc); ifp = sc->my_ifp; ether_ifdetach(ifp); if_free(ifp); my_stop(sc); #if 0 bus_generic_detach(dev); device_delete_child(dev, sc->rl_miibus); #endif bus_teardown_intr(dev, sc->my_irq, sc->my_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq); bus_release_resource(dev, MY_RES, MY_RID, sc->my_res); #if 0 contigfree(sc->my_cdata.my_rx_buf, MY_RXBUFLEN + 32, M_DEVBUF); #endif free(sc, M_DEVBUF); MY_UNLOCK(sc); splx(s); 
mtx_destroy(&sc->my_mtx); return (0); } /* * Initialize the transmit descriptors. */ static int my_list_tx_init(struct my_softc * sc) { struct my_chain_data *cd; struct my_list_data *ld; int i; MY_LOCK(sc); cd = &sc->my_cdata; ld = sc->my_ldata; for (i = 0; i < MY_TX_LIST_CNT; i++) { cd->my_tx_chain[i].my_ptr = &ld->my_tx_list[i]; if (i == (MY_TX_LIST_CNT - 1)) cd->my_tx_chain[i].my_nextdesc = &cd->my_tx_chain[0]; else cd->my_tx_chain[i].my_nextdesc = &cd->my_tx_chain[i + 1]; } cd->my_tx_free = &cd->my_tx_chain[0]; cd->my_tx_tail = cd->my_tx_head = NULL; MY_UNLOCK(sc); return (0); } /* * Initialize the RX descriptors and allocate mbufs for them. Note that we * arrange the descriptors in a closed ring, so that the last descriptor * points back to the first. */ static int my_list_rx_init(struct my_softc * sc) { struct my_chain_data *cd; struct my_list_data *ld; int i; MY_LOCK(sc); cd = &sc->my_cdata; ld = sc->my_ldata; for (i = 0; i < MY_RX_LIST_CNT; i++) { cd->my_rx_chain[i].my_ptr = (struct my_desc *) & ld->my_rx_list[i]; if (my_newbuf(sc, &cd->my_rx_chain[i]) == ENOBUFS) { MY_UNLOCK(sc); return (ENOBUFS); } if (i == (MY_RX_LIST_CNT - 1)) { cd->my_rx_chain[i].my_nextdesc = &cd->my_rx_chain[0]; ld->my_rx_list[i].my_next = vtophys(&ld->my_rx_list[0]); } else { cd->my_rx_chain[i].my_nextdesc = &cd->my_rx_chain[i + 1]; ld->my_rx_list[i].my_next = vtophys(&ld->my_rx_list[i + 1]); } } cd->my_rx_head = &cd->my_rx_chain[0]; MY_UNLOCK(sc); return (0); } /* * Initialize an RX descriptor and attach an MBUF cluster. 
*/ static int my_newbuf(struct my_softc * sc, struct my_chain_onefrag * c) { struct mbuf *m_new = NULL; MY_LOCK(sc); MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("my%d: no memory for rx list -- packet dropped!\n", sc->my_unit); MY_UNLOCK(sc); return (ENOBUFS); } MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { printf("my%d: no memory for rx list -- packet dropped!\n", sc->my_unit); m_freem(m_new); MY_UNLOCK(sc); return (ENOBUFS); } c->my_mbuf = m_new; c->my_ptr->my_data = vtophys(mtod(m_new, caddr_t)); c->my_ptr->my_ctl = (MCLBYTES - 1) << MY_RBSShift; c->my_ptr->my_status = MY_OWNByNIC; MY_UNLOCK(sc); return (0); } /* * A frame has been uploaded: pass the resulting mbuf chain up to the higher * level protocols. */ static void my_rxeof(struct my_softc * sc) { struct ether_header *eh; struct mbuf *m; struct ifnet *ifp; struct my_chain_onefrag *cur_rx; int total_len = 0; u_int32_t rxstat; MY_LOCK(sc); ifp = sc->my_ifp; while (!((rxstat = sc->my_cdata.my_rx_head->my_ptr->my_status) & MY_OWNByNIC)) { cur_rx = sc->my_cdata.my_rx_head; sc->my_cdata.my_rx_head = cur_rx->my_nextdesc; if (rxstat & MY_ES) { /* error summary: give up this rx pkt */ ifp->if_ierrors++; cur_rx->my_ptr->my_status = MY_OWNByNIC; continue; } /* No errors; receive the packet. */ total_len = (rxstat & MY_FLNGMASK) >> MY_FLNGShift; total_len -= ETHER_CRC_LEN; if (total_len < MINCLSIZE) { m = m_devget(mtod(cur_rx->my_mbuf, char *), total_len, 0, ifp, NULL); cur_rx->my_ptr->my_status = MY_OWNByNIC; if (m == NULL) { ifp->if_ierrors++; continue; } } else { m = cur_rx->my_mbuf; /* * Try to conjure up a new mbuf cluster. If that * fails, it means we have an out of memory condition * and should leave the buffer in place and continue. * This will result in a lost packet, but there's * little else we can do in this situation. 
*/ if (my_newbuf(sc, cur_rx) == ENOBUFS) { ifp->if_ierrors++; cur_rx->my_ptr->my_status = MY_OWNByNIC; continue; } m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = total_len; } ifp->if_ipackets++; eh = mtod(m, struct ether_header *); #if NBPFILTER > 0 /* * Handle BPF listeners. Let the BPF user see the packet, but * don't pass it up to the ether_input() layer unless it's a * broadcast packet, multicast packet, matches our ethernet * address or the interface is in promiscuous mode. */ if (ifp->if_bpf) { BPF_MTAP(ifp, m); if (ifp->if_flags & IFF_PROMISC && (bcmp(eh->ether_dhost, IFP2ENADDR(sc->my_ifp), ETHER_ADDR_LEN) && (eh->ether_dhost[0] & 1) == 0)) { m_freem(m); continue; } } #endif MY_UNLOCK(sc); (*ifp->if_input)(ifp, m); MY_LOCK(sc); } MY_UNLOCK(sc); return; } /* * A frame was downloaded to the chip. It's safe for us to clean up the list * buffers. */ static void my_txeof(struct my_softc * sc) { struct my_chain *cur_tx; struct ifnet *ifp; MY_LOCK(sc); ifp = sc->my_ifp; /* Clear the timeout timer. */ ifp->if_timer = 0; if (sc->my_cdata.my_tx_head == NULL) { MY_UNLOCK(sc); return; } /* * Go through our tx list and free mbufs for those frames that have * been transmitted. 
*/ while (sc->my_cdata.my_tx_head->my_mbuf != NULL) { u_int32_t txstat; cur_tx = sc->my_cdata.my_tx_head; txstat = MY_TXSTATUS(cur_tx); if ((txstat & MY_OWNByNIC) || txstat == MY_UNSENT) break; if (!(CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced)) { if (txstat & MY_TXERR) { ifp->if_oerrors++; if (txstat & MY_EC) /* excessive collision */ ifp->if_collisions++; if (txstat & MY_LC) /* late collision */ ifp->if_collisions++; } ifp->if_collisions += (txstat & MY_NCRMASK) >> MY_NCRShift; } ifp->if_opackets++; m_freem(cur_tx->my_mbuf); cur_tx->my_mbuf = NULL; if (sc->my_cdata.my_tx_head == sc->my_cdata.my_tx_tail) { sc->my_cdata.my_tx_head = NULL; sc->my_cdata.my_tx_tail = NULL; break; } sc->my_cdata.my_tx_head = cur_tx->my_nextdesc; } if (CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced) { ifp->if_collisions += (CSR_READ_4(sc, MY_TSR) & MY_NCRMask); } MY_UNLOCK(sc); return; } /* * TX 'end of channel' interrupt handler. */ static void my_txeoc(struct my_softc * sc) { struct ifnet *ifp; MY_LOCK(sc); ifp = sc->my_ifp; ifp->if_timer = 0; if (sc->my_cdata.my_tx_head == NULL) { ifp->if_flags &= ~IFF_OACTIVE; sc->my_cdata.my_tx_tail = NULL; if (sc->my_want_auto) my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1); } else { if (MY_TXOWN(sc->my_cdata.my_tx_head) == MY_UNSENT) { MY_TXOWN(sc->my_cdata.my_tx_head) = MY_OWNByNIC; ifp->if_timer = 5; CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF); } } MY_UNLOCK(sc); return; } static void my_intr(void *arg) { struct my_softc *sc; struct ifnet *ifp; u_int32_t status; sc = arg; MY_LOCK(sc); ifp = sc->my_ifp; if (!(ifp->if_flags & IFF_UP)) { MY_UNLOCK(sc); return; } /* Disable interrupts. 
*/ CSR_WRITE_4(sc, MY_IMR, 0x00000000); for (;;) { status = CSR_READ_4(sc, MY_ISR); status &= MY_INTRS; if (status) CSR_WRITE_4(sc, MY_ISR, status); else break; if (status & MY_RI) /* receive interrupt */ my_rxeof(sc); if ((status & MY_RBU) || (status & MY_RxErr)) { /* rx buffer unavailable or rx error */ ifp->if_ierrors++; #ifdef foo my_stop(sc); my_reset(sc); my_init(sc); #endif } if (status & MY_TI) /* tx interrupt */ my_txeof(sc); if (status & MY_ETI) /* tx early interrupt */ my_txeof(sc); if (status & MY_TBU) /* tx buffer unavailable */ my_txeoc(sc); #if 0 /* 90/1/18 delete */ if (status & MY_FBE) { my_reset(sc); my_init(sc); } #endif } /* Re-enable interrupts. */ CSR_WRITE_4(sc, MY_IMR, MY_INTRS); if (ifp->if_snd.ifq_head != NULL) my_start(ifp); MY_UNLOCK(sc); return; } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. */ static int my_encap(struct my_softc * sc, struct my_chain * c, struct mbuf * m_head) { struct my_desc *f = NULL; int total_len; struct mbuf *m, *m_new = NULL; MY_LOCK(sc); /* calculate the total tx pkt length */ total_len = 0; for (m = m_head; m != NULL; m = m->m_next) total_len += m->m_len; /* * Start packing the mbufs in this chain into the fragment pointers. * Stop when we run out of fragments or hit the end of the mbuf * chain. 
*/ m = m_head; MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("my%d: no memory for tx list", sc->my_unit); MY_UNLOCK(sc); return (1); } if (m_head->m_pkthdr.len > MHLEN) { MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { m_freem(m_new); printf("my%d: no memory for tx list", sc->my_unit); MY_UNLOCK(sc); return (1); } } m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t)); m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len; m_freem(m_head); m_head = m_new; f = &c->my_ptr->my_frag[0]; f->my_status = 0; f->my_data = vtophys(mtod(m_new, caddr_t)); total_len = m_new->m_len; f->my_ctl = MY_TXFD | MY_TXLD | MY_CRCEnable | MY_PADEnable; f->my_ctl |= total_len << MY_PKTShift; /* pkt size */ f->my_ctl |= total_len; /* buffer size */ /* 89/12/29 add, for mtd891 *//* [ 89? ] */ if (sc->my_info->my_did == MTD891ID) f->my_ctl |= MY_ETIControl | MY_RetryTxLC; c->my_mbuf = m_head; c->my_lastdesc = 0; MY_TXNEXT(c) = vtophys(&c->my_nextdesc->my_ptr->my_frag[0]); MY_UNLOCK(sc); return (0); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit lists. We also save a * copy of the pointers since the transmit list fragment pointers are * physical addresses. */ static void my_start(struct ifnet * ifp) { struct my_softc *sc; struct mbuf *m_head = NULL; struct my_chain *cur_tx = NULL, *start_tx; sc = ifp->if_softc; MY_LOCK(sc); if (sc->my_autoneg) { sc->my_tx_pend = 1; MY_UNLOCK(sc); return; } /* * Check for an available queue slot. If there are none, punt. */ if (sc->my_cdata.my_tx_free->my_mbuf != NULL) { ifp->if_flags |= IFF_OACTIVE; MY_UNLOCK(sc); return; } start_tx = sc->my_cdata.my_tx_free; while (sc->my_cdata.my_tx_free->my_mbuf == NULL) { IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* Pick a descriptor off the free list. */ cur_tx = sc->my_cdata.my_tx_free; sc->my_cdata.my_tx_free = cur_tx->my_nextdesc; /* Pack the data into the descriptor. 
*/ my_encap(sc, cur_tx, m_head); if (cur_tx != start_tx) MY_TXOWN(cur_tx) = MY_OWNByNIC; #if NBPFILTER > 0 /* * If there's a BPF listener, bounce a copy of this frame to * him. */ BPF_MTAP(ifp, cur_tx->my_mbuf); #endif } /* * If there are no packets queued, bail. */ if (cur_tx == NULL) { MY_UNLOCK(sc); return; } /* * Place the request for the upload interrupt in the last descriptor * in the chain. This way, if we're chaining several packets at once, * we'll only get an interupt once for the whole chain rather than * once for each packet. */ MY_TXCTL(cur_tx) |= MY_TXIC; cur_tx->my_ptr->my_frag[0].my_ctl |= MY_TXIC; sc->my_cdata.my_tx_tail = cur_tx; if (sc->my_cdata.my_tx_head == NULL) sc->my_cdata.my_tx_head = start_tx; MY_TXOWN(start_tx) = MY_OWNByNIC; CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF); /* tx polling demand */ /* * Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; MY_UNLOCK(sc); return; } static void my_init(void *xsc) { struct my_softc *sc = xsc; struct ifnet *ifp = sc->my_ifp; int s; u_int16_t phy_bmcr = 0; MY_LOCK(sc); if (sc->my_autoneg) { MY_UNLOCK(sc); return; } s = splimp(); if (sc->my_pinfo != NULL) phy_bmcr = my_phy_readreg(sc, PHY_BMCR); /* * Cancel pending I/O and free all RX/TX buffers. */ my_stop(sc); my_reset(sc); /* * Set cache alignment and burst length. */ #if 0 /* 89/9/1 modify, */ CSR_WRITE_4(sc, MY_BCR, MY_RPBLE512); CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF); #endif CSR_WRITE_4(sc, MY_BCR, MY_PBL8); CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF | MY_RBLEN | MY_RPBLE512); /* * 89/12/29 add, for mtd891, */ if (sc->my_info->my_did == MTD891ID) { MY_SETBIT(sc, MY_BCR, MY_PROG); MY_SETBIT(sc, MY_TCRRCR, MY_Enhanced); } my_setcfg(sc, phy_bmcr); /* Init circular RX list. */ if (my_list_rx_init(sc) == ENOBUFS) { printf("my%d: init failed: no memory for rx buffers\n", sc->my_unit); my_stop(sc); (void)splx(s); MY_UNLOCK(sc); return; } /* Init TX descriptors. */ my_list_tx_init(sc); /* If we want promiscuous mode, set the allframes bit. 
*/ if (ifp->if_flags & IFF_PROMISC) MY_SETBIT(sc, MY_TCRRCR, MY_PROM); else MY_CLRBIT(sc, MY_TCRRCR, MY_PROM); /* * Set capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) MY_SETBIT(sc, MY_TCRRCR, MY_AB); else MY_CLRBIT(sc, MY_TCRRCR, MY_AB); /* * Program the multicast filter, if necessary. */ my_setmulti(sc); /* * Load the address of the RX list. */ MY_CLRBIT(sc, MY_TCRRCR, MY_RE); CSR_WRITE_4(sc, MY_RXLBA, vtophys(&sc->my_ldata->my_rx_list[0])); /* * Enable interrupts. */ CSR_WRITE_4(sc, MY_IMR, MY_INTRS); CSR_WRITE_4(sc, MY_ISR, 0xFFFFFFFF); /* Enable receiver and transmitter. */ MY_SETBIT(sc, MY_TCRRCR, MY_RE); MY_CLRBIT(sc, MY_TCRRCR, MY_TE); CSR_WRITE_4(sc, MY_TXLBA, vtophys(&sc->my_ldata->my_tx_list[0])); MY_SETBIT(sc, MY_TCRRCR, MY_TE); /* Restore state of BMCR */ if (sc->my_pinfo != NULL) my_phy_writereg(sc, PHY_BMCR, phy_bmcr); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; (void)splx(s); MY_UNLOCK(sc); return; } /* * Set media options. */ static int my_ifmedia_upd(struct ifnet * ifp) { struct my_softc *sc; struct ifmedia *ifm; sc = ifp->if_softc; MY_LOCK(sc); ifm = &sc->ifmedia; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { MY_UNLOCK(sc); return (EINVAL); } if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1); else my_setmode_mii(sc, ifm->ifm_media); MY_UNLOCK(sc); return (0); } /* * Report current media status. 
*/ static void my_ifmedia_sts(struct ifnet * ifp, struct ifmediareq * ifmr) { struct my_softc *sc; u_int16_t advert = 0, ability = 0; sc = ifp->if_softc; MY_LOCK(sc); ifmr->ifm_active = IFM_ETHER; if (!(my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) { #if 0 /* this version did not support 1000M, */ if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_1000) ifmr->ifm_active = IFM_ETHER | IFM_1000TX; #endif if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL) ifmr->ifm_active = IFM_ETHER | IFM_100_TX; else ifmr->ifm_active = IFM_ETHER | IFM_10_T; if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; MY_UNLOCK(sc); return; } ability = my_phy_readreg(sc, PHY_LPAR); advert = my_phy_readreg(sc, PHY_ANAR); #if 0 /* this version did not support 1000M, */ if (sc->my_pinfo->my_vid = MarvellPHYID0) { ability2 = my_phy_readreg(sc, PHY_1000SR); if (ability2 & PHY_1000SR_1000BTXFULL) { advert = 0; ability = 0; ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_FDX; } else if (ability & PHY_1000SR_1000BTXHALF) { advert = 0; ability = 0; ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_HDX; } } #endif if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) ifmr->ifm_active = IFM_ETHER | IFM_100_T4; else if (advert & PHY_ANAR_100BTXFULL && ability & PHY_ANAR_100BTXFULL) ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX; else if (advert & PHY_ANAR_100BTXHALF && ability & PHY_ANAR_100BTXHALF) ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_HDX; else if (advert & PHY_ANAR_10BTFULL && ability & PHY_ANAR_10BTFULL) ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_FDX; else if (advert & PHY_ANAR_10BTHALF && ability & PHY_ANAR_10BTHALF) ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_HDX; MY_UNLOCK(sc); return; } static int my_ioctl(struct ifnet * ifp, u_long command, caddr_t data) { struct my_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; int s, error = 0; s = splimp(); MY_LOCK(sc); switch (command) { case 
SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) my_init(sc); else if (ifp->if_flags & IFF_RUNNING) my_stop(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: my_setmulti(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); break; default: error = ether_ioctl(ifp, command, data); break; } MY_UNLOCK(sc); (void)splx(s); return (error); } static void my_watchdog(struct ifnet * ifp) { struct my_softc *sc; sc = ifp->if_softc; MY_LOCK(sc); if (sc->my_autoneg) { my_autoneg_mii(sc, MY_FLAG_DELAYTIMEO, 1); MY_UNLOCK(sc); return; } ifp->if_oerrors++; printf("my%d: watchdog timeout\n", sc->my_unit); if (!(my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT)) printf("my%d: no carrier - transceiver cable problem?\n", sc->my_unit); my_stop(sc); my_reset(sc); my_init(sc); if (ifp->if_snd.ifq_head != NULL) my_start(ifp); MY_LOCK(sc); return; } /* * Stop the adapter and free any mbufs allocated to the RX and TX lists. */ static void my_stop(struct my_softc * sc) { register int i; struct ifnet *ifp; MY_LOCK(sc); ifp = sc->my_ifp; ifp->if_timer = 0; MY_CLRBIT(sc, MY_TCRRCR, (MY_RE | MY_TE)); CSR_WRITE_4(sc, MY_IMR, 0x00000000); CSR_WRITE_4(sc, MY_TXLBA, 0x00000000); CSR_WRITE_4(sc, MY_RXLBA, 0x00000000); /* * Free data in the RX lists. */ for (i = 0; i < MY_RX_LIST_CNT; i++) { if (sc->my_cdata.my_rx_chain[i].my_mbuf != NULL) { m_freem(sc->my_cdata.my_rx_chain[i].my_mbuf); sc->my_cdata.my_rx_chain[i].my_mbuf = NULL; } } bzero((char *)&sc->my_ldata->my_rx_list, sizeof(sc->my_ldata->my_rx_list)); /* * Free the TX list buffers. 
*/ for (i = 0; i < MY_TX_LIST_CNT; i++) { if (sc->my_cdata.my_tx_chain[i].my_mbuf != NULL) { m_freem(sc->my_cdata.my_tx_chain[i].my_mbuf); sc->my_cdata.my_tx_chain[i].my_mbuf = NULL; } } bzero((char *)&sc->my_ldata->my_tx_list, sizeof(sc->my_ldata->my_tx_list)); ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); MY_UNLOCK(sc); return; } /* * Stop all chip I/O so that the kernel's probe routines don't get confused * by errant DMAs when rebooting. */ static void my_shutdown(device_t dev) { struct my_softc *sc; sc = device_get_softc(dev); my_stop(sc); return; } Index: stable/6/sys/dev/nge/if_nge.c =================================================================== --- stable/6/sys/dev/nge/if_nge.c (revision 149421) +++ stable/6/sys/dev/nge/if_nge.c (revision 149422) @@ -1,2166 +1,2168 @@ /*- * Copyright (c) 2001 Wind River Systems * Copyright (c) 1997, 1998, 1999, 2000, 2001 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * National Semiconductor DP83820/DP83821 gigabit ethernet driver * for FreeBSD. Datasheets are available from: * * http://www.national.com/ds/DP/DP83820.pdf * http://www.national.com/ds/DP/DP83821.pdf * * These chips are used on several low cost gigabit ethernet NICs * sold by D-Link, Addtron, SMC and Asante. Both parts are * virtually the same, except the 83820 is a 64-bit/32-bit part, * while the 83821 is 32-bit only. * * Many cards also use National gigE transceivers, such as the * DP83891, DP83861 and DP83862 gigPHYTER parts. The DP83861 datasheet * contains a full register description that applies to all of these * components: * * http://www.national.com/ds/DP/DP83861.pdf * * Written by Bill Paul * BSDi Open Source Solutions */ /* * The NatSemi DP83820 and 83821 controllers are enhanced versions * of the NatSemi MacPHYTER 10/100 devices. They support 10, 100 * and 1000Mbps speeds with 1000baseX (ten bit interface), MII and GMII * ports. 
Other features include 8K TX FIFO and 32K RX FIFO, TCP/IP * hardware checksum offload (IPv4 only), VLAN tagging and filtering, * priority TX and RX queues, a 2048 bit multicast hash filter, 4 RX pattern * matching buffers, one perfect address filter buffer and interrupt * moderation. The 83820 supports both 64-bit and 32-bit addressing * and data transfers: the 64-bit support can be toggled on or off * via software. This affects the size of certain fields in the DMA * descriptors. * * There are two bugs/misfeatures in the 83820/83821 that I have * discovered so far: * * - Receive buffers must be aligned on 64-bit boundaries, which means * you must resort to copying data in order to fix up the payload * alignment. * * - In order to transmit jumbo frames larger than 8170 bytes, you have * to turn off transmit checksum offloading, because the chip can't * compute the checksum on an outgoing frame unless it fits entirely * within the TX FIFO, which is only 8192 bytes in size. If you have * TX checksum offload enabled and you transmit attempt to transmit a * frame larger than 8170 bytes, the transmitter will wedge. * * To work around the latter problem, TX checksum offload is disabled * if the user selects an MTU larger than 8152 (8170 - 18). */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include /* for DELAY */ #include #include #include #include #include #include #include #include #define NGE_USEIOSPACE #include MODULE_DEPEND(nge, pci, 1, 1, 1); MODULE_DEPEND(nge, ether, 1, 1, 1); MODULE_DEPEND(nge, miibus, 1, 1, 1); /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" #define NGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) /* * Various supported device vendors/types and their names. 
*/ static struct nge_type nge_devs[] = { { NGE_VENDORID, NGE_DEVICEID, "National Semiconductor Gigabit Ethernet" }, { 0, 0, NULL } }; static int nge_probe(device_t); static int nge_attach(device_t); static int nge_detach(device_t); static int nge_newbuf(struct nge_softc *, struct nge_desc *, struct mbuf *); static int nge_encap(struct nge_softc *, struct mbuf *, u_int32_t *); #ifdef NGE_FIXUP_RX static __inline void nge_fixup_rx (struct mbuf *); #endif static void nge_rxeof(struct nge_softc *); static void nge_txeof(struct nge_softc *); static void nge_intr(void *); static void nge_tick(void *); static void nge_tick_locked(struct nge_softc *); static void nge_start(struct ifnet *); static void nge_start_locked(struct ifnet *); static int nge_ioctl(struct ifnet *, u_long, caddr_t); static void nge_init(void *); static void nge_init_locked(struct nge_softc *); static void nge_stop(struct nge_softc *); static void nge_watchdog(struct ifnet *); static void nge_shutdown(device_t); static int nge_ifmedia_upd(struct ifnet *); static void nge_ifmedia_sts(struct ifnet *, struct ifmediareq *); static void nge_delay(struct nge_softc *); static void nge_eeprom_idle(struct nge_softc *); static void nge_eeprom_putbyte(struct nge_softc *, int); static void nge_eeprom_getword(struct nge_softc *, int, u_int16_t *); static void nge_read_eeprom(struct nge_softc *, caddr_t, int, int, int); static void nge_mii_sync(struct nge_softc *); static void nge_mii_send(struct nge_softc *, u_int32_t, int); static int nge_mii_readreg(struct nge_softc *, struct nge_mii_frame *); static int nge_mii_writereg(struct nge_softc *, struct nge_mii_frame *); static int nge_miibus_readreg(device_t, int, int); static int nge_miibus_writereg(device_t, int, int, int); static void nge_miibus_statchg(device_t); static void nge_setmulti(struct nge_softc *); static void nge_reset(struct nge_softc *); static int nge_list_rx_init(struct nge_softc *); static int nge_list_tx_init(struct nge_softc *); #ifdef 
NGE_USEIOSPACE #define NGE_RES SYS_RES_IOPORT #define NGE_RID NGE_PCI_LOIO #else #define NGE_RES SYS_RES_MEMORY #define NGE_RID NGE_PCI_LOMEM #endif static device_method_t nge_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nge_probe), DEVMETHOD(device_attach, nge_attach), DEVMETHOD(device_detach, nge_detach), DEVMETHOD(device_shutdown, nge_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, nge_miibus_readreg), DEVMETHOD(miibus_writereg, nge_miibus_writereg), DEVMETHOD(miibus_statchg, nge_miibus_statchg), { 0, 0 } }; static driver_t nge_driver = { "nge", nge_methods, sizeof(struct nge_softc) }; static devclass_t nge_devclass; DRIVER_MODULE(nge, pci, nge_driver, nge_devclass, 0, 0); DRIVER_MODULE(miibus, nge, miibus_driver, miibus_devclass, 0, 0); #define NGE_SETBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, \ CSR_READ_4(sc, reg) | (x)) #define NGE_CLRBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, \ CSR_READ_4(sc, reg) & ~(x)) #define SIO_SET(x) \ CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) | (x)) #define SIO_CLR(x) \ CSR_WRITE_4(sc, NGE_MEAR, CSR_READ_4(sc, NGE_MEAR) & ~(x)) static void nge_delay(sc) struct nge_softc *sc; { int idx; for (idx = (300 / 33) + 1; idx > 0; idx--) CSR_READ_4(sc, NGE_CSR); return; } static void nge_eeprom_idle(sc) struct nge_softc *sc; { register int i; SIO_SET(NGE_MEAR_EE_CSEL); nge_delay(sc); SIO_SET(NGE_MEAR_EE_CLK); nge_delay(sc); for (i = 0; i < 25; i++) { SIO_CLR(NGE_MEAR_EE_CLK); nge_delay(sc); SIO_SET(NGE_MEAR_EE_CLK); nge_delay(sc); } SIO_CLR(NGE_MEAR_EE_CLK); nge_delay(sc); SIO_CLR(NGE_MEAR_EE_CSEL); nge_delay(sc); CSR_WRITE_4(sc, NGE_MEAR, 0x00000000); return; } /* * Send a read command and address to the EEPROM, check for ACK. */ static void nge_eeprom_putbyte(sc, addr) struct nge_softc *sc; int addr; { register int d, i; d = addr | NGE_EECMD_READ; /* * Feed in each bit and stobe the clock. 
*/ for (i = 0x400; i; i >>= 1) { if (d & i) { SIO_SET(NGE_MEAR_EE_DIN); } else { SIO_CLR(NGE_MEAR_EE_DIN); } nge_delay(sc); SIO_SET(NGE_MEAR_EE_CLK); nge_delay(sc); SIO_CLR(NGE_MEAR_EE_CLK); nge_delay(sc); } return; } /* * Read a word of data stored in the EEPROM at address 'addr.' */ static void nge_eeprom_getword(sc, addr, dest) struct nge_softc *sc; int addr; u_int16_t *dest; { register int i; u_int16_t word = 0; /* Force EEPROM to idle state. */ nge_eeprom_idle(sc); /* Enter EEPROM access mode. */ nge_delay(sc); SIO_CLR(NGE_MEAR_EE_CLK); nge_delay(sc); SIO_SET(NGE_MEAR_EE_CSEL); nge_delay(sc); /* * Send address of word we want to read. */ nge_eeprom_putbyte(sc, addr); /* * Start reading bits from EEPROM. */ for (i = 0x8000; i; i >>= 1) { SIO_SET(NGE_MEAR_EE_CLK); nge_delay(sc); if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_EE_DOUT) word |= i; nge_delay(sc); SIO_CLR(NGE_MEAR_EE_CLK); nge_delay(sc); } /* Turn off EEPROM access mode. */ nge_eeprom_idle(sc); *dest = word; return; } /* * Read a sequence of words from the EEPROM. */ static void nge_read_eeprom(sc, dest, off, cnt, swap) struct nge_softc *sc; caddr_t dest; int off; int cnt; int swap; { int i; u_int16_t word = 0, *ptr; for (i = 0; i < cnt; i++) { nge_eeprom_getword(sc, off + i, &word); ptr = (u_int16_t *)(dest + (i * 2)); if (swap) *ptr = ntohs(word); else *ptr = word; } return; } /* * Sync the PHYs by setting data bit and strobing the clock 32 times. */ static void nge_mii_sync(sc) struct nge_softc *sc; { register int i; SIO_SET(NGE_MEAR_MII_DIR|NGE_MEAR_MII_DATA); for (i = 0; i < 32; i++) { SIO_SET(NGE_MEAR_MII_CLK); DELAY(1); SIO_CLR(NGE_MEAR_MII_CLK); DELAY(1); } return; } /* * Clock a series of bits through the MII. 
*/ static void nge_mii_send(sc, bits, cnt) struct nge_softc *sc; u_int32_t bits; int cnt; { int i; SIO_CLR(NGE_MEAR_MII_CLK); for (i = (0x1 << (cnt - 1)); i; i >>= 1) { if (bits & i) { SIO_SET(NGE_MEAR_MII_DATA); } else { SIO_CLR(NGE_MEAR_MII_DATA); } DELAY(1); SIO_CLR(NGE_MEAR_MII_CLK); DELAY(1); SIO_SET(NGE_MEAR_MII_CLK); } } /* * Read an PHY register through the MII. */ static int nge_mii_readreg(sc, frame) struct nge_softc *sc; struct nge_mii_frame *frame; { int i, ack; /* * Set up frame for RX. */ frame->mii_stdelim = NGE_MII_STARTDELIM; frame->mii_opcode = NGE_MII_READOP; frame->mii_turnaround = 0; frame->mii_data = 0; CSR_WRITE_4(sc, NGE_MEAR, 0); /* * Turn on data xmit. */ SIO_SET(NGE_MEAR_MII_DIR); nge_mii_sync(sc); /* * Send command/address info. */ nge_mii_send(sc, frame->mii_stdelim, 2); nge_mii_send(sc, frame->mii_opcode, 2); nge_mii_send(sc, frame->mii_phyaddr, 5); nge_mii_send(sc, frame->mii_regaddr, 5); /* Idle bit */ SIO_CLR((NGE_MEAR_MII_CLK|NGE_MEAR_MII_DATA)); DELAY(1); SIO_SET(NGE_MEAR_MII_CLK); DELAY(1); /* Turn off xmit. */ SIO_CLR(NGE_MEAR_MII_DIR); /* Check for ack */ SIO_CLR(NGE_MEAR_MII_CLK); DELAY(1); ack = CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA; SIO_SET(NGE_MEAR_MII_CLK); DELAY(1); /* * Now try reading data bits. If the ack failed, we still * need to clock through 16 cycles to keep the PHY(s) in sync. */ if (ack) { for(i = 0; i < 16; i++) { SIO_CLR(NGE_MEAR_MII_CLK); DELAY(1); SIO_SET(NGE_MEAR_MII_CLK); DELAY(1); } goto fail; } for (i = 0x8000; i; i >>= 1) { SIO_CLR(NGE_MEAR_MII_CLK); DELAY(1); if (!ack) { if (CSR_READ_4(sc, NGE_MEAR) & NGE_MEAR_MII_DATA) frame->mii_data |= i; DELAY(1); } SIO_SET(NGE_MEAR_MII_CLK); DELAY(1); } fail: SIO_CLR(NGE_MEAR_MII_CLK); DELAY(1); SIO_SET(NGE_MEAR_MII_CLK); DELAY(1); if (ack) return(1); return(0); } /* * Write to a PHY register through the MII. */ static int nge_mii_writereg(sc, frame) struct nge_softc *sc; struct nge_mii_frame *frame; { /* * Set up frame for TX. 
*/ frame->mii_stdelim = NGE_MII_STARTDELIM; frame->mii_opcode = NGE_MII_WRITEOP; frame->mii_turnaround = NGE_MII_TURNAROUND; /* * Turn on data output. */ SIO_SET(NGE_MEAR_MII_DIR); nge_mii_sync(sc); nge_mii_send(sc, frame->mii_stdelim, 2); nge_mii_send(sc, frame->mii_opcode, 2); nge_mii_send(sc, frame->mii_phyaddr, 5); nge_mii_send(sc, frame->mii_regaddr, 5); nge_mii_send(sc, frame->mii_turnaround, 2); nge_mii_send(sc, frame->mii_data, 16); /* Idle bit. */ SIO_SET(NGE_MEAR_MII_CLK); DELAY(1); SIO_CLR(NGE_MEAR_MII_CLK); DELAY(1); /* * Turn off xmit. */ SIO_CLR(NGE_MEAR_MII_DIR); return(0); } static int nge_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct nge_softc *sc; struct nge_mii_frame frame; sc = device_get_softc(dev); bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; nge_mii_readreg(sc, &frame); return(frame.mii_data); } static int nge_miibus_writereg(dev, phy, reg, data) device_t dev; int phy, reg, data; { struct nge_softc *sc; struct nge_mii_frame frame; sc = device_get_softc(dev); bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; frame.mii_data = data; nge_mii_writereg(sc, &frame); return(0); } static void nge_miibus_statchg(dev) device_t dev; { int status; struct nge_softc *sc; struct mii_data *mii; sc = device_get_softc(dev); if (sc->nge_tbi) { if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media) == IFM_AUTO) { status = CSR_READ_4(sc, NGE_TBI_ANLPAR); if (status == 0 || status & NGE_TBIANAR_FDX) { NGE_SETBIT(sc, NGE_TX_CFG, (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); } else { NGE_CLRBIT(sc, NGE_TX_CFG, (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); } } else if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK) != IFM_FDX) { NGE_CLRBIT(sc, NGE_TX_CFG, (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); } else { NGE_SETBIT(sc, NGE_TX_CFG, 
(NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); } } else { mii = device_get_softc(sc->nge_miibus); if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { NGE_SETBIT(sc, NGE_TX_CFG, (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); } else { NGE_CLRBIT(sc, NGE_TX_CFG, (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR)); NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX); } /* If we have a 1000Mbps link, set the mode_1000 bit. */ if (IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_T || IFM_SUBTYPE(mii->mii_media_active) == IFM_1000_SX) { NGE_SETBIT(sc, NGE_CFG, NGE_CFG_MODE_1000); } else { NGE_CLRBIT(sc, NGE_CFG, NGE_CFG_MODE_1000); } } return; } static void nge_setmulti(sc) struct nge_softc *sc; { struct ifnet *ifp; struct ifmultiaddr *ifma; u_int32_t h = 0, i, filtsave; int bit, index; NGE_LOCK_ASSERT(sc); ifp = sc->nge_ifp; if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_MCHASH|NGE_RXFILTCTL_UCHASH); NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLMULTI); return; } /* * We have to explicitly enable the multicast hash table * on the NatSemi chip if we want to use it, which we do. * We also have to tell it that we don't want to use the * hash table for matching unicast addresses. */ NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_MCHASH); NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLMULTI|NGE_RXFILTCTL_UCHASH); filtsave = CSR_READ_4(sc, NGE_RXFILT_CTL); /* first, zot all the existing hash bits */ for (i = 0; i < NGE_MCAST_FILTER_LEN; i += 2) { CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + i); CSR_WRITE_4(sc, NGE_RXFILT_DATA, 0); } /* * From the 11 bits returned by the crc routine, the top 7 * bits represent the 16-bit word in the mcast hash table * that needs to be updated, and the lower 4 bits represent * which bit within that byte needs to be set. 
*/ + IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) >> 21; index = (h >> 4) & 0x7F; bit = h & 0xF; CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_MCAST_LO + (index * 2)); NGE_SETBIT(sc, NGE_RXFILT_DATA, (1 << bit)); } + IF_ADDR_UNLOCK(ifp); CSR_WRITE_4(sc, NGE_RXFILT_CTL, filtsave); return; } static void nge_reset(sc) struct nge_softc *sc; { register int i; NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RESET); for (i = 0; i < NGE_TIMEOUT; i++) { if (!(CSR_READ_4(sc, NGE_CSR) & NGE_CSR_RESET)) break; } if (i == NGE_TIMEOUT) printf("nge%d: reset never completed\n", sc->nge_unit); /* Wait a little while for the chip to get its brains in order. */ DELAY(1000); /* * If this is a NetSemi chip, make sure to clear * PME mode. */ CSR_WRITE_4(sc, NGE_CLKRUN, NGE_CLKRUN_PMESTS); CSR_WRITE_4(sc, NGE_CLKRUN, 0); return; } /* * Probe for a NatSemi chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int nge_probe(dev) device_t dev; { struct nge_type *t; t = nge_devs; while(t->nge_name != NULL) { if ((pci_get_vendor(dev) == t->nge_vid) && (pci_get_device(dev) == t->nge_did)) { device_set_desc(dev, t->nge_name); return(BUS_PROBE_DEFAULT); } t++; } return(ENXIO); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int nge_attach(dev) device_t dev; { u_char eaddr[ETHER_ADDR_LEN]; struct nge_softc *sc; struct ifnet *ifp; int unit, error = 0, rid; const char *sep = ""; sc = device_get_softc(dev); unit = device_get_unit(dev); bzero(sc, sizeof(struct nge_softc)); NGE_LOCK_INIT(sc, device_get_nameunit(dev)); /* * Map control/status registers. 
*/ pci_enable_busmaster(dev); rid = NGE_RID; sc->nge_res = bus_alloc_resource_any(dev, NGE_RES, &rid, RF_ACTIVE); if (sc->nge_res == NULL) { printf("nge%d: couldn't map ports/memory\n", unit); error = ENXIO; goto fail; } sc->nge_btag = rman_get_bustag(sc->nge_res); sc->nge_bhandle = rman_get_bushandle(sc->nge_res); /* Allocate interrupt */ rid = 0; sc->nge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->nge_irq == NULL) { printf("nge%d: couldn't map interrupt\n", unit); bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res); error = ENXIO; goto fail; } /* Reset the adapter. */ nge_reset(sc); /* * Get station address from the EEPROM. */ nge_read_eeprom(sc, (caddr_t)&eaddr[4], NGE_EE_NODEADDR, 1, 0); nge_read_eeprom(sc, (caddr_t)&eaddr[2], NGE_EE_NODEADDR + 1, 1, 0); nge_read_eeprom(sc, (caddr_t)&eaddr[0], NGE_EE_NODEADDR + 2, 1, 0); sc->nge_unit = unit; /* XXX: leaked on error */ sc->nge_ldata = contigmalloc(sizeof(struct nge_list_data), M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc->nge_ldata == NULL) { printf("nge%d: no memory for list buffers!\n", unit); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq); bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res); error = ENXIO; goto fail; } ifp = sc->nge_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { printf("nge%d: can not if_alloc()\n", unit); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq); bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res); error = ENOSPC; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = nge_ioctl; ifp->if_start = nge_start; ifp->if_watchdog = nge_watchdog; ifp->if_init = nge_init; ifp->if_baudrate = 1000000000; ifp->if_snd.ifq_maxlen = NGE_TX_LIST_CNT - 1; ifp->if_hwassist = NGE_CSUM_FEATURES; ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWTAGGING; #ifdef DEVICE_POLLING 
ifp->if_capabilities |= IFCAP_POLLING; #endif ifp->if_capenable = ifp->if_capabilities; /* * Do MII setup. */ if (mii_phy_probe(dev, &sc->nge_miibus, nge_ifmedia_upd, nge_ifmedia_sts)) { if (CSR_READ_4(sc, NGE_CFG) & NGE_CFG_TBI_EN) { sc->nge_tbi = 1; device_printf(dev, "Using TBI\n"); sc->nge_miibus = dev; ifmedia_init(&sc->nge_ifmedia, 0, nge_ifmedia_upd, nge_ifmedia_sts); #define ADD(m, c) ifmedia_add(&sc->nge_ifmedia, (m), (c), NULL) #define PRINT(s) printf("%s%s", sep, s); sep = ", " ADD(IFM_MAKEWORD(IFM_ETHER, IFM_NONE, 0, 0), 0); device_printf(dev, " "); ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_SX, 0, 0), 0); PRINT("1000baseSX"); ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_SX, IFM_FDX, 0),0); PRINT("1000baseSX-FDX"); ADD(IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, 0), 0); PRINT("auto"); printf("\n"); #undef ADD #undef PRINT ifmedia_set(&sc->nge_ifmedia, IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, 0)); CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO) | NGE_GPIO_GP4_OUT | NGE_GPIO_GP1_OUTENB | NGE_GPIO_GP2_OUTENB | NGE_GPIO_GP3_OUTENB | NGE_GPIO_GP3_IN | NGE_GPIO_GP4_IN); } else { printf("nge%d: MII without any PHY!\n", sc->nge_unit); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq); bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res); error = ENXIO; goto fail; } } /* * Call MI attach routine. */ ether_ifattach(ifp, eaddr); callout_init(&sc->nge_stat_ch, CALLOUT_MPSAFE); /* * Hookup IRQ last. 
*/ error = bus_setup_intr(dev, sc->nge_irq, INTR_TYPE_NET | INTR_MPSAFE, nge_intr, sc, &sc->nge_intrhand); if (error) { /* XXX: resource leaks */ bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq); bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res); printf("nge%d: couldn't set up irq\n", unit); } fail: if (error) NGE_LOCK_DESTROY(sc); return(error); } static int nge_detach(dev) device_t dev; { struct nge_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->nge_ifp; NGE_LOCK(sc); nge_reset(sc); nge_stop(sc); NGE_UNLOCK(sc); ether_ifdetach(ifp); if_free(ifp); bus_generic_detach(dev); if (!sc->nge_tbi) { device_delete_child(dev, sc->nge_miibus); } bus_teardown_intr(dev, sc->nge_irq, sc->nge_intrhand); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->nge_irq); bus_release_resource(dev, NGE_RES, NGE_RID, sc->nge_res); contigfree(sc->nge_ldata, sizeof(struct nge_list_data), M_DEVBUF); NGE_LOCK_DESTROY(sc); return(0); } /* * Initialize the transmit descriptors. */ static int nge_list_tx_init(sc) struct nge_softc *sc; { struct nge_list_data *ld; struct nge_ring_data *cd; int i; cd = &sc->nge_cdata; ld = sc->nge_ldata; for (i = 0; i < NGE_TX_LIST_CNT; i++) { if (i == (NGE_TX_LIST_CNT - 1)) { ld->nge_tx_list[i].nge_nextdesc = &ld->nge_tx_list[0]; ld->nge_tx_list[i].nge_next = vtophys(&ld->nge_tx_list[0]); } else { ld->nge_tx_list[i].nge_nextdesc = &ld->nge_tx_list[i + 1]; ld->nge_tx_list[i].nge_next = vtophys(&ld->nge_tx_list[i + 1]); } ld->nge_tx_list[i].nge_mbuf = NULL; ld->nge_tx_list[i].nge_ptr = 0; ld->nge_tx_list[i].nge_ctl = 0; } cd->nge_tx_prod = cd->nge_tx_cons = cd->nge_tx_cnt = 0; return(0); } /* * Initialize the RX descriptors and allocate mbufs for them. Note that * we arrange the descriptors in a closed ring, so that the last descriptor * points back to the first. 
*/ static int nge_list_rx_init(sc) struct nge_softc *sc; { struct nge_list_data *ld; struct nge_ring_data *cd; int i; ld = sc->nge_ldata; cd = &sc->nge_cdata; for (i = 0; i < NGE_RX_LIST_CNT; i++) { if (nge_newbuf(sc, &ld->nge_rx_list[i], NULL) == ENOBUFS) return(ENOBUFS); if (i == (NGE_RX_LIST_CNT - 1)) { ld->nge_rx_list[i].nge_nextdesc = &ld->nge_rx_list[0]; ld->nge_rx_list[i].nge_next = vtophys(&ld->nge_rx_list[0]); } else { ld->nge_rx_list[i].nge_nextdesc = &ld->nge_rx_list[i + 1]; ld->nge_rx_list[i].nge_next = vtophys(&ld->nge_rx_list[i + 1]); } } cd->nge_rx_prod = 0; sc->nge_head = sc->nge_tail = NULL; return(0); } /* * Initialize an RX descriptor and attach an MBUF cluster. */ static int nge_newbuf(sc, c, m) struct nge_softc *sc; struct nge_desc *c; struct mbuf *m; { if (m == NULL) { m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); } else m->m_data = m->m_ext.ext_buf; m->m_len = m->m_pkthdr.len = MCLBYTES; m_adj(m, sizeof(u_int64_t)); c->nge_mbuf = m; c->nge_ptr = vtophys(mtod(m, caddr_t)); c->nge_ctl = m->m_len; c->nge_extsts = 0; return(0); } #ifdef NGE_FIXUP_RX static __inline void nge_fixup_rx(m) struct mbuf *m; { int i; uint16_t *src, *dst; src = mtod(m, uint16_t *); dst = src - 1; for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) *dst++ = *src++; m->m_data -= ETHER_ALIGN; return; } #endif /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. 
 */
static void
nge_rxeof(sc)
	struct nge_softc	*sc;
{
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct nge_desc		*cur_rx;
	int			i, total_len = 0;
	u_int32_t		rxstat;

	NGE_LOCK_ASSERT(sc);
	ifp = sc->nge_ifp;
	i = sc->nge_cdata.nge_rx_prod;

	/* Walk descriptors the chip has handed back to the host. */
	while(NGE_OWNDESC(&sc->nge_ldata->nge_rx_list[i])) {
		u_int32_t		extsts;

#ifdef DEVICE_POLLING
		/* In polling mode only consume up to rxcycles frames. */
		if (ifp->if_flags & IFF_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif /* DEVICE_POLLING */

		cur_rx = &sc->nge_ldata->nge_rx_list[i];
		rxstat = cur_rx->nge_rxstat;
		extsts = cur_rx->nge_extsts;
		m = cur_rx->nge_mbuf;
		cur_rx->nge_mbuf = NULL;
		total_len = NGE_RXBYTES(cur_rx);
		NGE_INC(i, NGE_RX_LIST_CNT);

		/*
		 * MORE set means this is one fragment of a multi-descriptor
		 * (jumbo) frame: accumulate it on the nge_head/nge_tail
		 * chain and keep going.
		 */
		if (rxstat & NGE_CMDSTS_MORE) {
			m->m_len = total_len;
			if (sc->nge_head == NULL) {
				m->m_pkthdr.len = total_len;
				sc->nge_head = sc->nge_tail = m;
			} else {
				m->m_flags &= ~M_PKTHDR;
				sc->nge_head->m_pkthdr.len += total_len;
				sc->nge_tail->m_next = m;
				sc->nge_tail = m;
			}
			nge_newbuf(sc, cur_rx, NULL);
			continue;
		}

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring.
		 */
		if (!(rxstat & NGE_CMDSTS_PKT_OK)) {
			ifp->if_ierrors++;
			if (sc->nge_head != NULL) {
				m_freem(sc->nge_head);
				sc->nge_head = sc->nge_tail = NULL;
			}
			nge_newbuf(sc, cur_rx, m);
			continue;
		}

		/* Try conjure up a replacement mbuf. */
		if (nge_newbuf(sc, cur_rx, NULL)) {
			/* Allocation failed: recycle 'm', drop the frame. */
			ifp->if_ierrors++;
			if (sc->nge_head != NULL) {
				m_freem(sc->nge_head);
				sc->nge_head = sc->nge_tail = NULL;
			}
			nge_newbuf(sc, cur_rx, m);
			continue;
		}

		/* Final fragment of a chained frame, or a whole frame. */
		if (sc->nge_head != NULL) {
			m->m_len = total_len;
			m->m_flags &= ~M_PKTHDR;
			sc->nge_tail->m_next = m;
			m = sc->nge_head;
			m->m_pkthdr.len += total_len;
			sc->nge_head = sc->nge_tail = NULL;
		} else
			m->m_pkthdr.len = m->m_len = total_len;

		/*
		 * Ok. NatSemi really screwed up here. This is the
		 * only gigE chip I know of with alignment constraints
		 * on receive buffers. RX buffers must be 64-bit aligned.
		 */
		/*
		 * By popular demand, ignore the alignment problems
		 * on the Intel x86 platform. The performance hit
		 * incurred due to unaligned accesses is much smaller
		 * than the hit produced by forcing buffer copies all
		 * the time, especially with jumbo frames. We still
		 * need to fix up the alignment everywhere else though.
		 */
#ifdef NGE_FIXUP_RX
		nge_fixup_rx(m);
#endif
		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Do IP checksum checking. */
		if (extsts & NGE_RXEXTSTS_IPPKT)
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
		if (!(extsts & NGE_RXEXTSTS_IPCSUMERR))
			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		if ((extsts & NGE_RXEXTSTS_TCPPKT &&
		    !(extsts & NGE_RXEXTSTS_TCPCSUMERR)) ||
		    (extsts & NGE_RXEXTSTS_UDPPKT &&
		    !(extsts & NGE_RXEXTSTS_UDPCSUMERR))) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}

		/*
		 * If we received a packet with a vlan tag, pass it
		 * to vlan_input() instead of ether_input().
		 */
		if (extsts & NGE_RXEXTSTS_VLANPKT) {
			VLAN_INPUT_TAG(ifp, m,
			    ntohs(extsts & NGE_RXEXTSTS_VTCI), continue);
		}

		/* Drop the driver lock around the upcall into the stack. */
		NGE_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		NGE_LOCK(sc);
	}

	sc->nge_cdata.nge_rx_prod = i;

	return;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
nge_txeof(sc)
	struct nge_softc	*sc;
{
	struct nge_desc		*cur_tx;
	struct ifnet		*ifp;
	u_int32_t		idx;

	NGE_LOCK_ASSERT(sc);
	ifp = sc->nge_ifp;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	idx = sc->nge_cdata.nge_tx_cons;
	while (idx != sc->nge_cdata.nge_tx_prod) {
		cur_tx = &sc->nge_ldata->nge_tx_list[idx];

		/* Still owned by the chip: stop reclaiming. */
		if (NGE_OWNDESC(cur_tx))
			break;

		/* Intermediate fragment: no mbuf or stats to handle. */
		if (cur_tx->nge_ctl & NGE_CMDSTS_MORE) {
			sc->nge_cdata.nge_tx_cnt--;
			NGE_INC(idx, NGE_TX_LIST_CNT);
			continue;
		}

		if (!(cur_tx->nge_ctl & NGE_CMDSTS_PKT_OK)) {
			ifp->if_oerrors++;
			if (cur_tx->nge_txstat & NGE_TXSTAT_EXCESSCOLLS)
				ifp->if_collisions++;
			if (cur_tx->nge_txstat & NGE_TXSTAT_OUTOFWINCOLL)
				ifp->if_collisions++;
		}

		/* Collision count is in the upper half of txstat. */
		ifp->if_collisions +=
		    (cur_tx->nge_txstat & NGE_TXSTAT_COLLCNT) >> 16;

		ifp->if_opackets++;
		if (cur_tx->nge_mbuf != NULL) {
			m_freem(cur_tx->nge_mbuf);
			cur_tx->nge_mbuf = NULL;
			ifp->if_flags &= ~IFF_OACTIVE;
		}

		sc->nge_cdata.nge_tx_cnt--;
		NGE_INC(idx, NGE_TX_LIST_CNT);
	}

	sc->nge_cdata.nge_tx_cons = idx;

	/* Ring fully drained: cancel the watchdog. */
	if (idx == sc->nge_cdata.nge_tx_prod)
		ifp->if_timer = 0;

	return;
}

/* Periodic timer: lock wrapper around nge_tick_locked(). */
static void
nge_tick(xsc)
	void			*xsc;
{
	struct nge_softc	*sc;

	sc = xsc;

	NGE_LOCK(sc);
	nge_tick_locked(sc);
	NGE_UNLOCK(sc);
}

/*
 * Once-a-second housekeeping: poll link state (TBI fiber or copper
 * PHY via mii_tick()), kick the transmit queue when the link first
 * comes up, and reschedule ourselves.
 */
static void
nge_tick_locked(sc)
	struct nge_softc	*sc;
{
	struct mii_data		*mii;
	struct ifnet		*ifp;

	NGE_LOCK_ASSERT(sc);
	ifp = sc->nge_ifp;

	if (sc->nge_tbi) {
		if (!sc->nge_link) {
			if (CSR_READ_4(sc, NGE_TBI_BMSR)
			    & NGE_TBIBMSR_ANEG_DONE) {
				if (bootverbose)
					printf("nge%d: gigabit link up\n",
					    sc->nge_unit);
				nge_miibus_statchg(sc->nge_miibus);
				sc->nge_link++;
				if (ifp->if_snd.ifq_head != NULL)
					nge_start_locked(ifp);
			}
		}
	} else {
		mii = device_get_softc(sc->nge_miibus);
		mii_tick(mii);

		if (!sc->nge_link) {
			if (mii->mii_media_status & IFM_ACTIVE &&
			    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
				sc->nge_link++;
				if (IFM_SUBTYPE(mii->mii_media_active)
				    == IFM_1000_T && bootverbose)
					printf("nge%d: gigabit link up\n",
					    sc->nge_unit);
				if (ifp->if_snd.ifq_head != NULL)
					nge_start_locked(ifp);
			}
		}
	}
	callout_reset(&sc->nge_stat_ch, hz, nge_tick, sc);

	return;
}

#ifdef DEVICE_POLLING
static poll_handler_t nge_poll;

static void
nge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct nge_softc *sc = ifp->if_softc;

	NGE_LOCK(sc);
	/* Capability was cleared while registered: tear polling down. */
	if (!(ifp->if_capenable & IFCAP_POLLING)) {
		ether_poll_deregister(ifp);
		cmd = POLL_DEREGISTER;
	}
	if (cmd == POLL_DEREGISTER) {	/* final call, enable interrupts */
		CSR_WRITE_4(sc, NGE_IER, 1);
		NGE_UNLOCK(sc);
		return;
	}

	/*
	 * On the nge, reading the status register also clears it.
	 * So before returning to intr mode we must make sure that all
	 * possible pending sources of interrupts have been served.
	 * In practice this means run to completion the *eof routines,
	 * and then call the interrupt routine
	 */
	sc->rxcycles = count;
	nge_rxeof(sc);
	nge_txeof(sc);
	if (ifp->if_snd.ifq_head != NULL)
		nge_start_locked(ifp);

	if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) {
		u_int32_t	status;

		/* Reading the ISR register clears all interrupts. */
		status = CSR_READ_4(sc, NGE_ISR);

		if (status & (NGE_ISR_RX_ERR|NGE_ISR_RX_OFLOW))
			nge_rxeof(sc);

		/* RX engine idled: re-enable it. */
		if (status & (NGE_ISR_RX_IDLE))
			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);

		if (status & NGE_ISR_SYSERR) {
			nge_reset(sc);
			nge_init_locked(sc);
		}
	}
	NGE_UNLOCK(sc);
}
#endif /* DEVICE_POLLING */

/*
 * Interrupt handler: dispatch TX/RX completions, recover from system
 * errors, and hand off to the polling path when it is active.
 */
static void
nge_intr(arg)
	void			*arg;
{
	struct nge_softc	*sc;
	struct ifnet		*ifp;
	u_int32_t		status;

	sc = arg;
	ifp = sc->nge_ifp;

	NGE_LOCK(sc);
#ifdef DEVICE_POLLING
	/* Polling owns the device: nothing for the ISR to do. */
	if (ifp->if_flags & IFF_POLLING) {
		NGE_UNLOCK(sc);
		return;
	}
	if ((ifp->if_capenable & IFCAP_POLLING) &&
	    ether_poll_register(nge_poll, ifp)) { /* ok, disable interrupts */
		CSR_WRITE_4(sc, NGE_IER, 0);
		NGE_UNLOCK(sc);
		nge_poll(ifp, 0, 1);
		return;
	}
#endif /* DEVICE_POLLING */

	/* Supress unwanted interrupts */
	if (!(ifp->if_flags & IFF_UP)) {
		nge_stop(sc);
		NGE_UNLOCK(sc);
		return;
	}

	/* Disable interrupts. */
	CSR_WRITE_4(sc, NGE_IER, 0);

	/* Data LED on for TBI mode */
	if(sc->nge_tbi)
		 CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
			     | NGE_GPIO_GP3_OUT);

	for (;;) {
		/* Reading the ISR register clears all interrupts. */
		status = CSR_READ_4(sc, NGE_ISR);
		if ((status & NGE_INTRS) == 0)
			break;

		if ((status & NGE_ISR_TX_DESC_OK) ||
		    (status & NGE_ISR_TX_ERR) ||
		    (status & NGE_ISR_TX_OK) ||
		    (status & NGE_ISR_TX_IDLE))
			nge_txeof(sc);

		if ((status & NGE_ISR_RX_DESC_OK) ||
		    (status & NGE_ISR_RX_ERR) ||
		    (status & NGE_ISR_RX_OFLOW) ||
		    (status & NGE_ISR_RX_FIFO_OFLOW) ||
		    (status & NGE_ISR_RX_IDLE) ||
		    (status & NGE_ISR_RX_OK))
			nge_rxeof(sc);

		/* RX engine idled: re-enable it. */
		if ((status & NGE_ISR_RX_IDLE))
			NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);

		/* Fatal bus/system error: full reset and re-init. */
		if (status & NGE_ISR_SYSERR) {
			nge_reset(sc);
			ifp->if_flags &= ~IFF_RUNNING;
			nge_init_locked(sc);
		}

#if 0
		/*
		 * XXX: nge_tick() is not ready to be called this way
		 * it screws up the aneg timeout because mii_tick() is
		 * only to be called once per second.
		 */
		if (status & NGE_IMR_PHY_INTR) {
			sc->nge_link = 0;
			nge_tick_locked(sc);
		}
#endif
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, NGE_IER, 1);

	if (ifp->if_snd.ifq_head != NULL)
		nge_start_locked(ifp);

	/* Data LED off for TBI mode */
	if(sc->nge_tbi)
		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
			    & ~NGE_GPIO_GP3_OUT);

	NGE_UNLOCK(sc);

	return;
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
nge_encap(sc, m_head, txidx)
	struct nge_softc	*sc;
	struct mbuf		*m_head;
	u_int32_t		*txidx;
{
	struct nge_desc		*f = NULL;
	struct mbuf		*m;
	int			frag, cur, cnt = 0;
	struct m_tag		*mtag;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	cur = frag = *txidx;

	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			/* Keep at least two descriptors free. */
			if ((NGE_TX_LIST_CNT -
			    (sc->nge_cdata.nge_tx_cnt + cnt)) < 2)
				return(ENOBUFS);
			f = &sc->nge_ldata->nge_tx_list[frag];
			f->nge_ctl = NGE_CMDSTS_MORE | m->m_len;
			f->nge_ptr = vtophys(mtod(m, vm_offset_t));
			/*
			 * OWN is set now on every fragment except the
			 * first; the first descriptor's OWN bit is set
			 * last (below) so the chip cannot start on a
			 * half-built chain.
			 */
			if (cnt != 0)
				f->nge_ctl |= NGE_CMDSTS_OWN;
			cur = frag;
			NGE_INC(frag, NGE_TX_LIST_CNT);
			cnt++;
		}
	}

	/* Ran out of descriptors before consuming the whole chain. */
	if (m != NULL)
		return(ENOBUFS);

	/* Request hardware checksum offload as flagged on the packet. */
	sc->nge_ldata->nge_tx_list[*txidx].nge_extsts = 0;
	if (m_head->m_pkthdr.csum_flags) {
		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |=
			    NGE_TXEXTSTS_IPCSUM;
		if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
			sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |=
			    NGE_TXEXTSTS_TCPCSUM;
		if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
			sc->nge_ldata->nge_tx_list[*txidx].nge_extsts |=
			    NGE_TXEXTSTS_UDPCSUM;
	}

	/* Ask the chip to insert the VLAN tag, if one is attached. */
	mtag = VLAN_OUTPUT_TAG(sc->nge_ifp, m_head);
	if (mtag != NULL) {
		sc->nge_ldata->nge_tx_list[cur].nge_extsts |=
		    (NGE_TXEXTSTS_VLANPKT|htons(VLAN_TAG_VALUE(mtag)));
	}

	/*
	 * Record the mbuf on the last fragment, clear MORE there, and
	 * only then hand the first descriptor to the chip (OWN).
	 */
	sc->nge_ldata->nge_tx_list[cur].nge_mbuf = m_head;
	sc->nge_ldata->nge_tx_list[cur].nge_ctl &= ~NGE_CMDSTS_MORE;
	sc->nge_ldata->nge_tx_list[*txidx].nge_ctl |= NGE_CMDSTS_OWN;
	sc->nge_cdata.nge_tx_cnt += cnt;
	*txidx = frag;

	return(0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
static void
nge_start(ifp)
	struct ifnet		*ifp;
{
	struct nge_softc	*sc;

	sc = ifp->if_softc;
	NGE_LOCK(sc);
	nge_start_locked(ifp);
	NGE_UNLOCK(sc);
}

/*
 * Drain the interface send queue into the TX descriptor ring.
 * Runs with the driver lock held; stops when the ring fills
 * (sets IFF_OACTIVE) or the queue empties.
 */
static void
nge_start_locked(ifp)
	struct ifnet		*ifp;
{
	struct nge_softc	*sc;
	struct mbuf		*m_head = NULL;
	u_int32_t		idx;

	sc = ifp->if_softc;

	/* No link yet: leave packets queued. */
	if (!sc->nge_link)
		return;

	idx = sc->nge_cdata.nge_tx_prod;

	if (ifp->if_flags & IFF_OACTIVE)
		return;

	while(sc->nge_ldata->nge_tx_list[idx].nge_mbuf == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (nge_encap(sc, m_head, &idx)) {
			/* Ring full: requeue and mark interface busy. */
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	/* Transmit */
	sc->nge_cdata.nge_tx_prod = idx;
	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_ENABLE);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	return;
}

/* Bring the interface up: lock wrapper around nge_init_locked(). */
static void
nge_init(xsc)
	void			*xsc;
{
	struct nge_softc	*sc = xsc;

	NGE_LOCK(sc);
	nge_init_locked(sc);
	NGE_UNLOCK(sc);
}

/*
 * Full hardware initialization: program the station address and
 * receive filter, build the RX/TX rings, configure checksum/VLAN
 * offload and interrupt moderation, then enable the engines.
 */
static void
nge_init_locked(sc)
	struct nge_softc	*sc;
{
	struct ifnet		*ifp = sc->nge_ifp;
	struct mii_data		*mii;

	NGE_LOCK_ASSERT(sc);

	if (ifp->if_flags & IFF_RUNNING)
		return;

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	nge_stop(sc);

	if (sc->nge_tbi) {
		mii = NULL;
	} else {
		mii = device_get_softc(sc->nge_miibus);
	}

	/* Set MAC address */
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR0);
	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
	    ((u_int16_t *)IFP2ENADDR(sc->nge_ifp))[0]);
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR1);
	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
	    ((u_int16_t *)IFP2ENADDR(sc->nge_ifp))[1]);
	CSR_WRITE_4(sc, NGE_RXFILT_CTL, NGE_FILTADDR_PAR2);
	CSR_WRITE_4(sc, NGE_RXFILT_DATA,
	    ((u_int16_t *)IFP2ENADDR(sc->nge_ifp))[2]);

	/* Init circular RX list. */
	if (nge_list_rx_init(sc) == ENOBUFS) {
		printf("nge%d: initialization failed: no "
			"memory for rx buffers\n", sc->nge_unit);
		nge_stop(sc);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	nge_list_tx_init(sc);

	/*
	 * For the NatSemi chip, we have to explicitly enable the
	 * reception of ARP frames, as well as turn on the 'perfect
	 * match' filter where we store the station address, otherwise
	 * we won't receive unicasts meant for this host.
	 */
	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ARP);
	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_PERFECT);

	 /* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS);
	} else {
		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS);
	}

	/*
	 * Set the capture broadcast bit to capture broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST) {
		NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);
	} else {
		NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_BROAD);
	}

	/*
	 * Load the multicast filter.
	 */
	nge_setmulti(sc);

	/* Turn the receive filter on */
	NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ENABLE);

	/*
	 * Load the address of the RX and TX lists.
	 */
	CSR_WRITE_4(sc, NGE_RX_LISTPTR,
	    vtophys(&sc->nge_ldata->nge_rx_list[0]));
	CSR_WRITE_4(sc, NGE_TX_LISTPTR,
	    vtophys(&sc->nge_ldata->nge_tx_list[0]));

	/* Set RX configuration */
	CSR_WRITE_4(sc, NGE_RX_CFG, NGE_RXCFG);
	/*
	 * Enable hardware checksum validation for all IPv4
	 * packets, do not reject packets with bad checksums.
	 */
	CSR_WRITE_4(sc, NGE_VLAN_IP_RXCTL, NGE_VIPRXCTL_IPCSUM_ENB);

	/*
	 * Tell the chip to detect and strip VLAN tag info from
	 * received frames. The tag will be provided in the extsts
	 * field in the RX descriptors.
	 */
	NGE_SETBIT(sc, NGE_VLAN_IP_RXCTL,
	    NGE_VIPRXCTL_TAG_DETECT_ENB|NGE_VIPRXCTL_TAG_STRIP_ENB);

	/* Set TX configuration */
	CSR_WRITE_4(sc, NGE_TX_CFG, NGE_TXCFG);

	/*
	 * Enable TX IPv4 checksumming on a per-packet basis.
	 */
	CSR_WRITE_4(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_CSUM_PER_PKT);

	/*
	 * Tell the chip to insert VLAN tags on a per-packet basis as
	 * dictated by the code in the frame encapsulation routine.
	 */
	NGE_SETBIT(sc, NGE_VLAN_IP_TXCTL, NGE_VIPTXCTL_TAG_PER_PKT);

	/* Set full/half duplex mode. */
	if (sc->nge_tbi) {
		if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK)
		    == IFM_FDX) {
			NGE_SETBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
		} else {
			NGE_CLRBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
		}
	} else {
		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
			NGE_SETBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
		} else {
			NGE_CLRBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);
		}
	}

	nge_tick_locked(sc);

	/*
	 * Enable the delivery of PHY interrupts based on
	 * link/speed/duplex status changes. Also enable the
	 * extsts field in the DMA descriptors (needed for
	 * TCP/IP checksum offload on transmit).
	 */
	NGE_SETBIT(sc, NGE_CFG, NGE_CFG_PHYINTR_SPD|
	    NGE_CFG_PHYINTR_LNK|NGE_CFG_PHYINTR_DUP|NGE_CFG_EXTSTS_ENB);

	/*
	 * Configure interrupt holdoff (moderation). We can
	 * have the chip delay interrupt delivery for a certain
	 * period. Units are in 100us, and the max setting
	 * is 25500us (0xFF x 100us). Default is a 100us holdoff.
	 */
	CSR_WRITE_4(sc, NGE_IHR, 0x01);

	/*
	 * Enable interrupts.
	 */
	CSR_WRITE_4(sc, NGE_IMR, NGE_INTRS);
#ifdef DEVICE_POLLING
	/*
	 * ... only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_flags & IFF_POLLING)
		CSR_WRITE_4(sc, NGE_IER, 0);
	else
#endif /* DEVICE_POLLING */
	CSR_WRITE_4(sc, NGE_IER, 1);

	/* Enable receiver and transmitter. */
	NGE_CLRBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE);
	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_RX_ENABLE);

	nge_ifmedia_upd(ifp);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return;
}

/*
 * Set media options.
 */
static int
nge_ifmedia_upd(ifp)
	struct ifnet		*ifp;
{
	struct nge_softc	*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;

	if (sc->nge_tbi) {
		/* Fiber (TBI): program autoneg or forced duplex directly. */
		if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media)
		     == IFM_AUTO) {
			/* Advertise everything, then restart autoneg. */
			CSR_WRITE_4(sc, NGE_TBI_ANAR,
				CSR_READ_4(sc, NGE_TBI_ANAR)
					| NGE_TBIANAR_HDX | NGE_TBIANAR_FDX
					| NGE_TBIANAR_PS1 | NGE_TBIANAR_PS2);
			CSR_WRITE_4(sc, NGE_TBI_BMCR, NGE_TBIBMCR_ENABLE_ANEG
				| NGE_TBIBMCR_RESTART_ANEG);
			CSR_WRITE_4(sc, NGE_TBI_BMCR, NGE_TBIBMCR_ENABLE_ANEG);
		} else if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK)
			    == IFM_FDX) {
			NGE_SETBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
			NGE_SETBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);

			CSR_WRITE_4(sc, NGE_TBI_ANAR, 0);
			CSR_WRITE_4(sc, NGE_TBI_BMCR, 0);
		} else {
			NGE_CLRBIT(sc, NGE_TX_CFG,
			    (NGE_TXCFG_IGN_HBEAT|NGE_TXCFG_IGN_CARR));
			NGE_CLRBIT(sc, NGE_RX_CFG, NGE_RXCFG_RX_FDX);

			CSR_WRITE_4(sc, NGE_TBI_ANAR, 0);
			CSR_WRITE_4(sc, NGE_TBI_BMCR, 0);
		}

		/* Turn the data LED off while renegotiating. */
		CSR_WRITE_4(sc, NGE_GPIO, CSR_READ_4(sc, NGE_GPIO)
			    & ~NGE_GPIO_GP3_OUT);
	} else {
		/* Copper: reset all PHY instances and let miibus do it. */
		mii = device_get_softc(sc->nge_miibus);
		sc->nge_link = 0;
		if (mii->mii_instance) {
			struct mii_softc	*miisc;
			for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
			    miisc = LIST_NEXT(miisc, mii_list))
				mii_phy_reset(miisc);
		}
		mii_mediachg(mii);
	}

	return(0);
}

/*
 * Report current media status.
*/ static void nge_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct nge_softc *sc; struct mii_data *mii; sc = ifp->if_softc; if (sc->nge_tbi) { ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (CSR_READ_4(sc, NGE_TBI_BMSR) & NGE_TBIBMSR_ANEG_DONE) { ifmr->ifm_status |= IFM_ACTIVE; } if (CSR_READ_4(sc, NGE_TBI_BMCR) & NGE_TBIBMCR_LOOPBACK) ifmr->ifm_active |= IFM_LOOP; if (!CSR_READ_4(sc, NGE_TBI_BMSR) & NGE_TBIBMSR_ANEG_DONE) { ifmr->ifm_active |= IFM_NONE; ifmr->ifm_status = 0; return; } ifmr->ifm_active |= IFM_1000_SX; if (IFM_SUBTYPE(sc->nge_ifmedia.ifm_cur->ifm_media) == IFM_AUTO) { ifmr->ifm_active |= IFM_AUTO; if (CSR_READ_4(sc, NGE_TBI_ANLPAR) & NGE_TBIANAR_FDX) { ifmr->ifm_active |= IFM_FDX; }else if (CSR_READ_4(sc, NGE_TBI_ANLPAR) & NGE_TBIANAR_HDX) { ifmr->ifm_active |= IFM_HDX; } } else if ((sc->nge_ifmedia.ifm_cur->ifm_media & IFM_GMASK) == IFM_FDX) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; } else { mii = device_get_softc(sc->nge_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } return; } static int nge_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct nge_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int error = 0; switch(command) { case SIOCSIFMTU: if (ifr->ifr_mtu > NGE_JUMBO_MTU) error = EINVAL; else { ifp->if_mtu = ifr->ifr_mtu; /* * Workaround: if the MTU is larger than * 8152 (TX FIFO size minus 64 minus 18), turn off * TX checksum offloading. 
*/ if (ifr->ifr_mtu >= 8152) { ifp->if_capenable &= ~IFCAP_TXCSUM; ifp->if_hwassist = 0; } else { ifp->if_capenable |= IFCAP_TXCSUM; ifp->if_hwassist = NGE_CSUM_FEATURES; } } break; case SIOCSIFFLAGS: NGE_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->nge_if_flags & IFF_PROMISC)) { NGE_SETBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS| NGE_RXFILTCTL_ALLMULTI); } else if (ifp->if_flags & IFF_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->nge_if_flags & IFF_PROMISC) { NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLPHYS); if (!(ifp->if_flags & IFF_ALLMULTI)) NGE_CLRBIT(sc, NGE_RXFILT_CTL, NGE_RXFILTCTL_ALLMULTI); } else { ifp->if_flags &= ~IFF_RUNNING; nge_init_locked(sc); } } else { if (ifp->if_flags & IFF_RUNNING) nge_stop(sc); } sc->nge_if_flags = ifp->if_flags; NGE_UNLOCK(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: NGE_LOCK(sc); nge_setmulti(sc); NGE_UNLOCK(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: if (sc->nge_tbi) { error = ifmedia_ioctl(ifp, ifr, &sc->nge_ifmedia, command); } else { mii = device_get_softc(sc->nge_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); } break; case SIOCSIFCAP: ifp->if_capenable &= ~IFCAP_POLLING; ifp->if_capenable |= ifr->ifr_reqcap & IFCAP_POLLING; break; default: error = ether_ioctl(ifp, command, data); break; } return(error); } static void nge_watchdog(ifp) struct ifnet *ifp; { struct nge_softc *sc; sc = ifp->if_softc; ifp->if_oerrors++; printf("nge%d: watchdog timeout\n", sc->nge_unit); NGE_LOCK(sc); nge_stop(sc); nge_reset(sc); ifp->if_flags &= ~IFF_RUNNING; nge_init_locked(sc); if (ifp->if_snd.ifq_head != NULL) nge_start_locked(ifp); NGE_UNLOCK(sc); return; } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. 
 */
static void
nge_stop(sc)
	struct nge_softc	*sc;
{
	register int		i;
	struct ifnet		*ifp;
	struct mii_data		*mii;

	NGE_LOCK_ASSERT(sc);
	ifp = sc->nge_ifp;
	/* Cancel the TX watchdog. */
	ifp->if_timer = 0;
	if (sc->nge_tbi) {
		mii = NULL;
	} else {
		mii = device_get_softc(sc->nge_miibus);
	}

	callout_stop(&sc->nge_stat_ch);
#ifdef DEVICE_POLLING
	ether_poll_deregister(ifp);
#endif
	/* Mask and disable interrupts, then halt both DMA engines. */
	CSR_WRITE_4(sc, NGE_IER, 0);
	CSR_WRITE_4(sc, NGE_IMR, 0);
	NGE_SETBIT(sc, NGE_CSR, NGE_CSR_TX_DISABLE|NGE_CSR_RX_DISABLE);
	DELAY(1000);
	CSR_WRITE_4(sc, NGE_TX_LISTPTR, 0);
	CSR_WRITE_4(sc, NGE_RX_LISTPTR, 0);

	if (!sc->nge_tbi)
		mii_down(mii);

	sc->nge_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < NGE_RX_LIST_CNT; i++) {
		if (sc->nge_ldata->nge_rx_list[i].nge_mbuf != NULL) {
			m_freem(sc->nge_ldata->nge_rx_list[i].nge_mbuf);
			sc->nge_ldata->nge_rx_list[i].nge_mbuf = NULL;
		}
	}
	bzero((char *)&sc->nge_ldata->nge_rx_list,
		sizeof(sc->nge_ldata->nge_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < NGE_TX_LIST_CNT; i++) {
		if (sc->nge_ldata->nge_tx_list[i].nge_mbuf != NULL) {
			m_freem(sc->nge_ldata->nge_tx_list[i].nge_mbuf);
			sc->nge_ldata->nge_tx_list[i].nge_mbuf = NULL;
		}
	}
	bzero((char *)&sc->nge_ldata->nge_tx_list,
		sizeof(sc->nge_ldata->nge_tx_list));

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	return;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
nge_shutdown(dev)
	device_t		dev;
{
	struct nge_softc	*sc;

	sc = device_get_softc(dev);

	NGE_LOCK(sc);
	nge_reset(sc);
	nge_stop(sc);
	NGE_UNLOCK(sc);

	return;
}

Index: stable/6/sys/dev/nve/if_nve.c
===================================================================
--- stable/6/sys/dev/nve/if_nve.c	(revision 149421)
+++ stable/6/sys/dev/nve/if_nve.c	(revision 149422)
@@ -1,1724 +1,1726 @@
/*
 * Copyright (c) 2005 by David E. O'Brien .
 * Copyright (c) 2003,2004 by Quinton Dolan .
 * All rights reserved.
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $Id: if_nv.c,v 1.19 2004/08/12 14:00:05 q Exp $ */ /* * NVIDIA nForce MCP Networking Adapter driver * * This is a port of the NVIDIA MCP Linux ethernet driver distributed by NVIDIA * through their web site. * * All mainstream nForce and nForce2 motherboards are supported. This module * is as stable, sometimes more stable, than the linux version. (Recent * Linux stability issues seem to be related to some issues with newer * distributions using GCC 3.x, however this don't appear to effect FreeBSD * 5.x). * * In accordance with the NVIDIA distribution license it is necessary to * link this module against the nvlibnet.o binary object included in the * Linux driver source distribution. 
The binary component is not modified in * any way and is simply linked against a FreeBSD equivalent of the nvnet.c * linux kernel module "wrapper". * * The Linux driver uses a common code API that is shared between Win32 and * i386 Linux. This abstracts the low level driver functions and uses * callbacks and hooks to access the underlying hardware device. By using * this same API in a FreeBSD kernel module it is possible to support the * hardware without breaching the Linux source distributions licensing * requirements, or obtaining the hardware programming specifications. * * Although not conventional, it works, and given the relatively small * amount of hardware centric code, it's hopefully no more buggy than its * linux counterpart. * * NVIDIA now support the nForce3 AMD64 platform, however I have been * unable to access such a system to verify support. However, the code is * reported to work with little modification when compiled with the AMD64 * version of the NVIDIA Linux library. All that should be necessary to make * the driver work is to link it directly into the kernel, instead of as a * module, and apply the docs/amd64.diff patch in this source distribution to * the NVIDIA Linux driver source. * * This driver should work on all versions of FreeBSD since 4.9/5.1 as well * as recent versions of DragonFly. * * Written by Quinton Dolan * Portions based on existing FreeBSD network drivers. * NVIDIA API usage derived from distributed NVIDIA NVNET driver source files. 
* */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include /* for DELAY */ #include #include #include #include #include #include #include "miibus_if.h" /* Include NVIDIA Linux driver header files */ #define linux #include #include #include "os+%DIKED-nve.h" #include #include #undef linux #include MODULE_DEPEND(nve, pci, 1, 1, 1); MODULE_DEPEND(nve, ether, 1, 1, 1); MODULE_DEPEND(nve, miibus, 1, 1, 1); static int nve_probe(device_t); static int nve_attach(device_t); static int nve_detach(device_t); static void nve_init(void *); static void nve_stop(struct nve_softc *); static void nve_shutdown(device_t); static int nve_init_rings(struct nve_softc *); static void nve_free_rings(struct nve_softc *); static void nve_ifstart(struct ifnet *); static int nve_ioctl(struct ifnet *, u_long, caddr_t); static void nve_intr(void *); static void nve_tick(void *); static void nve_setmulti(struct nve_softc *); static void nve_watchdog(struct ifnet *); static void nve_update_stats(struct nve_softc *); static int nve_ifmedia_upd(struct ifnet *); static void nve_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int nve_miibus_readreg(device_t, int, int); static void nve_miibus_writereg(device_t, int, int, int); static void nve_dmamap_cb(void *, bus_dma_segment_t *, int, int); static void nve_dmamap_tx_cb(void *, bus_dma_segment_t *, int, bus_size_t, int); static NV_SINT32 nve_osalloc(PNV_VOID, PMEMORY_BLOCK); static NV_SINT32 nve_osfree(PNV_VOID, PMEMORY_BLOCK); static NV_SINT32 nve_osallocex(PNV_VOID, PMEMORY_BLOCKEX); static NV_SINT32 nve_osfreeex(PNV_VOID, PMEMORY_BLOCKEX); static NV_SINT32 nve_osclear(PNV_VOID, PNV_VOID, NV_SINT32); static NV_SINT32 nve_osdelay(PNV_VOID, NV_UINT32); static NV_SINT32 nve_osallocrxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID *); 
static NV_SINT32 nve_osfreerxbuf(PNV_VOID, PMEMORY_BLOCK, PNV_VOID); static NV_SINT32 nve_ospackettx(PNV_VOID, PNV_VOID, NV_UINT32); static NV_SINT32 nve_ospacketrx(PNV_VOID, PNV_VOID, NV_UINT32, NV_UINT8 *, NV_UINT8); static NV_SINT32 nve_oslinkchg(PNV_VOID, NV_SINT32); static NV_SINT32 nve_osalloctimer(PNV_VOID, PNV_VOID *); static NV_SINT32 nve_osfreetimer(PNV_VOID, PNV_VOID); static NV_SINT32 nve_osinittimer(PNV_VOID, PNV_VOID, PTIMER_FUNC, PNV_VOID); static NV_SINT32 nve_ossettimer(PNV_VOID, PNV_VOID, NV_UINT32); static NV_SINT32 nve_oscanceltimer(PNV_VOID, PNV_VOID); static NV_SINT32 nve_ospreprocpkt(PNV_VOID, PNV_VOID, PNV_VOID *, NV_UINT8 *, NV_UINT8); static PNV_VOID nve_ospreprocpktnopq(PNV_VOID, PNV_VOID); static NV_SINT32 nve_osindicatepkt(PNV_VOID, PNV_VOID *, NV_UINT32); static NV_SINT32 nve_oslockalloc(PNV_VOID, NV_SINT32, PNV_VOID *); static NV_SINT32 nve_oslockacquire(PNV_VOID, NV_SINT32, PNV_VOID); static NV_SINT32 nve_oslockrelease(PNV_VOID, NV_SINT32, PNV_VOID); static PNV_VOID nve_osreturnbufvirt(PNV_VOID, PNV_VOID); static device_method_t nve_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nve_probe), DEVMETHOD(device_attach, nve_attach), DEVMETHOD(device_detach, nve_detach), DEVMETHOD(device_shutdown, nve_shutdown), /* Bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, nve_miibus_readreg), DEVMETHOD(miibus_writereg, nve_miibus_writereg), {0, 0} }; static driver_t nve_driver = { "nve", nve_methods, sizeof(struct nve_softc) }; static devclass_t nve_devclass; static int nve_pollinterval = 0; SYSCTL_INT(_hw, OID_AUTO, nve_pollinterval, CTLFLAG_RW, &nve_pollinterval, 0, "delay between interface polls"); DRIVER_MODULE(nve, pci, nve_driver, nve_devclass, 0, 0); DRIVER_MODULE(miibus, nve, miibus_driver, miibus_devclass, 0, 0); static struct nve_type nve_devs[] = { {NVIDIA_VENDORID, NFORCE_MCPNET1_DEVICEID, "NVIDIA 
nForce MCP Networking Adapter"}, {NVIDIA_VENDORID, NFORCE_MCPNET2_DEVICEID, "NVIDIA nForce MCP2 Networking Adapter"}, {NVIDIA_VENDORID, NFORCE_MCPNET3_DEVICEID, "NVIDIA nForce MCP3 Networking Adapter"}, {NVIDIA_VENDORID, NFORCE_MCPNET4_DEVICEID, "NVIDIA nForce MCP4 Networking Adapter"}, {NVIDIA_VENDORID, NFORCE_MCPNET5_DEVICEID, "NVIDIA nForce MCP5 Networking Adapter"}, {NVIDIA_VENDORID, NFORCE_MCPNET6_DEVICEID, "NVIDIA nForce MCP6 Networking Adapter"}, {NVIDIA_VENDORID, NFORCE_MCPNET7_DEVICEID, "NVIDIA nForce MCP7 Networking Adapter"}, {NVIDIA_VENDORID, NFORCE_MCPNET8_DEVICEID, "NVIDIA nForce MCP8 Networking Adapter"}, {NVIDIA_VENDORID, NFORCE_MCPNET9_DEVICEID, "NVIDIA nForce MCP9 Networking Adapter"}, {NVIDIA_VENDORID, NFORCE_MCPNET10_DEVICEID, "NVIDIA nForce MCP10 Networking Adapter"}, {NVIDIA_VENDORID, NFORCE_MCPNET11_DEVICEID, "NVIDIA nForce MCP11 Networking Adapter"}, {0, 0, NULL} }; /* DMA MEM map callback function to get data segment physical address */ static void nve_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nsegs, int error) { if (error) return; KASSERT(nsegs == 1, ("Too many DMA segments returned when mapping DMA memory")); *(bus_addr_t *)arg = segs->ds_addr; } /* DMA RX map callback function to get data segment physical address */ static void nve_dmamap_rx_cb(void *arg, bus_dma_segment_t * segs, int nsegs, bus_size_t mapsize, int error) { if (error) return; *(bus_addr_t *)arg = segs->ds_addr; } /* * DMA TX buffer callback function to allocate fragment data segment * addresses */ static void nve_dmamap_tx_cb(void *arg, bus_dma_segment_t * segs, int nsegs, bus_size_t mapsize, int error) { struct nve_tx_desc *info; info = arg; if (error) return; KASSERT(nsegs < NV_MAX_FRAGS, ("Too many DMA segments returned when mapping mbuf")); info->numfrags = nsegs; bcopy(segs, info->frags, nsegs * sizeof(bus_dma_segment_t)); } /* Probe for supported hardware ID's */ static int nve_probe(device_t dev) { struct nve_type *t; t = nve_devs; /* Check for matching 
PCI DEVICE ID's */ while (t->name != NULL) { if ((pci_get_vendor(dev) == t->vid_id) && (pci_get_device(dev) == t->dev_id)) { device_set_desc(dev, t->name); return (0); } t++; } return (ENXIO); } /* Attach driver and initialise hardware for use */ static int nve_attach(device_t dev) { u_char eaddr[ETHER_ADDR_LEN]; struct nve_softc *sc; struct ifnet *ifp; OS_API *osapi; ADAPTER_OPEN_PARAMS OpenParams; int error = 0, i, rid, unit; DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_attach - entry\n"); sc = device_get_softc(dev); unit = device_get_unit(dev); /* Allocate mutex */ mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); mtx_init(&sc->osmtx, device_get_nameunit(dev), NULL, MTX_SPIN); sc->dev = dev; sc->unit = unit; /* Preinitialize data structures */ bzero(&OpenParams, sizeof(ADAPTER_OPEN_PARAMS)); /* Enable bus mastering */ pci_enable_busmaster(dev); /* Allocate memory mapped address space */ rid = NV_RID; sc->res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1, RF_ACTIVE); if (sc->res == NULL) { device_printf(dev, "couldn't map memory\n"); error = ENXIO; goto fail; } sc->sc_st = rman_get_bustag(sc->res); sc->sc_sh = rman_get_bushandle(sc->res); /* Allocate interrupt */ rid = 0; sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE); if (sc->irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } /* Allocate DMA tags */ error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * NV_MAX_FRAGS, NV_MAX_FRAGS, MCLBYTES, 0, busdma_lock_mutex, &Giant, &sc->mtag); if (error) { device_printf(dev, "couldn't allocate dma tag\n"); goto fail; } error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct nve_rx_desc) * RX_RING_SIZE, 1, sizeof(struct nve_rx_desc) * RX_RING_SIZE, 0, busdma_lock_mutex, &Giant, &sc->rtag); if (error) { device_printf(dev, "couldn't allocate dma tag\n"); 
goto fail; } error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct nve_tx_desc) * TX_RING_SIZE, 1, sizeof(struct nve_tx_desc) * TX_RING_SIZE, 0, busdma_lock_mutex, &Giant, &sc->ttag); if (error) { device_printf(dev, "couldn't allocate dma tag\n"); goto fail; } /* Allocate DMA safe memory and get the DMA addresses. */ error = bus_dmamem_alloc(sc->ttag, (void **)&sc->tx_desc, BUS_DMA_WAITOK, &sc->tmap); if (error) { device_printf(dev, "couldn't allocate dma memory\n"); goto fail; } bzero(sc->tx_desc, sizeof(struct nve_tx_desc) * TX_RING_SIZE); error = bus_dmamap_load(sc->ttag, sc->tmap, sc->tx_desc, sizeof(struct nve_tx_desc) * TX_RING_SIZE, nve_dmamap_cb, &sc->tx_addr, 0); if (error) { device_printf(dev, "couldn't map dma memory\n"); goto fail; } error = bus_dmamem_alloc(sc->rtag, (void **)&sc->rx_desc, BUS_DMA_WAITOK, &sc->rmap); if (error) { device_printf(dev, "couldn't allocate dma memory\n"); goto fail; } bzero(sc->rx_desc, sizeof(struct nve_rx_desc) * RX_RING_SIZE); error = bus_dmamap_load(sc->rtag, sc->rmap, sc->rx_desc, sizeof(struct nve_rx_desc) * RX_RING_SIZE, nve_dmamap_cb, &sc->rx_addr, 0); if (error) { device_printf(dev, "couldn't map dma memory\n"); goto fail; } /* Initialize rings. 
*/ if (nve_init_rings(sc)) { device_printf(dev, "failed to init rings\n"); error = ENXIO; goto fail; } /* Setup NVIDIA API callback routines */ osapi = &sc->osapi; osapi->pOSCX = sc; osapi->pfnAllocMemory = nve_osalloc; osapi->pfnFreeMemory = nve_osfree; osapi->pfnAllocMemoryEx = nve_osallocex; osapi->pfnFreeMemoryEx = nve_osfreeex; osapi->pfnClearMemory = nve_osclear; osapi->pfnStallExecution = nve_osdelay; osapi->pfnAllocReceiveBuffer = nve_osallocrxbuf; osapi->pfnFreeReceiveBuffer = nve_osfreerxbuf; osapi->pfnPacketWasSent = nve_ospackettx; osapi->pfnPacketWasReceived = nve_ospacketrx; osapi->pfnLinkStateHasChanged = nve_oslinkchg; osapi->pfnAllocTimer = nve_osalloctimer; osapi->pfnFreeTimer = nve_osfreetimer; osapi->pfnInitializeTimer = nve_osinittimer; osapi->pfnSetTimer = nve_ossettimer; osapi->pfnCancelTimer = nve_oscanceltimer; osapi->pfnPreprocessPacket = nve_ospreprocpkt; osapi->pfnPreprocessPacketNopq = nve_ospreprocpktnopq; osapi->pfnIndicatePackets = nve_osindicatepkt; osapi->pfnLockAlloc = nve_oslockalloc; osapi->pfnLockAcquire = nve_oslockacquire; osapi->pfnLockRelease = nve_oslockrelease; osapi->pfnReturnBufferVirtual = nve_osreturnbufvirt; sc->linkup = FALSE; sc->max_frame_size = ETHERMTU + ETHER_HDR_LEN + FCS_LEN; /* TODO - We don't support hardware offload yet */ sc->hwmode = 1; sc->media = 0; /* Set NVIDIA API startup parameters */ OpenParams.MaxDpcLoop = 2; OpenParams.MaxRxPkt = RX_RING_SIZE; OpenParams.MaxTxPkt = TX_RING_SIZE; OpenParams.SentPacketStatusSuccess = 1; OpenParams.SentPacketStatusFailure = 0; OpenParams.MaxRxPktToAccumulate = 6; OpenParams.ulPollInterval = nve_pollinterval; OpenParams.SetForcedModeEveryNthRxPacket = 0; OpenParams.SetForcedModeEveryNthTxPacket = 0; OpenParams.RxForcedInterrupt = 0; OpenParams.TxForcedInterrupt = 0; OpenParams.pOSApi = osapi; OpenParams.pvHardwareBaseAddress = rman_get_virtual(sc->res); OpenParams.bASFEnabled = 0; OpenParams.ulDescriptorVersion = sc->hwmode; OpenParams.ulMaxPacketSize = 
sc->max_frame_size; OpenParams.DeviceId = pci_get_device(dev); /* Open NVIDIA Hardware API */ error = ADAPTER_Open(&OpenParams, (void **)&(sc->hwapi), &sc->phyaddr); if (error) { device_printf(dev, "failed to open NVIDIA Hardware API: 0x%x\n", error); goto fail; } /* TODO - Add support for MODE2 hardware offload */ bzero(&sc->adapterdata, sizeof(sc->adapterdata)); sc->adapterdata.ulMediaIF = sc->media; sc->adapterdata.ulModeRegTxReadCompleteEnable = 1; sc->hwapi->pfnSetCommonData(sc->hwapi->pADCX, &sc->adapterdata); /* MAC is loaded backwards into h/w reg */ sc->hwapi->pfnGetNodeAddress(sc->hwapi->pADCX, sc->original_mac_addr); for (i = 0; i < 6; i++) { eaddr[i] = sc->original_mac_addr[5 - i]; } sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, eaddr); /* Display ethernet address ,... */ device_printf(dev, "Ethernet address %6D\n", eaddr, ":"); /* Allocate interface structures */ ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } /* Probe device for MII interface to PHY */ DEBUGOUT(NVE_DEBUG_INIT, "nve: do mii_phy_probe\n"); if (mii_phy_probe(dev, &sc->miibus, nve_ifmedia_upd, nve_ifmedia_sts)) { device_printf(dev, "MII without any phy!\n"); error = ENXIO; goto fail; } /* Setup interface parameters */ ifp->if_softc = sc; if_initname(ifp, "nve", unit); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = nve_ioctl; ifp->if_output = ether_output; ifp->if_start = nve_ifstart; ifp->if_watchdog = nve_watchdog; ifp->if_timer = 0; ifp->if_init = nve_init; ifp->if_mtu = ETHERMTU; ifp->if_baudrate = IF_Mbps(100); ifp->if_snd.ifq_maxlen = TX_RING_SIZE - 1; ifp->if_capabilities |= IFCAP_VLAN_MTU; /* Attach to OS's managers. */ ether_ifattach(ifp, eaddr); callout_handle_init(&sc->stat_ch); /* Activate our interrupt handler. 
- attach last to avoid lock */ error = bus_setup_intr(sc->dev, sc->irq, INTR_TYPE_NET, nve_intr, sc, &sc->sc_ih); if (error) { device_printf(sc->dev, "couldn't set up interrupt handler\n"); goto fail; } DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_attach - exit\n"); fail: if (error) nve_detach(dev); return (error); } /* Detach interface for module unload */ static int nve_detach(device_t dev) { struct nve_softc *sc = device_get_softc(dev); struct ifnet *ifp; KASSERT(mtx_initialized(&sc->mtx), ("mutex not initialized")); NVE_LOCK(sc); DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_detach - entry\n"); ifp = sc->ifp; if (device_is_attached(dev)) { nve_stop(sc); ether_ifdetach(ifp); if_free(ifp); } if (sc->miibus) device_delete_child(dev, sc->miibus); bus_generic_detach(dev); /* Reload unreversed address back into MAC in original state */ if (sc->original_mac_addr) sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, sc->original_mac_addr); DEBUGOUT(NVE_DEBUG_DEINIT, "nve: do pfnClose\n"); /* Detach from NVIDIA hardware API */ if (sc->hwapi->pfnClose) sc->hwapi->pfnClose(sc->hwapi->pADCX, FALSE); /* Release resources */ if (sc->sc_ih) bus_teardown_intr(sc->dev, sc->irq, sc->sc_ih); if (sc->irq) bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->irq); if (sc->res) bus_release_resource(sc->dev, SYS_RES_MEMORY, NV_RID, sc->res); nve_free_rings(sc); if (sc->tx_desc) { bus_dmamap_unload(sc->rtag, sc->rmap); bus_dmamem_free(sc->rtag, sc->rx_desc, sc->rmap); bus_dmamap_destroy(sc->rtag, sc->rmap); } if (sc->mtag) bus_dma_tag_destroy(sc->mtag); if (sc->ttag) bus_dma_tag_destroy(sc->ttag); if (sc->rtag) bus_dma_tag_destroy(sc->rtag); NVE_UNLOCK(sc); mtx_destroy(&sc->mtx); mtx_destroy(&sc->osmtx); DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_detach - exit\n"); return (0); } /* Initialise interface and start it "RUNNING" */ static void nve_init(void *xsc) { struct nve_softc *sc = xsc; struct ifnet *ifp; int error; NVE_LOCK(sc); DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init - entry (%d)\n", sc->linkup); ifp = sc->ifp; /* 
Do nothing if already running */ if (ifp->if_flags & IFF_RUNNING) goto fail; nve_stop(sc); DEBUGOUT(NVE_DEBUG_INIT, "nve: do pfnInit\n"); /* Setup Hardware interface and allocate memory structures */ error = sc->hwapi->pfnInit(sc->hwapi->pADCX, 0, /* force speed */ 0, /* force full duplex */ 0, /* force mode */ 0, /* force async mode */ &sc->linkup); if (error) { device_printf(sc->dev, "failed to start NVIDIA Hardware interface\n"); goto fail; } /* Set the MAC address */ sc->hwapi->pfnSetNodeAddress(sc->hwapi->pADCX, IFP2ENADDR(sc->ifp)); sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX); sc->hwapi->pfnStart(sc->hwapi->pADCX); /* Setup multicast filter */ nve_setmulti(sc); nve_ifmedia_upd(ifp); /* Update interface parameters */ ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; sc->stat_ch = timeout(nve_tick, sc, hz); DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init - exit\n"); fail: NVE_UNLOCK(sc); return; } /* Stop interface activity ie. not "RUNNING" */ static void nve_stop(struct nve_softc *sc) { struct ifnet *ifp; NVE_LOCK(sc); DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_stop - entry\n"); ifp = sc->ifp; ifp->if_timer = 0; /* Cancel tick timer */ untimeout(nve_tick, sc, sc->stat_ch); /* Stop hardware activity */ sc->hwapi->pfnDisableInterrupts(sc->hwapi->pADCX); sc->hwapi->pfnStop(sc->hwapi->pADCX, 0); DEBUGOUT(NVE_DEBUG_DEINIT, "nve: do pfnDeinit\n"); /* Shutdown interface and deallocate memory buffers */ if (sc->hwapi->pfnDeinit) sc->hwapi->pfnDeinit(sc->hwapi->pADCX, 0); sc->linkup = 0; sc->cur_rx = 0; sc->pending_rxs = 0; ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_stop - exit\n"); NVE_UNLOCK(sc); return; } /* Shutdown interface for unload/reboot */ static void nve_shutdown(device_t dev) { struct nve_softc *sc; DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_shutdown\n"); sc = device_get_softc(dev); /* Stop hardware activity */ nve_stop(sc); } /* Allocate TX ring buffers */ static int nve_init_rings(struct nve_softc *sc) { int error, 
i; NVE_LOCK(sc); DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init_rings - entry\n"); sc->cur_rx = sc->cur_tx = sc->pending_rxs = sc->pending_txs = 0; /* Initialise RX ring */ for (i = 0; i < RX_RING_SIZE; i++) { struct nve_rx_desc *desc = sc->rx_desc + i; struct nve_map_buffer *buf = &desc->buf; buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (buf->mbuf == NULL) { device_printf(sc->dev, "couldn't allocate mbuf\n"); nve_free_rings(sc); error = ENOBUFS; goto fail; } buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES; m_adj(buf->mbuf, ETHER_ALIGN); error = bus_dmamap_create(sc->mtag, 0, &buf->map); if (error) { device_printf(sc->dev, "couldn't create dma map\n"); nve_free_rings(sc); goto fail; } error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf, nve_dmamap_rx_cb, &desc->paddr, 0); if (error) { device_printf(sc->dev, "couldn't dma map mbuf\n"); nve_free_rings(sc); goto fail; } bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD); desc->buflength = buf->mbuf->m_len; desc->vaddr = mtod(buf->mbuf, caddr_t); } bus_dmamap_sync(sc->rtag, sc->rmap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Initialize TX ring */ for (i = 0; i < TX_RING_SIZE; i++) { struct nve_tx_desc *desc = sc->tx_desc + i; struct nve_map_buffer *buf = &desc->buf; buf->mbuf = NULL; error = bus_dmamap_create(sc->mtag, 0, &buf->map); if (error) { device_printf(sc->dev, "couldn't create dma map\n"); nve_free_rings(sc); goto fail; } } bus_dmamap_sync(sc->ttag, sc->tmap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); DEBUGOUT(NVE_DEBUG_INIT, "nve: nve_init_rings - exit\n"); fail: NVE_UNLOCK(sc); return (error); } /* Free the TX ring buffers */ static void nve_free_rings(struct nve_softc *sc) { int i; NVE_LOCK(sc); DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_free_rings - entry\n"); for (i = 0; i < RX_RING_SIZE; i++) { struct nve_rx_desc *desc = sc->rx_desc + i; struct nve_map_buffer *buf = &desc->buf; if (buf->mbuf) { bus_dmamap_unload(sc->mtag, buf->map); bus_dmamap_destroy(sc->mtag, buf->map); 
m_freem(buf->mbuf); } buf->mbuf = NULL; } for (i = 0; i < TX_RING_SIZE; i++) { struct nve_tx_desc *desc = sc->tx_desc + i; struct nve_map_buffer *buf = &desc->buf; if (buf->mbuf) { bus_dmamap_unload(sc->mtag, buf->map); bus_dmamap_destroy(sc->mtag, buf->map); m_freem(buf->mbuf); } buf->mbuf = NULL; } DEBUGOUT(NVE_DEBUG_DEINIT, "nve: nve_free_rings - exit\n"); NVE_UNLOCK(sc); } /* Main loop for sending packets from OS to interface */ static void nve_ifstart(struct ifnet *ifp) { struct nve_softc *sc = ifp->if_softc; struct nve_map_buffer *buf; struct mbuf *m0, *m; struct nve_tx_desc *desc; ADAPTER_WRITE_DATA txdata; int error, i; DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_ifstart - entry\n"); /* If link is down/busy or queue is empty do nothing */ if (ifp->if_flags & IFF_OACTIVE || ifp->if_snd.ifq_head == NULL) return; /* Transmit queued packets until sent or TX ring is full */ while (sc->pending_txs < TX_RING_SIZE) { desc = sc->tx_desc + sc->cur_tx; buf = &desc->buf; /* Get next packet to send. */ IF_DEQUEUE(&ifp->if_snd, m0); /* If nothing to send, return. 
*/ if (m0 == NULL) return; /* Map MBUF for DMA access */ error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m0, nve_dmamap_tx_cb, desc, BUS_DMA_NOWAIT); if (error && error != EFBIG) { m_freem(m0); sc->tx_errors++; continue; } /* * Packet has too many fragments - defrag into new mbuf * cluster */ if (error) { m = m_defrag(m0, M_DONTWAIT); if (m == NULL) { m_freem(m0); sc->tx_errors++; continue; } m_freem(m0); m0 = m; error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m, nve_dmamap_tx_cb, desc, BUS_DMA_NOWAIT); if (error) { m_freem(m); sc->tx_errors++; continue; } } /* Do sync on DMA bounce buffer */ bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREWRITE); buf->mbuf = m0; txdata.ulNumberOfElements = desc->numfrags; txdata.pvID = (PVOID)desc; /* Put fragments into API element list */ txdata.ulTotalLength = buf->mbuf->m_len; for (i = 0; i < desc->numfrags; i++) { txdata.sElement[i].ulLength = (ulong)desc->frags[i].ds_len; txdata.sElement[i].pPhysical = (PVOID)desc->frags[i].ds_addr; } /* Send packet to Nvidia API for transmission */ error = sc->hwapi->pfnWrite(sc->hwapi->pADCX, &txdata); switch (error) { case ADAPTERERR_NONE: /* Packet was queued in API TX queue successfully */ sc->pending_txs++; sc->cur_tx = (sc->cur_tx + 1) % TX_RING_SIZE; break; case ADAPTERERR_TRANSMIT_QUEUE_FULL: /* The API TX queue is full - requeue the packet */ device_printf(sc->dev, "nve_ifstart: transmit queue is full\n"); ifp->if_flags |= IFF_OACTIVE; bus_dmamap_unload(sc->mtag, buf->map); IF_PREPEND(&ifp->if_snd, buf->mbuf); buf->mbuf = NULL; return; default: /* The API failed to queue/send the packet so dump it */ device_printf(sc->dev, "nve_ifstart: transmit error\n"); bus_dmamap_unload(sc->mtag, buf->map); m_freem(buf->mbuf); buf->mbuf = NULL; sc->tx_errors++; return; } /* Set watchdog timer. 
*/ ifp->if_timer = 8; /* Copy packet to BPF tap */ BPF_MTAP(ifp, m0); } ifp->if_flags |= IFF_OACTIVE; DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_ifstart - exit\n"); } /* Handle IOCTL events */ static int nve_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct nve_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int error = 0; NVE_LOCK(sc); DEBUGOUT(NVE_DEBUG_IOCTL, "nve: nve_ioctl - entry\n"); switch (command) { case SIOCSIFMTU: /* Set MTU size */ if (ifp->if_mtu == ifr->ifr_mtu) break; if (ifr->ifr_mtu + ifp->if_hdrlen <= MAX_PACKET_SIZE_1518) { ifp->if_mtu = ifr->ifr_mtu; nve_stop(sc); nve_init(sc); } else error = EINVAL; break; case SIOCSIFFLAGS: /* Setup interface flags */ if (ifp->if_flags & IFF_UP) { if ((ifp->if_flags & IFF_RUNNING) == 0) { nve_init(sc); break; } } else { if (ifp->if_flags & IFF_RUNNING) { nve_stop(sc); break; } } /* Handle IFF_PROMISC and IFF_ALLMULTI flags. */ nve_setmulti(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: /* Setup multicast filter */ if (ifp->if_flags & IFF_RUNNING) { nve_setmulti(sc); } break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: /* Get/Set interface media parameters */ mii = device_get_softc(sc->miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; default: /* Everything else we forward to generic ether ioctl */ error = ether_ioctl(ifp, (int)command, data); break; } DEBUGOUT(NVE_DEBUG_IOCTL, "nve: nve_ioctl - exit\n"); NVE_UNLOCK(sc); return (error); } /* Interrupt service routine */ static void nve_intr(void *arg) { struct nve_softc *sc = arg; struct ifnet *ifp = sc->ifp; DEBUGOUT(NVE_DEBUG_INTERRUPT, "nve: nve_intr - entry\n"); if (!ifp->if_flags & IFF_UP) { nve_stop(sc); return; } /* Handle interrupt event */ if (sc->hwapi->pfnQueryInterrupt(sc->hwapi->pADCX)) { sc->hwapi->pfnHandleInterrupt(sc->hwapi->pADCX); sc->hwapi->pfnEnableInterrupts(sc->hwapi->pADCX); } if (ifp->if_snd.ifq_head != NULL) nve_ifstart(ifp); /* If no pending packets we don't 
need a timeout */ if (sc->pending_txs == 0) sc->ifp->if_timer = 0; DEBUGOUT(NVE_DEBUG_INTERRUPT, "nve: nve_intr - exit\n"); return; } /* Setup multicast filters */ static void nve_setmulti(struct nve_softc *sc) { struct ifnet *ifp; struct ifmultiaddr *ifma; PACKET_FILTER hwfilter; int i; u_int8_t andaddr[6], oraddr[6]; NVE_LOCK(sc); DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_setmulti - entry\n"); ifp = sc->ifp; /* Initialize filter */ hwfilter.ulFilterFlags = 0; for (i = 0; i < 6; i++) { hwfilter.acMulticastAddress[i] = 0; hwfilter.acMulticastMask[i] = 0; } if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)) { /* Accept all packets */ hwfilter.ulFilterFlags |= ACCEPT_ALL_PACKETS; sc->hwapi->pfnSetPacketFilter(sc->hwapi->pADCX, &hwfilter); NVE_UNLOCK(sc); return; } /* Setup multicast filter */ + IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { u_char *addrp; if (ifma->ifma_addr->sa_family != AF_LINK) continue; addrp = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); for (i = 0; i < 6; i++) { u_int8_t mcaddr = addrp[i]; andaddr[i] &= mcaddr; oraddr[i] |= mcaddr; } } + IF_ADDR_UNLOCK(ifp); for (i = 0; i < 6; i++) { hwfilter.acMulticastAddress[i] = andaddr[i] & oraddr[i]; hwfilter.acMulticastMask[i] = andaddr[i] | (~oraddr[i]); } /* Send filter to NVIDIA API */ sc->hwapi->pfnSetPacketFilter(sc->hwapi->pADCX, &hwfilter); NVE_UNLOCK(sc); DEBUGOUT(NVE_DEBUG_RUNNING, "nve: nve_setmulti - exit\n"); return; } /* Change the current media/mediaopts */ static int nve_ifmedia_upd(struct ifnet *ifp) { struct nve_softc *sc = ifp->if_softc; struct mii_data *mii; DEBUGOUT(NVE_DEBUG_MII, "nve: nve_ifmedia_upd\n"); mii = device_get_softc(sc->miibus); if (mii->mii_instance) { struct mii_softc *miisc; for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; miisc = LIST_NEXT(miisc, mii_list)) { mii_phy_reset(miisc); } } mii_mediachg(mii); return (0); } /* Update current miibus PHY status of media */ static void nve_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { 
struct nve_softc *sc; struct mii_data *mii; DEBUGOUT(NVE_DEBUG_MII, "nve: nve_ifmedia_sts\n"); sc = ifp->if_softc; mii = device_get_softc(sc->miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; return; } /* miibus tick timer - maintain link status */ static void nve_tick(void *xsc) { struct nve_softc *sc = xsc; struct mii_data *mii; struct ifnet *ifp; NVE_LOCK(sc); ifp = sc->ifp; nve_update_stats(sc); mii = device_get_softc(sc->miibus); mii_tick(mii); if (mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { if (ifp->if_snd.ifq_head != NULL) nve_ifstart(ifp); } sc->stat_ch = timeout(nve_tick, sc, hz); NVE_UNLOCK(sc); return; } /* Update ifnet data structure with collected interface stats from API */ static void nve_update_stats(struct nve_softc *sc) { struct ifnet *ifp = sc->ifp; ADAPTER_STATS stats; NVE_LOCK(sc); if (sc->hwapi) { sc->hwapi->pfnGetStatistics(sc->hwapi->pADCX, &stats); ifp->if_ipackets = stats.ulSuccessfulReceptions; ifp->if_ierrors = stats.ulMissedFrames + stats.ulFailedReceptions + stats.ulCRCErrors + stats.ulFramingErrors + stats.ulOverFlowErrors; ifp->if_opackets = stats.ulSuccessfulTransmissions; ifp->if_oerrors = sc->tx_errors + stats.ulFailedTransmissions + stats.ulRetryErrors + stats.ulUnderflowErrors + stats.ulLossOfCarrierErrors + stats.ulLateCollisionErrors; ifp->if_collisions = stats.ulLateCollisionErrors; } NVE_UNLOCK(sc); return; } /* miibus Read PHY register wrapper - calls Nvidia API entry point */ static int nve_miibus_readreg(device_t dev, int phy, int reg) { struct nve_softc *sc = device_get_softc(dev); ULONG data; DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_readreg - entry\n"); ADAPTER_ReadPhy(sc->hwapi->pADCX, phy, reg, &data); DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_readreg - exit\n"); return (data); } /* miibus Write PHY register wrapper - calls Nvidia API entry point */ static void nve_miibus_writereg(device_t dev, int phy, int reg, 
int data) { struct nve_softc *sc = device_get_softc(dev); DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_writereg - entry\n"); ADAPTER_WritePhy(sc->hwapi->pADCX, phy, reg, (ulong)data); DEBUGOUT(NVE_DEBUG_MII, "nve: nve_miibus_writereg - exit\n"); return; } /* Watchdog timer to prevent PHY lockups */ static void nve_watchdog(struct ifnet *ifp) { struct nve_softc *sc = ifp->if_softc; device_printf(sc->dev, "device timeout (%d)\n", sc->pending_txs); sc->tx_errors++; nve_stop(sc); ifp->if_flags &= ~IFF_RUNNING; nve_init(sc); if (ifp->if_snd.ifq_head != NULL) nve_ifstart(ifp); return; } /* --- Start of NVOSAPI interface --- */ /* Allocate DMA enabled general use memory for API */ static NV_SINT32 nve_osalloc(PNV_VOID ctx, PMEMORY_BLOCK mem) { struct nve_softc *sc; bus_addr_t mem_physical; DEBUGOUT(NVE_DEBUG_API, "nve: nve_osalloc - %d\n", mem->uiLength); sc = (struct nve_softc *)ctx; mem->pLogical = (PVOID)contigmalloc(mem->uiLength, M_DEVBUF, M_NOWAIT | M_ZERO, 0, ~0, PAGE_SIZE, 0); if (!mem->pLogical) { device_printf(sc->dev, "memory allocation failed\n"); return (0); } memset(mem->pLogical, 0, (ulong)mem->uiLength); mem_physical = vtophys(mem->pLogical); mem->pPhysical = (PVOID)mem_physical; DEBUGOUT(NVE_DEBUG_API, "nve: nve_osalloc 0x%x/0x%x - %d\n", (uint)mem->pLogical, (uint)mem->pPhysical, (uint)mem->uiLength); return (1); } /* Free allocated memory */ static NV_SINT32 nve_osfree(PNV_VOID ctx, PMEMORY_BLOCK mem) { DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfree - 0x%x - %d\n", (uint)mem->pLogical, (uint) mem->uiLength); contigfree(mem->pLogical, PAGE_SIZE, M_DEVBUF); return (1); } /* Copied directly from nvnet.c */ static NV_SINT32 nve_osallocex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex) { MEMORY_BLOCK mem_block; DEBUGOUT(NVE_DEBUG_API, "nve: nve_osallocex\n"); mem_block_ex->pLogical = NULL; mem_block_ex->uiLengthOrig = mem_block_ex->uiLength; if ((mem_block_ex->AllocFlags & ALLOC_MEMORY_ALIGNED) && (mem_block_ex->AlignmentSize > 1)) { DEBUGOUT(NVE_DEBUG_API, " aligning on 
%d\n", mem_block_ex->AlignmentSize); mem_block_ex->uiLengthOrig += mem_block_ex->AlignmentSize; } mem_block.uiLength = mem_block_ex->uiLengthOrig; if (nve_osalloc(ctx, &mem_block) == 0) { return (0); } mem_block_ex->pLogicalOrig = mem_block.pLogical; mem_block_ex->pPhysicalOrigLow = (unsigned long)mem_block.pPhysical; mem_block_ex->pPhysicalOrigHigh = 0; mem_block_ex->pPhysical = mem_block.pPhysical; mem_block_ex->pLogical = mem_block.pLogical; if (mem_block_ex->uiLength != mem_block_ex->uiLengthOrig) { unsigned int offset; offset = mem_block_ex->pPhysicalOrigLow & (mem_block_ex->AlignmentSize - 1); if (offset) { mem_block_ex->pPhysical = (PVOID)((ulong)mem_block_ex->pPhysical + mem_block_ex->AlignmentSize - offset); mem_block_ex->pLogical = (PVOID)((ulong)mem_block_ex->pLogical + mem_block_ex->AlignmentSize - offset); } /* if (offset) */ } /* if (mem_block_ex->uiLength != *mem_block_ex->uiLengthOrig) */ return (1); } /* Copied directly from nvnet.c */ static NV_SINT32 nve_osfreeex(PNV_VOID ctx, PMEMORY_BLOCKEX mem_block_ex) { MEMORY_BLOCK mem_block; DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfreeex\n"); mem_block.pLogical = mem_block_ex->pLogicalOrig; mem_block.pPhysical = (PVOID)((ulong)mem_block_ex->pPhysicalOrigLow); mem_block.uiLength = mem_block_ex->uiLengthOrig; return (nve_osfree(ctx, &mem_block)); } /* Clear memory region */ static NV_SINT32 nve_osclear(PNV_VOID ctx, PNV_VOID mem, NV_SINT32 length) { DEBUGOUT(NVE_DEBUG_API, "nve: nve_osclear\n"); memset(mem, 0, length); return (1); } /* Sleep for a tick */ static NV_SINT32 nve_osdelay(PNV_VOID ctx, NV_UINT32 usec) { DELAY(usec); return (1); } /* Allocate memory for rx buffer */ static NV_SINT32 nve_osallocrxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID *id) { struct nve_softc *sc = ctx; struct nve_rx_desc *desc; struct nve_map_buffer *buf; int error; NVE_LOCK(sc); DEBUGOUT(NVE_DEBUG_API, "nve: nve_osallocrxbuf\n"); if (sc->pending_rxs == RX_RING_SIZE) { device_printf(sc->dev, "rx ring buffer is full\n"); goto 
fail; } desc = sc->rx_desc + sc->cur_rx; buf = &desc->buf; if (buf->mbuf == NULL) { buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (buf->mbuf == NULL) { device_printf(sc->dev, "failed to allocate memory\n"); goto fail; } buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES; m_adj(buf->mbuf, ETHER_ALIGN); error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf, nve_dmamap_rx_cb, &desc->paddr, 0); if (error) { device_printf(sc->dev, "failed to dmamap mbuf\n"); m_freem(buf->mbuf); buf->mbuf = NULL; goto fail; } bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD); desc->buflength = buf->mbuf->m_len; desc->vaddr = mtod(buf->mbuf, caddr_t); } sc->pending_rxs++; sc->cur_rx = (sc->cur_rx + 1) % RX_RING_SIZE; mem->pLogical = (void *)desc->vaddr; mem->pPhysical = (void *)desc->paddr; mem->uiLength = desc->buflength; *id = (void *)desc; NVE_UNLOCK(sc); return (1); fail: NVE_UNLOCK(sc); return (0); } /* Free the rx buffer */ static NV_SINT32 nve_osfreerxbuf(PNV_VOID ctx, PMEMORY_BLOCK mem, PNV_VOID id) { struct nve_softc *sc = ctx; struct nve_rx_desc *desc; struct nve_map_buffer *buf; NVE_LOCK(sc); DEBUGOUT(NVE_DEBUG_API, "nve: nve_osfreerxbuf\n"); desc = (struct nve_rx_desc *) id; buf = &desc->buf; if (buf->mbuf) { bus_dmamap_unload(sc->mtag, buf->map); bus_dmamap_destroy(sc->mtag, buf->map); m_freem(buf->mbuf); } sc->pending_rxs--; buf->mbuf = NULL; NVE_UNLOCK(sc); return (1); } /* This gets called by the Nvidia API after our TX packet has been sent */ static NV_SINT32 nve_ospackettx(PNV_VOID ctx, PNV_VOID id, NV_UINT32 success) { struct nve_softc *sc = ctx; struct nve_map_buffer *buf; struct nve_tx_desc *desc = (struct nve_tx_desc *) id; struct ifnet *ifp; NVE_LOCK(sc); DEBUGOUT(NVE_DEBUG_API, "nve: nve_ospackettx\n"); ifp = sc->ifp; buf = &desc->buf; sc->pending_txs--; /* Unload and free mbuf cluster */ if (buf->mbuf == NULL) goto fail; bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->mtag, buf->map); m_freem(buf->mbuf); 
buf->mbuf = NULL; /* Send more packets if we have them */ if (sc->pending_txs < TX_RING_SIZE) sc->ifp->if_flags &= ~IFF_OACTIVE; if (ifp->if_snd.ifq_head != NULL && sc->pending_txs < TX_RING_SIZE) nve_ifstart(ifp); fail: NVE_UNLOCK(sc); return (1); } /* This gets called by the Nvidia API when a new packet has been received */ /* XXX What is newbuf used for? XXX */ static NV_SINT32 nve_ospacketrx(PNV_VOID ctx, PNV_VOID data, NV_UINT32 success, NV_UINT8 *newbuf, NV_UINT8 priority) { struct nve_softc *sc = ctx; struct ifnet *ifp; struct nve_rx_desc *desc; struct nve_map_buffer *buf; ADAPTER_READ_DATA *readdata; NVE_LOCK(sc); DEBUGOUT(NVE_DEBUG_API, "nve: nve_ospacketrx\n"); ifp = sc->ifp; readdata = (ADAPTER_READ_DATA *) data; desc = readdata->pvID; buf = &desc->buf; bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD); if (success) { /* Sync DMA bounce buffer. */ bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD); /* First mbuf in packet holds the ethernet and packet headers */ buf->mbuf->m_pkthdr.rcvif = ifp; buf->mbuf->m_pkthdr.len = buf->mbuf->m_len = readdata->ulTotalLength; bus_dmamap_unload(sc->mtag, buf->map); /* Give mbuf to OS. 
 */
/*
 * (Fragment: tail of nve_ospacketrx, the NVOSAPI receive-indication
 * callback; its header is on the previous line outside this view.)
 */
		(*ifp->if_input) (ifp, buf->mbuf);	/* pass packet up the stack */

		if (readdata->ulFilterMatch & ADREADFL_MULTICAST_MATCH)
			ifp->if_imcasts++;	/* account multicast receptions */

		/* Blat the mbuf pointer, kernel will free the mbuf cluster */
		buf->mbuf = NULL;
	} else {
		/* Receive failed: recycle the mbuf ourselves. */
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mtag, buf->map);
		m_freem(buf->mbuf);
		buf->mbuf = NULL;
	}

	sc->cur_rx = desc - sc->rx_desc;	/* resync sw index with this slot */
	sc->pending_rxs--;

	NVE_UNLOCK(sc);

	return (1);
}

/* This gets called by NVIDIA API when the PHY link state changes */
/*
 * nve_oslinkchg -- mirror the reported PHY link state into the
 * interface IFF_UP flag.  Always returns 1.
 */
static NV_SINT32
nve_oslinkchg(PNV_VOID ctx, NV_SINT32 enabled)
{
	struct nve_softc *sc = (struct nve_softc *)ctx;
	struct ifnet *ifp;

	DEBUGOUT(NVE_DEBUG_API, "nve: nve_oslinkchg\n");

	ifp = sc->ifp;
	if (enabled)
		ifp->if_flags |= IFF_UP;
	else
		ifp->if_flags &= ~IFF_UP;

	return (1);
}

/* Setup a watchdog timer */
/*
 * nve_osalloctimer -- hand the API a pointer to the single
 * softc-embedded callout handle.  NOTE(review): only one timer per
 * device is supported by this scheme -- confirm the API never asks
 * for more.
 */
static NV_SINT32
nve_osalloctimer(PNV_VOID ctx, PNV_VOID *timer)
{
	struct nve_softc *sc = (struct nve_softc *)ctx;

	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osalloctimer\n");

	callout_handle_init(&sc->ostimer);
	*timer = &sc->ostimer;

	return (1);
}

/* Free the timer */
/* nve_osfreetimer -- nothing to release; the handle lives in the softc. */
static NV_SINT32
nve_osfreetimer(PNV_VOID ctx, PNV_VOID timer)
{
	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osfreetimer\n");

	return (1);
}

/* Setup timer parameters */
/*
 * nve_osinittimer -- remember the timer callback and its argument for
 * later nve_ossettimer()/nve_oscanceltimer() calls.
 */
static NV_SINT32
nve_osinittimer(PNV_VOID ctx, PNV_VOID timer, PTIMER_FUNC func, PNV_VOID parameters)
{
	struct nve_softc *sc = (struct nve_softc *)ctx;

	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osinittimer\n");

	sc->ostimer_func = func;
	sc->ostimer_params = parameters;

	return (1);
}

/* Set the timer to go off */
/*
 * nve_ossettimer -- arm the timer; 'delay' is passed straight to
 * timeout(9).  NOTE(review): 'delay' is assumed to already be in
 * ticks -- confirm against the NVIDIA API contract.
 */
static NV_SINT32
nve_ossettimer(PNV_VOID ctx, PNV_VOID timer, NV_UINT32 delay)
{
	struct nve_softc *sc = ctx;

	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ossettimer\n");

	*(struct callout_handle *)timer = timeout(sc->ostimer_func,
	    sc->ostimer_params, delay);

	return (1);
}

/* Cancel the timer */
/*
 * nve_oscanceltimer -- disarm a timer previously armed with
 * nve_ossettimer().  (Continues on the next line outside this view.)
 */
static NV_SINT32
nve_oscanceltimer(PNV_VOID ctx, PNV_VOID timer)
{
	struct nve_softc *sc = ctx;

	DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_oscanceltimer\n");

	untimeout(sc->ostimer_func,
sc->ostimer_params, *(struct callout_handle *)timer); return (1); } static NV_SINT32 nve_ospreprocpkt(PNV_VOID ctx, PNV_VOID readdata, PNV_VOID *id, NV_UINT8 *newbuffer, NV_UINT8 priority) { /* Not implemented */ DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ospreprocpkt\n"); return (1); } static PNV_VOID nve_ospreprocpktnopq(PNV_VOID ctx, PNV_VOID readdata) { /* Not implemented */ DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_ospreprocpkt\n"); return (NULL); } static NV_SINT32 nve_osindicatepkt(PNV_VOID ctx, PNV_VOID *id, NV_UINT32 pktno) { /* Not implemented */ DEBUGOUT(NVE_DEBUG_BROKEN, "nve: nve_osindicatepkt\n"); return (1); } /* Allocate mutex context (already done in nve_attach) */ static NV_SINT32 nve_oslockalloc(PNV_VOID ctx, NV_SINT32 type, PNV_VOID *pLock) { struct nve_softc *sc = (struct nve_softc *)ctx; DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockalloc\n"); *pLock = (void **)sc; return (1); } /* Obtain a spin lock */ static NV_SINT32 nve_oslockacquire(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock) { DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockacquire\n"); NVE_OSLOCK((struct nve_softc *)lock); return (1); } /* Release lock */ static NV_SINT32 nve_oslockrelease(PNV_VOID ctx, NV_SINT32 type, PNV_VOID lock) { DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_oslockrelease\n"); NVE_OSUNLOCK((struct nve_softc *)lock); return (1); } /* I have no idea what this is for */ static PNV_VOID nve_osreturnbufvirt(PNV_VOID ctx, PNV_VOID readdata) { /* Not implemented */ DEBUGOUT(NVE_DEBUG_LOCK, "nve: nve_osreturnbufvirt\n"); panic("nve: nve_osreturnbufvirtual not implemented\n"); return (NULL); } /* --- End on NVOSAPI interface --- */ Index: stable/6/sys/dev/owi/if_owi.c =================================================================== --- stable/6/sys/dev/owi/if_owi.c (revision 149421) +++ stable/6/sys/dev/owi/if_owi.c (revision 149422) @@ -1,2431 +1,2433 @@ /*- * Copyright (c) 1997, 1998, 1999 * Bill Paul . All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ /* * Lucent WaveLAN/IEEE 802.11 PCMCIA driver for FreeBSD. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The WaveLAN/IEEE adapter is the second generation of the WaveLAN * from Lucent. Unlike the older cards, the new ones are programmed * entirely via a firmware-driven controller called the Hermes. 
* Unfortunately, Lucent will not release the Hermes programming manual * without an NDA (if at all). What they do release is an API library * called the HCF (Hardware Control Functions) which is supposed to * do the device-specific operations of a device driver for you. The * publically available version of the HCF library (the 'HCF Light') is * a) extremely gross, b) lacks certain features, particularly support * for 802.11 frames, and c) is contaminated by the GNU Public License. * * This driver does not use the HCF or HCF Light at all. Instead, it * programs the Hermes controller directly, using information gleaned * from the HCF Light code and corresponding documentation. * * This driver supports the ISA, PCMCIA and PCI versions of the Lucent * WaveLan cards (based on the Hermes chipset), as well as the newer * Prism 2 chipsets with firmware from Intersil and Symbol. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if !defined(lint) static const char rcsid[] = "$FreeBSD$"; #endif static void wi_intr(void *); static void wi_reset(struct wi_softc *); static int wi_ioctl(struct ifnet *, u_long, caddr_t); static void wi_init(void *); static void wi_start(struct ifnet *); static void wi_watchdog(struct ifnet *); static void wi_rxeof(struct wi_softc *); static void wi_txeof(struct wi_softc *, int); static void wi_update_stats(struct wi_softc *); static void wi_setmulti(struct wi_softc *); static int wi_cmd(struct wi_softc *, int, int, int, int); static int wi_read_record(struct wi_softc *, struct wi_ltv_gen *); static int wi_write_record(struct wi_softc *, struct wi_ltv_gen *); static int wi_read_data(struct wi_softc *, int, int, caddr_t, int); static int wi_write_data(struct wi_softc *, int, int, caddr_t, int); static 
int wi_seek(struct wi_softc *, int, int, int); static int wi_alloc_nicmem(struct wi_softc *, int, int *); static void wi_inquire(void *); static void wi_setdef(struct wi_softc *, struct wi_req *); #ifdef WICACHE static void wi_cache_store(struct wi_softc *, struct ether_header *, struct mbuf *, unsigned short); #endif static int wi_get_cur_ssid(struct wi_softc *, char *, int *); static int wi_media_change(struct ifnet *); static void wi_media_status(struct ifnet *, struct ifmediareq *); devclass_t owi_devclass; struct wi_card_ident wi_card_ident[] = { /* CARD_ID CARD_NAME FIRM_TYPE */ { WI_NIC_LUCENT_ID, WI_NIC_LUCENT_STR, WI_LUCENT }, { WI_NIC_SONY_ID, WI_NIC_SONY_STR, WI_LUCENT }, { WI_NIC_LUCENT_EMB_ID, WI_NIC_LUCENT_EMB_STR, WI_LUCENT }, { 0, NULL, 0 }, }; int owi_generic_detach(dev) device_t dev; { struct wi_softc *sc; struct ifnet *ifp; int s; sc = device_get_softc(dev); WI_LOCK(sc, s); ifp = sc->ifp; if (sc->wi_gone) { device_printf(dev, "already unloaded\n"); WI_UNLOCK(sc, s); return(ENODEV); } sc->wi_gone = !bus_child_present(dev); owi_stop(sc); /* Delete all remaining media. */ ifmedia_removeall(&sc->ifmedia); ether_ifdetach(ifp); bus_teardown_intr(dev, sc->irq, sc->wi_intrhand); owi_free(dev); sc->wi_gone = 1; WI_UNLOCK(sc, s); mtx_destroy(&sc->wi_mtx); return(0); } int owi_generic_attach(device_t dev) { struct wi_softc *sc; struct wi_ltv_macaddr mac; struct wi_ltv_gen gen; struct ifnet *ifp; int error; int s; /* XXX maybe we need the splimp stuff here XXX */ sc = device_get_softc(dev); ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); owi_free(dev); return (ENOSPC); } error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET, wi_intr, sc, &sc->wi_intrhand); if (error) { device_printf(dev, "bus_setup_intr() failed! (%d)\n", error); owi_free(dev); return (error); } mtx_init(&sc->wi_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); WI_LOCK(sc, s); /* Reset the NIC. 
*/ wi_reset(sc); /* * Read the station address. * And do it twice. I've seen PRISM-based cards that return * an error when trying to read it the first time, which causes * the probe to fail. */ mac.wi_type = WI_RID_MAC_NODE; mac.wi_len = 4; wi_read_record(sc, (struct wi_ltv_gen *)&mac); if ((error = wi_read_record(sc, (struct wi_ltv_gen *)&mac)) != 0) { device_printf(dev, "mac read failed %d\n", error); owi_free(dev); return (error); } owi_get_id(sc); if_initname(ifp, device_get_name(dev), sc->wi_unit); ifp->if_softc = sc; ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = wi_ioctl; ifp->if_start = wi_start; ifp->if_watchdog = wi_watchdog; ifp->if_init = wi_init; ifp->if_baudrate = 10000000; ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; bzero(sc->wi_node_name, sizeof(sc->wi_node_name)); bcopy(WI_DEFAULT_NODENAME, sc->wi_node_name, sizeof(WI_DEFAULT_NODENAME) - 1); bzero(sc->wi_net_name, sizeof(sc->wi_net_name)); bcopy(WI_DEFAULT_NETNAME, sc->wi_net_name, sizeof(WI_DEFAULT_NETNAME) - 1); bzero(sc->wi_ibss_name, sizeof(sc->wi_ibss_name)); bcopy(WI_DEFAULT_IBSS, sc->wi_ibss_name, sizeof(WI_DEFAULT_IBSS) - 1); sc->wi_portnum = WI_DEFAULT_PORT; sc->wi_ptype = WI_PORTTYPE_BSS; sc->wi_ap_density = WI_DEFAULT_AP_DENSITY; sc->wi_rts_thresh = WI_DEFAULT_RTS_THRESH; sc->wi_tx_rate = WI_DEFAULT_TX_RATE; sc->wi_max_data_len = WI_DEFAULT_DATALEN; sc->wi_create_ibss = WI_DEFAULT_CREATE_IBSS; sc->wi_pm_enabled = WI_DEFAULT_PM_ENABLED; sc->wi_max_sleep = WI_DEFAULT_MAX_SLEEP; sc->wi_roaming = WI_DEFAULT_ROAMING; sc->wi_authtype = WI_DEFAULT_AUTHTYPE; sc->wi_authmode = IEEE80211_AUTH_OPEN; /* * Read the default channel from the NIC. This may vary * depending on the country where the NIC was purchased, so * we can't hard-code a default and expect it to work for * everyone. */ gen.wi_type = WI_RID_OWN_CHNL; gen.wi_len = 2; wi_read_record(sc, &gen); sc->wi_channel = gen.wi_val; /* * Set flags based on firmware version. 
*/ switch (sc->sc_firmware_type) { case WI_LUCENT: sc->wi_flags |= WI_FLAGS_HAS_ROAMING; if (sc->sc_sta_firmware_ver >= 60000) sc->wi_flags |= WI_FLAGS_HAS_MOR; if (sc->sc_sta_firmware_ver >= 60006) { sc->wi_flags |= WI_FLAGS_HAS_IBSS; sc->wi_flags |= WI_FLAGS_HAS_CREATE_IBSS; } sc->wi_ibss_port = htole16(1); break; } /* * Find out if we support WEP on this card. */ gen.wi_type = WI_RID_WEP_AVAIL; gen.wi_len = 2; wi_read_record(sc, &gen); sc->wi_has_wep = gen.wi_val; if (bootverbose) device_printf(sc->dev, "owi_has_wep = %d\n", sc->wi_has_wep); /* * Find supported rates. */ gen.wi_type = WI_RID_DATA_RATES; gen.wi_len = 2; if (wi_read_record(sc, &gen)) sc->wi_supprates = WI_SUPPRATES_1M | WI_SUPPRATES_2M | WI_SUPPRATES_5M | WI_SUPPRATES_11M; else sc->wi_supprates = gen.wi_val; bzero((char *)&sc->wi_stats, sizeof(sc->wi_stats)); wi_init(sc); owi_stop(sc); ifmedia_init(&sc->ifmedia, 0, wi_media_change, wi_media_status); #define ADD(m, c) ifmedia_add(&sc->ifmedia, (m), (c), NULL) if (sc->wi_supprates & WI_SUPPRATES_1M) { ADD(IFM_MAKEWORD(IFM_IEEE80211, IFM_IEEE80211_DS1, 0, 0), 0); ADD(IFM_MAKEWORD(IFM_IEEE80211, IFM_IEEE80211_DS1, IFM_IEEE80211_ADHOC, 0), 0); if (sc->wi_flags & WI_FLAGS_HAS_IBSS) ADD(IFM_MAKEWORD(IFM_IEEE80211, IFM_IEEE80211_DS1, IFM_IEEE80211_IBSS, 0), 0); if (sc->wi_flags & WI_FLAGS_HAS_CREATE_IBSS) ADD(IFM_MAKEWORD(IFM_IEEE80211, IFM_IEEE80211_DS1, IFM_IEEE80211_IBSSMASTER, 0), 0); } if (sc->wi_supprates & WI_SUPPRATES_2M) { ADD(IFM_MAKEWORD(IFM_IEEE80211, IFM_IEEE80211_DS2, 0, 0), 0); ADD(IFM_MAKEWORD(IFM_IEEE80211, IFM_IEEE80211_DS2, IFM_IEEE80211_ADHOC, 0), 0); if (sc->wi_flags & WI_FLAGS_HAS_IBSS) ADD(IFM_MAKEWORD(IFM_IEEE80211, IFM_IEEE80211_DS2, IFM_IEEE80211_IBSS, 0), 0); if (sc->wi_flags & WI_FLAGS_HAS_CREATE_IBSS) ADD(IFM_MAKEWORD(IFM_IEEE80211, IFM_IEEE80211_DS2, IFM_IEEE80211_IBSSMASTER, 0), 0); } if (sc->wi_supprates & WI_SUPPRATES_5M) { ADD(IFM_MAKEWORD(IFM_IEEE80211, IFM_IEEE80211_DS5, 0, 0), 0); ADD(IFM_MAKEWORD(IFM_IEEE80211, 
IFM_IEEE80211_DS5, IFM_IEEE80211_ADHOC, 0), 0); if (sc->wi_flags & WI_FLAGS_HAS_IBSS) ADD(IFM_MAKEWORD(IFM_IEEE80211, IFM_IEEE80211_DS5, IFM_IEEE80211_IBSS, 0), 0); if (sc->wi_flags & WI_FLAGS_HAS_CREATE_IBSS) ADD(IFM_MAKEWORD(IFM_IEEE80211, IFM_IEEE80211_DS5, IFM_IEEE80211_IBSSMASTER, 0), 0); } if (sc->wi_supprates & WI_SUPPRATES_11M) { ADD(IFM_MAKEWORD(IFM_IEEE80211, IFM_IEEE80211_DS11, 0, 0), 0); ADD(IFM_MAKEWORD(IFM_IEEE80211, IFM_IEEE80211_DS11, IFM_IEEE80211_ADHOC, 0), 0); if (sc->wi_flags & WI_FLAGS_HAS_IBSS) ADD(IFM_MAKEWORD(IFM_IEEE80211, IFM_IEEE80211_DS11, IFM_IEEE80211_IBSS, 0), 0); if (sc->wi_flags & WI_FLAGS_HAS_CREATE_IBSS) ADD(IFM_MAKEWORD(IFM_IEEE80211, IFM_IEEE80211_DS11, IFM_IEEE80211_IBSSMASTER, 0), 0); ADD(IFM_MAKEWORD(IFM_IEEE80211, IFM_MANUAL, 0, 0), 0); } ADD(IFM_MAKEWORD(IFM_IEEE80211, IFM_AUTO, IFM_IEEE80211_ADHOC, 0), 0); if (sc->wi_flags & WI_FLAGS_HAS_IBSS) ADD(IFM_MAKEWORD(IFM_IEEE80211, IFM_AUTO, IFM_IEEE80211_IBSS, 0), 0); if (sc->wi_flags & WI_FLAGS_HAS_CREATE_IBSS) ADD(IFM_MAKEWORD(IFM_IEEE80211, IFM_AUTO, IFM_IEEE80211_IBSSMASTER, 0), 0); ADD(IFM_MAKEWORD(IFM_IEEE80211, IFM_AUTO, 0, 0), 0); #undef ADD ifmedia_set(&sc->ifmedia, IFM_MAKEWORD(IFM_IEEE80211, IFM_AUTO, 0, 0)); /* * Call MI attach routine. 
*/ ether_ifattach(ifp, (const u_int8_t *)mac.wi_mac_addr); callout_handle_init(&sc->wi_stat_ch); WI_UNLOCK(sc, s); return(0); } void owi_get_id(sc) struct wi_softc *sc; { struct wi_ltv_ver ver; struct wi_card_ident *id; /* getting chip identity */ memset(&ver, 0, sizeof(ver)); ver.wi_type = WI_RID_CARD_ID; ver.wi_len = 5; wi_read_record(sc, (struct wi_ltv_gen *)&ver); device_printf(sc->dev, "using "); sc->sc_firmware_type = WI_NOTYPE; for (id = wi_card_ident; id->card_name != NULL; id++) { if (le16toh(ver.wi_ver[0]) == id->card_id) { printf("%s", id->card_name); sc->sc_firmware_type = id->firm_type; break; } } if (sc->sc_firmware_type == WI_NOTYPE) { if ((le16toh(ver.wi_ver[0]) & 0x8000) == 0) { printf("Unknown Lucent chip"); sc->sc_firmware_type = WI_LUCENT; } } if (sc->sc_firmware_type != WI_LUCENT) return; /* get station firmware version */ memset(&ver, 0, sizeof(ver)); ver.wi_type = WI_RID_STA_IDENTITY; ver.wi_len = 5; wi_read_record(sc, (struct wi_ltv_gen *)&ver); ver.wi_ver[1] = le16toh(ver.wi_ver[1]); ver.wi_ver[2] = le16toh(ver.wi_ver[2]); ver.wi_ver[3] = le16toh(ver.wi_ver[3]); sc->sc_sta_firmware_ver = ver.wi_ver[2] * 10000 + ver.wi_ver[3] * 100 + ver.wi_ver[1]; printf("\n"); device_printf(sc->dev, "Lucent Firmware: "); printf("Station %u.%02u.%02u\n", sc->sc_sta_firmware_ver / 10000, (sc->sc_sta_firmware_ver % 10000) / 100, sc->sc_sta_firmware_ver % 100); return; } static void wi_rxeof(sc) struct wi_softc *sc; { struct ifnet *ifp; struct ether_header *eh; struct mbuf *m; int id; int s; WI_LOCK_ASSERT(sc); ifp = sc->ifp; id = CSR_READ_2(sc, WI_RX_FID); /* * if we have the procframe flag set, disregard all this and just * read the data from the device. 
*/ if (sc->wi_procframe || sc->wi_debug.wi_monitor) { struct wi_frame *rx_frame; int datlen, hdrlen; /* first allocate mbuf for packet storage */ MGETHDR(m, M_DONTWAIT, MT_DATA); if (m == NULL) { ifp->if_ierrors++; return; } MCLGET(m, M_DONTWAIT); if (!(m->m_flags & M_EXT)) { m_freem(m); ifp->if_ierrors++; return; } m->m_pkthdr.rcvif = ifp; /* now read wi_frame first so we know how much data to read */ if (wi_read_data(sc, id, 0, mtod(m, caddr_t), sizeof(struct wi_frame))) { m_freem(m); ifp->if_ierrors++; return; } rx_frame = mtod(m, struct wi_frame *); switch ((rx_frame->wi_status & WI_STAT_MAC_PORT) >> 8) { case 7: switch (rx_frame->wi_frame_ctl & WI_FCTL_FTYPE) { case WI_FTYPE_DATA: hdrlen = WI_DATA_HDRLEN; datlen = rx_frame->wi_dat_len + WI_FCS_LEN; break; case WI_FTYPE_MGMT: hdrlen = WI_MGMT_HDRLEN; datlen = rx_frame->wi_dat_len + WI_FCS_LEN; break; case WI_FTYPE_CTL: /* * prism2 cards don't pass control packets * down properly or consistently, so we'll only * pass down the header. */ hdrlen = WI_CTL_HDRLEN; datlen = 0; break; default: device_printf(sc->dev, "received packet of " "unknown type on port 7\n"); m_freem(m); ifp->if_ierrors++; return; } break; case 0: hdrlen = WI_DATA_HDRLEN; datlen = rx_frame->wi_dat_len + WI_FCS_LEN; break; default: device_printf(sc->dev, "received packet on invalid " "port (wi_status=0x%x)\n", rx_frame->wi_status); m_freem(m); ifp->if_ierrors++; return; } if ((hdrlen + datlen + 2) > MCLBYTES) { device_printf(sc->dev, "oversized packet received " "(wi_dat_len=%d, wi_status=0x%x)\n", datlen, rx_frame->wi_status); m_freem(m); ifp->if_ierrors++; return; } if (wi_read_data(sc, id, hdrlen, mtod(m, caddr_t) + hdrlen, datlen + 2)) { m_freem(m); ifp->if_ierrors++; return; } m->m_pkthdr.len = m->m_len = hdrlen + datlen; ifp->if_ipackets++; /* Handle BPF listeners. 
*/ BPF_MTAP(ifp, m); m_freem(m); } else { struct wi_frame rx_frame; /* First read in the frame header */ if (wi_read_data(sc, id, 0, (caddr_t)&rx_frame, sizeof(rx_frame))) { ifp->if_ierrors++; return; } if (rx_frame.wi_status & WI_STAT_ERRSTAT) { ifp->if_ierrors++; return; } MGETHDR(m, M_DONTWAIT, MT_DATA); if (m == NULL) { ifp->if_ierrors++; return; } MCLGET(m, M_DONTWAIT); if (!(m->m_flags & M_EXT)) { m_freem(m); ifp->if_ierrors++; return; } eh = mtod(m, struct ether_header *); m->m_pkthdr.rcvif = ifp; if (rx_frame.wi_status == WI_STAT_1042 || rx_frame.wi_status == WI_STAT_TUNNEL || rx_frame.wi_status == WI_STAT_WMP_MSG) { if((rx_frame.wi_dat_len + WI_SNAPHDR_LEN) > MCLBYTES) { device_printf(sc->dev, "oversized packet received " "(wi_dat_len=%d, wi_status=0x%x)\n", rx_frame.wi_dat_len, rx_frame.wi_status); m_freem(m); ifp->if_ierrors++; return; } m->m_pkthdr.len = m->m_len = rx_frame.wi_dat_len + WI_SNAPHDR_LEN; #if 0 bcopy((char *)&rx_frame.wi_addr1, (char *)&eh->ether_dhost, ETHER_ADDR_LEN); if (sc->wi_ptype == WI_PORTTYPE_ADHOC) { bcopy((char *)&rx_frame.wi_addr2, (char *)&eh->ether_shost, ETHER_ADDR_LEN); } else { bcopy((char *)&rx_frame.wi_addr3, (char *)&eh->ether_shost, ETHER_ADDR_LEN); } #else bcopy((char *)&rx_frame.wi_dst_addr, (char *)&eh->ether_dhost, ETHER_ADDR_LEN); bcopy((char *)&rx_frame.wi_src_addr, (char *)&eh->ether_shost, ETHER_ADDR_LEN); #endif bcopy((char *)&rx_frame.wi_type, (char *)&eh->ether_type, ETHER_TYPE_LEN); if (wi_read_data(sc, id, WI_802_11_OFFSET, mtod(m, caddr_t) + sizeof(struct ether_header), m->m_len + 2)) { m_freem(m); ifp->if_ierrors++; return; } } else { if((rx_frame.wi_dat_len + sizeof(struct ether_header)) > MCLBYTES) { device_printf(sc->dev, "oversized packet received " "(wi_dat_len=%d, wi_status=0x%x)\n", rx_frame.wi_dat_len, rx_frame.wi_status); m_freem(m); ifp->if_ierrors++; return; } m->m_pkthdr.len = m->m_len = rx_frame.wi_dat_len + sizeof(struct ether_header); if (wi_read_data(sc, id, WI_802_3_OFFSET, mtod(m, 
caddr_t), m->m_len + 2)) { m_freem(m); ifp->if_ierrors++; return; } } ifp->if_ipackets++; /* Receive packet. */ #ifdef WICACHE wi_cache_store(sc, eh, m, rx_frame.wi_q_info); #endif WI_UNLOCK(sc, s); (*ifp->if_input)(ifp, m); WI_LOCK(sc, s); } } static void wi_txeof(sc, status) struct wi_softc *sc; int status; { struct ifnet *ifp; ifp = sc->ifp; ifp->if_timer = 0; ifp->if_flags &= ~IFF_OACTIVE; if (status & WI_EV_TX_EXC) ifp->if_oerrors++; else ifp->if_opackets++; return; } static void wi_inquire(xsc) void *xsc; { struct wi_softc *sc; struct ifnet *ifp; int s; sc = xsc; ifp = sc->ifp; sc->wi_stat_ch = timeout(wi_inquire, sc, hz * 60); /* Don't do this while we're transmitting */ if (ifp->if_flags & IFF_OACTIVE) return; WI_LOCK(sc, s); wi_cmd(sc, WI_CMD_INQUIRE, WI_INFO_COUNTERS, 0, 0); WI_UNLOCK(sc, s); return; } static void wi_update_stats(sc) struct wi_softc *sc; { struct wi_ltv_gen gen; u_int16_t id; struct ifnet *ifp; u_int32_t *ptr; int len, i; u_int16_t t; ifp = sc->ifp; id = CSR_READ_2(sc, WI_INFO_FID); wi_read_data(sc, id, 0, (char *)&gen, 4); /* * if we just got our scan results, copy it over into the scan buffer * so we can return it to anyone that asks for it. (add a little * compatibility with the prism2 scanning mechanism) */ if (gen.wi_type == WI_INFO_SCAN_RESULTS) { sc->wi_scanbuf_len = gen.wi_len; wi_read_data(sc, id, 4, (char *)sc->wi_scanbuf, sc->wi_scanbuf_len * 2); return; } else if (gen.wi_type != WI_INFO_COUNTERS) return; len = (gen.wi_len - 1 < sizeof(sc->wi_stats) / 4) ? 
gen.wi_len - 1 : sizeof(sc->wi_stats) / 4; ptr = (u_int32_t *)&sc->wi_stats; for (i = 0; i < len - 1; i++) { t = CSR_READ_2(sc, WI_DATA1); #ifdef WI_HERMES_STATS_WAR if (t > 0xF000) t = ~t & 0xFFFF; #endif ptr[i] += t; } ifp->if_collisions = sc->wi_stats.wi_tx_single_retries + sc->wi_stats.wi_tx_multi_retries + sc->wi_stats.wi_tx_retry_limit; return; } static void wi_intr(xsc) void *xsc; { struct wi_softc *sc = xsc; struct ifnet *ifp; u_int16_t status; int s; WI_LOCK(sc, s); ifp = sc->ifp; if (sc->wi_gone || !(ifp->if_flags & IFF_UP)) { CSR_WRITE_2(sc, WI_EVENT_ACK, 0xFFFF); CSR_WRITE_2(sc, WI_INT_EN, 0); WI_UNLOCK(sc, s); return; } /* Disable interrupts. */ CSR_WRITE_2(sc, WI_INT_EN, 0); status = CSR_READ_2(sc, WI_EVENT_STAT); CSR_WRITE_2(sc, WI_EVENT_ACK, ~WI_INTRS); if (status & WI_EV_RX) { wi_rxeof(sc); CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_RX); } if (status & WI_EV_TX) { wi_txeof(sc, status); CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_TX); } if (status & WI_EV_ALLOC) { int id; id = CSR_READ_2(sc, WI_ALLOC_FID); CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_ALLOC); if (id == sc->wi_tx_data_id) wi_txeof(sc, status); } if (status & WI_EV_INFO) { wi_update_stats(sc); CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_INFO); } if (status & WI_EV_TX_EXC) { wi_txeof(sc, status); CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_TX_EXC); } if (status & WI_EV_INFO_DROP) { CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_INFO_DROP); } /* Re-enable interrupts. 
 */
	CSR_WRITE_2(sc, WI_INT_EN, WI_INTRS);

	/* Kick the transmitter if packets queued while we serviced events. */
	if (ifp->if_snd.ifq_head != NULL) {
		wi_start(ifp);
	}

	WI_UNLOCK(sc, s);

	return;
}

/*
 * wi_cmd -- issue a command to the Hermes controller and wait for it
 * to complete.  Polls the busy bit (up to ~5 s), writes the three
 * parameter registers and the command register, then polls the event
 * status register for WI_EV_CMD (up to WI_TIMEOUT iterations).
 * Returns 0 on success, EIO on a command error, ETIMEDOUT on timeout.
 *
 * NOTE(review): the static 'count' recursion guard and its panic look
 * like leftover debugging scaffolding -- confirm before relying on it.
 */
static int
wi_cmd(sc, cmd, val0, val1, val2)
	struct wi_softc		*sc;
	int			cmd;
	int			val0;
	int			val1;
	int			val2;
{
	int			i, s = 0;
	static volatile int	count = 0;

	if (count > 1)
		panic("Hey partner, hold on there!");
	count++;

	/* wait for the busy bit to clear */
	for (i = 500; i > 0; i--) {	/* 5s */
		if (!(CSR_READ_2(sc, WI_COMMAND) & WI_CMD_BUSY)) {
			break;
		}
		DELAY(10*1000);	/* 10 m sec */
	}
	if (i == 0) {
		device_printf(sc->dev, "owi_cmd: busy bit won't clear.\n" );
		count--;
		return(ETIMEDOUT);
	}

	CSR_WRITE_2(sc, WI_PARAM0, val0);
	CSR_WRITE_2(sc, WI_PARAM1, val1);
	CSR_WRITE_2(sc, WI_PARAM2, val2);
	CSR_WRITE_2(sc, WI_COMMAND, cmd);

	for (i = 0; i < WI_TIMEOUT; i++) {
		/*
		 * Wait for 'command complete' bit to be
		 * set in the event status register.
		 */
		s = CSR_READ_2(sc, WI_EVENT_STAT);
		if (s & WI_EV_CMD) {
			/* Ack the event and read result code. */
			s = CSR_READ_2(sc, WI_STATUS);
			CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_CMD);
#ifdef foo
			if ((s & WI_CMD_CODE_MASK) != (cmd & WI_CMD_CODE_MASK))
				return(EIO);
#endif
			if (s & WI_STAT_CMD_RESULT) {
				count--;
				return(EIO);
			}
			break;
		}
		DELAY(WI_DELAY);
	}

	count--;
	if (i == WI_TIMEOUT) {
		device_printf(sc->dev,
		    "timeout in wi_cmd 0x%04x; event status 0x%04x\n", cmd, s);
		return(ETIMEDOUT);
	}

	return(0);
}

/*
 * wi_reset -- initialize the NIC, retrying the WI_CMD_INI command up
 * to WI_INIT_TRIES times, then mask and ack all interrupts and set the
 * tick timer.  Marks the device enabled even when initialization
 * fails (only a diagnostic is printed in that case).
 */
static void
wi_reset(sc)
	struct wi_softc		*sc;
{
#define WI_INIT_TRIES 3
	int i;
	int tries;

	tries = WI_INIT_TRIES;

	for (i = 0; i < tries; i++) {
		if (wi_cmd(sc, WI_CMD_INI, 0, 0, 0) == 0)
			break;
		DELAY(WI_DELAY * 1000);
	}
	sc->sc_enabled = 1;

	if (i == tries) {
		device_printf(sc->dev, "init failed\n");
		return;
	}

	CSR_WRITE_2(sc, WI_INT_EN, 0);
	CSR_WRITE_2(sc, WI_EVENT_ACK, 0xFFFF);

	/* Calibrate timer. */
	WI_SETVAL(WI_RID_TICK_TIME, 8);

	return;
}

/*
 * Read an LTV record from the NIC.
*/ static int wi_read_record(sc, ltv) struct wi_softc *sc; struct wi_ltv_gen *ltv; { u_int16_t *ptr; int i, len, code; struct wi_ltv_gen *oltv; oltv = ltv; /* Tell the NIC to enter record read mode. */ if (wi_cmd(sc, WI_CMD_ACCESS|WI_ACCESS_READ, ltv->wi_type, 0, 0)) return(EIO); /* Seek to the record. */ if (wi_seek(sc, ltv->wi_type, 0, WI_BAP1)) return(EIO); /* * Read the length and record type and make sure they * match what we expect (this verifies that we have enough * room to hold all of the returned data). */ len = CSR_READ_2(sc, WI_DATA1); if (len > ltv->wi_len) return(ENOSPC); code = CSR_READ_2(sc, WI_DATA1); if (code != ltv->wi_type) return(EIO); ltv->wi_len = len; ltv->wi_type = code; /* Now read the data. */ ptr = <v->wi_val; for (i = 0; i < ltv->wi_len - 1; i++) ptr[i] = CSR_READ_2(sc, WI_DATA1); if (ltv->wi_type == WI_RID_PORTTYPE && sc->wi_ptype == WI_PORTTYPE_IBSS && ltv->wi_val == sc->wi_ibss_port) { /* * Convert vendor IBSS port type to WI_PORTTYPE_IBSS. * Since Lucent uses port type 1 for BSS *and* IBSS we * have to rely on wi_ptype to distinguish this for us. */ ltv->wi_val = htole16(WI_PORTTYPE_IBSS); } return(0); } /* * Same as read, except we inject data instead of reading it. */ static int wi_write_record(sc, ltv) struct wi_softc *sc; struct wi_ltv_gen *ltv; { uint16_t *ptr; int i; struct wi_ltv_gen p2ltv; if (ltv->wi_type == WI_RID_PORTTYPE && le16toh(ltv->wi_val) == WI_PORTTYPE_IBSS) { /* Convert WI_PORTTYPE_IBSS to vendor IBSS port type. 
 */
/*
 * (Fragment: body of wi_write_record, whose header is on the previous
 * line outside this view.  Writes an LTV record to the NIC, first
 * translating a few record types for Lucent firmware.)
 */
		p2ltv.wi_type = WI_RID_PORTTYPE;
		p2ltv.wi_len = 2;
		p2ltv.wi_val = sc->wi_ibss_port;
		ltv = &p2ltv;
	} else {
		/* LUCENT */
		switch (ltv->wi_type) {
		case WI_RID_TX_RATE:
			/* Map user-visible rate codes to the Lucent encoding. */
			switch (ltv->wi_val) {
			case 1: ltv->wi_val = 1; break;	/* 1Mb/s fixed */
			case 2: ltv->wi_val = 2; break;	/* 2Mb/s fixed */
			case 3: ltv->wi_val = 3; break;	/* 11Mb/s auto */
			case 5: ltv->wi_val = 4; break;	/* 5.5Mb/s fixed */
			case 6: ltv->wi_val = 6; break;	/* 2Mb/s auto */
			case 7: ltv->wi_val = 7; break;	/* 5.5Mb/s auto */
			case 11: ltv->wi_val = 5; break;	/* 11Mb/s fixed */
			default: return EINVAL;
			}
			/*
			 * NOTE(review): control falls through into the
			 * WI_RID_TX_CRYPT_KEY range check below -- there is
			 * no 'break' here, so mapped rate codes > WI_NLTV_KEYS
			 * would be rejected.  Looks like a missing break;
			 * confirm intent before changing.
			 */
		case WI_RID_TX_CRYPT_KEY:
			if (ltv->wi_val > WI_NLTV_KEYS)
				return (EINVAL);
			break;
		}
	}

	if (wi_seek(sc, ltv->wi_type, 0, WI_BAP1))
		return(EIO);

	/* Write the length and type words, then the payload. */
	CSR_WRITE_2(sc, WI_DATA1, ltv->wi_len);
	CSR_WRITE_2(sc, WI_DATA1, ltv->wi_type);

	ptr = &ltv->wi_val;
	for (i = 0; i < ltv->wi_len - 1; i++)
		CSR_WRITE_2(sc, WI_DATA1, ptr[i]);

	if (wi_cmd(sc, WI_CMD_ACCESS|WI_ACCESS_WRITE, ltv->wi_type, 0, 0))
		return(EIO);

	return(0);
}

/*
 * wi_seek -- position buffer-access path 'chan' (WI_BAP0/WI_BAP1) at
 * offset 'off' of frame/record 'id', then poll until the busy/error
 * bits clear.  Returns 0 on success, EIO for an invalid channel, or
 * ETIMEDOUT.
 */
static int
wi_seek(sc, id, off, chan)
	struct wi_softc		*sc;
	int			id, off, chan;
{
	int			i;
	int			selreg, offreg;
	int			status;

	switch (chan) {
	case WI_BAP0:
		selreg = WI_SEL0;
		offreg = WI_OFF0;
		break;
	case WI_BAP1:
		selreg = WI_SEL1;
		offreg = WI_OFF1;
		break;
	default:
		device_printf(sc->dev, "invalid data path: %x\n", chan);
		return(EIO);
	}

	CSR_WRITE_2(sc, selreg, id);
	CSR_WRITE_2(sc, offreg, off);

	for (i = 0; i < WI_TIMEOUT; i++) {
		status = CSR_READ_2(sc, offreg);
		if (!(status & (WI_OFF_BUSY|WI_OFF_ERR)))
			break;
		DELAY(WI_DELAY);
	}

	if (i == WI_TIMEOUT) {
		device_printf(sc->dev,
		    "timeout in wi_seek to %x/%x; last status %x\n",
		    id, off, status);
		return(ETIMEDOUT);
	}

	return(0);
}

/*
 * wi_read_data -- copy 'len' bytes (rounded down to whole 16-bit
 * words) from offset 'off' of NIC frame/record 'id' into 'buf',
 * via BAP1.  Returns 0 or EIO.
 */
static int
wi_read_data(sc, id, off, buf, len)
	struct wi_softc		*sc;
	int			id, off;
	caddr_t			buf;
	int			len;
{
	int			i;
	u_int16_t		*ptr;

	if (wi_seek(sc, id, off, WI_BAP1))
		return(EIO);

	ptr = (u_int16_t *)buf;
	for (i = 0; i < len / 2; i++)
		ptr[i] = CSR_READ_2(sc, WI_DATA1);

	return(0);
}

/*
 * According to the comments in the HCF Light code, there is a bug in
* the Hermes (or possibly in certain Hermes firmware revisions) where * the chip's internal autoincrement counter gets thrown off during * data writes: the autoincrement is missed, causing one data word to * be overwritten and subsequent words to be written to the wrong memory * locations. The end result is that we could end up transmitting bogus * frames without realizing it. The workaround for this is to write a * couple of extra guard words after the end of the transfer, then * attempt to read then back. If we fail to locate the guard words where * we expect them, we preform the transfer over again. */ static int wi_write_data(sc, id, off, buf, len) struct wi_softc *sc; int id, off; caddr_t buf; int len; { int i; u_int16_t *ptr; #ifdef WI_HERMES_AUTOINC_WAR int retries; retries = 512; again: #endif if (wi_seek(sc, id, off, WI_BAP0)) return(EIO); ptr = (u_int16_t *)buf; for (i = 0; i < (len / 2); i++) CSR_WRITE_2(sc, WI_DATA0, ptr[i]); #ifdef WI_HERMES_AUTOINC_WAR CSR_WRITE_2(sc, WI_DATA0, 0x1234); CSR_WRITE_2(sc, WI_DATA0, 0x5678); if (wi_seek(sc, id, off + len, WI_BAP0)) return(EIO); if (CSR_READ_2(sc, WI_DATA0) != 0x1234 || CSR_READ_2(sc, WI_DATA0) != 0x5678) { if (--retries >= 0) goto again; device_printf(sc->dev, "owi_write_data device timeout\n"); return (EIO); } #endif return(0); } /* * Allocate a region of memory inside the NIC and zero * it out. 
 */
/*
 * wi_alloc_nicmem -- allocate 'len' bytes inside the NIC, zero the
 * region a word at a time, and return its frame id through 'id'.
 * Returns 0, ENOMEM, EIO or ETIMEDOUT.
 */
static int
wi_alloc_nicmem(sc, len, id)
	struct wi_softc		*sc;
	int			len;
	int			*id;
{
	int			i;

	if (wi_cmd(sc, WI_CMD_ALLOC_MEM, len, 0, 0)) {
		device_printf(sc->dev,
		    "failed to allocate %d bytes on NIC\n", len);
		return(ENOMEM);
	}

	for (i = 0; i < WI_TIMEOUT; i++) {
		if (CSR_READ_2(sc, WI_EVENT_STAT) & WI_EV_ALLOC)
			break;
		DELAY(WI_DELAY);
	}

	if (i == WI_TIMEOUT) {
		device_printf(sc->dev, "time out allocating memory on card\n");
		return(ETIMEDOUT);
	}

	CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_ALLOC);
	*id = CSR_READ_2(sc, WI_ALLOC_FID);

	if (wi_seek(sc, *id, 0, WI_BAP0)) {
		device_printf(sc->dev,
		    "seek failed while allocating memory on card\n");
		return(EIO);
	}

	/* Zero the freshly allocated region word by word. */
	for (i = 0; i < len / 2; i++)
		CSR_WRITE_2(sc, WI_DATA0, 0);

	return(0);
}

/*
 * wi_setmulti -- program the NIC multicast filter from the interface
 * address list.  In allmulti/promiscuous mode an empty list is written
 * (filter disabled); otherwise at most 16 link-level addresses are
 * programmed, falling back to an empty list when there are more.
 */
static void
wi_setmulti(sc)
	struct wi_softc		*sc;
{
	struct ifnet		*ifp;
	int			i = 0;
	struct ifmultiaddr	*ifma;
	struct wi_ltv_mcast	mcast;

	ifp = sc->ifp;

	bzero((char *)&mcast, sizeof(mcast));

	mcast.wi_type = WI_RID_MCAST_LIST;
	mcast.wi_len = (3 * 16) + 1;

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		wi_write_record(sc, (struct wi_ltv_gen *)&mcast);
		return;
	}

	/*
	 * The leading '+' below is unified-diff markup: these two locking
	 * calls are the lines added by this revision (r149422), taking the
	 * interface address-list lock around the traversal.
	 */
+	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;

		if (i < 16) {
			bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
			    (char *)&mcast.wi_mcast[i], ETHER_ADDR_LEN);
			i++;
		} else {
			/* Too many groups: fall back to an empty filter. */
			bzero((char *)&mcast, sizeof(mcast));
			break;
		}
	}
+	IF_ADDR_UNLOCK(ifp);

	mcast.wi_len = (i * 3) + 1;
	wi_write_record(sc, (struct wi_ltv_gen *)&mcast);

	return;
}

/*
 * wi_setdef -- record an ioctl-supplied default into the softc so the
 * setting survives reinitialization.  (Continues on following lines
 * outside this view.)
 */
static void
wi_setdef(sc, wreq)
	struct wi_softc		*sc;
	struct wi_req		*wreq;
{
	struct sockaddr_dl	*sdl;
	struct ifaddr		*ifa;
	struct ifnet		*ifp;

	ifp = sc->ifp;

	switch(wreq->wi_type) {
	case WI_RID_MAC_NODE:
		/* Update both the interface and the link-layer sockaddr copy. */
		ifa = ifaddr_byindex(ifp->if_index);
		sdl = (struct sockaddr_dl *)ifa->ifa_addr;
		bcopy((char *)&wreq->wi_val, (char *)&IFP2ENADDR(sc->ifp),
		    ETHER_ADDR_LEN);
		bcopy((char *)&wreq->wi_val, LLADDR(sdl), ETHER_ADDR_LEN);
		break;
	case WI_RID_PORTTYPE:
		sc->wi_ptype = le16toh(wreq->wi_val[0]);
break; case WI_RID_TX_RATE: sc->wi_tx_rate = le16toh(wreq->wi_val[0]); break; case WI_RID_MAX_DATALEN: sc->wi_max_data_len = le16toh(wreq->wi_val[0]); break; case WI_RID_RTS_THRESH: sc->wi_rts_thresh = le16toh(wreq->wi_val[0]); break; case WI_RID_SYSTEM_SCALE: sc->wi_ap_density = le16toh(wreq->wi_val[0]); break; case WI_RID_CREATE_IBSS: sc->wi_create_ibss = le16toh(wreq->wi_val[0]); break; case WI_RID_OWN_CHNL: sc->wi_channel = le16toh(wreq->wi_val[0]); break; case WI_RID_NODENAME: bzero(sc->wi_node_name, sizeof(sc->wi_node_name)); bcopy((char *)&wreq->wi_val[1], sc->wi_node_name, 30); break; case WI_RID_DESIRED_SSID: bzero(sc->wi_net_name, sizeof(sc->wi_net_name)); bcopy((char *)&wreq->wi_val[1], sc->wi_net_name, 30); break; case WI_RID_OWN_SSID: bzero(sc->wi_ibss_name, sizeof(sc->wi_ibss_name)); bcopy((char *)&wreq->wi_val[1], sc->wi_ibss_name, 30); break; case WI_RID_PM_ENABLED: sc->wi_pm_enabled = le16toh(wreq->wi_val[0]); break; case WI_RID_MICROWAVE_OVEN: sc->wi_mor_enabled = le16toh(wreq->wi_val[0]); break; case WI_RID_MAX_SLEEP: sc->wi_max_sleep = le16toh(wreq->wi_val[0]); break; case WI_RID_CNFAUTHMODE: sc->wi_authtype = le16toh(wreq->wi_val[0]); break; case WI_RID_ROAMING_MODE: sc->wi_roaming = le16toh(wreq->wi_val[0]); break; case WI_RID_ENCRYPTION: sc->wi_use_wep = le16toh(wreq->wi_val[0]); break; case WI_RID_TX_CRYPT_KEY: sc->wi_tx_key = le16toh(wreq->wi_val[0]); break; case WI_RID_DEFLT_CRYPT_KEYS: bcopy((char *)wreq, (char *)&sc->wi_keys, sizeof(struct wi_ltv_keys)); break; default: break; } /* Reinitialize WaveLAN. 
*/
	wi_init(sc);

	return;
}

/*
 * Interface ioctl handler.  Runs under WI_LOCK; driver-private
 * SIOC[GS]WAVELAN requests and the generic net80211 SIOC[GS]80211
 * requests are handled here, everything else falls through to
 * ether_ioctl().  Privileged requests are gated with suser().
 */
static int wi_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	int			error = 0;
	int			len;
	int			s;
	uint16_t		mif;
	uint16_t		val;
	u_int8_t		tmpkey[14];
	char			tmpssid[IEEE80211_NWID_LEN];
	struct wi_softc		*sc;
	struct wi_req		wreq;
	struct ifreq		*ifr;
	struct ieee80211req	*ireq;
	struct thread		*td = curthread;

	sc = ifp->if_softc;
	WI_LOCK(sc, s);
	ifr = (struct ifreq *)data;
	ireq = (struct ieee80211req *)data;

	if (sc->wi_gone) {
		error = ENODEV;
		goto out;
	}

	switch(command) {
	case SIOCSIFFLAGS:
		/*
		 * Can't do promisc and hostap at the same time.  If all
		 * that's changing is the promisc flag, try to short-circuit
		 * a call to wi_init() by just setting PROMISC in the
		 * hardware.
		 */
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				if (ifp->if_flags & IFF_PROMISC &&
				    !(sc->wi_if_flags & IFF_PROMISC)) {
					WI_SETVAL(WI_RID_PROMISC, 1);
				} else if (!(ifp->if_flags & IFF_PROMISC) &&
				    sc->wi_if_flags & IFF_PROMISC) {
					WI_SETVAL(WI_RID_PROMISC, 0);
				} else {
					wi_init(sc);
				}
			} else {
				wi_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				owi_stop(sc);
			}
		}
		sc->wi_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		wi_setmulti(sc);
		error = 0;
		break;
	case SIOCGWAVELAN:
		error = copyin(ifr->ifr_data, &wreq, sizeof(wreq));
		if (error)
			break;
		if (wreq.wi_len > WI_MAX_DATALEN) {
			error = EINVAL;
			break;
		}
		/* Don't show WEP keys to non-root users.
		 */
		if (wreq.wi_type == WI_RID_DEFLT_CRYPT_KEYS && suser(td))
			break;
		if (wreq.wi_type == WI_RID_IFACE_STATS) {
			/* Statistics are served from the softc cache. */
			bcopy((char *)&sc->wi_stats, (char *)&wreq.wi_val,
			    sizeof(sc->wi_stats));
			wreq.wi_len = (sizeof(sc->wi_stats) / 2) + 1;
		} else if (wreq.wi_type == WI_RID_DEFLT_CRYPT_KEYS) {
			bcopy((char *)&sc->wi_keys, (char *)&wreq,
			    sizeof(struct wi_ltv_keys));
		}
#ifdef WICACHE
		else if (wreq.wi_type == WI_RID_ZERO_CACHE) {
			error = suser(td);
			if (error)
				break;
			sc->wi_sigitems = sc->wi_nextitem = 0;
		} else if (wreq.wi_type == WI_RID_READ_CACHE) {
			/* Item count first, then the raw cache entries. */
			char *pt = (char *)&wreq.wi_val;
			bcopy((char *)&sc->wi_sigitems, (char *)pt,
			    sizeof(int));
			pt += (sizeof (int));
			wreq.wi_len = sizeof(int) / 2;
			bcopy((char *)&sc->wi_sigcache, (char *)pt,
			    sizeof(struct wi_sigcache) * sc->wi_sigitems);
			wreq.wi_len += ((sizeof(struct wi_sigcache) *
			    sc->wi_sigitems) / 2) + 1;
		}
#endif
		else if (wreq.wi_type == WI_RID_PROCFRAME) {
			wreq.wi_len = 2;
			wreq.wi_val[0] = sc->wi_procframe;
		} else if (wreq.wi_type == WI_RID_SCAN_RES) {
			memcpy((char *)wreq.wi_val, (char *)sc->wi_scanbuf,
			    sc->wi_scanbuf_len * 2);
			wreq.wi_len = sc->wi_scanbuf_len;
		} else if (wreq.wi_type == WI_RID_MIF) {
			mif = wreq.wi_val[0];
			error = wi_cmd(sc, WI_CMD_READMIF, mif, 0, 0);
			val = CSR_READ_2(sc, WI_RESP0);
			wreq.wi_len = 2;
			wreq.wi_val[0] = val;
		} else {
			if (wi_read_record(sc, (struct wi_ltv_gen *)&wreq)) {
				error = EINVAL;
				break;
			}
		}
		error = copyout(&wreq, ifr->ifr_data, sizeof(wreq));
		break;
	case SIOCSWAVELAN:
		if ((error = suser(td)))
			goto out;
		error = copyin(ifr->ifr_data, &wreq, sizeof(wreq));
		if (error)
			break;
		if (wreq.wi_len > WI_MAX_DATALEN) {
			error = EINVAL;
			break;
		}
		if (wreq.wi_type == WI_RID_IFACE_STATS) {
			error = EINVAL;
			break;
		} else if (wreq.wi_type == WI_RID_PROCFRAME) {
			sc->wi_procframe = wreq.wi_val[0];
		/*
		 * if we're getting a scan request from a wavelan card
		 * (non-prism2), send out a cmd_inquire to the card to scan
		 * results for the scan will be received through the info
		 * interrupt handler. otherwise the scan request can be
		 * directly handled by a prism2 card's rid interface.
		 */
		} else if (wreq.wi_type == WI_RID_SCAN_REQ) {
			wi_cmd(sc, WI_CMD_INQUIRE, WI_INFO_SCAN_RESULTS, 0, 0);
		} else if (wreq.wi_type == WI_RID_MIF) {
			mif = wreq.wi_val[0];
			val = wreq.wi_val[1];
			error = wi_cmd(sc, WI_CMD_WRITEMIF, mif, val, 0);
		} else {
			/* Write the record and cache it as the new default. */
			error = wi_write_record(sc,
			    (struct wi_ltv_gen *)&wreq);
			if (!error)
				wi_setdef(sc, &wreq);
		}
		break;
	case SIOCG80211:
		switch(ireq->i_type) {
		case IEEE80211_IOC_SSID:
			/* i_val == -1: current SSID; 0: configured SSID. */
			if(ireq->i_val == -1) {
				bzero(tmpssid, IEEE80211_NWID_LEN);
				error = wi_get_cur_ssid(sc, tmpssid, &len);
				if (error != 0)
					break;
				error = copyout(tmpssid, ireq->i_data,
				    IEEE80211_NWID_LEN);
				ireq->i_len = len;
			} else if (ireq->i_val == 0) {
				error = copyout(sc->wi_net_name,
				    ireq->i_data,
				    IEEE80211_NWID_LEN);
				ireq->i_len = IEEE80211_NWID_LEN;
			} else
				error = EINVAL;
			break;
		case IEEE80211_IOC_NUMSSIDS:
			ireq->i_val = 1;
			break;
		case IEEE80211_IOC_WEP:
			if(!sc->wi_has_wep) {
				ireq->i_val = IEEE80211_WEP_NOSUP;
			} else {
				if(sc->wi_use_wep) {
					ireq->i_val = IEEE80211_WEP_MIXED;
				} else {
					ireq->i_val = IEEE80211_WEP_OFF;
				}
			}
			break;
		case IEEE80211_IOC_WEPKEY:
			if(!sc->wi_has_wep ||
			    ireq->i_val < 0 || ireq->i_val > 3) {
				error = EINVAL;
				break;
			}
			/* Non-root callers get a zeroed key back. */
			len = sc->wi_keys.wi_keys[ireq->i_val].wi_keylen;
			if (suser(td))
				bcopy(sc->wi_keys.wi_keys[ireq->i_val].wi_keydat,
				    tmpkey, len);
			else
				bzero(tmpkey, len);
			ireq->i_len = len;
			error = copyout(tmpkey, ireq->i_data, len);
			break;
		case IEEE80211_IOC_NUMWEPKEYS:
			if(!sc->wi_has_wep)
				error = EINVAL;
			else
				ireq->i_val = 4;
			break;
		case IEEE80211_IOC_WEPTXKEY:
			if(!sc->wi_has_wep)
				error = EINVAL;
			else
				ireq->i_val = sc->wi_tx_key;
			break;
		case IEEE80211_IOC_AUTHMODE:
			ireq->i_val = sc->wi_authmode;
			break;
		case IEEE80211_IOC_STATIONNAME:
			error = copyout(sc->wi_node_name,
			    ireq->i_data, IEEE80211_NWID_LEN);
			ireq->i_len = IEEE80211_NWID_LEN;
			break;
		case IEEE80211_IOC_CHANNEL:
			wreq.wi_type = WI_RID_CURRENT_CHAN;
			wreq.wi_len = WI_MAX_DATALEN;
			if (wi_read_record(sc, (struct wi_ltv_gen *)&wreq))
				error = EINVAL;
			else {
				ireq->i_val = wreq.wi_val[0];
			}
			break;
		case IEEE80211_IOC_POWERSAVE:
			if(sc->wi_pm_enabled)
				ireq->i_val = IEEE80211_POWERSAVE_ON;
			else
				ireq->i_val = IEEE80211_POWERSAVE_OFF;
			break;
		case IEEE80211_IOC_POWERSAVESLEEP:
			ireq->i_val = sc->wi_max_sleep;
			break;
		default:
			error = EINVAL;
		}
		break;
	case SIOCS80211:
		if ((error = suser(td)))
			goto out;
		switch(ireq->i_type) {
		case IEEE80211_IOC_SSID:
			if (ireq->i_val != 0 ||
			    ireq->i_len > IEEE80211_NWID_LEN) {
				error = EINVAL;
				break;
			}
			/* We set both of them */
			bzero(sc->wi_net_name, IEEE80211_NWID_LEN);
			error = copyin(ireq->i_data,
			    sc->wi_net_name, ireq->i_len);
			bcopy(sc->wi_net_name,
			    sc->wi_ibss_name, IEEE80211_NWID_LEN);
			break;
		case IEEE80211_IOC_WEP:
			/*
			 * These cards only support one mode so
			 * we just turn wep on what ever is
			 * passed in if it's not OFF.
			 */
			if (ireq->i_val == IEEE80211_WEP_OFF) {
				sc->wi_use_wep = 0;
			} else {
				sc->wi_use_wep = 1;
			}
			break;
		case IEEE80211_IOC_WEPKEY:
			if (ireq->i_val < 0 || ireq->i_val > 3 ||
			    ireq->i_len > 13) {
				error = EINVAL;
				break;
			}
			bzero(sc->wi_keys.wi_keys[ireq->i_val].wi_keydat, 13);
			error = copyin(ireq->i_data,
			    sc->wi_keys.wi_keys[ireq->i_val].wi_keydat,
			    ireq->i_len);
			if(error)
				break;
			sc->wi_keys.wi_keys[ireq->i_val].wi_keylen =
			    ireq->i_len;
			break;
		case IEEE80211_IOC_WEPTXKEY:
			if (ireq->i_val < 0 || ireq->i_val > 3) {
				error = EINVAL;
				break;
			}
			sc->wi_tx_key = ireq->i_val;
			break;
		case IEEE80211_IOC_AUTHMODE:
			sc->wi_authmode = ireq->i_val;
			break;
		case IEEE80211_IOC_STATIONNAME:
			if (ireq->i_len > 32) {
				error = EINVAL;
				break;
			}
			bzero(sc->wi_node_name, 32);
			error = copyin(ireq->i_data,
			    sc->wi_node_name, ireq->i_len);
			break;
		case IEEE80211_IOC_CHANNEL:
			/*
			 * The actual range is 1-14, but if you
			 * set it to 0 you get the default. So
			 * we let that work too.
*/
			if (ireq->i_val < 0 || ireq->i_val > 14) {
				error = EINVAL;
				break;
			}
			sc->wi_channel = ireq->i_val;
			break;
		case IEEE80211_IOC_POWERSAVE:
			switch (ireq->i_val) {
			case IEEE80211_POWERSAVE_OFF:
				sc->wi_pm_enabled = 0;
				break;
			case IEEE80211_POWERSAVE_ON:
				sc->wi_pm_enabled = 1;
				break;
			default:
				error = EINVAL;
				break;
			}
			break;
		case IEEE80211_IOC_POWERSAVESLEEP:
			if (ireq->i_val < 0) {
				error = EINVAL;
				break;
			}
			sc->wi_max_sleep = ireq->i_val;
			break;
		default:
			error = EINVAL;
			break;
		}

		/* Reinitialize WaveLAN. */
		wi_init(sc);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}
out:
	WI_UNLOCK(sc, s);

	return(error);
}

/*
 * (Re)initialize the interface: reset the NIC, program every cached
 * configuration RID from the softc, allocate the TX/management
 * buffers, enable interrupts and mark the interface running.
 * Also the if_init entry point, hence the void * argument.
 */
static void wi_init(xsc)
	void			*xsc;
{
	struct wi_softc		*sc = xsc;
	struct ifnet		*ifp = sc->ifp;
	struct wi_ltv_macaddr	mac;
	int			id = 0;
	int			s;

	WI_LOCK(sc, s);

	if (sc->wi_gone) {
		WI_UNLOCK(sc, s);
		return;
	}

	if (ifp->if_flags & IFF_RUNNING)
		owi_stop(sc);

	wi_reset(sc);

	/* Program max data length. */
	WI_SETVAL(WI_RID_MAX_DATALEN, sc->wi_max_data_len);

	/* Set the port type. */
	WI_SETVAL(WI_RID_PORTTYPE, sc->wi_ptype);

	/* Enable/disable IBSS creation. */
	WI_SETVAL(WI_RID_CREATE_IBSS, sc->wi_create_ibss);

	/* Program the RTS/CTS threshold. */
	WI_SETVAL(WI_RID_RTS_THRESH, sc->wi_rts_thresh);

	/* Program the TX rate */
	WI_SETVAL(WI_RID_TX_RATE, sc->wi_tx_rate);

	/* Access point density */
	WI_SETVAL(WI_RID_SYSTEM_SCALE, sc->wi_ap_density);

	/* Power Management Enabled */
	WI_SETVAL(WI_RID_PM_ENABLED, sc->wi_pm_enabled);

	/* Power Management Max Sleep */
	WI_SETVAL(WI_RID_MAX_SLEEP, sc->wi_max_sleep);

	/* Roaming type */
	WI_SETVAL(WI_RID_ROAMING_MODE, sc->wi_roaming);

	/* Specify the IBSS name */
	WI_SETSTR(WI_RID_OWN_SSID, sc->wi_ibss_name);

	/* Specify the network name */
	WI_SETSTR(WI_RID_DESIRED_SSID, sc->wi_net_name);

	/* Specify the frequency to use */
	WI_SETVAL(WI_RID_OWN_CHNL, sc->wi_channel);

	/* Program the nodename. */
	WI_SETSTR(WI_RID_NODENAME, sc->wi_node_name);

	/* Specify the authentication mode.
	 */
	WI_SETVAL(WI_RID_CNFAUTHMODE, sc->wi_authmode);

	/* Set our MAC address. */
	mac.wi_len = 4;
	mac.wi_type = WI_RID_MAC_NODE;
	bcopy((char *)&IFP2ENADDR(sc->ifp),
	   (char *)&mac.wi_mac_addr, ETHER_ADDR_LEN);
	wi_write_record(sc, (struct wi_ltv_gen *)&mac);

	/*
	 * Initialize promisc mode.
	 */
	if (ifp->if_flags & IFF_PROMISC)
		WI_SETVAL(WI_RID_PROMISC, 1);
	else
		WI_SETVAL(WI_RID_PROMISC, 0);

	/* Configure WEP. */
	if (sc->wi_has_wep) {
		WI_SETVAL(WI_RID_ENCRYPTION, sc->wi_use_wep);
		WI_SETVAL(WI_RID_TX_CRYPT_KEY, sc->wi_tx_key);
		sc->wi_keys.wi_len = (sizeof(struct wi_ltv_keys) / 2) + 1;
		sc->wi_keys.wi_type = WI_RID_DEFLT_CRYPT_KEYS;
		wi_write_record(sc, (struct wi_ltv_gen *)&sc->wi_keys);
	}

	/* Set multicast filter. */
	wi_setmulti(sc);

	/* Enable desired port */
	wi_cmd(sc, WI_CMD_ENABLE | sc->wi_portnum, 0, 0, 0);

	/* Carve out NIC buffers for data and management frames. */
	if (wi_alloc_nicmem(sc,
	    ETHER_MAX_LEN + sizeof(struct wi_frame) + 8, &id))
		device_printf(sc->dev, "tx buffer allocation failed\n");
	sc->wi_tx_data_id = id;

	if (wi_alloc_nicmem(sc,
	    ETHER_MAX_LEN + sizeof(struct wi_frame) + 8, &id))
		device_printf(sc->dev, "mgmt. buffer allocation failed\n");
	sc->wi_tx_mgmt_id = id;

	/* enable interrupts */
	CSR_WRITE_2(sc, WI_INT_EN, WI_INTRS);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Kick off the once-a-minute statistics inquiry. */
	sc->wi_stat_ch = timeout(wi_inquire, sc, hz * 60);

	WI_UNLOCK(sc, s);

	return;
}

/*
 * Transmit start routine: dequeue one frame, encapsulate it into the
 * NIC's TX buffer and fire a TX command.  Only one frame is in flight
 * at a time (IFF_OACTIVE gates reentry until the TX interrupt).
 */
static void wi_start(ifp)
	struct ifnet		*ifp;
{
	struct wi_softc		*sc;
	struct mbuf		*m0;
	struct wi_frame		tx_frame;
	struct ether_header	*eh;
	int			id;
	int			s;

	sc = ifp->if_softc;
	WI_LOCK(sc, s);

	if (sc->wi_gone) {
		WI_UNLOCK(sc, s);
		return;
	}

	if (ifp->if_flags & IFF_OACTIVE) {
		WI_UNLOCK(sc, s);
		return;
	}

	IF_DEQUEUE(&ifp->if_snd, m0);
	if (m0 == NULL) {
		WI_UNLOCK(sc, s);
		return;
	}

	bzero((char *)&tx_frame, sizeof(tx_frame));
	tx_frame.wi_frame_ctl = htole16(WI_FTYPE_DATA);
	id = sc->wi_tx_data_id;
	eh = mtod(m0, struct ether_header *);

	/*
	 * Use RFC1042 encoding for IP and ARP datagrams,
	 * 802.3 for anything else.
*/
	if (ntohs(eh->ether_type) > ETHER_MAX_LEN) {
		/* Ethernet II: build an 802.11 header plus SNAP header. */
		bcopy((char *)&eh->ether_dhost,
		    (char *)&tx_frame.wi_addr1, ETHER_ADDR_LEN);
		bcopy((char *)&eh->ether_shost,
		    (char *)&tx_frame.wi_addr2, ETHER_ADDR_LEN);
		bcopy((char *)&eh->ether_dhost,
		    (char *)&tx_frame.wi_dst_addr, ETHER_ADDR_LEN);
		bcopy((char *)&eh->ether_shost,
		    (char *)&tx_frame.wi_src_addr, ETHER_ADDR_LEN);

		tx_frame.wi_dat_len = m0->m_pkthdr.len - WI_SNAPHDR_LEN;
		tx_frame.wi_dat[0] = htons(WI_SNAP_WORD0);
		tx_frame.wi_dat[1] = htons(WI_SNAP_WORD1);
		tx_frame.wi_len = htons(m0->m_pkthdr.len - WI_SNAPHDR_LEN);
		tx_frame.wi_type = eh->ether_type;

		m_copydata(m0, sizeof(struct ether_header),
		    m0->m_pkthdr.len - sizeof(struct ether_header),
		    (caddr_t)&sc->wi_txbuf);

		wi_write_data(sc, id, 0, (caddr_t)&tx_frame,
		    sizeof(struct wi_frame));
		wi_write_data(sc, id, WI_802_11_OFFSET,
		    (caddr_t)&sc->wi_txbuf,
		    (m0->m_pkthdr.len - sizeof(struct ether_header)) + 2);
	} else {
		/* 802.3-style frame: length field, payload as-is. */
		tx_frame.wi_dat_len = m0->m_pkthdr.len;

		eh->ether_type = htons(m0->m_pkthdr.len - WI_SNAPHDR_LEN);
		m_copydata(m0, 0, m0->m_pkthdr.len, (caddr_t)&sc->wi_txbuf);

		wi_write_data(sc, id, 0, (caddr_t)&tx_frame,
		    sizeof(struct wi_frame));
		wi_write_data(sc, id, WI_802_3_OFFSET,
		    (caddr_t)&sc->wi_txbuf, m0->m_pkthdr.len + 2);
	}

	/*
	 * If there's a BPF listener, bounce a copy of
	 * this frame to him. Also, don't send this to the bpf sniffer
	 * if we're in procframe or monitor sniffing mode.
	 */
	if (!(sc->wi_procframe || sc->wi_debug.wi_monitor))
		BPF_MTAP(ifp, m0);

	m_freem(m0);

	if (wi_cmd(sc, WI_CMD_TX|WI_RECLAIM, id, 0, 0))
		device_printf(sc->dev, "xmit failed\n");

	ifp->if_flags |= IFF_OACTIVE;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	WI_UNLOCK(sc, s);

	return;
}

/*
 * Stop the interface: cancel the statistics timeout, disable
 * interrupts and the MAC port (only if the card still responds),
 * and clear RUNNING/OACTIVE.
 */
void owi_stop(sc)
	struct wi_softc		*sc;
{
	struct ifnet		*ifp;
	int			s;

	WI_LOCK(sc, s);

	untimeout(wi_inquire, sc, sc->wi_stat_ch);

	if (sc->wi_gone) {
		WI_UNLOCK(sc, s);
		return;
	}

	ifp = sc->ifp;

	/*
	 * If the card is gone and the memory port isn't mapped, we will
	 * (hopefully) get 0xffff back from the status read, which is not
	 * a valid status value.
	 */
	if (CSR_READ_2(sc, WI_STATUS) != 0xffff) {
		CSR_WRITE_2(sc, WI_INT_EN, 0);
		wi_cmd(sc, WI_CMD_DISABLE|sc->wi_portnum, 0, 0, 0);
	}

	ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE);

	WI_UNLOCK(sc, s);
	return;
}

/*
 * Watchdog: the TX timer (armed in wi_start()) expired without a TX
 * completion; reinitialize the card and count an output error.
 */
static void wi_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct wi_softc		*sc;

	sc = ifp->if_softc;

	device_printf(sc->dev, "watchdog timeout\n");

	wi_init(sc);

	ifp->if_oerrors++;

	return;
}

/*
 * Allocate bus resources: an I/O port window (or, for native-PCI
 * Prism 2.5, a memory window) plus an IRQ.  Returns 0 or ENXIO;
 * on IRQ failure previously allocated resources are released.
 */
int owi_alloc(dev, rid)
	device_t		dev;
	int			rid;
{
	struct wi_softc		*sc = device_get_softc(dev);

	if (sc->wi_bus_type != WI_BUS_PCI_NATIVE) {
		/* 64-byte-aligned I/O port window. */
		sc->iobase_rid = rid;
		sc->iobase = bus_alloc_resource(dev, SYS_RES_IOPORT,
		    &sc->iobase_rid, 0, ~0, (1 << 6),
		    rman_make_alignment_flags(1 << 6) | RF_ACTIVE);
		if (!sc->iobase) {
			device_printf(dev, "No I/O space?!\n");
			return (ENXIO);
		}

		sc->wi_io_addr = rman_get_start(sc->iobase);
		sc->wi_btag = rman_get_bustag(sc->iobase);
		sc->wi_bhandle = rman_get_bushandle(sc->iobase);
	} else {
		sc->mem_rid = rid;
		sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &sc->mem_rid, RF_ACTIVE);
		if (!sc->mem) {
			device_printf(dev, "No Mem space on prism2.5?\n");
			return (ENXIO);
		}

		sc->wi_btag = rman_get_bustag(sc->mem);
		sc->wi_bhandle = rman_get_bushandle(sc->mem);
	}

	/* PC Card interrupts cannot be shared; everything else can. */
	sc->irq_rid = 0;
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid,
	    RF_ACTIVE |
	    ((sc->wi_bus_type == WI_BUS_PCCARD) ?
0 : RF_SHAREABLE));
	if (!sc->irq) {
		owi_free(dev);
		device_printf(dev, "No irq?!\n");
		return (ENXIO);
	}

	sc->dev = dev;
	sc->wi_unit = device_get_unit(dev);

	return (0);
}

/*
 * Release everything owi_alloc() (and attach) acquired: I/O window,
 * IRQ, memory window and the ifnet.  Safe to call with any subset
 * allocated — each pointer is checked and NULLed after release.
 */
void owi_free(dev)
	device_t		dev;
{
	struct wi_softc		*sc = device_get_softc(dev);

	if (sc->iobase != NULL) {
		bus_release_resource(dev, SYS_RES_IOPORT,
		    sc->iobase_rid, sc->iobase);
		sc->iobase = NULL;
	}
	if (sc->irq != NULL) {
		bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);
		sc->irq = NULL;
	}
	if (sc->mem != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid,
		    sc->mem);
		sc->mem = NULL;
	}
	if (sc->ifp != NULL) {
		if_free(sc->ifp);
		sc->ifp = NULL;
	}

	return;
}

/* Shutdown hook: just quiesce the card. */
void owi_shutdown(dev)
	device_t		dev;
{
	struct wi_softc		*sc;

	sc = device_get_softc(dev);
	owi_stop(sc);

	return;
}

#ifdef WICACHE
/* wavelan signal strength cache code.
 * store signal/noise/quality on per MAC src basis in
 * a small fixed cache.  The cache wraps if > MAX slots
 * used.  The cache may be zeroed out to start over.
 * Two simple filters exist to reduce computation:
 * 1. ip only (literally 0x800) which may be used
 * to ignore some packets.  It defaults to ip only.
 * it could be used to focus on broadcast, non-IP 802.11 beacons.
 * 2. multicast/broadcast only.  This may be used to
 * ignore unicast packets and only cache signal strength
 * for multicast/broadcast packets (beacons); e.g., Mobile-IP
 * beacons and not unicast traffic.
 *
 * The cache stores (MAC src(index), IP src (major clue), signal,
 *	quality, noise)
 *
 * No apologies for storing IP src here.  It's easy and saves much
 * trouble elsewhere.  The cache is assumed to be INET dependent,
 * although it need not be.
 */
#ifdef documentation
int wi_sigitems;	/* number of cached entries */
struct wi_sigcache wi_sigcache[MAXWICACHE];	/* array of cache entries */
int wi_nextitem;	/* index/# of entries */
#endif

/* control variables for cache filtering.  Basic idea is
 * to reduce cost (e.g., to only Mobile-IP agent beacons
 * which are broadcast or multicast).  Still you might
 * want to measure signal strength with unicast ping packets
 * on a pt. to pt. ant. setup.
 */
/* set true if you want to limit cache items to broadcast/mcast
 * only packets (not unicast).  Useful for mobile-ip beacons which
 * are broadcast/multicast at network layer.  Default is all packets
 * so ping/unicast will work say with pt. to pt. antennae setup.
 */
static int wi_cache_mcastonly = 0;
SYSCTL_INT(_machdep, OID_AUTO, wi_cache_mcastonly, CTLFLAG_RW,
	&wi_cache_mcastonly, 0, "");

/* set true if you want to limit cache items to IP packets only
*/
static int wi_cache_iponly = 1;
SYSCTL_INT(_machdep, OID_AUTO, wi_cache_iponly, CTLFLAG_RW,
	&wi_cache_iponly, 0, "");

/*
 * Original comments:
 * -----------------
 * wi_cache_store, per rx packet store signal
 * strength in MAC (src) indexed cache.
 *
 * follows linux driver in how signal strength is computed.
 * In ad hoc mode, we use the rx_quality field.
 * signal and noise are trimmed to fit in the range from 47..138.
 * rx_quality field MSB is signal strength.
 * rx_quality field LSB is noise.
 * "quality" is (signal - noise) as is log value.
 * note: quality CAN be negative.
 *
 * In BSS mode, we use the RID for communication quality.
 * TBD: BSS mode is currently untested.
 *
 * Bill's comments:
 * ---------------
 * Actually, we use the rx_quality field all the time for both "ad-hoc"
 * and BSS modes. Why? Because reading an RID is really, really expensive:
 * there's a bunch of PIO operations that have to be done to read a record
 * from the NIC, and reading the comms quality RID each time a packet is
 * received can really hurt performance. We don't have to do this anyway:
 * the comms quality field only reflects the values in the rx_quality field
 * anyway. The comms quality RID is only meaningful in infrastructure mode,
 * but the values it contains are updated based on the rx_quality from
 * frames received from the access point.
*
 * Also, according to Lucent, the signal strength and noise level values
 * can be converted to dBms by subtracting 149, so I've modified the code
 * to do that instead of the scaling it did originally.
 */
static void
wi_cache_store(struct wi_softc *sc, struct ether_header *eh,
    struct mbuf *m, unsigned short rx_quality)
{
	struct ip *ip = 0;
	int i;
	static int cache_slot = 0;	/* use this cache entry */
	static int wrapindex = 0;	/* next "free" cache entry */
	int sig, noise;
	int sawip=0;

	/*
	 * filters:
	 * 1. ip only
	 * 2. configurable filter to throw out unicast packets,
	 * keep multicast only.
	 */

	if ((ntohs(eh->ether_type) == ETHERTYPE_IP)) {
		sawip = 1;
	}

	/*
	 * filter for ip packets only
	 */
	if (wi_cache_iponly && !sawip) {
		return;
	}

	/*
	 * filter for broadcast/multicast only
	 * (low bit of the first destination octet set)
	 */
	if (wi_cache_mcastonly && ((eh->ether_dhost[0] & 1) == 0)) {
		return;
	}

#ifdef SIGDEBUG
	printf("owi%d: q value %x (MSB=0x%x, LSB=0x%x) \n", sc->wi_unit,
	    rx_quality & 0xffff, rx_quality >> 8, rx_quality & 0xff);
#endif

	/*
	 * find the ip header.  we want to store the ip_src
	 * address.  NOTE(review): assumes the IP header directly
	 * follows in this mbuf — confirm against the rx path.
	 */
	if (sawip)
		ip = mtod(m, struct ip *);

	/*
	 * do a linear search for a matching MAC address
	 * in the cache table
	 * . MAC address is 6 bytes,
	 * . var w_nextitem holds total number of entries already cached
	 */
	for(i = 0; i < sc->wi_nextitem; i++) {
		if (! bcmp(eh->ether_shost , sc->wi_sigcache[i].macsrc,  6 )) {
			/*
			 * Match!,
			 * so we already have this entry,
			 * update the data
			 */
			break;
		}
	}

	/*
	 * did we find a matching mac address?
	 * if yes, then overwrite a previously existing cache entry
	 */
	if (i < sc->wi_nextitem )   {
		cache_slot = i;
	}
	/*
	 * else, have a new address entry,so
	 * add this new entry,
	 * if table full, then we need to replace LRU entry
	 */
	else    {

		/*
		 * check for space in cache table
		 * note: wi_nextitem also holds number of entries
		 * added in the cache table
		 */
		if ( sc->wi_nextitem < MAXWICACHE ) {
			cache_slot = sc->wi_nextitem;
			sc->wi_nextitem++;
			sc->wi_sigitems = sc->wi_nextitem;
		}
		/* no space found, so simply wrap with wrap index
		 * and "zap" the next entry
		 */
		else {
			if (wrapindex == MAXWICACHE) {
				wrapindex = 0;
			}
			cache_slot = wrapindex++;
		}
	}

	/*
	 * invariant: cache_slot now points at some slot
	 * in cache.
	 */
	if (cache_slot < 0 || cache_slot >= MAXWICACHE) {
		log(LOG_ERR, "owi_cache_store, bad index: %d of "
		    "[0..%d], gross cache error\n",
		    cache_slot, MAXWICACHE);
		return;
	}

	/*
	 * store items in cache
	 * .ip source address
	 * .mac src
	 * .signal, etc.
	 */
	if (sawip)
		sc->wi_sigcache[cache_slot].ipsrc = ip->ip_src.s_addr;
	bcopy( eh->ether_shost, sc->wi_sigcache[cache_slot].macsrc,  6);

	sig = (rx_quality >> 8) & 0xFF;
	noise = rx_quality & 0xFF;

	/*
	 * -149 is Lucent specific to convert to dBm.  Prism2 cards do
	 * things differently, sometimes don't have a noise measurement,
	 * and is firmware dependent :-(
	 */
	sc->wi_sigcache[cache_slot].signal = sig - 149;
	sc->wi_sigcache[cache_slot].noise = noise - 149;
	sc->wi_sigcache[cache_slot].quality = sig - noise;

	return;
}
#endif

/*
 * Fetch the SSID currently in use into `ssid' (IEEE80211_NWID_LEN
 * bytes) and its length into *len.  In BSS mode the current SSID is
 * only meaningful while associated (COMMQUAL != 0); otherwise the
 * configured network name is returned.
 */
static int wi_get_cur_ssid(sc, ssid, len)
	struct wi_softc		*sc;
	char			*ssid;
	int			*len;
{
	int			error = 0;
	struct wi_req		wreq;

	wreq.wi_len = WI_MAX_DATALEN;
	switch (sc->wi_ptype) {
	case WI_PORTTYPE_IBSS:
	case WI_PORTTYPE_ADHOC:
		wreq.wi_type = WI_RID_CURRENT_SSID;
		error = wi_read_record(sc, (struct wi_ltv_gen *)&wreq);
		if (error != 0)
			break;
		if (wreq.wi_val[0] > IEEE80211_NWID_LEN) {
			error = EINVAL;
			break;
		}
		*len = wreq.wi_val[0];
		bcopy(&wreq.wi_val[1], ssid, IEEE80211_NWID_LEN);
		break;
	case WI_PORTTYPE_BSS:
		wreq.wi_type = WI_RID_COMMQUAL;
		error = wi_read_record(sc, (struct wi_ltv_gen *)&wreq);
		if (error != 0)
			break;
		if (wreq.wi_val[0] != 0) /* associated */ {
			wreq.wi_type = WI_RID_CURRENT_SSID;
			wreq.wi_len = WI_MAX_DATALEN;
			error = wi_read_record(sc,
			    (struct wi_ltv_gen *)&wreq);
			if (error != 0)
				break;
			if (wreq.wi_val[0] > IEEE80211_NWID_LEN) {
				error = EINVAL;
				break;
			}
			*len = wreq.wi_val[0];
			bcopy(&wreq.wi_val[1], ssid, IEEE80211_NWID_LEN);
		} else {
			*len = IEEE80211_NWID_LEN;
			bcopy(sc->wi_net_name, ssid, IEEE80211_NWID_LEN);
		}
		break;
	default:
		error = EINVAL;
		break;
	}

	return error;
}

/*
 * ifmedia change callback: translate the selected media word into
 * port type, IBSS-creation flag and TX rate, reinitializing the NIC
 * only if something actually changed.
 */
static int wi_media_change(ifp)
	struct ifnet		*ifp;
{
	struct wi_softc		*sc = ifp->if_softc;
	int			otype = sc->wi_ptype;
	int			orate = sc->wi_tx_rate;
	int			ocreate_ibss = sc->wi_create_ibss;

	sc->wi_create_ibss = 0;

	switch (sc->ifmedia.ifm_cur->ifm_media & IFM_OMASK) {
	case 0:
		sc->wi_ptype = WI_PORTTYPE_BSS;
		break;
	case IFM_IEEE80211_ADHOC:
		sc->wi_ptype = WI_PORTTYPE_ADHOC;
		break;
	case IFM_IEEE80211_IBSSMASTER:
	case IFM_IEEE80211_IBSSMASTER|IFM_IEEE80211_IBSS:
		if (!(sc->wi_flags & WI_FLAGS_HAS_CREATE_IBSS))
			return (EINVAL);
		sc->wi_create_ibss = 1;
		/* FALLTHROUGH */
	case IFM_IEEE80211_IBSS:
		sc->wi_ptype = WI_PORTTYPE_IBSS;
		break;
	default:
		/* Invalid combination. */
		return (EINVAL);
	}

	/* wi_tx_rate is in Mbps; 3 is the firmware's "auto" value. */
	switch (IFM_SUBTYPE(sc->ifmedia.ifm_cur->ifm_media)) {
	case IFM_IEEE80211_DS1:
		sc->wi_tx_rate = 1;
		break;
	case IFM_IEEE80211_DS2:
		sc->wi_tx_rate = 2;
		break;
	case IFM_IEEE80211_DS5:
		sc->wi_tx_rate = 5;
		break;
	case IFM_IEEE80211_DS11:
		sc->wi_tx_rate = 11;
		break;
	case IFM_AUTO:
		sc->wi_tx_rate = 3;
		break;
	}

	/* Only bounce the interface if something really changed. */
	if (ocreate_ibss != sc->wi_create_ibss || otype != sc->wi_ptype ||
	    orate != sc->wi_tx_rate)
		wi_init(sc);

	return(0);
}

/*
 * ifmedia status callback: report active media (querying the NIC for
 * the current rate when in auto mode) and link status.
 */
static void wi_media_status(ifp, imr)
	struct ifnet		*ifp;
	struct ifmediareq	*imr;
{
	struct wi_req		wreq;
	struct wi_softc		*sc = ifp->if_softc;

	if (sc->wi_tx_rate == 3) {
		imr->ifm_active = IFM_IEEE80211|IFM_AUTO;
		if (sc->wi_ptype == WI_PORTTYPE_ADHOC)
			imr->ifm_active |= IFM_IEEE80211_ADHOC;
		else if (sc->wi_ptype == WI_PORTTYPE_IBSS) {
			if (sc->wi_create_ibss)
				imr->ifm_active |= IFM_IEEE80211_IBSSMASTER;
			else
				imr->ifm_active |= IFM_IEEE80211_IBSS;
		}
		wreq.wi_type = WI_RID_CUR_TX_RATE;
		wreq.wi_len = WI_MAX_DATALEN;
		if (wi_read_record(sc, (struct wi_ltv_gen *)&wreq) == 0) {
			/*
			 * NOTE(review): firmware reports 6 for the
			 * 5.5Mbps rate here — presumed intentional
			 * (firmware encoding); confirm.
			 */
			switch(wreq.wi_val[0]) {
			case 1:
				imr->ifm_active |= IFM_IEEE80211_DS1;
				break;
			case 2:
				imr->ifm_active |= IFM_IEEE80211_DS2;
				break;
			case 6:
				imr->ifm_active |= IFM_IEEE80211_DS5;
				break;
			case 11:
				imr->ifm_active |= IFM_IEEE80211_DS11;
				break;
			}
		}
	} else {
		imr->ifm_active = sc->ifmedia.ifm_cur->ifm_media;
	}
	imr->ifm_status = IFM_AVALID;
	if (sc->wi_ptype == WI_PORTTYPE_ADHOC ||
	    sc->wi_ptype == WI_PORTTYPE_IBSS)
		/*
		 * XXX: It would be nice if we could give some actually
		 * useful status like whether we joined another IBSS or
		 * created one ourselves.
*/
		imr->ifm_status |= IFM_ACTIVE;
	else {
		/* In BSS mode, nonzero COMMQUAL means we're associated. */
		wreq.wi_type = WI_RID_COMMQUAL;
		wreq.wi_len = WI_MAX_DATALEN;
		if (wi_read_record(sc, (struct wi_ltv_gen *)&wreq) == 0 &&
		    wreq.wi_val[0] != 0)
			imr->ifm_status |= IFM_ACTIVE;
	}
}
Index: stable/6/sys/dev/pdq/pdq_ifsubr.c
===================================================================
--- stable/6/sys/dev/pdq/pdq_ifsubr.c	(revision 149421)
+++ stable/6/sys/dev/pdq/pdq_ifsubr.c	(revision 149422)
@@ -1,715 +1,717 @@
/*	$NetBSD: pdq_ifsubr.c,v 1.38 2001/12/21 23:21:47 matt Exp $	*/

/*-
 * Copyright (c) 1995, 1996 Matt Thomas
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pdq_ifsubr.c,v 1.12 1997/06/05 01:56:35 thomas Exp$
 */

#include
__FBSDID("$FreeBSD$");

/*
 * DEC PDQ FDDI Controller; code for BSD derived operating systems
 *
 * This module provides bus-independent BSD specific O/S functions.
 * (ie. it provides an ifnet interface to the rest of the system)
 */

#define	PDQ_OSSUPPORT

/* NOTE(review): header names inside <...> were lost in extraction;
 * restore from repository before building. */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

devclass_t pdq_devclass;

/*
 * if_init-style entry point: sync the PDQ flags (promisc, SMT
 * pass-through) with the interface flags and start or stop the
 * controller depending on IFF_UP.
 */
static void
pdq_ifinit(
    pdq_softc_t *sc)
{
	if (PDQ_IFNET(sc)->if_flags & IFF_UP) {
		PDQ_IFNET(sc)->if_flags |= IFF_RUNNING;
		if (PDQ_IFNET(sc)->if_flags & IFF_PROMISC) {
			sc->sc_pdq->pdq_flags |= PDQ_PROMISC;
		} else {
			sc->sc_pdq->pdq_flags &= ~PDQ_PROMISC;
		}
		if (PDQ_IFNET(sc)->if_flags & IFF_LINK1) {
			sc->sc_pdq->pdq_flags |= PDQ_PASS_SMT;
		} else {
			sc->sc_pdq->pdq_flags &= ~PDQ_PASS_SMT;
		}
		sc->sc_pdq->pdq_flags |= PDQ_RUNNING;
		pdq_run(sc->sc_pdq);
	} else {
		PDQ_IFNET(sc)->if_flags &= ~IFF_RUNNING;
		sc->sc_pdq->pdq_flags &= ~PDQ_RUNNING;
		pdq_stop(sc->sc_pdq);
	}
}

static void
pdq_ifwatchdog(
    struct ifnet *ifp)
{
	/*
	 * No progress was made on the transmit queue for PDQ_OS_TX_TRANSMIT
	 * seconds.  Remove all queued packets.
	 */

	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	for (;;) {
		struct mbuf *m;
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			return;
		PDQ_OS_DATABUF_FREE(PDQ_OS_IFP_TO_SOFTC(ifp)->sc_pdq, m);
	}
}

/*
 * Transmit start routine: drain the send queue into the PDQ transmit
 * ring, prepending the FDDI packet-header octets (and, with bus_dma,
 * creating/loading a per-mbuf DMA map) as needed.  On ring-full the
 * current mbuf is put back and IFF_OACTIVE is set.
 */
static void
pdq_ifstart(
    struct ifnet *ifp)
{
	pdq_softc_t * const sc = PDQ_OS_IFP_TO_SOFTC(ifp);
	struct mbuf *m;
	int tx = 0;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	if (PDQ_IFNET(sc)->if_timer == 0)
		PDQ_IFNET(sc)->if_timer = PDQ_OS_TX_TIMEOUT;

	if ((sc->sc_pdq->pdq_flags & PDQ_TXOK) == 0) {
		PDQ_IFNET(sc)->if_flags |= IFF_OACTIVE;
		return;
	}
	/* Flag to pdq_os_restart_transmitter that we're already here. */
	sc->sc_flags |= PDQIF_DOWNCALL;
	for (;; tx = 1) {
		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;
#if defined(PDQ_BUS_DMA) && !defined(PDQ_BUS_DMA_NOTX)
		if ((m->m_flags & M_HASTXDMAMAP) == 0) {
			bus_dmamap_t map;
			if (PDQ_OS_HDR_OFFSET != PDQ_RX_FC_OFFSET) {
				m->m_data[0] = PDQ_FDDI_PH0;
				m->m_data[1] = PDQ_FDDI_PH1;
				m->m_data[2] = PDQ_FDDI_PH2;
			}
			if (!bus_dmamap_create(sc->sc_dmatag,
			    m->m_pkthdr.len, 255,
			    m->m_pkthdr.len, 0,
			    BUS_DMA_NOWAIT, &map)) {
				if (!bus_dmamap_load_mbuf(sc->sc_dmatag,
				    map, m,
				    BUS_DMA_WRITE|BUS_DMA_NOWAIT)) {
					bus_dmamap_sync(sc->sc_dmatag, map, 0,
					    m->m_pkthdr.len,
					    BUS_DMASYNC_PREWRITE);
					M_SETCTX(m, map);
					m->m_flags |= M_HASTXDMAMAP;
				}
			}
			if ((m->m_flags & M_HASTXDMAMAP) == 0)
				break;
		}
#else
		if (PDQ_OS_HDR_OFFSET != PDQ_RX_FC_OFFSET) {
			m->m_data[0] = PDQ_FDDI_PH0;
			m->m_data[1] = PDQ_FDDI_PH1;
			m->m_data[2] = PDQ_FDDI_PH2;
		}
#endif

		if (pdq_queue_transmit_data(sc->sc_pdq, m) == PDQ_FALSE)
			break;
	}
	if (m != NULL) {
		ifp->if_flags |= IFF_OACTIVE;
		IF_PREPEND(&ifp->if_snd, m);
	}
	if (tx)
		PDQ_DO_TYPE2_PRODUCER(sc->sc_pdq);
	sc->sc_flags &= ~PDQIF_DOWNCALL;
}

/*
 * Receive upcall from the PDQ core: tear down any RX DMA maps, drop
 * errored or non-LLC frames, and hand the packet to the FDDI input
 * path via if_input.
 */
void
pdq_os_receive_pdu(
    pdq_t *pdq,
    struct mbuf *m,
    size_t pktlen,
    int drop)
{
	pdq_softc_t *sc = pdq->pdq_os_ctx;
	struct ifnet *ifp = PDQ_IFNET(sc);
	struct fddi_header *fh;

	ifp->if_ipackets++;
#if defined(PDQ_BUS_DMA)
	{
		/*
		 * Even though the first mbuf start at the first fddi header
		 * octet, the dmamap starts PDQ_OS_HDR_OFFSET octets earlier.
		 * Any additional mbufs will start normally.
		 */
		int offset = PDQ_OS_HDR_OFFSET;
		struct mbuf *m0;
		for (m0 = m; m0 != NULL; m0 = m0->m_next, offset = 0) {
			pdq_os_databuf_sync(sc, m0, offset, m0->m_len,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->sc_dmatag,
			    M_GETCTX(m0, bus_dmamap_t));
			bus_dmamap_destroy(sc->sc_dmatag,
			    M_GETCTX(m0, bus_dmamap_t));
			m0->m_flags &= ~M_HASRXDMAMAP;
			M_SETCTX(m0, NULL);
		}
	}
#endif
	m->m_pkthdr.len = pktlen;
	fh = mtod(m, struct fddi_header *);
	if (drop || (fh->fddi_fc & (FDDIFC_L|FDDIFC_F)) != FDDIFC_LLC_ASYNC) {
		ifp->if_iqdrops++;
		ifp->if_ierrors++;
		PDQ_OS_DATABUF_FREE(pdq, m);
		return;
	}

	m->m_pkthdr.rcvif = ifp;
	(*ifp->if_input)(ifp, m);
}

/*
 * TX-ring-space-available upcall: clear OACTIVE and, unless we are
 * already inside pdq_ifstart (PDQIF_DOWNCALL), restart transmission.
 */
void
pdq_os_restart_transmitter(
    pdq_t *pdq)
{
	pdq_softc_t *sc = pdq->pdq_os_ctx;
	PDQ_IFNET(sc)->if_flags &= ~IFF_OACTIVE;
	if (IFQ_IS_EMPTY(&PDQ_IFNET(sc)->if_snd) == 0) {
		PDQ_IFNET(sc)->if_timer = PDQ_OS_TX_TIMEOUT;
		if ((sc->sc_flags & PDQIF_DOWNCALL) == 0)
			pdq_ifstart(PDQ_IFNET(sc));
	} else {
		PDQ_IFNET(sc)->if_timer = 0;
	}
}

/*
 * TX completion upcall: optionally tap the frame to BPF, free it and
 * count the output packet.
 */
void
pdq_os_transmit_done(
    pdq_t *pdq,
    struct mbuf *m)
{
	pdq_softc_t *sc = pdq->pdq_os_ctx;
#if NBPFILTER > 0
	/*
	 * NOTE(review): "PQD_IFNET" looks like a typo for "PDQ_IFNET".
	 * It only compiles when NBPFILTER > 0 is defined — confirm and
	 * fix upstream.
	 */
	if (PQD_IFNET(sc)->if_bpf != NULL)
		PDQ_BPF_MTAP(sc, m);
#endif
	PDQ_OS_DATABUF_FREE(pdq, m);
	PDQ_IFNET(sc)->if_opackets++;
}

/*
 * Fill the controller's CAM address list from the interface's
 * multicast membership; if the list does not fit, all-multicast
 * mode is turned on instead (see below).
 */
void
pdq_os_addr_fill(
    pdq_t *pdq,
    pdq_lanaddr_t *addr,
    size_t num_addrs)
{
	pdq_softc_t *sc = pdq->pdq_os_ctx;
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;

	ifp = sc->ifp;

	/*
	 * ADDR_FILTER_SET is always issued before FILTER_SET so
	 * we can play with PDQ_ALLMULTI and not worry about
	 * queueing a FILTER_SET ourselves.
*/ pdq->pdq_flags &= ~PDQ_ALLMULTI; #if defined(IFF_ALLMULTI) PDQ_IFNET(sc)->if_flags &= ~IFF_ALLMULTI; #endif + IF_ADDR_LOCK(PDQ_IFNET(sc)); for (ifma = TAILQ_FIRST(&PDQ_IFNET(sc)->if_multiaddrs); ifma && num_addrs > 0; ifma = TAILQ_NEXT(ifma, ifma_link)) { char *mcaddr; if (ifma->ifma_addr->sa_family != AF_LINK) continue; mcaddr = LLADDR((struct sockaddr_dl *)ifma->ifma_addr); ((u_short *) addr->lanaddr_bytes)[0] = ((u_short *) mcaddr)[0]; ((u_short *) addr->lanaddr_bytes)[1] = ((u_short *) mcaddr)[1]; ((u_short *) addr->lanaddr_bytes)[2] = ((u_short *) mcaddr)[2]; addr++; num_addrs--; } + IF_ADDR_UNLOCK(PDQ_IFNET(sc)); /* * If not all the address fit into the CAM, turn on all-multicast mode. */ if (ifma != NULL) { pdq->pdq_flags |= PDQ_ALLMULTI; #if defined(IFF_ALLMULTI) PDQ_IFNET(sc)->if_flags |= IFF_ALLMULTI; #endif } } #if defined(IFM_FDDI) static int pdq_ifmedia_change( struct ifnet *ifp) { pdq_softc_t * const sc = PDQ_OS_IFP_TO_SOFTC(ifp); if (sc->sc_ifmedia.ifm_media & IFM_FDX) { if ((sc->sc_pdq->pdq_flags & PDQ_WANT_FDX) == 0) { sc->sc_pdq->pdq_flags |= PDQ_WANT_FDX; if (sc->sc_pdq->pdq_flags & PDQ_RUNNING) pdq_run(sc->sc_pdq); } } else if (sc->sc_pdq->pdq_flags & PDQ_WANT_FDX) { sc->sc_pdq->pdq_flags &= ~PDQ_WANT_FDX; if (sc->sc_pdq->pdq_flags & PDQ_RUNNING) pdq_run(sc->sc_pdq); } return 0; } static void pdq_ifmedia_status( struct ifnet *ifp, struct ifmediareq *ifmr) { pdq_softc_t * const sc = PDQ_OS_IFP_TO_SOFTC(ifp); ifmr->ifm_status = IFM_AVALID; if (sc->sc_pdq->pdq_flags & PDQ_IS_ONRING) ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active = (ifmr->ifm_current & ~IFM_FDX); if (sc->sc_pdq->pdq_flags & PDQ_IS_FDX) ifmr->ifm_active |= IFM_FDX; } void pdq_os_update_status( pdq_t *pdq, const void *arg) { pdq_softc_t * const sc = pdq->pdq_os_ctx; const pdq_response_status_chars_get_t *rsp = arg; int media = 0; switch (rsp->status_chars_get.pmd_type[0]) { case PDQ_PMD_TYPE_ANSI_MUTLI_MODE: media = IFM_FDDI_MMF; break; case PDQ_PMD_TYPE_ANSI_SINGLE_MODE_TYPE_1: 
		media = IFM_FDDI_SMF;
		break;
	/* NB: "SIGNLE" is a typo in the constant's own definition (sic). */
	case PDQ_PMD_TYPE_ANSI_SIGNLE_MODE_TYPE_2:
		media = IFM_FDDI_SMF;
		break;
	case PDQ_PMD_TYPE_UNSHIELDED_TWISTED_PAIR:
		media = IFM_FDDI_UTP;
		break;
	default:
		media |= IFM_MANUAL;
	}
	/* Dual-attach stations get the DA modifier. */
	if (rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS)
		media |= IFM_FDDI_DA;
	sc->sc_ifmedia.ifm_media = media | IFM_FDDI;
}
#endif /* defined(IFM_FDDI) */

/*
 * ioctl handler: flag changes re-init the chip, multicast changes
 * re-run it, media requests go to ifmedia, everything else falls
 * through to the generic FDDI handler.
 */
static int
pdq_ifioctl(
    struct ifnet *ifp,
    u_long cmd,
    caddr_t data)
{
	pdq_softc_t *sc = PDQ_OS_IFP_TO_SOFTC(ifp);
	int error = 0;

	PDQ_LOCK(sc);
	switch (cmd) {
	case SIOCSIFFLAGS: {
		pdq_ifinit(sc);
		break;
	}

	case SIOCADDMULTI:
	case SIOCDELMULTI: {
		/* Reload the CAM filter only while the interface is up. */
		if (PDQ_IFNET(sc)->if_flags & IFF_RUNNING) {
			pdq_run(sc->sc_pdq);
			error = 0;
		}
		break;
	}

#if defined(IFM_FDDI) && defined(SIOCSIFMEDIA)
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA: {
		struct ifreq *ifr = (struct ifreq *)data;
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, cmd);
		break;
	}
#endif

	default: {
		error = fddi_ioctl(ifp, cmd, data);
		break;
	}
	}
	PDQ_UNLOCK(sc);

	return error;
}

#ifndef IFF_NOTRAILERS
#define IFF_NOTRAILERS 0
#endif

/*
 * Attach: allocate the ifnet, initialise the softc mutex, wire up the
 * interface methods/media and register with the FDDI layer.
 */
void
pdq_ifattach(pdq_softc_t *sc)
{
	struct ifnet *ifp;

	ifp = PDQ_IFNET(sc) = if_alloc(IFT_FDDI);
	if (ifp == NULL)
		panic("%s: can not if_alloc()", device_get_nameunit(sc->dev));

	mtx_init(&sc->mtx, device_get_nameunit(sc->dev), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);

	ifp->if_softc = sc;
	ifp->if_init = (if_init_f_t *)pdq_ifinit;
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;
	ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_NOTRAILERS|IFF_MULTICAST;

	ifp->if_watchdog = pdq_ifwatchdog;
	ifp->if_ioctl = pdq_ifioctl;
	ifp->if_start = pdq_ifstart;

#if defined(IFM_FDDI)
    {
	const int media = sc->sc_ifmedia.ifm_media;
	ifmedia_init(&sc->sc_ifmedia, IFM_FDX,
	    pdq_ifmedia_change, pdq_ifmedia_status);
	ifmedia_add(&sc->sc_ifmedia, media, 0, 0);
	ifmedia_set(&sc->sc_ifmedia, media);
    }
#endif

	fddi_ifattach(ifp, FDDI_BPF_SUPPORTED);
}

/*
 * Detach: unhook from the FDDI layer, then stop the chip and free
 * all bus resources (continues on the following lines).
 */
void
pdq_ifdetach (pdq_softc_t *sc)
{
	struct ifnet *ifp;

	ifp = sc->ifp;

	fddi_ifdetach(ifp, FDDI_BPF_SUPPORTED);
	if_free(ifp);
	pdq_stop(sc->sc_pdq);
	pdq_free(sc->dev);

	return;
}

/*
 * Release every bus resource the device holds (I/O window, memory
 * window, interrupt) and destroy the softc mutex.  Each resource is
 * released only if it was actually allocated.
 */
void
pdq_free (device_t dev)
{
	pdq_softc_t *sc;

	sc = device_get_softc(dev);

	if (sc->io)
		bus_release_resource(dev, sc->io_type, sc->io_rid, sc->io);
	if (sc->mem)
		bus_release_resource(dev, sc->mem_type, sc->mem_rid, sc->mem);
	if (sc->irq_ih)
		bus_teardown_intr(dev, sc->irq, sc->irq_ih);
	if (sc->irq)
		bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq);

	/*
	 * Destroy the mutex.
	 */
	if (mtx_initialized(&sc->mtx) != 0) {
		mtx_destroy(&sc->mtx);
	}

	return;
}

#if defined(PDQ_BUS_DMA)
/*
 * Allocate, map and DMA-load the descriptor block, the unsolicited
 * event page and the consumer block.  "steps" records how far setup
 * got so that the unwind switch (on the following lines) can release
 * exactly what was acquired.  Returns 0 on success, non-zero otherwise.
 */
int
pdq_os_memalloc_contig(
    pdq_t *pdq)
{
	pdq_softc_t * const sc = pdq->pdq_os_ctx;
	bus_dma_segment_t db_segs[1], ui_segs[1], cb_segs[1];
	int db_nsegs = 0, ui_nsegs = 0;
	int steps = 0;
	int not_ok;

	not_ok = bus_dmamem_alloc(sc->sc_dmatag,
	    sizeof(*pdq->pdq_dbp), sizeof(*pdq->pdq_dbp),
	    sizeof(*pdq->pdq_dbp), db_segs, 1, &db_nsegs,
	    BUS_DMA_NOWAIT);
	if (!not_ok) {
		steps = 1;
		not_ok = bus_dmamem_map(sc->sc_dmatag, db_segs, db_nsegs,
		    sizeof(*pdq->pdq_dbp), (caddr_t *) &pdq->pdq_dbp,
		    BUS_DMA_NOWAIT);
	}
	if (!not_ok) {
		steps = 2;
		not_ok = bus_dmamap_create(sc->sc_dmatag, db_segs[0].ds_len, 1,
		    0x2000, 0, BUS_DMA_NOWAIT, &sc->sc_dbmap);
	}
	if (!not_ok) {
		steps = 3;
		not_ok = bus_dmamap_load(sc->sc_dmatag, sc->sc_dbmap,
		    pdq->pdq_dbp, sizeof(*pdq->pdq_dbp),
		    NULL, BUS_DMA_NOWAIT);
	}
	if (!not_ok) {
		steps = 4;
		/* Descriptor block is loaded; record its bus address. */
		pdq->pdq_pa_descriptor_block = sc->sc_dbmap->dm_segs[0].ds_addr;
		not_ok = bus_dmamem_alloc(sc->sc_dmatag,
		    PDQ_OS_PAGESIZE, PDQ_OS_PAGESIZE, PDQ_OS_PAGESIZE,
		    ui_segs, 1, &ui_nsegs, BUS_DMA_NOWAIT);
	}
	if (!not_ok) {
		steps = 5;
		not_ok = bus_dmamem_map(sc->sc_dmatag, ui_segs, ui_nsegs,
		    PDQ_OS_PAGESIZE,
		    (caddr_t *) &pdq->pdq_unsolicited_info.ui_events,
		    BUS_DMA_NOWAIT);
	}
	if (!not_ok) {
		steps = 6;
		not_ok = bus_dmamap_create(sc->sc_dmatag, ui_segs[0].ds_len, 1,
		    PDQ_OS_PAGESIZE, 0, BUS_DMA_NOWAIT, &sc->sc_uimap);
	}
	if (!not_ok) {
		steps = 7;
		not_ok = bus_dmamap_load(sc->sc_dmatag, sc->sc_uimap,
		    pdq->pdq_unsolicited_info.ui_events, PDQ_OS_PAGESIZE, NULL,
		    BUS_DMA_NOWAIT);
	}
	if (!not_ok) {
		steps = 8;
		pdq->pdq_unsolicited_info.ui_pa_bufstart =
		    sc->sc_uimap->dm_segs[0].ds_addr;
		/* The consumer block lives inside the descriptor block;
		 * derive its segment from the descriptor block segment. */
		cb_segs[0] = db_segs[0];
		cb_segs[0].ds_addr +=
		    offsetof(pdq_descriptor_block_t, pdqdb_consumer);
		cb_segs[0].ds_len = sizeof(pdq_consumer_block_t);
		not_ok = bus_dmamem_map(sc->sc_dmatag, cb_segs, 1,
		    sizeof(*pdq->pdq_cbp), (caddr_t *) &pdq->pdq_cbp,
		    BUS_DMA_NOWAIT|BUS_DMA_COHERENT);
	}
	if (!not_ok) {
		steps = 9;
		not_ok = bus_dmamap_create(sc->sc_dmatag, cb_segs[0].ds_len, 1,
		    0x2000, 0, BUS_DMA_NOWAIT, &sc->sc_cbmap);
	}
	if (!not_ok) {
		steps = 10;
		not_ok = bus_dmamap_load(sc->sc_dmatag, sc->sc_cbmap,
		    (caddr_t) pdq->pdq_cbp, sizeof(*pdq->pdq_cbp),
		    NULL, BUS_DMA_NOWAIT);
	}
	if (!not_ok) {
		pdq->pdq_pa_consumer_block = sc->sc_cbmap->dm_segs[0].ds_addr;
		return not_ok;
	}

	/*
	 * Failure: unwind whatever succeeded, in reverse order.
	 * NOTE(review): "steps" never exceeds 10 above, so "case 11"
	 * appears unreachable — confirm before relying on it.
	 */
	switch (steps) {
	case 11: {
		bus_dmamap_unload(sc->sc_dmatag, sc->sc_cbmap);
		/* FALL THROUGH */
	}
	case 10: {
		bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cbmap);
		/* FALL THROUGH */
	}
	case 9: {
		bus_dmamem_unmap(sc->sc_dmatag, (caddr_t) pdq->pdq_cbp,
		    sizeof(*pdq->pdq_cbp));
		/* FALL THROUGH */
	}
	case 8: {
		bus_dmamap_unload(sc->sc_dmatag, sc->sc_uimap);
		/* FALL THROUGH */
	}
	case 7: {
		bus_dmamap_destroy(sc->sc_dmatag, sc->sc_uimap);
		/* FALL THROUGH */
	}
	case 6: {
		bus_dmamem_unmap(sc->sc_dmatag,
		    (caddr_t) pdq->pdq_unsolicited_info.ui_events,
		    PDQ_OS_PAGESIZE);
		/* FALL THROUGH */
	}
	case 5: {
		bus_dmamem_free(sc->sc_dmatag, ui_segs, ui_nsegs);
		/* FALL THROUGH */
	}
	case 4: {
		bus_dmamap_unload(sc->sc_dmatag, sc->sc_dbmap);
		/* FALL THROUGH */
	}
	case 3: {
		bus_dmamap_destroy(sc->sc_dmatag, sc->sc_dbmap);
		/* FALL THROUGH */
	}
	case 2: {
		bus_dmamem_unmap(sc->sc_dmatag, (caddr_t) pdq->pdq_dbp,
		    sizeof(*pdq->pdq_dbp));
		/* FALL THROUGH */
	}
	case 1: {
		bus_dmamem_free(sc->sc_dmatag, db_segs, db_nsegs);
		/* FALL THROUGH */
	}
	}

	return not_ok;
}

/*
 * Sync a region of the descriptor-block DMA map.
 */
extern void
pdq_os_descriptor_block_sync(
    pdq_os_ctx_t *sc,
    size_t offset,
    size_t length,
    int ops)
{
	bus_dmamap_sync(sc->sc_dmatag, sc->sc_dbmap, offset, length, ops);
}

extern void
pdq_os_consumer_block_sync(
    pdq_os_ctx_t *sc,
    int ops)
{
	/* Sync the whole consumer block. */
	bus_dmamap_sync(sc->sc_dmatag, sc->sc_cbmap, 0,
	    sizeof(pdq_consumer_block_t), ops);
}

/*
 * Sync a region of the unsolicited-event DMA map.
 */
extern void
pdq_os_unsolicited_event_sync(
    pdq_os_ctx_t *sc,
    size_t offset,
    size_t length,
    int ops)
{
	bus_dmamap_sync(sc->sc_dmatag, sc->sc_uimap, offset, length, ops);
}

/*
 * Sync a region of an mbuf's own DMA map (stored in its pkthdr context).
 */
extern void
pdq_os_databuf_sync(
    pdq_os_ctx_t *sc,
    struct mbuf *m,
    size_t offset,
    size_t length,
    int ops)
{
	bus_dmamap_sync(sc->sc_dmatag, M_GETCTX(m, bus_dmamap_t), offset,
	    length, ops);
}

/*
 * Free a data buffer, first tearing down its DMA map if one is attached.
 */
extern void
pdq_os_databuf_free(
    pdq_os_ctx_t *sc,
    struct mbuf *m)
{
	if (m->m_flags & (M_HASRXDMAMAP|M_HASTXDMAMAP)) {
		bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t);
		bus_dmamap_unload(sc->sc_dmatag, map);
		bus_dmamap_destroy(sc->sc_dmatag, map);
		m->m_flags &= ~(M_HASRXDMAMAP|M_HASTXDMAMAP);
	}
	m_freem(m);
}

/*
 * Allocate a receive cluster mbuf and attach a loaded DMA map to it.
 * Returns NULL (after releasing whatever was obtained) on any failure.
 */
extern struct mbuf *
pdq_os_databuf_alloc(
    pdq_os_ctx_t *sc)
{
	struct mbuf *m;
	bus_dmamap_t map;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL) {
		printf("%s: can't alloc small buf\n", sc->sc_dev.dv_xname);
		return NULL;
	}
	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		printf("%s: can't alloc cluster\n", sc->sc_dev.dv_xname);
		m_free(m);
		return NULL;
	}
	m->m_pkthdr.len = m->m_len = PDQ_OS_DATABUF_SIZE;

	if (bus_dmamap_create(sc->sc_dmatag, PDQ_OS_DATABUF_SIZE,
	    1, PDQ_OS_DATABUF_SIZE, 0, BUS_DMA_NOWAIT, &map)) {
		printf("%s: can't create dmamap\n", sc->sc_dev.dv_xname);
		m_free(m);
		return NULL;
	}
	if (bus_dmamap_load_mbuf(sc->sc_dmatag, map, m,
	    BUS_DMA_READ|BUS_DMA_NOWAIT)) {
		printf("%s: can't load dmamap\n", sc->sc_dev.dv_xname);
		bus_dmamap_destroy(sc->sc_dmatag, map);
		m_free(m);
		return NULL;
	}
	m->m_flags |= M_HASRXDMAMAP;
	M_SETCTX(m, map);
	return m;
}
#endif
Index: stable/6/sys/dev/ray/if_ray.c
===================================================================
--- stable/6/sys/dev/ray/if_ray.c (revision 149421)
+++ stable/6/sys/dev/ray/if_ray.c (revision 149422)
@@ -1,3817 +1,3821 @@
/* $NetBSD: if_ray.c,v 1.12 2000/02/07 09:36:27 augustss Exp $ */
/*-
 * Copyright
(C) 2000 * Dr. Duncan McLennan Barclay, dmlb@ragnet.demon.co.uk. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY DUNCAN BARCLAY AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL DUNCAN BARCLAY OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * */ /*- * Copyright (c) 2000 Christian E. Hopps * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Card configuration * ================== * * This card is unusual in that it uses both common and attribute * memory whilst working. It should use common memory and an IO port. * * The bus resource allocations need to work around the brain deadness * of pccardd (where it reads the CIS for common memory, sets it all * up and then throws it all away assuming the card is an ed * driver...). Note that this could be dangerous (because it doesn't * interact with pccardd) if you use other memory mapped cards in the * same pccard slot as currently old mappings are not cleaned up very well * by the bus_release_resource methods or pccardd. * * There is no support for running this driver on 4.0. 
* * Ad-hoc and infra-structure modes * ================================ * * The driver supports ad-hoc mode for V4 firmware and infrastructure * mode for V5 firmware. V5 firmware in ad-hoc mode is untested and should * work. * * The Linux driver also seems to have the capability to act as an AP. * I wonder what facilities the "AP" can provide within a driver? We can * probably use the BRIDGE code to form an ESS but I don't think * power saving etc. is easy. * * * Packet framing/encapsulation/translation * ======================================== * * Currently we support the Webgear encapsulation: * 802.11 header struct ieee80211_frame * 802.3 header struct ether_header * IP/ARP payload * * and RFC1042 encapsulation of IP datagrams (translation): * 802.11 header struct ieee80211_frame * 802.2 LLC header * 802.2 SNAP header * 802.3 Ethertype * IP/ARP payload * * Framing should be selected via if_media stuff or link types but * is currently hardcoded to: * V4 encapsulation * V5 translation * * * Authentication * ============== * * 802.11 provides two authentication mechanisms. The first is a very * simple host based mechanism (like xhost) called Open System and the * second is a more complex challenge/response called Shared Key built * ontop of WEP. * * This driver only supports Open System and does not implement any * host based control lists. In otherwords authentication is always * granted to hosts wanting to authenticate with this station. This is * the only sensible behaviour as the Open System mechanism uses MAC * addresses to identify hosts. Send me patches if you need it! */ /* * ***check all XXX_INFRA code - reassoc not done well at all! * ***watchdog to catch screwed up removals? * ***error handling of RAY_COM_RUNQ * ***error handling of ECF command completions * ***can't seem to create a n/w that Win95 wants to see. * ***remove panic in ray_com_ecf by re-quing or timeout * ***use new ioctl stuff - probably need to change RAY_COM_FCHKRUNNING things? 
 *	consider user doing:
 *		ifconfig ray0 192.168.200.38 -bssid "freed"
 *		ifconfig ray0 192.168.200.38 -bssid "fred"
 *	here the second one would be missed in this code
 *	check that v5 needs timeouts on ecf commands
 *	write up driver structure in comments above
 *	UPDATE_PARAMS seems to return via an interrupt - maybe the timeout
 *		is needed for wrong values?
 *	proper setting of mib_hop_seq_len with country code for v4 firmware
 *		best done with raycontrol?
 *	countrycode setting is broken I think
 *	userupdate should trap and do via startjoin etc.
 *	fragmentation when rx level drops?
 *	v5 might not need download
 *		defaults are as documented apart from hop_seq_length
 *		settings are sane for ad-hoc not infra
 *
 *	driver state
 *		most state is implied by the sequence of commands in the runq
 *		but in fact any of the rx and tx path that uses variables
 *		in the sc_c are potentially going to get screwed?
 *
 *	infra mode stuff
 *		proper handling of the basic rate set - see the manual
 *		all ray_sj, ray_assoc sequences need a "nicer" solution as we
 *			remember association and authentication
 *		need to consider WEP
 *		acting as ap - should be able to get working from the manual
 *		need to finish RAY_ECMD_REJOIN_DONE
 *		finish authentication code, it doesn't handle errors/timeouts/
 *			REJOIN etc.
 *
 *	ray_nw_param
 *		promisc in here too? - done
 *		should be able to update the parameters before we download to the
 *		device. This means we must attach a desired struct to the
 *		runq entry and maybe have another big case statement to
 *		move these desired into current when not running.
 *		init must then use the current settings (pre-loaded
 *		in attach now!) and pass to download. But we can't access
 *		current nw params outside of the runq - ahhh
 *		differentiate between parameters set in attach and init
 *		sc_station_addr in here too (for changing mac address)
 *		move desired into the command structure?
 *		take downloaded MIB from a complete nw_param?
* longer term need to attach a desired nw params to the runq entry * * * RAY_COM_RUNQ errors * * if sleeping in ccs_alloc with eintr/erestart/enxio/enodev * erestart try again from the top * XXX do not malloc more comqs * XXX ccs allocation hard * eintr clean up and return * enxio clean up and return - done in macro * * if sleeping in runq_arr itself with eintr/erestart/enxio/enodev * erestart try again from the top * XXX do not malloc more comqs * XXX ccs allocation hard * XXX reinsert comqs at head of list * eintr clean up and return * enxio clean up and return - done in macro */ #define XXX 0 #define XXX_ACTING_AP 0 #define XXX_INFRA 0 #define RAY_DEBUG ( \ /* RAY_DBG_AUTH | */ \ /* RAY_DBG_SUBR | */ \ /* RAY_DBG_BOOTPARAM | */ \ /* RAY_DBG_STARTJOIN | */ \ /* RAY_DBG_CCS | */ \ /* RAY_DBG_IOCTL | */ \ /* RAY_DBG_MBUF | */ \ /* RAY_DBG_RX | */ \ /* RAY_DBG_CM | */ \ /* RAY_DBG_COM | */ \ /* RAY_DBG_STOP | */ \ /* RAY_DBG_CTL | */ \ /* RAY_DBG_MGT | */ \ /* RAY_DBG_TX | */ \ /* RAY_DBG_DCOM | */ \ 0 \ ) /* * XXX build options - move to LINT */ #define RAY_CM_RID 0 /* pccardd abuses windows 0 and 1 */ #define RAY_AM_RID 3 /* pccardd abuses windows 0 and 1 */ #define RAY_COM_TIMEOUT (hz/2) /* Timeout for CCS commands */ #define RAY_TX_TIMEOUT (hz/2) /* Timeout for rescheduling TX */ #define RAY_ECF_SPIN_DELAY 1000 /* Wait 1ms before checking ECF ready */ #define RAY_ECF_SPIN_TRIES 10 /* Wait this many times for ECF ready */ /* * XXX build options - move to LINT */ #ifndef RAY_DEBUG #define RAY_DEBUG 0x0000 #endif /* RAY_DEBUG */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "card_if.h" #include #include #include #include static MALLOC_DEFINE(M_RAYCOM, "raycom", "Raylink command queue entry"); /* * Prototyping */ static int ray_attach (device_t); static int ray_ccs_alloc (struct ray_softc *sc, size_t *ccsp, 
char *wmesg); static void ray_ccs_fill (struct ray_softc *sc, size_t ccs, u_int cmd); static void ray_ccs_free (struct ray_softc *sc, size_t ccs); static int ray_ccs_tx (struct ray_softc *sc, size_t *ccsp, size_t *bufpp); static void ray_com_ecf (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_com_ecf_done (struct ray_softc *sc); static void ray_com_ecf_timo (void *xsc); static struct ray_comq_entry * ray_com_init (struct ray_comq_entry *com, ray_comqfn_t function, int flags, char *mesg); static struct ray_comq_entry * ray_com_malloc (ray_comqfn_t function, int flags, char *mesg); static void ray_com_runq (struct ray_softc *sc); static int ray_com_runq_add (struct ray_softc *sc, struct ray_comq_entry *com[], int ncom, char *wmesg); static void ray_com_runq_done (struct ray_softc *sc); static int ray_detach (device_t); static void ray_init (void *xsc); static int ray_init_user (struct ray_softc *sc); static void ray_init_assoc (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_init_assoc_done (struct ray_softc *sc, u_int8_t status, size_t ccs); static void ray_init_auth (struct ray_softc *sc, struct ray_comq_entry *com); static int ray_init_auth_send (struct ray_softc *sc, u_int8_t *dst, int sequence); static void ray_init_auth_done (struct ray_softc *sc, u_int8_t status); static void ray_init_download (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_init_download_done (struct ray_softc *sc, u_int8_t status, size_t ccs); static void ray_init_download_v4 (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_init_download_v5 (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_init_mcast (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_init_sj (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_init_sj_done (struct ray_softc *sc, u_int8_t status, size_t ccs); static void ray_intr (void *xsc); static void ray_intr_ccs (struct ray_softc *sc, u_int8_t 
cmd, u_int8_t status, size_t ccs); static void ray_intr_rcs (struct ray_softc *sc, u_int8_t cmd, size_t ccs); static void ray_intr_updt_errcntrs (struct ray_softc *sc); static int ray_ioctl (struct ifnet *ifp, u_long command, caddr_t data); static void ray_mcast (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_mcast_done (struct ray_softc *sc, u_int8_t status, size_t ccs); static int ray_mcast_user (struct ray_softc *sc); static int ray_probe (device_t); static void ray_promisc (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_repparams (struct ray_softc *sc, struct ray_comq_entry *com); static void ray_repparams_done (struct ray_softc *sc, u_int8_t status, size_t ccs); static int ray_repparams_user (struct ray_softc *sc, struct ray_param_req *pr); static int ray_repstats_user (struct ray_softc *sc, struct ray_stats_req *sr); static int ray_res_alloc_am (struct ray_softc *sc); static int ray_res_alloc_cm (struct ray_softc *sc); static int ray_res_alloc_irq (struct ray_softc *sc); static void ray_res_release (struct ray_softc *sc); static void ray_rx (struct ray_softc *sc, size_t rcs); static void ray_rx_ctl (struct ray_softc *sc, struct mbuf *m0); static void ray_rx_data (struct ray_softc *sc, struct mbuf *m0, u_int8_t siglev, u_int8_t antenna); static void ray_rx_mgt (struct ray_softc *sc, struct mbuf *m0); static void ray_rx_mgt_auth (struct ray_softc *sc, struct mbuf *m0); static void ray_rx_mgt_beacon (struct ray_softc *sc, struct mbuf *m0); static void ray_rx_mgt_info (struct ray_softc *sc, struct mbuf *m0, union ieee80211_information *elements); static void ray_rx_update_cache (struct ray_softc *sc, u_int8_t *src, u_int8_t siglev, u_int8_t antenna); static void ray_stop (struct ray_softc *sc, struct ray_comq_entry *com); static int ray_stop_user (struct ray_softc *sc); static void ray_tx (struct ifnet *ifp); static void ray_tx_done (struct ray_softc *sc, u_int8_t status, size_t ccs); static void ray_tx_timo (void *xsc); 
static int	ray_tx_send	(struct ray_softc *sc, size_t ccs,
				 int pktlen, u_int8_t *dst);
static size_t	ray_tx_wrhdr	(struct ray_softc *sc, size_t bufp,
				 u_int8_t type, u_int8_t fc1, u_int8_t *addr1,
				 u_int8_t *addr2, u_int8_t *addr3);
static void	ray_upparams	(struct ray_softc *sc,
				 struct ray_comq_entry *com);
static void	ray_upparams_done (struct ray_softc *sc, u_int8_t status,
				 size_t ccs);
static int	ray_upparams_user (struct ray_softc *sc,
				 struct ray_param_req *pr);
static void	ray_watchdog	(struct ifnet *ifp);
static u_int8_t	ray_tx_best_antenna (struct ray_softc *sc, u_int8_t *dst);

#if RAY_DEBUG & RAY_DBG_COM
static void	ray_com_ecf_check (struct ray_softc *sc, size_t ccs,
				 char *mesg);
#endif /* RAY_DEBUG & RAY_DBG_COM */
#if RAY_DEBUG & RAY_DBG_MBUF
static void	ray_dump_mbuf	(struct ray_softc *sc, struct mbuf *m,
				 char *s);
#endif /* RAY_DEBUG & RAY_DBG_MBUF */

/*
 * PC-Card (PCMCIA) driver definition
 */
static device_method_t ray_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		ray_probe),
	DEVMETHOD(device_attach,	ray_attach),
	DEVMETHOD(device_detach,	ray_detach),
	{ 0, 0 }
};

static driver_t ray_driver = {
	"ray",
	ray_methods,
	sizeof(struct ray_softc)
};

static devclass_t ray_devclass;

DRIVER_MODULE(ray, pccard, ray_driver, ray_devclass, 0, 0);

/*
 * Probe for the card by checking its startup results.
 *
 * Fixup any bugs/quirks for different firmware.
 */
static int
ray_probe(device_t dev)
{
	struct ray_softc *sc = device_get_softc(dev);
	struct ray_ecf_startup_v5 *ep = &sc->sc_ecf_startup;
	int error;

	sc->dev = dev;
	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");

	/*
	 * Read startup results from the card.  Resources are taken only
	 * long enough to copy the startup block out of shared RAM.
	 */
	error = ray_res_alloc_cm(sc);
	if (error)
		return (error);
	error = ray_res_alloc_am(sc);
	if (error) {
		ray_res_release(sc);
		return (error);
	}
	RAY_MAP_CM(sc);
	SRAM_READ_REGION(sc, RAY_ECF_TO_HOST_BASE, ep,
	    sizeof(sc->sc_ecf_startup));
	ray_res_release(sc);

	/*
	 * Check the card is okay and work out what version we are using.
	 */
	if (ep->e_status != RAY_ECFS_CARD_OK) {
		RAY_PRINTF(sc, "card failed self test 0x%b",
		    ep->e_status, RAY_ECFS_PRINTFB);
		return (ENXIO);
	}
	if (sc->sc_version != RAY_ECFS_BUILD_4 &&
	    sc->sc_version != RAY_ECFS_BUILD_5) {
		RAY_PRINTF(sc, "unsupported firmware version 0x%0x",
		    ep->e_fw_build_string);
		return (ENXIO);
	}
	RAY_DPRINTF(sc, RAY_DBG_BOOTPARAM, "found a card");
	sc->sc_gone = 0;

	/*
	 * Fixup tib size to be correct - on build 4 it is garbage
	 */
	if (sc->sc_version == RAY_ECFS_BUILD_4 && sc->sc_tibsize == 0x55)
		sc->sc_tibsize = sizeof(struct ray_tx_tib);

	return (0);
}

/*
 * Attach the card into the kernel
 */
static int
ray_attach(device_t dev)
{
	struct ray_softc *sc = device_get_softc(dev);
	struct ray_ecf_startup_v5 *ep = &sc->sc_ecf_startup;
	struct ifnet *ifp;
	size_t ccs;
	int i, error;

	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		return (ENOSPC);

	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");

	/* NOTE(review): sc is dereferenced above before this NULL
	 * check — the check looks vestigial. */
	if ((sc == NULL) || (sc->sc_gone)) {
		if_free(ifp);
		return (ENXIO);
	}

	/*
	 * Grab the resources I need
	 */
	error = ray_res_alloc_cm(sc);
	if (error) {
		if_free(ifp);
		return (error);
	}
	error = ray_res_alloc_am(sc);
	if (error) {
		if_free(ifp);
		ray_res_release(sc);
		return (error);
	}
	error = ray_res_alloc_irq(sc);
	if (error) {
		if_free(ifp);
		ray_res_release(sc);
		return (error);
	}

	/*
	 * Reset any pending interrupts
	 */
	RAY_HCS_CLEAR_INTR(sc);

	/*
	 * Set the parameters that will survive stop/init and
	 * reset a few things on the card.
	 *
	 * Do not update these in ray_init_download's parameter setup
	 *
	 */
	RAY_MAP_CM(sc);
	bzero(&sc->sc_d, sizeof(struct ray_nw_param));
	bzero(&sc->sc_c, sizeof(struct ray_nw_param));

	/* Clear statistics counters */
	sc->sc_rxoverflow = 0;
	sc->sc_rxcksum = 0;
	sc->sc_rxhcksum = 0;
	sc->sc_rxnoise = 0;

	/* Clear signal and antenna cache */
	bzero(sc->sc_siglevs, sizeof(sc->sc_siglevs));

	/* Set all ccs to be free */
	bzero(sc->sc_ccsinuse, sizeof(sc->sc_ccsinuse));
	ccs = RAY_CCS_ADDRESS(0);
	for (i = 0; i < RAY_CCS_LAST; ccs += RAY_CCS_SIZE, i++)
		RAY_CCS_FREE(sc, ccs);

	/*
	 * Initialise the network interface structure
	 */
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_timer = 0;
	ifp->if_flags = (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
	    IFF_NEEDSGIANT);
	ifp->if_hdrlen = sizeof(struct ieee80211_frame) +
	    sizeof(struct ether_header);
	ifp->if_baudrate = 1000000; /* Is this baud or bps ;-) */

	ifp->if_start = ray_tx;
	ifp->if_ioctl = ray_ioctl;
	ifp->if_watchdog = ray_watchdog;
	ifp->if_init = ray_init;
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;

	ether_ifattach(ifp, ep->e_station_addr);

	/*
	 * Initialise the timers and driver
	 */
	callout_handle_init(&sc->com_timerh);
	callout_handle_init(&sc->tx_timerh);
	TAILQ_INIT(&sc->sc_comq);

	/*
	 * Print out some useful information
	 */
	if (bootverbose || (RAY_DEBUG & RAY_DBG_BOOTPARAM)) {
		RAY_PRINTF(sc, "start up results");
		if (sc->sc_version == RAY_ECFS_BUILD_4)
			printf(". Firmware version 4\n");
		else
			printf(". Firmware version 5\n");
		printf(". Status 0x%b\n", ep->e_status, RAY_ECFS_PRINTFB);
		printf(". Ether address %6D\n", ep->e_station_addr, ":");
		if (sc->sc_version == RAY_ECFS_BUILD_4) {
			printf(". Program checksum %0x\n", ep->e_resv0);
			printf(". CIS checksum %0x\n", ep->e_rates[0]);
		} else {
			printf(". (reserved word) %0x\n", ep->e_resv0);
			printf(". Supported rates %8D\n", ep->e_rates, ":");
		}
		printf(". Japan call sign %12D\n", ep->e_japan_callsign, ":");
		if (sc->sc_version == RAY_ECFS_BUILD_5) {
			printf(". Program checksum %0x\n", ep->e_prg_cksum);
			printf(". CIS checksum %0x\n", ep->e_cis_cksum);
			printf(". Firmware version %0x\n",
			    ep->e_fw_build_string);
			printf(". Firmware revision %0x\n", ep->e_fw_build);
			printf(". (reserved word) %0x\n", ep->e_fw_resv);
			printf(". ASIC version %0x\n", ep->e_asic_version);
			printf(". TIB size %0x\n", ep->e_tibsize);
		}
	}

	return (0);
}

/*
 * Detach the card
 *
 * This is usually called when the card is ejected, but
 * can be caused by a modunload of a controller driver.
 * The idea is to reset the driver's view of the device
 * and ensure that any driver entry points such as
 * read and write do not hang.
 */
static int
ray_detach(device_t dev)
{
	struct ray_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = sc->ifp;
	struct ray_comq_entry *com;
	int s;

	s = splimp();

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STOP, "");

	if ((sc == NULL) || (sc->sc_gone))
		return (0);

	/*
	 * Mark as not running and detach the interface.
	 *
	 * N.B. if_detach can trigger ioctls so we do it first and
	 * then clean the runq.
	 */
	sc->sc_gone = 1;
	sc->sc_c.np_havenet = 0;
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ether_ifdetach(ifp);
	if_free(ifp);

	/*
	 * Stop the runq and wake up anyone sleeping for us.
	 */
	untimeout(ray_com_ecf_timo, sc, sc->com_timerh);
	untimeout(ray_tx_timo, sc, sc->tx_timerh);
	/* NOTE(review): this TAILQ_FIRST result is immediately
	 * overwritten by TAILQ_FOREACH below — looks redundant. */
	com = TAILQ_FIRST(&sc->sc_comq);
	TAILQ_FOREACH(com, &sc->sc_comq, c_chain) {
		com->c_flags |= RAY_COM_FDETACHED;
		com->c_retval = 0;
		RAY_DPRINTF(sc, RAY_DBG_STOP, "looking at com %p %b",
		    com, com->c_flags, RAY_COM_FLAGS_PRINTFB);
		if (com->c_flags & RAY_COM_FWOK) {
			RAY_DPRINTF(sc, RAY_DBG_STOP, "waking com %p", com);
			wakeup(com->c_wakeup);
		}
	}

	/*
	 * Release resources
	 */
	ray_res_release(sc);
	RAY_DPRINTF(sc, RAY_DBG_STOP, "unloading complete");

	splx(s);

	return (0);
}

/*
 * Network ioctl request.
 */
static int
ray_ioctl(register struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ray_softc *sc = ifp->if_softc;
	struct ray_param_req pr;
	struct ray_stats_req sr;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error, error2;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_IOCTL, "");

	if ((sc == NULL) || (sc->sc_gone))
		return (ENXIO);

	error = error2 = 0;
	s = splimp();

	switch (command) {

	case SIOCSIFFLAGS:
		RAY_DPRINTF(sc, RAY_DBG_IOCTL, "SIFFLAGS 0x%0x",
		    ifp->if_flags);
		/*
		 * If the interface is marked up we call ray_init_user.
		 * This will deal with mcast and promisc flags as well as
		 * initialising the hardware if it needs it.
		 */
		if (ifp->if_flags & IFF_UP)
			error = ray_init_user(sc);
		else
			error = ray_stop_user(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		RAY_DPRINTF(sc, RAY_DBG_IOCTL, "ADDMULTI/DELMULTI");
		error = ray_mcast_user(sc);
		break;

	case SIOCSRAYPARAM:
		RAY_DPRINTF(sc, RAY_DBG_IOCTL, "SRAYPARAM");
		if ((error = copyin(ifr->ifr_data, &pr, sizeof(pr))))
			break;
		error = ray_upparams_user(sc, &pr);
		/* Copy the result back; a copyout failure wins. */
		error2 = copyout(&pr, ifr->ifr_data, sizeof(pr));
		error = error2 ? error2 : error;
		break;

	case SIOCGRAYPARAM:
		RAY_DPRINTF(sc, RAY_DBG_IOCTL, "GRAYPARAM");
		if ((error = copyin(ifr->ifr_data, &pr, sizeof(pr))))
			break;
		error = ray_repparams_user(sc, &pr);
		error2 = copyout(&pr, ifr->ifr_data, sizeof(pr));
		error = error2 ? error2 : error;
		break;

	case SIOCGRAYSTATS:
		RAY_DPRINTF(sc, RAY_DBG_IOCTL, "GRAYSTATS");
		error = ray_repstats_user(sc, &sr);
		error2 = copyout(&sr, ifr->ifr_data, sizeof(sr));
		error = error2 ? error2 : error;
		break;

	case SIOCGRAYSIGLEV:
		RAY_DPRINTF(sc, RAY_DBG_IOCTL, "GRAYSIGLEV");
		error = copyout(sc->sc_siglevs, ifr->ifr_data,
		    sizeof(sc->sc_siglevs));
		break;

	case SIOCGIFFLAGS:
		RAY_DPRINTF(sc, RAY_DBG_IOCTL, "GIFFLAGS");
		error = EINVAL;
		break;

	case SIOCGIFMETRIC:
		RAY_DPRINTF(sc, RAY_DBG_IOCTL, "GIFMETRIC");
		error = EINVAL;
		break;

	case SIOCGIFMTU:
		RAY_DPRINTF(sc, RAY_DBG_IOCTL, "GIFMTU");
		error = EINVAL;
		break;

	case SIOCGIFPHYS:
		RAY_DPRINTF(sc, RAY_DBG_IOCTL, "GIFPYHS");
		error = EINVAL;
		break;

	case SIOCSIFMEDIA:
		RAY_DPRINTF(sc, RAY_DBG_IOCTL, "SIFMEDIA");
		error = EINVAL;
		break;

	case SIOCGIFMEDIA:
		RAY_DPRINTF(sc, RAY_DBG_IOCTL, "GIFMEDIA");
		error = EINVAL;
		break;

	default:
		RAY_DPRINTF(sc, RAY_DBG_IOCTL, "OTHER (pass to ether)");
		error = ether_ioctl(ifp, command, data);
		break;

	}

	splx(s);

	return (error);
}

/*
 * Ethernet layer entry to ray_init - discard errors
 */
static void
ray_init(void *xsc)
{
	struct ray_softc *sc = (struct ray_softc *)xsc;

	ray_init_user(sc);
}

/*
 * User land entry to network initialisation and changes in interface flags.
 *
 * We do a very little work here, just creating runq entries to
 * processes the actions needed to cope with interface flags. We do it
 * this way in case there are runq entries outstanding from earlier
 * ioctls that modify the interface flags.
 *
 * Return values are either 0 for success, a variety of resource allocation
 * failures or errors in the command sent to the card.
 *
 * Note, IFF_RUNNING is eventually set by init_sj_done or init_assoc_done
 */
static int
ray_init_user(struct ray_softc *sc)
{
	struct ray_comq_entry *com[6];
	int error, ncom;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, "");

	/*
	 * Create the following runq entries to bring the card up.
	 *
	 * init_download - download the network to the card
	 * init_mcast - reset multicast list
	 * init_sj - find or start a BSS
	 * init_auth - authenticate with an ESSID if needed
	 * init_assoc - associate with an ESSID if needed
	 *
	 * They are only actually executed if the card is not running.
	 * We may enter this routine from a simple change of IP
	 * address and do not need to get the card to do these things.
	 * However, we cannot perform the check here as there may be
	 * commands in the runq that change the IFF_RUNNING state of
	 * the interface.
	 */
	ncom = 0;
	com[ncom++] = RAY_COM_MALLOC(ray_init_download, RAY_COM_FCHKRUNNING);
	com[ncom++] = RAY_COM_MALLOC(ray_init_mcast, RAY_COM_FCHKRUNNING);
	com[ncom++] = RAY_COM_MALLOC(ray_init_sj, RAY_COM_FCHKRUNNING);
	com[ncom++] = RAY_COM_MALLOC(ray_init_auth, RAY_COM_FCHKRUNNING);
	com[ncom++] = RAY_COM_MALLOC(ray_init_assoc, RAY_COM_FCHKRUNNING);

	/*
	 * Create runq entries to process flags
	 *
	 * promisc - set/reset PROMISC and ALLMULTI flags
	 *
	 * They are only actually executed if the card is running
	 */
	com[ncom++] = RAY_COM_MALLOC(ray_promisc, 0);

	/* Queue the lot and sleep until they have run */
	RAY_COM_RUNQ(sc, com, ncom, "rayinit", error);

	/* XXX no real error processing from anything yet! */

	RAY_COM_FREE(com, ncom);

	return (error);
}

/*
 * Runq entry for resetting driver and downloading start up structures to card
 */
static void
ray_init_download(struct ray_softc *sc, struct ray_comq_entry *com)
{
	struct ifnet *ifp = sc->ifp;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, "");

	/* If the card already running we might not need to download */
	RAY_COM_CHKRUNNING(sc, com, ifp);

	/*
	 * Reset instance variables
	 *
	 * The first set are network parameters that are read back when
	 * the card starts or joins the network.
	 *
	 * The second set are network parameters that are downloaded to
	 * the card.
	 *
	 * The third set are driver parameters.
	 *
	 * All of the variables in these sets can be updated by the
	 * card or ioctls.
	 */
	sc->sc_d.np_upd_param = 0;
	bzero(sc->sc_d.np_bss_id, ETHER_ADDR_LEN);
	sc->sc_d.np_inited = 0;
	sc->sc_d.np_def_txrate = RAY_MIB_BASIC_RATE_SET_DEFAULT;
	sc->sc_d.np_encrypt = 0;

	bzero(sc->sc_d.np_ssid, IEEE80211_NWID_LEN);
	if (sc->sc_version == RAY_ECFS_BUILD_4) {
		/* Version 4 firmware - encapsulation framing, v4 defaults */
		sc->sc_d.np_net_type = RAY_MIB_NET_TYPE_V4;
		strncpy(sc->sc_d.np_ssid, RAY_MIB_SSID_V4, IEEE80211_NWID_LEN);
		sc->sc_d.np_ap_status = RAY_MIB_AP_STATUS_V4;
		sc->sc_d.np_framing = RAY_FRAMING_ENCAPSULATION;
	} else {
		/* Version 5 firmware - translation framing, v5 defaults */
		sc->sc_d.np_net_type = RAY_MIB_NET_TYPE_V5;
		strncpy(sc->sc_d.np_ssid, RAY_MIB_SSID_V5, IEEE80211_NWID_LEN);
		sc->sc_d.np_ap_status = RAY_MIB_AP_STATUS_V5;
		sc->sc_d.np_framing = RAY_FRAMING_TRANSLATION;
	}
	sc->sc_d.np_priv_start = RAY_MIB_PRIVACY_MUST_START_DEFAULT;
	sc->sc_d.np_priv_join = RAY_MIB_PRIVACY_CAN_JOIN_DEFAULT;
	sc->sc_d.np_promisc = !!(ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI));

	/* XXX this is a hack whilst I transition the code. The instance
	 * XXX variables above should be set somewhere else. This is needed for
	 * XXX start_join */
	bcopy(&sc->sc_d, &com->c_desired, sizeof(struct ray_nw_param));

	/*
	 * Download the right firmware defaults
	 */
	if (sc->sc_version == RAY_ECFS_BUILD_4)
		ray_init_download_v4(sc, com);
	else
		ray_init_download_v5(sc, com);

	/*
	 * Kick the card
	 */
	ray_ccs_fill(sc, com->c_ccs, RAY_CMD_DOWNLOAD_PARAMS);
	ray_com_ecf(sc, com);
}

/* Store a 16 bit value big-endian into a 2-byte MIB field */
#define PUT2(p, v) \
	do { (p)[0] = ((v >> 8) & 0xff); (p)[1] = (v & 0xff); } while(0)

/*
 * Firmware version 4 defaults - see if_raymib.h for details
 */
static void
ray_init_download_v4(struct ray_softc *sc, struct ray_comq_entry *com)
{
	struct ray_mib_4 ray_mib_4_default;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, "");
	RAY_MAP_CM(sc);

#define MIB4(m) ray_mib_4_default.m
	MIB4(mib_net_type) = com->c_desired.np_net_type;
	MIB4(mib_ap_status) = com->c_desired.np_ap_status;
	bcopy(com->c_desired.np_ssid, MIB4(mib_ssid), IEEE80211_NWID_LEN);
	MIB4(mib_scan_mode) = RAY_MIB_SCAN_MODE_V4;
	MIB4(mib_apm_mode) = RAY_MIB_APM_MODE_V4;
	bcopy(sc->sc_station_addr, MIB4(mib_mac_addr), ETHER_ADDR_LEN);
	PUT2(MIB4(mib_frag_thresh), RAY_MIB_FRAG_THRESH_V4);
	PUT2(MIB4(mib_dwell_time), RAY_MIB_DWELL_TIME_V4);
	PUT2(MIB4(mib_beacon_period), RAY_MIB_BEACON_PERIOD_V4);
	MIB4(mib_dtim_interval) = RAY_MIB_DTIM_INTERVAL_V4;
	MIB4(mib_max_retry) = RAY_MIB_MAX_RETRY_V4;
	MIB4(mib_ack_timo) = RAY_MIB_ACK_TIMO_V4;
	MIB4(mib_sifs) = RAY_MIB_SIFS_V4;
	MIB4(mib_difs) = RAY_MIB_DIFS_V4;
	MIB4(mib_pifs) = RAY_MIB_PIFS_V4;
	PUT2(MIB4(mib_rts_thresh), RAY_MIB_RTS_THRESH_V4);
	PUT2(MIB4(mib_scan_dwell), RAY_MIB_SCAN_DWELL_V4);
	PUT2(MIB4(mib_scan_max_dwell), RAY_MIB_SCAN_MAX_DWELL_V4);
	MIB4(mib_assoc_timo) = RAY_MIB_ASSOC_TIMO_V4;
	MIB4(mib_adhoc_scan_cycle) = RAY_MIB_ADHOC_SCAN_CYCLE_V4;
	MIB4(mib_infra_scan_cycle) = RAY_MIB_INFRA_SCAN_CYCLE_V4;
	MIB4(mib_infra_super_scan_cycle) = RAY_MIB_INFRA_SUPER_SCAN_CYCLE_V4;
	MIB4(mib_promisc) = com->c_desired.np_promisc;
	PUT2(MIB4(mib_uniq_word), RAY_MIB_UNIQ_WORD_V4);
	MIB4(mib_slot_time) = RAY_MIB_SLOT_TIME_V4;
	MIB4(mib_roam_low_snr_thresh) = RAY_MIB_ROAM_LOW_SNR_THRESH_V4;
	MIB4(mib_low_snr_count) = RAY_MIB_LOW_SNR_COUNT_V4;
	MIB4(mib_infra_missed_beacon_count) = RAY_MIB_INFRA_MISSED_BEACON_COUNT_V4;
	MIB4(mib_adhoc_missed_beacon_count) = RAY_MIB_ADHOC_MISSED_BEACON_COUNT_V4;
	MIB4(mib_country_code) = RAY_MIB_COUNTRY_CODE_V4;
	MIB4(mib_hop_seq) = RAY_MIB_HOP_SEQ_V4;
	MIB4(mib_hop_seq_len) = RAY_MIB_HOP_SEQ_LEN_V4;
	MIB4(mib_cw_max) = RAY_MIB_CW_MAX_V4;
	MIB4(mib_cw_min) = RAY_MIB_CW_MIN_V4;
	MIB4(mib_noise_filter_gain) = RAY_MIB_NOISE_FILTER_GAIN_DEFAULT;
	MIB4(mib_noise_limit_offset) = RAY_MIB_NOISE_LIMIT_OFFSET_DEFAULT;
	MIB4(mib_rssi_thresh_offset) = RAY_MIB_RSSI_THRESH_OFFSET_DEFAULT;
	MIB4(mib_busy_thresh_offset) = RAY_MIB_BUSY_THRESH_OFFSET_DEFAULT;
	MIB4(mib_sync_thresh) = RAY_MIB_SYNC_THRESH_DEFAULT;
	MIB4(mib_test_mode) = RAY_MIB_TEST_MODE_DEFAULT;
	MIB4(mib_test_min_chan) = RAY_MIB_TEST_MIN_CHAN_DEFAULT;
	MIB4(mib_test_max_chan) = RAY_MIB_TEST_MAX_CHAN_DEFAULT;
#undef MIB4

	/* Ship the whole MIB block to the card's shared memory window */
	SRAM_WRITE_REGION(sc, RAY_HOST_TO_ECF_BASE, &ray_mib_4_default,
	    sizeof(ray_mib_4_default));
}

/*
 * Firmware version 5 defaults - see if_raymib.h for details
 */
static void
ray_init_download_v5(struct ray_softc *sc, struct ray_comq_entry *com)
{
	struct ray_mib_5 ray_mib_5_default;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, "");
	RAY_MAP_CM(sc);

#define MIB5(m) ray_mib_5_default.m
	MIB5(mib_net_type) = com->c_desired.np_net_type;
	MIB5(mib_ap_status) = com->c_desired.np_ap_status;
	bcopy(com->c_desired.np_ssid, MIB5(mib_ssid), IEEE80211_NWID_LEN);
	MIB5(mib_scan_mode) = RAY_MIB_SCAN_MODE_V5;
	MIB5(mib_apm_mode) = RAY_MIB_APM_MODE_V5;
	bcopy(sc->sc_station_addr, MIB5(mib_mac_addr), ETHER_ADDR_LEN);
	PUT2(MIB5(mib_frag_thresh), RAY_MIB_FRAG_THRESH_V5);
	PUT2(MIB5(mib_dwell_time), RAY_MIB_DWELL_TIME_V5);
	PUT2(MIB5(mib_beacon_period), RAY_MIB_BEACON_PERIOD_V5);
	MIB5(mib_dtim_interval) = RAY_MIB_DTIM_INTERVAL_V5;
	MIB5(mib_max_retry) = RAY_MIB_MAX_RETRY_V5;
	MIB5(mib_ack_timo) = RAY_MIB_ACK_TIMO_V5;
	MIB5(mib_sifs) = RAY_MIB_SIFS_V5;
	MIB5(mib_difs) = RAY_MIB_DIFS_V5;
	MIB5(mib_pifs) = RAY_MIB_PIFS_V5;
	PUT2(MIB5(mib_rts_thresh), RAY_MIB_RTS_THRESH_V5);
	PUT2(MIB5(mib_scan_dwell), RAY_MIB_SCAN_DWELL_V5);
	PUT2(MIB5(mib_scan_max_dwell), RAY_MIB_SCAN_MAX_DWELL_V5);
	MIB5(mib_assoc_timo) = RAY_MIB_ASSOC_TIMO_V5;
	MIB5(mib_adhoc_scan_cycle) = RAY_MIB_ADHOC_SCAN_CYCLE_V5;
	MIB5(mib_infra_scan_cycle) = RAY_MIB_INFRA_SCAN_CYCLE_V5;
	MIB5(mib_infra_super_scan_cycle) = RAY_MIB_INFRA_SUPER_SCAN_CYCLE_V5;
	MIB5(mib_promisc) = com->c_desired.np_promisc;
	PUT2(MIB5(mib_uniq_word), RAY_MIB_UNIQ_WORD_V5);
	MIB5(mib_slot_time) = RAY_MIB_SLOT_TIME_V5;
	MIB5(mib_roam_low_snr_thresh) = RAY_MIB_ROAM_LOW_SNR_THRESH_V5;
	MIB5(mib_low_snr_count) = RAY_MIB_LOW_SNR_COUNT_V5;
	MIB5(mib_infra_missed_beacon_count) = RAY_MIB_INFRA_MISSED_BEACON_COUNT_V5;
	MIB5(mib_adhoc_missed_beacon_count) = RAY_MIB_ADHOC_MISSED_BEACON_COUNT_V5;
	MIB5(mib_country_code) = RAY_MIB_COUNTRY_CODE_V5;
	MIB5(mib_hop_seq) = RAY_MIB_HOP_SEQ_V5;
	MIB5(mib_hop_seq_len) =
	    RAY_MIB_HOP_SEQ_LEN_V5;
	PUT2(MIB5(mib_cw_max), RAY_MIB_CW_MAX_V5);
	PUT2(MIB5(mib_cw_min), RAY_MIB_CW_MIN_V5);
	MIB5(mib_noise_filter_gain) = RAY_MIB_NOISE_FILTER_GAIN_DEFAULT;
	MIB5(mib_noise_limit_offset) = RAY_MIB_NOISE_LIMIT_OFFSET_DEFAULT;
	MIB5(mib_rssi_thresh_offset) = RAY_MIB_RSSI_THRESH_OFFSET_DEFAULT;
	MIB5(mib_busy_thresh_offset) = RAY_MIB_BUSY_THRESH_OFFSET_DEFAULT;
	MIB5(mib_sync_thresh) = RAY_MIB_SYNC_THRESH_DEFAULT;
	MIB5(mib_test_mode) = RAY_MIB_TEST_MODE_DEFAULT;
	MIB5(mib_test_min_chan) = RAY_MIB_TEST_MIN_CHAN_DEFAULT;
	MIB5(mib_test_max_chan) = RAY_MIB_TEST_MAX_CHAN_DEFAULT;
	MIB5(mib_allow_probe_resp) = RAY_MIB_ALLOW_PROBE_RESP_DEFAULT;
	MIB5(mib_privacy_must_start) = com->c_desired.np_priv_start;
	MIB5(mib_privacy_can_join) = com->c_desired.np_priv_join;
	MIB5(mib_basic_rate_set[0]) = com->c_desired.np_def_txrate;
#undef MIB5

	/* Ship the whole MIB block to the card's shared memory window */
	SRAM_WRITE_REGION(sc, RAY_HOST_TO_ECF_BASE, &ray_mib_5_default,
	    sizeof(ray_mib_5_default));
}
#undef PUT2

/*
 * Download completion routine
 */
static void
ray_init_download_done(struct ray_softc *sc, u_int8_t status, size_t ccs)
{
	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, "");
	RAY_COM_CHECK(sc, ccs);

	RAY_CCSERR(sc, status, if_oerrors); /* XXX error counter */

	ray_com_ecf_done(sc);
}

/*
 * Runq entry to empty the multicast filter list
 */
static void
ray_init_mcast(struct ray_softc *sc, struct ray_comq_entry *com)
{
	struct ifnet *ifp = sc->ifp;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, "");
	RAY_MAP_CM(sc);

	/* If the card already running we might not need to reset the list */
	RAY_COM_CHKRUNNING(sc, com, ifp);

	/*
	 * Kick the card
	 */
	ray_ccs_fill(sc, com->c_ccs, RAY_CMD_UPDATE_MCAST);
	SRAM_WRITE_FIELD_1(sc, com->c_ccs, ray_cmd_update_mcast, c_nmcast, 0);

	ray_com_ecf(sc, com);
}

/*
 * Runq entry to starting or joining a network
 */
static void
ray_init_sj(struct ray_softc *sc, struct ray_comq_entry *com)
{
	struct ifnet *ifp = sc->ifp;
	struct ray_net_params np;
	int update;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, "");
	RAY_MAP_CM(sc);

	/* If the card already running we might not need to start the n/w */
	RAY_COM_CHKRUNNING(sc, com, ifp);

	/*
	 * Set up the right start or join command and determine
	 * whether we should tell the card about a change in operating
	 * parameters.
	 */
	sc->sc_c.np_havenet = 0;
	if (sc->sc_d.np_net_type == RAY_MIB_NET_TYPE_ADHOC)
		ray_ccs_fill(sc, com->c_ccs, RAY_CMD_START_NET);
	else
		ray_ccs_fill(sc, com->c_ccs, RAY_CMD_JOIN_NET);

	/* Compare current vs. desired parameters to see if an update
	 * block must be passed with the start/join command */
	update = 0;
	if (sc->sc_c.np_net_type != sc->sc_d.np_net_type)
		update++;
	if (bcmp(sc->sc_c.np_ssid, sc->sc_d.np_ssid, IEEE80211_NWID_LEN))
		update++;
	if (sc->sc_c.np_priv_join != sc->sc_d.np_priv_join)
		update++;
	if (sc->sc_c.np_priv_start != sc->sc_d.np_priv_start)
		update++;
	RAY_DPRINTF(sc, RAY_DBG_STARTJOIN,
	    "%s updating nw params", update?"is":"not");
	if (update) {
		bzero(&np, sizeof(np));
		np.p_net_type = sc->sc_d.np_net_type;
		bcopy(sc->sc_d.np_ssid, np.p_ssid, IEEE80211_NWID_LEN);
		np.p_privacy_must_start = sc->sc_d.np_priv_start;
		np.p_privacy_can_join = sc->sc_d.np_priv_join;
		SRAM_WRITE_REGION(sc, RAY_HOST_TO_ECF_BASE, &np, sizeof(np));
		SRAM_WRITE_FIELD_1(sc, com->c_ccs, ray_cmd_net, c_upd_param, 1);
	} else
		SRAM_WRITE_FIELD_1(sc, com->c_ccs, ray_cmd_net, c_upd_param, 0);

	/*
	 * Kick the card
	 */
	ray_com_ecf(sc, com);
}

/*
 * Complete start command or intermediate step in assoc command
 */
static void
ray_init_sj_done(struct ray_softc *sc, u_int8_t status, size_t ccs)
{
	struct ifnet *ifp = sc->ifp;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, "");
	RAY_MAP_CM(sc);
	RAY_COM_CHECK(sc, ccs);

	RAY_CCSERR(sc, status, if_oerrors); /* XXX error counter */

	/*
	 * Read back network parameters that the ECF sets
	 */
	SRAM_READ_REGION(sc, ccs, &sc->sc_c.p_1, sizeof(struct ray_cmd_net));

	/* Adjust values for buggy firmware - 0x55 means "unset" */
	if (sc->sc_c.np_inited == 0x55)
		sc->sc_c.np_inited = 0;
	if (sc->sc_c.np_def_txrate == 0x55)
		sc->sc_c.np_def_txrate = sc->sc_d.np_def_txrate;
	if (sc->sc_c.np_encrypt == 0x55)
		sc->sc_c.np_encrypt = sc->sc_d.np_encrypt;

	/*
	 * Update our
	 * local state if we updated the network parameters
	 * when the START_NET or JOIN_NET was issued.
	 */
	if (sc->sc_c.np_upd_param) {
		RAY_DPRINTF(sc, RAY_DBG_STARTJOIN, "updated parameters");
		SRAM_READ_REGION(sc, RAY_HOST_TO_ECF_BASE,
		    &sc->sc_c.p_2, sizeof(struct ray_net_params));
	}

	/*
	 * Hurrah! The network is now active.
	 *
	 * Clearing IFF_OACTIVE will ensure that the system will send us
	 * packets. Just before we return from the interrupt context
	 * we check to see if packets have been queued.
	 */
	if (SRAM_READ_FIELD_1(sc, ccs, ray_cmd, c_cmd) == RAY_CMD_START_NET) {
		sc->sc_c.np_havenet = 1;
		sc->sc_c.np_framing = sc->sc_d.np_framing;
		ifp->if_flags |= IFF_RUNNING;
		ifp->if_flags &= ~IFF_OACTIVE;
	}

	ray_com_ecf_done(sc);
}

/*
 * Runq entry to authenticate with an access point or another station
 */
static void
ray_init_auth(struct ray_softc *sc, struct ray_comq_entry *com)
{
	struct ifnet *ifp = sc->ifp;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN | RAY_DBG_AUTH, "");

	/* If card already running we might not need to authenticate */
	RAY_COM_CHKRUNNING(sc, com, ifp);

	/*
	 * Don't do anything if we are not in a managed network
	 *
	 * XXX V4 adhoc does not need this, V5 adhoc unknown
	 */
	if (sc->sc_c.np_net_type != RAY_MIB_NET_TYPE_INFRA) {
		ray_com_runq_done(sc);
		return;
	}

/* XXX_AUTH need to think of run queue when doing auths from request i.e. would
 * XXX_AUTH need to have auth at top of runq?
 * XXX_AUTH ditto for sending any auth response packets...what about timeouts?
 */

	/*
	 * Kick the card
	 */
/* XXX_AUTH check exit status and retry or fail as we can't associate without this */
	ray_init_auth_send(sc, sc->sc_c.np_bss_id, IEEE80211_AUTH_OPEN_REQUEST);
}

/*
 * Build and send an authentication packet
 *
 * If an error occurs, returns 1 else returns 0.
 */
static int
ray_init_auth_send(struct ray_softc *sc, u_int8_t *dst, int sequence)
{
	size_t ccs, bufp;
	int pktlen = 0;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN | RAY_DBG_AUTH, "");

	/* Get a control block */
	if (ray_ccs_tx(sc, &ccs, &bufp)) {
		RAY_RECERR(sc, "could not obtain a ccs");
		return (1);
	}

	/* Fill the header in - open-system auth, management frame */
	bufp = ray_tx_wrhdr(sc, bufp,
	    IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_AUTH,
	    IEEE80211_FC1_DIR_NODS,
	    dst,
	    IFP2ENADDR(sc->ifp),
	    sc->sc_c.np_bss_id);

	/* Add algorithm number (little-endian 16 bit) */
	SRAM_WRITE_1(sc, bufp + pktlen++, IEEE80211_AUTH_ALG_OPEN);
	SRAM_WRITE_1(sc, bufp + pktlen++, 0);

	/* Add sequence number */
	SRAM_WRITE_1(sc, bufp + pktlen++, sequence);
	SRAM_WRITE_1(sc, bufp + pktlen++, 0);

	/* Add status code */
	SRAM_WRITE_1(sc, bufp + pktlen++, 0);
	SRAM_WRITE_1(sc, bufp + pktlen++, 0);

	pktlen += sizeof(struct ieee80211_frame);

	return (ray_tx_send(sc, ccs, pktlen, dst));
}

/*
 * Complete authentication runq
 */
static void
ray_init_auth_done(struct ray_softc *sc, u_int8_t status)
{
	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN | RAY_DBG_AUTH, "");

	if (status != IEEE80211_STATUS_SUCCESS)
		RAY_RECERR(sc, "authentication failed with status %d", status);
/*
 * XXX_AUTH retry? if not just recall ray_init_auth_send and dont clear runq?
 * XXX_AUTH association requires that authenitcation is successful
 * XXX_AUTH before we associate, and the runq is the only way to halt the
 * XXX_AUTH progress of associate.
* XXX_AUTH In this case I might not need the RAY_AUTH_NEEDED state */ ray_com_runq_done(sc); } /* * Runq entry to starting an association with an access point */ static void ray_init_assoc(struct ray_softc *sc, struct ray_comq_entry *com) { struct ifnet *ifp = sc->ifp; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, ""); /* If the card already running we might not need to associate */ RAY_COM_CHKRUNNING(sc, com, ifp); /* * Don't do anything if we are not in a managed network */ if (sc->sc_c.np_net_type != RAY_MIB_NET_TYPE_INFRA) { ray_com_runq_done(sc); return; } /* * Kick the card */ ray_ccs_fill(sc, com->c_ccs, RAY_CMD_START_ASSOC); ray_com_ecf(sc, com); } /* * Complete association */ static void ray_init_assoc_done(struct ray_softc *sc, u_int8_t status, size_t ccs) { struct ifnet *ifp = sc->ifp; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, ""); RAY_COM_CHECK(sc, ccs); RAY_CCSERR(sc, status, if_oerrors); /* XXX error counter */ /* * Hurrah! The network is now active. * * Clearing IFF_OACTIVE will ensure that the system will send us * packets. Just before we return from the interrupt context * we check to see if packets have been queued. */ sc->sc_c.np_havenet = 1; sc->sc_c.np_framing = sc->sc_d.np_framing; ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; ray_com_ecf_done(sc); } /* * Network stop. * * Inhibit card - if we can't prevent reception then do not worry; * stopping a NIC only guarantees no TX. * * The change to the interface flags is done via the runq so that any * existing commands can execute normally. */ static int ray_stop_user(struct ray_softc *sc) { struct ray_comq_entry *com[1]; int error, ncom; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STOP, ""); /* * Schedule the real stop routine */ ncom = 0; com[ncom++] = RAY_COM_MALLOC(ray_stop, 0); RAY_COM_RUNQ(sc, com, ncom, "raystop", error); /* XXX no real error processing from anything yet! 
*/ RAY_COM_FREE(com, ncom); return (error); } /* * Runq entry for stopping the interface activity */ static void ray_stop(struct ray_softc *sc, struct ray_comq_entry *com) { struct ifnet *ifp = sc->ifp; struct mbuf *m; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STOP, ""); /* * Mark as not running and drain output queue */ ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); ifp->if_timer = 0; for (;;) { IF_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; m_freem(m); } ray_com_runq_done(sc); } static void ray_watchdog(struct ifnet *ifp) { struct ray_softc *sc = ifp->if_softc; RAY_DPRINTF(sc, RAY_DBG_SUBR, ""); RAY_MAP_CM(sc); if ((sc == NULL) || (sc->sc_gone)) return; RAY_PRINTF(sc, "watchdog timeout"); } /* * Transmit packet handling */ /* * Send a packet. * * We make two assumptions here: * 1) That the current priority is set to splimp _before_ this code * is called *and* is returned to the appropriate priority after * return * 2) That the IFF_OACTIVE flag is checked before this code is called * (i.e. that the output part of the interface is idle) * * A simple one packet at a time TX routine is used - we don't bother * chaining TX buffers. Performance is sufficient to max out the * wireless link on a P75. * * AST J30 Windows 95A (100MHz Pentium) to * Libretto 50CT FreeBSD-3.1 (75MHz Pentium) 167.37kB/s * Nonname box FreeBSD-3.4 (233MHz AMD K6) 161.82kB/s * * Libretto 50CT FreeBSD-3.1 (75MHz Pentium) to * AST J30 Windows 95A (100MHz Pentium) 167.37kB/s * Nonname box FreeBSD-3.4 (233MHz AMD K6) 161.38kB/s * * Given that 160kB/s is saturating the 2Mb/s wireless link we * are about there. * * In short I'm happy that the added complexity of chaining TX * packets together isn't worth it for my machines. 
 */
static void
ray_tx(struct ifnet *ifp)
{
	struct ray_softc *sc = ifp->if_softc;
	struct mbuf *m0, *m;
	struct ether_header *eh;
	struct llc *llc;
	size_t ccs, bufp;
	int pktlen, len;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_TX, "");
	RAY_MAP_CM(sc);

	/*
	 * Some simple checks first - some are overkill
	 */
	if ((sc == NULL) || (sc->sc_gone))
		return;
	if (!(ifp->if_flags & IFF_RUNNING)) {
		RAY_RECERR(sc, "cannot transmit - not running");
		return;
	}
	if (!sc->sc_c.np_havenet) {
		RAY_RECERR(sc, "cannot transmit - no network");
		return;
	}
	if (!RAY_ECF_READY(sc)) {
		/* Can't assume that the ECF is busy because of this driver */
		if ((sc->tx_timerh.callout == NULL) ||
		    (!callout_active(sc->tx_timerh.callout))) {
			sc->tx_timerh =
			    timeout(ray_tx_timo, sc, RAY_TX_TIMEOUT);
			return;
		}
		/* NOTE(review): when the ECF is busy but the retry timer is
		 * already active we fall through and attempt the transmit
		 * anyway - confirm this is intentional */
	} else
		untimeout(ray_tx_timo, sc, sc->tx_timerh);

	/*
	 * We find a ccs before we process the mbuf so that we are sure it
	 * is worthwhile processing the packet. All errors in the mbuf
	 * processing are either errors in the mbuf or gross configuration
	 * errors and the packet wouldn't get through anyway.
	 */
	if (ray_ccs_tx(sc, &ccs, &bufp)) {
		ifp->if_flags |= IFF_OACTIVE;
		return;
	}

	/*
	 * Get the mbuf and process it - we have to remember to free the
	 * ccs if there are any errors.
	 */
	IF_DEQUEUE(&ifp->if_snd, m0);
	if (m0 == NULL) {
		RAY_CCS_FREE(sc, ccs);
		return;
	}

	pktlen = m0->m_pkthdr.len;
	if (pktlen > ETHER_MAX_LEN - ETHER_CRC_LEN) {
		RAY_RECERR(sc, "mbuf too long %d", pktlen);
		RAY_CCS_FREE(sc, ccs);
		ifp->if_oerrors++;
		m_freem(m0);
		return;
	}

	/* Make sure the Ethernet header is in the first mbuf */
	m0 = m_pullup(m0, sizeof(struct ether_header));
	if (m0 == NULL) {
		RAY_RECERR(sc, "could not pullup ether");
		RAY_CCS_FREE(sc, ccs);
		ifp->if_oerrors++;
		return;
	}
	eh = mtod(m0, struct ether_header *);

	/*
	 * Write the 802.11 header according to network type etc.
	 */
	if (sc->sc_c.np_net_type == RAY_MIB_NET_TYPE_ADHOC)
		bufp = ray_tx_wrhdr(sc, bufp,
		    IEEE80211_FC0_TYPE_DATA,
		    IEEE80211_FC1_DIR_NODS,
		    eh->ether_dhost,
		    eh->ether_shost,
		    sc->sc_c.np_bss_id);
	else
		if (sc->sc_c.np_ap_status == RAY_MIB_AP_STATUS_TERMINAL)
			bufp = ray_tx_wrhdr(sc, bufp,
			    IEEE80211_FC0_TYPE_DATA,
			    IEEE80211_FC1_DIR_TODS,
			    sc->sc_c.np_bss_id,
			    eh->ether_shost,
			    eh->ether_dhost);
		else
			bufp = ray_tx_wrhdr(sc, bufp,
			    IEEE80211_FC0_TYPE_DATA,
			    IEEE80211_FC1_DIR_FROMDS,
			    eh->ether_dhost,
			    sc->sc_c.np_bss_id,
			    eh->ether_shost);

	/*
	 * Framing
	 *
	 * Add to the mbuf.
	 */
	switch (sc->sc_c.np_framing) {

	case RAY_FRAMING_ENCAPSULATION:
		/* Nice and easy - nothing! (just add an 802.11 header) */
		break;

	case RAY_FRAMING_TRANSLATION:
		/*
		 * Drop the first address in the ethernet header and
		 * write an LLC and SNAP header over the second.
		 */
		m_adj(m0, ETHER_ADDR_LEN);
		/* NOTE(review): m_adj() cannot set m0 to NULL; this check
		 * looks like it guards the wrong call - confirm */
		if (m0 == NULL) {
			RAY_RECERR(sc, "could not get space for 802.2 header");
			RAY_CCS_FREE(sc, ccs);
			ifp->if_oerrors++;
			return;
		}
		llc = mtod(m0, struct llc *);
		llc->llc_dsap = LLC_SNAP_LSAP;
		llc->llc_ssap = LLC_SNAP_LSAP;
		llc->llc_control = LLC_UI;
		llc->llc_un.type_snap.org_code[0] = 0;
		llc->llc_un.type_snap.org_code[1] = 0;
		llc->llc_un.type_snap.org_code[2] = 0;
		break;

	default:
		RAY_RECERR(sc, "unknown framing type %d", sc->sc_c.np_framing);
		RAY_CCS_FREE(sc, ccs);
		ifp->if_oerrors++;
		m_freem(m0);
		return;

	}
	if (m0 == NULL) {
		RAY_RECERR(sc, "could not frame packet");
		RAY_CCS_FREE(sc, ccs);
		ifp->if_oerrors++;
		return;
	}
	RAY_MBUF_DUMP(sc, RAY_DBG_TX, m0, "framed packet");

	/*
	 * Copy the mbuf to the buffer in common memory
	 *
	 * We drop and don't bother wrapping as Ethernet packets are 1518
	 * bytes, we checked the mbuf earlier, and our TX buffers are 2048
	 * bytes. We don't have 530 bytes of headers etc. so something
	 * must be fubar.
	 */
	pktlen = sizeof(struct ieee80211_frame);
	for (m = m0; m != NULL; m = m->m_next) {
		pktlen += m->m_len;
		if ((len = m->m_len) == 0)
			continue;
		if ((bufp + len) < RAY_TX_END)
			SRAM_WRITE_REGION(sc, bufp, mtod(m, u_int8_t *), len);
		else {
			RAY_RECERR(sc, "tx buffer overflow");
			RAY_CCS_FREE(sc, ccs);
			ifp->if_oerrors++;
			m_freem(m0);
			return;
		}
		bufp += len;
	}

	/*
	 * Send it off
	 */
	if (ray_tx_send(sc, ccs, pktlen, eh->ether_dhost))
		ifp->if_oerrors++;
	else
		ifp->if_opackets++;
	m_freem(m0);
}

/*
 * Start timeout routine.
 *
 * Used when card was busy but we needed to send a packet.
 */
static void
ray_tx_timo(void *xsc)
{
	struct ray_softc *sc = (struct ray_softc *)xsc;
	struct ifnet *ifp = sc->ifp;
	int s;

	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");

	/* Retry the transmit at splimp if there is still work queued */
	if (!(ifp->if_flags & IFF_OACTIVE) && (ifp->if_snd.ifq_head != NULL)) {
		s = splimp();
		ray_tx(ifp);
		splx(s);
	}
}

/*
 * Write an 802.11 header into the Tx buffer space and return the
 * adjusted buffer pointer.
 */
static size_t
ray_tx_wrhdr(struct ray_softc *sc, size_t bufp, u_int8_t type, u_int8_t fc1,
    u_int8_t *addr1, u_int8_t *addr2, u_int8_t *addr3)
{
	struct ieee80211_frame header;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_TX, "");
	RAY_MAP_CM(sc);

	bzero(&header, sizeof(struct ieee80211_frame));
	header.i_fc[0] = (IEEE80211_FC0_VERSION_0 | type);
	header.i_fc[1] = fc1;
	bcopy(addr1, header.i_addr1, ETHER_ADDR_LEN);
	bcopy(addr2, header.i_addr2, ETHER_ADDR_LEN);
	bcopy(addr3, header.i_addr3, ETHER_ADDR_LEN);

	SRAM_WRITE_REGION(sc, bufp, (u_int8_t *)&header,
	    sizeof(struct ieee80211_frame));

	return (bufp + sizeof(struct ieee80211_frame));
}

/*
 * Fill in a few loose ends and kick the card to send the packet
 *
 * Returns 0 on success, 1 on failure
 */
static int
ray_tx_send(struct ray_softc *sc, size_t ccs, int pktlen, u_int8_t *dst)
{
	int i = 0;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_TX, "");
	RAY_MAP_CM(sc);

	/* Spin briefly for the ECF; drop the packet if it stays busy */
	while (!RAY_ECF_READY(sc)) {
		DELAY(RAY_ECF_SPIN_DELAY);
		if (++i > RAY_ECF_SPIN_TRIES) {
			RAY_RECERR(sc, "ECF busy, dropping packet");
			RAY_CCS_FREE(sc, ccs);
			return (1);
		}
	}
	if (i != 0)
		RAY_RECERR(sc, "spun %d times", i);

	SRAM_WRITE_FIELD_2(sc, ccs, ray_cmd_tx, c_len, pktlen);
	SRAM_WRITE_FIELD_1(sc, ccs, ray_cmd_tx, c_antenna,
	    ray_tx_best_antenna(sc, dst));
	SRAM_WRITE_1(sc, RAY_SCB_CCSI, RAY_CCS_INDEX(ccs));
	RAY_ECF_START_CMD(sc);

	return (0);
}

/*
 * Determine best antenna to use from rx level and antenna cache
 */
static u_int8_t
ray_tx_best_antenna(struct ray_softc *sc, u_int8_t *dst)
{
	struct ray_siglev *sl;
	int i;
	u_int8_t antenna;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_TX, "");

	/* Version 4 firmware only supports a single antenna */
	if (sc->sc_version == RAY_ECFS_BUILD_4)
		return (0);

	/* try to find host */
	for (i = 0; i < RAY_NSIGLEVRECS; i++) {
		sl = &sc->sc_siglevs[i];
		if (bcmp(sl->rsl_host, dst, ETHER_ADDR_LEN) == 0)
			goto found;
	}
	/* not found, return default setting */
	return (0);

found:
	/* This is a simple thresholding scheme that takes the mean
	 * of the best antenna history. This is okay but as it is a
	 * filter, it adds a bit of lag in situations where the
	 * best antenna swaps from one side to the other slowly. Don't know
	 * how likely this is given the horrible fading though.
	 */
	antenna = 0;
	for (i = 0; i < RAY_NANTENNA; i++) {
		antenna += sl->rsl_antennas[i];
	}

	/* Majority vote over the history - 1 if more than half set */
	return (antenna > (RAY_NANTENNA >> 1));
}

/*
 * Transmit now complete so clear ccs and network flags.
 */
static void
ray_tx_done(struct ray_softc *sc, u_int8_t status, size_t ccs)
{
	struct ifnet *ifp = sc->ifp;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_TX, "");

	RAY_CCSERR(sc, status, if_oerrors);

	RAY_CCS_FREE(sc, ccs);
	ifp->if_timer = 0;
	if (ifp->if_flags & IFF_OACTIVE)
		ifp->if_flags &= ~IFF_OACTIVE;
}

/*
 * Receiver packet handling
 */

/*
 * Receive a packet from the card
 */
static void
ray_rx(struct ray_softc *sc, size_t rcs)
{
	struct ieee80211_frame *header;
	struct ifnet *ifp = sc->ifp;
	struct mbuf *m0;
	size_t pktlen, fraglen, readlen, tmplen;
	size_t bufp, ebufp;
	u_int8_t siglev, antenna;
	u_int first, ni, i;
	u_int8_t *mp;

	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");
	RAY_MAP_CM(sc);

	RAY_DPRINTF(sc, RAY_DBG_CCS, "using rcs 0x%x", rcs);

	m0 = NULL;
	readlen = 0;

	/*
	 * Get first part of packet and the length. Do some sanity checks
	 * and get a mbuf.
	 */
	first = RAY_CCS_INDEX(rcs);
	pktlen = SRAM_READ_FIELD_2(sc, rcs, ray_cmd_rx, c_pktlen);
	siglev = SRAM_READ_FIELD_1(sc, rcs, ray_cmd_rx, c_siglev);
	antenna = SRAM_READ_FIELD_1(sc, rcs, ray_cmd_rx, c_antenna);

	if ((pktlen > MCLBYTES) || (pktlen < sizeof(struct ieee80211_frame))) {
		RAY_RECERR(sc, "packet too big or too small");
		ifp->if_ierrors++;
		goto skip_read;
	}

	MGETHDR(m0, M_DONTWAIT, MT_DATA);
	if (m0 == NULL) {
		RAY_RECERR(sc, "MGETHDR failed");
		ifp->if_ierrors++;
		goto skip_read;
	}
	if (pktlen > MHLEN) {
		/* Too big for an mbuf header - attach a cluster */
		MCLGET(m0, M_DONTWAIT);
		if (!(m0->m_flags & M_EXT)) {
			RAY_RECERR(sc, "MCLGET failed");
			ifp->if_ierrors++;
			m_freem(m0);
			m0 = NULL;
			goto skip_read;
		}
	}
	m0->m_pkthdr.rcvif = ifp;
	m0->m_pkthdr.len = pktlen;
	m0->m_len = pktlen;
	mp = mtod(m0, u_int8_t *);

	/*
	 * Walk the fragment chain to build the complete packet.
	 *
	 * The use of two index variables removes a race with the
	 * hardware. If one index were used the clearing of the CCS would
	 * happen before reading the next pointer and the hardware can get in.
	 * Not my idea but verbatim from the NetBSD driver.
	 */
	i = ni = first;
	while ((i = ni) && (i != RAY_CCS_LINK_NULL)) {
		rcs = RAY_CCS_ADDRESS(i);
		ni = SRAM_READ_FIELD_1(sc, rcs, ray_cmd_rx, c_nextfrag);
		bufp = SRAM_READ_FIELD_2(sc, rcs, ray_cmd_rx, c_bufp);
		fraglen = SRAM_READ_FIELD_2(sc, rcs, ray_cmd_rx, c_len);
		if (fraglen + readlen > pktlen) {
			RAY_RECERR(sc, "bad length current 0x%zx pktlen 0x%zx",
			    fraglen + readlen, pktlen);
			ifp->if_ierrors++;
			m_freem(m0);
			m0 = NULL;
			goto skip_read;
		}
		if ((i < RAY_RCS_FIRST) || (i > RAY_RCS_LAST)) {
			RAY_RECERR(sc, "bad rcs index 0x%x", i);
			ifp->if_ierrors++;
			m_freem(m0);
			m0 = NULL;
			goto skip_read;
		}

		/* The RX buffer is a ring; wrap the copy if needed */
		ebufp = bufp + fraglen;
		if (ebufp <= RAY_RX_END)
			SRAM_READ_REGION(sc, bufp, mp, fraglen);
		else {
			SRAM_READ_REGION(sc, bufp, mp,
			    (tmplen = RAY_RX_END - bufp));
			SRAM_READ_REGION(sc, RAY_RX_BASE, mp + tmplen,
			    ebufp - RAY_RX_END);
		}
		mp += fraglen;
		readlen += fraglen;
	}

skip_read:

	/*
	 * Walk the chain again to free the rcss.
	 */
	i = ni = first;
	while ((i = ni) && (i != RAY_CCS_LINK_NULL)) {
		rcs = RAY_CCS_ADDRESS(i);
		ni = SRAM_READ_FIELD_1(sc, rcs, ray_cmd_rx, c_nextfrag);
		RAY_CCS_FREE(sc, rcs);
	}

	if (m0 == NULL)
		return;

	/*
	 * Check the 802.11 packet type and hand off to
	 * appropriate functions.
*/ header = mtod(m0, struct ieee80211_frame *); if ((header->i_fc[0] & IEEE80211_FC0_VERSION_MASK) != IEEE80211_FC0_VERSION_0) { RAY_RECERR(sc, "header not version 0 fc0 0x%x", header->i_fc[0]); ifp->if_ierrors++; m_freem(m0); return; } switch (header->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { case IEEE80211_FC0_TYPE_DATA: ray_rx_data(sc, m0, siglev, antenna); break; case IEEE80211_FC0_TYPE_MGT: ray_rx_mgt(sc, m0); break; case IEEE80211_FC0_TYPE_CTL: ray_rx_ctl(sc, m0); break; default: RAY_RECERR(sc, "unknown packet fc0 0x%x", header->i_fc[0]); ifp->if_ierrors++; m_freem(m0); } } /* * Deal with DATA packet types */ static void ray_rx_data(struct ray_softc *sc, struct mbuf *m0, u_int8_t siglev, u_int8_t antenna) { struct ifnet *ifp = sc->ifp; struct ieee80211_frame *header = mtod(m0, struct ieee80211_frame *); struct llc *llc; u_int8_t *sa = NULL, *da = NULL, *ra = NULL, *ta = NULL; int trim = 0; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_RX, ""); /* * Check the the data packet subtype, some packets have * nothing in them so we will drop them here. */ switch (header->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) { case IEEE80211_FC0_SUBTYPE_DATA: case IEEE80211_FC0_SUBTYPE_CF_ACK: case IEEE80211_FC0_SUBTYPE_CF_POLL: case IEEE80211_FC0_SUBTYPE_CF_ACPL: RAY_DPRINTF(sc, RAY_DBG_RX, "DATA packet"); break; case IEEE80211_FC0_SUBTYPE_NODATA: case IEEE80211_FC0_SUBTYPE_CFACK: case IEEE80211_FC0_SUBTYPE_CFPOLL: case IEEE80211_FC0_SUBTYPE_CF_ACK_CF_ACK: RAY_DPRINTF(sc, RAY_DBG_RX, "NULL packet"); m_freem(m0); return; break; default: RAY_RECERR(sc, "reserved DATA packet subtype 0x%x", header->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK); ifp->if_ierrors++; m_freem(m0); return; } /* * Parse the To DS and From DS fields to determine the length * of the 802.11 header for use later on. * * Additionally, furtle out the right destination and * source MAC addresses for the packet. 
Packets may come via
	 * APs so the MAC addresses of the immediate node may be
	 * different from the node that actually sent us the packet.
	 *
	 *	da	destination address of final recipient
	 *	sa	source address of orginator
	 *	ra	receiver address of immediate recipient
	 *	ta	transmitter address of immediate orginator
	 *
	 * Address matching is performed on da or sa with the AP or
	 * BSSID in ra and ta.
	 */
	RAY_MBUF_DUMP(sc, RAY_DBG_RX, m0, "(1) packet before framing");
	switch (header->i_fc[1] & IEEE80211_FC1_DIR_MASK) {

	case IEEE80211_FC1_DIR_NODS:
		da = ra = header->i_addr1;
		sa = ta = header->i_addr2;
		trim = sizeof(struct ieee80211_frame);
		RAY_DPRINTF(sc, RAY_DBG_RX, "from %6D to %6D",
		    sa, ":", da, ":");
		break;

	case IEEE80211_FC1_DIR_FROMDS:
		da = ra = header->i_addr1;
		ta = header->i_addr2;
		sa = header->i_addr3;
		trim = sizeof(struct ieee80211_frame);
		RAY_DPRINTF(sc, RAY_DBG_RX, "ap %6D from %6D to %6D",
		    ta, ":", sa, ":", da, ":");
		break;

	case IEEE80211_FC1_DIR_TODS:
		ra = header->i_addr1;
		sa = ta = header->i_addr2;
		da = header->i_addr3;
		trim = sizeof(struct ieee80211_frame);
		RAY_DPRINTF(sc, RAY_DBG_RX, "from %6D to %6D ap %6D",
		    sa, ":", da, ":", ra, ":");
		break;

	case IEEE80211_FC1_DIR_DSTODS:
		ra = header->i_addr1;
		ta = header->i_addr2;
		da = header->i_addr3;
		/*
		 * NOTE(review): addr4 of a 4-address frame follows the
		 * 24-byte header, but this points one *byte* past the
		 * start of the header - looks wrong; confirm intended
		 * offset (header + sizeof(struct ieee80211_frame)?).
		 */
		sa = (u_int8_t *)header+1;
		trim = sizeof(struct ieee80211_frame) + ETHER_ADDR_LEN;
		RAY_DPRINTF(sc, RAY_DBG_RX, "from %6D to %6D ap %6D to %6D",
		    sa, ":", da, ":", ta, ":", ra, ":");
		break;
	}

	/*
	 * Framing
	 *
	 * Each case must leave an Ethernet header and adjust trim.
 */
	switch (sc->sc_c.np_framing) {

	case RAY_FRAMING_ENCAPSULATION:
		/* A NOP as the Ethernet header is in the packet */
		break;

	case RAY_FRAMING_TRANSLATION:
		/* Check that we have an LLC and SNAP sequence */
		llc = (struct llc *)((u_int8_t *)header + trim);
		if (llc->llc_dsap == LLC_SNAP_LSAP &&
		    llc->llc_ssap == LLC_SNAP_LSAP &&
		    llc->llc_control == LLC_UI &&
		    llc->llc_un.type_snap.org_code[0] == 0 &&
		    llc->llc_un.type_snap.org_code[1] == 0 &&
		    llc->llc_un.type_snap.org_code[2] == 0) {
			struct ether_header *eh;
			/*
			 * This is not magic. RFC1042 header is 8
			 * bytes, with the last two bytes being the
			 * ether type. So all we need is another
			 * ETHER_ADDR_LEN bytes to write the
			 * destination into.
			 */
			trim -= ETHER_ADDR_LEN;
			eh = (struct ether_header *)((u_int8_t *)header + trim);
			/*
			 * Copy carefully to avoid mashing the MAC
			 * addresses. The address layout in the .11 header
			 * does make sense, honest, but it is a pain.
			 *
			 * 	NODS	da sa		no risk
			 *	FROMDS	da ta sa	sa then da
			 *	DSTODS	ra ta da sa	sa then da
			 *	TODS	ra sa da	da then sa
			 */
			if (sa > da) {
				/* Copy sa first */
				bcopy(sa, eh->ether_shost, ETHER_ADDR_LEN);
				bcopy(da, eh->ether_dhost, ETHER_ADDR_LEN);
			} else {
				/* Copy da first */
				bcopy(da, eh->ether_dhost, ETHER_ADDR_LEN);
				bcopy(sa, eh->ether_shost, ETHER_ADDR_LEN);
			}
		} else {
			/* Assume RAY_FRAMING_ENCAPSULATION */
			RAY_RECERR(sc,
			    "got encapsulated packet but in translation mode");
		}
		break;

	default:
		RAY_RECERR(sc, "unknown framing type %d", sc->sc_c.np_framing);
		ifp->if_ierrors++;
		m_freem(m0);
		return;
	}
	RAY_MBUF_DUMP(sc, RAY_DBG_RX, m0, "(2) packet after framing");

	/*
	 * Finally, do a bit of house keeping before sending the packet
	 * up the stack.
 */
	m_adj(m0, trim);
	RAY_MBUF_DUMP(sc, RAY_DBG_RX, m0, "(3) packet after trimming");
	ifp->if_ipackets++;
	ray_rx_update_cache(sc, header->i_addr2, siglev, antenna);
	(*ifp->if_input)(ifp, m0);
}

/*
 * Deal with MGT packet types
 */
static void
ray_rx_mgt(struct ray_softc *sc, struct mbuf *m0)
{
	struct ifnet *ifp = sc->ifp;
	struct ieee80211_frame *header = mtod(m0, struct ieee80211_frame *);

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_MGT, "");

	if ((header->i_fc[1] & IEEE80211_FC1_DIR_MASK) !=
	    IEEE80211_FC1_DIR_NODS) {
		RAY_RECERR(sc, "MGT TODS/FROMDS wrong fc1 0x%x",
		    header->i_fc[1] & IEEE80211_FC1_DIR_MASK);
		ifp->if_ierrors++;
		m_freem(m0);
		return;
	}

	/*
	 * Check the mgt packet subtype, some packets should be
	 * dropped depending on the mode the station is in. See pg
	 * 52(60) of docs
	 *
	 * P - proccess, J - Junk, E - ECF deals with, I - Illegal
	 *			ECF Proccesses
	 *			AHDOC procces or junk
	 *			INFRA STA process or junk
	 *			INFRA AP process or jumk
	 *
	 *	+PPP	IEEE80211_FC0_SUBTYPE_BEACON
	 *	+EEE	IEEE80211_FC0_SUBTYPE_PROBE_REQ
	 *	+EEE	IEEE80211_FC0_SUBTYPE_PROBE_RESP
	 *	 PPP	IEEE80211_FC0_SUBTYPE_AUTH
	 *	 PPP	IEEE80211_FC0_SUBTYPE_DEAUTH
	 *	 JJP	IEEE80211_FC0_SUBTYPE_ASSOC_REQ
	 *	 JPJ	IEEE80211_FC0_SUBTYPE_ASSOC_RESP
	 *	 JPP	IEEE80211_FC0_SUBTYPE_DISASSOC
	 *	 JJP	IEEE80211_FC0_SUBTYPE_REASSOC_REQ
	 *	 JPJ	IEEE80211_FC0_SUBTYPE_REASSOC_RESP
	 *	+EEE	IEEE80211_FC0_SUBTYPE_ATIM
	 */
	RAY_MBUF_DUMP(sc, RAY_DBG_MGT, m0, "MGT packet");
	switch (header->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {

	case IEEE80211_FC0_SUBTYPE_BEACON:
		RAY_DPRINTF(sc, RAY_DBG_MGT, "BEACON MGT packet");
		ray_rx_mgt_beacon(sc, m0);
		break;

	case IEEE80211_FC0_SUBTYPE_AUTH:
		RAY_DPRINTF(sc, RAY_DBG_MGT, "AUTH MGT packet");
		ray_rx_mgt_auth(sc, m0);
		break;

	case IEEE80211_FC0_SUBTYPE_DEAUTH:
		RAY_DPRINTF(sc, RAY_DBG_MGT, "DEAUTH MGT packet");
		/* XXX ray_rx_mgt_deauth(sc, m0); */
		break;

	case IEEE80211_FC0_SUBTYPE_ASSOC_REQ:
	case IEEE80211_FC0_SUBTYPE_REASSOC_REQ:
		RAY_DPRINTF(sc, RAY_DBG_MGT, "(RE)ASSOC_REQ MGT packet");
		if ((sc->sc_c.np_net_type ==
RAY_MIB_NET_TYPE_INFRA) &&
		    (sc->sc_c.np_ap_status == RAY_MIB_AP_STATUS_AP))
			RAY_RECERR(sc, "can't be an AP yet"); /* XXX_ACTING_AP */
		break;

	case IEEE80211_FC0_SUBTYPE_ASSOC_RESP:
	case IEEE80211_FC0_SUBTYPE_REASSOC_RESP:
		RAY_DPRINTF(sc, RAY_DBG_MGT, "(RE)ASSOC_RESP MGT packet");
		if ((sc->sc_c.np_net_type == RAY_MIB_NET_TYPE_INFRA) &&
		    (sc->sc_c.np_ap_status == RAY_MIB_AP_STATUS_TERMINAL))
			RAY_RECERR(sc, "can't be in INFRA yet"); /* XXX_INFRA */
		break;

	case IEEE80211_FC0_SUBTYPE_DISASSOC:
		RAY_DPRINTF(sc, RAY_DBG_MGT, "DISASSOC MGT packet");
		if (sc->sc_c.np_net_type == RAY_MIB_NET_TYPE_INFRA)
			RAY_RECERR(sc, "can't be in INFRA yet"); /* XXX_INFRA */
		break;

	case IEEE80211_FC0_SUBTYPE_PROBE_REQ:
	case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
	case IEEE80211_FC0_SUBTYPE_ATIM:
		/* The ECF should have handled these for us */
		RAY_RECERR(sc, "unexpected MGT packet subtype 0x%0x",
		    header->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK);
		ifp->if_ierrors++;
		break;

	default:
		RAY_RECERR(sc, "reserved MGT packet subtype 0x%x",
		    header->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK);
		ifp->if_ierrors++;
	}

	m_freem(m0);
}

/*
 * Deal with BEACON management packet types
 * XXX furtle anything interesting out
 * XXX Note that there are rules governing what beacons to read
 * XXX see 8802 S7.2.3, S11.1.2.3
 * XXX is this actually useful?
*/ static void ray_rx_mgt_beacon(struct ray_softc *sc, struct mbuf *m0) { struct ieee80211_frame *header = mtod(m0, struct ieee80211_frame *); ieee80211_mgt_beacon_t beacon = (u_int8_t *)(header+1); union ieee80211_information elements; u_int64_t *timestamp; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_MGT, ""); timestamp = (u_int64_t *)beacon; RAY_DPRINTF(sc, RAY_DBG_MGT, "timestamp\t0x%x", *timestamp); RAY_DPRINTF(sc, RAY_DBG_MGT, "interval\t\t0x%x", IEEE80211_BEACON_INTERVAL(beacon)); RAY_DPRINTF(sc, RAY_DBG_MGT, "capability\t0x%x", IEEE80211_BEACON_CAPABILITY(beacon)); ray_rx_mgt_info(sc, m0, &elements); } static void ray_rx_mgt_info(struct ray_softc *sc, struct mbuf *m0, union ieee80211_information *elements) { struct ifnet *ifp = sc->ifp; struct ieee80211_frame *header = mtod(m0, struct ieee80211_frame *); ieee80211_mgt_beacon_t beacon = (u_int8_t *)(header+1); ieee80211_mgt_beacon_t bp, be; int len; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_MGT, ""); bp = beacon + 12; be = mtod(m0, u_int8_t *) + m0->m_len; while (bp < be) { len = *(bp + 1); RAY_DPRINTF(sc, RAY_DBG_MGT, "id 0x%02x length %d", *bp, len); switch (*bp) { case IEEE80211_ELEMID_SSID: if (len > IEEE80211_NWID_LEN) { RAY_RECERR(sc, "bad SSD length: %d from %6D", len, header->i_addr2, ":"); } strncpy(elements->ssid, bp + 2, len); elements->ssid[len] = 0; RAY_DPRINTF(sc, RAY_DBG_MGT, "beacon ssid %s", elements->ssid); break; case IEEE80211_ELEMID_RATES: RAY_DPRINTF(sc, RAY_DBG_MGT, "rates"); break; case IEEE80211_ELEMID_FHPARMS: elements->fh.dwell = bp[2] + (bp[3] << 8); elements->fh.set = bp[4]; elements->fh.pattern = bp[5]; elements->fh.index = bp[6]; RAY_DPRINTF(sc, RAY_DBG_MGT, "fhparams dwell\t0x%04x", elements->fh.dwell); RAY_DPRINTF(sc, RAY_DBG_MGT, "fhparams set\t0x%02x", elements->fh.set); RAY_DPRINTF(sc, RAY_DBG_MGT, "fhparams pattern\t0x%02x", elements->fh.pattern); RAY_DPRINTF(sc, RAY_DBG_MGT, "fhparams index\t0x%02x", elements->fh.index); break; case IEEE80211_ELEMID_DSPARMS: RAY_RECERR(sc, "got 
direct sequence params!");
			break;

		case IEEE80211_ELEMID_CFPARMS:
			RAY_DPRINTF(sc, RAY_DBG_MGT, "cfparams");
			break;

		case IEEE80211_ELEMID_TIM:
			elements->tim.count = bp[2];
			elements->tim.period = bp[3];
			elements->tim.bitctl = bp[4];
			RAY_DPRINTF(sc, RAY_DBG_MGT,
			    "tim count\t0x%02x", elements->tim.count);
			RAY_DPRINTF(sc, RAY_DBG_MGT,
			    "tim period\t0x%02x", elements->tim.period);
			RAY_DPRINTF(sc, RAY_DBG_MGT,
			    "tim bitctl\t0x%02x", elements->tim.bitctl);
#if RAY_DEBUG & RAY_DBG_MGT
			{
				int i;
				/* Dump the partial virtual bitmap */
				for (i = 5; i < len + 1; i++)
					RAY_DPRINTF(sc, RAY_DBG_MGT,
					    "tim pvt[%03d]\t0x%02x", i-5, bp[i]);
			}
#endif /* (RAY_DEBUG & RAY_DBG_MGT) */
			break;

		case IEEE80211_ELEMID_IBSSPARMS:
			elements->ibss.atim = bp[2] + (bp[3] << 8);
			RAY_DPRINTF(sc, RAY_DBG_MGT,
			    "ibssparams atim\t0x%02x", elements->ibss.atim);
			break;

		case IEEE80211_ELEMID_CHALLENGE:
			RAY_DPRINTF(sc, RAY_DBG_MGT, "challenge");
			break;

		default:
			RAY_RECERR(sc, "reserved MGT element id 0x%x", *bp);
			ifp->if_ierrors++;
			break;
		}
		/* Advance past this element: id + length byte + payload */
		bp += bp[1] + 2;
	}
}

/*
 * Deal with AUTH management packet types
 */
static void
ray_rx_mgt_auth(struct ray_softc *sc, struct mbuf *m0)
{
	struct ieee80211_frame *header = mtod(m0, struct ieee80211_frame *);
	ieee80211_mgt_auth_t auth = (u_int8_t *)(header+1);

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_AUTH, "");

	switch (IEEE80211_AUTH_ALGORITHM(auth)) {

	case IEEE80211_AUTH_ALG_OPEN:
		RAY_DPRINTF(sc, RAY_DBG_AUTH,
		    "open system authentication sequence number %d",
		    IEEE80211_AUTH_TRANSACTION(auth));
		if (IEEE80211_AUTH_TRANSACTION(auth) ==
		    IEEE80211_AUTH_OPEN_REQUEST) {
			/* XXX_AUTH use ray_init_auth_send */
		} else if (IEEE80211_AUTH_TRANSACTION(auth) ==
		    IEEE80211_AUTH_OPEN_RESPONSE)
			ray_init_auth_done(sc, IEEE80211_AUTH_STATUS(auth));
		break;

	case IEEE80211_AUTH_ALG_SHARED:
		/* Shared key auth is not implemented by this driver */
		RAY_RECERR(sc,
		    "shared key authentication sequence number %d",
		    IEEE80211_AUTH_TRANSACTION(auth));
		break;

	default:
		RAY_RECERR(sc, "reserved authentication subtype 0x%04hx",
		    IEEE80211_AUTH_ALGORITHM(auth));
		break;
	}
}

/*
 * Deal with CTL packet types
 */
static
void
ray_rx_ctl(struct ray_softc *sc, struct mbuf *m0)
{
	struct ifnet *ifp = sc->ifp;
	struct ieee80211_frame *header = mtod(m0, struct ieee80211_frame *);

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_CTL, "");

	if ((header->i_fc[1] & IEEE80211_FC1_DIR_MASK) !=
	    IEEE80211_FC1_DIR_NODS) {
		RAY_RECERR(sc, "CTL TODS/FROMDS wrong fc1 0x%x",
		    header->i_fc[1] & IEEE80211_FC1_DIR_MASK);
		ifp->if_ierrors++;
		m_freem(m0);
		return;
	}

	/*
	 * Check the ctl packet subtype, some packets should be
	 * dropped depending on the mode the station is in. The ECF
	 * should deal with everything but the power save poll to an
	 * AP. See pg 52(60) of docs.
	 */
	RAY_MBUF_DUMP(sc, RAY_DBG_CTL, m0, "CTL packet");
	switch (header->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) {

	case IEEE80211_FC0_SUBTYPE_PS_POLL:
		RAY_DPRINTF(sc, RAY_DBG_CTL, "PS_POLL CTL packet");
		/*
		 * NOTE(review): tests sc_d (desired) net type here but
		 * sc_c (current) everywhere else in this file - confirm
		 * which is intended.
		 */
		if ((sc->sc_d.np_net_type == RAY_MIB_NET_TYPE_INFRA) &&
		    (sc->sc_c.np_ap_status == RAY_MIB_AP_STATUS_AP))
			RAY_RECERR(sc, "can't be an AP yet"); /* XXX_ACTING_AP */
		break;

	case IEEE80211_FC0_SUBTYPE_RTS:
	case IEEE80211_FC0_SUBTYPE_CTS:
	case IEEE80211_FC0_SUBTYPE_ACK:
	case IEEE80211_FC0_SUBTYPE_CF_END:
	case IEEE80211_FC0_SUBTYPE_CF_END_ACK:
		/* The ECF should have handled these for us */
		RAY_RECERR(sc, "unexpected CTL packet subtype 0x%0x",
		    header->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK);
		ifp->if_ierrors++;
		break;

	default:
		RAY_RECERR(sc, "reserved CTL packet subtype 0x%x",
		    header->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK);
		ifp->if_ierrors++;
	}

	m_freem(m0);
}

/*
 * Update rx level and antenna cache
 */
static void
ray_rx_update_cache(struct ray_softc *sc, u_int8_t *src, u_int8_t siglev, u_int8_t antenna)
{
	struct timeval mint;
	struct ray_siglev *sl;
	int i, mini;

	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");

	/* Try to find host */
	for (i = 0; i < RAY_NSIGLEVRECS; i++) {
		sl = &sc->sc_siglevs[i];
		if (bcmp(sl->rsl_host, src, ETHER_ADDR_LEN) == 0)
			goto found;
	}
	/* Not found, find oldest slot */
	mini = 0;
	mint.tv_sec = LONG_MAX;
	mint.tv_usec = 0;
	for (i = 0; i < RAY_NSIGLEVRECS; i++) {
		sl = &sc->sc_siglevs[i];
		if
 (timevalcmp(&sl->rsl_time, &mint, <)) {
			mini = i;
			mint = sl->rsl_time;
		}
	}
	/* Recycle the oldest slot for this new host */
	sl = &sc->sc_siglevs[mini];
	bzero(sl->rsl_siglevs, RAY_NSIGLEV);
	bzero(sl->rsl_antennas, RAY_NANTENNA);
	bcopy(src, sl->rsl_host, ETHER_ADDR_LEN);

found:
	microtime(&sl->rsl_time);
	/* Shift the history down one slot and record the new sample */
	bcopy(sl->rsl_siglevs, &sl->rsl_siglevs[1], RAY_NSIGLEV-1);
	sl->rsl_siglevs[0] = siglev;
	if (sc->sc_version != RAY_ECFS_BUILD_4) {
		bcopy(sl->rsl_antennas, &sl->rsl_antennas[1], RAY_NANTENNA-1);
		sl->rsl_antennas[0] = antenna;
	}
}

/*
 * Interrupt handling
 */

/*
 * Process an interrupt
 */
static void
ray_intr(void *xsc)
{
	struct ray_softc *sc = (struct ray_softc *)xsc;
	/*
	 * NOTE(review): sc is dereferenced here before the
	 * (sc == NULL) test below - confirm the NULL test is
	 * reachable/needed at all.
	 */
	struct ifnet *ifp = sc->ifp;
	size_t ccs;
	u_int8_t cmd, status;
	int ccsi;

	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");
	RAY_MAP_CM(sc);

	if ((sc == NULL) || (sc->sc_gone))
		return;

	/*
	 * Check that the interrupt was for us, if so get the rcs/ccs
	 * and vector on the command contained within it.
	 */
	if (RAY_HCS_INTR(sc)) {
		ccsi = SRAM_READ_1(sc, RAY_SCB_RCSI);
		ccs = RAY_CCS_ADDRESS(ccsi);
		cmd = SRAM_READ_FIELD_1(sc, ccs, ray_cmd, c_cmd);
		status = SRAM_READ_FIELD_1(sc, ccs, ray_cmd, c_status);
		if (ccsi <= RAY_CCS_LAST)
			ray_intr_ccs(sc, cmd, status, ccs);
		else if (ccsi <= RAY_RCS_LAST)
			ray_intr_rcs(sc, cmd, ccs);
		else
			RAY_RECERR(sc, "bad ccs index 0x%x", ccsi);
		RAY_HCS_CLEAR_INTR(sc);
	}

	/* Send any packets lying around and update error counters */
	if (!(ifp->if_flags & IFF_OACTIVE) && (ifp->if_snd.ifq_head != NULL))
		ray_tx(ifp);
	if ((++sc->sc_checkcounters % 32) == 0)
		ray_intr_updt_errcntrs(sc);
}

/*
 * Read the error counters.
 */
static void
ray_intr_updt_errcntrs(struct ray_softc *sc)
{
	size_t csc;

	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");
	RAY_MAP_CM(sc);

	/*
	 * The card implements the following protocol to keep the
	 * values from being changed while read: It checks the `own'
	 * bit and if zero writes the current internal counter value,
	 * it then sets the `own' bit to 1. If the `own' bit was 1 it
	 * increments its internal counter.
The user thus reads the
	 * counter if the `own' bit is one and then sets the own bit
	 * to 0.
	 */
	csc = RAY_STATUS_BASE;
	if (SRAM_READ_FIELD_1(sc, csc, ray_csc, csc_mrxo_own)) {
		sc->sc_rxoverflow +=
		    SRAM_READ_FIELD_2(sc, csc, ray_csc, csc_mrx_overflow);
		SRAM_WRITE_FIELD_1(sc, csc, ray_csc, csc_mrxo_own, 0);
	}
	if (SRAM_READ_FIELD_1(sc, csc, ray_csc, csc_mrxc_own)) {
		/*
		 * NOTE(review): this accumulates csc_mrx_overflow into
		 * the checksum counter - looks like a copy/paste slip;
		 * confirm against the rx checksum field in if_raymib.h.
		 */
		sc->sc_rxcksum +=
		    SRAM_READ_FIELD_2(sc, csc, ray_csc, csc_mrx_overflow);
		SRAM_WRITE_FIELD_1(sc, csc, ray_csc, csc_mrxc_own, 0);
	}
	if (SRAM_READ_FIELD_1(sc, csc, ray_csc, csc_rxhc_own)) {
		sc->sc_rxhcksum +=
		    SRAM_READ_FIELD_2(sc, csc, ray_csc, csc_rx_hcksum);
		SRAM_WRITE_FIELD_1(sc, csc, ray_csc, csc_rxhc_own, 0);
	}
	sc->sc_rxnoise = SRAM_READ_FIELD_1(sc, csc, ray_csc, csc_rx_noise);
}

/*
 * Process CCS command completion - vector on the command that
 * just finished to its _done handler.
 */
static void
ray_intr_ccs(struct ray_softc *sc, u_int8_t cmd, u_int8_t status, size_t ccs)
{
	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");

	switch (cmd) {

	case RAY_CMD_DOWNLOAD_PARAMS:
		RAY_DPRINTF(sc, RAY_DBG_COM, "START_PARAMS");
		ray_init_download_done(sc, status, ccs);
		break;

	case RAY_CMD_UPDATE_PARAMS:
		RAY_DPRINTF(sc, RAY_DBG_COM, "UPDATE_PARAMS");
		ray_upparams_done(sc, status, ccs);
		break;

	case RAY_CMD_REPORT_PARAMS:
		RAY_DPRINTF(sc, RAY_DBG_COM, "REPORT_PARAMS");
		ray_repparams_done(sc, status, ccs);
		break;

	case RAY_CMD_UPDATE_MCAST:
		RAY_DPRINTF(sc, RAY_DBG_COM, "UPDATE_MCAST");
		ray_mcast_done(sc, status, ccs);
		break;

	case RAY_CMD_START_NET:
	case RAY_CMD_JOIN_NET:
		RAY_DPRINTF(sc, RAY_DBG_COM, "START|JOIN_NET");
		ray_init_sj_done(sc, status, ccs);
		break;

	case RAY_CMD_TX_REQ:
		RAY_DPRINTF(sc, RAY_DBG_COM, "TX_REQ");
		ray_tx_done(sc, status, ccs);
		break;

	case RAY_CMD_START_ASSOC:
		RAY_DPRINTF(sc, RAY_DBG_COM, "START_ASSOC");
		ray_init_assoc_done(sc, status, ccs);
		break;

	case RAY_CMD_UPDATE_APM:
		RAY_RECERR(sc, "unexpected UPDATE_APM");
		break;

	case RAY_CMD_TEST_MEM:
		RAY_RECERR(sc, "unexpected TEST_MEM");
		break;

	case RAY_CMD_SHUTDOWN:
		RAY_RECERR(sc, "unexpected SHUTDOWN");
		break;

	case RAY_CMD_DUMP_MEM:
		RAY_RECERR(sc, "unexpected DUMP_MEM");
		break;

	case RAY_CMD_START_TIMER:
		RAY_RECERR(sc, "unexpected START_TIMER");
		break;

	default:
		RAY_RECERR(sc, "unknown command 0x%x", cmd);
		break;
	}
}

/*
 * Process ECF command request - unsolicited commands from the card.
 */
static void
ray_intr_rcs(struct ray_softc *sc, u_int8_t cmd, size_t rcs)
{
	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");

	switch (cmd) {

	case RAY_ECMD_RX_DONE:
		RAY_DPRINTF(sc, RAY_DBG_RX, "RX_DONE");
		ray_rx(sc, rcs);
		break;

	case RAY_ECMD_REJOIN_DONE:
		RAY_DPRINTF(sc, RAY_DBG_RX, "REJOIN_DONE");
		sc->sc_c.np_havenet = 1;
		break;

	case RAY_ECMD_ROAM_START:
		/* We have lost the network while roaming */
		RAY_DPRINTF(sc, RAY_DBG_RX, "ROAM_START");
		sc->sc_c.np_havenet = 0;
		break;

	case RAY_ECMD_JAPAN_CALL_SIGNAL:
		RAY_RECERR(sc, "unexpected JAPAN_CALL_SIGNAL");
		break;

	default:
		RAY_RECERR(sc, "unknown command 0x%x", cmd);
		break;
	}

	RAY_CCS_FREE(sc, rcs);
}

/*
 * User land entry to multicast list changes
 */
static int
ray_mcast_user(struct ray_softc *sc)
{
	struct ray_comq_entry *com[2];
	int error, ncom;

	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");

	/*
	 * Do all checking in the runq to preserve ordering.
	 *
	 * We run promisc to pick up changes to the ALL_MULTI
	 * interface flag.
	 */
	ncom = 0;
	com[ncom++] = RAY_COM_MALLOC(ray_mcast, 0);
	com[ncom++] = RAY_COM_MALLOC(ray_promisc, 0);

	RAY_COM_RUNQ(sc, com, ncom, "raymcast", error);

	/* XXX no real error processing from anything yet! */

	RAY_COM_FREE(com, ncom);

	return (error);
}

/*
 * Runq entry to setting the multicast filter list
 *
 * MUST always be followed by a call to ray_promisc to pick up changes
 * to promisc flag
 */
static void
ray_mcast(struct ray_softc *sc, struct ray_comq_entry *com)
{
	struct ifnet *ifp = sc->ifp;
	struct ifmultiaddr *ifma;
	size_t bufp;
	int count = 0;

	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");
	RAY_MAP_CM(sc);

	/*
	 * If card is not running we don't need to update this.
*/ if (!(ifp->if_flags & IFF_RUNNING)) { RAY_DPRINTF(sc, RAY_DBG_IOCTL, "not running"); ray_com_runq_done(sc); return; } /* * The multicast list is only 16 items long so use promiscuous * mode and don't bother updating the multicast list. */ + IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) count++; if (count == 0) { + IF_ADDR_UNLOCK(ifp); ray_com_runq_done(sc); return; } else if (count > 16) { ifp->if_flags |= IFF_ALLMULTI; + IF_ADDR_UNLOCK(ifp); ray_com_runq_done(sc); return; } else if (ifp->if_flags & IFF_ALLMULTI) ifp->if_flags &= ~IFF_ALLMULTI; /* * Kick the card */ ray_ccs_fill(sc, com->c_ccs, RAY_CMD_UPDATE_MCAST); SRAM_WRITE_FIELD_1(sc, com->c_ccs, ray_cmd_update_mcast, c_nmcast, count); bufp = RAY_HOST_TO_ECF_BASE; TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { SRAM_WRITE_REGION( sc, bufp, LLADDR((struct sockaddr_dl *)ifma->ifma_addr), ETHER_ADDR_LEN ); bufp += ETHER_ADDR_LEN; } + IF_ADDR_UNLOCK(ifp); ray_com_ecf(sc, com); } /* * Complete the multicast filter list update */ static void ray_mcast_done(struct ray_softc *sc, u_int8_t status, size_t ccs) { RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_STARTJOIN, ""); RAY_COM_CHECK(sc, ccs); RAY_CCSERR(sc, status, if_oerrors); /* XXX error counter */ ray_com_ecf_done(sc); } /* * Runq entry to set/reset promiscuous mode */ static void ray_promisc(struct ray_softc *sc, struct ray_comq_entry *com) { struct ifnet *ifp = sc->ifp; RAY_DPRINTF(sc, RAY_DBG_SUBR, ""); RAY_MAP_CM(sc); /* * If card not running or we already have the right flags * we don't need to update this */ sc->sc_d.np_promisc = !!(ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI)); if (!(ifp->if_flags & IFF_RUNNING) || (sc->sc_c.np_promisc == sc->sc_d.np_promisc)) { ray_com_runq_done(sc); return; } /* * Kick the card */ ray_ccs_fill(sc, com->c_ccs, RAY_CMD_UPDATE_PARAMS); SRAM_WRITE_FIELD_1(sc, com->c_ccs, ray_cmd_update, c_paramid, RAY_MIB_PROMISC); SRAM_WRITE_FIELD_1(sc, com->c_ccs, ray_cmd_update, c_nparam, 1); SRAM_WRITE_1(sc, 
 RAY_HOST_TO_ECF_BASE, sc->sc_d.np_promisc);

	ray_com_ecf(sc, com);
}

/*
 * User land entry to parameter reporting
 *
 * As we by pass the runq to report current parameters this function
 * only provides a snap shot of the driver's state.
 */
static int
ray_repparams_user(struct ray_softc *sc, struct ray_param_req *pr)
{
	struct ray_comq_entry *com[1];
	int error, ncom;

	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");

	/*
	 * Test for illegal values or immediate responses
	 */
	if (pr->r_paramid > RAY_MIB_MAX)
		return (EINVAL);
	if ((sc->sc_version == RAY_ECFS_BUILD_4) &&
	    !(mib_info[pr->r_paramid][0] & RAY_V4))
		return (EINVAL);
	if ((sc->sc_version == RAY_ECFS_BUILD_5) &&
	    !(mib_info[pr->r_paramid][0] & RAY_V5))
		return (EINVAL);
	if (pr->r_paramid > RAY_MIB_LASTUSER) {
		/* Pseudo-MIBs answered from the driver's cached state */
		switch (pr->r_paramid) {

		case RAY_MIB_VERSION:
			if (sc->sc_version == RAY_ECFS_BUILD_4)
				*pr->r_data = RAY_V4;
			else
				*pr->r_data = RAY_V5;
			break;

		case RAY_MIB_CUR_BSSID:
			bcopy(sc->sc_c.np_bss_id, pr->r_data, ETHER_ADDR_LEN);
			break;

		case RAY_MIB_CUR_INITED:
			*pr->r_data = sc->sc_c.np_inited;
			break;

		case RAY_MIB_CUR_DEF_TXRATE:
			*pr->r_data = sc->sc_c.np_def_txrate;
			break;

		case RAY_MIB_CUR_ENCRYPT:
			*pr->r_data = sc->sc_c.np_encrypt;
			break;

		case RAY_MIB_CUR_NET_TYPE:
			*pr->r_data = sc->sc_c.np_net_type;
			break;

		case RAY_MIB_CUR_SSID:
			bcopy(sc->sc_c.np_ssid, pr->r_data, IEEE80211_NWID_LEN);
			break;

		case RAY_MIB_CUR_PRIV_START:
			*pr->r_data = sc->sc_c.np_priv_start;
			break;

		case RAY_MIB_CUR_PRIV_JOIN:
			*pr->r_data = sc->sc_c.np_priv_join;
			break;

		case RAY_MIB_DES_BSSID:
			bcopy(sc->sc_d.np_bss_id, pr->r_data, ETHER_ADDR_LEN);
			break;

		case RAY_MIB_DES_INITED:
			*pr->r_data = sc->sc_d.np_inited;
			break;

		case RAY_MIB_DES_DEF_TXRATE:
			*pr->r_data = sc->sc_d.np_def_txrate;
			break;

		case RAY_MIB_DES_ENCRYPT:
			*pr->r_data = sc->sc_d.np_encrypt;
			break;

		case RAY_MIB_DES_NET_TYPE:
			*pr->r_data = sc->sc_d.np_net_type;
			break;

		case RAY_MIB_DES_SSID:
			bcopy(sc->sc_d.np_ssid, pr->r_data, IEEE80211_NWID_LEN);
			break;

		case RAY_MIB_DES_PRIV_START:
			*pr->r_data =
sc->sc_d.np_priv_start;
			break;

		case RAY_MIB_DES_PRIV_JOIN:
			*pr->r_data = sc->sc_d.np_priv_join;
			break;

		case RAY_MIB_CUR_AP_STATUS:
			*pr->r_data = sc->sc_c.np_ap_status;
			break;

		case RAY_MIB_CUR_PROMISC:
			*pr->r_data = sc->sc_c.np_promisc;
			break;

		case RAY_MIB_DES_AP_STATUS:
			*pr->r_data = sc->sc_d.np_ap_status;
			break;

		case RAY_MIB_DES_PROMISC:
			*pr->r_data = sc->sc_d.np_promisc;
			break;

		case RAY_MIB_CUR_FRAMING:
			*pr->r_data = sc->sc_c.np_framing;
			break;

		case RAY_MIB_DES_FRAMING:
			*pr->r_data = sc->sc_d.np_framing;
			break;

		default:
			return (EINVAL);
			break;
		}
		pr->r_failcause = 0;
		if (sc->sc_version == RAY_ECFS_BUILD_4)
			pr->r_len = mib_info[pr->r_paramid][RAY_MIB_INFO_SIZ4];
		else if (sc->sc_version == RAY_ECFS_BUILD_5)
			pr->r_len = mib_info[pr->r_paramid][RAY_MIB_INFO_SIZ5];
		return (0);
	}

	/* A real MIB - ask the card via the runq */
	pr->r_failcause = 0;
	ncom = 0;
	com[ncom++] = RAY_COM_MALLOC(ray_repparams, RAY_COM_FWOK);
	com[ncom-1]->c_pr = pr;

	RAY_COM_RUNQ(sc, com, ncom, "rayrparm", error);

	/* XXX no real error processing from anything yet! */
	if (!com[0]->c_retval && pr->r_failcause)
		error = EINVAL;

	RAY_COM_FREE(com, ncom);

	return (error);
}

/*
 * Runq entry to read the required parameter
 *
 * The card and driver are happy for parameters to be read
 * whenever the card is plugged in
 */
static void
ray_repparams(struct ray_softc *sc, struct ray_comq_entry *com)
{
	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");
	RAY_MAP_CM(sc);

	/*
	 * Kick the card
	 */
	ray_ccs_fill(sc, com->c_ccs, RAY_CMD_REPORT_PARAMS);

	SRAM_WRITE_FIELD_1(sc, com->c_ccs, ray_cmd_report, c_paramid,
	    com->c_pr->r_paramid);
	SRAM_WRITE_FIELD_1(sc, com->c_ccs, ray_cmd_report, c_nparam, 1);

	ray_com_ecf(sc, com);
}

/*
 * Complete the parameter reporting
 */
static void
ray_repparams_done(struct ray_softc *sc, u_int8_t status, size_t ccs)
{
	struct ray_comq_entry *com;

	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");
	RAY_MAP_CM(sc);
	RAY_COM_CHECK(sc, ccs);
	RAY_CCSERR(sc, status, if_oerrors); /* XXX error counter */

	com = TAILQ_FIRST(&sc->sc_comq);
	com->c_pr->r_failcause = SRAM_READ_FIELD_1(sc, ccs,
 ray_cmd_report, c_failcause);
	com->c_pr->r_len = SRAM_READ_FIELD_1(sc, ccs, ray_cmd_report, c_len);
	SRAM_READ_REGION(sc, RAY_ECF_TO_HOST_BASE, com->c_pr->r_data,
	    com->c_pr->r_len);

	ray_com_ecf_done(sc);
}

/*
 * User land entry (and exit) to the error counters
 */
static int
ray_repstats_user(struct ray_softc *sc, struct ray_stats_req *sr)
{
	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");

	sr->rxoverflow = sc->sc_rxoverflow;
	sr->rxcksum = sc->sc_rxcksum;
	sr->rxhcksum = sc->sc_rxhcksum;
	sr->rxnoise = sc->sc_rxnoise;

	return (0);
}

/*
 * User land entry to parameter update changes
 *
 * As a parameter change can cause the network parameters to be
 * invalid we have to re-start/join.
 */
static int
ray_upparams_user(struct ray_softc *sc, struct ray_param_req *pr)
{
	struct ray_comq_entry *com[4];
	int error, ncom, todo;
#define RAY_UPP_SJ	0x1
#define RAY_UPP_PARAMS	0x2

	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");

	/*
	 * Check that the parameter is available based on firmware version
	 */
	pr->r_failcause = 0;
	if (pr->r_paramid > RAY_MIB_LASTUSER)
		return (EINVAL);
	if ((sc->sc_version == RAY_ECFS_BUILD_4) &&
	    !(mib_info[pr->r_paramid][0] & RAY_V4))
		return (EINVAL);
	if ((sc->sc_version == RAY_ECFS_BUILD_5) &&
	    !(mib_info[pr->r_paramid][0] & RAY_V5))
		return (EINVAL);

	/*
	 * Handle certain parameters specially
	 */
	todo = 0;
	switch (pr->r_paramid) {
	case RAY_MIB_NET_TYPE:		/* Updated via START_NET JOIN_NET */
		sc->sc_d.np_net_type = *pr->r_data;
		todo |= RAY_UPP_SJ;
		break;

	case RAY_MIB_SSID:		/* Updated via START_NET JOIN_NET */
		bcopy(pr->r_data, sc->sc_d.np_ssid, IEEE80211_NWID_LEN);
		todo |= RAY_UPP_SJ;
		break;

	case RAY_MIB_PRIVACY_MUST_START:/* Updated via START_NET */
		if (sc->sc_c.np_net_type != RAY_MIB_NET_TYPE_ADHOC)
			return (EINVAL);
		sc->sc_d.np_priv_start = *pr->r_data;
		todo |= RAY_UPP_SJ;
		break;

	case RAY_MIB_PRIVACY_CAN_JOIN:	/* Updated via START_NET JOIN_NET */
		sc->sc_d.np_priv_join = *pr->r_data;
		todo |= RAY_UPP_SJ;
		break;

	case RAY_MIB_BASIC_RATE_SET:
		sc->sc_d.np_def_txrate = *pr->r_data;
		todo |= RAY_UPP_PARAMS;
		break;

	case RAY_MIB_AP_STATUS:	/* Unsupported */
	case RAY_MIB_MAC_ADDR:	/* XXX Need interface up but could be done */
	case RAY_MIB_PROMISC:	/* BPF */
		return (EINVAL);
		break;

	default:
		todo |= RAY_UPP_PARAMS;
		todo |= RAY_UPP_SJ;
		break;
	}

	/*
	 * Generate the runq entries as needed
	 */
	ncom = 0;
	if (todo & RAY_UPP_PARAMS) {
		com[ncom++] = RAY_COM_MALLOC(ray_upparams, 0);
		com[ncom-1]->c_pr = pr;
	}
	if (todo & RAY_UPP_SJ) {
		com[ncom++] = RAY_COM_MALLOC(ray_init_sj, 0);
		com[ncom++] = RAY_COM_MALLOC(ray_init_auth, 0);
		com[ncom++] = RAY_COM_MALLOC(ray_init_assoc, 0);
	}

	RAY_COM_RUNQ(sc, com, ncom, "rayuparam", error);

	/* XXX no real error processing from anything yet! */
	if (!com[0]->c_retval && pr->r_failcause)
		error = EINVAL;

	RAY_COM_FREE(com, ncom);

	return (error);
}

/*
 * Runq entry to update a parameter
 *
 * The card and driver are basically happy for parameters to be updated
 * whenever the card is plugged in. However, there may be a couple of
 * network hangs whilst the update is performed. Reading parameters back
 * straight away may give the wrong answer and some parameters cannot be
 * read at all. Local copies should be kept.
 */
static void
ray_upparams(struct ray_softc *sc, struct ray_comq_entry *com)
{
	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");
	RAY_MAP_CM(sc);

	ray_ccs_fill(sc, com->c_ccs, RAY_CMD_UPDATE_PARAMS);

	SRAM_WRITE_FIELD_1(sc, com->c_ccs, ray_cmd_update, c_paramid,
	    com->c_pr->r_paramid);
	SRAM_WRITE_FIELD_1(sc, com->c_ccs, ray_cmd_update, c_nparam, 1);
	SRAM_WRITE_REGION(sc, RAY_HOST_TO_ECF_BASE,
	    com->c_pr->r_data, com->c_pr->r_len);

	ray_com_ecf(sc, com);
}

/*
 * Complete the parameter update, note that promisc finishes up here too
 */
static void
ray_upparams_done(struct ray_softc *sc, u_int8_t status, size_t ccs)
{
	struct ray_comq_entry *com;

	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");
	RAY_MAP_CM(sc);
	RAY_COM_CHECK(sc, ccs);
	RAY_CCSERR(sc, status, if_oerrors); /* XXX error counter */

	com = TAILQ_FIRST(&sc->sc_comq);

	switch (SRAM_READ_FIELD_1(sc, ccs, ray_cmd_update, c_paramid)) {

	case RAY_MIB_PROMISC:
		/* Promisc has no c_pr; read the accepted value back */
		sc->sc_c.np_promisc = SRAM_READ_1(sc, RAY_HOST_TO_ECF_BASE);
		RAY_DPRINTF(sc, RAY_DBG_IOCTL,
		    "promisc value %d", sc->sc_c.np_promisc);
		break;

	default:
		com->c_pr->r_failcause =
		    SRAM_READ_FIELD_1(sc, ccs, ray_cmd_update, c_failcause);
		break;
	}

	ray_com_ecf_done(sc);
}

/*
 * Command queuing and execution
 */

/*
 * Set up a comq entry struct
 */
static struct ray_comq_entry *
ray_com_init(struct ray_comq_entry *com, ray_comqfn_t function, int flags, char *mesg)
{
	com->c_function = function;
	com->c_flags = flags;
	com->c_retval = 0;
	com->c_ccs = 0;
	com->c_wakeup = NULL;
	com->c_pr = NULL;
	com->c_mesg = mesg;

	return (com);
}

/*
 * Malloc and set up a comq entry struct
 */
static struct ray_comq_entry *
ray_com_malloc(ray_comqfn_t function, int flags, char *mesg)
{
	struct ray_comq_entry *com;

	MALLOC(com, struct ray_comq_entry *,
	    sizeof(struct ray_comq_entry), M_RAYCOM, M_WAITOK);

	return (ray_com_init(com, function, flags, mesg));
}

/*
 * Add an array of commands to the runq, get some ccs's for them and
 * then run, waiting on the last command.
 *
 * We add the commands to the queue first to preserve ioctl ordering.
 *
 * On recoverable errors, this routine removes the entries from the
 * runq. A caller can requeue the commands (and still preserve its own
 * processes ioctl ordering) but doesn't have to. When the card is
 * detached we get out quickly to prevent panics and don't bother
 * about the runq.
 */
static int
ray_com_runq_add(struct ray_softc *sc, struct ray_comq_entry *com[], int ncom, char *wmesg)
{
	int i, error;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_COM, "");

	error = 0;
	/*
	 * Add the commands to the runq but don't let it run until
	 * the ccs's are allocated successfully
	 */
	com[0]->c_flags |= RAY_COM_FWAIT;
	for (i = 0; i < ncom; i++) {
		/* All entries wake the caller via the last command */
		com[i]->c_wakeup = com[ncom-1];
		RAY_DPRINTF(sc, RAY_DBG_COM, "adding %p", com[i]);
		RAY_DCOM(sc, RAY_DBG_DCOM, com[i], "adding");
		TAILQ_INSERT_TAIL(&sc->sc_comq, com[i], c_chain);
	}
	com[ncom-1]->c_flags |= RAY_COM_FWOK;

	/*
	 * Allocate ccs's for each command.
	 */
	for (i = 0; i < ncom; i++) {
		error = ray_ccs_alloc(sc, &com[i]->c_ccs, wmesg);
		if (error == ENXIO)
			return (ENXIO);
		else if (error)
			goto cleanup;
	}

	/*
	 * Allow the queue to run and sleep if needed.
	 *
	 * Iff the FDETACHED flag is set in the com entry we waited on
	 * the driver is in a zombie state! The softc structure has been
	 * freed by the generic bus detach methods - eek. We tread very
	 * carefully!
	 */
	com[0]->c_flags &= ~RAY_COM_FWAIT;
	ray_com_runq(sc);
	if (TAILQ_FIRST(&sc->sc_comq) != NULL) {
		RAY_DPRINTF(sc, RAY_DBG_COM, "sleeping");
		error = tsleep(com[ncom-1], PCATCH | PRIBIO, wmesg, 0);
		if (com[ncom-1]->c_flags & RAY_COM_FDETACHED)
			return (ENXIO);
		RAY_DPRINTF(sc, RAY_DBG_COM,
		    "awakened, tsleep returned 0x%x", error);
	} else
		error = 0;

cleanup:
	/*
	 * Only clean the queue on real errors - we don't care about it
	 * when we detach as the queue entries are freed by the callers.
 */
	if (error && (error != ENXIO))
		for (i = 0; i < ncom; i++)
			if (!(com[i]->c_flags & RAY_COM_FCOMPLETED)) {
				RAY_DPRINTF(sc, RAY_DBG_COM,
				    "removing %p", com[i]);
				RAY_DCOM(sc, RAY_DBG_DCOM, com[i], "removing");
				TAILQ_REMOVE(&sc->sc_comq, com[i], c_chain);
				ray_ccs_free(sc, com[i]->c_ccs);
				com[i]->c_ccs = 0;
			}

	return (error);
}

/*
 * Run the command at the head of the queue (if not already running)
 */
static void
ray_com_runq(struct ray_softc *sc)
{
	struct ray_comq_entry *com;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_COM, "");

	com = TAILQ_FIRST(&sc->sc_comq);
	if ((com == NULL) ||
	    (com->c_flags & RAY_COM_FRUNNING) ||
	    (com->c_flags & RAY_COM_FWAIT) ||
	    (com->c_flags & RAY_COM_FDETACHED))
		return;

	com->c_flags |= RAY_COM_FRUNNING;
	RAY_DPRINTF(sc, RAY_DBG_COM, "running %p", com);
	RAY_DCOM(sc, RAY_DBG_DCOM, com, "running");
	com->c_function(sc, com);
}

/*
 * Remove run command, free ccs and wakeup caller.
 *
 * Minimal checks are done here as we ensure that the com and command
 * handler were matched up earlier. Must be called at splnet or higher
 * so that entries on the command queue are correctly removed.
 *
 * Remove the com from the comq, and wakeup the caller if it requested
 * to be woken. This is used for ensuring a sequence of commands
 * completes. Finally, re-run the queue.
 */
static void
ray_com_runq_done(struct ray_softc *sc)
{
	struct ray_comq_entry *com;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_COM, "");

	com = TAILQ_FIRST(&sc->sc_comq); /* XXX shall we check this as below */
	RAY_DPRINTF(sc, RAY_DBG_COM, "removing %p", com);
	RAY_DCOM(sc, RAY_DBG_DCOM, com, "removing");
	TAILQ_REMOVE(&sc->sc_comq, com, c_chain);

	com->c_flags &= ~RAY_COM_FRUNNING;
	com->c_flags |= RAY_COM_FCOMPLETED;
	com->c_retval = 0;
	ray_ccs_free(sc, com->c_ccs);
	com->c_ccs = 0;

	if (com->c_flags & RAY_COM_FWOK)
		wakeup(com->c_wakeup);

	ray_com_runq(sc);

	/* XXX what about error on completion then?
deal with when i fix * XXX the status checking * * XXX all the runq_done calls from IFF_RUNNING checks in runq * XXX routines should return EIO but shouldn't abort the runq */ } /* * Send a command to the ECF. */ static void ray_com_ecf(struct ray_softc *sc, struct ray_comq_entry *com) { int i = 0; RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_COM, ""); RAY_MAP_CM(sc); while (!RAY_ECF_READY(sc)) { DELAY(RAY_ECF_SPIN_DELAY); if (++i > RAY_ECF_SPIN_TRIES) RAY_PANIC(sc, "spun too long"); } if (i != 0) RAY_RECERR(sc, "spun %d times", i); RAY_DPRINTF(sc, RAY_DBG_COM, "sending %p", com); RAY_DCOM(sc, RAY_DBG_DCOM, com, "sending"); SRAM_WRITE_1(sc, RAY_SCB_CCSI, RAY_CCS_INDEX(com->c_ccs)); RAY_ECF_START_CMD(sc); if (RAY_COM_NEEDS_TIMO( SRAM_READ_FIELD_1(sc, com->c_ccs, ray_cmd, c_cmd))) { RAY_DPRINTF(sc, RAY_DBG_COM, "adding timeout"); sc->com_timerh = timeout(ray_com_ecf_timo, sc, RAY_COM_TIMEOUT); } } /* * Deal with commands that require a timeout to test completion. * * This routine is coded to only expect one outstanding request for the * timed out requests at a time, but thats all that can be outstanding * per hardware limitations and all that we issue anyway. * * We don't do any fancy testing of the command currently issued as we * know it must be a timeout based one...unless I've got this wrong! 
 */
static void
ray_com_ecf_timo(void *xsc)
{
	struct ray_softc *sc = (struct ray_softc *)xsc;
	struct ray_comq_entry *com;
	u_int8_t cmd, status;
	int s;

	s = splnet();
	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_COM, "");
	RAY_MAP_CM(sc);

	com = TAILQ_FIRST(&sc->sc_comq);
	cmd = SRAM_READ_FIELD_1(sc, com->c_ccs, ray_cmd, c_cmd);
	status = SRAM_READ_FIELD_1(sc, com->c_ccs, ray_cmd, c_status);

	switch (status) {
	case RAY_CCS_STATUS_COMPLETE:
	case RAY_CCS_STATUS_FREE: /* Buggy firmware */
		ray_intr_ccs(sc, cmd, status, com->c_ccs);
		break;
	case RAY_CCS_STATUS_BUSY:
		/* still running - re-arm the timeout and check again later */
		sc->com_timerh = timeout(ray_com_ecf_timo, sc, RAY_COM_TIMEOUT);
		break;
	default: /* Replicates NetBSD */
		if (sc->sc_ccsinuse[RAY_CCS_INDEX(com->c_ccs)] == 1) {
			/* give a chance for the interrupt to occur */
			sc->sc_ccsinuse[RAY_CCS_INDEX(com->c_ccs)] = 2;
			sc->com_timerh = timeout(ray_com_ecf_timo, sc,
			    RAY_COM_TIMEOUT);
		} else
			ray_intr_ccs(sc, cmd, status, com->c_ccs);
		break;
	}

	splx(s);
}

/*
 * Called when interrupt handler for the command has done all it
 * needs to. Will be called at splnet.
 */
static void
ray_com_ecf_done(struct ray_softc *sc)
{
	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_COM, "");

	/* the command completed via interrupt, so cancel the watchdog */
	untimeout(ray_com_ecf_timo, sc, sc->com_timerh);

	ray_com_runq_done(sc);
}

#if RAY_DEBUG & RAY_DBG_COM
/*
 * Process completed ECF commands that probably came from the command queue
 *
 * This routine is called after vectoring the completed ECF command
 * to the appropriate _done routine. It helps check everything is okay.
 */
static void
ray_com_ecf_check(struct ray_softc *sc, size_t ccs, char *mesg)
{
	struct ray_comq_entry *com;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_COM, "%s", mesg);

	com = TAILQ_FIRST(&sc->sc_comq);
	if (com == NULL)
		RAY_PANIC(sc, "no command queue");
	if (com->c_ccs != ccs)
		RAY_PANIC(sc, "ccs's don't match");
}
#endif /* RAY_DEBUG & RAY_DBG_COM */

/*
 * CCS allocators
 */

/*
 * Obtain a ccs for a command
 *
 * Returns 0 and in `ccsp' the bus offset of the free ccs. Will block
 * awaiting free ccs if needed - if the sleep is interrupted
 * EINTR/ERESTART is returned, if the card is ejected we return ENXIO.
 */
static int
ray_ccs_alloc(struct ray_softc *sc, size_t *ccsp, char *wmesg)
{
	size_t ccs;
	u_int i;
	int error;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_CCS, "");
	RAY_MAP_CM(sc);

	for (;;) {
		for (i = RAY_CCS_CMD_FIRST; i <= RAY_CCS_CMD_LAST; i++) {
			/* we probe here to make the card go */
			(void)SRAM_READ_FIELD_1(sc, RAY_CCS_ADDRESS(i),
			    ray_cmd, c_status);
			if (!sc->sc_ccsinuse[i])
				break;
		}
		if (i > RAY_CCS_CMD_LAST) {
			/* none free - sleep until ray_ccs_free wakes us */
			RAY_DPRINTF(sc, RAY_DBG_CCS, "sleeping");
			error = tsleep(ray_ccs_alloc, PCATCH | PRIBIO,
			    wmesg, 0);
			if ((sc == NULL) || (sc->sc_gone))
				return (ENXIO);
			RAY_DPRINTF(sc, RAY_DBG_CCS,
			    "awakened, tsleep returned 0x%x", error);
			if (error)
				return (error);
		} else
			break;
	}
	RAY_DPRINTF(sc, RAY_DBG_CCS, "allocated 0x%02x", i);
	sc->sc_ccsinuse[i] = 1;
	ccs = RAY_CCS_ADDRESS(i);
	*ccsp = ccs;

	return (0);
}

/*
 * Fill the easy bits in of a pre-allocated CCS
 */
static void
ray_ccs_fill(struct ray_softc *sc, size_t ccs, u_int cmd)
{
	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_CCS, "");
	RAY_MAP_CM(sc);

	if (ccs == 0)
		RAY_PANIC(sc, "ccs not allocated");

	SRAM_WRITE_FIELD_1(sc, ccs, ray_cmd, c_status, RAY_CCS_STATUS_BUSY);
	SRAM_WRITE_FIELD_1(sc, ccs, ray_cmd, c_cmd, cmd);
	SRAM_WRITE_FIELD_1(sc, ccs, ray_cmd, c_link, RAY_CCS_LINK_NULL);
}

/*
 * Free up a ccs allocated via ray_ccs_alloc
 *
 * Return the old status. This routine is only used for ccs allocated via
 * ray_ccs_alloc (not tx, rx or ECF command requests).
 */
static void
ray_ccs_free(struct ray_softc *sc, size_t ccs)
{
	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_CCS, "");
	RAY_MAP_CM(sc);

#if 1 | (RAY_DEBUG & RAY_DBG_CCS)
	if (!sc->sc_ccsinuse[RAY_CCS_INDEX(ccs)])
		RAY_RECERR(sc, "freeing free ccs 0x%02zx", RAY_CCS_INDEX(ccs));
#endif /* RAY_DEBUG & RAY_DBG_CCS */
	/* don't touch the hardware if the card has been ejected */
	if (!sc->sc_gone)
		RAY_CCS_FREE(sc, ccs);
	sc->sc_ccsinuse[RAY_CCS_INDEX(ccs)] = 0;
	RAY_DPRINTF(sc, RAY_DBG_CCS, "freed 0x%02zx", RAY_CCS_INDEX(ccs));
	/* wake anyone blocked in ray_ccs_alloc waiting for a free ccs */
	wakeup(ray_ccs_alloc);
}

/*
 * Obtain a ccs and tx buffer to transmit with and fill them in.
 *
 * Returns 0 and in `ccsp' the bus offset of the free ccs. Will not block
 * and if none available and will returns EAGAIN.
 *
 * The caller must fill in the length later.
 * The caller must clear the ccs on errors.
 */
static int
ray_ccs_tx(struct ray_softc *sc, size_t *ccsp, size_t *bufpp)
{
	size_t ccs, bufp;
	int i;
	u_int8_t status;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_CCS, "");
	RAY_MAP_CM(sc);

	i = RAY_CCS_TX_FIRST;
	do {
		status = SRAM_READ_FIELD_1(sc, RAY_CCS_ADDRESS(i),
		    ray_cmd, c_status);
		if (status == RAY_CCS_STATUS_FREE)
			break;
		i++;
	} while (i <= RAY_CCS_TX_LAST);
	if (i > RAY_CCS_TX_LAST) {
		return (EAGAIN);
	}
	RAY_DPRINTF(sc, RAY_DBG_CCS, "allocated 0x%02x", i);

	/*
	 * Reserve and fill the ccs - must do the length later.
	 *
	 * Even though build 4 and build 5 have different fields all these
	 * are common apart from tx_rate. Neither the NetBSD driver or Linux
	 * driver bother to overwrite this for build 4 cards.
	 *
	 * The start of the buffer must be aligned to a 256 byte boundary
	 * (least significant byte of address = 0x00).
	 */
	ccs = RAY_CCS_ADDRESS(i);
	bufp = RAY_TX_BASE + i * RAY_TX_BUF_SIZE;
	bufp += sc->sc_tibsize;
	SRAM_WRITE_FIELD_1(sc, ccs, ray_cmd_tx, c_status, RAY_CCS_STATUS_BUSY);
	SRAM_WRITE_FIELD_1(sc, ccs, ray_cmd_tx, c_cmd, RAY_CMD_TX_REQ);
	SRAM_WRITE_FIELD_1(sc, ccs, ray_cmd_tx, c_link, RAY_CCS_LINK_NULL);
	SRAM_WRITE_FIELD_2(sc, ccs, ray_cmd_tx, c_bufp, bufp);
	SRAM_WRITE_FIELD_1(sc, ccs,
	    ray_cmd_tx, c_tx_rate, sc->sc_c.np_def_txrate);
	SRAM_WRITE_FIELD_1(sc, ccs, ray_cmd_tx, c_apm_mode, 0);
	bufp += sizeof(struct ray_tx_phy_header);
	*ccsp = ccs;
	*bufpp = bufp;
	return (0);
}

/*
 * Routines to obtain resources for the card
 */

/*
 * Allocate the attribute memory on the card
 *
 * The attribute memory space is abused by these devices as IO space. As such
 * the OS card services don't have a chance of knowing that they need to keep
 * the attribute space mapped. We have to do it manually.
 */
static int
ray_res_alloc_am(struct ray_softc *sc)
{
	int error;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_CM, "");

	sc->am_rid = RAY_AM_RID;
	sc->am_res = bus_alloc_resource(sc->dev, SYS_RES_MEMORY,
	    &sc->am_rid, 0UL, ~0UL, 0x1000, RF_ACTIVE);
	if (!sc->am_res) {
		RAY_PRINTF(sc, "Cannot allocate attribute memory");
		return (ENOMEM);
	}
	error = CARD_SET_MEMORY_OFFSET(device_get_parent(sc->dev),
	    sc->dev, sc->am_rid, 0, NULL);
	if (error) {
		RAY_PRINTF(sc, "CARD_SET_MEMORY_OFFSET returned 0x%0x", error);
		return (error);
	}
	/* mark the window as attribute memory... */
	error = CARD_SET_RES_FLAGS(device_get_parent(sc->dev), sc->dev,
	    SYS_RES_MEMORY, sc->am_rid, PCCARD_A_MEM_ATTR);
	if (error) {
		RAY_PRINTF(sc, "CARD_SET_RES_FLAGS returned 0x%0x", error);
		return (error);
	}
	/* ...accessed 8 bits at a time */
	error = CARD_SET_RES_FLAGS(device_get_parent(sc->dev), sc->dev,
	    SYS_RES_MEMORY, sc->am_rid, PCCARD_A_MEM_8BIT);
	if (error) {
		RAY_PRINTF(sc, "CARD_SET_RES_FLAGS returned 0x%0x", error);
		return (error);
	}
	sc->am_bsh = rman_get_bushandle(sc->am_res);
	sc->am_bst = rman_get_bustag(sc->am_res);
#if RAY_DEBUG & (RAY_DBG_CM | RAY_DBG_BOOTPARAM)
    {
	u_long flags;
	u_int32_t offset;
	CARD_GET_RES_FLAGS(device_get_parent(sc->dev), sc->dev,
	    SYS_RES_MEMORY, sc->am_rid, &flags);
	CARD_GET_MEMORY_OFFSET(device_get_parent(sc->dev), sc->dev,
	    sc->am_rid, &offset);
	RAY_PRINTF(sc, "allocated attribute memory:\n"
	    ". start 0x%0lx count 0x%0lx flags 0x%0lx offset 0x%0x",
	    bus_get_resource_start(sc->dev, SYS_RES_MEMORY, sc->am_rid),
	    bus_get_resource_count(sc->dev, SYS_RES_MEMORY, sc->am_rid),
	    flags, offset);
    }
#endif /* RAY_DEBUG & (RAY_DBG_CM | RAY_DBG_BOOTPARAM) */

	return (0);
}

/*
 * Allocate the common memory on the card
 *
 * As this memory is described in the CIS, the OS card services should
 * have set the map up okay, but the card uses 8 bit RAM. This is not
 * described in the CIS.
 */
static int
ray_res_alloc_cm(struct ray_softc *sc)
{
	u_long start, count, end;
	int error;

	RAY_DPRINTF(sc, RAY_DBG_SUBR | RAY_DBG_CM, "");

	RAY_DPRINTF(sc,RAY_DBG_CM | RAY_DBG_BOOTPARAM,
	    "cm start 0x%0lx count 0x%0lx",
	    bus_get_resource_start(sc->dev, SYS_RES_MEMORY, RAY_CM_RID),
	    bus_get_resource_count(sc->dev, SYS_RES_MEMORY, RAY_CM_RID));

	sc->cm_rid = RAY_CM_RID;
	start = bus_get_resource_start(sc->dev, SYS_RES_MEMORY, sc->cm_rid);
	count = bus_get_resource_count(sc->dev, SYS_RES_MEMORY, sc->cm_rid);
	end = start + count - 1;
	sc->cm_res = bus_alloc_resource(sc->dev, SYS_RES_MEMORY,
	    &sc->cm_rid, start, end, count, RF_ACTIVE);
	if (!sc->cm_res) {
		RAY_PRINTF(sc, "Cannot allocate common memory");
		return (ENOMEM);
	}
	error = CARD_SET_MEMORY_OFFSET(device_get_parent(sc->dev),
	    sc->dev, sc->cm_rid, 0, NULL);
	if (error) {
		RAY_PRINTF(sc, "CARD_SET_MEMORY_OFFSET returned 0x%0x", error);
		return (error);
	}
	error = CARD_SET_RES_FLAGS(device_get_parent(sc->dev), sc->dev,
	    SYS_RES_MEMORY, sc->cm_rid, PCCARD_A_MEM_COM);
	if (error) {
		RAY_PRINTF(sc, "CARD_SET_RES_FLAGS returned 0x%0x", error);
		return (error);
	}
	/* the CIS doesn't say so, but the RAM is 8 bit only */
	error = CARD_SET_RES_FLAGS(device_get_parent(sc->dev), sc->dev,
	    SYS_RES_MEMORY, sc->cm_rid, PCCARD_A_MEM_8BIT);
	if (error) {
		RAY_PRINTF(sc, "CARD_SET_RES_FLAGS returned 0x%0x", error);
		return (error);
	}
	sc->cm_bsh = rman_get_bushandle(sc->cm_res);
	sc->cm_bst = rman_get_bustag(sc->cm_res);
#if RAY_DEBUG & (RAY_DBG_CM | RAY_DBG_BOOTPARAM)
    {
	u_long flags;
	u_int32_t offset;
	CARD_GET_RES_FLAGS(device_get_parent(sc->dev), sc->dev,
	    SYS_RES_MEMORY, sc->cm_rid, &flags);
	CARD_GET_MEMORY_OFFSET(device_get_parent(sc->dev), sc->dev,
	    sc->cm_rid, &offset);
	RAY_PRINTF(sc, "allocated common memory:\n"
	    ". start 0x%0lx count 0x%0lx flags 0x%0lx offset 0x%0x",
	    bus_get_resource_start(sc->dev, SYS_RES_MEMORY, sc->cm_rid),
	    bus_get_resource_count(sc->dev, SYS_RES_MEMORY, sc->cm_rid),
	    flags, offset);
    }
#endif /* RAY_DEBUG & (RAY_DBG_CM | RAY_DBG_BOOTPARAM) */

	return (0);
}

/*
 * Get an irq and attach it to the bus
 */
static int
ray_res_alloc_irq(struct ray_softc *sc)
{
	int error;

	RAY_DPRINTF(sc, RAY_DBG_SUBR, "");

	RAY_DPRINTF(sc,RAY_DBG_CM | RAY_DBG_BOOTPARAM,
	    "irq start 0x%0lx count 0x%0lx",
	    bus_get_resource_start(sc->dev, SYS_RES_IRQ, 0),
	    bus_get_resource_count(sc->dev, SYS_RES_IRQ, 0));

	sc->irq_rid = 0;
	sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
	    &sc->irq_rid, RF_ACTIVE);
	if (!sc->irq_res) {
		RAY_PRINTF(sc, "Cannot allocate irq");
		return (ENOMEM);
	}
	if ((error = bus_setup_intr(sc->dev, sc->irq_res, INTR_TYPE_NET,
	    ray_intr, sc, &sc->irq_handle)) != 0) {
		RAY_PRINTF(sc, "Failed to setup irq");
		return (error);
	}
	RAY_DPRINTF(sc, RAY_DBG_CM | RAY_DBG_BOOTPARAM, "allocated irq:\n"
	    ". start 0x%0lx count 0x%0lx",
	    bus_get_resource_start(sc->dev, SYS_RES_IRQ, sc->irq_rid),
	    bus_get_resource_count(sc->dev, SYS_RES_IRQ, sc->irq_rid));

	return (0);
}

/*
 * Release all of the card's resources
 */
static void
ray_res_release(struct ray_softc *sc)
{
	if (sc->irq_res != 0) {
		/* tear the handler down before releasing the irq itself */
		bus_teardown_intr(sc->dev, sc->irq_res, sc->irq_handle);
		bus_release_resource(sc->dev, SYS_RES_IRQ,
		    sc->irq_rid, sc->irq_res);
		sc->irq_res = 0;
	}
	if (sc->am_res != 0) {
		bus_release_resource(sc->dev, SYS_RES_MEMORY,
		    sc->am_rid, sc->am_res);
		sc->am_res = 0;
	}
	if (sc->cm_res != 0) {
		bus_release_resource(sc->dev, SYS_RES_MEMORY,
		    sc->cm_rid, sc->cm_res);
		sc->cm_res = 0;
	}
}

/*
 * mbuf dump
 */
#if RAY_DEBUG & RAY_DBG_MBUF
static void
ray_dump_mbuf(struct ray_softc *sc, struct mbuf *m, char *s)
{
	u_int8_t *d, *ed;
	u_int i;
	char p[17];

	RAY_PRINTF(sc, "%s", s);
	RAY_PRINTF(sc, "\nm0->data\t0x%p\nm_pkthdr.len\t%d\nm_len\t%d",
	    mtod(m, u_int8_t *), m->m_pkthdr.len, m->m_len);
	i = 0;
	bzero(p, 17);
	/* hex dump every mbuf in the chain, 16 bytes per row */
	for (; m; m = m->m_next) {
		d = mtod(m, u_int8_t *);
		ed = d + m->m_len;
		for (; d < ed; i++, d++) {
			if ((i % 16) == 0) {
				printf(" %s\n\t", p);
			} else if ((i % 8) == 0)
				printf(" ");
			printf(" %02x", *d);
			/* build printable-ASCII column for this row */
			p[i % 16] = ((*d >= 0x20) && (*d < 0x80)) ? *d : '.';
		}
	}
	if ((i - 1) % 16)
		printf(" %s\n", p);
}
#endif /* RAY_DEBUG & RAY_DBG_MBUF */
Index: stable/6/sys/dev/re/if_re.c
===================================================================
--- stable/6/sys/dev/re/if_re.c (revision 149421)
+++ stable/6/sys/dev/re/if_re.c (revision 149422)
@@ -1,2492 +1,2494 @@
/*-
 * Copyright (c) 1997, 1998-2003
 * Bill Paul . All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2.
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * RealTek 8139C+/8169/8169S/8110S PCI NIC driver * * Written by Bill Paul * Senior Networking Software Engineer * Wind River Systems */ /* * This driver is designed to support RealTek's next generation of * 10/100 and 10/100/1000 PCI ethernet controllers. There are currently * four devices in this family: the RTL8139C+, the RTL8169, the RTL8169S * and the RTL8110S. * * The 8139C+ is a 10/100 ethernet chip. It is backwards compatible * with the older 8139 family, however it also supports a special * C+ mode of operation that provides several new performance enhancing * features. 
These include: * * o Descriptor based DMA mechanism. Each descriptor represents * a single packet fragment. Data buffers may be aligned on * any byte boundary. * * o 64-bit DMA * * o TCP/IP checksum offload for both RX and TX * * o High and normal priority transmit DMA rings * * o VLAN tag insertion and extraction * * o TCP large send (segmentation offload) * * Like the 8139, the 8139C+ also has a built-in 10/100 PHY. The C+ * programming API is fairly straightforward. The RX filtering, EEPROM * access and PHY access is the same as it is on the older 8139 series * chips. * * The 8169 is a 64-bit 10/100/1000 gigabit ethernet MAC. It has almost the * same programming API and feature set as the 8139C+ with the following * differences and additions: * * o 1000Mbps mode * * o Jumbo frames * * o GMII and TBI ports/registers for interfacing with copper * or fiber PHYs * * o RX and TX DMA rings can have up to 1024 descriptors * (the 8139C+ allows a maximum of 64) * * o Slight differences in register layout from the 8139C+ * * The TX start and timer interrupt registers are at different locations * on the 8169 than they are on the 8139C+. Also, the status word in the * RX descriptor has a slightly different bit layout. The 8169 does not * have a built-in PHY. Most reference boards use a Marvell 88E1000 'Alaska' * copper gigE PHY. * * The 8169S/8110S 10/100/1000 devices have built-in copper gigE PHYs * (the 'S' stands for 'single-chip'). These devices have the same * programming API as the older 8169, but also have some vendor-specific * registers for the on-board PHY. The 8110S is a LAN-on-motherboard * part designed to be pin-compatible with the RealTek 8100 10/100 chip. * * This driver takes advantage of the RX and TX checksum offload and * VLAN tag insertion/extraction features. It also implements TX * interrupt moderation using the timer interrupt registers, which * significantly reduces TX interrupt load. 
There is also support * for jumbo frames, however the 8169/8169S/8110S can not transmit * jumbo frames larger than 7440, so the max MTU possible with this * driver is 7422 bytes. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(re, pci, 1, 1, 1); MODULE_DEPEND(re, ether, 1, 1, 1); MODULE_DEPEND(re, miibus, 1, 1, 1); /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* * Default to using PIO access for this driver. */ #define RE_USEIOSPACE #include #define RE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) /* * Various supported device vendors/types and their names. */ static struct rl_type re_devs[] = { { DLINK_VENDORID, DLINK_DEVICEID_528T, RL_HWREV_8169S, "D-Link DGE-528(T) Gigabit Ethernet Adapter" }, { RT_VENDORID, RT_DEVICEID_8139, RL_HWREV_8139CPLUS, "RealTek 8139C+ 10/100BaseTX" }, { RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8169, "RealTek 8169 Gigabit Ethernet" }, { RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8169S, "RealTek 8169S Single-chip Gigabit Ethernet" }, { RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8169SB, "RealTek 8169SB Single-chip Gigabit Ethernet" }, { RT_VENDORID, RT_DEVICEID_8169, RL_HWREV_8110S, "RealTek 8110S Single-chip Gigabit Ethernet" }, { COREGA_VENDORID, COREGA_DEVICEID_CGLAPCIGT, RL_HWREV_8169S, "Corega CG-LAPCIGT (RTL8169S) Gigabit Ethernet" }, { 0, 0, 0, NULL } }; static struct rl_hwrev re_hwrevs[] = { { RL_HWREV_8139, RL_8139, "" }, { RL_HWREV_8139A, RL_8139, "A" }, { RL_HWREV_8139AG, RL_8139, "A-G" }, { RL_HWREV_8139B, RL_8139, "B" }, { RL_HWREV_8130, RL_8139, "8130" }, { RL_HWREV_8139C, RL_8139, "C" }, { RL_HWREV_8139D, RL_8139, "8139D/8100B/8100C" }, { RL_HWREV_8139CPLUS, RL_8139CPLUS, "C+"}, { RL_HWREV_8169, RL_8169, "8169"}, { RL_HWREV_8169S, RL_8169, "8169S"}, { RL_HWREV_8169SB, RL_8169, "8169SB"}, { 
RL_HWREV_8110S, RL_8169, "8110S"}, { RL_HWREV_8100, RL_8139, "8100"}, { RL_HWREV_8101, RL_8139, "8101"}, { 0, 0, NULL } }; static int re_probe (device_t); static int re_attach (device_t); static int re_detach (device_t); static int re_encap (struct rl_softc *, struct mbuf **, int *); static void re_dma_map_addr (void *, bus_dma_segment_t *, int, int); static void re_dma_map_desc (void *, bus_dma_segment_t *, int, bus_size_t, int); static int re_allocmem (device_t, struct rl_softc *); static int re_newbuf (struct rl_softc *, int, struct mbuf *); static int re_rx_list_init (struct rl_softc *); static int re_tx_list_init (struct rl_softc *); #ifdef RE_FIXUP_RX static __inline void re_fixup_rx (struct mbuf *); #endif static void re_rxeof (struct rl_softc *); static void re_txeof (struct rl_softc *); #ifdef DEVICE_POLLING static void re_poll (struct ifnet *, enum poll_cmd, int); static void re_poll_locked (struct ifnet *, enum poll_cmd, int); #endif static void re_intr (void *); static void re_tick (void *); static void re_tick_locked (struct rl_softc *); static void re_start (struct ifnet *); static void re_start_locked (struct ifnet *); static int re_ioctl (struct ifnet *, u_long, caddr_t); static void re_init (void *); static void re_init_locked (struct rl_softc *); static void re_stop (struct rl_softc *); static void re_watchdog (struct ifnet *); static int re_suspend (device_t); static int re_resume (device_t); static void re_shutdown (device_t); static int re_ifmedia_upd (struct ifnet *); static void re_ifmedia_sts (struct ifnet *, struct ifmediareq *); static void re_eeprom_putbyte (struct rl_softc *, int); static void re_eeprom_getword (struct rl_softc *, int, u_int16_t *); static void re_read_eeprom (struct rl_softc *, caddr_t, int, int, int); static int re_gmii_readreg (device_t, int, int); static int re_gmii_writereg (device_t, int, int, int); static int re_miibus_readreg (device_t, int, int); static int re_miibus_writereg (device_t, int, int, int); static 
void re_miibus_statchg (device_t); static void re_setmulti (struct rl_softc *); static void re_reset (struct rl_softc *); static int re_diag (struct rl_softc *); #ifdef RE_USEIOSPACE #define RL_RES SYS_RES_IOPORT #define RL_RID RL_PCI_LOIO #else #define RL_RES SYS_RES_MEMORY #define RL_RID RL_PCI_LOMEM #endif static device_method_t re_methods[] = { /* Device interface */ DEVMETHOD(device_probe, re_probe), DEVMETHOD(device_attach, re_attach), DEVMETHOD(device_detach, re_detach), DEVMETHOD(device_suspend, re_suspend), DEVMETHOD(device_resume, re_resume), DEVMETHOD(device_shutdown, re_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, re_miibus_readreg), DEVMETHOD(miibus_writereg, re_miibus_writereg), DEVMETHOD(miibus_statchg, re_miibus_statchg), { 0, 0 } }; static driver_t re_driver = { "re", re_methods, sizeof(struct rl_softc) }; static devclass_t re_devclass; DRIVER_MODULE(re, pci, re_driver, re_devclass, 0, 0); DRIVER_MODULE(re, cardbus, re_driver, re_devclass, 0, 0); DRIVER_MODULE(miibus, re, miibus_driver, miibus_devclass, 0, 0); #define EE_SET(x) \ CSR_WRITE_1(sc, RL_EECMD, \ CSR_READ_1(sc, RL_EECMD) | x) #define EE_CLR(x) \ CSR_WRITE_1(sc, RL_EECMD, \ CSR_READ_1(sc, RL_EECMD) & ~x) /* * Send a read command and address to the EEPROM, check for ACK. */ static void re_eeprom_putbyte(sc, addr) struct rl_softc *sc; int addr; { register int d, i; d = addr | sc->rl_eecmd_read; /* * Feed in each bit and strobe the clock. */ for (i = 0x400; i; i >>= 1) { if (d & i) { EE_SET(RL_EE_DATAIN); } else { EE_CLR(RL_EE_DATAIN); } DELAY(100); EE_SET(RL_EE_CLK); DELAY(150); EE_CLR(RL_EE_CLK); DELAY(100); } } /* * Read a word of data stored in the EEPROM at address 'addr.' */ static void re_eeprom_getword(sc, addr, dest) struct rl_softc *sc; int addr; u_int16_t *dest; { register int i; u_int16_t word = 0; /* Enter EEPROM access mode. 
*/ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL); /* * Send address of word we want to read. */ re_eeprom_putbyte(sc, addr); CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL); /* * Start reading bits from EEPROM. */ for (i = 0x8000; i; i >>= 1) { EE_SET(RL_EE_CLK); DELAY(100); if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT) word |= i; EE_CLR(RL_EE_CLK); DELAY(100); } /* Turn off EEPROM access mode. */ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); *dest = word; } /* * Read a sequence of words from the EEPROM. */ static void re_read_eeprom(sc, dest, off, cnt, swap) struct rl_softc *sc; caddr_t dest; int off; int cnt; int swap; { int i; u_int16_t word = 0, *ptr; for (i = 0; i < cnt; i++) { re_eeprom_getword(sc, off + i, &word); ptr = (u_int16_t *)(dest + (i * 2)); if (swap) *ptr = ntohs(word); else *ptr = word; } } static int re_gmii_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct rl_softc *sc; u_int32_t rval; int i; if (phy != 1) return (0); sc = device_get_softc(dev); /* Let the rgephy driver read the GMEDIASTAT register */ if (reg == RL_GMEDIASTAT) { rval = CSR_READ_1(sc, RL_GMEDIASTAT); return (rval); } CSR_WRITE_4(sc, RL_PHYAR, reg << 16); DELAY(1000); for (i = 0; i < RL_TIMEOUT; i++) { rval = CSR_READ_4(sc, RL_PHYAR); if (rval & RL_PHYAR_BUSY) break; DELAY(100); } if (i == RL_TIMEOUT) { printf ("re%d: PHY read failed\n", sc->rl_unit); return (0); } return (rval & RL_PHYAR_PHYDATA); } static int re_gmii_writereg(dev, phy, reg, data) device_t dev; int phy, reg, data; { struct rl_softc *sc; u_int32_t rval; int i; sc = device_get_softc(dev); CSR_WRITE_4(sc, RL_PHYAR, (reg << 16) | (data & RL_PHYAR_PHYDATA) | RL_PHYAR_BUSY); DELAY(1000); for (i = 0; i < RL_TIMEOUT; i++) { rval = CSR_READ_4(sc, RL_PHYAR); if (!(rval & RL_PHYAR_BUSY)) break; DELAY(100); } if (i == RL_TIMEOUT) { printf ("re%d: PHY write failed\n", sc->rl_unit); return (0); } return (0); } static int re_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct rl_softc *sc; 
u_int16_t rval = 0; u_int16_t re8139_reg = 0; sc = device_get_softc(dev); if (sc->rl_type == RL_8169) { rval = re_gmii_readreg(dev, phy, reg); return (rval); } /* Pretend the internal PHY is only at address 0 */ if (phy) { return (0); } switch (reg) { case MII_BMCR: re8139_reg = RL_BMCR; break; case MII_BMSR: re8139_reg = RL_BMSR; break; case MII_ANAR: re8139_reg = RL_ANAR; break; case MII_ANER: re8139_reg = RL_ANER; break; case MII_ANLPAR: re8139_reg = RL_LPAR; break; case MII_PHYIDR1: case MII_PHYIDR2: return (0); /* * Allow the rlphy driver to read the media status * register. If we have a link partner which does not * support NWAY, this is the register which will tell * us the results of parallel detection. */ case RL_MEDIASTAT: rval = CSR_READ_1(sc, RL_MEDIASTAT); return (rval); default: printf("re%d: bad phy register\n", sc->rl_unit); return (0); } rval = CSR_READ_2(sc, re8139_reg); return (rval); } static int re_miibus_writereg(dev, phy, reg, data) device_t dev; int phy, reg, data; { struct rl_softc *sc; u_int16_t re8139_reg = 0; int rval = 0; sc = device_get_softc(dev); if (sc->rl_type == RL_8169) { rval = re_gmii_writereg(dev, phy, reg, data); return (rval); } /* Pretend the internal PHY is only at address 0 */ if (phy) return (0); switch (reg) { case MII_BMCR: re8139_reg = RL_BMCR; break; case MII_BMSR: re8139_reg = RL_BMSR; break; case MII_ANAR: re8139_reg = RL_ANAR; break; case MII_ANER: re8139_reg = RL_ANER; break; case MII_ANLPAR: re8139_reg = RL_LPAR; break; case MII_PHYIDR1: case MII_PHYIDR2: return (0); break; default: printf("re%d: bad phy register\n", sc->rl_unit); return (0); } CSR_WRITE_2(sc, re8139_reg, data); return (0); } static void re_miibus_statchg(dev) device_t dev; { } /* * Program the 64-bit multicast hash filter. 
*/ static void re_setmulti(sc) struct rl_softc *sc; { struct ifnet *ifp; int h = 0; u_int32_t hashes[2] = { 0, 0 }; struct ifmultiaddr *ifma; u_int32_t rxfilt; int mcnt = 0; RL_LOCK_ASSERT(sc); ifp = sc->rl_ifp; rxfilt = CSR_READ_4(sc, RL_RXCFG); if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { rxfilt |= RL_RXCFG_RX_MULTI; CSR_WRITE_4(sc, RL_RXCFG, rxfilt); CSR_WRITE_4(sc, RL_MAR0, 0xFFFFFFFF); CSR_WRITE_4(sc, RL_MAR4, 0xFFFFFFFF); return; } /* first, zot all the existing hash bits */ CSR_WRITE_4(sc, RL_MAR0, 0); CSR_WRITE_4(sc, RL_MAR4, 0); /* now program new ones */ + IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); mcnt++; } + IF_ADDR_UNLOCK(ifp); if (mcnt) rxfilt |= RL_RXCFG_RX_MULTI; else rxfilt &= ~RL_RXCFG_RX_MULTI; CSR_WRITE_4(sc, RL_RXCFG, rxfilt); CSR_WRITE_4(sc, RL_MAR0, hashes[0]); CSR_WRITE_4(sc, RL_MAR4, hashes[1]); } static void re_reset(sc) struct rl_softc *sc; { register int i; RL_LOCK_ASSERT(sc); CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET); for (i = 0; i < RL_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET)) break; } if (i == RL_TIMEOUT) printf("re%d: reset never completed!\n", sc->rl_unit); CSR_WRITE_1(sc, 0x82, 1); } /* * The following routine is designed to test for a defect on some * 32-bit 8169 cards. Some of these NICs have the REQ64# and ACK64# * lines connected to the bus, however for a 32-bit only card, they * should be pulled high. The result of this defect is that the * NIC will not work right if you plug it into a 64-bit slot: DMA * operations will be done with 64-bit transfers, which will fail * because the 64-bit data lines aren't connected. * * There's no way to work around this (short of talking a soldering * iron to the board), however we can detect it. 
The method we use * here is to put the NIC into digital loopback mode, set the receiver * to promiscuous mode, and then try to send a frame. We then compare * the frame data we sent to what was received. If the data matches, * then the NIC is working correctly, otherwise we know the user has * a defective NIC which has been mistakenly plugged into a 64-bit PCI * slot. In the latter case, there's no way the NIC can work correctly, * so we print out a message on the console and abort the device attach. */ static int re_diag(sc) struct rl_softc *sc; { struct ifnet *ifp = sc->rl_ifp; struct mbuf *m0; struct ether_header *eh; struct rl_desc *cur_rx; u_int16_t status; u_int32_t rxstat; int total_len, i, error = 0; u_int8_t dst[] = { 0x00, 'h', 'e', 'l', 'l', 'o' }; u_int8_t src[] = { 0x00, 'w', 'o', 'r', 'l', 'd' }; /* Allocate a single mbuf */ MGETHDR(m0, M_DONTWAIT, MT_DATA); if (m0 == NULL) return (ENOBUFS); RL_LOCK(sc); /* * Initialize the NIC in test mode. This sets the chip up * so that it can send and receive frames, but performs the * following special functions: * - Puts receiver in promiscuous mode * - Enables digital loopback mode * - Leaves interrupts turned off */ ifp->if_flags |= IFF_PROMISC; sc->rl_testmode = 1; re_init_locked(sc); re_stop(sc); DELAY(100000); re_init_locked(sc); /* Put some data in the mbuf */ eh = mtod(m0, struct ether_header *); bcopy ((char *)&dst, eh->ether_dhost, ETHER_ADDR_LEN); bcopy ((char *)&src, eh->ether_shost, ETHER_ADDR_LEN); eh->ether_type = htons(ETHERTYPE_IP); m0->m_pkthdr.len = m0->m_len = ETHER_MIN_LEN - ETHER_CRC_LEN; /* * Queue the packet, start transmission. * Note: IF_HANDOFF() ultimately calls re_start() for us. 
*/ CSR_WRITE_2(sc, RL_ISR, 0xFFFF); RL_UNLOCK(sc); /* XXX: re_diag must not be called when in ALTQ mode */ IF_HANDOFF(&ifp->if_snd, m0, ifp); RL_LOCK(sc); m0 = NULL; /* Wait for it to propagate through the chip */ DELAY(100000); for (i = 0; i < RL_TIMEOUT; i++) { status = CSR_READ_2(sc, RL_ISR); if ((status & (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) == (RL_ISR_TIMEOUT_EXPIRED|RL_ISR_RX_OK)) break; DELAY(10); } if (i == RL_TIMEOUT) { printf("re%d: diagnostic failed, failed to receive packet " "in loopback mode\n", sc->rl_unit); error = EIO; goto done; } /* * The packet should have been dumped into the first * entry in the RX DMA ring. Grab it from there. */ bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_POSTREAD); bus_dmamap_sync(sc->rl_ldata.rl_mtag, sc->rl_ldata.rl_rx_dmamap[0], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->rl_ldata.rl_mtag, sc->rl_ldata.rl_rx_dmamap[0]); m0 = sc->rl_ldata.rl_rx_mbuf[0]; sc->rl_ldata.rl_rx_mbuf[0] = NULL; eh = mtod(m0, struct ether_header *); cur_rx = &sc->rl_ldata.rl_rx_list[0]; total_len = RL_RXBYTES(cur_rx); rxstat = le32toh(cur_rx->rl_cmdstat); if (total_len != ETHER_MIN_LEN) { printf("re%d: diagnostic failed, received short packet\n", sc->rl_unit); error = EIO; goto done; } /* Test that the received packet data matches what we sent. 
*/ if (bcmp((char *)&eh->ether_dhost, (char *)&dst, ETHER_ADDR_LEN) || bcmp((char *)&eh->ether_shost, (char *)&src, ETHER_ADDR_LEN) || ntohs(eh->ether_type) != ETHERTYPE_IP) { printf("re%d: WARNING, DMA FAILURE!\n", sc->rl_unit); printf("re%d: expected TX data: %6D/%6D/0x%x\n", sc->rl_unit, dst, ":", src, ":", ETHERTYPE_IP); printf("re%d: received RX data: %6D/%6D/0x%x\n", sc->rl_unit, eh->ether_dhost, ":", eh->ether_shost, ":", ntohs(eh->ether_type)); printf("re%d: You may have a defective 32-bit NIC plugged " "into a 64-bit PCI slot.\n", sc->rl_unit); printf("re%d: Please re-install the NIC in a 32-bit slot " "for proper operation.\n", sc->rl_unit); printf("re%d: Read the re(4) man page for more details.\n", sc->rl_unit); error = EIO; } done: /* Turn interface off, release resources */ sc->rl_testmode = 0; ifp->if_flags &= ~IFF_PROMISC; re_stop(sc); if (m0 != NULL) m_freem(m0); RL_UNLOCK(sc); return (error); } /* * Probe for a RealTek 8139C+/8169/8110 chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int re_probe(dev) device_t dev; { struct rl_type *t; struct rl_softc *sc; int rid; u_int32_t hwrev; t = re_devs; sc = device_get_softc(dev); while (t->rl_name != NULL) { if ((pci_get_vendor(dev) == t->rl_vid) && (pci_get_device(dev) == t->rl_did)) { /* * Temporarily map the I/O space * so we can read the chip ID register. 
*/ rid = RL_RID; sc->rl_res = bus_alloc_resource_any(dev, RL_RES, &rid, RF_ACTIVE); if (sc->rl_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); return (ENXIO); } sc->rl_btag = rman_get_bustag(sc->rl_res); sc->rl_bhandle = rman_get_bushandle(sc->rl_res); hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV; bus_release_resource(dev, RL_RES, RL_RID, sc->rl_res); if (t->rl_basetype == hwrev) { device_set_desc(dev, t->rl_name); return (BUS_PROBE_DEFAULT); } } t++; } return (ENXIO); } /* * This routine takes the segment list provided as the result of * a bus_dma_map_load() operation and assigns the addresses/lengths * to RealTek DMA descriptors. This can be called either by the RX * code or the TX code. In the RX case, we'll probably wind up mapping * at most one segment. For the TX case, there could be any number of * segments since TX packets may span multiple mbufs. In either case, * if the number of segments is larger than the rl_maxsegs limit * specified by the caller, we abort the mapping operation. Sadly, * whoever designed the buffer mapping API did not provide a way to * return an error from here, so we have to fake it a bit. */ static void re_dma_map_desc(arg, segs, nseg, mapsize, error) void *arg; bus_dma_segment_t *segs; int nseg; bus_size_t mapsize; int error; { struct rl_dmaload_arg *ctx; struct rl_desc *d = NULL; int i = 0, idx; if (error) return; ctx = arg; /* Signal error to caller if there's too many segments */ if (nseg > ctx->rl_maxsegs) { ctx->rl_maxsegs = 0; return; } /* * Map the segment array into descriptors. Note that we set the * start-of-frame and end-of-frame markers for either TX or RX, but * they really only have meaning in the TX case. (In the RX case, * it's the chip that tells us where packets begin and end.) * We also keep track of the end of the ring and set the * end-of-ring bits as needed, and we set the ownership bits * in all except the very first descriptor. 
(The caller will * set this descriptor later when it start transmission or * reception.) */ idx = ctx->rl_idx; for (;;) { u_int32_t cmdstat; d = &ctx->rl_ring[idx]; if (le32toh(d->rl_cmdstat) & RL_RDESC_STAT_OWN) { ctx->rl_maxsegs = 0; return; } cmdstat = segs[i].ds_len; d->rl_bufaddr_lo = htole32(RL_ADDR_LO(segs[i].ds_addr)); d->rl_bufaddr_hi = htole32(RL_ADDR_HI(segs[i].ds_addr)); if (i == 0) cmdstat |= RL_TDESC_CMD_SOF; else cmdstat |= RL_TDESC_CMD_OWN; if (idx == (RL_RX_DESC_CNT - 1)) cmdstat |= RL_TDESC_CMD_EOR; d->rl_cmdstat = htole32(cmdstat | ctx->rl_flags); i++; if (i == nseg) break; RL_DESC_INC(idx); } d->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF); ctx->rl_maxsegs = nseg; ctx->rl_idx = idx; } /* * Map a single buffer address. */ static void re_dma_map_addr(arg, segs, nseg, error) void *arg; bus_dma_segment_t *segs; int nseg; int error; { bus_addr_t *addr; if (error) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); addr = arg; *addr = segs->ds_addr; } static int re_allocmem(dev, sc) device_t dev; struct rl_softc *sc; { int error; int nseg; int i; /* * Allocate map for RX mbufs. */ nseg = 32; error = bus_dma_tag_create(sc->rl_parent_tag, ETHER_ALIGN, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * nseg, nseg, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rl_ldata.rl_mtag); if (error) { device_printf(dev, "could not allocate dma tag\n"); return (ENOMEM); } /* * Allocate map for TX descriptor list. 
*/ error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, RL_TX_LIST_SZ, 1, RL_TX_LIST_SZ, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rl_ldata.rl_tx_list_tag); if (error) { device_printf(dev, "could not allocate dma tag\n"); return (ENOMEM); } /* Allocate DMA'able memory for the TX ring */ error = bus_dmamem_alloc(sc->rl_ldata.rl_tx_list_tag, (void **)&sc->rl_ldata.rl_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->rl_ldata.rl_tx_list_map); if (error) return (ENOMEM); /* Load the map for the TX ring. */ error = bus_dmamap_load(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map, sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ, re_dma_map_addr, &sc->rl_ldata.rl_tx_list_addr, BUS_DMA_NOWAIT); /* Create DMA maps for TX buffers */ for (i = 0; i < RL_TX_DESC_CNT; i++) { error = bus_dmamap_create(sc->rl_ldata.rl_mtag, 0, &sc->rl_ldata.rl_tx_dmamap[i]); if (error) { device_printf(dev, "can't create DMA map for TX\n"); return (ENOMEM); } } /* * Allocate map for RX descriptor list. */ error = bus_dma_tag_create(sc->rl_parent_tag, RL_RING_ALIGN, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, RL_RX_LIST_SZ, 1, RL_RX_LIST_SZ, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rl_ldata.rl_rx_list_tag); if (error) { device_printf(dev, "could not allocate dma tag\n"); return (ENOMEM); } /* Allocate DMA'able memory for the RX ring */ error = bus_dmamem_alloc(sc->rl_ldata.rl_rx_list_tag, (void **)&sc->rl_ldata.rl_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->rl_ldata.rl_rx_list_map); if (error) return (ENOMEM); /* Load the map for the RX ring. 
*/ error = bus_dmamap_load(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, sc->rl_ldata.rl_rx_list, RL_RX_LIST_SZ, re_dma_map_addr, &sc->rl_ldata.rl_rx_list_addr, BUS_DMA_NOWAIT); /* Create DMA maps for RX buffers */ for (i = 0; i < RL_RX_DESC_CNT; i++) { error = bus_dmamap_create(sc->rl_ldata.rl_mtag, 0, &sc->rl_ldata.rl_rx_dmamap[i]); if (error) { device_printf(dev, "can't create DMA map for RX\n"); return (ENOMEM); } } return (0); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int re_attach(dev) device_t dev; { u_char eaddr[ETHER_ADDR_LEN]; u_int16_t as[3]; struct rl_softc *sc; struct ifnet *ifp; struct rl_hwrev *hw_rev; int hwrev; u_int16_t re_did = 0; int unit, error = 0, rid, i; sc = device_get_softc(dev); unit = device_get_unit(dev); mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); /* * Map control/status registers. */ pci_enable_busmaster(dev); rid = RL_RID; sc->rl_res = bus_alloc_resource_any(dev, RL_RES, &rid, RF_ACTIVE); if (sc->rl_res == NULL) { printf ("re%d: couldn't map ports/memory\n", unit); error = ENXIO; goto fail; } sc->rl_btag = rman_get_bustag(sc->rl_res); sc->rl_bhandle = rman_get_bushandle(sc->rl_res); /* Allocate interrupt */ rid = 0; sc->rl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->rl_irq == NULL) { printf("re%d: couldn't map interrupt\n", unit); error = ENXIO; goto fail; } /* Reset the adapter. 
*/ RL_LOCK(sc); re_reset(sc); RL_UNLOCK(sc); hw_rev = re_hwrevs; hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV; while (hw_rev->rl_desc != NULL) { if (hw_rev->rl_rev == hwrev) { sc->rl_type = hw_rev->rl_type; break; } hw_rev++; } if (sc->rl_type == RL_8169) { /* Set RX length mask */ sc->rl_rxlenmask = RL_RDESC_STAT_GFRAGLEN; /* Force station address autoload from the EEPROM */ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_AUTOLOAD); for (i = 0; i < RL_TIMEOUT; i++) { if (!(CSR_READ_1(sc, RL_EECMD) & RL_EEMODE_AUTOLOAD)) break; DELAY(100); } if (i == RL_TIMEOUT) printf ("re%d: eeprom autoload timed out\n", unit); for (i = 0; i < ETHER_ADDR_LEN; i++) eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i); } else { /* Set RX length mask */ sc->rl_rxlenmask = RL_RDESC_STAT_FRAGLEN; sc->rl_eecmd_read = RL_EECMD_READ_6BIT; re_read_eeprom(sc, (caddr_t)&re_did, 0, 1, 0); if (re_did != 0x8129) sc->rl_eecmd_read = RL_EECMD_READ_8BIT; /* * Get station address from the EEPROM. */ re_read_eeprom(sc, (caddr_t)as, RL_EE_EADDR, 3, 0); for (i = 0; i < 3; i++) { eaddr[(i * 2) + 0] = as[i] & 0xff; eaddr[(i * 2) + 1] = as[i] >> 8; } } sc->rl_unit = unit; /* * Allocate the parent bus DMA tag appropriate for PCI. 
*/ #define RL_NSEG_NEW 32 error = bus_dma_tag_create(NULL, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MAXBSIZE, RL_NSEG_NEW, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->rl_parent_tag); if (error) goto fail; error = re_allocmem(dev, sc); if (error) goto fail; ifp = sc->rl_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { printf("re%d: can not if_alloc()\n", sc->rl_unit); error = ENOSPC; goto fail; } /* Do MII setup */ if (mii_phy_probe(dev, &sc->rl_miibus, re_ifmedia_upd, re_ifmedia_sts)) { printf("re%d: MII without any phy!\n", sc->rl_unit); error = ENXIO; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = re_ioctl; ifp->if_capabilities = IFCAP_VLAN_MTU; ifp->if_start = re_start; ifp->if_hwassist = /*RE_CSUM_FEATURES*/0; ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif ifp->if_watchdog = re_watchdog; ifp->if_init = re_init; if (sc->rl_type == RL_8169) ifp->if_baudrate = 1000000000; else ifp->if_baudrate = 100000000; IFQ_SET_MAXLEN(&ifp->if_snd, RL_IFQ_MAXLEN); ifp->if_snd.ifq_drv_maxlen = RL_IFQ_MAXLEN; IFQ_SET_READY(&ifp->if_snd); ifp->if_capenable = ifp->if_capabilities & ~IFCAP_HWCSUM; callout_handle_init(&sc->rl_stat_ch); /* * Call MI attach routine. */ ether_ifattach(ifp, eaddr); /* Perform hardware diagnostic. 
*/ error = re_diag(sc); if (error) { printf("re%d: attach aborted due to hardware diag failure\n", unit); ether_ifdetach(ifp); if_free(ifp); goto fail; } /* Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->rl_irq, INTR_TYPE_NET | INTR_MPSAFE, re_intr, sc, &sc->rl_intrhand); if (error) { printf("re%d: couldn't set up irq\n", unit); ether_ifdetach(ifp); if_free(ifp); } fail: if (error) re_detach(dev); return (error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. */ static int re_detach(dev) device_t dev; { struct rl_softc *sc; struct ifnet *ifp; int i; int attached; sc = device_get_softc(dev); ifp = sc->rl_ifp; KASSERT(mtx_initialized(&sc->rl_mtx), ("re mutex not initialized")); attached = device_is_attached(dev); /* These should only be active if attach succeeded */ if (attached) ether_ifdetach(ifp); if (ifp == NULL) if_free(ifp); RL_LOCK(sc); #if 0 sc->suspended = 1; #endif /* These should only be active if attach succeeded */ if (attached) { re_stop(sc); /* * Force off the IFF_UP flag here, in case someone * still had a BPF descriptor attached to this * interface. If they do, ether_ifdetach() will cause * the BPF code to try and clear the promisc mode * flag, which will bubble down to re_ioctl(), * which will try to call re_init() again. This will * turn the NIC back on and restart the MII ticker, * which will panic the system when the kernel tries * to invoke the re_tick() function that isn't there * anymore. */ ifp->if_flags &= ~IFF_UP; } if (sc->rl_miibus) device_delete_child(dev, sc->rl_miibus); bus_generic_detach(dev); /* * The rest is resource deallocation, so we should already be * stopped here. 
*/ RL_UNLOCK(sc); if (sc->rl_intrhand) bus_teardown_intr(dev, sc->rl_irq, sc->rl_intrhand); if (sc->rl_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq); if (sc->rl_res) bus_release_resource(dev, RL_RES, RL_RID, sc->rl_res); /* Unload and free the RX DMA ring memory and map */ if (sc->rl_ldata.rl_rx_list_tag) { bus_dmamap_unload(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map); bus_dmamem_free(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list, sc->rl_ldata.rl_rx_list_map); bus_dma_tag_destroy(sc->rl_ldata.rl_rx_list_tag); } /* Unload and free the TX DMA ring memory and map */ if (sc->rl_ldata.rl_tx_list_tag) { bus_dmamap_unload(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map); bus_dmamem_free(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list, sc->rl_ldata.rl_tx_list_map); bus_dma_tag_destroy(sc->rl_ldata.rl_tx_list_tag); } /* Destroy all the RX and TX buffer maps */ if (sc->rl_ldata.rl_mtag) { for (i = 0; i < RL_TX_DESC_CNT; i++) bus_dmamap_destroy(sc->rl_ldata.rl_mtag, sc->rl_ldata.rl_tx_dmamap[i]); for (i = 0; i < RL_RX_DESC_CNT; i++) bus_dmamap_destroy(sc->rl_ldata.rl_mtag, sc->rl_ldata.rl_rx_dmamap[i]); bus_dma_tag_destroy(sc->rl_ldata.rl_mtag); } /* Unload and free the stats buffer and map */ if (sc->rl_ldata.rl_stag) { bus_dmamap_unload(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_rx_list_map); bus_dmamem_free(sc->rl_ldata.rl_stag, sc->rl_ldata.rl_stats, sc->rl_ldata.rl_smap); bus_dma_tag_destroy(sc->rl_ldata.rl_stag); } if (sc->rl_parent_tag) bus_dma_tag_destroy(sc->rl_parent_tag); mtx_destroy(&sc->rl_mtx); return (0); } static int re_newbuf(sc, idx, m) struct rl_softc *sc; int idx; struct mbuf *m; { struct rl_dmaload_arg arg; struct mbuf *n = NULL; int error; if (m == NULL) { n = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (n == NULL) return (ENOBUFS); m = n; } else m->m_data = m->m_ext.ext_buf; m->m_len = m->m_pkthdr.len = MCLBYTES; #ifdef RE_FIXUP_RX /* * This is part of an evil trick to deal with non-x86 platforms. 
* The RealTek chip requires RX buffers to be aligned on 64-bit * boundaries, but that will hose non-x86 machines. To get around * this, we leave some empty space at the start of each buffer * and for non-x86 hosts, we copy the buffer back six bytes * to achieve word alignment. This is slightly more efficient * than allocating a new buffer, copying the contents, and * discarding the old buffer. */ m_adj(m, RE_ETHER_ALIGN); #endif arg.sc = sc; arg.rl_idx = idx; arg.rl_maxsegs = 1; arg.rl_flags = 0; arg.rl_ring = sc->rl_ldata.rl_rx_list; error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, sc->rl_ldata.rl_rx_dmamap[idx], m, re_dma_map_desc, &arg, BUS_DMA_NOWAIT); if (error || arg.rl_maxsegs != 1) { if (n != NULL) m_freem(n); return (ENOMEM); } sc->rl_ldata.rl_rx_list[idx].rl_cmdstat |= htole32(RL_RDESC_CMD_OWN); sc->rl_ldata.rl_rx_mbuf[idx] = m; bus_dmamap_sync(sc->rl_ldata.rl_mtag, sc->rl_ldata.rl_rx_dmamap[idx], BUS_DMASYNC_PREREAD); return (0); } #ifdef RE_FIXUP_RX static __inline void re_fixup_rx(m) struct mbuf *m; { int i; uint16_t *src, *dst; src = mtod(m, uint16_t *); dst = src - (RE_ETHER_ALIGN - ETHER_ALIGN) / sizeof *src; for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++) *dst++ = *src++; m->m_data -= RE_ETHER_ALIGN - ETHER_ALIGN; return; } #endif static int re_tx_list_init(sc) struct rl_softc *sc; { RL_LOCK_ASSERT(sc); bzero ((char *)sc->rl_ldata.rl_tx_list, RL_TX_LIST_SZ); bzero ((char *)&sc->rl_ldata.rl_tx_mbuf, (RL_TX_DESC_CNT * sizeof(struct mbuf *))); bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_PREWRITE); sc->rl_ldata.rl_tx_prodidx = 0; sc->rl_ldata.rl_tx_considx = 0; sc->rl_ldata.rl_tx_free = RL_TX_DESC_CNT; return (0); } static int re_rx_list_init(sc) struct rl_softc *sc; { int i; bzero ((char *)sc->rl_ldata.rl_rx_list, RL_RX_LIST_SZ); bzero ((char *)&sc->rl_ldata.rl_rx_mbuf, (RL_RX_DESC_CNT * sizeof(struct mbuf *))); for (i = 0; i < RL_RX_DESC_CNT; i++) { if (re_newbuf(sc, i, NULL) == ENOBUFS) return 
(ENOBUFS); } /* Flush the RX descriptors */ bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); sc->rl_ldata.rl_rx_prodidx = 0; sc->rl_head = sc->rl_tail = NULL; return (0); } /* * RX handler for C+ and 8169. For the gigE chips, we support * the reception of jumbo frames that have been fragmented * across multiple 2K mbuf cluster buffers. */ static void re_rxeof(sc) struct rl_softc *sc; { struct mbuf *m; struct ifnet *ifp; int i, total_len; struct rl_desc *cur_rx; u_int32_t rxstat, rxvlan; RL_LOCK_ASSERT(sc); ifp = sc->rl_ifp; i = sc->rl_ldata.rl_rx_prodidx; /* Invalidate the descriptor memory */ bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_POSTREAD); while (!RL_OWN(&sc->rl_ldata.rl_rx_list[i])) { cur_rx = &sc->rl_ldata.rl_rx_list[i]; m = sc->rl_ldata.rl_rx_mbuf[i]; total_len = RL_RXBYTES(cur_rx); rxstat = le32toh(cur_rx->rl_cmdstat); rxvlan = le32toh(cur_rx->rl_vlanctl); /* Invalidate the RX mbuf and unload its map */ bus_dmamap_sync(sc->rl_ldata.rl_mtag, sc->rl_ldata.rl_rx_dmamap[i], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->rl_ldata.rl_mtag, sc->rl_ldata.rl_rx_dmamap[i]); if (!(rxstat & RL_RDESC_STAT_EOF)) { m->m_len = RE_RX_DESC_BUFLEN; if (sc->rl_head == NULL) sc->rl_head = sc->rl_tail = m; else { m->m_flags &= ~M_PKTHDR; sc->rl_tail->m_next = m; sc->rl_tail = m; } re_newbuf(sc, i, NULL); RL_DESC_INC(i); continue; } /* * NOTE: for the 8139C+, the frame length field * is always 12 bits in size, but for the gigE chips, * it is 13 bits (since the max RX frame length is 16K). * Unfortunately, all 32 bits in the status word * were already used, so to make room for the extra * length bit, RealTek took out the 'frame alignment * error' bit and shifted the other status bits * over one slot. The OWN, EOR, FS and LS bits are * still in the same places. 
We have already extracted * the frame length and checked the OWN bit, so rather * than using an alternate bit mapping, we shift the * status bits one space to the right so we can evaluate * them using the 8169 status as though it was in the * same format as that of the 8139C+. */ if (sc->rl_type == RL_8169) rxstat >>= 1; /* * if total_len > 2^13-1, both _RXERRSUM and _GIANT will be * set, but if CRC is clear, it will still be a valid frame. */ if (rxstat & RL_RDESC_STAT_RXERRSUM && !(total_len > 8191 && (rxstat & RL_RDESC_STAT_ERRS) == RL_RDESC_STAT_GIANT)) { ifp->if_ierrors++; /* * If this is part of a multi-fragment packet, * discard all the pieces. */ if (sc->rl_head != NULL) { m_freem(sc->rl_head); sc->rl_head = sc->rl_tail = NULL; } re_newbuf(sc, i, m); RL_DESC_INC(i); continue; } /* * If allocating a replacement mbuf fails, * reload the current one. */ if (re_newbuf(sc, i, NULL)) { ifp->if_ierrors++; if (sc->rl_head != NULL) { m_freem(sc->rl_head); sc->rl_head = sc->rl_tail = NULL; } re_newbuf(sc, i, m); RL_DESC_INC(i); continue; } RL_DESC_INC(i); if (sc->rl_head != NULL) { m->m_len = total_len % RE_RX_DESC_BUFLEN; if (m->m_len == 0) m->m_len = RE_RX_DESC_BUFLEN; /* * Special case: if there's 4 bytes or less * in this buffer, the mbuf can be discarded: * the last 4 bytes is the CRC, which we don't * care about anyway. 
*/ if (m->m_len <= ETHER_CRC_LEN) { sc->rl_tail->m_len -= (ETHER_CRC_LEN - m->m_len); m_freem(m); } else { m->m_len -= ETHER_CRC_LEN; m->m_flags &= ~M_PKTHDR; sc->rl_tail->m_next = m; } m = sc->rl_head; sc->rl_head = sc->rl_tail = NULL; m->m_pkthdr.len = total_len - ETHER_CRC_LEN; } else m->m_pkthdr.len = m->m_len = (total_len - ETHER_CRC_LEN); #ifdef RE_FIXUP_RX re_fixup_rx(m); #endif ifp->if_ipackets++; m->m_pkthdr.rcvif = ifp; /* Do RX checksumming if enabled */ if (ifp->if_capenable & IFCAP_RXCSUM) { /* Check IP header checksum */ if (rxstat & RL_RDESC_STAT_PROTOID) m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if (!(rxstat & RL_RDESC_STAT_IPSUMBAD)) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; /* Check TCP/UDP checksum */ if ((RL_TCPPKT(rxstat) && !(rxstat & RL_RDESC_STAT_TCPSUMBAD)) || (RL_UDPPKT(rxstat) && !(rxstat & RL_RDESC_STAT_UDPSUMBAD))) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID|CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } if (rxvlan & RL_RDESC_VLANCTL_TAG) VLAN_INPUT_TAG(ifp, m, ntohs((rxvlan & RL_RDESC_VLANCTL_DATA)), continue); RL_UNLOCK(sc); (*ifp->if_input)(ifp, m); RL_LOCK(sc); } /* Flush the RX DMA ring */ bus_dmamap_sync(sc->rl_ldata.rl_rx_list_tag, sc->rl_ldata.rl_rx_list_map, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); sc->rl_ldata.rl_rx_prodidx = i; } static void re_txeof(sc) struct rl_softc *sc; { struct ifnet *ifp; u_int32_t txstat; int idx; ifp = sc->rl_ifp; idx = sc->rl_ldata.rl_tx_considx; /* Invalidate the TX descriptor list */ bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_POSTREAD); while (idx != sc->rl_ldata.rl_tx_prodidx) { txstat = le32toh(sc->rl_ldata.rl_tx_list[idx].rl_cmdstat); if (txstat & RL_TDESC_CMD_OWN) break; /* * We only stash mbufs in the last descriptor * in a fragment chain, which also happens to * be the only place where the TX status bits * are valid. 
*/
		if (txstat & RL_TDESC_CMD_EOF) {
			m_freem(sc->rl_ldata.rl_tx_mbuf[idx]);
			sc->rl_ldata.rl_tx_mbuf[idx] = NULL;
			bus_dmamap_unload(sc->rl_ldata.rl_mtag,
			    sc->rl_ldata.rl_tx_dmamap[idx]);
			if (txstat & (RL_TDESC_STAT_EXCESSCOL|
			    RL_TDESC_STAT_COLCNT))
				ifp->if_collisions++;
			if (txstat & RL_TDESC_STAT_TXERRSUM)
				ifp->if_oerrors++;
			else
				ifp->if_opackets++;
		}
		sc->rl_ldata.rl_tx_free++;
		RL_DESC_INC(idx);
	}

	/* No changes made to the TX ring, so no flush needed */
	if (idx != sc->rl_ldata.rl_tx_considx) {
		sc->rl_ldata.rl_tx_considx = idx;
		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_timer = 0;
	}

	/*
	 * If not all descriptors have been reaped yet, reload
	 * the timer so that we will eventually get another
	 * interrupt that will cause us to re-enter this routine.
	 * This is done in case the transmitter has gone idle.
	 */
	if (sc->rl_ldata.rl_tx_free != RL_TX_DESC_CNT)
		CSR_WRITE_4(sc, RL_TIMERCNT, 1);
}

/* Unlocked wrapper: take the softc mutex and run the MII tick. */
static void
re_tick(xsc)
	void			*xsc;
{
	struct rl_softc		*sc;

	sc = xsc;
	RL_LOCK(sc);
	re_tick_locked(sc);
	RL_UNLOCK(sc);
}

/* Poll the PHY via mii_tick() and reschedule ourselves one hz later. */
static void
re_tick_locked(sc)
	struct rl_softc		*sc;
{
	struct mii_data		*mii;

	RL_LOCK_ASSERT(sc);

	mii = device_get_softc(sc->rl_miibus);
	mii_tick(mii);
	sc->rl_stat_ch = timeout(re_tick, sc, hz);
}

#ifdef DEVICE_POLLING
/* Unlocked polling entry point: take the softc mutex and dispatch. */
static void
re_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct rl_softc *sc = ifp->if_softc;

	RL_LOCK(sc);
	re_poll_locked(ifp, cmd, count);
	RL_UNLOCK(sc);
}

/*
 * Locked polling worker: deregister and re-enable interrupts when
 * polling has been turned off, otherwise run one RX/TX reap cycle and
 * kick the transmit queue.
 */
static void
re_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct rl_softc *sc = ifp->if_softc;

	RL_LOCK_ASSERT(sc);

	if (!(ifp->if_capenable & IFCAP_POLLING)) {
		ether_poll_deregister(ifp);
		cmd = POLL_DEREGISTER;
	}
	if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
		CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS);
		return;
	}

	sc->rxcycles = count;
	re_rxeof(sc);
	re_txeof(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		re_start_locked(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
		u_int16_t       status;

		status = CSR_READ_2(sc, RL_ISR);
		if (status ==
0xffff) return; if (status) CSR_WRITE_2(sc, RL_ISR, status); /* * XXX check behaviour on receiver stalls. */ if (status & RL_ISR_SYSTEM_ERR) { re_reset(sc); re_init_locked(sc); } } } #endif /* DEVICE_POLLING */ static void re_intr(arg) void *arg; { struct rl_softc *sc; struct ifnet *ifp; u_int16_t status; sc = arg; RL_LOCK(sc); ifp = sc->rl_ifp; if (sc->suspended || !(ifp->if_flags & IFF_UP)) goto done_locked; #ifdef DEVICE_POLLING if (ifp->if_flags & IFF_POLLING) goto done_locked; if ((ifp->if_capenable & IFCAP_POLLING) && ether_poll_register(re_poll, ifp)) { /* ok, disable interrupts */ CSR_WRITE_2(sc, RL_IMR, 0x0000); re_poll_locked(ifp, 0, 1); goto done_locked; } #endif /* DEVICE_POLLING */ for (;;) { status = CSR_READ_2(sc, RL_ISR); /* If the card has gone away the read returns 0xffff. */ if (status == 0xffff) break; if (status) CSR_WRITE_2(sc, RL_ISR, status); if ((status & RL_INTRS_CPLUS) == 0) break; if ((status & RL_ISR_RX_OK) || (status & RL_ISR_RX_ERR)) re_rxeof(sc); if ((status & RL_ISR_TIMEOUT_EXPIRED) || (status & RL_ISR_TX_ERR) || (status & RL_ISR_TX_DESC_UNAVAIL)) re_txeof(sc); if (status & RL_ISR_SYSTEM_ERR) { re_reset(sc); re_init_locked(sc); } if (status & RL_ISR_LINKCHG) { untimeout(re_tick, sc, sc->rl_stat_ch); re_tick_locked(sc); } } if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) re_start_locked(ifp); done_locked: RL_UNLOCK(sc); } static int re_encap(sc, m_head, idx) struct rl_softc *sc; struct mbuf **m_head; int *idx; { struct mbuf *m_new = NULL; struct rl_dmaload_arg arg; bus_dmamap_t map; int error; struct m_tag *mtag; RL_LOCK_ASSERT(sc); if (sc->rl_ldata.rl_tx_free <= 4) return (EFBIG); /* * Set up checksum offload. Note: checksum offload bits must * appear in all descriptors of a multi-descriptor transmit * attempt. This is according to testing done with an 8169 * chip. This is a requirement. 
*/ arg.rl_flags = 0; if ((*m_head)->m_pkthdr.csum_flags & CSUM_IP) arg.rl_flags |= RL_TDESC_CMD_IPCSUM; if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP) arg.rl_flags |= RL_TDESC_CMD_TCPCSUM; if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP) arg.rl_flags |= RL_TDESC_CMD_UDPCSUM; arg.sc = sc; arg.rl_idx = *idx; arg.rl_maxsegs = sc->rl_ldata.rl_tx_free; if (arg.rl_maxsegs > 4) arg.rl_maxsegs -= 4; arg.rl_ring = sc->rl_ldata.rl_tx_list; map = sc->rl_ldata.rl_tx_dmamap[*idx]; error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, map, *m_head, re_dma_map_desc, &arg, BUS_DMA_NOWAIT); if (error && error != EFBIG) { printf("re%d: can't map mbuf (error %d)\n", sc->rl_unit, error); return (ENOBUFS); } /* Too many segments to map, coalesce into a single mbuf */ if (error || arg.rl_maxsegs == 0) { m_new = m_defrag(*m_head, M_DONTWAIT); if (m_new == NULL) return (ENOBUFS); else *m_head = m_new; arg.sc = sc; arg.rl_idx = *idx; arg.rl_maxsegs = sc->rl_ldata.rl_tx_free; arg.rl_ring = sc->rl_ldata.rl_tx_list; error = bus_dmamap_load_mbuf(sc->rl_ldata.rl_mtag, map, *m_head, re_dma_map_desc, &arg, BUS_DMA_NOWAIT); if (error) { printf("re%d: can't map mbuf (error %d)\n", sc->rl_unit, error); return (EFBIG); } } /* * Insure that the map for this transmission * is placed at the array index of the last descriptor * in this chain. (Swap last and first dmamaps.) */ sc->rl_ldata.rl_tx_dmamap[*idx] = sc->rl_ldata.rl_tx_dmamap[arg.rl_idx]; sc->rl_ldata.rl_tx_dmamap[arg.rl_idx] = map; sc->rl_ldata.rl_tx_mbuf[arg.rl_idx] = *m_head; sc->rl_ldata.rl_tx_free -= arg.rl_maxsegs; /* * Set up hardware VLAN tagging. Note: vlan tag info must * appear in the first descriptor of a multi-descriptor * transmission attempt. */ mtag = VLAN_OUTPUT_TAG(sc->rl_ifp, *m_head); if (mtag != NULL) sc->rl_ldata.rl_tx_list[*idx].rl_vlanctl = htole32(htons(VLAN_TAG_VALUE(mtag)) | RL_TDESC_VLANCTL_TAG); /* Transfer ownership of packet to the chip. 
*/ sc->rl_ldata.rl_tx_list[arg.rl_idx].rl_cmdstat |= htole32(RL_TDESC_CMD_OWN); if (*idx != arg.rl_idx) sc->rl_ldata.rl_tx_list[*idx].rl_cmdstat |= htole32(RL_TDESC_CMD_OWN); RL_DESC_INC(arg.rl_idx); *idx = arg.rl_idx; return (0); } static void re_start(ifp) struct ifnet *ifp; { struct rl_softc *sc; sc = ifp->if_softc; RL_LOCK(sc); re_start_locked(ifp); RL_UNLOCK(sc); } /* * Main transmit routine for C+ and gigE NICs. */ static void re_start_locked(ifp) struct ifnet *ifp; { struct rl_softc *sc; struct mbuf *m_head = NULL; int idx, queued = 0; sc = ifp->if_softc; RL_LOCK_ASSERT(sc); idx = sc->rl_ldata.rl_tx_prodidx; while (sc->rl_ldata.rl_tx_mbuf[idx] == NULL) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; if (re_encap(sc, &m_head, &idx)) { IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; break; } /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, m_head); queued++; } if (queued == 0) return; /* Flush the TX descriptors */ bus_dmamap_sync(sc->rl_ldata.rl_tx_list_tag, sc->rl_ldata.rl_tx_list_map, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); sc->rl_ldata.rl_tx_prodidx = idx; /* * RealTek put the TX poll request register in a different * location on the 8169 gigE chip. I don't know why. */ if (sc->rl_type == RL_8169) CSR_WRITE_2(sc, RL_GTXSTART, RL_TXSTART_START); else CSR_WRITE_2(sc, RL_TXSTART, RL_TXSTART_START); /* * Use the countdown timer for interrupt moderation. * 'TX done' interrupts are disabled. Instead, we reset the * countdown timer, which will begin counting until it hits * the value in the TIMERINT register, and then trigger an * interrupt. Each time we write to the TIMERCNT register, * the timer count is reset to 0. */ CSR_WRITE_4(sc, RL_TIMERCNT, 1); /* * Set a timeout in case the chip goes out to lunch. 
*/ ifp->if_timer = 5; } static void re_init(xsc) void *xsc; { struct rl_softc *sc = xsc; RL_LOCK(sc); re_init_locked(sc); RL_UNLOCK(sc); } static void re_init_locked(sc) struct rl_softc *sc; { struct ifnet *ifp = sc->rl_ifp; struct mii_data *mii; u_int32_t rxcfg = 0; RL_LOCK_ASSERT(sc); mii = device_get_softc(sc->rl_miibus); /* * Cancel pending I/O and free all RX/TX buffers. */ re_stop(sc); /* * Enable C+ RX and TX mode, as well as VLAN stripping and * RX checksum offload. We must configure the C+ register * before all others. */ CSR_WRITE_2(sc, RL_CPLUS_CMD, RL_CPLUSCMD_RXENB| RL_CPLUSCMD_TXENB|RL_CPLUSCMD_PCI_MRW| RL_CPLUSCMD_VLANSTRIP| (ifp->if_capenable & IFCAP_RXCSUM ? RL_CPLUSCMD_RXCSUM_ENB : 0)); /* * Init our MAC address. Even though the chipset * documentation doesn't mention it, we need to enter "Config * register write enable" mode to modify the ID registers. */ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); CSR_WRITE_STREAM_4(sc, RL_IDR0, *(u_int32_t *)(&IFP2ENADDR(sc->rl_ifp)[0])); CSR_WRITE_STREAM_4(sc, RL_IDR4, *(u_int32_t *)(&IFP2ENADDR(sc->rl_ifp)[4])); CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); /* * For C+ mode, initialize the RX descriptors and mbufs. */ re_rx_list_init(sc); re_tx_list_init(sc); /* * Enable transmit and receive. */ CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); /* * Set the initial TX and RX configuration. */ if (sc->rl_testmode) { if (sc->rl_type == RL_8169) CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG|RL_LOOPTEST_ON); else CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG|RL_LOOPTEST_ON_CPLUS); } else CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG); /* Set the individual bit to receive frames for this host only. */ rxcfg = CSR_READ_4(sc, RL_RXCFG); rxcfg |= RL_RXCFG_RX_INDIV; /* If we want promiscuous mode, set the allframes bit. 
*/ if (ifp->if_flags & IFF_PROMISC) rxcfg |= RL_RXCFG_RX_ALLPHYS; else rxcfg &= ~RL_RXCFG_RX_ALLPHYS; CSR_WRITE_4(sc, RL_RXCFG, rxcfg); /* * Set capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) rxcfg |= RL_RXCFG_RX_BROAD; else rxcfg &= ~RL_RXCFG_RX_BROAD; CSR_WRITE_4(sc, RL_RXCFG, rxcfg); /* * Program the multicast filter, if necessary. */ re_setmulti(sc); #ifdef DEVICE_POLLING /* * Disable interrupts if we are polling. */ if (ifp->if_flags & IFF_POLLING) CSR_WRITE_2(sc, RL_IMR, 0); else /* otherwise ... */ #endif /* DEVICE_POLLING */ /* * Enable interrupts. */ if (sc->rl_testmode) CSR_WRITE_2(sc, RL_IMR, 0); else CSR_WRITE_2(sc, RL_IMR, RL_INTRS_CPLUS); /* Set initial TX threshold */ sc->rl_txthresh = RL_TX_THRESH_INIT; /* Start RX/TX process. */ CSR_WRITE_4(sc, RL_MISSEDPKT, 0); #ifdef notdef /* Enable receiver and transmitter. */ CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); #endif /* * Load the addresses of the RX and TX lists into the chip. */ CSR_WRITE_4(sc, RL_RXLIST_ADDR_HI, RL_ADDR_HI(sc->rl_ldata.rl_rx_list_addr)); CSR_WRITE_4(sc, RL_RXLIST_ADDR_LO, RL_ADDR_LO(sc->rl_ldata.rl_rx_list_addr)); CSR_WRITE_4(sc, RL_TXLIST_ADDR_HI, RL_ADDR_HI(sc->rl_ldata.rl_tx_list_addr)); CSR_WRITE_4(sc, RL_TXLIST_ADDR_LO, RL_ADDR_LO(sc->rl_ldata.rl_tx_list_addr)); CSR_WRITE_1(sc, RL_EARLY_TX_THRESH, 16); /* * Initialize the timer interrupt register so that * a timer interrupt will be generated once the timer * reaches a certain number of ticks. The timer is * reloaded on each transmit. This gives us TX interrupt * moderation, which dramatically improves TX frame rate. */ if (sc->rl_type == RL_8169) CSR_WRITE_4(sc, RL_TIMERINT_8169, 0x800); else CSR_WRITE_4(sc, RL_TIMERINT, 0x400); /* * For 8169 gigE NICs, set the max allowed RX packet * size so we can receive jumbo frames. 
*/ if (sc->rl_type == RL_8169) CSR_WRITE_2(sc, RL_MAXRXPKTLEN, 16383); if (sc->rl_testmode) return; mii_mediachg(mii); CSR_WRITE_1(sc, RL_CFG1, RL_CFG1_DRVLOAD|RL_CFG1_FULLDUPLEX); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; sc->rl_stat_ch = timeout(re_tick, sc, hz); } /* * Set media options. */ static int re_ifmedia_upd(ifp) struct ifnet *ifp; { struct rl_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->rl_miibus); mii_mediachg(mii); return (0); } /* * Report current media status. */ static void re_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct rl_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->rl_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static int re_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct rl_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int error = 0; switch (command) { case SIOCSIFMTU: if (ifr->ifr_mtu > RL_JUMBO_MTU) error = EINVAL; ifp->if_mtu = ifr->ifr_mtu; break; case SIOCSIFFLAGS: RL_LOCK(sc); if (ifp->if_flags & IFF_UP) re_init_locked(sc); else if (ifp->if_flags & IFF_RUNNING) re_stop(sc); RL_UNLOCK(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: RL_LOCK(sc); re_setmulti(sc); RL_UNLOCK(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->rl_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: ifp->if_capenable &= ~(IFCAP_HWCSUM | IFCAP_POLLING); ifp->if_capenable |= ifr->ifr_reqcap & (IFCAP_HWCSUM | IFCAP_POLLING); if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist = RE_CSUM_FEATURES; else ifp->if_hwassist = 0; if (ifp->if_flags & IFF_RUNNING) re_init(sc); break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void re_watchdog(ifp) struct ifnet *ifp; { struct 
rl_softc *sc; sc = ifp->if_softc; RL_LOCK(sc); printf("re%d: watchdog timeout\n", sc->rl_unit); ifp->if_oerrors++; re_txeof(sc); re_rxeof(sc); re_init_locked(sc); RL_UNLOCK(sc); } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void re_stop(sc) struct rl_softc *sc; { register int i; struct ifnet *ifp; RL_LOCK_ASSERT(sc); ifp = sc->rl_ifp; ifp->if_timer = 0; untimeout(re_tick, sc, sc->rl_stat_ch); ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); #ifdef DEVICE_POLLING ether_poll_deregister(ifp); #endif /* DEVICE_POLLING */ CSR_WRITE_1(sc, RL_COMMAND, 0x00); CSR_WRITE_2(sc, RL_IMR, 0x0000); if (sc->rl_head != NULL) { m_freem(sc->rl_head); sc->rl_head = sc->rl_tail = NULL; } /* Free the TX list buffers. */ for (i = 0; i < RL_TX_DESC_CNT; i++) { if (sc->rl_ldata.rl_tx_mbuf[i] != NULL) { bus_dmamap_unload(sc->rl_ldata.rl_mtag, sc->rl_ldata.rl_tx_dmamap[i]); m_freem(sc->rl_ldata.rl_tx_mbuf[i]); sc->rl_ldata.rl_tx_mbuf[i] = NULL; } } /* Free the RX list buffers. */ for (i = 0; i < RL_RX_DESC_CNT; i++) { if (sc->rl_ldata.rl_rx_mbuf[i] != NULL) { bus_dmamap_unload(sc->rl_ldata.rl_mtag, sc->rl_ldata.rl_rx_dmamap[i]); m_freem(sc->rl_ldata.rl_rx_mbuf[i]); sc->rl_ldata.rl_rx_mbuf[i] = NULL; } } } /* * Device suspend routine. Stop the interface and save some PCI * settings in case the BIOS doesn't restore them properly on * resume. */ static int re_suspend(dev) device_t dev; { struct rl_softc *sc; sc = device_get_softc(dev); RL_LOCK(sc); re_stop(sc); sc->suspended = 1; RL_UNLOCK(sc); return (0); } /* * Device resume routine. Restore some PCI settings in case the BIOS * doesn't, re-enable busmastering, and restart the interface if * appropriate. 
*/ static int re_resume(dev) device_t dev; { struct rl_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); RL_LOCK(sc); ifp = sc->rl_ifp; /* reinitialize interface if necessary */ if (ifp->if_flags & IFF_UP) re_init_locked(sc); sc->suspended = 0; RL_UNLOCK(sc); return (0); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static void re_shutdown(dev) device_t dev; { struct rl_softc *sc; sc = device_get_softc(dev); RL_LOCK(sc); re_stop(sc); RL_UNLOCK(sc); } Index: stable/6/sys/dev/sn/if_sn.c =================================================================== --- stable/6/sys/dev/sn/if_sn.c (revision 149421) +++ stable/6/sys/dev/sn/if_sn.c (revision 149422) @@ -1,1427 +1,1431 @@ /*- * Copyright (c) 1996 Gardner Buchanan * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Gardner Buchanan. * 4. The name of Gardner Buchanan may not be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * This is a driver for SMC's 9000 series of Ethernet adapters. * * This FreeBSD driver is derived from the smc9194 Linux driver by * Erik Stahlman and is Copyright (C) 1996 by Erik Stahlman. * This driver also shamelessly borrows from the FreeBSD ep driver * which is Copyright (C) 1994 Herb Peyerl * All rights reserved. * * It is set up for my SMC91C92 equipped Ampro LittleBoard embedded * PC. It is adapted from Erik Stahlman's Linux driver which worked * with his EFA Info*Express SVC VLB adaptor. According to SMC's databook, * it will work for the entire SMC 9xxx series. (Ha Ha) * * "Features" of the SMC chip: * 4608 byte packet memory. (for the 91C92. Others have more) * EEPROM for configuration * AUI/TP selection * * Authors: * Erik Stahlman erik@vt.edu * Herb Peyerl hpeyerl@novatel.ca * Andres Vega Garcia avega@sophia.inria.fr * Serge Babkin babkin@hq.icb.chel.su * Gardner Buchanan gbuchanan@shl.com * * Sources: * o SMC databook * o "smc9194.c:v0.10(FIXED) 02/15/96 by Erik Stahlman (erik@vt.edu)" * o "if_ep.c,v 1.19 1995/01/24 20:53:45 davidg Exp" * * Known Bugs: * o Setting of the hardware address isn't supported. * o Hardware padding isn't used. */ /* * Modifications for Megahertz X-Jack Ethernet Card (XJ-10BT) * * Copyright (c) 1996 by Tatsumi Hosokawa * BSD-nomads, Tokyo, Japan. 
*/ /* * Multicast support by Kei TANAKA * Special thanks to itojun@itojun.org */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #endif #include #include #include #include /* Exported variables */ devclass_t sn_devclass; static int snioctl(struct ifnet * ifp, u_long, caddr_t); static void snresume(struct ifnet *); static void sninit_locked(void *); static void snstart_locked(struct ifnet *); static void sninit(void *); static void snread(struct ifnet *); static void snstart(struct ifnet *); static void snstop(struct sn_softc *); static void snwatchdog(struct ifnet *); static void sn_setmcast(struct sn_softc *); static int sn_getmcf(struct ifnet *ifp, u_char *mcf); /* I (GB) have been unlucky getting the hardware padding * to work properly. */ #define SW_PAD static const char *chip_ids[15] = { NULL, NULL, NULL, /* 3 */ "SMC91C90/91C92", /* 4 */ "SMC91C94", /* 5 */ "SMC91C95", NULL, /* 7 */ "SMC91C100", /* 8 */ "SMC91C100FD", /* 9 */ "SMC91C110", NULL, NULL, NULL, NULL, NULL }; int sn_attach(device_t dev) { struct sn_softc *sc = device_get_softc(dev); struct ifnet *ifp; uint16_t i; uint8_t *p; int rev; uint16_t address; int err; u_char eaddr[6]; ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); return (ENOSPC); } SN_LOCK_INIT(sc); snstop(sc); sc->pages_wanted = -1; if (bootverbose || 1) { SMC_SELECT_BANK(sc, 3); rev = (CSR_READ_2(sc, REVISION_REG_W) >> 4) & 0xf; if (chip_ids[rev]) device_printf(dev, " %s ", chip_ids[rev]); else device_printf(dev, " unsupported chip"); SMC_SELECT_BANK(sc, 1); i = CSR_READ_2(sc, CONFIG_REG_W); printf("%s\n", i & CR_AUI_SELECT ? "AUI" : "UTP"); } /* * Read the station address from the chip. 
The MAC address is bank 1, * regs 4 - 9 */ SMC_SELECT_BANK(sc, 1); p = (uint8_t *) eaddr; for (i = 0; i < 6; i += 2) { address = CSR_READ_2(sc, IAR_ADDR0_REG_W + i); p[i + 1] = address >> 8; p[i] = address & 0xFF; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_start = snstart; ifp->if_ioctl = snioctl; ifp->if_watchdog = snwatchdog; ifp->if_init = sninit; ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; ifp->if_timer = 0; ether_ifattach(ifp, eaddr); /* * Activate the interrupt so we can get card interrupts. This * needs to be done last so that we don't have/hold the lock * during startup to avoid LORs in the network layer. */ if ((err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE, sn_intr, sc, &sc->intrhand)) != 0) { sn_detach(dev); return err; } return 0; } int sn_detach(device_t dev) { struct sn_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->ifp; snstop(sc); ifp->if_flags &= ~IFF_RUNNING; ether_ifdetach(ifp); if_free(ifp); sn_deactivate(dev); SN_LOCK_DESTORY(sc); return 0; } static void sninit(void *xsc) { struct sn_softc *sc = xsc; SN_LOCK(sc); sninit_locked(sc); SN_UNLOCK(sc); } /* * Reset and initialize the chip */ static void sninit_locked(void *xsc) { struct sn_softc *sc = xsc; struct ifnet *ifp = sc->ifp; int flags; int mask; SN_ASSERT_LOCKED(sc); /* * This resets the registers mostly to defaults, but doesn't affect * EEPROM. After the reset cycle, we pause briefly for the chip to * be happy. */ SMC_SELECT_BANK(sc, 0); CSR_WRITE_2(sc, RECV_CONTROL_REG_W, RCR_SOFTRESET); SMC_DELAY(sc); CSR_WRITE_2(sc, RECV_CONTROL_REG_W, 0x0000); SMC_DELAY(sc); SMC_DELAY(sc); CSR_WRITE_2(sc, TXMIT_CONTROL_REG_W, 0x0000); /* * Set the control register to automatically release succesfully * transmitted packets (making the best use out of our limited * memory) and to enable the EPH interrupt on certain TX errors. 
*/ SMC_SELECT_BANK(sc, 1); CSR_WRITE_2(sc, CONTROL_REG_W, (CTR_AUTO_RELEASE | CTR_TE_ENABLE | CTR_CR_ENABLE | CTR_LE_ENABLE)); /* Set squelch level to 240mV (default 480mV) */ flags = CSR_READ_2(sc, CONFIG_REG_W); flags |= CR_SET_SQLCH; CSR_WRITE_2(sc, CONFIG_REG_W, flags); /* * Reset the MMU and wait for it to be un-busy. */ SMC_SELECT_BANK(sc, 2); CSR_WRITE_2(sc, MMU_CMD_REG_W, MMUCR_RESET); while (CSR_READ_2(sc, MMU_CMD_REG_W) & MMUCR_BUSY) /* NOTHING */ ; /* * Disable all interrupts */ CSR_WRITE_1(sc, INTR_MASK_REG_B, 0x00); sn_setmcast(sc); /* * Set the transmitter control. We want it enabled. */ flags = TCR_ENABLE; #ifndef SW_PAD /* * I (GB) have been unlucky getting this to work. */ flags |= TCR_PAD_ENABLE; #endif /* SW_PAD */ CSR_WRITE_2(sc, TXMIT_CONTROL_REG_W, flags); /* * Now, enable interrupts */ SMC_SELECT_BANK(sc, 2); mask = IM_EPH_INT | IM_RX_OVRN_INT | IM_RCV_INT | IM_TX_INT; CSR_WRITE_1(sc, INTR_MASK_REG_B, mask); sc->intr_mask = mask; sc->pages_wanted = -1; /* * Mark the interface running but not active. */ ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; /* * Attempt to push out any waiting packets. */ snstart_locked(ifp); } static void snstart(struct ifnet *ifp) { struct sn_softc *sc = ifp->if_softc; SN_LOCK(sc); snstart_locked(ifp); SN_UNLOCK(sc); } static void snstart_locked(struct ifnet *ifp) { struct sn_softc *sc = ifp->if_softc; u_int len; struct mbuf *m; struct mbuf *top; int pad; int mask; uint16_t length; uint16_t numPages; uint8_t packet_no; int time_out; int junk = 0; SN_ASSERT_LOCKED(sc); if (sc->ifp->if_flags & IFF_OACTIVE) return; if (sc->pages_wanted != -1) { if_printf(ifp, "snstart() while memory allocation pending\n"); return; } startagain: /* * Sneak a peek at the next packet */ m = sc->ifp->if_snd.ifq_head; if (m == 0) return; /* * Compute the frame length and set pad to give an overall even * number of bytes. Below we assume that the packet length is even. 
*/ for (len = 0, top = m; m; m = m->m_next) len += m->m_len; pad = (len & 1); /* * We drop packets that are too large. Perhaps we should truncate * them instead? */ if (len + pad > ETHER_MAX_LEN - ETHER_CRC_LEN) { if_printf(ifp, "large packet discarded (A)\n"); ++sc->ifp->if_oerrors; IF_DEQUEUE(&sc->ifp->if_snd, m); m_freem(m); goto readcheck; } #ifdef SW_PAD /* * If HW padding is not turned on, then pad to ETHER_MIN_LEN. */ if (len < ETHER_MIN_LEN - ETHER_CRC_LEN) pad = ETHER_MIN_LEN - ETHER_CRC_LEN - len; #endif /* SW_PAD */ length = pad + len; /* * The MMU wants the number of pages to be the number of 256 byte * 'pages', minus 1 (A packet can't ever have 0 pages. We also * include space for the status word, byte count and control bytes in * the allocation request. */ numPages = (length + 6) >> 8; /* * Now, try to allocate the memory */ SMC_SELECT_BANK(sc, 2); CSR_WRITE_2(sc, MMU_CMD_REG_W, MMUCR_ALLOC | numPages); /* * Wait a short amount of time to see if the allocation request * completes. Otherwise, I enable the interrupt and wait for * completion asyncronously. */ time_out = MEMORY_WAIT_TIME; do { if (CSR_READ_1(sc, INTR_STAT_REG_B) & IM_ALLOC_INT) break; } while (--time_out); if (!time_out || junk > 10) { /* * No memory now. Oh well, wait until the chip finds memory * later. Remember how many pages we were asking for and * enable the allocation completion interrupt. Also set a * watchdog in case we miss the interrupt. We mark the * interface active since there is no point in attempting an * snstart() until after the memory is available. */ mask = CSR_READ_1(sc, INTR_MASK_REG_B) | IM_ALLOC_INT; CSR_WRITE_1(sc, INTR_MASK_REG_B, mask); sc->intr_mask = mask; sc->ifp->if_timer = 1; sc->ifp->if_flags |= IFF_OACTIVE; sc->pages_wanted = numPages; return; } /* * The memory allocation completed. Check the results. 
*/ packet_no = CSR_READ_1(sc, ALLOC_RESULT_REG_B); if (packet_no & ARR_FAILED) { if (junk++ > 10) if_printf(ifp, "Memory allocation failed\n"); goto startagain; } /* * We have a packet number, so tell the card to use it. */ CSR_WRITE_1(sc, PACKET_NUM_REG_B, packet_no); /* * Point to the beginning of the packet */ CSR_WRITE_2(sc, POINTER_REG_W, PTR_AUTOINC | 0x0000); /* * Send the packet length (+6 for status, length and control byte) * and the status word (set to zeros) */ CSR_WRITE_2(sc, DATA_REG_W, 0); CSR_WRITE_1(sc, DATA_REG_B, (length + 6) & 0xFF); CSR_WRITE_1(sc, DATA_REG_B, (length + 6) >> 8); /* * Get the packet from the kernel. This will include the Ethernet * frame header, MAC Addresses etc. */ IF_DEQUEUE(&sc->ifp->if_snd, m); /* * Push out the data to the card. */ for (top = m; m != 0; m = m->m_next) { /* * Push out words. */ CSR_WRITE_MULTI_2(sc, DATA_REG_W, mtod(m, uint16_t *), m->m_len / 2); /* * Push out remaining byte. */ if (m->m_len & 1) CSR_WRITE_1(sc, DATA_REG_B, *(mtod(m, caddr_t) + m->m_len - 1)); } /* * Push out padding. */ while (pad > 1) { CSR_WRITE_2(sc, DATA_REG_W, 0); pad -= 2; } if (pad) CSR_WRITE_1(sc, DATA_REG_B, 0); /* * Push out control byte and unused packet byte The control byte is 0 * meaning the packet is even lengthed and no special CRC handling is * desired. */ CSR_WRITE_2(sc, DATA_REG_W, 0); /* * Enable the interrupts and let the chipset deal with it Also set a * watchdog in case we miss the interrupt. */ mask = CSR_READ_1(sc, INTR_MASK_REG_B) | (IM_TX_INT | IM_TX_EMPTY_INT); CSR_WRITE_1(sc, INTR_MASK_REG_B, mask); sc->intr_mask = mask; CSR_WRITE_2(sc, MMU_CMD_REG_W, MMUCR_ENQUEUE); sc->ifp->if_flags |= IFF_OACTIVE; sc->ifp->if_timer = 1; BPF_MTAP(ifp, top); sc->ifp->if_opackets++; m_freem(top); readcheck: /* * Is another packet coming in? We don't want to overflow the tiny * RX FIFO. If nothing has arrived then attempt to queue another * transmit packet. 
*/ if (CSR_READ_2(sc, FIFO_PORTS_REG_W) & FIFO_REMPTY) goto startagain; return; } /* Resume a packet transmit operation after a memory allocation * has completed. * * This is basically a hacked up copy of snstart() which handles * a completed memory allocation the same way snstart() does. * It then passes control to snstart to handle any other queued * packets. */ static void snresume(struct ifnet *ifp) { struct sn_softc *sc = ifp->if_softc; u_int len; struct mbuf *m; struct mbuf *top; int pad; int mask; uint16_t length; uint16_t numPages; uint16_t pages_wanted; uint8_t packet_no; if (sc->pages_wanted < 0) return; pages_wanted = sc->pages_wanted; sc->pages_wanted = -1; /* * Sneak a peek at the next packet */ m = sc->ifp->if_snd.ifq_head; if (m == 0) { if_printf(ifp, "snresume() with nothing to send\n"); return; } /* * Compute the frame length and set pad to give an overall even * number of bytes. Below we assume that the packet length is even. */ for (len = 0, top = m; m; m = m->m_next) len += m->m_len; pad = (len & 1); /* * We drop packets that are too large. Perhaps we should truncate * them instead? */ if (len + pad > ETHER_MAX_LEN - ETHER_CRC_LEN) { if_printf(ifp, "large packet discarded (B)\n"); ++sc->ifp->if_oerrors; IF_DEQUEUE(&sc->ifp->if_snd, m); m_freem(m); return; } #ifdef SW_PAD /* * If HW padding is not turned on, then pad to ETHER_MIN_LEN. */ if (len < ETHER_MIN_LEN - ETHER_CRC_LEN) pad = ETHER_MIN_LEN - ETHER_CRC_LEN - len; #endif /* SW_PAD */ length = pad + len; /* * The MMU wants the number of pages to be the number of 256 byte * 'pages', minus 1 (A packet can't ever have 0 pages. We also * include space for the status word, byte count and control bytes in * the allocation request. */ numPages = (length + 6) >> 8; SMC_SELECT_BANK(sc, 2); /* * The memory allocation completed. Check the results. If it failed, * we simply set a watchdog timer and hope for the best. 
*/ packet_no = CSR_READ_1(sc, ALLOC_RESULT_REG_B); if (packet_no & ARR_FAILED) { if_printf(ifp, "Memory allocation failed. Weird.\n"); sc->ifp->if_timer = 1; goto try_start; } /* * We have a packet number, so tell the card to use it. */ CSR_WRITE_1(sc, PACKET_NUM_REG_B, packet_no); /* * Now, numPages should match the pages_wanted recorded when the * memory allocation was initiated. */ if (pages_wanted != numPages) { if_printf(ifp, "memory allocation wrong size. Weird.\n"); /* * If the allocation was the wrong size we simply release the * memory once it is granted. Wait for the MMU to be un-busy. */ while (CSR_READ_2(sc, MMU_CMD_REG_W) & MMUCR_BUSY) /* NOTHING */ ; CSR_WRITE_2(sc, MMU_CMD_REG_W, MMUCR_FREEPKT); return; } /* * Point to the beginning of the packet */ CSR_WRITE_2(sc, POINTER_REG_W, PTR_AUTOINC | 0x0000); /* * Send the packet length (+6 for status, length and control byte) * and the status word (set to zeros) */ CSR_WRITE_2(sc, DATA_REG_W, 0); CSR_WRITE_1(sc, DATA_REG_B, (length + 6) & 0xFF); CSR_WRITE_1(sc, DATA_REG_B, (length + 6) >> 8); /* * Get the packet from the kernel. This will include the Ethernet * frame header, MAC Addresses etc. */ IF_DEQUEUE(&sc->ifp->if_snd, m); /* * Push out the data to the card. */ for (top = m; m != 0; m = m->m_next) { /* * Push out words. */ CSR_WRITE_MULTI_2(sc, DATA_REG_W, mtod(m, uint16_t *), m->m_len / 2); /* * Push out remaining byte. */ if (m->m_len & 1) CSR_WRITE_1(sc, DATA_REG_B, *(mtod(m, caddr_t) + m->m_len - 1)); } /* * Push out padding. */ while (pad > 1) { CSR_WRITE_2(sc, DATA_REG_W, 0); pad -= 2; } if (pad) CSR_WRITE_1(sc, DATA_REG_B, 0); /* * Push out control byte and unused packet byte The control byte is 0 * meaning the packet is even lengthed and no special CRC handling is * desired. */ CSR_WRITE_2(sc, DATA_REG_W, 0); /* * Enable the interrupts and let the chipset deal with it Also set a * watchdog in case we miss the interrupt. 
*/ mask = CSR_READ_1(sc, INTR_MASK_REG_B) | (IM_TX_INT | IM_TX_EMPTY_INT); CSR_WRITE_1(sc, INTR_MASK_REG_B, mask); sc->intr_mask = mask; CSR_WRITE_2(sc, MMU_CMD_REG_W, MMUCR_ENQUEUE); BPF_MTAP(ifp, top); sc->ifp->if_opackets++; m_freem(top); try_start: /* * Now pass control to snstart() to queue any additional packets */ sc->ifp->if_flags &= ~IFF_OACTIVE; snstart(ifp); /* * We've sent something, so we're active. Set a watchdog in case the * TX_EMPTY interrupt is lost. */ sc->ifp->if_flags |= IFF_OACTIVE; sc->ifp->if_timer = 1; return; } void sn_intr(void *arg) { int status, interrupts; struct sn_softc *sc = (struct sn_softc *) arg; struct ifnet *ifp = sc->ifp; /* * Chip state registers */ uint8_t mask; uint8_t packet_no; uint16_t tx_status; uint16_t card_stats; SN_LOCK(sc); /* * Clear the watchdog. */ ifp->if_timer = 0; SMC_SELECT_BANK(sc, 2); /* * Obtain the current interrupt mask and clear the hardware mask * while servicing interrupts. */ mask = CSR_READ_1(sc, INTR_MASK_REG_B); CSR_WRITE_1(sc, INTR_MASK_REG_B, 0x00); /* * Get the set of interrupts which occurred and eliminate any which * are masked. */ interrupts = CSR_READ_1(sc, INTR_STAT_REG_B); status = interrupts & mask; /* * Now, process each of the interrupt types. */ /* * Receive Overrun. */ if (status & IM_RX_OVRN_INT) { /* * Acknowlege Interrupt */ SMC_SELECT_BANK(sc, 2); CSR_WRITE_1(sc, INTR_ACK_REG_B, IM_RX_OVRN_INT); ++sc->ifp->if_ierrors; } /* * Got a packet. */ if (status & IM_RCV_INT) { int packet_number; SMC_SELECT_BANK(sc, 2); packet_number = CSR_READ_2(sc, FIFO_PORTS_REG_W); if (packet_number & FIFO_REMPTY) { /* * we got called , but nothing was on the FIFO */ printf("sn: Receive interrupt with nothing on FIFO\n"); goto out; } snread(ifp); } /* * An on-card memory allocation came through. */ if (status & IM_ALLOC_INT) { /* * Disable this interrupt. */ mask &= ~IM_ALLOC_INT; sc->ifp->if_flags &= ~IFF_OACTIVE; snresume(sc->ifp); } /* * TX Completion. Handle a transmit error message. 
This will only be * called when there is an error, because of the AUTO_RELEASE mode. */ if (status & IM_TX_INT) { /* * Acknowlege Interrupt */ SMC_SELECT_BANK(sc, 2); CSR_WRITE_1(sc, INTR_ACK_REG_B, IM_TX_INT); packet_no = CSR_READ_2(sc, FIFO_PORTS_REG_W); packet_no &= FIFO_TX_MASK; /* * select this as the packet to read from */ CSR_WRITE_1(sc, PACKET_NUM_REG_B, packet_no); /* * Position the pointer to the first word from this packet */ CSR_WRITE_2(sc, POINTER_REG_W, PTR_AUTOINC | PTR_READ | 0x0000); /* * Fetch the TX status word. The value found here will be a * copy of the EPH_STATUS_REG_W at the time the transmit * failed. */ tx_status = CSR_READ_2(sc, DATA_REG_W); if (tx_status & EPHSR_TX_SUC) { device_printf(sc->dev, "Successful packet caused interrupt\n"); } else { ++sc->ifp->if_oerrors; } if (tx_status & EPHSR_LATCOL) ++sc->ifp->if_collisions; /* * Some of these errors will have disabled transmit. * Re-enable transmit now. */ SMC_SELECT_BANK(sc, 0); #ifdef SW_PAD CSR_WRITE_2(sc, TXMIT_CONTROL_REG_W, TCR_ENABLE); #else CSR_WRITE_2(sc, TXMIT_CONTROL_REG_W, TCR_ENABLE | TCR_PAD_ENABLE); #endif /* SW_PAD */ /* * kill the failed packet. Wait for the MMU to be un-busy. */ SMC_SELECT_BANK(sc, 2); while (CSR_READ_2(sc, MMU_CMD_REG_W) & MMUCR_BUSY) /* NOTHING */ ; CSR_WRITE_2(sc, MMU_CMD_REG_W, MMUCR_FREEPKT); /* * Attempt to queue more transmits. */ sc->ifp->if_flags &= ~IFF_OACTIVE; snstart_locked(sc->ifp); } /* * Transmit underrun. We use this opportunity to update transmit * statistics from the card. */ if (status & IM_TX_EMPTY_INT) { /* * Acknowlege Interrupt */ SMC_SELECT_BANK(sc, 2); CSR_WRITE_1(sc, INTR_ACK_REG_B, IM_TX_EMPTY_INT); /* * Disable this interrupt. 
*/ mask &= ~IM_TX_EMPTY_INT; SMC_SELECT_BANK(sc, 0); card_stats = CSR_READ_2(sc, COUNTER_REG_W); /* * Single collisions */ sc->ifp->if_collisions += card_stats & ECR_COLN_MASK; /* * Multiple collisions */ sc->ifp->if_collisions += (card_stats & ECR_MCOLN_MASK) >> 4; SMC_SELECT_BANK(sc, 2); /* * Attempt to enqueue some more stuff. */ sc->ifp->if_flags &= ~IFF_OACTIVE; snstart_locked(sc->ifp); } /* * Some other error. Try to fix it by resetting the adapter. */ if (status & IM_EPH_INT) { snstop(sc); sninit_locked(sc); } out: /* * Handled all interrupt sources. */ SMC_SELECT_BANK(sc, 2); /* * Reestablish interrupts from mask which have not been deselected * during this interrupt. Note that the hardware mask, which was set * to 0x00 at the start of this service routine, may have been * updated by one or more of the interrupt handers and we must let * those new interrupts stay enabled here. */ mask |= CSR_READ_1(sc, INTR_MASK_REG_B); CSR_WRITE_1(sc, INTR_MASK_REG_B, mask); sc->intr_mask = mask; SN_UNLOCK(sc); } static void snread(struct ifnet *ifp) { struct sn_softc *sc = ifp->if_softc; struct ether_header *eh; struct mbuf *m; short status; int packet_number; uint16_t packet_length; uint8_t *data; SMC_SELECT_BANK(sc, 2); #if 0 packet_number = CSR_READ_2(sc, FIFO_PORTS_REG_W); if (packet_number & FIFO_REMPTY) { /* * we got called , but nothing was on the FIFO */ printf("sn: Receive interrupt with nothing on FIFO\n"); return; } #endif read_another: /* * Start reading from the start of the packet. Since PTR_RCV is set, * packet number is found in FIFO_PORTS_REG_W, FIFO_RX_MASK. */ CSR_WRITE_2(sc, POINTER_REG_W, PTR_READ | PTR_RCV | PTR_AUTOINC | 0x0000); /* * First two words are status and packet_length */ status = CSR_READ_2(sc, DATA_REG_W); packet_length = CSR_READ_2(sc, DATA_REG_W) & RLEN_MASK; /* * The packet length contains 3 extra words: status, length, and a * extra word with the control byte. */ packet_length -= 6; /* * Account for receive errors and discard. 
*/ if (status & RS_ERRORS) { ++sc->ifp->if_ierrors; goto out; } /* * A packet is received. */ /* * Adjust for odd-length packet. */ if (status & RS_ODDFRAME) packet_length++; /* * Allocate a header mbuf from the kernel. */ MGETHDR(m, M_DONTWAIT, MT_DATA); if (m == NULL) goto out; m->m_pkthdr.rcvif = sc->ifp; m->m_pkthdr.len = m->m_len = packet_length; /* * Attach an mbuf cluster */ MCLGET(m, M_DONTWAIT); /* * Insist on getting a cluster */ if ((m->m_flags & M_EXT) == 0) { m_freem(m); ++sc->ifp->if_ierrors; printf("sn: snread() kernel memory allocation problem\n"); goto out; } eh = mtod(m, struct ether_header *); /* * Get packet, including link layer address, from interface. */ data = (uint8_t *) eh; CSR_READ_MULTI_2(sc, DATA_REG_W, (uint16_t *) data, packet_length >> 1); if (packet_length & 1) { data += packet_length & ~1; *data = CSR_READ_1(sc, DATA_REG_B); } ++sc->ifp->if_ipackets; /* * Remove link layer addresses and whatnot. */ m->m_pkthdr.len = m->m_len = packet_length; /* * Drop locks before calling if_input() since it may re-enter * snstart() in the netisr case. This would result in a * lock reversal. Better performance might be obtained by * chaining all packets received, dropping the lock, and then * calling if_input() on each one. */ SN_UNLOCK(sc); (*ifp->if_input)(ifp, m); SN_LOCK(sc); out: /* * Error or good, tell the card to get rid of this packet Wait for * the MMU to be un-busy. */ SMC_SELECT_BANK(sc, 2); while (CSR_READ_2(sc, MMU_CMD_REG_W) & MMUCR_BUSY) /* NOTHING */ ; CSR_WRITE_2(sc, MMU_CMD_REG_W, MMUCR_RELEASE); /* * Check whether another packet is ready */ packet_number = CSR_READ_2(sc, FIFO_PORTS_REG_W); if (packet_number & FIFO_REMPTY) { return; } goto read_another; } /* * Handle IOCTLS. This function is completely stolen from if_ep.c * As with its progenitor, it does not handle hardware address * changes. 
*/ static int snioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct sn_softc *sc = ifp->if_softc; int error = 0; switch (cmd) { case SIOCSIFFLAGS: SN_LOCK(sc); if ((ifp->if_flags & IFF_UP) == 0 && ifp->if_flags & IFF_RUNNING) { ifp->if_flags &= ~IFF_RUNNING; snstop(sc); } else { /* reinitialize card on any parameter change */ sninit_locked(sc); } SN_UNLOCK(sc); break; #ifdef notdef case SIOCGHWADDR: bcopy((caddr_t) sc->sc_addr, (caddr_t) & ifr->ifr_data, sizeof(sc->sc_addr)); break; #endif case SIOCADDMULTI: /* update multicast filter list. */ SN_LOCK(sc); sn_setmcast(sc); error = 0; SN_UNLOCK(sc); break; case SIOCDELMULTI: /* update multicast filter list. */ SN_LOCK(sc); sn_setmcast(sc); error = 0; SN_UNLOCK(sc); break; default: error = EINVAL; error = ether_ioctl(ifp, cmd, data); break; } return (error); } static void snwatchdog(struct ifnet *ifp) { sn_intr(ifp->if_softc); } /* 1. zero the interrupt mask * 2. clear the enable receive flag * 3. clear the enable xmit flags */ static void snstop(struct sn_softc *sc) { struct ifnet *ifp = sc->ifp; /* * Clear interrupt mask; disable all interrupts. */ SMC_SELECT_BANK(sc, 2); CSR_WRITE_1(sc, INTR_MASK_REG_B, 0x00); /* * Disable transmitter and Receiver */ SMC_SELECT_BANK(sc, 0); CSR_WRITE_2(sc, RECV_CONTROL_REG_W, 0x0000); CSR_WRITE_2(sc, TXMIT_CONTROL_REG_W, 0x0000); /* * Cancel watchdog. 
*/ ifp->if_timer = 0; } int sn_activate(device_t dev) { struct sn_softc *sc = device_get_softc(dev); sc->port_rid = 0; sc->port_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &sc->port_rid, 0, ~0, SMC_IO_EXTENT, RF_ACTIVE); if (!sc->port_res) { if (bootverbose) device_printf(dev, "Cannot allocate ioport\n"); return ENOMEM; } sc->irq_rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE); if (!sc->irq_res) { if (bootverbose) device_printf(dev, "Cannot allocate irq\n"); sn_deactivate(dev); return ENOMEM; } sc->bst = rman_get_bustag(sc->port_res); sc->bsh = rman_get_bushandle(sc->port_res); return (0); } void sn_deactivate(device_t dev) { struct sn_softc *sc = device_get_softc(dev); if (sc->intrhand) bus_teardown_intr(dev, sc->irq_res, sc->intrhand); sc->intrhand = 0; if (sc->port_res) bus_release_resource(dev, SYS_RES_IOPORT, sc->port_rid, sc->port_res); sc->port_res = 0; if (sc->irq_res) bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); sc->irq_res = 0; return; } /* * Function: sn_probe( device_t dev) * * Purpose: * Tests to see if a given ioaddr points to an SMC9xxx chip. * Tries to cause as little damage as possible if it's not a SMC chip. * Returns a 0 on success * * Algorithm: * (1) see if the high byte of BANK_SELECT is 0x33 * (2) compare the ioaddr with the base register's address * (3) see if I recognize the chip ID in the appropriate register * * */ int sn_probe(device_t dev) { struct sn_softc *sc = device_get_softc(dev); uint16_t bank; uint16_t revision_register; uint16_t base_address_register; int err; if ((err = sn_activate(dev)) != 0) return err; /* * First, see if the high byte is 0x33 */ bank = CSR_READ_2(sc, BANK_SELECT_REG_W); if ((bank & BSR_DETECT_MASK) != BSR_DETECT_VALUE) { #ifdef SN_DEBUG device_printf(dev, "test1 failed\n"); #endif goto error; } /* * The above MIGHT indicate a device, but I need to write to further * test this. 
Go to bank 0, then test that the register still * reports the high byte is 0x33. */ CSR_WRITE_2(sc, BANK_SELECT_REG_W, 0x0000); bank = CSR_READ_2(sc, BANK_SELECT_REG_W); if ((bank & BSR_DETECT_MASK) != BSR_DETECT_VALUE) { #ifdef SN_DEBUG device_printf(dev, "test2 failed\n"); #endif goto error; } /* * well, we've already written once, so hopefully another time won't * hurt. This time, I need to switch the bank register to bank 1, so * I can access the base address register. The contents of the * BASE_ADDR_REG_W register, after some jiggery pokery, is expected * to match the I/O port address where the adapter is being probed. */ CSR_WRITE_2(sc, BANK_SELECT_REG_W, 0x0001); base_address_register = (CSR_READ_2(sc, BASE_ADDR_REG_W) >> 3) & 0x3e0; if (rman_get_start(sc->port_res) != base_address_register) { /* * Well, the base address register didn't match. Must not * have been a SMC chip after all. */ #ifdef SN_DEBUG device_printf(dev, "test3 failed ioaddr = 0x%x, " "base_address_register = 0x%x\n", rman_get_start(sc->port_res), base_address_register); #endif goto error; } /* * Check if the revision register is something that I recognize. * These might need to be added to later, as future revisions could * be added. */ CSR_WRITE_2(sc, BANK_SELECT_REG_W, 0x3); revision_register = CSR_READ_2(sc, REVISION_REG_W); if (!chip_ids[(revision_register >> 4) & 0xF]) { /* * I don't regonize this chip, so... */ #ifdef SN_DEBUG device_printf(dev, "test4 failed\n"); #endif goto error; } /* * at this point I'll assume that the chip is an SMC9xxx. It might be * prudent to check a listing of MAC addresses against the hardware * address, or do some other tests. */ sn_deactivate(dev); return 0; error: sn_deactivate(dev); return ENXIO; } #define MCFSZ 8 static void sn_setmcast(struct sn_softc *sc) { struct ifnet *ifp = sc->ifp; int flags; uint8_t mcf[MCFSZ]; SN_ASSERT_LOCKED(sc); /* * Set the receiver filter. We want receive enabled and auto strip * of CRC from received packet. 
If we are promiscuous then set that * bit too. */ flags = RCR_ENABLE | RCR_STRIP_CRC; if (ifp->if_flags & IFF_PROMISC) { flags |= RCR_PROMISC | RCR_ALMUL; } else if (ifp->if_flags & IFF_ALLMULTI) { flags |= RCR_ALMUL; } else { if (sn_getmcf(ifp, mcf)) { /* set filter */ SMC_SELECT_BANK(sc, 3); CSR_WRITE_2(sc, MULTICAST1_REG_W, ((uint16_t)mcf[1] << 8) | mcf[0]); CSR_WRITE_2(sc, MULTICAST2_REG_W, ((uint16_t)mcf[3] << 8) | mcf[2]); CSR_WRITE_2(sc, MULTICAST3_REG_W, ((uint16_t)mcf[5] << 8) | mcf[4]); CSR_WRITE_2(sc, MULTICAST4_REG_W, ((uint16_t)mcf[7] << 8) | mcf[6]); } else { flags |= RCR_ALMUL; } } SMC_SELECT_BANK(sc, 0); CSR_WRITE_2(sc, RECV_CONTROL_REG_W, flags); } static int sn_getmcf(struct ifnet *ifp, uint8_t *mcf) { int i; uint32_t index, index2; uint8_t *af = mcf; struct ifmultiaddr *ifma; bzero(mcf, MCFSZ); + IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { - if (ifma->ifma_addr->sa_family != AF_LINK) + if (ifma->ifma_addr->sa_family != AF_LINK) { + IF_ADDR_UNLOCK(ifp); return 0; + } index = ether_crc32_le(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) & 0x3f; index2 = 0; for (i = 0; i < 6; i++) { index2 <<= 1; index2 |= (index & 0x01); index >>= 1; } af[index2 >> 3] |= 1 << (index2 & 7); } + IF_ADDR_UNLOCK(ifp); return 1; /* use multicast filter */ } Index: stable/6/sys/dev/snc/dp83932.c =================================================================== --- stable/6/sys/dev/snc/dp83932.c (revision 149421) +++ stable/6/sys/dev/snc/dp83932.c (revision 149422) @@ -1,1217 +1,1219 @@ /* $FreeBSD$ */ /* $NecBSD: dp83932.c,v 1.5 1999/07/29 05:08:44 kmatsuda Exp $ */ /* $NetBSD: if_snc.c,v 1.18 1998/04/25 21:27:40 scottr Exp $ */ /*- * Copyright (c) 1997, 1998, 1999 * Kouichi Matsuda. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Kouichi Matsuda for * NetBSD/pc98. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Modified for FreeBSD(98) 4.0 from NetBSD/pc98 1.4.2 by Motomichi Matsuzaki. */ /* * Modified for NetBSD/pc98 1.2G from NetBSD/mac68k 1.2G by Kouichi Matsuda. * Make adapted for NEC PC-9801-83, 84, PC-9801-103, 104, PC-9801N-25 and * PC-9801N-J02, J02R, which uses National Semiconductor DP83934AVQB as * Ethernet Controller and National Semiconductor NS46C46 as * (64 * 16 bits) Microwire Serial EEPROM. 
*/ /*- * National Semiconductor DP8393X SONIC Driver * Copyright (c) 1991 Algorithmics Ltd (http://www.algor.co.uk) * You may use, copy, and modify this program so long as you retain the * copyright line. * * This driver has been substantially modified since Algorithmics donated * it. * * Denton Gentry * and also * Yanagisawa Takeshi * did the work to get this running on the Macintosh. */ #include "opt_inet.h" #include #include #include #include #include #include #include #include #if NRND > 0 #include #endif #include #include #include #include #include #include #include #include #include #include #include hide void sncwatchdog(struct ifnet *); hide void sncinit(void *); hide int sncstop(struct snc_softc *sc); hide int sncioctl(struct ifnet *ifp, u_long cmd, caddr_t data); hide void sncstart(struct ifnet *ifp); hide void sncreset(struct snc_softc *sc); hide void caminitialise(struct snc_softc *); hide void camentry(struct snc_softc *, int, u_char *ea); hide void camprogram(struct snc_softc *); hide void initialise_tda(struct snc_softc *); hide void initialise_rda(struct snc_softc *); hide void initialise_rra(struct snc_softc *); #ifdef SNCDEBUG hide void camdump(struct snc_softc *sc); #endif hide void sonictxint(struct snc_softc *); hide void sonicrxint(struct snc_softc *); hide u_int sonicput(struct snc_softc *sc, struct mbuf *m0, int mtd_next); hide int sonic_read(struct snc_softc *, u_int32_t, int); hide struct mbuf *sonic_get(struct snc_softc *, u_int32_t, int); int snc_enable(struct snc_softc *); void snc_disable(struct snc_softc *); int snc_mediachange(struct ifnet *); void snc_mediastatus(struct ifnet *, struct ifmediareq *); #ifdef NetBSD #if NetBSD <= 199714 struct cfdriver snc_cd = { NULL, "snc", DV_IFNET }; #endif #endif #undef assert #undef _assert #ifdef NDEBUG #define assert(e) ((void)0) #define _assert(e) ((void)0) #else #define _assert(e) assert(e) #ifdef __STDC__ #define assert(e) ((e) ? 
(void)0 : __assert("snc ", __FILE__, __LINE__, #e)) #else /* PCC */ #define assert(e) ((e) ? (void)0 : __assert("snc "__FILE__, __LINE__, "e")) #endif #endif #ifdef SNCDEBUG #define SNC_SHOWTXHDR 0x01 /* show tx ether_header */ #define SNC_SHOWRXHDR 0x02 /* show rx ether_header */ #define SNC_SHOWCAMENT 0x04 /* show CAM entry */ #endif /* SNCDEBUG */ int sncdebug = 0; void sncconfig(sc, media, nmedia, defmedia, myea) struct snc_softc *sc; int *media, nmedia, defmedia; u_int8_t *myea; { struct ifnet *ifp; int i; #ifdef SNCDEBUG if ((sncdebug & SNC_SHOWCAMENT) != 0) { camdump(sc); } #endif ifp = sc->sc_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) panic("%s: can not if_alloc()\n", device_get_nameunit(sc->sc_dev)); #ifdef SNCDEBUG device_printf(sc->sc_dev, "buffers: rra=0x%x cda=0x%x rda=0x%x tda=0x%x\n", sc->v_rra[0], sc->v_cda, sc->v_rda, sc->mtda[0].mtd_vtxp); #endif ifp->if_softc = sc; if_initname(ifp, device_get_name(sc->sc_dev), device_get_unit(sc->sc_dev)); ifp->if_ioctl = sncioctl; ifp->if_start = sncstart; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_NEEDSGIANT; ifp->if_watchdog = sncwatchdog; ifp->if_init = sncinit; ifp->if_mtu = ETHERMTU; ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; /* Initialize media goo. */ ifmedia_init(&sc->sc_media, 0, snc_mediachange, snc_mediastatus); if (media != NULL) { for (i = 0; i < nmedia; i++) ifmedia_add(&sc->sc_media, media[i], 0, NULL); ifmedia_set(&sc->sc_media, defmedia); } else { ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL); ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL); } ether_ifattach(ifp, myea); #if NRND > 0 rnd_attach_source(&sc->rnd_source, device_get_nameunit(sc->sc_dev), RND_TYPE_NET, 0); #endif } void sncshutdown(arg) void *arg; { sncstop((struct snc_softc *)arg); } /* * Media change callback. */ int snc_mediachange(ifp) struct ifnet *ifp; { struct snc_softc *sc = ifp->if_softc; if (sc->sc_mediachange) return ((*sc->sc_mediachange)(sc)); return (EINVAL); } /* * Media status callback. 
*/ void snc_mediastatus(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct snc_softc *sc = ifp->if_softc; if (sc->sc_enabled == 0) { ifmr->ifm_active = IFM_ETHER | IFM_NONE; ifmr->ifm_status = 0; return; } if (sc->sc_mediastatus) (*sc->sc_mediastatus)(sc, ifmr); } hide int sncioctl(ifp, cmd, data) struct ifnet *ifp; u_long cmd; caddr_t data; { struct ifreq *ifr; struct snc_softc *sc = ifp->if_softc; int s = splhardnet(), err = 0; int temp; switch (cmd) { case SIOCSIFFLAGS: if ((ifp->if_flags & IFF_UP) == 0 && (ifp->if_flags & IFF_RUNNING) != 0) { /* * If interface is marked down and it is running, * then stop it. */ sncstop(sc); ifp->if_flags &= ~IFF_RUNNING; snc_disable(sc); } else if ((ifp->if_flags & IFF_UP) != 0 && (ifp->if_flags & IFF_RUNNING) == 0) { /* * If interface is marked up and it is stopped, * then start it. */ if ((err = snc_enable(sc)) != 0) break; sncinit(sc); } else if (sc->sc_enabled) { /* * reset the interface to pick up any other changes * in flags */ temp = ifp->if_flags & IFF_UP; sncreset(sc); ifp->if_flags |= temp; sncstart(ifp); } break; case SIOCADDMULTI: case SIOCDELMULTI: if (sc->sc_enabled == 0) { err = EIO; break; } temp = ifp->if_flags & IFF_UP; sncreset(sc); ifp->if_flags |= temp; err = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: ifr = (struct ifreq *) data; err = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); break; default: err = ether_ioctl(ifp, cmd, data); break; } splx(s); return (err); } /* * Encapsulate a packet of type family for the local net. */ hide void sncstart(ifp) struct ifnet *ifp; { struct snc_softc *sc = ifp->if_softc; struct mbuf *m; int mtd_next; if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) return; outloop: /* Check for room in the xmit buffer. */ if ((mtd_next = (sc->mtd_free + 1)) == NTDA) mtd_next = 0; if (mtd_next == sc->mtd_hw) { ifp->if_flags |= IFF_OACTIVE; return; } IF_DEQUEUE(&ifp->if_snd, m); if (m == 0) return; /* We need the header for m_pkthdr.len. 
*/ M_ASSERTPKTHDR(m); /* * If there is nothing in the o/p queue, and there is room in * the Tx ring, then send the packet directly. Otherwise append * it to the o/p queue. */ if ((sonicput(sc, m, mtd_next)) == 0) { IF_PREPEND(&ifp->if_snd, m); return; } /* * If bpf is listening on this interface, let it see the packet * before we commit it to the wire, but only if we are really * committed to send it. * * XXX: Locking must protect m against premature m_freem() in * sonictxint(). */ BPF_MTAP(ifp, m); sc->mtd_prev = sc->mtd_free; sc->mtd_free = mtd_next; ifp->if_opackets++; /* # of pkts */ /* Jump back for possibly more punishment. */ goto outloop; } /* * reset and restart the SONIC. Called in case of fatal * hardware/software errors. */ hide void sncreset(sc) struct snc_softc *sc; { sncstop(sc); sncinit(sc); } hide void sncinit(xsc) void *xsc; { struct snc_softc *sc = xsc; u_long s_rcr; int s; if (sc->sc_ifp->if_flags & IFF_RUNNING) /* already running */ return; s = splhardnet(); NIC_PUT(sc, SNCR_CR, CR_RST); /* DCR only accessable in reset mode! */ /* config it */ NIC_PUT(sc, SNCR_DCR, (sc->sncr_dcr | (sc->bitmode ? 
DCR_DW32 : DCR_DW16))); NIC_PUT(sc, SNCR_DCR2, sc->sncr_dcr2); s_rcr = RCR_BRD | RCR_LBNONE; if (sc->sc_ifp->if_flags & IFF_PROMISC) s_rcr |= RCR_PRO; if (sc->sc_ifp->if_flags & IFF_ALLMULTI) s_rcr |= RCR_AMC; NIC_PUT(sc, SNCR_RCR, s_rcr); NIC_PUT(sc, SNCR_IMR, (IMR_PRXEN | IMR_PTXEN | IMR_TXEREN | IMR_LCDEN)); /* clear pending interrupts */ NIC_PUT(sc, SNCR_ISR, ISR_ALL); /* clear tally counters */ NIC_PUT(sc, SNCR_CRCT, -1); NIC_PUT(sc, SNCR_FAET, -1); NIC_PUT(sc, SNCR_MPT, -1); initialise_tda(sc); initialise_rda(sc); initialise_rra(sc); /* enable the chip */ NIC_PUT(sc, SNCR_CR, 0); wbflush(); /* program the CAM */ camprogram(sc); /* get it to read resource descriptors */ NIC_PUT(sc, SNCR_CR, CR_RRRA); wbflush(); while ((NIC_GET(sc, SNCR_CR)) & CR_RRRA) continue; /* enable rx */ NIC_PUT(sc, SNCR_CR, CR_RXEN); wbflush(); /* flag interface as "running" */ sc->sc_ifp->if_flags |= IFF_RUNNING; sc->sc_ifp->if_flags &= ~IFF_OACTIVE; splx(s); return; } /* * close down an interface and free its buffers * Called on final close of device, or if sncinit() fails * part way through. */ hide int sncstop(sc) struct snc_softc *sc; { struct mtd *mtd; int s = splhardnet(); /* stick chip in reset */ NIC_PUT(sc, SNCR_CR, CR_RST); wbflush(); /* free all receive buffers (currently static so nothing to do) */ /* free all pending transmit mbufs */ while (sc->mtd_hw != sc->mtd_free) { mtd = &sc->mtda[sc->mtd_hw]; if (mtd->mtd_mbuf) m_freem(mtd->mtd_mbuf); if (++sc->mtd_hw == NTDA) sc->mtd_hw = 0; } sc->sc_ifp->if_timer = 0; sc->sc_ifp->if_flags &= ~(IFF_RUNNING | IFF_UP); splx(s); return (0); } /* * Called if any Tx packets remain unsent after 5 seconds, * In all cases we just reset the chip, and any retransmission * will be handled by higher level protocol timeouts. 
*/ hide void sncwatchdog(ifp) struct ifnet *ifp; { struct snc_softc *sc = ifp->if_softc; struct mtd *mtd; int temp; if (sc->mtd_hw != sc->mtd_free) { /* something still pending for transmit */ mtd = &sc->mtda[sc->mtd_hw]; if (SRO(sc, mtd->mtd_vtxp, TXP_STATUS) == 0) log(LOG_ERR, "%s: Tx - timeout\n", device_get_nameunit(sc->sc_dev)); else log(LOG_ERR, "%s: Tx - lost interrupt\n", device_get_nameunit(sc->sc_dev)); temp = ifp->if_flags & IFF_UP; sncreset(sc); ifp->if_flags |= temp; } } /* * stuff packet into sonic (at splnet) */ hide u_int sonicput(sc, m0, mtd_next) struct snc_softc *sc; struct mbuf *m0; int mtd_next; { struct mtd *mtdp; struct mbuf *m; u_int32_t buff; u_int32_t txp; u_int len = 0; u_int totlen = 0; #ifdef whyonearthwouldyoudothis if (NIC_GET(sc, SNCR_CR) & CR_TXP) return (0); #endif /* grab the replacement mtd */ mtdp = &sc->mtda[sc->mtd_free]; buff = mtdp->mtd_vbuf; /* this packet goes to mtdnext fill in the TDA */ mtdp->mtd_mbuf = m0; txp = mtdp->mtd_vtxp; /* Write to the config word. 
Every (NTDA/2)+1 packets we set an intr */ if (sc->mtd_pint == 0) { sc->mtd_pint = NTDA/2; SWO(sc, txp, TXP_CONFIG, TCR_PINT); } else { sc->mtd_pint--; SWO(sc, txp, TXP_CONFIG, 0); } for (m = m0; m; m = m->m_next) { len = m->m_len; totlen += len; (*sc->sc_copytobuf)(sc, mtod(m, caddr_t), buff, len); buff += len; } if (totlen >= TXBSIZE) { panic("%s: sonicput: packet overflow", device_get_nameunit(sc->sc_dev)); } SWO(sc, txp, TXP_FRAGOFF + (0 * TXP_FRAGSIZE) + TXP_FPTRLO, LOWER(mtdp->mtd_vbuf)); SWO(sc, txp, TXP_FRAGOFF + (0 * TXP_FRAGSIZE) + TXP_FPTRHI, UPPER(mtdp->mtd_vbuf)); if (totlen < ETHERMIN + sizeof(struct ether_header)) { int pad = ETHERMIN + sizeof(struct ether_header) - totlen; (*sc->sc_zerobuf)(sc, mtdp->mtd_vbuf + totlen, pad); totlen = ETHERMIN + sizeof(struct ether_header); } SWO(sc, txp, TXP_FRAGOFF + (0 * TXP_FRAGSIZE) + TXP_FSIZE, totlen); SWO(sc, txp, TXP_FRAGCNT, 1); SWO(sc, txp, TXP_PKTSIZE, totlen); /* link onto the next mtd that will be used */ SWO(sc, txp, TXP_FRAGOFF + (1 * TXP_FRAGSIZE) + TXP_FPTRLO, LOWER(sc->mtda[mtd_next].mtd_vtxp) | EOL); /* * The previous txp.tlink currently contains a pointer to * our txp | EOL. Want to clear the EOL, so write our * pointer to the previous txp. */ SWO(sc, sc->mtda[sc->mtd_prev].mtd_vtxp, sc->mtd_tlinko, LOWER(mtdp->mtd_vtxp)); /* make sure chip is running */ wbflush(); NIC_PUT(sc, SNCR_CR, CR_TXP); wbflush(); sc->sc_ifp->if_timer = 5; /* 5 seconds to watch for failing to transmit */ return (totlen); } /* * These are called from sonicioctl() when /etc/ifconfig is run to set * the address or switch the i/f on. 
*/ /* * CAM support */ hide void caminitialise(sc) struct snc_softc *sc; { u_int32_t v_cda = sc->v_cda; int i; int camoffset; for (i = 0; i < MAXCAM; i++) { camoffset = i * CDA_CAMDESC; SWO(sc, v_cda, (camoffset + CDA_CAMEP), i); SWO(sc, v_cda, (camoffset + CDA_CAMAP2), 0); SWO(sc, v_cda, (camoffset + CDA_CAMAP1), 0); SWO(sc, v_cda, (camoffset + CDA_CAMAP0), 0); } SWO(sc, v_cda, CDA_ENABLE, 0); #ifdef SNCDEBUG if ((sncdebug & SNC_SHOWCAMENT) != 0) { camdump(sc); } #endif } hide void camentry(sc, entry, ea) int entry; u_char *ea; struct snc_softc *sc; { u_int32_t v_cda = sc->v_cda; int camoffset = entry * CDA_CAMDESC; SWO(sc, v_cda, camoffset + CDA_CAMEP, entry); SWO(sc, v_cda, camoffset + CDA_CAMAP2, (ea[5] << 8) | ea[4]); SWO(sc, v_cda, camoffset + CDA_CAMAP1, (ea[3] << 8) | ea[2]); SWO(sc, v_cda, camoffset + CDA_CAMAP0, (ea[1] << 8) | ea[0]); SWO(sc, v_cda, CDA_ENABLE, (SRO(sc, v_cda, CDA_ENABLE) | (1 << entry))); } hide void camprogram(sc) struct snc_softc *sc; { struct ifmultiaddr *ifma; struct ifnet *ifp; int timeout; int mcount = 0; caminitialise(sc); ifp = sc->sc_ifp; /* Always load our own address first. */ camentry (sc, mcount, IFP2ENADDR(sc->sc_ifp)); mcount++; /* Assume we won't need allmulti bit. 
*/ ifp->if_flags &= ~IFF_ALLMULTI; /* Loop through multicast addresses */ + IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (mcount == MAXCAM) { ifp->if_flags |= IFF_ALLMULTI; break; } /* program the CAM with the specified entry */ camentry(sc, mcount, LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); mcount++; } + IF_ADDR_UNLOCK(ifp); NIC_PUT(sc, SNCR_CDP, LOWER(sc->v_cda)); NIC_PUT(sc, SNCR_CDC, MAXCAM); NIC_PUT(sc, SNCR_CR, CR_LCAM); wbflush(); timeout = 10000; while ((NIC_GET(sc, SNCR_CR) & CR_LCAM) && timeout--) continue; if (timeout == 0) { /* XXX */ panic("%s: CAM initialisation failed\n", device_get_nameunit(sc->sc_dev)); } timeout = 10000; while (((NIC_GET(sc, SNCR_ISR) & ISR_LCD) == 0) && timeout--) continue; if (NIC_GET(sc, SNCR_ISR) & ISR_LCD) NIC_PUT(sc, SNCR_ISR, ISR_LCD); else device_printf(sc->sc_dev, "CAM initialisation without interrupt\n"); } #ifdef SNCDEBUG hide void camdump(sc) struct snc_softc *sc; { int i; printf("CAM entries:\n"); NIC_PUT(sc, SNCR_CR, CR_RST); wbflush(); for (i = 0; i < 16; i++) { u_short ap2, ap1, ap0; NIC_PUT(sc, SNCR_CEP, i); wbflush(); ap2 = NIC_GET(sc, SNCR_CAP2); ap1 = NIC_GET(sc, SNCR_CAP1); ap0 = NIC_GET(sc, SNCR_CAP0); printf("%d: ap2=0x%x ap1=0x%x ap0=0x%x\n", i, ap2, ap1, ap0); } printf("CAM enable 0x%x\n", NIC_GET(sc, SNCR_CEP)); NIC_PUT(sc, SNCR_CR, 0); wbflush(); } #endif hide void initialise_tda(sc) struct snc_softc *sc; { struct mtd *mtd; int i; for (i = 0; i < NTDA; i++) { mtd = &sc->mtda[i]; mtd->mtd_mbuf = 0; } sc->mtd_hw = 0; sc->mtd_prev = NTDA - 1; sc->mtd_free = 0; sc->mtd_tlinko = TXP_FRAGOFF + 1*TXP_FRAGSIZE + TXP_FPTRLO; sc->mtd_pint = NTDA/2; NIC_PUT(sc, SNCR_UTDA, UPPER(sc->mtda[0].mtd_vtxp)); NIC_PUT(sc, SNCR_CTDA, LOWER(sc->mtda[0].mtd_vtxp)); } hide void initialise_rda(sc) struct snc_softc *sc; { int i; u_int32_t vv_rda = 0; u_int32_t v_rda = 0; /* link the RDA's together into a circular list */ for (i = 0; i < 
(sc->sc_nrda - 1); i++) { v_rda = sc->v_rda + (i * RXPKT_SIZE(sc)); vv_rda = sc->v_rda + ((i+1) * RXPKT_SIZE(sc)); SWO(sc, v_rda, RXPKT_RLINK, LOWER(vv_rda)); SWO(sc, v_rda, RXPKT_INUSE, 1); } v_rda = sc->v_rda + ((sc->sc_nrda - 1) * RXPKT_SIZE(sc)); SWO(sc, v_rda, RXPKT_RLINK, LOWER(sc->v_rda) | EOL); SWO(sc, v_rda, RXPKT_INUSE, 1); /* mark end of receive descriptor list */ sc->sc_rdamark = sc->sc_nrda - 1; sc->sc_rxmark = 0; NIC_PUT(sc, SNCR_URDA, UPPER(sc->v_rda)); NIC_PUT(sc, SNCR_CRDA, LOWER(sc->v_rda)); wbflush(); } hide void initialise_rra(sc) struct snc_softc *sc; { int i; u_int v; int bitmode = sc->bitmode; if (bitmode) NIC_PUT(sc, SNCR_EOBC, RBASIZE(sc) / 2 - 2); else NIC_PUT(sc, SNCR_EOBC, RBASIZE(sc) / 2 - 1); NIC_PUT(sc, SNCR_URRA, UPPER(sc->v_rra[0])); NIC_PUT(sc, SNCR_RSA, LOWER(sc->v_rra[0])); /* rea must point just past the end of the rra space */ NIC_PUT(sc, SNCR_REA, LOWER(sc->v_rea)); NIC_PUT(sc, SNCR_RRP, LOWER(sc->v_rra[0])); NIC_PUT(sc, SNCR_RSC, 0); /* fill up SOME of the rra with buffers */ for (i = 0; i < NRBA; i++) { v = SONIC_GETDMA(sc->rbuf[i]); SWO(sc, sc->v_rra[i], RXRSRC_PTRHI, UPPER(v)); SWO(sc, sc->v_rra[i], RXRSRC_PTRLO, LOWER(v)); SWO(sc, sc->v_rra[i], RXRSRC_WCHI, UPPER(NBPG/2)); SWO(sc, sc->v_rra[i], RXRSRC_WCLO, LOWER(NBPG/2)); } sc->sc_rramark = NRBA; NIC_PUT(sc, SNCR_RWP, LOWER(sc->v_rra[sc->sc_rramark])); wbflush(); } void sncintr(arg) void *arg; { struct snc_softc *sc = (struct snc_softc *)arg; int isr; if (sc->sc_enabled == 0) return; while ((isr = (NIC_GET(sc, SNCR_ISR) & ISR_ALL)) != 0) { /* scrub the interrupts that we are going to service */ NIC_PUT(sc, SNCR_ISR, isr); wbflush(); if (isr & (ISR_BR | ISR_LCD | ISR_TC)) device_printf(sc->sc_dev, "unexpected interrupt status 0x%x\n", isr); if (isr & (ISR_TXDN | ISR_TXER | ISR_PINT)) sonictxint(sc); if (isr & ISR_PKTRX) sonicrxint(sc); if (isr & (ISR_HBL | ISR_RDE | ISR_RBE | ISR_RBAE | ISR_RFO)) { if (isr & ISR_HBL) /* * The repeater is not providing a heartbeat. 
* In itself this isn't harmful, lots of the * cheap repeater hubs don't supply a heartbeat. * So ignore the lack of heartbeat. Its only * if we can't detect a carrier that we have a * problem. */ ; if (isr & ISR_RDE) device_printf(sc->sc_dev, "receive descriptors exhausted\n"); if (isr & ISR_RBE) device_printf(sc->sc_dev, "receive buffers exhausted\n"); if (isr & ISR_RBAE) device_printf(sc->sc_dev, "receive buffer area exhausted\n"); if (isr & ISR_RFO) device_printf(sc->sc_dev, "receive FIFO overrun\n"); } if (isr & (ISR_CRC | ISR_FAE | ISR_MP)) { #ifdef notdef if (isr & ISR_CRC) sc->sc_crctally++; if (isr & ISR_FAE) sc->sc_faetally++; if (isr & ISR_MP) sc->sc_mptally++; #endif } sncstart(sc->sc_ifp); #if NRND > 0 if (isr) rnd_add_uint32(&sc->rnd_source, isr); #endif } return; } /* * Transmit interrupt routine */ hide void sonictxint(sc) struct snc_softc *sc; { struct mtd *mtd; u_int32_t txp; unsigned short txp_status; int mtd_hw; struct ifnet *ifp = sc->sc_ifp; mtd_hw = sc->mtd_hw; if (mtd_hw == sc->mtd_free) return; while (mtd_hw != sc->mtd_free) { mtd = &sc->mtda[mtd_hw]; txp = mtd->mtd_vtxp; if (SRO(sc, txp, TXP_STATUS) == 0) { break; /* it hasn't really gone yet */ } #ifdef SNCDEBUG if ((sncdebug & SNC_SHOWTXHDR) != 0) { struct ether_header eh; (*sc->sc_copyfrombuf)(sc, &eh, mtd->mtd_vbuf, sizeof(eh)); device_printf(sc->sc_dev, "xmit status=0x%x len=%d type=0x%x from %6D", SRO(sc, txp, TXP_STATUS), SRO(sc, txp, TXP_PKTSIZE), htons(eh.ether_type), eh.ether_shost, ":"); printf(" (to %6D)\n", eh.ether_dhost, ":"); } #endif /* SNCDEBUG */ ifp->if_flags &= ~IFF_OACTIVE; if (mtd->mtd_mbuf != 0) { m_freem(mtd->mtd_mbuf); mtd->mtd_mbuf = 0; } if (++mtd_hw == NTDA) mtd_hw = 0; txp_status = SRO(sc, txp, TXP_STATUS); ifp->if_collisions += (txp_status & TCR_EXC) ? 
16 : ((txp_status & TCR_NC) >> 12); if ((txp_status & TCR_PTX) == 0) { ifp->if_oerrors++; device_printf(sc->sc_dev, "Tx packet status=0x%x\n", txp_status); /* XXX - DG This looks bogus */ if (mtd_hw != sc->mtd_free) { printf("resubmitting remaining packets\n"); mtd = &sc->mtda[mtd_hw]; NIC_PUT(sc, SNCR_CTDA, LOWER(mtd->mtd_vtxp)); NIC_PUT(sc, SNCR_CR, CR_TXP); wbflush(); break; } } } sc->mtd_hw = mtd_hw; return; } /* * Receive interrupt routine */ hide void sonicrxint(sc) struct snc_softc *sc; { u_int32_t rda; int orra; int len; int rramark; int rdamark; u_int16_t rxpkt_ptr; rda = sc->v_rda + (sc->sc_rxmark * RXPKT_SIZE(sc)); while (SRO(sc, rda, RXPKT_INUSE) == 0) { u_int status = SRO(sc, rda, RXPKT_STATUS); orra = RBASEQ(SRO(sc, rda, RXPKT_SEQNO)) & RRAMASK; rxpkt_ptr = SRO(sc, rda, RXPKT_PTRLO); /* * Do not trunc ether_header length. * Our sonic_read() and sonic_get() require it. */ len = SRO(sc, rda, RXPKT_BYTEC) - FCSSIZE; if (status & RCR_PRX) { /* XXX: Does PGOFSET require? */ u_int32_t pkt = sc->rbuf[orra & RBAMASK] + (rxpkt_ptr & PGOFSET); if (sonic_read(sc, pkt, len)) sc->sc_ifp->if_ipackets++; else sc->sc_ifp->if_ierrors++; } else sc->sc_ifp->if_ierrors++; /* * give receive buffer area back to chip. * * If this was the last packet in the RRA, give the RRA to * the chip again. * If sonic read didnt copy it out then we would have to * wait !! * (dont bother add it back in again straight away) * * Really, we're doing v_rra[rramark] = v_rra[orra] but * we have to use the macros because SONIC might be in * 16 or 32 bit mode. 
*/ if (status & RCR_LPKT) { u_int32_t tmp1, tmp2; rramark = sc->sc_rramark; tmp1 = sc->v_rra[rramark]; tmp2 = sc->v_rra[orra]; SWO(sc, tmp1, RXRSRC_PTRLO, SRO(sc, tmp2, RXRSRC_PTRLO)); SWO(sc, tmp1, RXRSRC_PTRHI, SRO(sc, tmp2, RXRSRC_PTRHI)); SWO(sc, tmp1, RXRSRC_WCLO, SRO(sc, tmp2, RXRSRC_WCLO)); SWO(sc, tmp1, RXRSRC_WCHI, SRO(sc, tmp2, RXRSRC_WCHI)); /* zap old rra for fun */ SWO(sc, tmp2, RXRSRC_WCHI, 0); SWO(sc, tmp2, RXRSRC_WCLO, 0); sc->sc_rramark = (++rramark) & RRAMASK; NIC_PUT(sc, SNCR_RWP, LOWER(sc->v_rra[rramark])); wbflush(); } /* * give receive descriptor back to chip simple * list is circular */ rdamark = sc->sc_rdamark; SWO(sc, rda, RXPKT_INUSE, 1); SWO(sc, rda, RXPKT_RLINK, SRO(sc, rda, RXPKT_RLINK) | EOL); SWO(sc, (sc->v_rda + (rdamark * RXPKT_SIZE(sc))), RXPKT_RLINK, SRO(sc, (sc->v_rda + (rdamark * RXPKT_SIZE(sc))), RXPKT_RLINK) & ~EOL); sc->sc_rdamark = sc->sc_rxmark; if (++sc->sc_rxmark >= sc->sc_nrda) sc->sc_rxmark = 0; rda = sc->v_rda + (sc->sc_rxmark * RXPKT_SIZE(sc)); } } /* * sonic_read -- pull packet off interface and forward to * appropriate protocol handler */ hide int sonic_read(sc, pkt, len) struct snc_softc *sc; u_int32_t pkt; int len; { struct ifnet *ifp = sc->sc_ifp; struct ether_header *et; struct mbuf *m; if (len <= sizeof(struct ether_header) || len > ETHERMTU + sizeof(struct ether_header)) { device_printf(sc->sc_dev, "invalid packet length %d bytes\n", len); return (0); } /* Pull packet off interface. */ m = sonic_get(sc, pkt, len); if (m == 0) { return (0); } /* We assume that the header fit entirely in one mbuf. */ et = mtod(m, struct ether_header *); #ifdef SNCDEBUG if ((sncdebug & SNC_SHOWRXHDR) != 0) { device_printf(sc->sc_dev, "rcvd 0x%x len=%d type=0x%x from %6D", pkt, len, htons(et->ether_type), et->ether_shost, ":"); printf(" (to %6D)\n", et->ether_dhost, ":"); } #endif /* SNCDEBUG */ /* Pass the packet up. 
*/ (*ifp->if_input)(ifp, m); return (1); } /* * munge the received packet into an mbuf chain */ hide struct mbuf * sonic_get(sc, pkt, datalen) struct snc_softc *sc; u_int32_t pkt; int datalen; { struct mbuf *m, *top, **mp; int len; /* * Do not trunc ether_header length. * Our sonic_read() and sonic_get() require it. */ MGETHDR(m, M_DONTWAIT, MT_DATA); if (m == 0) return (0); m->m_pkthdr.rcvif = sc->sc_ifp; m->m_pkthdr.len = datalen; len = MHLEN; top = 0; mp = ⊤ while (datalen > 0) { if (top) { MGET(m, M_DONTWAIT, MT_DATA); if (m == 0) { m_freem(top); return (0); } len = MLEN; } if (datalen >= MINCLSIZE) { MCLGET(m, M_DONTWAIT); if ((m->m_flags & M_EXT) == 0) { if (top) m_freem(top); return (0); } len = MCLBYTES; } #if 0 /* XXX: Require? */ if (!top) { register int pad = ALIGN(sizeof(struct ether_header)) - sizeof(struct ether_header); m->m_data += pad; len -= pad; } #endif m->m_len = len = min(datalen, len); (*sc->sc_copyfrombuf)(sc, mtod(m, caddr_t), pkt, len); pkt += len; datalen -= len; *mp = m; mp = &m->m_next; } return (top); } /* * Enable power on the interface. */ int snc_enable(sc) struct snc_softc *sc; { #ifdef SNCDEBUG device_printf(sc->sc_dev, "snc_enable()\n"); #endif /* SNCDEBUG */ if (sc->sc_enabled == 0 && sc->sc_enable != NULL) { if ((*sc->sc_enable)(sc) != 0) { device_printf(sc->sc_dev, "device enable failed\n"); return (EIO); } } sc->sc_enabled = 1; return (0); } /* * Disable power on the interface. */ void snc_disable(sc) struct snc_softc *sc; { #ifdef SNCDEBUG device_printf(sc->sc_dev, "snc_disable()\n"); #endif /* SNCDEBUG */ if (sc->sc_enabled != 0 && sc->sc_disable != NULL) { (*sc->sc_disable)(sc); sc->sc_enabled = 0; } } Index: stable/6/sys/dev/tx/if_tx.c =================================================================== --- stable/6/sys/dev/tx/if_tx.c (revision 149421) +++ stable/6/sys/dev/tx/if_tx.c (revision 149422) @@ -1,1897 +1,1899 @@ /*- * Copyright (c) 1997 Semen Ustimenko (semenu@FreeBSD.org) * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * EtherPower II 10/100 Fast Ethernet (SMC 9432 serie) * * These cards are based on SMC83c17x (EPIC) chip and one of the various * PHYs (QS6612, AC101 and LXT970 were seen). The media support depends on * card model. All cards support 10baseT/UTP and 100baseTX half- and full- * duplex (SMB9432TX). SMC9432BTX also supports 10baseT/BNC. SMC9432FTX also * supports fibre optics. * * Thanks are going to Steve Bauer and Jason Wright. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for DELAY */ #include #include #include #include #include #include #include "miidevs.h" #include #include "miibus_if.h" #include #include MODULE_DEPEND(tx, pci, 1, 1, 1); MODULE_DEPEND(tx, ether, 1, 1, 1); MODULE_DEPEND(tx, miibus, 1, 1, 1); static int epic_ifioctl(struct ifnet *, u_long, caddr_t); static void epic_intr(void *); static void epic_tx_underrun(epic_softc_t *); static void epic_ifstart(struct ifnet *); static void epic_ifwatchdog(struct ifnet *); static void epic_stats_update(epic_softc_t *); static void epic_init(void *); static void epic_stop(epic_softc_t *); static void epic_rx_done(epic_softc_t *); static void epic_tx_done(epic_softc_t *); static int epic_init_rings(epic_softc_t *); static void epic_free_rings(epic_softc_t *); static void epic_stop_activity(epic_softc_t *); static int epic_queue_last_packet(epic_softc_t *); static void epic_start_activity(epic_softc_t *); static void epic_set_rx_mode(epic_softc_t *); static void epic_set_tx_mode(epic_softc_t *); static void epic_set_mc_table(epic_softc_t *); static int epic_read_eeprom(epic_softc_t *,u_int16_t); static void epic_output_eepromw(epic_softc_t *, u_int16_t); static u_int16_t epic_input_eepromw(epic_softc_t *); static u_int8_t epic_eeprom_clock(epic_softc_t *,u_int8_t); static void epic_write_eepromreg(epic_softc_t *,u_int8_t); static u_int8_t epic_read_eepromreg(epic_softc_t *); static int epic_read_phy_reg(epic_softc_t *, int, int); static void epic_write_phy_reg(epic_softc_t *, int, int, int); static int epic_miibus_readreg(device_t, int, int); static int epic_miibus_writereg(device_t, int, int, int); static void epic_miibus_statchg(device_t); static void epic_miibus_mediainit(device_t); static int epic_ifmedia_upd(struct ifnet *); static void epic_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int 
epic_probe(device_t); static int epic_attach(device_t); static void epic_shutdown(device_t); static int epic_detach(device_t); static void epic_release(epic_softc_t *); static struct epic_type *epic_devtype(device_t); static device_method_t epic_methods[] = { /* Device interface */ DEVMETHOD(device_probe, epic_probe), DEVMETHOD(device_attach, epic_attach), DEVMETHOD(device_detach, epic_detach), DEVMETHOD(device_shutdown, epic_shutdown), /* MII interface */ DEVMETHOD(miibus_readreg, epic_miibus_readreg), DEVMETHOD(miibus_writereg, epic_miibus_writereg), DEVMETHOD(miibus_statchg, epic_miibus_statchg), DEVMETHOD(miibus_mediainit, epic_miibus_mediainit), { 0, 0 } }; static driver_t epic_driver = { "tx", epic_methods, sizeof(epic_softc_t) }; static devclass_t epic_devclass; DRIVER_MODULE(tx, pci, epic_driver, epic_devclass, 0, 0); DRIVER_MODULE(miibus, tx, miibus_driver, miibus_devclass, 0, 0); static struct epic_type epic_devs[] = { { SMC_VENDORID, SMC_DEVICEID_83C170, "SMC EtherPower II 10/100" }, { 0, 0, NULL } }; static int epic_probe(dev) device_t dev; { struct epic_type *t; t = epic_devtype(dev); if (t != NULL) { device_set_desc(dev, t->name); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static struct epic_type * epic_devtype(dev) device_t dev; { struct epic_type *t; t = epic_devs; while (t->name != NULL) { if ((pci_get_vendor(dev) == t->ven_id) && (pci_get_device(dev) == t->dev_id)) { return (t); } t++; } return (NULL); } #ifdef EPIC_USEIOSPACE #define EPIC_RES SYS_RES_IOPORT #define EPIC_RID PCIR_BASEIO #else #define EPIC_RES SYS_RES_MEMORY #define EPIC_RID PCIR_BASEMEM #endif static void epic_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { u_int32_t *addr; if (error) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); addr = arg; *addr = segs->ds_addr; } /* * Attach routine: map registers, allocate softc, rings and descriptors. * Reset to known state. 
*/
static int
epic_attach(dev)
    device_t dev;
{
    struct ifnet *ifp;
    epic_softc_t *sc;
    int unit, error;
    int i, s, rid, tmp;
    u_char eaddr[6];

    s = splimp();

    sc = device_get_softc(dev);
    unit = device_get_unit(dev);

    /* Preinitialize softc structure. */
    sc->unit = unit;
    sc->dev = dev;

    /* Fill ifnet structure. */
    ifp = sc->ifp = if_alloc(IFT_ETHER);
    if (ifp == NULL) {
        device_printf(dev, "can not if_alloc()\n");
        error = ENOSPC;
        goto fail;
    }
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    ifp->if_softc = sc;
    ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST|IFF_NEEDSGIANT;
    ifp->if_ioctl = epic_ifioctl;
    ifp->if_start = epic_ifstart;
    ifp->if_watchdog = epic_ifwatchdog;
    ifp->if_init = epic_init;
    ifp->if_timer = 0;
    ifp->if_baudrate = 10000000;
    ifp->if_snd.ifq_maxlen = TX_RING_SIZE - 1;

    /* Enable busmastering. */
    pci_enable_busmaster(dev);

    /* Map chip registers (I/O ports or memory, per EPIC_RES). */
    rid = EPIC_RID;
    sc->res = bus_alloc_resource_any(dev, EPIC_RES, &rid, RF_ACTIVE);
    if (sc->res == NULL) {
        device_printf(dev, "couldn't map ports/memory\n");
        error = ENXIO;
        goto fail;
    }

    sc->sc_st = rman_get_bustag(sc->res);
    sc->sc_sh = rman_get_bushandle(sc->res);

    /* Allocate interrupt. */
    rid = 0;
    sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
        RF_SHAREABLE | RF_ACTIVE);
    if (sc->irq == NULL) {
        device_printf(dev, "couldn't map interrupt\n");
        error = ENXIO;
        goto fail;
    }

    /* Allocate DMA tags. */
    /* mtag: per-mbuf tag, up to EPIC_MAX_FRAGS segments of MCLBYTES each. */
    error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
        BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * EPIC_MAX_FRAGS,
        EPIC_MAX_FRAGS, MCLBYTES, 0, busdma_lock_mutex, &Giant, &sc->mtag);
    if (error) {
        device_printf(dev, "couldn't allocate dma tag\n");
        goto fail;
    }

    /* rtag: the RX descriptor ring, one contiguous segment. */
    error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
        BUS_SPACE_MAXADDR, NULL, NULL,
        sizeof(struct epic_rx_desc) * RX_RING_SIZE,
        1, sizeof(struct epic_rx_desc) * RX_RING_SIZE, 0,
        busdma_lock_mutex, &Giant, &sc->rtag);
    if (error) {
        device_printf(dev, "couldn't allocate dma tag\n");
        goto fail;
    }

    /* ttag: the TX descriptor ring, one contiguous segment. */
    error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
        BUS_SPACE_MAXADDR, NULL, NULL,
        sizeof(struct epic_tx_desc) * TX_RING_SIZE,
        1, sizeof(struct epic_tx_desc) * TX_RING_SIZE, 0,
        busdma_lock_mutex, &Giant, &sc->ttag);
    if (error) {
        device_printf(dev, "couldn't allocate dma tag\n");
        goto fail;
    }

    /* ftag: the per-TX-descriptor fragment lists, one contiguous segment. */
    error = bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR_32BIT,
        BUS_SPACE_MAXADDR, NULL, NULL,
        sizeof(struct epic_frag_list) * TX_RING_SIZE,
        1, sizeof(struct epic_frag_list) * TX_RING_SIZE, 0,
        busdma_lock_mutex, &Giant, &sc->ftag);
    if (error) {
        device_printf(dev, "couldn't allocate dma tag\n");
        goto fail;
    }

    /* Allocate DMA safe memory and get the DMA addresses. */
    error = bus_dmamem_alloc(sc->ftag, (void **)&sc->tx_flist,
        BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->fmap);
    if (error) {
        device_printf(dev, "couldn't allocate dma memory\n");
        goto fail;
    }
    error = bus_dmamap_load(sc->ftag, sc->fmap, sc->tx_flist,
        sizeof(struct epic_frag_list) * TX_RING_SIZE,
        epic_dma_map_addr, &sc->frag_addr, 0);
    if (error) {
        device_printf(dev, "couldn't map dma memory\n");
        goto fail;
    }
    error = bus_dmamem_alloc(sc->ttag, (void **)&sc->tx_desc,
        BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->tmap);
    if (error) {
        device_printf(dev, "couldn't allocate dma memory\n");
        goto fail;
    }
    error = bus_dmamap_load(sc->ttag, sc->tmap, sc->tx_desc,
        sizeof(struct epic_tx_desc) * TX_RING_SIZE,
        epic_dma_map_addr, &sc->tx_addr, 0);
    if (error) {
        device_printf(dev, "couldn't map dma memory\n");
        goto fail;
    }
    error = bus_dmamem_alloc(sc->rtag, (void **)&sc->rx_desc,
        BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->rmap);
    if (error) {
        device_printf(dev, "couldn't allocate dma memory\n");
        goto fail;
    }
    error = bus_dmamap_load(sc->rtag, sc->rmap, sc->rx_desc,
        sizeof(struct epic_rx_desc) * RX_RING_SIZE,
        epic_dma_map_addr, &sc->rx_addr, 0);
    if (error) {
        device_printf(dev, "couldn't map dma memory\n");
        goto fail;
    }

    /* Bring the chip out of low-power mode. */
    CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET);
    DELAY(500);

    /* Workaround for Application Note 7-15. */
    for (i = 0; i < 16; i++)
        CSR_WRITE_4(sc, TEST1, TEST1_CLOCK_TEST);

    /* Read MAC address from EEPROM (stored as three 16-bit words). */
    for (i = 0; i < ETHER_ADDR_LEN / sizeof(u_int16_t); i++)
        ((u_int16_t *)eaddr)[i] = epic_read_eeprom(sc,i);

    /* Set Non-Volatile Control Register from EEPROM. */
    CSR_WRITE_4(sc, NVCTL, epic_read_eeprom(sc, EEPROM_NVCTL) & 0x1F);

    /* Set defaults. */
    sc->tx_threshold = TRANSMIT_THRESHOLD;
    sc->txcon = TXCON_DEFAULT;
    sc->miicfg = MIICFG_SMI_ENABLE;
    sc->phyid = EPIC_UNKN_PHY;
    sc->serinst = -1;

    /* Fetch card id. */
    sc->cardvend = pci_read_config(dev, PCIR_SUBVEND_0, 2);
    sc->cardid = pci_read_config(dev, PCIR_SUBDEV_0, 2);

    if (sc->cardvend != SMC_VENDORID)
        device_printf(dev, "unknown card vendor %04xh\n", sc->cardvend);

    /* Do ifmedia setup. */
    if (mii_phy_probe(dev, &sc->miibus,
        epic_ifmedia_upd, epic_ifmedia_sts)) {
        device_printf(dev, "ERROR! MII without any PHY!?\n");
        error = ENXIO;
        goto fail;
    }

    /* board type and ...  (printable board name from EEPROM words 0x2c-0x31,
     * two ASCII chars per word, terminated by a space character) */
    printf(" type ");
    for(i = 0x2c; i < 0x32; i++) {
        tmp = epic_read_eeprom(sc, i);
        if (' ' == (u_int8_t)tmp)
            break;
        printf("%c", (u_int8_t)tmp);
        tmp >>= 8;
        if (' ' == (u_int8_t)tmp)
            break;
        printf("%c", (u_int8_t)tmp);
    }
    printf("\n");

    /* Initialize rings. */
    if (epic_init_rings(sc)) {
        device_printf(dev, "failed to init rings\n");
        error = ENXIO;
        goto fail;
    }

    ifp->if_hdrlen = sizeof(struct ether_vlan_header);
    ifp->if_capabilities |= IFCAP_VLAN_MTU;
    ifp->if_capenable |= IFCAP_VLAN_MTU;
    callout_handle_init(&sc->stat_ch);

    /* Activate our interrupt handler. */
    error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET, epic_intr,
        sc, &sc->sc_ih);
    if (error) {
        device_printf(dev, "couldn't set up irq\n");
        goto fail;
    }

    /* Attach to OS's managers. */
    ether_ifattach(ifp, eaddr);

    splx(s);
    return (0);
fail:
    /* epic_release() handles partially-allocated state safely. */
    epic_release(sc);
    splx(s);
    return (error);
}

/*
 * Free any resources allocated by the driver.
*/
static void
epic_release(epic_softc_t *sc)
{
    /*
     * Called both from the attach failure path and from detach; every
     * resource is checked before being freed so a partial attach is safe.
     */
    if (sc->ifp != NULL)
        if_free(sc->ifp);
    if (sc->irq)
        bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->irq);
    if (sc->res)
        bus_release_resource(sc->dev, EPIC_RES, EPIC_RID, sc->res);
    epic_free_rings(sc);
    if (sc->tx_flist) {
        bus_dmamap_unload(sc->ftag, sc->fmap);
        bus_dmamem_free(sc->ftag, sc->tx_flist, sc->fmap);
        bus_dmamap_destroy(sc->ftag, sc->fmap);
    }
    if (sc->tx_desc) {
        bus_dmamap_unload(sc->ttag, sc->tmap);
        bus_dmamem_free(sc->ttag, sc->tx_desc, sc->tmap);
        bus_dmamap_destroy(sc->ttag, sc->tmap);
    }
    if (sc->rx_desc) {
        bus_dmamap_unload(sc->rtag, sc->rmap);
        bus_dmamem_free(sc->rtag, sc->rx_desc, sc->rmap);
        bus_dmamap_destroy(sc->rtag, sc->rmap);
    }
    if (sc->mtag)
        bus_dma_tag_destroy(sc->mtag);
    if (sc->ftag)
        bus_dma_tag_destroy(sc->ftag);
    if (sc->ttag)
        bus_dma_tag_destroy(sc->ttag);
    if (sc->rtag)
        bus_dma_tag_destroy(sc->rtag);
}

/*
 * Detach driver and free resources.
 */
static int
epic_detach(dev)
    device_t dev;
{
    struct ifnet *ifp;
    epic_softc_t *sc;
    int s;

    s = splimp();

    sc = device_get_softc(dev);
    ifp = sc->ifp;

    /* Unhook from the network stack before stopping the hardware. */
    ether_ifdetach(ifp);
    epic_stop(sc);

    bus_generic_detach(dev);
    device_delete_child(dev, sc->miibus);

    bus_teardown_intr(dev, sc->irq, sc->sc_ih);
    epic_release(sc);
    splx(s);
    return (0);
}

#undef EPIC_RES
#undef EPIC_RID

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
epic_shutdown(dev)
    device_t dev;
{
    epic_softc_t *sc;

    sc = device_get_softc(dev);
    epic_stop(sc);
}

/*
 * This is if_ioctl handler.
*/
static int
epic_ifioctl(ifp, command, data)
    struct ifnet *ifp;
    u_long command;
    caddr_t data;
{
    epic_softc_t *sc = ifp->if_softc;
    struct mii_data *mii;
    struct ifreq *ifr = (struct ifreq *) data;
    int x, error = 0;

    x = splimp();

    switch (command) {
    case SIOCSIFMTU:
        if (ifp->if_mtu == ifr->ifr_mtu)
            break;

        /* XXX Though the datasheet doesn't imply any
         * limitations on RX and TX sizes beside max 64Kb
         * DMA transfer, seems we can't send more then 1600
         * data bytes per ethernet packet (transmitter hangs
         * up if more data is sent).
         */
        if (ifr->ifr_mtu + ifp->if_hdrlen <= EPIC_MAX_MTU) {
            ifp->if_mtu = ifr->ifr_mtu;
            /* Restart to apply the new MTU. */
            epic_stop(sc);
            epic_init(sc);
        } else
            error = EINVAL;
        break;

    case SIOCSIFFLAGS:
        /*
         * If the interface is marked up and stopped, then start it.
         * If it is marked down and running, then stop it.
         */
        if (ifp->if_flags & IFF_UP) {
            if ((ifp->if_flags & IFF_RUNNING) == 0) {
                epic_init(sc);
                break;
            }
        } else {
            if (ifp->if_flags & IFF_RUNNING) {
                epic_stop(sc);
                break;
            }
        }

        /* Handle IFF_PROMISC and IFF_ALLMULTI flags. */
        /* Chip must be idle to touch RXCON/multicast filter. */
        epic_stop_activity(sc);
        epic_set_mc_table(sc);
        epic_set_rx_mode(sc);
        epic_start_activity(sc);
        break;

    case SIOCADDMULTI:
    case SIOCDELMULTI:
        epic_set_mc_table(sc);
        error = 0;
        break;

    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        mii = device_get_softc(sc->miibus);
        error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
        break;

    default:
        error = ether_ioctl(ifp, command, data);
        break;
    }
    splx(x);
    return (error);
}

/*
 * busdma callback for a TX mbuf chain: copy each segment's length and
 * address into the descriptor's fragment list (arg).
 */
static void
epic_dma_map_txbuf(void *arg, bus_dma_segment_t *segs, int nseg,
    bus_size_t mapsize, int error)
{
    struct epic_frag_list *flist;
    int i;

    if (error)
        return;

    KASSERT(nseg <= EPIC_MAX_FRAGS, ("too many DMA segments"));
    flist = arg;

    /* Fill fragments list.
*/
    for (i = 0; i < nseg; i++) {
        KASSERT(segs[i].ds_len <= MCLBYTES, ("segment size too large"));
        flist->frag[i].fraglen = segs[i].ds_len;
        flist->frag[i].fragaddr = segs[i].ds_addr;
    }
    flist->numfrags = nseg;
}

/*
 * busdma callback for an RX mbuf: store the single segment's bus address
 * into the RX descriptor (arg).
 */
static void
epic_dma_map_rxbuf(void *arg,  bus_dma_segment_t *segs, int nseg,
    bus_size_t mapsize, int error)
{
    struct epic_rx_desc *desc;

    if (error)
        return;

    KASSERT(nseg == 1, ("too many DMA segments"));
    desc = arg;
    desc->bufaddr = segs->ds_addr;
}

/*
 * This is if_start handler. It takes mbufs from if_snd queue
 * and queue them for transmit, one by one, until TX ring become full
 * or queue become empty.
 */
static void
epic_ifstart(ifp)
    struct ifnet * ifp;
{
    epic_softc_t *sc = ifp->if_softc;
    struct epic_tx_buffer *buf;
    struct epic_tx_desc *desc;
    struct epic_frag_list *flist;
    struct mbuf *m0, *m;
    int error;

    while (sc->pending_txs < TX_RING_SIZE) {
        buf = sc->tx_buffer + sc->cur_tx;
        desc = sc->tx_desc + sc->cur_tx;
        flist = sc->tx_flist + sc->cur_tx;

        /* Get next packet to send. */
        IF_DEQUEUE(&ifp->if_snd, m0);

        /* If nothing to send, return. */
        if (m0 == NULL)
            return;

        error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m0,
            epic_dma_map_txbuf, flist, 0);

        if (error && error != EFBIG) {
            m_freem(m0);
            ifp->if_oerrors++;
            continue;
        }

        /*
         * If packet was more than EPIC_MAX_FRAGS parts,
         * recopy packet to a newly allocated mbuf cluster.
         */
        if (error) {
            m = m_defrag(m0, M_DONTWAIT);
            if (m == NULL) {
                m_freem(m0);
                ifp->if_oerrors++;
                continue;
            }
            m_freem(m0);
            m0 = m;

            error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m,
                epic_dma_map_txbuf, flist, 0);
            if (error) {
                m_freem(m);
                ifp->if_oerrors++;
                continue;
            }
        }
        bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREWRITE);

        buf->mbuf = m0;
        sc->pending_txs++;
        sc->cur_tx = (sc->cur_tx + 1) & TX_RING_MASK;
        desc->control = 0x01;
        /* Pad short frames up to the Ethernet minimum. */
        desc->txlength =
            max(m0->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
        /* 0x8000 hands the descriptor to the NIC. */
        desc->status = 0x8000;
        bus_dmamap_sync(sc->ttag, sc->tmap,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
        bus_dmamap_sync(sc->ftag, sc->fmap, BUS_DMASYNC_PREWRITE);
        CSR_WRITE_4(sc, COMMAND, COMMAND_TXQUEUED);

        /* Set watchdog timer. */
        ifp->if_timer = 8;

        BPF_MTAP(ifp, m0);
    }

    /* Ring is full; epic_tx_done() clears OACTIVE as slots free up. */
    ifp->if_flags |= IFF_OACTIVE;
}

/*
 * Synopsis: Finish all received frames.
 */
static void
epic_rx_done(sc)
    epic_softc_t *sc;
{
    struct ifnet *ifp = sc->ifp;
    u_int16_t len;
    struct epic_rx_buffer *buf;
    struct epic_rx_desc *desc;
    struct mbuf *m;
    bus_dmamap_t map;
    int error;

    bus_dmamap_sync(sc->rtag, sc->rmap, BUS_DMASYNC_POSTREAD);
    /* Process descriptors until one still owned by the NIC (0x8000). */
    while ((sc->rx_desc[sc->cur_rx].status & 0x8000) == 0) {
        buf = sc->rx_buffer + sc->cur_rx;
        desc = sc->rx_desc + sc->cur_rx;

        /* Switch to next descriptor. */
        sc->cur_rx = (sc->cur_rx + 1) & RX_RING_MASK;

        /*
         * Check for RX errors. This should only happen if
         * SAVE_ERRORED_PACKETS is set. RX errors generate
         * RXE interrupt usually.
         */
        if ((desc->status & 1) == 0) {
            ifp->if_ierrors++;
            desc->status = 0x8000;
            continue;
        }

        /* Save packet length and mbuf contained packet. */
        bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTREAD);
        len = desc->rxlength - ETHER_CRC_LEN;
        m = buf->mbuf;

        /* Try to get an mbuf cluster. */
        buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
        if (buf->mbuf == NULL) {
            /* No replacement buffer: recycle the old one, drop frame. */
            buf->mbuf = m;
            desc->status = 0x8000;
            ifp->if_ierrors++;
            continue;
        }
        buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES;
        m_adj(buf->mbuf, ETHER_ALIGN);

        /* Point to new mbuf, and give descriptor to chip. */
        error = bus_dmamap_load_mbuf(sc->mtag, sc->sparemap, buf->mbuf,
            epic_dma_map_rxbuf, desc, 0);
        if (error) {
            buf->mbuf = m;
            desc->status = 0x8000;
            ifp->if_ierrors++;
            continue;
        }

        desc->status = 0x8000;
        bus_dmamap_unload(sc->mtag, buf->map);
        /* Swap the now-free map with the spare for the next refill. */
        map = buf->map;
        buf->map = sc->sparemap;
        sc->sparemap = map;
        bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD);

        /* First mbuf in packet holds the ethernet and packet headers */
        m->m_pkthdr.rcvif = ifp;
        m->m_pkthdr.len = m->m_len = len;

        /* Give mbuf to OS. */
        (*ifp->if_input)(ifp, m);

        /* Successfuly received frame */
        ifp->if_ipackets++;
    }
    bus_dmamap_sync(sc->rtag, sc->rmap,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/*
 * Synopsis: Do last phase of transmission. I.e. if desc is
 * transmitted, decrease pending_txs counter, free mbuf contained
 * packet, switch to next descriptor and repeat until no packets
 * are pending or descriptor is not transmitted yet.
 */
static void
epic_tx_done(sc)
    epic_softc_t *sc;
{
    struct epic_tx_buffer *buf;
    struct epic_tx_desc *desc;
    u_int16_t status;

    bus_dmamap_sync(sc->ttag, sc->tmap, BUS_DMASYNC_POSTREAD);
    while (sc->pending_txs > 0) {
        buf = sc->tx_buffer + sc->dirty_tx;
        desc = sc->tx_desc + sc->dirty_tx;
        status = desc->status;

        /*
         * If packet is not transmitted, thou followed
         * packets are not transmitted too.
         */
        if (status & 0x8000)
            break;

        /* Packet is transmitted. Switch to next and free mbuf. */
        sc->pending_txs--;
        sc->dirty_tx = (sc->dirty_tx + 1) & TX_RING_MASK;
        bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(sc->mtag, buf->map);
        m_freem(buf->mbuf);
        buf->mbuf = NULL;

        /* Check for errors and collisions. */
        if (status & 0x0001)
            sc->ifp->if_opackets++;
        else
            sc->ifp->if_oerrors++;
        sc->ifp->if_collisions += (status >> 8) & 0x1F;
#ifdef EPIC_DIAG
        if ((status & 0x1001) == 0x1001)
            device_printf(sc->dev,
                "Tx ERROR: excessive coll. number\n");
#endif
    }

    if (sc->pending_txs < TX_RING_SIZE)
        sc->ifp->if_flags &= ~IFF_OACTIVE;
    bus_dmamap_sync(sc->ttag, sc->tmap,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/*
 * Interrupt function
 */
static void
epic_intr(arg)
    void *arg;
{
    epic_softc_t *sc;
    int status, i;

    sc = arg;
    /* Bound the amount of work per interrupt to 4 passes. */
    i = 4;
    while (i-- && ((status = CSR_READ_4(sc, INTSTAT)) & INTSTAT_INT_ACTV)) {
        /* Ack the interrupts we are about to handle. */
        CSR_WRITE_4(sc, INTSTAT, status);

        if (status & (INTSTAT_RQE|INTSTAT_RCC|INTSTAT_OVW)) {
            epic_rx_done(sc);
            if (status & (INTSTAT_RQE|INTSTAT_OVW)) {
#ifdef EPIC_DIAG
                if (status & INTSTAT_OVW)
                    device_printf(sc->dev,
                        "RX buffer overflow\n");
                if (status & INTSTAT_RQE)
                    device_printf(sc->dev,
                        "RX FIFO overflow\n");
#endif
                /* Restart the receive queue if it stalled. */
                if ((CSR_READ_4(sc, COMMAND) &
                    COMMAND_RXQUEUED) == 0)
                    CSR_WRITE_4(sc, COMMAND,
                        COMMAND_RXQUEUED);
                sc->ifp->if_ierrors++;
            }
        }

        if (status & (INTSTAT_TXC|INTSTAT_TCC|INTSTAT_TQE)) {
            epic_tx_done(sc);
            if (sc->ifp->if_snd.ifq_head != NULL)
                epic_ifstart(sc->ifp);
        }

        /* Check for rare errors */
        if (status & (INTSTAT_FATAL|INTSTAT_PMA|INTSTAT_PTA|
            INTSTAT_APE|INTSTAT_DPE|INTSTAT_TXU|INTSTAT_RXE)) {
            if (status & (INTSTAT_FATAL|INTSTAT_PMA|INTSTAT_PTA|
                INTSTAT_APE|INTSTAT_DPE)) {
                device_printf(sc->dev,
                    "PCI fatal errors occured: %s%s%s%s\n",
                    (status & INTSTAT_PMA) ? "PMA " : "",
                    (status & INTSTAT_PTA) ? "PTA " : "",
                    (status & INTSTAT_APE) ? "APE " : "",
                    (status & INTSTAT_DPE) ? "DPE" : "");

                /* Full reinit on a fatal PCI error. */
                epic_stop(sc);
                epic_init(sc);
                break;
            }

            if (status & INTSTAT_RXE) {
#ifdef EPIC_DIAG
                device_printf(sc->dev,
                    "CRC/Alignment error\n");
#endif
                sc->ifp->if_ierrors++;
            }

            if (status & INTSTAT_TXU) {
                epic_tx_underrun(sc);
                sc->ifp->if_oerrors++;
            }
        }
    }

    /* If no packets are pending, then no timeouts. */
    if (sc->pending_txs == 0)
        sc->ifp->if_timer = 0;
}

/*
 * Handle the TX underrun error: increase the TX threshold
 * and restart the transmitter.
 */
static void
epic_tx_underrun(sc)
    epic_softc_t *sc;
{
    if (sc->tx_threshold > TRANSMIT_THRESHOLD_MAX) {
        /* Threshold already maxed out; give up on early transmit. */
        sc->txcon &= ~TXCON_EARLY_TRANSMIT_ENABLE;
#ifdef EPIC_DIAG
        device_printf(sc->dev, "Tx UNDERRUN: early TX disabled\n");
#endif
    } else {
        sc->tx_threshold += 0x40;
#ifdef EPIC_DIAG
        device_printf(sc->dev,
            "Tx UNDERRUN: TX threshold increased to %d\n",
            sc->tx_threshold);
#endif
    }

    /* We must set TXUGO to reset the stuck transmitter. */
    CSR_WRITE_4(sc, COMMAND, COMMAND_TXUGO);

    /* Update the TX threshold */
    epic_stop_activity(sc);
    epic_set_tx_mode(sc);
    epic_start_activity(sc);
}

/*
 * Synopsis: This one is called if packets wasn't transmitted
 * during timeout. Try to deallocate transmitted packets, and
 * if success continue to work.
 */
static void
epic_ifwatchdog(ifp)
    struct ifnet *ifp;
{
    epic_softc_t *sc;
    int x;

    x = splimp();
    sc = ifp->if_softc;

    device_printf(sc->dev, "device timeout %d packets\n", sc->pending_txs);

    /* Try to finish queued packets. */
    epic_tx_done(sc);

    /* If not successful. */
    if (sc->pending_txs > 0) {
        ifp->if_oerrors += sc->pending_txs;

        /* Reinitialize board. */
        device_printf(sc->dev, "reinitialization\n");
        epic_stop(sc);
        epic_init(sc);
    } else
        device_printf(sc->dev, "seems we can continue normaly\n");

    /* Start output. */
    if (ifp->if_snd.ifq_head)
        epic_ifstart(ifp);

    splx(x);
}

/*
 * Despite the name of this function, it doesn't update statistics, it only
 * helps in autonegotiation process.
 */
static void
epic_stats_update(epic_softc_t * sc)
{
    struct mii_data * mii;
    int s;

    s = splimp();

    mii = device_get_softc(sc->miibus);
    mii_tick(mii);

    /* Reschedule ourselves once per second. */
    sc->stat_ch = timeout((timeout_t *)epic_stats_update, sc, hz);

    splx(s);
}

/*
 * Set media options.
*/
static int
epic_ifmedia_upd(ifp)
    struct ifnet *ifp;
{
    epic_softc_t *sc;
    struct mii_data *mii;
    struct ifmedia *ifm;
    struct mii_softc *miisc;
    int cfg, media;

    sc = ifp->if_softc;
    mii = device_get_softc(sc->miibus);
    ifm = &mii->mii_media;
    media = ifm->ifm_cur->ifm_media;

    /* Do not do anything if interface is not up. */
    if ((ifp->if_flags & IFF_UP) == 0)
        return (0);

    /*
     * Lookup current selected PHY.
     */
    if (IFM_INST(media) == sc->serinst) {
        /* Serial (10base2/BNC) instance selected. */
        sc->phyid = EPIC_SERIAL;
        sc->physc = NULL;
    } else {
        /* If we're not selecting serial interface, select MII mode. */
        sc->miicfg &= ~MIICFG_SERIAL_ENABLE;
        CSR_WRITE_4(sc, MIICFG, sc->miicfg);

        /* Default to unknown PHY. */
        sc->phyid = EPIC_UNKN_PHY;

        /* Lookup selected PHY. */
        for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL;
             miisc = LIST_NEXT(miisc, mii_list)) {
            if (IFM_INST(media) == miisc->mii_inst) {
                sc->physc = miisc;
                break;
            }
        }

        /* Identify selected PHY. */
        if (sc->physc) {
            int id1, id2, model, oui;

            id1 = PHY_READ(sc->physc, MII_PHYIDR1);
            id2 = PHY_READ(sc->physc, MII_PHYIDR2);

            oui = MII_OUI(id1, id2);
            model = MII_MODEL(id2);
            switch (oui) {
            case MII_OUI_QUALSEMI:
                if (model == MII_MODEL_QUALSEMI_QS6612)
                    sc->phyid = EPIC_QS6612_PHY;
                break;
            case MII_OUI_xxALTIMA:
                if (model == MII_MODEL_xxALTIMA_AC101)
                    sc->phyid = EPIC_AC101_PHY;
                break;
            case MII_OUI_xxLEVEL1:
                if (model == MII_MODEL_xxLEVEL1_LXT970)
                    sc->phyid = EPIC_LXT970_PHY;
                break;
            }
        }
    }

    /*
     * Do PHY specific card setup.
     */

    /*
     * Call this, to isolate all not selected PHYs and
     * set up selected.
     */
    mii_mediachg(mii);

    /* Do our own setup. */
    switch (sc->phyid) {
    case EPIC_QS6612_PHY:
        break;
    case EPIC_AC101_PHY:
        /* We have to powerup fiber tranceivers. */
        if (IFM_SUBTYPE(media) == IFM_100_FX)
            sc->miicfg |= MIICFG_694_ENABLE;
        else
            sc->miicfg &= ~MIICFG_694_ENABLE;
        CSR_WRITE_4(sc, MIICFG, sc->miicfg);
        break;
    case EPIC_LXT970_PHY:
        /* We have to powerup fiber tranceivers. */
        cfg = PHY_READ(sc->physc, MII_LXTPHY_CONFIG);
        if (IFM_SUBTYPE(media) == IFM_100_FX)
            cfg |= CONFIG_LEDC1 | CONFIG_LEDC0;
        else
            cfg &= ~(CONFIG_LEDC1 | CONFIG_LEDC0);
        PHY_WRITE(sc->physc, MII_LXTPHY_CONFIG, cfg);
        break;
    case EPIC_SERIAL:
        /* Select serial PHY (10base2/BNC usually). */
        sc->miicfg |= MIICFG_694_ENABLE | MIICFG_SERIAL_ENABLE;
        CSR_WRITE_4(sc, MIICFG, sc->miicfg);

        /* There is no driver to fill this. */
        mii->mii_media_active = media;
        mii->mii_media_status = 0;

        /*
         * We need to call this manually as it wasn't called
         * in mii_mediachg().
         */
        epic_miibus_statchg(sc->dev);
        break;
    default:
        device_printf(sc->dev, "ERROR! Unknown PHY selected\n");
        return (EINVAL);
    }

    return (0);
}

/*
 * Report current media status.
 */
static void
epic_ifmedia_sts(ifp, ifmr)
    struct ifnet *ifp;
    struct ifmediareq *ifmr;
{
    epic_softc_t *sc;
    struct mii_data *mii;
    struct ifmedia *ifm;

    sc = ifp->if_softc;
    mii = device_get_softc(sc->miibus);
    ifm = &mii->mii_media;

    /* Nothing should be selected if interface is down. */
    if ((ifp->if_flags & IFF_UP) == 0) {
        ifmr->ifm_active = IFM_NONE;
        ifmr->ifm_status = 0;
        return;
    }

    /* Call underlying pollstat, if not serial PHY. */
    if (sc->phyid != EPIC_SERIAL)
        mii_pollstat(mii);

    /* Simply copy media info. */
    ifmr->ifm_active = mii->mii_media_active;
    ifmr->ifm_status = mii->mii_media_status;
}

/*
 * Callback routine, called on media change.
 */
static void
epic_miibus_statchg(dev)
    device_t dev;
{
    epic_softc_t *sc;
    struct mii_data *mii;
    int media;

    sc = device_get_softc(dev);
    mii = device_get_softc(sc->miibus);
    media = mii->mii_media_active;

    sc->txcon &= ~(TXCON_LOOPBACK_MODE | TXCON_FULL_DUPLEX);

    /*
     * If we are in full-duplex mode or loopback operation,
     * we need to decouple receiver and transmitter.
     */
    if (IFM_OPTIONS(media) & (IFM_FDX | IFM_LOOP))
        sc->txcon |= TXCON_FULL_DUPLEX;

    /* On some cards we need manualy set fullduplex led. */
    if (sc->cardid == SMC9432FTX ||
        sc->cardid == SMC9432FTX_SC) {
        if (IFM_OPTIONS(media) & IFM_FDX)
            sc->miicfg |= MIICFG_694_ENABLE;
        else
            sc->miicfg &= ~MIICFG_694_ENABLE;

        CSR_WRITE_4(sc, MIICFG, sc->miicfg);
    }

    /* Update baudrate. */
    if (IFM_SUBTYPE(media) == IFM_100_TX ||
        IFM_SUBTYPE(media) == IFM_100_FX)
        sc->ifp->if_baudrate = 100000000;
    else
        sc->ifp->if_baudrate = 10000000;

    /* TXCON can only be written while the chip is idle. */
    epic_stop_activity(sc);
    epic_set_tx_mode(sc);
    epic_start_activity(sc);
}

static void
epic_miibus_mediainit(dev)
    device_t dev;
{
    epic_softc_t *sc;
    struct mii_data *mii;
    struct ifmedia *ifm;
    int media;

    sc = device_get_softc(dev);
    mii = device_get_softc(sc->miibus);
    ifm = &mii->mii_media;

    /*
     * Add Serial Media Interface if present, this applies to
     * SMC9432BTX serie.
     */
    if (CSR_READ_4(sc, MIICFG) & MIICFG_PHY_PRESENT) {
        /* Store its instance. */
        sc->serinst = mii->mii_instance++;

        /* Add as 10base2/BNC media. */
        media = IFM_MAKEWORD(IFM_ETHER, IFM_10_2, 0, sc->serinst);
        ifmedia_add(ifm, media, 0, NULL);

        /* Report to user. */
        device_printf(sc->dev, "serial PHY detected (10Base2/BNC)\n");
    }
}

/*
 * Reset chip and update media.
 */
static void
epic_init(xsc)
    void *xsc;
{
    epic_softc_t *sc = xsc;
    struct ifnet *ifp = sc->ifp;
    int s, i;

    s = splimp();

    /* If interface is already running, then we need not do anything. */
    if (ifp->if_flags & IFF_RUNNING) {
        splx(s);
        return;
    }

    /* Soft reset the chip (we have to power up card before). */
    CSR_WRITE_4(sc, GENCTL, 0);
    CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET);

    /*
     * Reset takes 15 pci ticks which depends on PCI bus speed.
     * Assuming it >= 33000000 hz, we have wait at least 495e-6 sec.
     */
    DELAY(500);

    /* Wake up */
    CSR_WRITE_4(sc, GENCTL, 0);

    /* Workaround for Application Note 7-15 */
    for (i = 0; i < 16; i++)
        CSR_WRITE_4(sc, TEST1, TEST1_CLOCK_TEST);

    /* Give rings to EPIC */
    CSR_WRITE_4(sc, PRCDAR, sc->rx_addr);
    CSR_WRITE_4(sc, PTCDAR, sc->tx_addr);

    /* Put node address to EPIC. */
    CSR_WRITE_4(sc, LAN0, ((u_int16_t *)IFP2ENADDR(sc->ifp))[0]);
    CSR_WRITE_4(sc, LAN1, ((u_int16_t *)IFP2ENADDR(sc->ifp))[1]);
    CSR_WRITE_4(sc, LAN2, ((u_int16_t *)IFP2ENADDR(sc->ifp))[2]);

    /* Set tx mode, includeing transmit threshold. */
    epic_set_tx_mode(sc);

    /* Compute and set RXCON. */
    epic_set_rx_mode(sc);

    /* Set multicast table. */
    epic_set_mc_table(sc);

    /* Enable interrupts by setting the interrupt mask. */
    CSR_WRITE_4(sc, INTMASK,
        INTSTAT_RCC  | /* INTSTAT_RQE | INTSTAT_OVW | INTSTAT_RXE | */
        /* INTSTAT_TXC | */ INTSTAT_TCC | INTSTAT_TQE | INTSTAT_TXU |
        INTSTAT_FATAL);

    /* Acknowledge all pending interrupts. */
    CSR_WRITE_4(sc, INTSTAT, CSR_READ_4(sc, INTSTAT));

    /* Enable interrupts, set for PCI read multiple and etc */
    CSR_WRITE_4(sc, GENCTL,
        GENCTL_ENABLE_INTERRUPT | GENCTL_MEMORY_READ_MULTIPLE |
        GENCTL_ONECOPY | GENCTL_RECEIVE_FIFO_THRESHOLD64);

    /* Mark interface running ... */
    if (ifp->if_flags & IFF_UP)
        ifp->if_flags |= IFF_RUNNING;
    else
        ifp->if_flags &= ~IFF_RUNNING;

    /* ... and free */
    ifp->if_flags &= ~IFF_OACTIVE;

    /* Start Rx process */
    epic_start_activity(sc);

    /* Set appropriate media */
    epic_ifmedia_upd(ifp);

    sc->stat_ch = timeout((timeout_t *)epic_stats_update, sc, hz);
    splx(s);
}

/*
 * Synopsis: calculate and set Rx mode. Chip must be in idle state to
 * access RXCON.
 */
static void
epic_set_rx_mode(sc)
    epic_softc_t *sc;
{
    u_int32_t flags;
    u_int32_t rxcon;

    flags = sc->ifp->if_flags;
    rxcon = RXCON_DEFAULT;

#ifdef EPIC_EARLY_RX
    rxcon |= RXCON_EARLY_RX;
#endif

    rxcon |= (flags & IFF_PROMISC) ? RXCON_PROMISCUOUS_MODE : 0;

    CSR_WRITE_4(sc, RXCON, rxcon);
}

/*
 * Synopsis: Set transmit control register. Chip must be in idle state to
 * access TXCON.
*/
static void
epic_set_tx_mode(sc)
    epic_softc_t *sc;
{
    if (sc->txcon & TXCON_EARLY_TRANSMIT_ENABLE)
        CSR_WRITE_4(sc, ETXTHR, sc->tx_threshold);

    CSR_WRITE_4(sc, TXCON, sc->txcon);
}

/*
 * Synopsis: Program multicast filter honoring IFF_ALLMULTI and IFF_PROMISC
 * flags (note that setting PROMISC bit in EPIC's RXCON will only touch
 * individual frames, multicast filter must be manually programmed).
 *
 * Note: EPIC must be in idle state.
 */
static void
epic_set_mc_table(sc)
    epic_softc_t *sc;
{
    struct ifnet *ifp;
    struct ifmultiaddr *ifma;
    u_int16_t filter[4];
    u_int8_t h;

    ifp = sc->ifp;
    if (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC)) {
        /* Accept all multicast: open the 64-bit hash filter fully. */
        CSR_WRITE_4(sc, MC0, 0xFFFF);
        CSR_WRITE_4(sc, MC1, 0xFFFF);
        CSR_WRITE_4(sc, MC2, 0xFFFF);
        CSR_WRITE_4(sc, MC3, 0xFFFF);
        return;
    }

    filter[0] = 0;
    filter[1] = 0;
    filter[2] = 0;
    filter[3] = 0;

    /* Walk the multicast list under the address-list lock. */
    IF_ADDR_LOCK(ifp);
#if __FreeBSD_version < 500000
    LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
#else
    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
#endif
        if (ifma->ifma_addr->sa_family != AF_LINK)
            continue;

        /* Top 6 bits of the big-endian CRC index the 64-bit filter. */
        h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
            ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
        filter[h >> 4] |= 1 << (h & 0xF);
    }
    IF_ADDR_UNLOCK(ifp);

    CSR_WRITE_4(sc, MC0, filter[0]);
    CSR_WRITE_4(sc, MC1, filter[1]);
    CSR_WRITE_4(sc, MC2, filter[2]);
    CSR_WRITE_4(sc, MC3, filter[3]);
}

/*
 * Synopsis: Start receive process and transmit one, if they need.
 */
static void
epic_start_activity(sc)
    epic_softc_t *sc;
{
    /* Start rx process. */
    CSR_WRITE_4(sc, COMMAND, COMMAND_RXQUEUED | COMMAND_START_RX |
        (sc->pending_txs ? COMMAND_TXQUEUED : 0));
}

/*
 * Synopsis: Completely stop Rx and Tx processes. If TQE is set additional
 * packet needs to be queued to stop Tx DMA.
 */
static void
epic_stop_activity(sc)
    epic_softc_t *sc;
{
    int status, i;

    /* Stop Tx and Rx DMA. */
    CSR_WRITE_4(sc, COMMAND,
        COMMAND_STOP_RX | COMMAND_STOP_RDMA | COMMAND_STOP_TDMA);

    /* Wait Rx and Tx DMA to stop (why 1 ms ??? XXX). */
    for (i = 0; i < 0x1000; i++) {
        status = CSR_READ_4(sc, INTSTAT) &
            (INTSTAT_TXIDLE | INTSTAT_RXIDLE);
        if (status == (INTSTAT_TXIDLE | INTSTAT_RXIDLE))
            break;
        DELAY(1);
    }

    /* Catch all finished packets. */
    epic_rx_done(sc);
    epic_tx_done(sc);

    status = CSR_READ_4(sc, INTSTAT);

    if ((status & INTSTAT_RXIDLE) == 0)
        device_printf(sc->dev, "ERROR! Can't stop Rx DMA\n");

    if ((status & INTSTAT_TXIDLE) == 0)
        device_printf(sc->dev, "ERROR! Can't stop Tx DMA\n");

    /*
     * May need to queue one more packet if TQE, this is rare
     * but existing case.
     */
    if ((status & INTSTAT_TQE) && !(status & INTSTAT_TXIDLE))
        (void)epic_queue_last_packet(sc);
}

/*
 * The EPIC transmitter may stuck in TQE state. It will not go IDLE until
 * a packet from current descriptor will be copied to internal RAM. We
 * compose a dummy packet here and queue it for transmission.
 *
 * XXX the packet will then be actually sent over network...
 */
static int
epic_queue_last_packet(sc)
    epic_softc_t *sc;
{
    struct epic_tx_desc *desc;
    struct epic_frag_list *flist;
    struct epic_tx_buffer *buf;
    struct mbuf *m0;
    int error, i;

    device_printf(sc->dev, "queue last packet\n");

    desc = sc->tx_desc + sc->cur_tx;
    flist = sc->tx_flist + sc->cur_tx;
    buf = sc->tx_buffer + sc->cur_tx;

    /* The current descriptor must be free for our dummy frame. */
    if ((desc->status & 0x8000) || (buf->mbuf != NULL))
        return (EBUSY);

    MGETHDR(m0, M_DONTWAIT, MT_DATA);
    if (m0 == NULL)
        return (ENOBUFS);

    /* Prepare mbuf: a zero-filled minimum-size frame. */
    m0->m_len = min(MHLEN, ETHER_MIN_LEN - ETHER_CRC_LEN);
    m0->m_pkthdr.len = m0->m_len;
    m0->m_pkthdr.rcvif = sc->ifp;
    bzero(mtod(m0, caddr_t), m0->m_len);

    /* Fill fragments list. */
    error = bus_dmamap_load_mbuf(sc->mtag, buf->map, m0,
        epic_dma_map_txbuf, flist, 0);
    if (error) {
        m_freem(m0);
        return (error);
    }
    bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREWRITE);

    /* Fill in descriptor. */
    buf->mbuf = m0;
    sc->pending_txs++;
    sc->cur_tx = (sc->cur_tx + 1) & TX_RING_MASK;
    desc->control = 0x01;
    desc->txlength = max(m0->m_pkthdr.len, ETHER_MIN_LEN - ETHER_CRC_LEN);
    desc->status = 0x8000;
    bus_dmamap_sync(sc->ttag, sc->tmap,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    bus_dmamap_sync(sc->ftag, sc->fmap, BUS_DMASYNC_PREWRITE);

    /* Launch transmission. */
    CSR_WRITE_4(sc, COMMAND, COMMAND_STOP_TDMA | COMMAND_TXQUEUED);

    /* Wait Tx DMA to stop (for how long??? XXX) */
    for (i = 0; i < 1000; i++) {
        if (CSR_READ_4(sc, INTSTAT) & INTSTAT_TXIDLE)
            break;
        DELAY(1);
    }

    if ((CSR_READ_4(sc, INTSTAT) & INTSTAT_TXIDLE) == 0)
        device_printf(sc->dev, "ERROR! can't stop Tx DMA (2)\n");
    else
        epic_tx_done(sc);

    return (0);
}

/*
 * Synopsis: Shut down board and deallocates rings.
 */
static void
epic_stop(sc)
    epic_softc_t *sc;
{
    int s;

    s = splimp();

    sc->ifp->if_timer = 0;

    untimeout((timeout_t *)epic_stats_update, sc, sc->stat_ch);

    /* Disable interrupts */
    CSR_WRITE_4(sc, INTMASK, 0);
    CSR_WRITE_4(sc, GENCTL, 0);

    /* Try to stop Rx and TX processes */
    epic_stop_activity(sc);

    /* Reset chip */
    CSR_WRITE_4(sc, GENCTL, GENCTL_SOFT_RESET);
    DELAY(1000);

    /* Make chip go to bed */
    CSR_WRITE_4(sc, GENCTL, GENCTL_POWER_DOWN);

    /* Mark as stoped */
    sc->ifp->if_flags &= ~IFF_RUNNING;
    splx(s);
}

/*
 * Synopsis: This function should free all memory allocated for rings.
*/

/*
 * Free all Rx/Tx ring resources: clear each hardware descriptor, unload
 * and destroy the per-buffer DMA maps, and free any attached mbufs.
 * Safe on partially initialized rings — slots with no mbuf are skipped,
 * which lets epic_init_rings() call this on its own error paths.
 */
static void
epic_free_rings(sc)
	epic_softc_t *sc;
{
	int i;

	for (i = 0; i < RX_RING_SIZE; i++) {
		struct epic_rx_buffer *buf = sc->rx_buffer + i;
		struct epic_rx_desc *desc = sc->rx_desc + i;

		desc->status = 0;
		desc->buflength = 0;
		desc->bufaddr = 0;

		if (buf->mbuf) {
			bus_dmamap_unload(sc->mtag, buf->map);
			bus_dmamap_destroy(sc->mtag, buf->map);
			m_freem(buf->mbuf);
		}
		buf->mbuf = NULL;
	}

	if (sc->sparemap != NULL)
		bus_dmamap_destroy(sc->mtag, sc->sparemap);

	for (i = 0; i < TX_RING_SIZE; i++) {
		struct epic_tx_buffer *buf = sc->tx_buffer + i;
		struct epic_tx_desc *desc = sc->tx_desc + i;

		desc->status = 0;
		desc->buflength = 0;
		desc->bufaddr = 0;

		if (buf->mbuf) {
			bus_dmamap_unload(sc->mtag, buf->map);
			bus_dmamap_destroy(sc->mtag, buf->map);
			m_freem(buf->mbuf);
		}
		buf->mbuf = NULL;
	}
}

/*
 * Synopsis: Allocates mbufs for Rx ring and point Rx descs to them.
 * Point Tx descs to fragment lists. Check that all descs and fraglists
 * are bounded and aligned properly.
 *
 * Returns 0 on success; EFAULT if a descriptor or fragment list would be
 * misaligned or would cross a page boundary, ENOBUFS/errno on allocation
 * failure.  All error paths unwind via epic_free_rings().
 */
static int
epic_init_rings(sc)
	epic_softc_t *sc;
{
	int error, i;

	sc->cur_rx = sc->cur_tx = sc->dirty_tx = sc->pending_txs = 0;

	/* Initialize the RX descriptor ring.
*/
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct epic_rx_buffer *buf = sc->rx_buffer + i;
		struct epic_rx_desc *desc = sc->rx_desc + i;

		desc->status = 0;	/* Owned by driver */
		/* Link to the next descriptor; the mask wraps the ring. */
		desc->next = sc->rx_addr +
		    ((i + 1) & RX_RING_MASK) * sizeof(struct epic_rx_desc);

		/* Descriptor must be 4-byte aligned and within one page. */
		if ((desc->next & 3) ||
		    ((desc->next & PAGE_MASK) + sizeof *desc) > PAGE_SIZE) {
			epic_free_rings(sc);
			return (EFAULT);
		}

		buf->mbuf = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (buf->mbuf == NULL) {
			epic_free_rings(sc);
			return (ENOBUFS);
		}
		buf->mbuf->m_len = buf->mbuf->m_pkthdr.len = MCLBYTES;
		/* Leave room so the IP header ends up aligned. */
		m_adj(buf->mbuf, ETHER_ALIGN);

		error = bus_dmamap_create(sc->mtag, 0, &buf->map);
		if (error) {
			epic_free_rings(sc);
			return (error);
		}
		error = bus_dmamap_load_mbuf(sc->mtag, buf->map, buf->mbuf,
		    epic_dma_map_rxbuf, desc, 0);
		if (error) {
			epic_free_rings(sc);
			return (error);
		}
		bus_dmamap_sync(sc->mtag, buf->map, BUS_DMASYNC_PREREAD);

		desc->buflength = buf->mbuf->m_len; /* Max RX buffer length */
		desc->status = 0x8000;		/* Set owner bit to NIC */
	}
	bus_dmamap_sync(sc->rtag, sc->rmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Create the spare DMA map. */
	error = bus_dmamap_create(sc->mtag, 0, &sc->sparemap);
	if (error) {
		epic_free_rings(sc);
		return (error);
	}

	/* Initialize the TX descriptor ring.
*/
	for (i = 0; i < TX_RING_SIZE; i++) {
		struct epic_tx_buffer *buf = sc->tx_buffer + i;
		struct epic_tx_desc *desc = sc->tx_desc + i;

		desc->status = 0;
		desc->next = sc->tx_addr +
		    ((i + 1) & TX_RING_MASK) * sizeof(struct epic_tx_desc);

		if ((desc->next & 3) ||
		    ((desc->next & PAGE_MASK) + sizeof *desc) > PAGE_SIZE) {
			epic_free_rings(sc);
			return (EFAULT);
		}

		buf->mbuf = NULL;
		/* Tx descriptors point at pre-laid-out fragment lists. */
		desc->bufaddr = sc->frag_addr +
		    i * sizeof(struct epic_frag_list);

		if ((desc->bufaddr & 3) ||
		    ((desc->bufaddr & PAGE_MASK) +
		    sizeof(struct epic_frag_list)) > PAGE_SIZE) {
			epic_free_rings(sc);
			return (EFAULT);
		}

		error = bus_dmamap_create(sc->mtag, 0, &buf->map);
		if (error) {
			epic_free_rings(sc);
			return (error);
		}
	}
	bus_dmamap_sync(sc->ttag, sc->tmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->ftag, sc->fmap, BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * EEPROM operation functions
 */

/*
 * Write the EEPROM control register and busy-wait (bounded) for the
 * chip to clear the 0x20 busy bit.
 */
static void
epic_write_eepromreg(sc, val)
	epic_softc_t *sc;
	u_int8_t val;
{
	u_int16_t i;

	CSR_WRITE_1(sc, EECTL, val);
	for (i = 0; i < 0xFF; i++) {
		if ((CSR_READ_1(sc, EECTL) & 0x20) == 0)
			break;
	}
}

/* Read back the EEPROM control register. */
static u_int8_t
epic_read_eepromreg(sc)
	epic_softc_t *sc;
{

	return (CSR_READ_1(sc, EECTL));
}

/* Clock one bit through the serial EEPROM (raise, pulse 0x4, lower). */
static u_int8_t
epic_eeprom_clock(sc, val)
	epic_softc_t *sc;
	u_int8_t val;
{

	epic_write_eepromreg(sc, val);
	epic_write_eepromreg(sc, (val | 0x4));
	epic_write_eepromreg(sc, val);

	return (epic_read_eepromreg(sc));
}

/* Shift a 16-bit word out to the EEPROM, MSB first. */
static void
epic_output_eepromw(sc, val)
	epic_softc_t *sc;
	u_int16_t val;
{
	int i;

	for (i = 0xF; i >= 0; i--) {
		if (val & (1 << i))
			epic_eeprom_clock(sc, 0x0B);
		else
			epic_eeprom_clock(sc, 0x03);
	}
}

/* Shift a 16-bit word in from the EEPROM, MSB first. */
static u_int16_t
epic_input_eepromw(sc)
	epic_softc_t *sc;
{
	u_int16_t retval = 0;
	int i;

	for (i = 0xF; i >= 0; i--) {
		if (epic_eeprom_clock(sc, 0x3) & 0x10)
			retval |= (1 << i);
	}

	return (retval);
}

/*
 * Read one 16-bit word from EEPROM location 'loc'.  The command format
 * depends on the EEPROM size bit (0x40) in the control register.
 */
static int
epic_read_eeprom(sc, loc)
	epic_softc_t *sc;
	u_int16_t loc;
{
	u_int16_t dataval;
	u_int16_t read_cmd;

	epic_write_eepromreg(sc, 3);

	if (epic_read_eepromreg(sc) & 0x40)
		read_cmd = (loc & 0x3F) | 0x180;
	else
		read_cmd = (loc & 0xFF) | 0x600;

	epic_output_eepromw(sc, read_cmd);
	dataval = epic_input_eepromw(sc);
	/* Deselect the EEPROM when done. */
	epic_write_eepromreg(sc, 1);

	return (dataval);
}

/*
 * Here goes MII read/write routines.
 */

/*
 * Read a PHY register via the MII control/data registers; bit 0x01 is
 * the "read in progress" strobe, polled (bounded) until it clears.
 */
static int
epic_read_phy_reg(sc, phy, reg)
	epic_softc_t *sc;
	int phy, reg;
{
	int i;

	CSR_WRITE_4(sc, MIICTL, ((reg << 4) | (phy << 9) | 0x01));

	for (i = 0; i < 0x100; i++) {
		if ((CSR_READ_4(sc, MIICTL) & 0x01) == 0)
			break;
		DELAY(1);
	}

	return (CSR_READ_4(sc, MIIDATA));
}

/* Write a PHY register; bit 0x02 is the "write in progress" strobe. */
static void
epic_write_phy_reg(sc, phy, reg, val)
	epic_softc_t *sc;
	int phy, reg, val;
{
	int i;

	CSR_WRITE_4(sc, MIIDATA, val);
	CSR_WRITE_4(sc, MIICTL, ((reg << 4) | (phy << 9) | 0x02));

	for(i = 0; i < 0x100; i++) {
		if ((CSR_READ_4(sc, MIICTL) & 0x02) == 0)
			break;
		DELAY(1);
	}
}

/* miibus(4) glue: read a PHY register. */
static int
epic_miibus_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	epic_softc_t *sc;

	sc = device_get_softc(dev);

	return (PHY_READ_2(sc, phy, reg));
}

/* miibus(4) glue: write a PHY register. */
static int
epic_miibus_writereg(dev, phy, reg, data)
	device_t dev;
	int phy, reg, data;
{
	epic_softc_t *sc;

	sc = device_get_softc(dev);

	PHY_WRITE_2(sc, phy, reg, data);

	return (0);
}
Index: stable/6/sys/dev/txp/if_txp.c
===================================================================
--- stable/6/sys/dev/txp/if_txp.c	(revision 149421)
+++ stable/6/sys/dev/txp/if_txp.c	(revision 149422)
@@ -1,1883 +1,1885 @@
/* $OpenBSD: if_txp.c,v 1.48 2001/06/27 06:34:50 kjc Exp $ */

/*-
 * Copyright (c) 2001
 * Jason L. Wright , Theo de Raadt, and
 * Aaron Campbell . All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 * must display the following acknowledgement:
 * This product includes software developed by Jason L. Wright,
 * Theo de Raadt and Aaron Campbell.
 * 4. Neither the name of the author nor the names of any co-contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NOTE(review): the header names were lost from the following #include
 * directives during extraction (angle-bracketed names stripped), and
 * __FBSDID("$FreeBSD$") appears twice — confirm against the repository
 * copy of stable/6/sys/dev/txp/if_txp.c before relying on this text.
 */
#include
__FBSDID("$FreeBSD$");

/*
 * Driver for 3c990 (Typhoon) Ethernet ASIC
 */

#include
__FBSDID("$FreeBSD$");

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include	/* for vtophys */
#include	/* for vtophys */
#include	/* for DELAY */
#include
#include
#include
#include
#include
#include
#include
#include

#define TXP_USEIOSPACE
#define __STRICT_ALIGNMENT

#include
#include

#ifndef lint
static const char rcsid[] =
  "$FreeBSD$";
#endif

/*
 * Various supported device vendors/types and their names.
 */
/* PCI vendor/device ID table consulted by txp_probe(). */
static struct txp_type txp_devs[] = {
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_TX_95,
	    "3Com 3cR990-TX-95 Etherlink with 3XP Processor" },
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_TX_97,
	    "3Com 3cR990-TX-97 Etherlink with 3XP Processor" },
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990B_TXM,
	    "3Com 3cR990B-TXM Etherlink with 3XP Processor" },
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_SRV_95,
	    "3Com 3cR990-SRV-95 Etherlink Server with 3XP Processor" },
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990_SRV_97,
	    "3Com 3cR990-SRV-97 Etherlink Server with 3XP Processor" },
	{ TXP_VENDORID_3COM, TXP_DEVICEID_3CR990B_SRV,
	    "3Com 3cR990B-SRV Etherlink Server with 3XP Processor" },
	{ 0, 0, NULL }
};

/* Forward declarations for the driver entry points and helpers. */
static int txp_probe	(device_t);
static int txp_attach	(device_t);
static int txp_detach	(device_t);
static void txp_intr	(void *);
static void txp_tick	(void *);
static int txp_shutdown	(device_t);
static int txp_ioctl	(struct ifnet *, u_long, caddr_t);
static void txp_start	(struct ifnet *);
static void txp_stop	(struct txp_softc *);
static void txp_init	(void *);
static void txp_watchdog	(struct ifnet *);

static void txp_release_resources(struct txp_softc *);
static int txp_chip_init(struct txp_softc *);
static int txp_reset_adapter(struct txp_softc *);
static int txp_download_fw(struct txp_softc *);
static int txp_download_fw_wait(struct txp_softc *);
static int txp_download_fw_section (struct txp_softc *,
    struct txp_fw_section_header *, int);
static int txp_alloc_rings(struct txp_softc *);
static int txp_rxring_fill(struct txp_softc *);
static void txp_rxring_empty(struct txp_softc *);
static void txp_set_filter(struct txp_softc *);

static int txp_cmd_desc_numfree(struct txp_softc *);
static int txp_command (struct txp_softc *, u_int16_t, u_int16_t, u_int32_t,
    u_int32_t, u_int16_t *, u_int32_t *, u_int32_t *, int);
static int txp_command2 (struct txp_softc *, u_int16_t, u_int16_t,
    u_int32_t, u_int32_t, struct txp_ext_desc *, u_int8_t,
    struct txp_rsp_desc **, int);
static int txp_response (struct txp_softc *, u_int32_t, u_int16_t, u_int16_t,
    struct txp_rsp_desc **);
static void txp_rsp_fixup (struct txp_softc *, struct txp_rsp_desc *,
    struct txp_rsp_desc *);
static void txp_capabilities(struct txp_softc *);

static void txp_ifmedia_sts(struct ifnet *, struct ifmediareq *);
static int txp_ifmedia_upd(struct ifnet *);
#ifdef TXP_DEBUG
static void txp_show_descriptor(void *);
#endif
static void txp_tx_reclaim(struct txp_softc *, struct txp_tx_ring *);
static void txp_rxbuf_reclaim(struct txp_softc *);
static void txp_rx_reclaim(struct txp_softc *, struct txp_rx_ring *);

/* Register the device either via I/O ports or memory space. */
#ifdef TXP_USEIOSPACE
#define TXP_RES			SYS_RES_IOPORT
#define TXP_RID			TXP_PCI_LOIO
#else
#define TXP_RES			SYS_RES_MEMORY
#define TXP_RID			TXP_PCI_LOMEM
#endif

static device_method_t txp_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		txp_probe),
	DEVMETHOD(device_attach,	txp_attach),
	DEVMETHOD(device_detach,	txp_detach),
	DEVMETHOD(device_shutdown,	txp_shutdown),
	{ 0, 0 }
};

static driver_t txp_driver = {
	"txp",
	txp_methods,
	sizeof(struct txp_softc)
};

static devclass_t txp_devclass;

DRIVER_MODULE(txp, pci, txp_driver, txp_devclass, 0, 0);
MODULE_DEPEND(txp, pci, 1, 1, 1);
MODULE_DEPEND(txp, ether, 1, 1, 1);

/*
 * Probe: match the PCI vendor/device pair against txp_devs[].
 */
static int
txp_probe(dev)
	device_t dev;
{
	struct txp_type *t;

	t = txp_devs;

	while(t->txp_name != NULL) {
		if ((pci_get_vendor(dev) == t->txp_vid) &&
		    (pci_get_device(dev) == t->txp_did)) {
			device_set_desc(dev, t->txp_name);
			return(BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return(ENXIO);
}

/*
 * Attach: map registers, hook the interrupt, boot the 3XP firmware,
 * allocate the shared rings and register the ifnet.
 */
static int
txp_attach(dev)
	device_t dev;
{
	struct txp_softc *sc;
	struct ifnet *ifp;
	u_int16_t p1;
	u_int32_t p2;
	int unit, error = 0, rid;
	u_char eaddr[6];

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	sc->sc_dev = dev;
	sc->sc_cold = 1;

	mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);

	/*
	 * Map control/status registers.
*/ pci_enable_busmaster(dev); rid = TXP_RID; sc->sc_res = bus_alloc_resource_any(dev, TXP_RES, &rid, RF_ACTIVE); if (sc->sc_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); error = ENXIO; goto fail; } sc->sc_bt = rman_get_bustag(sc->sc_res); sc->sc_bh = rman_get_bushandle(sc->sc_res); /* Allocate interrupt */ rid = 0; sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->sc_irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); txp_release_resources(sc); error = ENXIO; goto fail; } error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET, txp_intr, sc, &sc->sc_intrhand); if (error) { txp_release_resources(sc); device_printf(dev, "couldn't set up irq\n"); goto fail; } if (txp_chip_init(sc)) { txp_release_resources(sc); /* XXX: set error to ??? */ goto fail; } sc->sc_fwbuf = contigmalloc(32768, M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); error = txp_download_fw(sc); contigfree(sc->sc_fwbuf, 32768, M_DEVBUF); sc->sc_fwbuf = NULL; if (error) { txp_release_resources(sc); goto fail; } sc->sc_ldata = contigmalloc(sizeof(struct txp_ldata), M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); bzero(sc->sc_ldata, sizeof(struct txp_ldata)); if (txp_alloc_rings(sc)) { txp_release_resources(sc); /* XXX: set error to ??? */ goto fail; } if (txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0, NULL, NULL, NULL, 1)) { txp_release_resources(sc); /* XXX: set error to ??? */ goto fail; } if (txp_command(sc, TXP_CMD_STATION_ADDRESS_READ, 0, 0, 0, &p1, &p2, NULL, 1)) { txp_release_resources(sc); /* XXX: set error to ??? 
*/ goto fail; } txp_set_filter(sc); eaddr[0] = ((u_int8_t *)&p1)[1]; eaddr[1] = ((u_int8_t *)&p1)[0]; eaddr[2] = ((u_int8_t *)&p2)[3]; eaddr[3] = ((u_int8_t *)&p2)[2]; eaddr[4] = ((u_int8_t *)&p2)[1]; eaddr[5] = ((u_int8_t *)&p2)[0]; sc->sc_cold = 0; ifmedia_init(&sc->sc_ifmedia, 0, txp_ifmedia_upd, txp_ifmedia_sts); ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL); ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL); ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL); ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL); ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); sc->sc_xcvr = TXP_XCVR_AUTO; txp_command(sc, TXP_CMD_XCVR_SELECT, TXP_XCVR_AUTO, 0, 0, NULL, NULL, NULL, 0); ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO); ifp = sc->sc_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { txp_release_resources(sc); device_printf(dev, "couldn't set up irq\n"); error = ENOSPC; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_NEEDSGIANT; ifp->if_ioctl = txp_ioctl; ifp->if_start = txp_start; ifp->if_watchdog = txp_watchdog; ifp->if_init = txp_init; ifp->if_baudrate = 100000000; ifp->if_snd.ifq_maxlen = TX_ENTRIES; ifp->if_hwassist = 0; txp_capabilities(sc); /* * Attach us everywhere */ ether_ifattach(ifp, eaddr); callout_handle_init(&sc->sc_tick); return(0); fail: txp_release_resources(sc); mtx_destroy(&sc->sc_mtx); return(error); } static int txp_detach(dev) device_t dev; { struct txp_softc *sc; struct ifnet *ifp; int i; sc = device_get_softc(dev); ifp = sc->sc_ifp; txp_stop(sc); txp_shutdown(dev); ifmedia_removeall(&sc->sc_ifmedia); ether_ifdetach(ifp); for (i = 0; i < RXBUF_ENTRIES; i++) free(sc->sc_rxbufs[i].rb_sd, M_DEVBUF); 
txp_release_resources(sc); mtx_destroy(&sc->sc_mtx); return(0); } static void txp_release_resources(sc) struct txp_softc *sc; { device_t dev; dev = sc->sc_dev; if (sc->sc_ifp) if_free(sc->sc_ifp); if (sc->sc_intrhand != NULL) bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand); if (sc->sc_irq != NULL) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq); if (sc->sc_res != NULL) bus_release_resource(dev, TXP_RES, TXP_RID, sc->sc_res); if (sc->sc_ldata != NULL) contigfree(sc->sc_ldata, sizeof(struct txp_ldata), M_DEVBUF); return; } static int txp_chip_init(sc) struct txp_softc *sc; { /* disable interrupts */ WRITE_REG(sc, TXP_IER, 0); WRITE_REG(sc, TXP_IMR, TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | TXP_INT_LATCH); /* ack all interrupts */ WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH | TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 | TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0); if (txp_reset_adapter(sc)) return (-1); /* disable interrupts */ WRITE_REG(sc, TXP_IER, 0); WRITE_REG(sc, TXP_IMR, TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | TXP_INT_LATCH); /* ack all interrupts */ WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH | TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 | TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0); return (0); } static int txp_reset_adapter(sc) struct txp_softc *sc; { u_int32_t r; int i; r = 0; WRITE_REG(sc, TXP_SRR, TXP_SRR_ALL); DELAY(1000); WRITE_REG(sc, TXP_SRR, 0); /* Should wait max 6 seconds */ for (i = 0; i < 6000; i++) { r = READ_REG(sc, TXP_A2H_0); if (r == 
 STAT_WAITING_FOR_HOST_REQUEST)
			break;
		DELAY(1000);
	}

	if (r != STAT_WAITING_FOR_HOST_REQUEST) {
		device_printf(sc->sc_dev, "reset hung\n");
		return (-1);
	}

	return (0);
}

/*
 * Download the runtime firmware image (tc990image) to the 3XP
 * processor, section by section, using the H2A/A2H mailbox registers.
 * Returns 0 on success, -1 on any handshake or validation failure.
 * IER/IMR are saved and restored around the download.
 */
static int
txp_download_fw(sc)
	struct txp_softc *sc;
{
	struct txp_fw_file_header *fileheader;
	struct txp_fw_section_header *secthead;
	int sect;
	u_int32_t r, i, ier, imr;

	r = 0;
	ier = READ_REG(sc, TXP_IER);
	WRITE_REG(sc, TXP_IER, ier | TXP_INT_A2H_0);

	imr = READ_REG(sc, TXP_IMR);
	WRITE_REG(sc, TXP_IMR, imr | TXP_INT_A2H_0);

	/* Boot firmware must be asking for the host request first. */
	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_HOST_REQUEST)
			break;
		DELAY(50);
	}
	if (r != STAT_WAITING_FOR_HOST_REQUEST) {
		device_printf(sc->sc_dev, "not waiting for host request\n");
		return (-1);
	}

	/* Ack the status */
	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);

	fileheader = (struct txp_fw_file_header *)tc990image;
	if (bcmp("TYPHOON", fileheader->magicid, sizeof(fileheader->magicid))) {
		device_printf(sc->sc_dev, "fw invalid magic\n");
		return (-1);
	}

	/* Tell boot firmware to get ready for image */
	WRITE_REG(sc, TXP_H2A_1, fileheader->addr);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_RUNTIME_IMAGE);

	if (txp_download_fw_wait(sc)) {
		device_printf(sc->sc_dev, "fw wait failed, initial\n");
		return (-1);
	}

	/* Sections follow the file header back to back. */
	secthead = (struct txp_fw_section_header *)(((u_int8_t *)tc990image) +
	    sizeof(struct txp_fw_file_header));

	for (sect = 0; sect < fileheader->nsections; sect++) {
		if (txp_download_fw_section(sc, secthead, sect))
			return (-1);
		secthead = (struct txp_fw_section_header *)
		    (((u_int8_t *)secthead) + secthead->nbytes +
		    sizeof(*secthead));
	}

	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_DOWNLOAD_COMPLETE);

	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_BOOT)
			break;
		DELAY(50);
	}

	if (r != STAT_WAITING_FOR_BOOT) {
		device_printf(sc->sc_dev, "not waiting for boot\n");
		return (-1);
	}

	WRITE_REG(sc, TXP_IER, ier);
	WRITE_REG(sc, TXP_IMR, imr);

	return (0);
}

/*
 * Wait for the firmware to raise A2H_0 and announce it is ready for the
 * next image segment.  Returns 0 when STAT_WAITING_FOR_SEGMENT is seen,
 * -1 on timeout or an unexpected status.
 */
static int
txp_download_fw_wait(sc)
	struct txp_softc *sc;
{
	u_int32_t i, r;

	r = 0;
	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_ISR);
		if (r & TXP_INT_A2H_0)
			break;
		DELAY(50);
	}

	if (!(r & TXP_INT_A2H_0)) {
		device_printf(sc->sc_dev, "fw wait failed comm0\n");
		return (-1);
	}

	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);

	r = READ_REG(sc, TXP_A2H_0);
	if (r != STAT_WAITING_FOR_SEGMENT) {
		device_printf(sc->sc_dev, "fw not waiting for segment\n");
		return (-1);
	}

	return (0);
}

/*
 * Copy one firmware section into the DMA-able staging buffer, verify
 * its checksum (via a dummy mbuf so in_cksum() can be reused), and hand
 * its physical address to the boot firmware.  Bounds-checks the section
 * against the end of the embedded image first.
 */
static int
txp_download_fw_section(sc, sect, sectnum)
	struct txp_softc *sc;
	struct txp_fw_section_header *sect;
	int sectnum;
{
	vm_offset_t dma;
	int rseg, err = 0;
	struct mbuf m;
	u_int16_t csum;

	/* Skip zero length sections */
	if (sect->nbytes == 0)
		return (0);

	/* Make sure we aren't past the end of the image */
	rseg = ((u_int8_t *)sect) - ((u_int8_t *)tc990image);
	if (rseg >= sizeof(tc990image)) {
		device_printf(sc->sc_dev, "fw invalid section address, "
		    "section %d\n", sectnum);
		return (-1);
	}

	/* Make sure this section doesn't go past the end */
	rseg += sect->nbytes;
	if (rseg >= sizeof(tc990image)) {
		device_printf(sc->sc_dev, "fw truncated section %d\n",
		    sectnum);
		return (-1);
	}

	bcopy(((u_int8_t *)sect) + sizeof(*sect), sc->sc_fwbuf, sect->nbytes);
	dma = vtophys(sc->sc_fwbuf);

	/*
	 * dummy up mbuf and verify section checksum
	 */
	m.m_type = MT_DATA;
	m.m_next = m.m_nextpkt = NULL;
	m.m_len = sect->nbytes;
	m.m_data = sc->sc_fwbuf;
	m.m_flags = 0;

	csum = in_cksum(&m, sect->nbytes);
	if (csum != sect->cksum) {
		device_printf(sc->sc_dev, "fw section %d, bad "
		    "cksum (expected 0x%x got 0x%x)\n",
		    sectnum, sect->cksum, csum);
		err = -1;
		goto bail;
	}

	WRITE_REG(sc, TXP_H2A_1, sect->nbytes);
	WRITE_REG(sc, TXP_H2A_2, sect->cksum);
	WRITE_REG(sc, TXP_H2A_3, sect->addr);
	WRITE_REG(sc, TXP_H2A_4, 0);
	WRITE_REG(sc, TXP_H2A_5, dma & 0xffffffff);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_SEGMENT_AVAILABLE);

	if (txp_download_fw_wait(sc)) {
		device_printf(sc->sc_dev, "fw wait failed, "
		    "section %d\n", sectnum);
		err = -1;
	}

bail:
	return (err);
}

/*
 * Interrupt handler: mask everything, then drain all pending work
 * (rx rings, rx buffer refill, tx reclaim) until ISR reads zero.
 */
static void
txp_intr(vsc)
	void *vsc;
{
	struct txp_softc *sc = vsc;
	struct txp_hostvar
 *hv = sc->sc_hostvar;
	u_int32_t isr;

	/* mask all interrupts */
	WRITE_REG(sc, TXP_IMR, TXP_INT_RESERVED | TXP_INT_SELF |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_LATCH);

	isr = READ_REG(sc, TXP_ISR);
	while (isr) {
		WRITE_REG(sc, TXP_ISR, isr);

		if ((*sc->sc_rxhir.r_roff) != (*sc->sc_rxhir.r_woff))
			txp_rx_reclaim(sc, &sc->sc_rxhir);
		if ((*sc->sc_rxlor.r_roff) != (*sc->sc_rxlor.r_woff))
			txp_rx_reclaim(sc, &sc->sc_rxlor);

		if (hv->hv_rx_buf_write_idx == hv->hv_rx_buf_read_idx)
			txp_rxbuf_reclaim(sc);

		if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
		    TXP_OFFSET2IDX(*(sc->sc_txhir.r_off))))
			txp_tx_reclaim(sc, &sc->sc_txhir);

		if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
		    TXP_OFFSET2IDX(*(sc->sc_txlor.r_off))))
			txp_tx_reclaim(sc, &sc->sc_txlor);

		isr = READ_REG(sc, TXP_ISR);
	}

	/* unmask all interrupts */
	WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3);

	/* Kick the transmitter in case reclaim freed ring space. */
	txp_start(sc->sc_ifp);

	return;
}

/*
 * Pass received frames from an rx ring up the stack, advancing our
 * read offset to the firmware's write offset.  Hardware checksum and
 * VLAN results are translated into mbuf packet-header flags.
 */
static void
txp_rx_reclaim(sc, r)
	struct txp_softc *sc;
	struct txp_rx_ring *r;
{
	struct ifnet *ifp = sc->sc_ifp;
	struct txp_rx_desc *rxd;
	struct mbuf *m;
	struct txp_swdesc *sd = NULL;
	u_int32_t roff, woff;

	roff = *r->r_roff;
	woff = *r->r_woff;
	rxd = r->r_desc + (roff / sizeof(struct txp_rx_desc));

	while (roff != woff) {
		if (rxd->rx_flags & RX_FLAGS_ERROR) {
			device_printf(sc->sc_dev, "error 0x%x\n",
			    rxd->rx_stat);
			ifp->if_ierrors++;
			goto next;
		}

		/* retrieve stashed pointer */
		sd = rxd->rx_sd;

		m = sd->sd_mbuf;
		sd->sd_mbuf = NULL;

		m->m_pkthdr.len = m->m_len = rxd->rx_len;

#ifdef __STRICT_ALIGNMENT
		{
			/*
			 * XXX Nice chip, except it won't accept "off by 2"
			 * buffers, so we're force to copy.  Supposedly
			 * this will be fixed in a newer firmware rev
			 * and this will be temporary.
			 */
			struct mbuf *mnew;

			MGETHDR(mnew, M_DONTWAIT, MT_DATA);
			if (mnew == NULL) {
				m_freem(m);
				goto next;
			}
			if (m->m_len > (MHLEN - 2)) {
				MCLGET(mnew, M_DONTWAIT);
				if (!(mnew->m_flags & M_EXT)) {
					m_freem(mnew);
					m_freem(m);
					goto next;
				}
			}
			mnew->m_pkthdr.rcvif = ifp;
			/* Realign so the IP header lands on a 4-byte edge. */
			m_adj(mnew, 2);
			mnew->m_pkthdr.len = mnew->m_len = m->m_len;
			m_copydata(m, 0, m->m_pkthdr.len, mtod(mnew, caddr_t));
			m_freem(m);
			m = mnew;
		}
#endif

		if (rxd->rx_stat & RX_STAT_IPCKSUMBAD)
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
		else if (rxd->rx_stat & RX_STAT_IPCKSUMGOOD)
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED|CSUM_IP_VALID;

		if ((rxd->rx_stat & RX_STAT_TCPCKSUMGOOD) ||
		    (rxd->rx_stat & RX_STAT_UDPCKSUMGOOD)) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}

		if (rxd->rx_stat & RX_STAT_VLAN) {
			VLAN_INPUT_TAG(ifp, m,
			    htons(rxd->rx_vlan >> 16), goto next);
		}

		(*ifp->if_input)(ifp, m);

next:
		roff += sizeof(struct txp_rx_desc);
		if (roff == (RX_ENTRIES * sizeof(struct txp_rx_desc))) {
			roff = 0;
			rxd = r->r_desc;
		} else
			rxd++;
		/* Pick up any frames the firmware queued meanwhile. */
		woff = *r->r_woff;
	}

	*r->r_roff = woff;

	return;
}

/*
 * Refill empty slots of the receive-buffer ring with fresh cluster
 * mbufs, starting at sc_rxbufprod and stopping at the first slot that
 * still holds an mbuf.
 *
 * NOTE(review): on allocation failure the err_sd path frees the
 * swdesc itself while rbd->rb_sd still points at it (and err_mbuf does
 * not clear sd->sd_mbuf before falling through) — this looks like a
 * use-after-free hazard for later passes / txp_detach; confirm intent.
 */
static void
txp_rxbuf_reclaim(sc)
	struct txp_softc *sc;
{
	struct ifnet *ifp = sc->sc_ifp;
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_rxbuf_desc *rbd;
	struct txp_swdesc *sd;
	u_int32_t i;

	if (!(ifp->if_flags & IFF_RUNNING))
		return;

	i = sc->sc_rxbufprod;
	rbd = sc->sc_rxbufs + i;

	while (1) {
		sd = rbd->rb_sd;
		if (sd->sd_mbuf != NULL)
			break;

		MGETHDR(sd->sd_mbuf, M_DONTWAIT, MT_DATA);
		if (sd->sd_mbuf == NULL)
			goto err_sd;

		MCLGET(sd->sd_mbuf, M_DONTWAIT);
		if ((sd->sd_mbuf->m_flags & M_EXT) == 0)
			goto err_mbuf;
		sd->sd_mbuf->m_pkthdr.rcvif = ifp;
		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;

		rbd->rb_paddrlo = vtophys(mtod(sd->sd_mbuf, vm_offset_t))
		    & 0xffffffff;
		rbd->rb_paddrhi = 0;

		/* Publish the new producer offset to the firmware. */
		hv->hv_rx_buf_write_idx = TXP_IDX2OFFSET(i);

		if (++i == RXBUF_ENTRIES) {
			i = 0;
			rbd = sc->sc_rxbufs;
		} else
			rbd++;
	}

	sc->sc_rxbufprod = i;

	return;

err_mbuf:
	m_freem(sd->sd_mbuf);
err_sd:
	free(sd, M_DEVBUF);
}
/* * Reclaim mbufs and entries from a transmit ring. */ static void txp_tx_reclaim(sc, r) struct txp_softc *sc; struct txp_tx_ring *r; { struct ifnet *ifp = sc->sc_ifp; u_int32_t idx = TXP_OFFSET2IDX(*(r->r_off)); u_int32_t cons = r->r_cons, cnt = r->r_cnt; struct txp_tx_desc *txd = r->r_desc + cons; struct txp_swdesc *sd = sc->sc_txd + cons; struct mbuf *m; while (cons != idx) { if (cnt == 0) break; if ((txd->tx_flags & TX_FLAGS_TYPE_M) == TX_FLAGS_TYPE_DATA) { m = sd->sd_mbuf; if (m != NULL) { m_freem(m); txd->tx_addrlo = 0; txd->tx_addrhi = 0; ifp->if_opackets++; } } ifp->if_flags &= ~IFF_OACTIVE; if (++cons == TX_ENTRIES) { txd = r->r_desc; cons = 0; sd = sc->sc_txd; } else { txd++; sd++; } cnt--; } r->r_cons = cons; r->r_cnt = cnt; if (cnt == 0) ifp->if_timer = 0; } static int txp_shutdown(dev) device_t dev; { struct txp_softc *sc; sc = device_get_softc(dev); /* mask all interrupts */ WRITE_REG(sc, TXP_IMR, TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | TXP_INT_LATCH); txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0); txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0); txp_command(sc, TXP_CMD_HALT, 0, 0, 0, NULL, NULL, NULL, 0); return(0); } static int txp_alloc_rings(sc) struct txp_softc *sc; { struct txp_boot_record *boot; struct txp_ldata *ld; u_int32_t r; int i; r = 0; ld = sc->sc_ldata; boot = &ld->txp_boot; /* boot record */ sc->sc_boot = boot; /* host variables */ bzero(&ld->txp_hostvar, sizeof(struct txp_hostvar)); boot->br_hostvar_lo = vtophys(&ld->txp_hostvar); boot->br_hostvar_hi = 0; sc->sc_hostvar = (struct txp_hostvar *)&ld->txp_hostvar; /* hi priority tx ring */ boot->br_txhipri_lo = vtophys(&ld->txp_txhiring);; boot->br_txhipri_hi = 0; boot->br_txhipri_siz = TX_ENTRIES * sizeof(struct txp_tx_desc); sc->sc_txhir.r_reg = TXP_H2A_1; sc->sc_txhir.r_desc = (struct txp_tx_desc *)&ld->txp_txhiring; sc->sc_txhir.r_cons = sc->sc_txhir.r_prod = 
sc->sc_txhir.r_cnt = 0; sc->sc_txhir.r_off = &sc->sc_hostvar->hv_tx_hi_desc_read_idx; /* lo priority tx ring */ boot->br_txlopri_lo = vtophys(&ld->txp_txloring); boot->br_txlopri_hi = 0; boot->br_txlopri_siz = TX_ENTRIES * sizeof(struct txp_tx_desc); sc->sc_txlor.r_reg = TXP_H2A_3; sc->sc_txlor.r_desc = (struct txp_tx_desc *)&ld->txp_txloring; sc->sc_txlor.r_cons = sc->sc_txlor.r_prod = sc->sc_txlor.r_cnt = 0; sc->sc_txlor.r_off = &sc->sc_hostvar->hv_tx_lo_desc_read_idx; /* high priority rx ring */ boot->br_rxhipri_lo = vtophys(&ld->txp_rxhiring); boot->br_rxhipri_hi = 0; boot->br_rxhipri_siz = RX_ENTRIES * sizeof(struct txp_rx_desc); sc->sc_rxhir.r_desc = (struct txp_rx_desc *)&ld->txp_rxhiring; sc->sc_rxhir.r_roff = &sc->sc_hostvar->hv_rx_hi_read_idx; sc->sc_rxhir.r_woff = &sc->sc_hostvar->hv_rx_hi_write_idx; /* low priority rx ring */ boot->br_rxlopri_lo = vtophys(&ld->txp_rxloring); boot->br_rxlopri_hi = 0; boot->br_rxlopri_siz = RX_ENTRIES * sizeof(struct txp_rx_desc); sc->sc_rxlor.r_desc = (struct txp_rx_desc *)&ld->txp_rxloring; sc->sc_rxlor.r_roff = &sc->sc_hostvar->hv_rx_lo_read_idx; sc->sc_rxlor.r_woff = &sc->sc_hostvar->hv_rx_lo_write_idx; /* command ring */ bzero(&ld->txp_cmdring, sizeof(struct txp_cmd_desc) * CMD_ENTRIES); boot->br_cmd_lo = vtophys(&ld->txp_cmdring); boot->br_cmd_hi = 0; boot->br_cmd_siz = CMD_ENTRIES * sizeof(struct txp_cmd_desc); sc->sc_cmdring.base = (struct txp_cmd_desc *)&ld->txp_cmdring; sc->sc_cmdring.size = CMD_ENTRIES * sizeof(struct txp_cmd_desc); sc->sc_cmdring.lastwrite = 0; /* response ring */ bzero(&ld->txp_rspring, sizeof(struct txp_rsp_desc) * RSP_ENTRIES); boot->br_resp_lo = vtophys(&ld->txp_rspring); boot->br_resp_hi = 0; boot->br_resp_siz = CMD_ENTRIES * sizeof(struct txp_rsp_desc); sc->sc_rspring.base = (struct txp_rsp_desc *)&ld->txp_rspring; sc->sc_rspring.size = RSP_ENTRIES * sizeof(struct txp_rsp_desc); sc->sc_rspring.lastwrite = 0; /* receive buffer ring */ boot->br_rxbuf_lo = vtophys(&ld->txp_rxbufs); 
boot->br_rxbuf_hi = 0; boot->br_rxbuf_siz = RXBUF_ENTRIES * sizeof(struct txp_rxbuf_desc); sc->sc_rxbufs = (struct txp_rxbuf_desc *)&ld->txp_rxbufs; for (i = 0; i < RXBUF_ENTRIES; i++) { struct txp_swdesc *sd; if (sc->sc_rxbufs[i].rb_sd != NULL) continue; sc->sc_rxbufs[i].rb_sd = malloc(sizeof(struct txp_swdesc), M_DEVBUF, M_NOWAIT); if (sc->sc_rxbufs[i].rb_sd == NULL) return(ENOBUFS); sd = sc->sc_rxbufs[i].rb_sd; sd->sd_mbuf = NULL; } sc->sc_rxbufprod = 0; /* zero dma */ bzero(&ld->txp_zero, sizeof(u_int32_t)); boot->br_zero_lo = vtophys(&ld->txp_zero); boot->br_zero_hi = 0; /* See if it's waiting for boot, and try to boot it */ for (i = 0; i < 10000; i++) { r = READ_REG(sc, TXP_A2H_0); if (r == STAT_WAITING_FOR_BOOT) break; DELAY(50); } if (r != STAT_WAITING_FOR_BOOT) { device_printf(sc->sc_dev, "not waiting for boot\n"); return(ENXIO); } WRITE_REG(sc, TXP_H2A_2, 0); WRITE_REG(sc, TXP_H2A_1, vtophys(sc->sc_boot)); WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_REGISTER_BOOT_RECORD); /* See if it booted */ for (i = 0; i < 10000; i++) { r = READ_REG(sc, TXP_A2H_0); if (r == STAT_RUNNING) break; DELAY(50); } if (r != STAT_RUNNING) { device_printf(sc->sc_dev, "fw not running\n"); return(ENXIO); } /* Clear TX and CMD ring write registers */ WRITE_REG(sc, TXP_H2A_1, TXP_BOOTCMD_NULL); WRITE_REG(sc, TXP_H2A_2, TXP_BOOTCMD_NULL); WRITE_REG(sc, TXP_H2A_3, TXP_BOOTCMD_NULL); WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_NULL); return (0); } static int txp_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct txp_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; int s, error = 0; s = splnet(); switch(command) { case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { txp_init(sc); } else { if (ifp->if_flags & IFF_RUNNING) txp_stop(sc); } break; case SIOCADDMULTI: case SIOCDELMULTI: /* * Multicast list has changed; set the hardware * filter accordingly. 
*/ txp_set_filter(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, command); break; default: error = ether_ioctl(ifp, command, data); break; } (void)splx(s); return(error); } static int txp_rxring_fill(sc) struct txp_softc *sc; { int i; struct ifnet *ifp; struct txp_swdesc *sd; ifp = sc->sc_ifp; for (i = 0; i < RXBUF_ENTRIES; i++) { sd = sc->sc_rxbufs[i].rb_sd; MGETHDR(sd->sd_mbuf, M_DONTWAIT, MT_DATA); if (sd->sd_mbuf == NULL) return(ENOBUFS); MCLGET(sd->sd_mbuf, M_DONTWAIT); if ((sd->sd_mbuf->m_flags & M_EXT) == 0) { m_freem(sd->sd_mbuf); return(ENOBUFS); } sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES; sd->sd_mbuf->m_pkthdr.rcvif = ifp; sc->sc_rxbufs[i].rb_paddrlo = vtophys(mtod(sd->sd_mbuf, vm_offset_t)); sc->sc_rxbufs[i].rb_paddrhi = 0; } sc->sc_hostvar->hv_rx_buf_write_idx = (RXBUF_ENTRIES - 1) * sizeof(struct txp_rxbuf_desc); return(0); } static void txp_rxring_empty(sc) struct txp_softc *sc; { int i; struct txp_swdesc *sd; if (sc->sc_rxbufs == NULL) return; for (i = 0; i < RXBUF_ENTRIES; i++) { if (&sc->sc_rxbufs[i] == NULL) continue; sd = sc->sc_rxbufs[i].rb_sd; if (sd == NULL) continue; if (sd->sd_mbuf != NULL) { m_freem(sd->sd_mbuf); sd->sd_mbuf = NULL; } } return; } static void txp_init(xsc) void *xsc; { struct txp_softc *sc; struct ifnet *ifp; u_int16_t p1; u_int32_t p2; int s; sc = xsc; ifp = sc->sc_ifp; if (ifp->if_flags & IFF_RUNNING) return; txp_stop(sc); s = splnet(); txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0, NULL, NULL, NULL, 1); /* Set station address. 
 */
	/* Same byte order the firmware used for STATION_ADDRESS_READ. */
	((u_int8_t *)&p1)[1] = IFP2ENADDR(sc->sc_ifp)[0];
	((u_int8_t *)&p1)[0] = IFP2ENADDR(sc->sc_ifp)[1];
	((u_int8_t *)&p2)[3] = IFP2ENADDR(sc->sc_ifp)[2];
	((u_int8_t *)&p2)[2] = IFP2ENADDR(sc->sc_ifp)[3];
	((u_int8_t *)&p2)[1] = IFP2ENADDR(sc->sc_ifp)[4];
	((u_int8_t *)&p2)[0] = IFP2ENADDR(sc->sc_ifp)[5];
	txp_command(sc, TXP_CMD_STATION_ADDRESS_WRITE, p1, p2, 0,
	    NULL, NULL, NULL, 1);

	txp_set_filter(sc);

	txp_rxring_fill(sc);

	txp_command(sc, TXP_CMD_TX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1);
	txp_command(sc, TXP_CMD_RX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1);

	WRITE_REG(sc, TXP_IER, TXP_INT_RESERVED | TXP_INT_SELF |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_LATCH);
	WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;

	sc->sc_tick = timeout(txp_tick, sc, hz);

	splx(s);
}

/*
 * Once-a-second timer: refill rx buffers and fold the firmware's
 * READ_STATISTICS response (six extension descriptors) into the ifnet
 * counters, then clear the on-chip statistics and rearm itself.
 */
static void
txp_tick(vsc)
	void *vsc;
{
	struct txp_softc *sc = vsc;
	struct ifnet *ifp = sc->sc_ifp;
	struct txp_rsp_desc *rsp = NULL;
	struct txp_ext_desc *ext;
	int s;

	s = splnet();
	txp_rxbuf_reclaim(sc);

	if (txp_command2(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
	    &rsp, 1))
		goto out;
	if (rsp->rsp_numdesc != 6)
		goto out;
	if (txp_command(sc, TXP_CMD_CLEAR_STATISTICS, 0, 0, 0,
	    NULL, NULL, NULL, 1))
		goto out;
	ext = (struct txp_ext_desc *)(rsp + 1);

	ifp->if_ierrors += ext[3].ext_2 + ext[3].ext_3 + ext[3].ext_4 +
	    ext[4].ext_1 + ext[4].ext_4;
	ifp->if_oerrors += ext[0].ext_1 + ext[1].ext_1 + ext[1].ext_4 +
	    ext[2].ext_1;
	ifp->if_collisions += ext[0].ext_2 + ext[0].ext_3 + ext[1].ext_2 +
	    ext[1].ext_3;
	ifp->if_opackets += rsp->rsp_par2;
	ifp->if_ipackets += ext[2].ext_3;

out:
	if (rsp != NULL)
		free(rsp, M_DEVBUF);

	splx(s);
	sc->sc_tick = timeout(txp_tick, sc, hz);

	return;
}

/*
 * if_start handler: drain the send queue into the high-priority tx
 * ring, one DATA descriptor plus one FRAG descriptor per mbuf in the
 * chain.  Needs at least 4 free entries per packet; otherwise the
 * packet is prepended back onto the queue, the producer state is
 * rolled back, and IFF_OACTIVE is set until reclaim frees space.
 */
static void
txp_start(ifp)
	struct ifnet *ifp;
{
	struct txp_softc *sc = ifp->if_softc;
	struct txp_tx_ring *r = &sc->sc_txhir;
	struct txp_tx_desc *txd;
	struct txp_frag_desc *fxd;
	struct mbuf *m, *m0;
	struct txp_swdesc *sd;
	u_int32_t firstprod, firstcnt, prod, cnt;
	struct m_tag *mtag;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	prod = r->r_prod;
	cnt = r->r_cnt;

	while (1) {
		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/* Remembered so a failed packet can be rolled back. */
		firstprod = prod;
		firstcnt = cnt;

		sd = sc->sc_txd + prod;
		sd->sd_mbuf = m;

		if ((TX_ENTRIES - cnt) < 4)
			goto oactive;

		txd = r->r_desc + prod;

		txd->tx_flags = TX_FLAGS_TYPE_DATA;
		txd->tx_numdesc = 0;
		txd->tx_addrlo = 0;
		txd->tx_addrhi = 0;
		txd->tx_totlen = 0;
		txd->tx_pflags = 0;

		if (++prod == TX_ENTRIES)
			prod = 0;

		if (++cnt >= (TX_ENTRIES - 4))
			goto oactive;

		mtag = VLAN_OUTPUT_TAG(ifp, m);
		if (mtag != NULL) {
			txd->tx_pflags = TX_PFLAGS_VLAN |
			    (htons(VLAN_TAG_VALUE(mtag)) << TX_PFLAGS_VLANTAG_S);
		}

		if (m->m_pkthdr.csum_flags & CSUM_IP)
			txd->tx_pflags |= TX_PFLAGS_IPCKSUM;

#if 0
		if (m->m_pkthdr.csum_flags & CSUM_TCP)
			txd->tx_pflags |= TX_PFLAGS_TCPCKSUM;
		if (m->m_pkthdr.csum_flags & CSUM_UDP)
			txd->tx_pflags |= TX_PFLAGS_UDPCKSUM;
#endif

		fxd = (struct txp_frag_desc *)(r->r_desc + prod);
		for (m0 = m; m0 != NULL; m0 = m0->m_next) {
			if (m0->m_len == 0)
				continue;
			if (++cnt >= (TX_ENTRIES - 4))
				goto oactive;

			txd->tx_numdesc++;

			fxd->frag_flags = FRAG_FLAGS_TYPE_FRAG;
			fxd->frag_rsvd1 = 0;
			fxd->frag_len = m0->m_len;
			fxd->frag_addrlo = vtophys(mtod(m0, vm_offset_t));
			fxd->frag_addrhi = 0;
			fxd->frag_rsvd2 = 0;

			if (++prod == TX_ENTRIES) {
				fxd = (struct txp_frag_desc *)r->r_desc;
				prod = 0;
			} else
				fxd++;
		}

		ifp->if_timer = 5;

		BPF_MTAP(ifp, m);
		/* Tell the firmware where the new producer offset is. */
		WRITE_REG(sc, r->r_reg, TXP_IDX2OFFSET(prod));
	}

	r->r_prod = prod;
	r->r_cnt = cnt;
	return;

oactive:
	ifp->if_flags |= IFF_OACTIVE;
	r->r_prod = firstprod;
	r->r_cnt = firstcnt;
	IF_PREPEND(&ifp->if_snd, m);
	return;
}

/*
 * Handle simple commands sent to the typhoon
 *
 * Wrapper around txp_command2() for commands with no extension
 * descriptors: on a waited command the three response parameters are
 * copied to out1..out3 (each optional) and the response is freed.
 * Returns 0 on success, -1 on failure.
 */
static int
txp_command(sc, id, in1, in2, in3, out1, out2, out3, wait)
	struct txp_softc *sc;
	u_int16_t id, in1, *out1;
	u_int32_t in2, in3, *out2, *out3;
	int wait;
{
	struct txp_rsp_desc *rsp = NULL;

	if (txp_command2(sc, id, in1, in2, in3, NULL, 0, &rsp, wait))
		return (-1);

	if (!wait)
		return (0);

	if (out1 != NULL)
		*out1 = rsp->rsp_par1;
	if (out2 != NULL)
		*out2 = rsp->rsp_par2;
	if (out3 != NULL)
		*out3 = rsp->rsp_par3;
	free(rsp, M_DEVBUF);
	return (0);
}

/*
 * Queue a command (with optional extension descriptors) on the command
 * ring; when 'wait' is set the caller receives the malloc'd response
 * via *rspp and must free it.  (Definition continues beyond this chunk.)
 */
static int
txp_command2(sc, id, in1, in2, in3, in_extp, in_extn, rspp, wait)
	struct txp_softc *sc;
	u_int16_t id, in1;
	u_int32_t in2, in3;
	struct txp_ext_desc *in_extp;
	u_int8_t in_extn;
	struct txp_rsp_desc **rspp;
	int wait;
{
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_cmd_desc *cmd;
	struct txp_ext_desc *ext;
	u_int32_t idx, i;
	u_int16_t seq;

	if (txp_cmd_desc_numfree(sc) < (in_extn + 1)) {
		device_printf(sc->sc_dev, "no free cmd descriptors\n");
		return (-1);
	}

	idx = sc->sc_cmdring.lastwrite;
	cmd = (struct txp_cmd_desc *)(((u_int8_t *)sc->sc_cmdring.base) + idx);
	bzero(cmd, sizeof(*cmd));

	cmd->cmd_numdesc = in_extn;
	cmd->cmd_seq = seq = sc->sc_seq++;
	cmd->cmd_id = id;
	cmd->cmd_par1 = in1;
	cmd->cmd_par2 = in2;
	cmd->cmd_par3 = in3;
	cmd->cmd_flags = CMD_FLAGS_TYPE_CMD |
	    (wait ?
CMD_FLAGS_RESP : 0) | CMD_FLAGS_VALID; idx += sizeof(struct txp_cmd_desc); if (idx == sc->sc_cmdring.size) idx = 0; for (i = 0; i < in_extn; i++) { ext = (struct txp_ext_desc *)(((u_int8_t *)sc->sc_cmdring.base) + idx); bcopy(in_extp, ext, sizeof(struct txp_ext_desc)); in_extp++; idx += sizeof(struct txp_cmd_desc); if (idx == sc->sc_cmdring.size) idx = 0; } sc->sc_cmdring.lastwrite = idx; WRITE_REG(sc, TXP_H2A_2, sc->sc_cmdring.lastwrite); if (!wait) return (0); for (i = 0; i < 10000; i++) { idx = hv->hv_resp_read_idx; if (idx != hv->hv_resp_write_idx) { *rspp = NULL; if (txp_response(sc, idx, id, seq, rspp)) return (-1); if (*rspp != NULL) break; } DELAY(50); } if (i == 1000 || (*rspp) == NULL) { device_printf(sc->sc_dev, "0x%x command failed\n", id); return (-1); } return (0); } static int txp_response(sc, ridx, id, seq, rspp) struct txp_softc *sc; u_int32_t ridx; u_int16_t id; u_int16_t seq; struct txp_rsp_desc **rspp; { struct txp_hostvar *hv = sc->sc_hostvar; struct txp_rsp_desc *rsp; while (ridx != hv->hv_resp_write_idx) { rsp = (struct txp_rsp_desc *)(((u_int8_t *)sc->sc_rspring.base) + ridx); if (id == rsp->rsp_id && rsp->rsp_seq == seq) { *rspp = (struct txp_rsp_desc *)malloc( sizeof(struct txp_rsp_desc) * (rsp->rsp_numdesc + 1), M_DEVBUF, M_NOWAIT); if ((*rspp) == NULL) return (-1); txp_rsp_fixup(sc, rsp, *rspp); return (0); } if (rsp->rsp_flags & RSP_FLAGS_ERROR) { device_printf(sc->sc_dev, "response error!\n"); txp_rsp_fixup(sc, rsp, NULL); ridx = hv->hv_resp_read_idx; continue; } switch (rsp->rsp_id) { case TXP_CMD_CYCLE_STATISTICS: case TXP_CMD_MEDIA_STATUS_READ: break; case TXP_CMD_HELLO_RESPONSE: device_printf(sc->sc_dev, "hello\n"); break; default: device_printf(sc->sc_dev, "unknown id(0x%x)\n", rsp->rsp_id); } txp_rsp_fixup(sc, rsp, NULL); ridx = hv->hv_resp_read_idx; hv->hv_resp_read_idx = ridx; } return (0); } static void txp_rsp_fixup(sc, rsp, dst) struct txp_softc *sc; struct txp_rsp_desc *rsp, *dst; { struct txp_rsp_desc *src = rsp; struct 
txp_hostvar *hv = sc->sc_hostvar; u_int32_t i, ridx; ridx = hv->hv_resp_read_idx; for (i = 0; i < rsp->rsp_numdesc + 1; i++) { if (dst != NULL) bcopy(src, dst++, sizeof(struct txp_rsp_desc)); ridx += sizeof(struct txp_rsp_desc); if (ridx == sc->sc_rspring.size) { src = sc->sc_rspring.base; ridx = 0; } else src++; sc->sc_rspring.lastwrite = hv->hv_resp_read_idx = ridx; } hv->hv_resp_read_idx = ridx; } static int txp_cmd_desc_numfree(sc) struct txp_softc *sc; { struct txp_hostvar *hv = sc->sc_hostvar; struct txp_boot_record *br = sc->sc_boot; u_int32_t widx, ridx, nfree; widx = sc->sc_cmdring.lastwrite; ridx = hv->hv_cmd_read_idx; if (widx == ridx) { /* Ring is completely free */ nfree = br->br_cmd_siz - sizeof(struct txp_cmd_desc); } else { if (widx > ridx) nfree = br->br_cmd_siz - (widx - ridx + sizeof(struct txp_cmd_desc)); else nfree = ridx - widx - sizeof(struct txp_cmd_desc); } return (nfree / sizeof(struct txp_cmd_desc)); } static void txp_stop(sc) struct txp_softc *sc; { struct ifnet *ifp; ifp = sc->sc_ifp; ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); untimeout(txp_tick, sc, sc->sc_tick); txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1); txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1); txp_rxring_empty(sc); return; } static void txp_watchdog(ifp) struct ifnet *ifp; { return; } static int txp_ifmedia_upd(ifp) struct ifnet *ifp; { struct txp_softc *sc = ifp->if_softc; struct ifmedia *ifm = &sc->sc_ifmedia; u_int16_t new_xcvr; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) { if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) new_xcvr = TXP_XCVR_10_FDX; else new_xcvr = TXP_XCVR_10_HDX; } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) { if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) new_xcvr = TXP_XCVR_100_FDX; else new_xcvr = TXP_XCVR_100_HDX; } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) { new_xcvr = TXP_XCVR_AUTO; } else return (EINVAL); /* nothing to do */ if 
(sc->sc_xcvr == new_xcvr) return (0); txp_command(sc, TXP_CMD_XCVR_SELECT, new_xcvr, 0, 0, NULL, NULL, NULL, 0); sc->sc_xcvr = new_xcvr; return (0); } static void txp_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct txp_softc *sc = ifp->if_softc; struct ifmedia *ifm = &sc->sc_ifmedia; u_int16_t bmsr, bmcr, anlpar; ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0, &bmsr, NULL, NULL, 1)) goto bail; if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0, &bmsr, NULL, NULL, 1)) goto bail; if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMCR, 0, &bmcr, NULL, NULL, 1)) goto bail; if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANLPAR, 0, &anlpar, NULL, NULL, 1)) goto bail; if (bmsr & BMSR_LINK) ifmr->ifm_status |= IFM_ACTIVE; if (bmcr & BMCR_ISO) { ifmr->ifm_active |= IFM_NONE; ifmr->ifm_status = 0; return; } if (bmcr & BMCR_LOOP) ifmr->ifm_active |= IFM_LOOP; if (bmcr & BMCR_AUTOEN) { if ((bmsr & BMSR_ACOMP) == 0) { ifmr->ifm_active |= IFM_NONE; return; } if (anlpar & ANLPAR_T4) ifmr->ifm_active |= IFM_100_T4; else if (anlpar & ANLPAR_TX_FD) ifmr->ifm_active |= IFM_100_TX|IFM_FDX; else if (anlpar & ANLPAR_TX) ifmr->ifm_active |= IFM_100_TX; else if (anlpar & ANLPAR_10_FD) ifmr->ifm_active |= IFM_10_T|IFM_FDX; else if (anlpar & ANLPAR_10) ifmr->ifm_active |= IFM_10_T; else ifmr->ifm_active |= IFM_NONE; } else ifmr->ifm_active = ifm->ifm_cur->ifm_media; return; bail: ifmr->ifm_active |= IFM_NONE; ifmr->ifm_status &= ~IFM_AVALID; } #ifdef TXP_DEBUG static void txp_show_descriptor(d) void *d; { struct txp_cmd_desc *cmd = d; struct txp_rsp_desc *rsp = d; struct txp_tx_desc *txd = d; struct txp_frag_desc *frgd = d; switch (cmd->cmd_flags & CMD_FLAGS_TYPE_M) { case CMD_FLAGS_TYPE_CMD: /* command descriptor */ printf("[cmd flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n", cmd->cmd_flags, cmd->cmd_numdesc, cmd->cmd_id, cmd->cmd_seq, cmd->cmd_par1, 
cmd->cmd_par2, cmd->cmd_par3); break; case CMD_FLAGS_TYPE_RESP: /* response descriptor */ printf("[rsp flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n", rsp->rsp_flags, rsp->rsp_numdesc, rsp->rsp_id, rsp->rsp_seq, rsp->rsp_par1, rsp->rsp_par2, rsp->rsp_par3); break; case CMD_FLAGS_TYPE_DATA: /* data header (assuming tx for now) */ printf("[data flags 0x%x num %d totlen %d addr 0x%x/0x%x pflags 0x%x]", txd->tx_flags, txd->tx_numdesc, txd->tx_totlen, txd->tx_addrlo, txd->tx_addrhi, txd->tx_pflags); break; case CMD_FLAGS_TYPE_FRAG: /* fragment descriptor */ printf("[frag flags 0x%x rsvd1 0x%x len %d addr 0x%x/0x%x rsvd2 0x%x]", frgd->frag_flags, frgd->frag_rsvd1, frgd->frag_len, frgd->frag_addrlo, frgd->frag_addrhi, frgd->frag_rsvd2); break; default: printf("[unknown(%x) flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n", cmd->cmd_flags & CMD_FLAGS_TYPE_M, cmd->cmd_flags, cmd->cmd_numdesc, cmd->cmd_id, cmd->cmd_seq, cmd->cmd_par1, cmd->cmd_par2, cmd->cmd_par3); break; } } #endif static void txp_set_filter(sc) struct txp_softc *sc; { struct ifnet *ifp = sc->sc_ifp; u_int32_t crc, carry, hashbit, hash[2]; u_int16_t filter; u_int8_t octet; int i, j, mcnt = 0; struct ifmultiaddr *ifma; char *enm; if (ifp->if_flags & IFF_PROMISC) { filter = TXP_RXFILT_PROMISC; goto setit; } filter = TXP_RXFILT_DIRECT; if (ifp->if_flags & IFF_BROADCAST) filter |= TXP_RXFILT_BROADCAST; if (ifp->if_flags & IFF_ALLMULTI) filter |= TXP_RXFILT_ALLMULTI; else { hash[0] = hash[1] = 0; + IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; enm = LLADDR((struct sockaddr_dl *)ifma->ifma_addr); mcnt++; crc = 0xffffffff; for (i = 0; i < ETHER_ADDR_LEN; i++) { octet = enm[i]; for (j = 0; j < 8; j++) { carry = ((crc & 0x80000000) ? 
1 : 0) ^ (octet & 1); crc <<= 1; octet >>= 1; if (carry) crc = (crc ^ TXP_POLYNOMIAL) | carry; } } hashbit = (u_int16_t)(crc & (64 - 1)); hash[hashbit / 32] |= (1 << hashbit % 32); } + IF_ADDR_UNLOCK(ifp); if (mcnt > 0) { filter |= TXP_RXFILT_HASHMULTI; txp_command(sc, TXP_CMD_MCAST_HASH_MASK_WRITE, 2, hash[0], hash[1], NULL, NULL, NULL, 0); } } setit: txp_command(sc, TXP_CMD_RX_FILTER_WRITE, filter, 0, 0, NULL, NULL, NULL, 1); return; } static void txp_capabilities(sc) struct txp_softc *sc; { struct ifnet *ifp = sc->sc_ifp; struct txp_rsp_desc *rsp = NULL; struct txp_ext_desc *ext; if (txp_command2(sc, TXP_CMD_OFFLOAD_READ, 0, 0, 0, NULL, 0, &rsp, 1)) goto out; if (rsp->rsp_numdesc != 1) goto out; ext = (struct txp_ext_desc *)(rsp + 1); sc->sc_tx_capability = ext->ext_1 & OFFLOAD_MASK; sc->sc_rx_capability = ext->ext_2 & OFFLOAD_MASK; ifp->if_capabilities = 0; if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_VLAN) { sc->sc_tx_capability |= OFFLOAD_VLAN; sc->sc_rx_capability |= OFFLOAD_VLAN; ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; } #if 0 /* not ready yet */ if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPSEC) { sc->sc_tx_capability |= OFFLOAD_IPSEC; sc->sc_rx_capability |= OFFLOAD_IPSEC; ifp->if_capabilities |= IFCAP_IPSEC; } #endif if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPCKSUM) { sc->sc_tx_capability |= OFFLOAD_IPCKSUM; sc->sc_rx_capability |= OFFLOAD_IPCKSUM; ifp->if_capabilities |= IFCAP_HWCSUM; ifp->if_hwassist |= CSUM_IP; } if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_TCPCKSUM) { #if 0 sc->sc_tx_capability |= OFFLOAD_TCPCKSUM; #endif sc->sc_rx_capability |= OFFLOAD_TCPCKSUM; ifp->if_capabilities |= IFCAP_HWCSUM; } if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_UDPCKSUM) { #if 0 sc->sc_tx_capability |= OFFLOAD_UDPCKSUM; #endif sc->sc_rx_capability |= OFFLOAD_UDPCKSUM; ifp->if_capabilities |= IFCAP_HWCSUM; } ifp->if_capenable = ifp->if_capabilities; if (txp_command(sc, TXP_CMD_OFFLOAD_WRITE, 0, sc->sc_tx_capability, sc->sc_rx_capability, NULL, NULL, NULL, 1)) 
goto out; out: if (rsp != NULL) free(rsp, M_DEVBUF); return; } Index: stable/6/sys/dev/usb/if_aue.c =================================================================== --- stable/6/sys/dev/usb/if_aue.c (revision 149421) +++ stable/6/sys/dev/usb/if_aue.c (revision 149422) @@ -1,1450 +1,1452 @@ /*- * Copyright (c) 1997, 1998, 1999, 2000 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * ADMtek AN986 Pegasus and AN8511 Pegasus II USB to ethernet driver. * Datasheet is available from http://www.admtek.com.tw. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The Pegasus chip uses four USB "endpoints" to provide 10/100 ethernet * support: the control endpoint for reading/writing registers, burst * read endpoint for packet reception, burst write for packet transmission * and one for "interrupts." The chip uses the same RX filter scheme * as the other ADMtek ethernet parts: one perfect filter entry for the * the station address and a 64-bit multicast hash table. The chip supports * both MII and HomePNA attachments. * * Since the maximum data transfer speed of USB is supposed to be 12Mbps, * you're never really going to get 100Mbps speeds from this device. I * think the idea is to allow the device to connect to 10 or 100Mbps * networks, not necessarily to provide 100Mbps performance. Also, since * the controller uses an external PHY chip, it's possible that board * designers might simply choose a 10Mbps PHY. * * Registers are accessed using usbd_do_request(). Packet transfers are * done using usbd_transfer() and friends. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if __FreeBSD_version < 500000 #include #endif #include #include #include #include #include "usbdevs.h" #include #include #include #include MODULE_DEPEND(aue, usb, 1, 1, 1); MODULE_DEPEND(aue, ether, 1, 1, 1); MODULE_DEPEND(aue, miibus, 1, 1, 1); /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* * Various supported device vendors/products. */ struct aue_type { struct usb_devno aue_dev; u_int16_t aue_flags; #define LSYS 0x0001 /* use Linksys reset */ #define PNA 0x0002 /* has Home PNA */ #define PII 0x0004 /* Pegasus II chip */ }; Static const struct aue_type aue_devs[] = { {{ USB_VENDOR_3COM, USB_PRODUCT_3COM_3C460B}, PII }, {{ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_XX1}, PNA|PII }, {{ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_XX2}, PII }, {{ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_UFE1000}, LSYS }, {{ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_XX4}, PNA }, {{ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_XX5}, PNA }, {{ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_XX6}, PII }, {{ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_XX7}, PII }, {{ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_XX8}, PII }, {{ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_XX9}, PNA }, {{ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_XX10}, 0 }, {{ USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_DSB650TX_PNA}, 0 }, {{ USB_VENDOR_ACCTON, USB_PRODUCT_ACCTON_USB320_EC}, 0 }, {{ USB_VENDOR_ACCTON, USB_PRODUCT_ACCTON_SS1001}, PII }, {{ USB_VENDOR_ADMTEK, USB_PRODUCT_ADMTEK_PEGASUS}, PNA }, {{ USB_VENDOR_ADMTEK, USB_PRODUCT_ADMTEK_PEGASUSII}, PII }, {{ USB_VENDOR_ADMTEK, USB_PRODUCT_ADMTEK_PEGASUSII_2}, PII }, {{ USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_USB2LAN}, PII }, {{ USB_VENDOR_BILLIONTON, USB_PRODUCT_BILLIONTON_USB100}, 0 }, {{ USB_VENDOR_BILLIONTON, USB_PRODUCT_BILLIONTON_USBLP100}, PNA }, {{ USB_VENDOR_BILLIONTON, USB_PRODUCT_BILLIONTON_USBEL100}, 0 }, {{ 
USB_VENDOR_BILLIONTON, USB_PRODUCT_BILLIONTON_USBE100}, PII }, {{ USB_VENDOR_COREGA, USB_PRODUCT_COREGA_FETHER_USB_TX}, 0 }, {{ USB_VENDOR_COREGA, USB_PRODUCT_COREGA_FETHER_USB_TXS},PII }, {{ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DSB650TX4}, LSYS|PII }, {{ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DSB650TX1}, LSYS }, {{ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DSB650TX}, LSYS }, {{ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DSB650TX_PNA}, PNA }, {{ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DSB650TX3}, LSYS|PII }, {{ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DSB650TX2}, LSYS|PII }, {{ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DSB650}, LSYS }, {{ USB_VENDOR_ELECOM, USB_PRODUCT_ELECOM_LDUSBTX0}, 0 }, {{ USB_VENDOR_ELECOM, USB_PRODUCT_ELECOM_LDUSBTX1}, LSYS }, {{ USB_VENDOR_ELECOM, USB_PRODUCT_ELECOM_LDUSBTX2}, 0 }, {{ USB_VENDOR_ELECOM, USB_PRODUCT_ELECOM_LDUSBTX3}, LSYS }, {{ USB_VENDOR_ELECOM, USB_PRODUCT_ELECOM_LDUSBLTX}, PII }, {{ USB_VENDOR_ELSA, USB_PRODUCT_ELSA_USB2ETHERNET}, 0 }, {{ USB_VENDOR_HAWKING, USB_PRODUCT_HAWKING_UF100}, PII }, {{ USB_VENDOR_HP, USB_PRODUCT_HP_HN210E}, PII }, {{ USB_VENDOR_IODATA, USB_PRODUCT_IODATA_USBETTX}, 0 }, {{ USB_VENDOR_IODATA, USB_PRODUCT_IODATA_USBETTXS}, PII }, {{ USB_VENDOR_KINGSTON, USB_PRODUCT_KINGSTON_KNU101TX}, 0 }, {{ USB_VENDOR_LINKSYS, USB_PRODUCT_LINKSYS_USB10TX1}, LSYS|PII }, {{ USB_VENDOR_LINKSYS, USB_PRODUCT_LINKSYS_USB10T}, LSYS }, {{ USB_VENDOR_LINKSYS, USB_PRODUCT_LINKSYS_USB100TX}, LSYS }, {{ USB_VENDOR_LINKSYS, USB_PRODUCT_LINKSYS_USB100H1}, LSYS|PNA }, {{ USB_VENDOR_LINKSYS, USB_PRODUCT_LINKSYS_USB10TA}, LSYS }, {{ USB_VENDOR_LINKSYS, USB_PRODUCT_LINKSYS_USB10TX2}, LSYS|PII }, {{ USB_VENDOR_MICROSOFT, USB_PRODUCT_MICROSOFT_MN110}, PII }, {{ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_LUATX1}, 0 }, {{ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_LUATX5}, 0 }, {{ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_LUA2TX5}, PII }, {{ USB_VENDOR_SIEMENS, USB_PRODUCT_SIEMENS_SPEEDSTREAM}, PII }, {{ USB_VENDOR_SMARTBRIDGES, USB_PRODUCT_SMARTBRIDGES_SMARTNIC},PII }, {{ 
USB_VENDOR_SMC, USB_PRODUCT_SMC_2202USB}, 0 }, {{ USB_VENDOR_SMC, USB_PRODUCT_SMC_2206USB}, PII }, {{ USB_VENDOR_SOHOWARE, USB_PRODUCT_SOHOWARE_NUB100}, 0 }, }; #define aue_lookup(v, p) ((const struct aue_type *)usb_lookup(aue_devs, v, p)) Static int aue_match(device_ptr_t); Static int aue_attach(device_ptr_t); Static int aue_detach(device_ptr_t); Static void aue_reset_pegasus_II(struct aue_softc *sc); Static int aue_encap(struct aue_softc *, struct mbuf *, int); #ifdef AUE_INTR_PIPE Static void aue_intr(usbd_xfer_handle, usbd_private_handle, usbd_status); #endif Static void aue_rxeof(usbd_xfer_handle, usbd_private_handle, usbd_status); Static void aue_txeof(usbd_xfer_handle, usbd_private_handle, usbd_status); Static void aue_tick(void *); Static void aue_rxstart(struct ifnet *); Static void aue_start(struct ifnet *); Static int aue_ioctl(struct ifnet *, u_long, caddr_t); Static void aue_init(void *); Static void aue_stop(struct aue_softc *); Static void aue_watchdog(struct ifnet *); Static void aue_shutdown(device_ptr_t); Static int aue_ifmedia_upd(struct ifnet *); Static void aue_ifmedia_sts(struct ifnet *, struct ifmediareq *); Static void aue_eeprom_getword(struct aue_softc *, int, u_int16_t *); Static void aue_read_eeprom(struct aue_softc *, caddr_t, int, int, int); Static int aue_miibus_readreg(device_ptr_t, int, int); Static int aue_miibus_writereg(device_ptr_t, int, int, int); Static void aue_miibus_statchg(device_ptr_t); Static void aue_setmulti(struct aue_softc *); Static void aue_reset(struct aue_softc *); Static int aue_csr_read_1(struct aue_softc *, int); Static int aue_csr_write_1(struct aue_softc *, int, int); Static int aue_csr_read_2(struct aue_softc *, int); Static int aue_csr_write_2(struct aue_softc *, int, int); Static device_method_t aue_methods[] = { /* Device interface */ DEVMETHOD(device_probe, aue_match), DEVMETHOD(device_attach, aue_attach), DEVMETHOD(device_detach, aue_detach), DEVMETHOD(device_shutdown, aue_shutdown), /* bus interface 
*/ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, aue_miibus_readreg), DEVMETHOD(miibus_writereg, aue_miibus_writereg), DEVMETHOD(miibus_statchg, aue_miibus_statchg), { 0, 0 } }; Static driver_t aue_driver = { "aue", aue_methods, sizeof(struct aue_softc) }; Static devclass_t aue_devclass; DRIVER_MODULE(aue, uhub, aue_driver, aue_devclass, usbd_driver_load, 0); DRIVER_MODULE(miibus, aue, miibus_driver, miibus_devclass, 0, 0); #define AUE_SETBIT(sc, reg, x) \ aue_csr_write_1(sc, reg, aue_csr_read_1(sc, reg) | (x)) #define AUE_CLRBIT(sc, reg, x) \ aue_csr_write_1(sc, reg, aue_csr_read_1(sc, reg) & ~(x)) Static int aue_csr_read_1(struct aue_softc *sc, int reg) { usb_device_request_t req; usbd_status err; u_int8_t val = 0; if (sc->aue_dying) return (0); AUE_LOCK(sc); req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = AUE_UR_READREG; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, 1); err = usbd_do_request(sc->aue_udev, &req, &val); AUE_UNLOCK(sc); if (err) { return (0); } return (val); } Static int aue_csr_read_2(struct aue_softc *sc, int reg) { usb_device_request_t req; usbd_status err; u_int16_t val = 0; if (sc->aue_dying) return (0); AUE_LOCK(sc); req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = AUE_UR_READREG; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, 2); err = usbd_do_request(sc->aue_udev, &req, &val); AUE_UNLOCK(sc); if (err) { return (0); } return (val); } Static int aue_csr_write_1(struct aue_softc *sc, int reg, int val) { usb_device_request_t req; usbd_status err; if (sc->aue_dying) return (0); AUE_LOCK(sc); req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = AUE_UR_WRITEREG; USETW(req.wValue, val); USETW(req.wIndex, reg); USETW(req.wLength, 1); err = usbd_do_request(sc->aue_udev, &req, &val); AUE_UNLOCK(sc); if (err) { return (-1); } return (0); } Static int aue_csr_write_2(struct aue_softc 
*sc, int reg, int val) { usb_device_request_t req; usbd_status err; if (sc->aue_dying) return (0); AUE_LOCK(sc); req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = AUE_UR_WRITEREG; USETW(req.wValue, val); USETW(req.wIndex, reg); USETW(req.wLength, 2); err = usbd_do_request(sc->aue_udev, &req, &val); AUE_UNLOCK(sc); if (err) { return (-1); } return (0); } /* * Read a word of data stored in the EEPROM at address 'addr.' */ Static void aue_eeprom_getword(struct aue_softc *sc, int addr, u_int16_t *dest) { int i; u_int16_t word = 0; aue_csr_write_1(sc, AUE_EE_REG, addr); aue_csr_write_1(sc, AUE_EE_CTL, AUE_EECTL_READ); for (i = 0; i < AUE_TIMEOUT; i++) { if (aue_csr_read_1(sc, AUE_EE_CTL) & AUE_EECTL_DONE) break; } if (i == AUE_TIMEOUT) { printf("aue%d: EEPROM read timed out\n", sc->aue_unit); } word = aue_csr_read_2(sc, AUE_EE_DATA); *dest = word; return; } /* * Read a sequence of words from the EEPROM. */ Static void aue_read_eeprom(struct aue_softc *sc, caddr_t dest, int off, int cnt, int swap) { int i; u_int16_t word = 0, *ptr; for (i = 0; i < cnt; i++) { aue_eeprom_getword(sc, off + i, &word); ptr = (u_int16_t *)(dest + (i * 2)); if (swap) *ptr = ntohs(word); else *ptr = word; } return; } Static int aue_miibus_readreg(device_ptr_t dev, int phy, int reg) { struct aue_softc *sc = USBGETSOFTC(dev); int i; u_int16_t val = 0; /* * The Am79C901 HomePNA PHY actually contains * two transceivers: a 1Mbps HomePNA PHY and a * 10Mbps full/half duplex ethernet PHY with * NWAY autoneg. However in the ADMtek adapter, * only the 1Mbps PHY is actually connected to * anything, so we ignore the 10Mbps one. It * happens to be configured for MII address 3, * so we filter that out. 
*/ if (sc->aue_vendor == USB_VENDOR_ADMTEK && sc->aue_product == USB_PRODUCT_ADMTEK_PEGASUS) { if (phy == 3) return (0); #ifdef notdef if (phy != 1) return (0); #endif } aue_csr_write_1(sc, AUE_PHY_ADDR, phy); aue_csr_write_1(sc, AUE_PHY_CTL, reg | AUE_PHYCTL_READ); for (i = 0; i < AUE_TIMEOUT; i++) { if (aue_csr_read_1(sc, AUE_PHY_CTL) & AUE_PHYCTL_DONE) break; } if (i == AUE_TIMEOUT) { printf("aue%d: MII read timed out\n", sc->aue_unit); } val = aue_csr_read_2(sc, AUE_PHY_DATA); return (val); } Static int aue_miibus_writereg(device_ptr_t dev, int phy, int reg, int data) { struct aue_softc *sc = USBGETSOFTC(dev); int i; if (phy == 3) return (0); aue_csr_write_2(sc, AUE_PHY_DATA, data); aue_csr_write_1(sc, AUE_PHY_ADDR, phy); aue_csr_write_1(sc, AUE_PHY_CTL, reg | AUE_PHYCTL_WRITE); for (i = 0; i < AUE_TIMEOUT; i++) { if (aue_csr_read_1(sc, AUE_PHY_CTL) & AUE_PHYCTL_DONE) break; } if (i == AUE_TIMEOUT) { printf("aue%d: MII read timed out\n", sc->aue_unit); } return(0); } Static void aue_miibus_statchg(device_ptr_t dev) { struct aue_softc *sc = USBGETSOFTC(dev); struct mii_data *mii = GET_MII(sc); AUE_CLRBIT(sc, AUE_CTL0, AUE_CTL0_RX_ENB | AUE_CTL0_TX_ENB); if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) { AUE_SETBIT(sc, AUE_CTL1, AUE_CTL1_SPEEDSEL); } else { AUE_CLRBIT(sc, AUE_CTL1, AUE_CTL1_SPEEDSEL); } if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) AUE_SETBIT(sc, AUE_CTL1, AUE_CTL1_DUPLEX); else AUE_CLRBIT(sc, AUE_CTL1, AUE_CTL1_DUPLEX); AUE_SETBIT(sc, AUE_CTL0, AUE_CTL0_RX_ENB | AUE_CTL0_TX_ENB); /* * Set the LED modes on the LinkSys adapter. * This turns on the 'dual link LED' bin in the auxmode * register of the Broadcom PHY. 
*/ if (sc->aue_flags & LSYS) { u_int16_t auxmode; auxmode = aue_miibus_readreg(dev, 0, 0x1b); aue_miibus_writereg(dev, 0, 0x1b, auxmode | 0x04); } return; } #define AUE_BITS 6 Static void aue_setmulti(struct aue_softc *sc) { struct ifnet *ifp; struct ifmultiaddr *ifma; u_int32_t h = 0, i; ifp = sc->aue_ifp; if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { AUE_SETBIT(sc, AUE_CTL0, AUE_CTL0_ALLMULTI); return; } AUE_CLRBIT(sc, AUE_CTL0, AUE_CTL0_ALLMULTI); /* first, zot all the existing hash bits */ for (i = 0; i < 8; i++) aue_csr_write_1(sc, AUE_MAR0 + i, 0); /* now program new ones */ + IF_ADDR_LOCK(ifp); #if __FreeBSD_version >= 500000 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) #else LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) #endif { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = ether_crc32_le(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) & ((1 << AUE_BITS) - 1); AUE_SETBIT(sc, AUE_MAR + (h >> 3), 1 << (h & 0x7)); } + IF_ADDR_UNLOCK(ifp); return; } Static void aue_reset_pegasus_II(struct aue_softc *sc) { /* Magic constants taken from Linux driver. */ aue_csr_write_1(sc, AUE_REG_1D, 0); aue_csr_write_1(sc, AUE_REG_7B, 2); #if 0 if ((sc->aue_flags & HAS_HOME_PNA) && mii_mode) aue_csr_write_1(sc, AUE_REG_81, 6); else #endif aue_csr_write_1(sc, AUE_REG_81, 2); } Static void aue_reset(struct aue_softc *sc) { int i; AUE_SETBIT(sc, AUE_CTL1, AUE_CTL1_RESETMAC); for (i = 0; i < AUE_TIMEOUT; i++) { if (!(aue_csr_read_1(sc, AUE_CTL1) & AUE_CTL1_RESETMAC)) break; } if (i == AUE_TIMEOUT) printf("aue%d: reset failed\n", sc->aue_unit); /* * The PHY(s) attached to the Pegasus chip may be held * in reset until we flip on the GPIO outputs. Make sure * to set the GPIO pins high so that the PHY(s) will * be enabled. * * Note: We force all of the GPIO pins low first, *then* * enable the ones we want. 
*/
	aue_csr_write_1(sc, AUE_GPIO0, AUE_GPIO_OUT0|AUE_GPIO_SEL0);
	aue_csr_write_1(sc, AUE_GPIO0, AUE_GPIO_OUT0|AUE_GPIO_SEL0|AUE_GPIO_SEL1);

	if (sc->aue_flags & LSYS) {
		/* Grrr. LinkSys has to be different from everyone else. */
		aue_csr_write_1(sc, AUE_GPIO0, AUE_GPIO_SEL0 | AUE_GPIO_SEL1);
		aue_csr_write_1(sc, AUE_GPIO0,
		    AUE_GPIO_SEL0 | AUE_GPIO_SEL1 | AUE_GPIO_OUT0);
	}

	if (sc->aue_flags & PII)
		aue_reset_pegasus_II(sc);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(10000);

	return;
}

/*
 * Probe for a Pegasus chip.  Matched at device (not interface) level,
 * against the aue_lookup() vendor/product table.
 */
USB_MATCH(aue)
{
	USB_MATCH_START(aue, uaa);

	if (uaa->iface != NULL)
		return (UMATCH_NONE);

	return (aue_lookup(uaa->vendor, uaa->product) != NULL ?
	    UMATCH_VENDOR_PRODUCT : UMATCH_NONE);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
USB_ATTACH(aue)
{
	USB_ATTACH_START(aue, sc, uaa);
	char			devinfo[1024];
	u_char			eaddr[ETHER_ADDR_LEN];
	struct ifnet		*ifp;
	usbd_interface_handle	iface;
	usbd_status		err;
	usb_interface_descriptor_t	*id;
	usb_endpoint_descriptor_t	*ed;
	int			i;

	bzero(sc, sizeof(struct aue_softc));
	usbd_devinfo(uaa->device, 0, devinfo);

	sc->aue_dev = self;
	sc->aue_udev = uaa->device;
	sc->aue_unit = device_get_unit(self);

	/*
	 * NOTE(review): error message says "getting interface handle
	 * failed" but this call sets the configuration -- misleading
	 * diagnostic text.
	 */
	if (usbd_set_config_no(sc->aue_udev, AUE_CONFIG_NO, 0)) {
		printf("aue%d: getting interface handle failed\n",
		    sc->aue_unit);
		USB_ATTACH_ERROR_RETURN;
	}

	err = usbd_device2interface_handle(uaa->device, AUE_IFACE_IDX, &iface);
	if (err) {
		printf("aue%d: getting interface handle failed\n",
		    sc->aue_unit);
		USB_ATTACH_ERROR_RETURN;
	}

	sc->aue_iface = iface;
	/* Match was keyed on this table, so the lookup cannot fail here. */
	sc->aue_flags = aue_lookup(uaa->vendor, uaa->product)->aue_flags;

	sc->aue_product = uaa->product;
	sc->aue_vendor = uaa->vendor;

	id = usbd_get_interface_descriptor(sc->aue_iface);

	usbd_devinfo(uaa->device, 0, devinfo);
	device_set_desc_copy(self, devinfo);
	printf("%s: %s\n", USBDEVNAME(self), devinfo);

	/* Find endpoints: bulk IN (RX), bulk OUT (TX), interrupt IN. */
	for (i = 0; i < id->bNumEndpoints; i++) {
		ed = usbd_interface2endpoint_descriptor(iface, i);
		if (ed == NULL) {
			printf("aue%d: couldn't get ep %d\n",
			    sc->aue_unit, i);
			USB_ATTACH_ERROR_RETURN;
		}
		if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) {
			sc->aue_ed[AUE_ENDPT_RX] = ed->bEndpointAddress;
		} else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) {
			sc->aue_ed[AUE_ENDPT_TX] = ed->bEndpointAddress;
		} else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
		    UE_GET_XFERTYPE(ed->bmAttributes) == UE_INTERRUPT) {
			sc->aue_ed[AUE_ENDPT_INTR] = ed->bEndpointAddress;
		}
	}

#if __FreeBSD_version >= 500000
	mtx_init(&sc->aue_mtx, device_get_nameunit(self), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);
#endif
	AUE_LOCK(sc);

	/* Reset the adapter. */
	aue_reset(sc);

	/*
	 * Get station address from the EEPROM.
	 */
	aue_read_eeprom(sc, (caddr_t)&eaddr, 0, 3, 0);

	ifp = sc->aue_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		printf("aue%d: can not if_alloc()\n", sc->aue_unit);
		USB_ATTACH_ERROR_RETURN;
	}
	ifp->if_softc = sc;
	if_initname(ifp, "aue", sc->aue_unit);
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
	    IFF_NEEDSGIANT;
	ifp->if_ioctl = aue_ioctl;
	ifp->if_start = aue_start;
	ifp->if_watchdog = aue_watchdog;
	ifp->if_init = aue_init;
	ifp->if_baudrate = 10000000;
	ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;

	/*
	 * Do MII setup.
	 * NOTE: Doing this causes child devices to be attached to us,
	 * which we would normally disconnect at in the detach routine
	 * using device_delete_child(). However the USB code is set up
	 * such that when this driver is removed, all children devices
	 * are removed as well. In effect, the USB code ends up detaching
	 * all of our children for us, so we don't have to do it ourselves
	 * in aue_detach(). It's important to point this out since if
	 * we *do* try to detach the child devices ourselves, we will
	 * end up getting the children deleted twice, which will crash
	 * the system.
	 */
	if (mii_phy_probe(self, &sc->aue_miibus,
	    aue_ifmedia_upd, aue_ifmedia_sts)) {
		printf("aue%d: MII without any PHY!\n", sc->aue_unit);
		AUE_UNLOCK(sc);
#if __FreeBSD_version >= 500000
		mtx_destroy(&sc->aue_mtx);
#endif
		USB_ATTACH_ERROR_RETURN;
	}

	sc->aue_qdat.ifp = ifp;
	sc->aue_qdat.if_rxstart = aue_rxstart;

	/*
	 * Call MI attach routine.
	 */
#if __FreeBSD_version >= 500000
	ether_ifattach(ifp, eaddr);
#else
	ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
#endif
	callout_handle_init(&sc->aue_stat_ch);
	usb_register_netisr();
	sc->aue_dying = 0;

	AUE_UNLOCK(sc);
	USB_ATTACH_SUCCESS_RETURN;
}

/*
 * Detach: mark the device dying, cancel the stat callout, tear down
 * the ifnet and abort any outstanding transfers.
 */
Static int
aue_detach(device_ptr_t dev)
{
	struct aue_softc	*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	AUE_LOCK(sc);
	ifp = sc->aue_ifp;

	sc->aue_dying = 1;
	untimeout(aue_tick, sc, sc->aue_stat_ch);
#if __FreeBSD_version >= 500000
	ether_ifdetach(ifp);
#else
	ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
	if_free(ifp);
#endif

	if (sc->aue_ep[AUE_ENDPT_TX] != NULL)
		usbd_abort_pipe(sc->aue_ep[AUE_ENDPT_TX]);
	if (sc->aue_ep[AUE_ENDPT_RX] != NULL)
		usbd_abort_pipe(sc->aue_ep[AUE_ENDPT_RX]);
#ifdef AUE_INTR_PIPE
	if (sc->aue_ep[AUE_ENDPT_INTR] != NULL)
		usbd_abort_pipe(sc->aue_ep[AUE_ENDPT_INTR]);
#endif

	AUE_UNLOCK(sc);
#if __FreeBSD_version >= 500000
	mtx_destroy(&sc->aue_mtx);
#endif

	return (0);
}

#ifdef AUE_INTR_PIPE
/*
 * Interrupt-pipe completion handler: reads the periodic status packet
 * and accounts TX errors/collisions.
 */
Static void
aue_intr(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status)
{
	struct aue_softc	*sc = priv;
	struct ifnet		*ifp;
	struct aue_intrpkt	*p;

	AUE_LOCK(sc);
	ifp = sc->aue_ifp;

	if (!(ifp->if_flags & IFF_RUNNING)) {
		AUE_UNLOCK(sc);
		return;
	}

	if (status != USBD_NORMAL_COMPLETION) {
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) {
			AUE_UNLOCK(sc);
			return;
		}
		printf("aue%d: usb error on intr: %s\n", sc->aue_unit,
		    usbd_errstr(status));
		/*
		 * NOTE(review): this clears the stall on the RX endpoint
		 * even though the error occurred on the interrupt pipe --
		 * AUE_ENDPT_INTR looks intended here; confirm against the
		 * upstream driver.
		 */
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall(sc->aue_ep[AUE_ENDPT_RX]);
		AUE_UNLOCK(sc);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, (void **)&p, NULL, NULL);

	if (p->aue_txstat0)
		ifp->if_oerrors++;

	/*
	 * NOTE(review): ANDing the two flag constants together yields a
	 * constant (zero if the bits are distinct), so this condition
	 * looks like it can never fire -- '|' was presumably intended.
	 */
	if (p->aue_txstat0 & (AUE_TXSTAT0_LATECOLL & AUE_TXSTAT0_EXCESSCOLL))
		ifp->if_collisions++;

	AUE_UNLOCK(sc);
	return;
}
#endif

/*
 * Requeue a receive transfer for the current RX producer slot after the
 * netisr has consumed the previous mbuf.
 */
Static void
aue_rxstart(struct ifnet *ifp)
{
	struct aue_softc	*sc;
	struct ue_chain		*c;

	sc = ifp->if_softc;
	AUE_LOCK(sc);
	c = &sc->aue_cdata.ue_rx_chain[sc->aue_cdata.ue_rx_prod];

	c->ue_mbuf = usb_ether_newbuf();
	if (c->ue_mbuf == NULL) {
		printf("%s: no memory for rx list "
		    "-- packet dropped!\n", USBDEVNAME(sc->aue_dev));
		ifp->if_ierrors++;
		AUE_UNLOCK(sc);
		return;
	}

	/* Setup new transfer. */
	usbd_setup_xfer(c->ue_xfer, sc->aue_ep[AUE_ENDPT_RX],
	    c, mtod(c->ue_mbuf, char *), UE_BUFSZ, USBD_SHORT_XFER_OK,
	    USBD_NO_TIMEOUT, aue_rxeof);
	usbd_transfer(c->ue_xfer);
	AUE_UNLOCK(sc);

	return;
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
Static void
aue_rxeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status)
{
	struct ue_chain	*c = priv;
	struct aue_softc	*sc = c->ue_sc;
	struct mbuf		*m;
	struct ifnet		*ifp;
	int			total_len = 0;
	struct aue_rxpkt	r;

	if (sc->aue_dying)
		return;
	AUE_LOCK(sc);
	ifp = sc->aue_ifp;

	if (!(ifp->if_flags & IFF_RUNNING)) {
		AUE_UNLOCK(sc);
		return;
	}

	if (status != USBD_NORMAL_COMPLETION) {
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) {
			AUE_UNLOCK(sc);
			return;
		}
		if (usbd_ratecheck(&sc->aue_rx_notice))
			printf("aue%d: usb error on rx: %s\n", sc->aue_unit,
			    usbd_errstr(status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall(sc->aue_ep[AUE_ENDPT_RX]);
		goto done;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &total_len, NULL);

	/* Frame must hold at least the 4-byte RX status and the CRC. */
	if (total_len <= 4 + ETHER_CRC_LEN) {
		ifp->if_ierrors++;
		goto done;
	}

	m = c->ue_mbuf;
	/* The chip appends a 4-byte RX status word; copy it out. */
	bcopy(mtod(m, char *) + total_len - 4, (char *)&r, sizeof(r));

	/* Turn off all the non-error bits in the rx status word.
	 */
	r.aue_rxstat &= AUE_RXSTAT_MASK;

	if (r.aue_rxstat) {
		ifp->if_ierrors++;
		goto done;
	}

	/* No errors; receive the packet. */
	total_len -= (4 + ETHER_CRC_LEN);

	ifp->if_ipackets++;
	m->m_pkthdr.rcvif = (void *)&sc->aue_qdat;
	m->m_pkthdr.len = m->m_len = total_len;

	/* Put the packet on the special USB input queue. */
	usb_ether_input(m);
	AUE_UNLOCK(sc);

	return;
done:
	/* Setup new transfer. */
	usbd_setup_xfer(xfer, sc->aue_ep[AUE_ENDPT_RX],
	    c, mtod(c->ue_mbuf, char *), UE_BUFSZ, USBD_SHORT_XFER_OK,
	    USBD_NO_TIMEOUT, aue_rxeof);
	usbd_transfer(xfer);
	AUE_UNLOCK(sc);

	return;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
Static void
aue_txeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status)
{
	struct ue_chain	*c = priv;
	struct aue_softc	*sc = c->ue_sc;
	struct ifnet		*ifp;
	usbd_status		err;

	AUE_LOCK(sc);
	ifp = sc->aue_ifp;

	if (status != USBD_NORMAL_COMPLETION) {
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) {
			AUE_UNLOCK(sc);
			return;
		}
		printf("aue%d: usb error on tx: %s\n", sc->aue_unit,
		    usbd_errstr(status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall(sc->aue_ep[AUE_ENDPT_TX]);
		AUE_UNLOCK(sc);
		return;
	}

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;
	usbd_get_xfer_status(c->ue_xfer, NULL, NULL, NULL, &err);

	if (c->ue_mbuf != NULL) {
		c->ue_mbuf->m_pkthdr.rcvif = ifp;
		usb_tx_done(c->ue_mbuf);
		c->ue_mbuf = NULL;
	}

	if (err)
		ifp->if_oerrors++;
	else
		ifp->if_opackets++;

	AUE_UNLOCK(sc);

	return;
}

/*
 * Once-a-second timer: drives the MII state machine and kicks the
 * transmit queue when link first comes up.  Re-arms itself.
 */
Static void
aue_tick(void *xsc)
{
	struct aue_softc	*sc = xsc;
	struct ifnet		*ifp;
	struct mii_data		*mii;

	if (sc == NULL)
		return;

	AUE_LOCK(sc);

	ifp = sc->aue_ifp;
	mii = GET_MII(sc);
	if (mii == NULL) {
		AUE_UNLOCK(sc);
		return;
	}

	mii_tick(mii);
	if (!sc->aue_link && mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->aue_link++;
		if (ifp->if_snd.ifq_head != NULL)
			aue_start(ifp);
	}

	sc->aue_stat_ch = timeout(aue_tick, sc, hz);

	AUE_UNLOCK(sc);

	return;
}

Static int
aue_encap(struct aue_softc *sc, struct mbuf *m, int idx)
{
	int			total_len;
	struct ue_chain	*c;
	usbd_status		err;

	c = &sc->aue_cdata.ue_tx_chain[idx];

	/*
	 * Copy the mbuf data into a contiguous buffer, leaving two
	 * bytes at the beginning to hold the frame length.
	 */
	m_copydata(m, 0, m->m_pkthdr.len, c->ue_buf + 2);
	c->ue_mbuf = m;

	total_len = m->m_pkthdr.len + 2;

	/*
	 * The ADMtek documentation says that the packet length is
	 * supposed to be specified in the first two bytes of the
	 * transfer, however it actually seems to ignore this info
	 * and base the frame size on the bulk transfer length.
	 */
	c->ue_buf[0] = (u_int8_t)m->m_pkthdr.len;
	c->ue_buf[1] = (u_int8_t)(m->m_pkthdr.len >> 8);

	usbd_setup_xfer(c->ue_xfer, sc->aue_ep[AUE_ENDPT_TX],
	    c, c->ue_buf, total_len, USBD_FORCE_SHORT_XFER,
	    10000, aue_txeof);

	/* Transmit */
	err = usbd_transfer(c->ue_xfer);
	if (err != USBD_IN_PROGRESS) {
		aue_stop(sc);
		return (EIO);
	}

	sc->aue_cdata.ue_tx_cnt++;

	return (0);
}

/*
 * if_start handler: dequeue one frame and hand it to aue_encap().
 * Only a single transfer is kept in flight (slot 0); IFF_OACTIVE
 * gates further submissions until aue_txeof() clears it.
 */
Static void
aue_start(struct ifnet *ifp)
{
	struct aue_softc	*sc = ifp->if_softc;
	struct mbuf		*m_head = NULL;

	AUE_LOCK(sc);

	if (!sc->aue_link) {
		AUE_UNLOCK(sc);
		return;
	}

	if (ifp->if_flags & IFF_OACTIVE) {
		AUE_UNLOCK(sc);
		return;
	}

	IF_DEQUEUE(&ifp->if_snd, m_head);
	if (m_head == NULL) {
		AUE_UNLOCK(sc);
		return;
	}

	if (aue_encap(sc, m_head, 0)) {
		IF_PREPEND(&ifp->if_snd, m_head);
		ifp->if_flags |= IFF_OACTIVE;
		AUE_UNLOCK(sc);
		return;
	}

	/*
	 * If there's a BPF listener, bounce a copy of this frame
	 * to him.
	 */
	BPF_MTAP(ifp, m_head);

	ifp->if_flags |= IFF_OACTIVE;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
	AUE_UNLOCK(sc);

	return;
}

/*
 * if_init handler: reset the chip, program the station address and RX
 * filter, set up the TX/RX rings, open the pipes and start receiving.
 */
Static void
aue_init(void *xsc)
{
	struct aue_softc	*sc = xsc;
	struct ifnet		*ifp = sc->aue_ifp;
	struct mii_data		*mii = GET_MII(sc);
	struct ue_chain	*c;
	usbd_status		err;
	int			i;

	AUE_LOCK(sc);

	if (ifp->if_flags & IFF_RUNNING) {
		AUE_UNLOCK(sc);
		return;
	}

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	aue_reset(sc);

	/* Set MAC address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		aue_csr_write_1(sc, AUE_PAR0 + i, IFP2ENADDR(sc->aue_ifp)[i]);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		AUE_SETBIT(sc, AUE_CTL2, AUE_CTL2_RX_PROMISC);
	else
		AUE_CLRBIT(sc, AUE_CTL2, AUE_CTL2_RX_PROMISC);

	/* Init TX ring. */
	if (usb_ether_tx_list_init(sc, &sc->aue_cdata,
	    sc->aue_udev) == ENOBUFS) {
		printf("aue%d: tx list init failed\n", sc->aue_unit);
		AUE_UNLOCK(sc);
		return;
	}

	/* Init RX ring. */
	if (usb_ether_rx_list_init(sc, &sc->aue_cdata,
	    sc->aue_udev) == ENOBUFS) {
		printf("aue%d: rx list init failed\n", sc->aue_unit);
		AUE_UNLOCK(sc);
		return;
	}

#ifdef AUE_INTR_PIPE
	/* NOTE(review): M_NOWAIT allocation result is not checked here. */
	sc->aue_cdata.ue_ibuf = malloc(AUE_INTR_PKTLEN, M_USBDEV, M_NOWAIT);
#endif

	/* Load the multicast filter. */
	aue_setmulti(sc);

	/* Enable RX and TX */
	aue_csr_write_1(sc, AUE_CTL0, AUE_CTL0_RXSTAT_APPEND | AUE_CTL0_RX_ENB);
	AUE_SETBIT(sc, AUE_CTL0, AUE_CTL0_TX_ENB);
	AUE_SETBIT(sc, AUE_CTL2, AUE_CTL2_EP3_CLR);

	mii_mediachg(mii);

	/* Open RX and TX pipes. */
	err = usbd_open_pipe(sc->aue_iface, sc->aue_ed[AUE_ENDPT_RX],
	    USBD_EXCLUSIVE_USE, &sc->aue_ep[AUE_ENDPT_RX]);
	if (err) {
		printf("aue%d: open rx pipe failed: %s\n",
		    sc->aue_unit, usbd_errstr(err));
		AUE_UNLOCK(sc);
		return;
	}
	err = usbd_open_pipe(sc->aue_iface, sc->aue_ed[AUE_ENDPT_TX],
	    USBD_EXCLUSIVE_USE, &sc->aue_ep[AUE_ENDPT_TX]);
	if (err) {
		printf("aue%d: open tx pipe failed: %s\n",
		    sc->aue_unit, usbd_errstr(err));
		AUE_UNLOCK(sc);
		return;
	}

#ifdef AUE_INTR_PIPE
	err = usbd_open_pipe_intr(sc->aue_iface, sc->aue_ed[AUE_ENDPT_INTR],
	    USBD_SHORT_XFER_OK, &sc->aue_ep[AUE_ENDPT_INTR], sc,
	    sc->aue_cdata.ue_ibuf, AUE_INTR_PKTLEN, aue_intr,
	    AUE_INTR_INTERVAL);
	if (err) {
		printf("aue%d: open intr pipe failed: %s\n",
		    sc->aue_unit, usbd_errstr(err));
		AUE_UNLOCK(sc);
		return;
	}
#endif

	/* Start up the receive pipe.
	 */
	for (i = 0; i < UE_RX_LIST_CNT; i++) {
		c = &sc->aue_cdata.ue_rx_chain[i];
		usbd_setup_xfer(c->ue_xfer, sc->aue_ep[AUE_ENDPT_RX],
		    c, mtod(c->ue_mbuf, char *), UE_BUFSZ,
		    USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, aue_rxeof);
		usbd_transfer(c->ue_xfer);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	sc->aue_stat_ch = timeout(aue_tick, sc, hz);

	AUE_UNLOCK(sc);

	return;
}

/*
 * Set media options.
 */
Static int
aue_ifmedia_upd(struct ifnet *ifp)
{
	struct aue_softc	*sc = ifp->if_softc;
	struct mii_data		*mii = GET_MII(sc);

	sc->aue_link = 0;
	if (mii->mii_instance) {
		struct mii_softc	*miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return (0);
}

/*
 * Report current media status.
 */
Static void
aue_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct aue_softc	*sc = ifp->if_softc;
	struct mii_data		*mii = GET_MII(sc);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

/*
 * ioctl handler.  Takes the softc lock for the duration; note that
 * ether_ioctl() is called with the lock held in the default case.
 */
Static int
aue_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct aue_softc	*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *)data;
	struct mii_data		*mii;
	int			error = 0;

	AUE_LOCK(sc);

	switch(command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* Toggle only the promiscuous bit if already up. */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->aue_if_flags & IFF_PROMISC)) {
				AUE_SETBIT(sc, AUE_CTL2, AUE_CTL2_RX_PROMISC);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->aue_if_flags & IFF_PROMISC) {
				AUE_CLRBIT(sc, AUE_CTL2, AUE_CTL2_RX_PROMISC);
			} else if (!(ifp->if_flags & IFF_RUNNING))
				aue_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				aue_stop(sc);
		}
		sc->aue_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		aue_setmulti(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = GET_MII(sc);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	AUE_UNLOCK(sc);

	return (error);
}

/*
 * Transmit watchdog: fires when a TX transfer has been pending for
 * longer than if_timer allows.  Reap the stuck transfer and restart.
 */
Static void
aue_watchdog(struct ifnet *ifp)
{
	struct aue_softc	*sc = ifp->if_softc;
	struct ue_chain	*c;
	usbd_status		stat;

	AUE_LOCK(sc);

	ifp->if_oerrors++;
	printf("aue%d: watchdog timeout\n", sc->aue_unit);

	c = &sc->aue_cdata.ue_tx_chain[0];
	usbd_get_xfer_status(c->ue_xfer, NULL, NULL, NULL, &stat);
	aue_txeof(c->ue_xfer, c, stat);

	if (ifp->if_snd.ifq_head != NULL)
		aue_start(ifp);
	AUE_UNLOCK(sc);

	return;
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
Static void
aue_stop(struct aue_softc *sc)
{
	usbd_status		err;
	struct ifnet		*ifp;

	AUE_LOCK(sc);

	ifp = sc->aue_ifp;
	ifp->if_timer = 0;

	aue_csr_write_1(sc, AUE_CTL0, 0);
	aue_csr_write_1(sc, AUE_CTL1, 0);
	aue_reset(sc);
	untimeout(aue_tick, sc, sc->aue_stat_ch);

	/* Stop transfers. */
	if (sc->aue_ep[AUE_ENDPT_RX] != NULL) {
		err = usbd_abort_pipe(sc->aue_ep[AUE_ENDPT_RX]);
		if (err) {
			printf("aue%d: abort rx pipe failed: %s\n",
			    sc->aue_unit, usbd_errstr(err));
		}
		err = usbd_close_pipe(sc->aue_ep[AUE_ENDPT_RX]);
		if (err) {
			printf("aue%d: close rx pipe failed: %s\n",
			    sc->aue_unit, usbd_errstr(err));
		}
		sc->aue_ep[AUE_ENDPT_RX] = NULL;
	}

	if (sc->aue_ep[AUE_ENDPT_TX] != NULL) {
		err = usbd_abort_pipe(sc->aue_ep[AUE_ENDPT_TX]);
		if (err) {
			printf("aue%d: abort tx pipe failed: %s\n",
			    sc->aue_unit, usbd_errstr(err));
		}
		err = usbd_close_pipe(sc->aue_ep[AUE_ENDPT_TX]);
		if (err) {
			printf("aue%d: close tx pipe failed: %s\n",
			    sc->aue_unit, usbd_errstr(err));
		}
		sc->aue_ep[AUE_ENDPT_TX] = NULL;
	}

#ifdef AUE_INTR_PIPE
	if (sc->aue_ep[AUE_ENDPT_INTR] != NULL) {
		err = usbd_abort_pipe(sc->aue_ep[AUE_ENDPT_INTR]);
		if (err) {
			printf("aue%d: abort intr pipe failed: %s\n",
			    sc->aue_unit, usbd_errstr(err));
		}
		err = usbd_close_pipe(sc->aue_ep[AUE_ENDPT_INTR]);
		if (err) {
			printf("aue%d: close intr pipe failed: %s\n",
			    sc->aue_unit, usbd_errstr(err));
		}
		sc->aue_ep[AUE_ENDPT_INTR] = NULL;
	}
#endif

	/* Free RX resources. */
	usb_ether_rx_list_free(&sc->aue_cdata);
	/* Free TX resources.
	 */
	usb_ether_tx_list_free(&sc->aue_cdata);

#ifdef AUE_INTR_PIPE
	free(sc->aue_cdata.ue_ibuf, M_USBDEV);
	sc->aue_cdata.ue_ibuf = NULL;
#endif

	sc->aue_link = 0;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	AUE_UNLOCK(sc);

	return;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
Static void
aue_shutdown(device_ptr_t dev)
{
	struct aue_softc	*sc;

	sc = device_get_softc(dev);
	sc->aue_dying++;
	AUE_LOCK(sc);
	aue_reset(sc);
	/* aue_stop() recurses on the (MTX_RECURSE) softc lock. */
	aue_stop(sc);
	AUE_UNLOCK(sc);

	return;
}
Index: stable/6/sys/dev/usb/if_axe.c
===================================================================
--- stable/6/sys/dev/usb/if_axe.c	(revision 149421)
+++ stable/6/sys/dev/usb/if_axe.c	(revision 149422)
@@ -1,1106 +1,1108 @@
/*-
 * Copyright (c) 1997, 1998, 1999, 2000-2003
 *	Bill Paul . All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * NOTE(review): the header names of the #include directives below were
 * lost when this text was extracted (angle-bracketed names stripped).
 * Restore them from the repository copy of if_axe.c.
 */
#include
__FBSDID("$FreeBSD$");

/*
 * ASIX Electronics AX88172 USB 2.0 ethernet driver. Used in the
 * LinkSys USB200M and various other adapters.
 *
 * Manuals available from:
 * http://www.asix.com.tw/datasheet/mac/Ax88172.PDF
 * Note: you need the manual for the AX88170 chip (USB 1.x ethernet
 * controller) to find the definitions for the RX control register.
 * http://www.asix.com.tw/datasheet/mac/Ax88170.PDF
 *
 * Written by Bill Paul
 * Senior Engineer
 * Wind River Systems
 */

/*
 * The AX88172 provides USB ethernet supports at 10 and 100Mbps.
 * It uses an external PHY (reference designs use a RealTek chip),
 * and has a 64-bit multicast hash filter. There is some information
 * missing from the manual which one needs to know in order to make
 * the chip function:
 *
 * - You must set bit 7 in the RX control register, otherwise the
 *   chip won't receive any packets.
 * - You must initialize all 3 IPG registers, or you won't be able
 *   to send any packets.
 *
 * Note that this device appears to only support loading the station
 * address via autload from the EEPROM (i.e. there's no way to manaully
 * set it).
 *
 * (Adam Weinberger wanted me to name this driver if_gir.c.)
 */

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#if __FreeBSD_version < 500000
#include
#endif
#include
#include
#include
#include
#include "usbdevs.h"
#include
#include
#include

/* "controller miibus0" required.  See GENERIC if you get errors here. */
#include "miibus_if.h"

#include

/*
 * Various supported device vendors/products.
 */
Static struct axe_type axe_devs[] = {
	{ USB_VENDOR_ASIX, USB_PRODUCT_ASIX_AX88172 },
	{ USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DUBE100 },
	{ USB_VENDOR_JVC, USB_PRODUCT_JVC_MP_PRX1 },
	{ USB_VENDOR_LINKSYS2, USB_PRODUCT_LINKSYS2_USB200M },
	{ USB_VENDOR_MELCO, USB_PRODUCT_MELCO_LUAU2KTX },
	{ USB_VENDOR_NETGEAR, USB_PRODUCT_NETGEAR_FA120 },
	{ USB_VENDOR_SYSTEMTALKS, USB_PRODUCT_SYSTEMTALKS_SGCX2UL },
	{ USB_VENDOR_SITECOM, USB_PRODUCT_SITECOM_LN029 },
	{ 0, 0 }
};

Static int axe_match(device_ptr_t);
Static int axe_attach(device_ptr_t);
Static int axe_detach(device_ptr_t);

Static int axe_encap(struct axe_softc *, struct mbuf *, int);
Static void axe_rxeof(usbd_xfer_handle, usbd_private_handle, usbd_status);
Static void axe_txeof(usbd_xfer_handle, usbd_private_handle, usbd_status);
Static void axe_tick(void *);
Static void axe_rxstart(struct ifnet *);
Static void axe_start(struct ifnet *);
Static int axe_ioctl(struct ifnet *, u_long, caddr_t);
Static void axe_init(void *);
Static void axe_stop(struct axe_softc *);
Static void axe_watchdog(struct ifnet *);
Static void axe_shutdown(device_ptr_t);

Static int axe_miibus_readreg(device_ptr_t, int, int);
Static int axe_miibus_writereg(device_ptr_t, int, int, int);
Static void axe_miibus_statchg(device_ptr_t);

Static int axe_cmd(struct axe_softc *, int, int, int, void *);
Static int axe_ifmedia_upd(struct ifnet *);
Static void axe_ifmedia_sts(struct ifnet *, struct ifmediareq *);

Static void axe_setmulti(struct axe_softc *);

Static device_method_t axe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, axe_match),
	DEVMETHOD(device_attach, axe_attach),
	DEVMETHOD(device_detach, axe_detach),
	DEVMETHOD(device_shutdown, axe_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child, bus_generic_print_child),
	DEVMETHOD(bus_driver_added, bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg, axe_miibus_readreg),
	DEVMETHOD(miibus_writereg, axe_miibus_writereg),
	DEVMETHOD(miibus_statchg, axe_miibus_statchg),

	{ 0, 0 }
};

Static driver_t axe_driver = {
	"axe",
	axe_methods,
	sizeof(struct axe_softc)
};

Static devclass_t axe_devclass;

DRIVER_MODULE(axe, uhub, axe_driver, axe_devclass, usbd_driver_load, 0);
DRIVER_MODULE(miibus, axe, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(axe, usb, 1, 1, 1);
MODULE_DEPEND(axe, miibus, 1, 1, 1);

/*
 * Issue a vendor-specific control request to the chip.  Returns 0 on
 * success (or when the device is dying), -1 on USB error.
 */
Static int
axe_cmd(struct axe_softc *sc, int cmd, int index, int val, void *buf)
{
	usb_device_request_t	req;
	usbd_status		err;

	if (sc->axe_dying)
		return(0);

	if (AXE_CMD_DIR(cmd))
		req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
	else
		req.bmRequestType = UT_READ_VENDOR_DEVICE;
	req.bRequest = AXE_CMD_CMD(cmd);
	USETW(req.wValue, val);
	USETW(req.wIndex, index);
	USETW(req.wLength, AXE_CMD_LEN(cmd));

	err = usbd_do_request(sc->axe_udev, &req, buf);

	if (err)
		return(-1);

	return(0);
}

/*
 * MII register read, performed via the chip's software MII access
 * commands.  Caches the first responding PHY address in axe_phyaddrs[0].
 */
Static int
axe_miibus_readreg(device_ptr_t dev, int phy, int reg)
{
	struct axe_softc	*sc = USBGETSOFTC(dev);
	usbd_status		err;
	u_int16_t		val;

	if (sc->axe_dying)
		return(0);

#ifdef notdef
	/*
	 * The chip tells us the MII address of any supported
	 * PHYs attached to the chip, so only read from those.
	 */
	if (sc->axe_phyaddrs[0] != AXE_NOPHY && phy != sc->axe_phyaddrs[0])
		return (0);

	if (sc->axe_phyaddrs[1] != AXE_NOPHY && phy != sc->axe_phyaddrs[1])
		return (0);
#endif
	if (sc->axe_phyaddrs[0] != 0xFF && sc->axe_phyaddrs[0] != phy)
		return (0);

	AXE_LOCK(sc);
	axe_cmd(sc, AXE_CMD_MII_OPMODE_SW, 0, 0, NULL);
	err = axe_cmd(sc, AXE_CMD_MII_READ_REG, reg, phy, (void *)&val);
	axe_cmd(sc, AXE_CMD_MII_OPMODE_HW, 0, 0, NULL);
	AXE_UNLOCK(sc);

	if (err) {
		printf("axe%d: read PHY failed\n", sc->axe_unit);
		return(-1);
	}

	if (val)
		sc->axe_phyaddrs[0] = phy;

	return (val);
}

/*
 * MII register write via the software MII access commands.
 */
Static int
axe_miibus_writereg(device_ptr_t dev, int phy, int reg, int val)
{
	struct axe_softc	*sc = USBGETSOFTC(dev);
	usbd_status		err;

	if (sc->axe_dying)
		return(0);

	AXE_LOCK(sc);
	axe_cmd(sc, AXE_CMD_MII_OPMODE_SW, 0, 0, NULL);
	err = axe_cmd(sc, AXE_CMD_MII_WRITE_REG, reg, phy, (void *)&val);
	axe_cmd(sc, AXE_CMD_MII_OPMODE_HW, 0, 0, NULL);
	AXE_UNLOCK(sc);

	if (err) {
		printf("axe%d: write PHY failed\n", sc->axe_unit);
		return(-1);
	}

	return (0);
}

Static void
axe_miibus_statchg(device_ptr_t dev)
{
#ifdef notdef
	struct axe_softc	*sc = USBGETSOFTC(dev);
	struct mii_data		*mii = GET_MII(sc);
#endif
	/* doesn't seem to be necessary */

	return;
}

/*
 * Set media options.
 */
Static int
axe_ifmedia_upd(struct ifnet *ifp)
{
	struct axe_softc	*sc = ifp->if_softc;
	struct mii_data		*mii = GET_MII(sc);

	sc->axe_link = 0;
	if (mii->mii_instance) {
		struct mii_softc	*miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			 mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return (0);
}

/*
 * Report current media status.
 */
Static void
axe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct axe_softc	*sc = ifp->if_softc;
	struct mii_data		*mii = GET_MII(sc);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

/*
 * Program the 64-bit multicast hash filter, or fall back to allmulti
 * when promiscuous/allmulti is requested.
 *
 * NOTE(review): the lines prefixed with '+' below are unified-diff
 * addition markers surviving from the patch this text was extracted
 * from (r149422 added IF_ADDR_LOCK/UNLOCK around the address walk);
 * strip the '+' when restoring the file.
 */
Static void
axe_setmulti(struct axe_softc *sc)
{
	struct ifnet		*ifp;
	struct ifmultiaddr	*ifma;
	u_int32_t		h = 0;
	u_int16_t		rxmode;
	u_int8_t		hashtbl[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };

	ifp = sc->axe_ifp;

	AXE_LOCK(sc);
	axe_cmd(sc, AXE_CMD_RXCTL_READ, 0, 0, (void *)&rxmode);

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxmode |= AXE_RXCMD_ALLMULTI;
		axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL);
		AXE_UNLOCK(sc);
		return;
	} else
		rxmode &= ~AXE_RXCMD_ALLMULTI;

+	IF_ADDR_LOCK(ifp);
#if __FreeBSD_version >= 500000
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
#else
	LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
#endif
	{
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		hashtbl[h / 8] |= 1 << (h % 8);
	}
+	IF_ADDR_UNLOCK(ifp);

	axe_cmd(sc, AXE_CMD_WRITE_MCAST, 0, 0, (void *)&hashtbl);
	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL);
	AXE_UNLOCK(sc);

	return;
}

/*
 * "Reset": re-select the configuration and interface, then give the
 * chip time to settle.
 */
Static void
axe_reset(struct axe_softc *sc)
{
	if (sc->axe_dying)
		return;

	if (usbd_set_config_no(sc->axe_udev, AXE_CONFIG_NO, 1) ||
	    usbd_device2interface_handle(sc->axe_udev, AXE_IFACE_IDX,
	    &sc->axe_iface)) {
		printf("axe%d: getting interface handle failed\n",
		    sc->axe_unit);
	}

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);
	return;
}

/*
 * Probe for a AX88172 chip.  Matched at interface level against the
 * axe_devs table.
 */
USB_MATCH(axe)
{
	USB_MATCH_START(axe, uaa);
	struct axe_type			*t;

	if (!uaa->iface)
		return(UMATCH_NONE);

	t = axe_devs;
	while(t->axe_vid) {
		if (uaa->vendor == t->axe_vid &&
		    uaa->product == t->axe_did) {
			return(UMATCH_VENDOR_PRODUCT);
		}
		t++;
	}

	return(UMATCH_NONE);
}

/*
 * Attach the interface.
Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ USB_ATTACH(axe) { USB_ATTACH_START(axe, sc, uaa); char devinfo[1024]; u_char eaddr[ETHER_ADDR_LEN]; struct ifnet *ifp; usb_interface_descriptor_t *id; usb_endpoint_descriptor_t *ed; int i; bzero(sc, sizeof(struct axe_softc)); sc->axe_udev = uaa->device; sc->axe_dev = self; sc->axe_unit = device_get_unit(self); if (usbd_set_config_no(sc->axe_udev, AXE_CONFIG_NO, 1)) { printf("axe%d: getting interface handle failed\n", sc->axe_unit); USB_ATTACH_ERROR_RETURN; } if (usbd_device2interface_handle(uaa->device, AXE_IFACE_IDX, &sc->axe_iface)) { printf("axe%d: getting interface handle failed\n", sc->axe_unit); USB_ATTACH_ERROR_RETURN; } id = usbd_get_interface_descriptor(sc->axe_iface); usbd_devinfo(uaa->device, 0, devinfo); device_set_desc_copy(self, devinfo); printf("%s: %s\n", USBDEVNAME(self), devinfo); /* Find endpoints. */ for (i = 0; i < id->bNumEndpoints; i++) { ed = usbd_interface2endpoint_descriptor(sc->axe_iface, i); if (!ed) { printf("axe%d: couldn't get ep %d\n", sc->axe_unit, i); USB_ATTACH_ERROR_RETURN; } if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) { sc->axe_ed[AXE_ENDPT_RX] = ed->bEndpointAddress; } else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT && UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) { sc->axe_ed[AXE_ENDPT_TX] = ed->bEndpointAddress; } else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && UE_GET_XFERTYPE(ed->bmAttributes) == UE_INTERRUPT) { sc->axe_ed[AXE_ENDPT_INTR] = ed->bEndpointAddress; } } #if __FreeBSD_version >= 500000 mtx_init(&sc->axe_mtx, device_get_nameunit(self), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); #endif AXE_LOCK(sc); /* * Get station address. */ axe_cmd(sc, AXE_CMD_READ_NODEID, 0, 0, &eaddr); /* * Load IPG values and PHY indexes. 
*/ axe_cmd(sc, AXE_CMD_READ_IPG012, 0, 0, (void *)&sc->axe_ipgs); axe_cmd(sc, AXE_CMD_READ_PHYID, 0, 0, (void *)&sc->axe_phyaddrs); /* * Work around broken adapters that appear to lie about * their PHY addresses. */ sc->axe_phyaddrs[0] = sc->axe_phyaddrs[1] = 0xFF; ifp = sc->axe_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { printf("axe%d: can not if_alloc()\n", sc->axe_unit); USB_ATTACH_ERROR_RETURN; } ifp->if_softc = sc; if_initname(ifp, "axe", sc->axe_unit); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_NEEDSGIANT; ifp->if_ioctl = axe_ioctl; ifp->if_start = axe_start; ifp->if_watchdog = axe_watchdog; ifp->if_init = axe_init; ifp->if_baudrate = 10000000; ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; sc->axe_qdat.ifp = ifp; sc->axe_qdat.if_rxstart = axe_rxstart; if (mii_phy_probe(self, &sc->axe_miibus, axe_ifmedia_upd, axe_ifmedia_sts)) { printf("axe%d: MII without any PHY!\n", sc->axe_unit); AXE_UNLOCK(sc); #if __FreeBSD_version >= 500000 mtx_destroy(&sc->axe_mtx); #endif USB_ATTACH_ERROR_RETURN; } /* * Call MI attach routine. 
*/ #if __FreeBSD_version >= 500000 ether_ifattach(ifp, eaddr); #else ether_ifattach(ifp, ETHER_BPF_SUPPORTED); #endif callout_handle_init(&sc->axe_stat_ch); usb_register_netisr(); sc->axe_dying = 0; AXE_UNLOCK(sc); USB_ATTACH_SUCCESS_RETURN; } Static int axe_detach(device_ptr_t dev) { struct axe_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); AXE_LOCK(sc); ifp = sc->axe_ifp; sc->axe_dying = 1; untimeout(axe_tick, sc, sc->axe_stat_ch); #if __FreeBSD_version >= 500000 ether_ifdetach(ifp); #else ether_ifdetach(ifp, ETHER_BPF_SUPPORTED); if_free(ifp); #endif if (sc->axe_ep[AXE_ENDPT_TX] != NULL) usbd_abort_pipe(sc->axe_ep[AXE_ENDPT_TX]); if (sc->axe_ep[AXE_ENDPT_RX] != NULL) usbd_abort_pipe(sc->axe_ep[AXE_ENDPT_RX]); if (sc->axe_ep[AXE_ENDPT_INTR] != NULL) usbd_abort_pipe(sc->axe_ep[AXE_ENDPT_INTR]); AXE_UNLOCK(sc); #if __FreeBSD_version >= 500000 mtx_destroy(&sc->axe_mtx); #endif return(0); } Static void axe_rxstart(struct ifnet *ifp) { struct axe_softc *sc; struct ue_chain *c; sc = ifp->if_softc; AXE_LOCK(sc); c = &sc->axe_cdata.ue_rx_chain[sc->axe_cdata.ue_rx_prod]; c->ue_mbuf = usb_ether_newbuf(); if (c->ue_mbuf == NULL) { printf("%s: no memory for rx list " "-- packet dropped!\n", USBDEVNAME(sc->axe_dev)); ifp->if_ierrors++; AXE_UNLOCK(sc); return; } /* Setup new transfer. */ usbd_setup_xfer(c->ue_xfer, sc->axe_ep[AXE_ENDPT_RX], c, mtod(c->ue_mbuf, char *), UE_BUFSZ, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, axe_rxeof); usbd_transfer(c->ue_xfer); AXE_UNLOCK(sc); return; } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. 
*/ Static void axe_rxeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status) { struct axe_softc *sc; struct ue_chain *c; struct mbuf *m; struct ifnet *ifp; int total_len = 0; c = priv; sc = c->ue_sc; AXE_LOCK(sc); ifp = sc->axe_ifp; if (!(ifp->if_flags & IFF_RUNNING)) { AXE_UNLOCK(sc); return; } if (status != USBD_NORMAL_COMPLETION) { if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) { AXE_UNLOCK(sc); return; } if (usbd_ratecheck(&sc->axe_rx_notice)) printf("axe%d: usb error on rx: %s\n", sc->axe_unit, usbd_errstr(status)); if (status == USBD_STALLED) usbd_clear_endpoint_stall(sc->axe_ep[AXE_ENDPT_RX]); goto done; } usbd_get_xfer_status(xfer, NULL, NULL, &total_len, NULL); m = c->ue_mbuf; if (total_len < sizeof(struct ether_header)) { ifp->if_ierrors++; goto done; } ifp->if_ipackets++; m->m_pkthdr.rcvif = (void *)&sc->axe_qdat; m->m_pkthdr.len = m->m_len = total_len; /* Put the packet on the special USB input queue. */ usb_ether_input(m); AXE_UNLOCK(sc); return; done: /* Setup new transfer. */ usbd_setup_xfer(c->ue_xfer, sc->axe_ep[AXE_ENDPT_RX], c, mtod(c->ue_mbuf, char *), UE_BUFSZ, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, axe_rxeof); usbd_transfer(c->ue_xfer); AXE_UNLOCK(sc); return; } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. 
*/ Static void axe_txeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status) { struct axe_softc *sc; struct ue_chain *c; struct ifnet *ifp; usbd_status err; c = priv; sc = c->ue_sc; AXE_LOCK(sc); ifp = sc->axe_ifp; if (status != USBD_NORMAL_COMPLETION) { if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) { AXE_UNLOCK(sc); return; } printf("axe%d: usb error on tx: %s\n", sc->axe_unit, usbd_errstr(status)); if (status == USBD_STALLED) usbd_clear_endpoint_stall(sc->axe_ep[AXE_ENDPT_TX]); AXE_UNLOCK(sc); return; } ifp->if_timer = 0; ifp->if_flags &= ~IFF_OACTIVE; usbd_get_xfer_status(c->ue_xfer, NULL, NULL, NULL, &err); if (c->ue_mbuf != NULL) { c->ue_mbuf->m_pkthdr.rcvif = ifp; usb_tx_done(c->ue_mbuf); c->ue_mbuf = NULL; } if (err) ifp->if_oerrors++; else ifp->if_opackets++; AXE_UNLOCK(sc); return; } Static void axe_tick(void *xsc) { struct axe_softc *sc; struct ifnet *ifp; struct mii_data *mii; sc = xsc; if (sc == NULL) return; AXE_LOCK(sc); ifp = sc->axe_ifp; mii = GET_MII(sc); if (mii == NULL) { AXE_UNLOCK(sc); return; } mii_tick(mii); if (!sc->axe_link && mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { sc->axe_link++; if (ifp->if_snd.ifq_head != NULL) axe_start(ifp); } sc->axe_stat_ch = timeout(axe_tick, sc, hz); AXE_UNLOCK(sc); return; } Static int axe_encap(struct axe_softc *sc, struct mbuf *m, int idx) { struct ue_chain *c; usbd_status err; c = &sc->axe_cdata.ue_tx_chain[idx]; /* * Copy the mbuf data into a contiguous buffer, leaving two * bytes at the beginning to hold the frame length. 
 */
	m_copydata(m, 0, m->m_pkthdr.len, c->ue_buf);
	c->ue_mbuf = m;

	usbd_setup_xfer(c->ue_xfer, sc->axe_ep[AXE_ENDPT_TX],
	    c, c->ue_buf, m->m_pkthdr.len, USBD_FORCE_SHORT_XFER,
	    10000, axe_txeof);

	/* Transmit */
	err = usbd_transfer(c->ue_xfer);
	if (err != USBD_IN_PROGRESS) {
		/* Could not even queue the transfer: wedge the interface. */
		axe_stop(sc);
		return(EIO);
	}

	sc->axe_cdata.ue_tx_cnt++;

	return(0);
}

/*
 * Dequeue one packet from the interface send queue and hand it to
 * axe_encap().  Only a single TX transfer (slot 0) is outstanding
 * at a time; IFF_OACTIVE gates further output until axe_txeof().
 */
Static void
axe_start(struct ifnet *ifp)
{
	struct axe_softc	*sc;
	struct mbuf		*m_head = NULL;

	sc = ifp->if_softc;
	AXE_LOCK(sc);

	/* Don't transmit until the PHY reports link. */
	if (!sc->axe_link) {
		AXE_UNLOCK(sc);
		return;
	}

	if (ifp->if_flags & IFF_OACTIVE) {
		AXE_UNLOCK(sc);
		return;
	}

	IF_DEQUEUE(&ifp->if_snd, m_head);
	if (m_head == NULL) {
		AXE_UNLOCK(sc);
		return;
	}

	if (axe_encap(sc, m_head, 0)) {
		/* Requeue so the packet is retried on the next start. */
		IF_PREPEND(&ifp->if_snd, m_head);
		ifp->if_flags |= IFF_OACTIVE;
		AXE_UNLOCK(sc);
		return;
	}

	/*
	 * If there's a BPF listener, bounce a copy of this frame
	 * to him.
	 */
	BPF_MTAP(ifp, m_head);

	ifp->if_flags |= IFF_OACTIVE;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	AXE_UNLOCK(sc);

	return;
}

/*
 * Bring the interface up: reset the chip, initialize the TX/RX
 * transfer lists, program IPG and RX-filter registers, open the
 * bulk pipes and prime the receive ring.
 */
Static void
axe_init(void *xsc)
{
	struct axe_softc	*sc = xsc;
	struct ifnet		*ifp = sc->axe_ifp;
	struct ue_chain		*c;
	usbd_status		err;
	int			i;
	int			rxmode;

	/* NOTE(review): tested before AXE_LOCK is taken -- looks racy
	 * against a concurrent init; confirm against callers. */
	if (ifp->if_flags & IFF_RUNNING)
		return;

	AXE_LOCK(sc);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	axe_reset(sc);

#ifdef notdef
	/* Set MAC address */
	axe_mac(sc, IFP2ENADDR(sc->axe_ifp), 1);
#endif

	/* Enable RX logic. */

	/* Init TX ring. */
	if (usb_ether_tx_list_init(sc, &sc->axe_cdata,
	    sc->axe_udev) == ENOBUFS) {
		printf("axe%d: tx list init failed\n", sc->axe_unit);
		AXE_UNLOCK(sc);
		return;
	}

	/* Init RX ring. */
	if (usb_ether_rx_list_init(sc, &sc->axe_cdata,
	    sc->axe_udev) == ENOBUFS) {
		printf("axe%d: rx list init failed\n", sc->axe_unit);
		AXE_UNLOCK(sc);
		return;
	}

	/* Set transmitter IPG values */
	axe_cmd(sc, AXE_CMD_WRITE_IPG0, 0, sc->axe_ipgs[0], NULL);
	axe_cmd(sc, AXE_CMD_WRITE_IPG1, 0, sc->axe_ipgs[1], NULL);
	axe_cmd(sc, AXE_CMD_WRITE_IPG2, 0, sc->axe_ipgs[2], NULL);

	/* Enable receiver, set RX mode */
	rxmode = AXE_RXCMD_UNICAST|AXE_RXCMD_MULTICAST|AXE_RXCMD_ENABLE;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		rxmode |= AXE_RXCMD_PROMISC;

	if (ifp->if_flags & IFF_BROADCAST)
		rxmode |= AXE_RXCMD_BROADCAST;

	axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL);

	/* Load the multicast filter. */
	axe_setmulti(sc);

	/* Open RX and TX pipes. */
	err = usbd_open_pipe(sc->axe_iface, sc->axe_ed[AXE_ENDPT_RX],
	    USBD_EXCLUSIVE_USE, &sc->axe_ep[AXE_ENDPT_RX]);
	if (err) {
		printf("axe%d: open rx pipe failed: %s\n",
		    sc->axe_unit, usbd_errstr(err));
		AXE_UNLOCK(sc);
		return;
	}

	err = usbd_open_pipe(sc->axe_iface, sc->axe_ed[AXE_ENDPT_TX],
	    USBD_EXCLUSIVE_USE, &sc->axe_ep[AXE_ENDPT_TX]);
	if (err) {
		printf("axe%d: open tx pipe failed: %s\n",
		    sc->axe_unit, usbd_errstr(err));
		AXE_UNLOCK(sc);
		return;
	}

	/* Start up the receive pipe.
*/ for (i = 0; i < UE_RX_LIST_CNT; i++) { c = &sc->axe_cdata.ue_rx_chain[i]; usbd_setup_xfer(c->ue_xfer, sc->axe_ep[AXE_ENDPT_RX], c, mtod(c->ue_mbuf, char *), UE_BUFSZ, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, axe_rxeof); usbd_transfer(c->ue_xfer); } ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; AXE_UNLOCK(sc); sc->axe_stat_ch = timeout(axe_tick, sc, hz); return; } Static int axe_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct axe_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; struct mii_data *mii; u_int16_t rxmode; int error = 0; switch(command) { case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->axe_if_flags & IFF_PROMISC)) { AXE_LOCK(sc); axe_cmd(sc, AXE_CMD_RXCTL_READ, 0, 0, (void *)&rxmode); rxmode |= AXE_RXCMD_PROMISC; axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL); AXE_UNLOCK(sc); axe_setmulti(sc); } else if (ifp->if_flags & IFF_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->axe_if_flags & IFF_PROMISC) { AXE_LOCK(sc); axe_cmd(sc, AXE_CMD_RXCTL_READ, 0, 0, (void *)&rxmode); rxmode &= ~AXE_RXCMD_PROMISC; axe_cmd(sc, AXE_CMD_RXCTL_WRITE, 0, rxmode, NULL); AXE_UNLOCK(sc); axe_setmulti(sc); } else if (!(ifp->if_flags & IFF_RUNNING)) axe_init(sc); } else { if (ifp->if_flags & IFF_RUNNING) axe_stop(sc); } sc->axe_if_flags = ifp->if_flags; error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: axe_setmulti(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = GET_MII(sc); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; default: error = ether_ioctl(ifp, command, data); break; } AXE_UNLOCK(sc); return(error); } Static void axe_watchdog(struct ifnet *ifp) { struct axe_softc *sc; struct ue_chain *c; usbd_status stat; sc = ifp->if_softc; AXE_LOCK(sc); ifp->if_oerrors++; printf("axe%d: watchdog timeout\n", sc->axe_unit); c = &sc->axe_cdata.ue_tx_chain[0]; usbd_get_xfer_status(c->ue_xfer, NULL, NULL, NULL, &stat); 
axe_txeof(c->ue_xfer, c, stat); AXE_UNLOCK(sc); if (ifp->if_snd.ifq_head != NULL) axe_start(ifp); return; } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ Static void axe_stop(struct axe_softc *sc) { usbd_status err; struct ifnet *ifp; AXE_LOCK(sc); ifp = sc->axe_ifp; ifp->if_timer = 0; untimeout(axe_tick, sc, sc->axe_stat_ch); /* Stop transfers. */ if (sc->axe_ep[AXE_ENDPT_RX] != NULL) { err = usbd_abort_pipe(sc->axe_ep[AXE_ENDPT_RX]); if (err) { printf("axe%d: abort rx pipe failed: %s\n", sc->axe_unit, usbd_errstr(err)); } err = usbd_close_pipe(sc->axe_ep[AXE_ENDPT_RX]); if (err) { printf("axe%d: close rx pipe failed: %s\n", sc->axe_unit, usbd_errstr(err)); } sc->axe_ep[AXE_ENDPT_RX] = NULL; } if (sc->axe_ep[AXE_ENDPT_TX] != NULL) { err = usbd_abort_pipe(sc->axe_ep[AXE_ENDPT_TX]); if (err) { printf("axe%d: abort tx pipe failed: %s\n", sc->axe_unit, usbd_errstr(err)); } err = usbd_close_pipe(sc->axe_ep[AXE_ENDPT_TX]); if (err) { printf("axe%d: close tx pipe failed: %s\n", sc->axe_unit, usbd_errstr(err)); } sc->axe_ep[AXE_ENDPT_TX] = NULL; } if (sc->axe_ep[AXE_ENDPT_INTR] != NULL) { err = usbd_abort_pipe(sc->axe_ep[AXE_ENDPT_INTR]); if (err) { printf("axe%d: abort intr pipe failed: %s\n", sc->axe_unit, usbd_errstr(err)); } err = usbd_close_pipe(sc->axe_ep[AXE_ENDPT_INTR]); if (err) { printf("axe%d: close intr pipe failed: %s\n", sc->axe_unit, usbd_errstr(err)); } sc->axe_ep[AXE_ENDPT_INTR] = NULL; } axe_reset(sc); /* Free RX resources. */ usb_ether_rx_list_free(&sc->axe_cdata); /* Free TX resources. */ usb_ether_tx_list_free(&sc->axe_cdata); ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); sc->axe_link = 0; AXE_UNLOCK(sc); return; } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. 
*/ Static void axe_shutdown(device_ptr_t dev) { struct axe_softc *sc; sc = device_get_softc(dev); axe_stop(sc); return; } Index: stable/6/sys/dev/usb/if_cue.c =================================================================== --- stable/6/sys/dev/usb/if_cue.c (revision 149421) +++ stable/6/sys/dev/usb/if_cue.c (revision 149422) @@ -1,1098 +1,1100 @@ /*- * Copyright (c) 1997, 1998, 1999, 2000 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * CATC USB-EL1210A USB to ethernet driver. Used in the CATC Netmate * adapters and others. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The CATC USB-EL1210A provides USB ethernet support at 10Mbps. The * RX filter uses a 512-bit multicast hash table, single perfect entry * for the station address, and promiscuous mode. Unlike the ADMtek * and KLSI chips, the CATC ASIC supports read and write combining * mode where multiple packets can be transfered using a single bulk * transaction, which helps performance a great deal. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if __FreeBSD_version < 500000 #include #endif #include #include #include #include #include "usbdevs.h" #include #include /* * Various supported device vendors/products. 
*/ Static struct cue_type cue_devs[] = { { USB_VENDOR_CATC, USB_PRODUCT_CATC_NETMATE }, { USB_VENDOR_CATC, USB_PRODUCT_CATC_NETMATE2 }, { USB_VENDOR_SMARTBRIDGES, USB_PRODUCT_SMARTBRIDGES_SMARTLINK }, { 0, 0 } }; Static int cue_match(device_ptr_t); Static int cue_attach(device_ptr_t); Static int cue_detach(device_ptr_t); Static int cue_encap(struct cue_softc *, struct mbuf *, int); Static void cue_rxeof(usbd_xfer_handle, usbd_private_handle, usbd_status); Static void cue_txeof(usbd_xfer_handle, usbd_private_handle, usbd_status); Static void cue_tick(void *); Static void cue_rxstart(struct ifnet *); Static void cue_start(struct ifnet *); Static int cue_ioctl(struct ifnet *, u_long, caddr_t); Static void cue_init(void *); Static void cue_stop(struct cue_softc *); Static void cue_watchdog(struct ifnet *); Static void cue_shutdown(device_ptr_t); Static void cue_setmulti(struct cue_softc *); Static uint32_t cue_mchash(const uint8_t *); Static void cue_reset(struct cue_softc *); Static int cue_csr_read_1(struct cue_softc *, int); Static int cue_csr_write_1(struct cue_softc *, int, int); Static int cue_csr_read_2(struct cue_softc *, int); #ifdef notdef Static int cue_csr_write_2(struct cue_softc *, int, int); #endif Static int cue_mem(struct cue_softc *, int, int, void *, int); Static int cue_getmac(struct cue_softc *, void *); Static device_method_t cue_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cue_match), DEVMETHOD(device_attach, cue_attach), DEVMETHOD(device_detach, cue_detach), DEVMETHOD(device_shutdown, cue_shutdown), { 0, 0 } }; Static driver_t cue_driver = { "cue", cue_methods, sizeof(struct cue_softc) }; Static devclass_t cue_devclass; DRIVER_MODULE(cue, uhub, cue_driver, cue_devclass, usbd_driver_load, 0); MODULE_DEPEND(cue, usb, 1, 1, 1); MODULE_DEPEND(cue, ether, 1, 1, 1); #define CUE_SETBIT(sc, reg, x) \ cue_csr_write_1(sc, reg, cue_csr_read_1(sc, reg) | (x)) #define CUE_CLRBIT(sc, reg, x) \ cue_csr_write_1(sc, reg, cue_csr_read_1(sc, reg) & 
~(x)) Static int cue_csr_read_1(struct cue_softc *sc, int reg) { usb_device_request_t req; usbd_status err; u_int8_t val = 0; if (sc->cue_dying) return(0); CUE_LOCK(sc); req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = CUE_CMD_READREG; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, 1); err = usbd_do_request(sc->cue_udev, &req, &val); CUE_UNLOCK(sc); if (err) return(0); return(val); } Static int cue_csr_read_2(struct cue_softc *sc, int reg) { usb_device_request_t req; usbd_status err; u_int16_t val = 0; if (sc->cue_dying) return(0); CUE_LOCK(sc); req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = CUE_CMD_READREG; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, 2); err = usbd_do_request(sc->cue_udev, &req, &val); CUE_UNLOCK(sc); if (err) return(0); return(val); } Static int cue_csr_write_1(struct cue_softc *sc, int reg, int val) { usb_device_request_t req; usbd_status err; if (sc->cue_dying) return(0); CUE_LOCK(sc); req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = CUE_CMD_WRITEREG; USETW(req.wValue, val); USETW(req.wIndex, reg); USETW(req.wLength, 0); err = usbd_do_request(sc->cue_udev, &req, NULL); CUE_UNLOCK(sc); if (err) return(-1); return(0); } #ifdef notdef Static int cue_csr_write_2(struct cue_softc *sc, int reg, int val) { usb_device_request_t req; usbd_status err; if (sc->cue_dying) return(0); CUE_LOCK(sc); req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = CUE_CMD_WRITEREG; USETW(req.wValue, val); USETW(req.wIndex, reg); USETW(req.wLength, 0); err = usbd_do_request(sc->cue_udev, &req, NULL); CUE_UNLOCK(sc); if (err) return(-1); return(0); } #endif Static int cue_mem(struct cue_softc *sc, int cmd, int addr, void *buf, int len) { usb_device_request_t req; usbd_status err; if (sc->cue_dying) return(0); CUE_LOCK(sc); if (cmd == CUE_CMD_READSRAM) req.bmRequestType = UT_READ_VENDOR_DEVICE; else req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = cmd; USETW(req.wValue, 0); USETW(req.wIndex, 
addr); USETW(req.wLength, len); err = usbd_do_request(sc->cue_udev, &req, buf); CUE_UNLOCK(sc); if (err) return(-1); return(0); } Static int cue_getmac(struct cue_softc *sc, void *buf) { usb_device_request_t req; usbd_status err; if (sc->cue_dying) return(0); CUE_LOCK(sc); req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = CUE_CMD_GET_MACADDR; USETW(req.wValue, 0); USETW(req.wIndex, 0); USETW(req.wLength, ETHER_ADDR_LEN); err = usbd_do_request(sc->cue_udev, &req, buf); CUE_UNLOCK(sc); if (err) { printf("cue%d: read MAC address failed\n", sc->cue_unit); return(-1); } return(0); } #define CUE_BITS 9 Static uint32_t cue_mchash(const uint8_t *addr) { uint32_t crc; /* Compute CRC for the address value. */ crc = ether_crc32_le(addr, ETHER_ADDR_LEN); return (crc & ((1 << CUE_BITS) - 1)); } Static void cue_setmulti(struct cue_softc *sc) { struct ifnet *ifp; struct ifmultiaddr *ifma; u_int32_t h = 0, i; ifp = sc->cue_ifp; if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { for (i = 0; i < CUE_MCAST_TABLE_LEN; i++) sc->cue_mctab[i] = 0xFF; cue_mem(sc, CUE_CMD_WRITESRAM, CUE_MCAST_TABLE_ADDR, &sc->cue_mctab, CUE_MCAST_TABLE_LEN); return; } /* first, zot all the existing hash bits */ for (i = 0; i < CUE_MCAST_TABLE_LEN; i++) sc->cue_mctab[i] = 0; /* now program new ones */ + IF_ADDR_LOCK(ifp); #if __FreeBSD_version >= 500000 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) #else LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) #endif { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = cue_mchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); sc->cue_mctab[h >> 3] |= 1 << (h & 0x7); } + IF_ADDR_UNLOCK(ifp); /* * Also include the broadcast address in the filter * so we can receive broadcast frames. 
*/ if (ifp->if_flags & IFF_BROADCAST) { #if __FreeBSD_version >= 500000 h = cue_mchash(ifp->if_broadcastaddr); #else h = cue_mchash(etherbroadcastaddr); #endif sc->cue_mctab[h >> 3] |= 1 << (h & 0x7); } cue_mem(sc, CUE_CMD_WRITESRAM, CUE_MCAST_TABLE_ADDR, &sc->cue_mctab, CUE_MCAST_TABLE_LEN); return; } Static void cue_reset(struct cue_softc *sc) { usb_device_request_t req; usbd_status err; if (sc->cue_dying) return; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = CUE_CMD_RESET; USETW(req.wValue, 0); USETW(req.wIndex, 0); USETW(req.wLength, 0); err = usbd_do_request(sc->cue_udev, &req, NULL); if (err) printf("cue%d: reset failed\n", sc->cue_unit); /* Wait a little while for the chip to get its brains in order. */ DELAY(1000); return; } /* * Probe for a Pegasus chip. */ USB_MATCH(cue) { USB_MATCH_START(cue, uaa); struct cue_type *t; if (!uaa->iface) return(UMATCH_NONE); t = cue_devs; while(t->cue_vid) { if (uaa->vendor == t->cue_vid && uaa->product == t->cue_did) { return(UMATCH_VENDOR_PRODUCT); } t++; } return(UMATCH_NONE); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ USB_ATTACH(cue) { USB_ATTACH_START(cue, sc, uaa); char devinfo[1024]; u_char eaddr[ETHER_ADDR_LEN]; struct ifnet *ifp; usb_interface_descriptor_t *id; usb_endpoint_descriptor_t *ed; int i; bzero(sc, sizeof(struct cue_softc)); sc->cue_dev = self; sc->cue_iface = uaa->iface; sc->cue_udev = uaa->device; sc->cue_unit = device_get_unit(self); if (usbd_set_config_no(sc->cue_udev, CUE_CONFIG_NO, 0)) { printf("cue%d: getting interface handle failed\n", sc->cue_unit); USB_ATTACH_ERROR_RETURN; } id = usbd_get_interface_descriptor(uaa->iface); usbd_devinfo(uaa->device, 0, devinfo); device_set_desc_copy(self, devinfo); printf("%s: %s\n", USBDEVNAME(self), devinfo); /* Find endpoints. 
*/ for (i = 0; i < id->bNumEndpoints; i++) { ed = usbd_interface2endpoint_descriptor(uaa->iface, i); if (!ed) { printf("cue%d: couldn't get ep %d\n", sc->cue_unit, i); USB_ATTACH_ERROR_RETURN; } if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) { sc->cue_ed[CUE_ENDPT_RX] = ed->bEndpointAddress; } else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT && UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) { sc->cue_ed[CUE_ENDPT_TX] = ed->bEndpointAddress; } else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && UE_GET_XFERTYPE(ed->bmAttributes) == UE_INTERRUPT) { sc->cue_ed[CUE_ENDPT_INTR] = ed->bEndpointAddress; } } #if __FreeBSD_version >= 500000 mtx_init(&sc->cue_mtx, device_get_nameunit(self), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); #endif CUE_LOCK(sc); #ifdef notdef /* Reset the adapter. */ cue_reset(sc); #endif /* * Get station address. */ cue_getmac(sc, &eaddr); ifp = sc->cue_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { printf("cue%d: can not if_alloc()\n", sc->cue_unit); USB_ATTACH_ERROR_RETURN; } ifp->if_softc = sc; if_initname(ifp, "cue", sc->cue_unit); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_NEEDSGIANT; ifp->if_ioctl = cue_ioctl; ifp->if_start = cue_start; ifp->if_watchdog = cue_watchdog; ifp->if_init = cue_init; ifp->if_baudrate = 10000000; ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; sc->cue_qdat.ifp = ifp; sc->cue_qdat.if_rxstart = cue_rxstart; /* * Call MI attach routine. 
*/ #if __FreeBSD_version >= 500000 ether_ifattach(ifp, eaddr); #else ether_ifattach(ifp, ETHER_BPF_SUPPORTED); #endif callout_handle_init(&sc->cue_stat_ch); usb_register_netisr(); sc->cue_dying = 0; CUE_UNLOCK(sc); USB_ATTACH_SUCCESS_RETURN; } Static int cue_detach(device_ptr_t dev) { struct cue_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); CUE_LOCK(sc); ifp = sc->cue_ifp; sc->cue_dying = 1; untimeout(cue_tick, sc, sc->cue_stat_ch); #if __FreeBSD_version >= 500000 ether_ifdetach(ifp); #else ether_ifdetach(ifp, ETHER_BPF_SUPPORTED); if_free(ifp); #endif if (sc->cue_ep[CUE_ENDPT_TX] != NULL) usbd_abort_pipe(sc->cue_ep[CUE_ENDPT_TX]); if (sc->cue_ep[CUE_ENDPT_RX] != NULL) usbd_abort_pipe(sc->cue_ep[CUE_ENDPT_RX]); if (sc->cue_ep[CUE_ENDPT_INTR] != NULL) usbd_abort_pipe(sc->cue_ep[CUE_ENDPT_INTR]); CUE_UNLOCK(sc); #if __FreeBSD_version >= 500000 mtx_destroy(&sc->cue_mtx); #endif return(0); } Static void cue_rxstart(struct ifnet *ifp) { struct cue_softc *sc; struct ue_chain *c; sc = ifp->if_softc; CUE_LOCK(sc); c = &sc->cue_cdata.ue_rx_chain[sc->cue_cdata.ue_rx_prod]; c->ue_mbuf = usb_ether_newbuf(); if (c->ue_mbuf == NULL) { printf("%s: no memory for rx list " "-- packet dropped!\n", USBDEVNAME(sc->cue_dev)); ifp->if_ierrors++; CUE_UNLOCK(sc); return; } /* Setup new transfer. */ usbd_setup_xfer(c->ue_xfer, sc->cue_ep[CUE_ENDPT_RX], c, mtod(c->ue_mbuf, char *), UE_BUFSZ, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, cue_rxeof); usbd_transfer(c->ue_xfer); CUE_UNLOCK(sc); return; } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. 
 */
Static void
cue_rxeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status)
{
	struct cue_softc	*sc;
	struct ue_chain		*c;
	struct mbuf		*m;
	struct ifnet		*ifp;
	int			total_len = 0;
	u_int16_t		len;

	/* The private handle is the RX chain slot for this transfer. */
	c = priv;
	sc = c->ue_sc;
	CUE_LOCK(sc);
	ifp = sc->cue_ifp;

	/* Interface went down while the transfer was pending. */
	if (!(ifp->if_flags & IFF_RUNNING)) {
		CUE_UNLOCK(sc);
		return;
	}

	if (status != USBD_NORMAL_COMPLETION) {
		/* Cancellation is part of normal teardown; stay quiet. */
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) {
			CUE_UNLOCK(sc);
			return;
		}
		/* Rate-limit error chatter on a babbling device. */
		if (usbd_ratecheck(&sc->cue_rx_notice))
			printf("cue%d: usb error on rx: %s\n", sc->cue_unit,
			    usbd_errstr(status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall(sc->cue_ep[CUE_ENDPT_RX]);
		goto done;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &total_len, NULL);

	m = c->ue_mbuf;
	/* The chip prepends a little-endian 16-bit frame length. */
	len = *mtod(m, u_int16_t *);

	/* No errors; receive the packet. */
	total_len = len;

	if (len < sizeof(struct ether_header)) {
		ifp->if_ierrors++;
		goto done;
	}

	ifp->if_ipackets++;
	/* Strip the 2-byte length header before passing the frame up. */
	m_adj(m, sizeof(u_int16_t));
	m->m_pkthdr.rcvif = (void *)&sc->cue_qdat;
	m->m_pkthdr.len = m->m_len = total_len;

	/* Put the packet on the special USB input queue. */
	usb_ether_input(m);
	CUE_UNLOCK(sc);

	return;
done:
	/* Setup new transfer: reuse the same mbuf and re-arm the pipe. */
	usbd_setup_xfer(c->ue_xfer, sc->cue_ep[CUE_ENDPT_RX],
	    c, mtod(c->ue_mbuf, char *), UE_BUFSZ, USBD_SHORT_XFER_OK,
	    USBD_NO_TIMEOUT, cue_rxeof);
	usbd_transfer(c->ue_xfer);
	CUE_UNLOCK(sc);

	return;
}

/*
 * A frame was downloaded to the chip.  It's safe for us to clean up
 * the list buffers.
 */
Static void
cue_txeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status)
{
	struct cue_softc	*sc;
	struct ue_chain		*c;
	struct ifnet		*ifp;
	usbd_status		err;

	/* The private handle is the TX chain slot for this transfer. */
	c = priv;
	sc = c->ue_sc;
	CUE_LOCK(sc);
	ifp = sc->cue_ifp;

	if (status != USBD_NORMAL_COMPLETION) {
		/* Cancellation is part of normal teardown; stay quiet. */
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) {
			CUE_UNLOCK(sc);
			return;
		}
		printf("cue%d: usb error on tx: %s\n", sc->cue_unit,
		    usbd_errstr(status));
		/* A stalled endpoint must be cleared before it moves data again. */
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall(sc->cue_ep[CUE_ENDPT_TX]);
		CUE_UNLOCK(sc);
		return;
	}

	/* Transmit completed: disarm the watchdog and allow new output. */
	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;
	usbd_get_xfer_status(c->ue_xfer, NULL, NULL, NULL, &err);

	if (c->ue_mbuf != NULL) {
		c->ue_mbuf->m_pkthdr.rcvif = ifp;
		/* Hand the mbuf to the USB ether layer for deferred free. */
		usb_tx_done(c->ue_mbuf);
		c->ue_mbuf = NULL;
	}

	if (err)
		ifp->if_oerrors++;
	else
		ifp->if_opackets++;

	CUE_UNLOCK(sc);

	return;
}

/*
 * Once-a-second timer: harvest the chip's collision and frame-error
 * counters into the interface statistics, then reschedule.
 */
Static void
cue_tick(void *xsc)
{
	struct cue_softc	*sc;
	struct ifnet		*ifp;

	sc = xsc;

	if (sc == NULL)
		return;

	CUE_LOCK(sc);

	ifp = sc->cue_ifp;

	/* Reading a CSR counter clears it on the chip -- accumulate here. */
	ifp->if_collisions += cue_csr_read_2(sc, CUE_TX_SINGLECOLL);
	ifp->if_collisions += cue_csr_read_2(sc, CUE_TX_MULTICOLL);
	ifp->if_collisions += cue_csr_read_2(sc, CUE_TX_EXCESSCOLL);

	if (cue_csr_read_2(sc, CUE_RX_FRAMEERR))
		ifp->if_ierrors++;

	/* Reschedule ourselves. */
	sc->cue_stat_ch = timeout(cue_tick, sc, hz);

	CUE_UNLOCK(sc);

	return;
}

/*
 * Prepare and submit a single bulk-out USB transfer for mbuf 'm'
 * using TX chain slot 'idx'.  Returns 0 on success, EIO if the
 * transfer could not be started (the interface is then stopped).
 */
Static int
cue_encap(struct cue_softc *sc, struct mbuf *m, int idx)
{
	int			total_len;
	struct ue_chain		*c;
	usbd_status		err;

	c = &sc->cue_cdata.ue_tx_chain[idx];

	/*
	 * Copy the mbuf data into a contiguous buffer, leaving two
	 * bytes at the beginning to hold the frame length.
*/ m_copydata(m, 0, m->m_pkthdr.len, c->ue_buf + 2); c->ue_mbuf = m; total_len = m->m_pkthdr.len + 2; /* The first two bytes are the frame length */ c->ue_buf[0] = (u_int8_t)m->m_pkthdr.len; c->ue_buf[1] = (u_int8_t)(m->m_pkthdr.len >> 8); usbd_setup_xfer(c->ue_xfer, sc->cue_ep[CUE_ENDPT_TX], c, c->ue_buf, total_len, 0, 10000, cue_txeof); /* Transmit */ err = usbd_transfer(c->ue_xfer); if (err != USBD_IN_PROGRESS) { cue_stop(sc); return(EIO); } sc->cue_cdata.ue_tx_cnt++; return(0); } Static void cue_start(struct ifnet *ifp) { struct cue_softc *sc; struct mbuf *m_head = NULL; sc = ifp->if_softc; CUE_LOCK(sc); if (ifp->if_flags & IFF_OACTIVE) { CUE_UNLOCK(sc); return; } IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) { CUE_UNLOCK(sc); return; } if (cue_encap(sc, m_head, 0)) { IF_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; CUE_UNLOCK(sc); return; } /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, m_head); ifp->if_flags |= IFF_OACTIVE; /* * Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; CUE_UNLOCK(sc); return; } Static void cue_init(void *xsc) { struct cue_softc *sc = xsc; struct ifnet *ifp = sc->cue_ifp; struct ue_chain *c; usbd_status err; int i; if (ifp->if_flags & IFF_RUNNING) return; CUE_LOCK(sc); /* * Cancel pending I/O and free all RX/TX buffers. */ #ifdef foo cue_reset(sc); #endif /* Set MAC address */ for (i = 0; i < ETHER_ADDR_LEN; i++) cue_csr_write_1(sc, CUE_PAR0 - i, IFP2ENADDR(sc->cue_ifp)[i]); /* Enable RX logic. */ cue_csr_write_1(sc, CUE_ETHCTL, CUE_ETHCTL_RX_ON|CUE_ETHCTL_MCAST_ON); /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) { CUE_SETBIT(sc, CUE_ETHCTL, CUE_ETHCTL_PROMISC); } else { CUE_CLRBIT(sc, CUE_ETHCTL, CUE_ETHCTL_PROMISC); } /* Init TX ring. */ if (usb_ether_tx_list_init(sc, &sc->cue_cdata, sc->cue_udev) == ENOBUFS) { printf("cue%d: tx list init failed\n", sc->cue_unit); CUE_UNLOCK(sc); return; } /* Init RX ring. 
*/ if (usb_ether_rx_list_init(sc, &sc->cue_cdata, sc->cue_udev) == ENOBUFS) { printf("cue%d: rx list init failed\n", sc->cue_unit); CUE_UNLOCK(sc); return; } /* Load the multicast filter. */ cue_setmulti(sc); /* * Set the number of RX and TX buffers that we want * to reserve inside the ASIC. */ cue_csr_write_1(sc, CUE_RX_BUFPKTS, CUE_RX_FRAMES); cue_csr_write_1(sc, CUE_TX_BUFPKTS, CUE_TX_FRAMES); /* Set advanced operation modes. */ cue_csr_write_1(sc, CUE_ADVANCED_OPMODES, CUE_AOP_EMBED_RXLEN|0x01); /* 1 wait state */ /* Program the LED operation. */ cue_csr_write_1(sc, CUE_LEDCTL, CUE_LEDCTL_FOLLOW_LINK); /* Open RX and TX pipes. */ err = usbd_open_pipe(sc->cue_iface, sc->cue_ed[CUE_ENDPT_RX], USBD_EXCLUSIVE_USE, &sc->cue_ep[CUE_ENDPT_RX]); if (err) { printf("cue%d: open rx pipe failed: %s\n", sc->cue_unit, usbd_errstr(err)); CUE_UNLOCK(sc); return; } err = usbd_open_pipe(sc->cue_iface, sc->cue_ed[CUE_ENDPT_TX], USBD_EXCLUSIVE_USE, &sc->cue_ep[CUE_ENDPT_TX]); if (err) { printf("cue%d: open tx pipe failed: %s\n", sc->cue_unit, usbd_errstr(err)); CUE_UNLOCK(sc); return; } /* Start up the receive pipe. 
*/ for (i = 0; i < UE_RX_LIST_CNT; i++) { c = &sc->cue_cdata.ue_rx_chain[i]; usbd_setup_xfer(c->ue_xfer, sc->cue_ep[CUE_ENDPT_RX], c, mtod(c->ue_mbuf, char *), UE_BUFSZ, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, cue_rxeof); usbd_transfer(c->ue_xfer); } ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; CUE_UNLOCK(sc); sc->cue_stat_ch = timeout(cue_tick, sc, hz); return; } Static int cue_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct cue_softc *sc = ifp->if_softc; int error = 0; CUE_LOCK(sc); switch(command) { case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->cue_if_flags & IFF_PROMISC)) { CUE_SETBIT(sc, CUE_ETHCTL, CUE_ETHCTL_PROMISC); cue_setmulti(sc); } else if (ifp->if_flags & IFF_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->cue_if_flags & IFF_PROMISC) { CUE_CLRBIT(sc, CUE_ETHCTL, CUE_ETHCTL_PROMISC); cue_setmulti(sc); } else if (!(ifp->if_flags & IFF_RUNNING)) cue_init(sc); } else { if (ifp->if_flags & IFF_RUNNING) cue_stop(sc); } sc->cue_if_flags = ifp->if_flags; error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: cue_setmulti(sc); error = 0; break; default: error = ether_ioctl(ifp, command, data); break; } CUE_UNLOCK(sc); return(error); } Static void cue_watchdog(struct ifnet *ifp) { struct cue_softc *sc; struct ue_chain *c; usbd_status stat; sc = ifp->if_softc; CUE_LOCK(sc); ifp->if_oerrors++; printf("cue%d: watchdog timeout\n", sc->cue_unit); c = &sc->cue_cdata.ue_tx_chain[0]; usbd_get_xfer_status(c->ue_xfer, NULL, NULL, NULL, &stat); cue_txeof(c->ue_xfer, c, stat); if (ifp->if_snd.ifq_head != NULL) cue_start(ifp); CUE_UNLOCK(sc); return; } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. 
*/ Static void cue_stop(struct cue_softc *sc) { usbd_status err; struct ifnet *ifp; CUE_LOCK(sc); ifp = sc->cue_ifp; ifp->if_timer = 0; cue_csr_write_1(sc, CUE_ETHCTL, 0); cue_reset(sc); untimeout(cue_tick, sc, sc->cue_stat_ch); /* Stop transfers. */ if (sc->cue_ep[CUE_ENDPT_RX] != NULL) { err = usbd_abort_pipe(sc->cue_ep[CUE_ENDPT_RX]); if (err) { printf("cue%d: abort rx pipe failed: %s\n", sc->cue_unit, usbd_errstr(err)); } err = usbd_close_pipe(sc->cue_ep[CUE_ENDPT_RX]); if (err) { printf("cue%d: close rx pipe failed: %s\n", sc->cue_unit, usbd_errstr(err)); } sc->cue_ep[CUE_ENDPT_RX] = NULL; } if (sc->cue_ep[CUE_ENDPT_TX] != NULL) { err = usbd_abort_pipe(sc->cue_ep[CUE_ENDPT_TX]); if (err) { printf("cue%d: abort tx pipe failed: %s\n", sc->cue_unit, usbd_errstr(err)); } err = usbd_close_pipe(sc->cue_ep[CUE_ENDPT_TX]); if (err) { printf("cue%d: close tx pipe failed: %s\n", sc->cue_unit, usbd_errstr(err)); } sc->cue_ep[CUE_ENDPT_TX] = NULL; } if (sc->cue_ep[CUE_ENDPT_INTR] != NULL) { err = usbd_abort_pipe(sc->cue_ep[CUE_ENDPT_INTR]); if (err) { printf("cue%d: abort intr pipe failed: %s\n", sc->cue_unit, usbd_errstr(err)); } err = usbd_close_pipe(sc->cue_ep[CUE_ENDPT_INTR]); if (err) { printf("cue%d: close intr pipe failed: %s\n", sc->cue_unit, usbd_errstr(err)); } sc->cue_ep[CUE_ENDPT_INTR] = NULL; } /* Free RX resources. */ usb_ether_rx_list_free(&sc->cue_cdata); /* Free TX resources. */ usb_ether_tx_list_free(&sc->cue_cdata); ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); CUE_UNLOCK(sc); return; } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. 
*/ Static void cue_shutdown(device_ptr_t dev) { struct cue_softc *sc; sc = device_get_softc(dev); CUE_LOCK(sc); cue_reset(sc); cue_stop(sc); CUE_UNLOCK(sc); return; } Index: stable/6/sys/dev/usb/if_kue.c =================================================================== --- stable/6/sys/dev/usb/if_kue.c (revision 149421) +++ stable/6/sys/dev/usb/if_kue.c (revision 149422) @@ -1,1033 +1,1035 @@ /*- * Copyright (c) 1997, 1998, 1999, 2000 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Kawasaki LSI KL5KUSB101B USB to ethernet adapter driver. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The KLSI USB to ethernet adapter chip contains an USB serial interface, * ethernet MAC and embedded microcontroller (called the QT Engine). * The chip must have firmware loaded into it before it will operate. * Packets are passed between the chip and host via bulk transfers. * There is an interrupt endpoint mentioned in the software spec, however * it's currently unused. This device is 10Mbps half-duplex only, hence * there is no media selection logic. The MAC supports a 128 entry * multicast filter, though the exact size of the filter can depend * on the firmware. Curiously, while the software spec describes various * ethernet statistics counters, my sample adapter and firmware combination * claims not to support any statistics counters at all. * * Note that once we load the firmware in the device, we have to be * careful not to load it again: if you restart your computer but * leave the adapter attached to the USB controller, it may remain * powered on and retain its firmware. In this case, we don't need * to load the firmware a second time. * * Special thanks to Rob Furr for providing an ADS Technologies * adapter for development and testing. No monkeys were harmed during * the development of this driver. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if __FreeBSD_version < 500000 #include #endif #include #include #include #include #include "usbdevs.h" #include #include #include MODULE_DEPEND(kue, usb, 1, 1, 1); MODULE_DEPEND(kue, ether, 1, 1, 1); /* * Various supported device vendors/products. */ Static struct kue_type kue_devs[] = { { USB_VENDOR_AOX, USB_PRODUCT_AOX_USB101 }, { USB_VENDOR_KLSI, USB_PRODUCT_AOX_USB101 }, { USB_VENDOR_ADS, USB_PRODUCT_ADS_UBS10BT }, { USB_VENDOR_ATEN, USB_PRODUCT_ATEN_UC10T }, { USB_VENDOR_NETGEAR, USB_PRODUCT_NETGEAR_EA101 }, { USB_VENDOR_PERACOM, USB_PRODUCT_PERACOM_ENET }, { USB_VENDOR_PERACOM, USB_PRODUCT_PERACOM_ENET2 }, { USB_VENDOR_ENTREGA, USB_PRODUCT_ENTREGA_E45 }, { USB_VENDOR_3COM, USB_PRODUCT_3COM_3C19250 }, { USB_VENDOR_COREGA, USB_PRODUCT_COREGA_ETHER_USB_T }, { USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DSB650C }, { USB_VENDOR_SMC, USB_PRODUCT_SMC_2102USB }, { USB_VENDOR_LINKSYS, USB_PRODUCT_LINKSYS_USB10T }, { USB_VENDOR_KLSI, USB_PRODUCT_KLSI_DUH3E10BT }, { USB_VENDOR_KLSI, USB_PRODUCT_KLSI_DUH3E10BTN }, { USB_VENDOR_PERACOM, USB_PRODUCT_PERACOM_ENET3 }, { USB_VENDOR_IODATA, USB_PRODUCT_IODATA_USBETT }, { USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_URE450 }, { 0, 0 } }; Static int kue_match(device_ptr_t); Static int kue_attach(device_ptr_t); Static int kue_detach(device_ptr_t); Static void kue_shutdown(device_ptr_t); Static int kue_encap(struct kue_softc *, struct mbuf *, int); Static void kue_rxeof(usbd_xfer_handle, usbd_private_handle, usbd_status); Static void kue_txeof(usbd_xfer_handle, usbd_private_handle, usbd_status); Static void kue_start(struct ifnet *); Static void kue_rxstart(struct ifnet *); Static int kue_ioctl(struct ifnet *, u_long, caddr_t); Static void kue_init(void *); Static void kue_stop(struct kue_softc *); Static void kue_watchdog(struct ifnet *); Static void kue_setmulti(struct kue_softc *); Static 
void kue_reset(struct kue_softc *); Static usbd_status kue_do_request(usbd_device_handle, usb_device_request_t *, void *); Static usbd_status kue_ctl(struct kue_softc *, int, u_int8_t, u_int16_t, char *, int); Static usbd_status kue_setword(struct kue_softc *, u_int8_t, u_int16_t); Static int kue_load_fw(struct kue_softc *); Static device_method_t kue_methods[] = { /* Device interface */ DEVMETHOD(device_probe, kue_match), DEVMETHOD(device_attach, kue_attach), DEVMETHOD(device_detach, kue_detach), DEVMETHOD(device_shutdown, kue_shutdown), { 0, 0 } }; Static driver_t kue_driver = { "kue", kue_methods, sizeof(struct kue_softc) }; Static devclass_t kue_devclass; DRIVER_MODULE(kue, uhub, kue_driver, kue_devclass, usbd_driver_load, 0); /* * We have a custom do_request function which is almost like the * regular do_request function, except it has a much longer timeout. * Why? Because we need to make requests over the control endpoint * to download the firmware to the device, which can take longer * than the default timeout. 
*/
Static usbd_status
kue_do_request(usbd_device_handle dev, usb_device_request_t *req, void *data)
{
    usbd_xfer_handle xfer;
    usbd_status err;

    xfer = usbd_alloc_xfer(dev);
    /*
     * 500000 is a deliberately long timeout: firmware download over
     * the control endpoint can take much longer than the default
     * (see the comment above this function).
     */
    usbd_setup_default_xfer(xfer, dev, 0, 500000, req, data,
        UGETW(req->wLength), USBD_SHORT_XFER_OK, 0);
    err = usbd_sync_transfer(xfer);
    usbd_free_xfer(xfer);
    return(err);
}

/*
 * Issue one vendor-specific write request carrying a 16-bit value in
 * wValue and no data stage.  Returns the usbd transfer status.
 */
Static usbd_status
kue_setword(struct kue_softc *sc, u_int8_t breq, u_int16_t word)
{
    usbd_device_handle dev;
    usb_device_request_t req;
    usbd_status err;

    /* Device is going away: report success without touching hardware. */
    if (sc->kue_dying)
        return(USBD_NORMAL_COMPLETION);

    dev = sc->kue_udev;

    KUE_LOCK(sc);

    req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
    req.bRequest = breq;
    USETW(req.wValue, word);
    USETW(req.wIndex, 0);
    USETW(req.wLength, 0);

    err = kue_do_request(dev, &req, NULL);

    KUE_UNLOCK(sc);

    return(err);
}

/*
 * Vendor-specific control transfer with a data stage of 'len' bytes.
 * 'rw' selects direction: KUE_CTL_WRITE for host-to-device, anything
 * else reads device-to-host into 'data'.
 */
Static usbd_status
kue_ctl(struct kue_softc *sc, int rw, u_int8_t breq, u_int16_t val,
    char *data, int len)
{
    usbd_device_handle dev;
    usb_device_request_t req;
    usbd_status err;

    dev = sc->kue_udev;

    /* Device is going away: report success without touching hardware. */
    if (sc->kue_dying)
        return(USBD_NORMAL_COMPLETION);

    KUE_LOCK(sc);

    if (rw == KUE_CTL_WRITE)
        req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
    else
        req.bmRequestType = UT_READ_VENDOR_DEVICE;

    req.bRequest = breq;
    USETW(req.wValue, val);
    USETW(req.wIndex, 0);
    USETW(req.wLength, len);

    err = kue_do_request(dev, &req, data);

    KUE_UNLOCK(sc);

    return(err);
}

/*
 * Download the three firmware segments (code, fixup, trigger) into the
 * adapter via KUE_CMD_SEND_SCAN.  Returns 0 on success or ENXIO if any
 * segment fails to load.
 */
Static int
kue_load_fw(struct kue_softc *sc)
{
    usbd_status err;
    usb_device_descriptor_t *dd;
    int hwrev;

    dd = &sc->kue_udev->ddesc;
    hwrev = UGETW(dd->bcdDevice);

    /*
     * First, check if we even need to load the firmware.
     * If the device was still attached when the system was
     * rebooted, it may already have firmware loaded in it.
     * If this is the case, we don't need to do it again.
     * And in fact, if we try to load it again, we'll hang,
     * so we have to avoid this condition if we don't want
     * to look stupid.
     *
     * We can test this quickly by checking the bcdRevision
     * code. The NIC will return a different revision code if
     * it's probed while the firmware is still loaded and
     * running.
     */
    if (hwrev == 0x0202)
        return(0);

    /* Load code segment */
    err = kue_ctl(sc, KUE_CTL_WRITE, KUE_CMD_SEND_SCAN,
        0, kue_code_seg, sizeof(kue_code_seg));
    if (err) {
        printf("kue%d: failed to load code segment: %s\n",
            sc->kue_unit, usbd_errstr(err));
        return(ENXIO);
    }

    /* Load fixup segment */
    err = kue_ctl(sc, KUE_CTL_WRITE, KUE_CMD_SEND_SCAN,
        0, kue_fix_seg, sizeof(kue_fix_seg));
    if (err) {
        printf("kue%d: failed to load fixup segment: %s\n",
            sc->kue_unit, usbd_errstr(err));
        return(ENXIO);
    }

    /* Send trigger command. */
    err = kue_ctl(sc, KUE_CTL_WRITE, KUE_CMD_SEND_SCAN,
        0, kue_trig_seg, sizeof(kue_trig_seg));
    if (err) {
        printf("kue%d: failed to load trigger segment: %s\n",
            sc->kue_unit, usbd_errstr(err));
        return(ENXIO);
    }

    return(0);
}

/*
 * Program the adapter's multicast filter.  Falls back to allmulti when
 * the interface asks for it or when there are more groups than the
 * chip's internal filter can hold (KUE_MCFILTCNT slots).
 *
 * NOTE(review): the leading '+' on the IF_ADDR_LOCK/IF_ADDR_UNLOCK
 * lines below is unified-diff residue from upstream r149422 (which
 * added address-list locking); it is preserved verbatim here.
 */
Static void
kue_setmulti(struct kue_softc *sc)
{
    struct ifnet *ifp;
    struct ifmultiaddr *ifma;
    int i = 0;

    ifp = sc->kue_ifp;

    if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
        sc->kue_rxfilt |= KUE_RXFILT_ALLMULTI;
        sc->kue_rxfilt &= ~KUE_RXFILT_MULTICAST;
        kue_setword(sc, KUE_CMD_SET_PKT_FILTER, sc->kue_rxfilt);
        return;
    }

    sc->kue_rxfilt &= ~KUE_RXFILT_ALLMULTI;

+ IF_ADDR_LOCK(ifp);
#if __FreeBSD_version >= 500000
    TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
#else
    LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link)
#endif
    {
        if (ifma->ifma_addr->sa_family != AF_LINK)
            continue;
        /*
         * If there are too many addresses for the
         * internal filter, switch over to allmulti mode.
         */
        if (i == KUE_MCFILTCNT(sc))
            break;
        /*
         * NOTE(review): sc->kue_mcfilters is allocated with
         * M_NOWAIT in attach and never NULL-checked here —
         * presumably attach always succeeds in practice; verify.
         */
        bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
            KUE_MCFILT(sc, i), ETHER_ADDR_LEN);
        i++;
    }
+ IF_ADDR_UNLOCK(ifp);

    if (i == KUE_MCFILTCNT(sc))
        sc->kue_rxfilt |= KUE_RXFILT_ALLMULTI;
    else {
        sc->kue_rxfilt |= KUE_RXFILT_MULTICAST;
        kue_ctl(sc, KUE_CTL_WRITE, KUE_CMD_SET_MCAST_FILTERS,
            i, sc->kue_mcfilters, i * ETHER_ADDR_LEN);
    }

    kue_setword(sc, KUE_CMD_SET_PKT_FILTER, sc->kue_rxfilt);

    return;
}

/*
 * Issue a SET_CONFIGURATION command to reset the MAC. This should be
 * done after the firmware is loaded into the adapter in order to
 * bring it into proper operation.
 */
Static void
kue_reset(struct kue_softc *sc)
{
    if (usbd_set_config_no(sc->kue_udev, KUE_CONFIG_NO, 0) ||
        usbd_device2interface_handle(sc->kue_udev,
            KUE_IFACE_IDX, &sc->kue_iface)) {
        printf("kue%d: getting interface handle failed\n",
            sc->kue_unit);
    }

    /* Wait a little while for the chip to get its brains in order. */
    DELAY(1000);
    return;
}

/*
 * Probe for a KLSI chip: walk the kue_devs vendor/product table.
 */
USB_MATCH(kue)
{
    USB_MATCH_START(kue, uaa);
    struct kue_type *t;

    if (!uaa->iface)
        return(UMATCH_NONE);

    t = kue_devs;
    while(t->kue_vid) {
        if (uaa->vendor == t->kue_vid &&
            uaa->product == t->kue_did) {
            return(UMATCH_VENDOR_PRODUCT);
        }
        t++;
    }
    return(UMATCH_NONE);
}

/*
 * Attach the interface. Allocate softc structures, do
 * setup and ethernet/BPF attach.
 */
USB_ATTACH(kue)
{
    USB_ATTACH_START(kue, sc, uaa);
    char devinfo[1024];
    struct ifnet *ifp;
    usbd_status err;
    usb_interface_descriptor_t *id;
    usb_endpoint_descriptor_t *ed;
    int i;

    bzero(sc, sizeof(struct kue_softc));
    sc->kue_dev = self;
    sc->kue_iface = uaa->iface;
    sc->kue_udev = uaa->device;
    sc->kue_unit = device_get_unit(self);

    id = usbd_get_interface_descriptor(uaa->iface);

    usbd_devinfo(uaa->device, 0, devinfo);
    device_set_desc_copy(self, devinfo);
    printf("%s: %s\n", USBDEVNAME(self), devinfo);

    /* Find endpoints: bulk IN (rx), bulk OUT (tx), interrupt IN. */
    for (i = 0; i < id->bNumEndpoints; i++) {
        ed = usbd_interface2endpoint_descriptor(uaa->iface, i);
        if (!ed) {
            printf("kue%d: couldn't get ep %d\n",
                sc->kue_unit, i);
            USB_ATTACH_ERROR_RETURN;
        }
        if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
            UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) {
            sc->kue_ed[KUE_ENDPT_RX] = ed->bEndpointAddress;
        } else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT &&
            UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) {
            sc->kue_ed[KUE_ENDPT_TX] = ed->bEndpointAddress;
        } else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN &&
            UE_GET_XFERTYPE(ed->bmAttributes) == UE_INTERRUPT) {
            sc->kue_ed[KUE_ENDPT_INTR] = ed->bEndpointAddress;
        }
    }

#if __FreeBSD_version >= 500000
    mtx_init(&sc->kue_mtx, device_get_nameunit(self), MTX_NETWORK_LOCK,
        MTX_DEF | MTX_RECURSE);
#endif
    KUE_LOCK(sc);

    /* Load the firmware into the NIC. */
    if (kue_load_fw(sc)) {
        KUE_UNLOCK(sc);
#if __FreeBSD_version >= 500000
        mtx_destroy(&sc->kue_mtx);
#endif
        USB_ATTACH_ERROR_RETURN;
    }

    /* Reset the adapter. */
    kue_reset(sc);

    /* Read ethernet descriptor (includes the MAC address). */
    err = kue_ctl(sc, KUE_CTL_READ, KUE_CMD_GET_ETHER_DESCRIPTOR,
        0, (char *)&sc->kue_desc, sizeof(sc->kue_desc));

    /* NOTE(review): M_NOWAIT allocation is not checked for NULL here. */
    sc->kue_mcfilters = malloc(KUE_MCFILTCNT(sc) * ETHER_ADDR_LEN,
        M_USBDEV, M_NOWAIT);

    ifp = sc->kue_ifp = if_alloc(IFT_ETHER);
    if (ifp == NULL) {
        /*
         * NOTE(review): this error path does not unlock or destroy
         * the mutex, unlike the kue_load_fw failure path above —
         * looks like a leak; confirm against upstream.
         */
        printf("kue%d: can not if_alloc()\n", sc->kue_unit);
        USB_ATTACH_ERROR_RETURN;
    }
    ifp->if_softc = sc;
    if_initname(ifp, "kue", sc->kue_unit);
    ifp->if_mtu = ETHERMTU;
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
        IFF_NEEDSGIANT;
    ifp->if_ioctl = kue_ioctl;
    ifp->if_start = kue_start;
    ifp->if_watchdog = kue_watchdog;
    ifp->if_init = kue_init;
    ifp->if_baudrate = 10000000;
    ifp->if_snd.ifq_maxlen = IFQ_MAXLEN;

    sc->kue_qdat.ifp = ifp;
    sc->kue_qdat.if_rxstart = kue_rxstart;

    /*
     * Call MI attach routine.
     */
#if __FreeBSD_version >= 500000
    ether_ifattach(ifp, sc->kue_desc.kue_macaddr);
#else
    ether_ifattach(ifp, ETHER_BPF_SUPPORTED);
#endif
    usb_register_netisr();
    sc->kue_dying = 0;

    KUE_UNLOCK(sc);

    USB_ATTACH_SUCCESS_RETURN;
}

/*
 * Detach: mark the device dying, detach the ethernet interface, abort
 * all three pipes, free the multicast filter buffer and the mutex.
 */
Static int
kue_detach(device_ptr_t dev)
{
    struct kue_softc *sc;
    struct ifnet *ifp;

    sc = device_get_softc(dev);
    KUE_LOCK(sc);
    ifp = sc->kue_ifp;

    sc->kue_dying = 1;

    if (ifp != NULL)
#if __FreeBSD_version >= 500000
        ether_ifdetach(ifp);
#else
        /*
         * NOTE(review): braceless if with two statements in this
         * branch — if_free() runs unconditionally on pre-5.x;
         * confirm intended.
         */
        ether_ifdetach(ifp, ETHER_BPF_SUPPORTED);
        if_free(ifp);
#endif

    if (sc->kue_ep[KUE_ENDPT_TX] != NULL)
        usbd_abort_pipe(sc->kue_ep[KUE_ENDPT_TX]);
    if (sc->kue_ep[KUE_ENDPT_RX] != NULL)
        usbd_abort_pipe(sc->kue_ep[KUE_ENDPT_RX]);
    if (sc->kue_ep[KUE_ENDPT_INTR] != NULL)
        usbd_abort_pipe(sc->kue_ep[KUE_ENDPT_INTR]);

    if (sc->kue_mcfilters != NULL)
        free(sc->kue_mcfilters, M_USBDEV);

    KUE_UNLOCK(sc);
#if __FreeBSD_version >= 500000
    mtx_destroy(&sc->kue_mtx);
#endif

    return(0);
}

/*
 * Replenish one rx buffer at the current producer index and requeue
 * the bulk-IN transfer.  Called from the USB ether input path.
 */
Static void
kue_rxstart(struct ifnet *ifp)
{
    struct kue_softc *sc;
    struct ue_chain *c;

    sc = ifp->if_softc;
    KUE_LOCK(sc);
    c = &sc->kue_cdata.ue_rx_chain[sc->kue_cdata.ue_rx_prod];

    c->ue_mbuf = usb_ether_newbuf();
    if (c->ue_mbuf == NULL) {
        printf("%s: no memory for rx list "
            "-- packet dropped!\n", USBDEVNAME(sc->kue_dev));
        ifp->if_ierrors++;
        KUE_UNLOCK(sc);
        return;
    }

    /* Setup new transfer. */
    usbd_setup_xfer(c->ue_xfer, sc->kue_ep[KUE_ENDPT_RX],
        c, mtod(c->ue_mbuf, char *), UE_BUFSZ, USBD_SHORT_XFER_OK,
        USBD_NO_TIMEOUT, kue_rxeof);
    usbd_transfer(c->ue_xfer);
    KUE_UNLOCK(sc);

    return;
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
*/
/*
 * Bulk-IN completion callback.  Validates the transfer, strips the
 * 2-byte frame-length header the adapter prepends, and hands the
 * mbuf to the USB ethernet input queue; on any error the transfer is
 * simply requeued via the 'done' path.
 */
Static void
kue_rxeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status)
{
    struct kue_softc *sc;
    struct ue_chain *c;
    struct mbuf *m;
    struct ifnet *ifp;
    int total_len = 0;
    u_int16_t len;

    c = priv;
    sc = c->ue_sc;
    KUE_LOCK(sc);
    ifp = sc->kue_ifp;

    /* Interface was brought down while the transfer was in flight. */
    if (!(ifp->if_flags & IFF_RUNNING)) {
        KUE_UNLOCK(sc);
        return;
    }

    if (status != USBD_NORMAL_COMPLETION) {
        /* Aborted/cancelled: do not requeue. */
        if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) {
            KUE_UNLOCK(sc);
            return;
        }
        /* Rate-limit the error message to avoid console spam. */
        if (usbd_ratecheck(&sc->kue_rx_notice))
            printf("kue%d: usb error on rx: %s\n", sc->kue_unit,
                usbd_errstr(status));
        if (status == USBD_STALLED)
            usbd_clear_endpoint_stall(sc->kue_ep[KUE_ENDPT_RX]);
        goto done;
    }

    usbd_get_xfer_status(xfer, NULL, NULL, &total_len, NULL);

    m = c->ue_mbuf;
    /* Too short to even contain the 2-byte length header. */
    if (total_len <= 1)
        goto done;

    /*
     * First two bytes carry the frame length; strip them and trust
     * that value rather than the transfer length.
     */
    len = *mtod(m, u_int16_t *);
    m_adj(m, sizeof(u_int16_t));

    /* No errors; receive the packet. */
    total_len = len;

    if (len < sizeof(struct ether_header)) {
        ifp->if_ierrors++;
        goto done;
    }

    ifp->if_ipackets++;
    /* rcvif points at the usb_qdat so kue_rxstart can requeue later. */
    m->m_pkthdr.rcvif = (void *)&sc->kue_qdat;
    m->m_pkthdr.len = m->m_len = total_len;

    /* Put the packet on the special USB input queue. */
    usb_ether_input(m);
    KUE_UNLOCK(sc);

    return;
done:

    /* Setup new transfer. */
    usbd_setup_xfer(c->ue_xfer, sc->kue_ep[KUE_ENDPT_RX],
        c, mtod(c->ue_mbuf, char *), UE_BUFSZ, USBD_SHORT_XFER_OK,
        USBD_NO_TIMEOUT, kue_rxeof);
    usbd_transfer(c->ue_xfer);
    KUE_UNLOCK(sc);

    return;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
*/ Static void kue_txeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status) { struct kue_softc *sc; struct ue_chain *c; struct ifnet *ifp; usbd_status err; c = priv; sc = c->ue_sc; KUE_LOCK(sc); ifp = sc->kue_ifp; ifp->if_timer = 0; ifp->if_flags &= ~IFF_OACTIVE; if (status != USBD_NORMAL_COMPLETION) { if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) { KUE_UNLOCK(sc); return; } printf("kue%d: usb error on tx: %s\n", sc->kue_unit, usbd_errstr(status)); if (status == USBD_STALLED) usbd_clear_endpoint_stall(sc->kue_ep[KUE_ENDPT_TX]); KUE_UNLOCK(sc); return; } usbd_get_xfer_status(c->ue_xfer, NULL, NULL, NULL, &err); if (c->ue_mbuf != NULL) { c->ue_mbuf->m_pkthdr.rcvif = ifp; usb_tx_done(c->ue_mbuf); c->ue_mbuf = NULL; } if (err) ifp->if_oerrors++; else ifp->if_opackets++; KUE_UNLOCK(sc); return; } Static int kue_encap(struct kue_softc *sc, struct mbuf *m, int idx) { int total_len; struct ue_chain *c; usbd_status err; c = &sc->kue_cdata.ue_tx_chain[idx]; /* * Copy the mbuf data into a contiguous buffer, leaving two * bytes at the beginning to hold the frame length. */ m_copydata(m, 0, m->m_pkthdr.len, c->ue_buf + 2); c->ue_mbuf = m; total_len = m->m_pkthdr.len + 2; total_len += 64 - (total_len % 64); /* Frame length is specified in the first 2 bytes of the buffer. 
*/ c->ue_buf[0] = (u_int8_t)m->m_pkthdr.len; c->ue_buf[1] = (u_int8_t)(m->m_pkthdr.len >> 8); usbd_setup_xfer(c->ue_xfer, sc->kue_ep[KUE_ENDPT_TX], c, c->ue_buf, total_len, 0, 10000, kue_txeof); /* Transmit */ err = usbd_transfer(c->ue_xfer); if (err != USBD_IN_PROGRESS) { kue_stop(sc); return(EIO); } sc->kue_cdata.ue_tx_cnt++; return(0); } Static void kue_start(struct ifnet *ifp) { struct kue_softc *sc; struct mbuf *m_head = NULL; sc = ifp->if_softc; KUE_LOCK(sc); if (ifp->if_flags & IFF_OACTIVE) { KUE_UNLOCK(sc); return; } IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) { KUE_UNLOCK(sc); return; } if (kue_encap(sc, m_head, 0)) { IF_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; KUE_UNLOCK(sc); return; } /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, m_head); ifp->if_flags |= IFF_OACTIVE; /* * Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; KUE_UNLOCK(sc); return; } Static void kue_init(void *xsc) { struct kue_softc *sc = xsc; struct ifnet *ifp = sc->kue_ifp; struct ue_chain *c; usbd_status err; int i; KUE_LOCK(sc); if (ifp->if_flags & IFF_RUNNING) { KUE_UNLOCK(sc); return; } /* Set MAC address */ kue_ctl(sc, KUE_CTL_WRITE, KUE_CMD_SET_MAC, 0, IFP2ENADDR(sc->kue_ifp), ETHER_ADDR_LEN); sc->kue_rxfilt = KUE_RXFILT_UNICAST|KUE_RXFILT_BROADCAST; /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) sc->kue_rxfilt |= KUE_RXFILT_PROMISC; kue_setword(sc, KUE_CMD_SET_PKT_FILTER, sc->kue_rxfilt); /* I'm not sure how to tune these. */ #ifdef notdef /* * Leave this one alone for now; setting it * wrong causes lockups on some machines/controllers. */ kue_setword(sc, KUE_CMD_SET_SOFS, 1); #endif kue_setword(sc, KUE_CMD_SET_URB_SIZE, 64); /* Init TX ring. */ if (usb_ether_tx_list_init(sc, &sc->kue_cdata, sc->kue_udev) == ENOBUFS) { printf("kue%d: tx list init failed\n", sc->kue_unit); KUE_UNLOCK(sc); return; } /* Init RX ring. 
*/ if (usb_ether_rx_list_init(sc, &sc->kue_cdata, sc->kue_udev) == ENOBUFS) { printf("kue%d: rx list init failed\n", sc->kue_unit); KUE_UNLOCK(sc); return; } /* Load the multicast filter. */ kue_setmulti(sc); /* Open RX and TX pipes. */ err = usbd_open_pipe(sc->kue_iface, sc->kue_ed[KUE_ENDPT_RX], USBD_EXCLUSIVE_USE, &sc->kue_ep[KUE_ENDPT_RX]); if (err) { printf("kue%d: open rx pipe failed: %s\n", sc->kue_unit, usbd_errstr(err)); KUE_UNLOCK(sc); return; } err = usbd_open_pipe(sc->kue_iface, sc->kue_ed[KUE_ENDPT_TX], USBD_EXCLUSIVE_USE, &sc->kue_ep[KUE_ENDPT_TX]); if (err) { printf("kue%d: open tx pipe failed: %s\n", sc->kue_unit, usbd_errstr(err)); KUE_UNLOCK(sc); return; } /* Start up the receive pipe. */ for (i = 0; i < UE_RX_LIST_CNT; i++) { c = &sc->kue_cdata.ue_rx_chain[i]; usbd_setup_xfer(c->ue_xfer, sc->kue_ep[KUE_ENDPT_RX], c, mtod(c->ue_mbuf, char *), UE_BUFSZ, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, kue_rxeof); usbd_transfer(c->ue_xfer); } ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; KUE_UNLOCK(sc); return; } Static int kue_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct kue_softc *sc = ifp->if_softc; int error = 0; KUE_LOCK(sc); switch(command) { case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->kue_if_flags & IFF_PROMISC)) { sc->kue_rxfilt |= KUE_RXFILT_PROMISC; kue_setword(sc, KUE_CMD_SET_PKT_FILTER, sc->kue_rxfilt); } else if (ifp->if_flags & IFF_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->kue_if_flags & IFF_PROMISC) { sc->kue_rxfilt &= ~KUE_RXFILT_PROMISC; kue_setword(sc, KUE_CMD_SET_PKT_FILTER, sc->kue_rxfilt); } else if (!(ifp->if_flags & IFF_RUNNING)) kue_init(sc); } else { if (ifp->if_flags & IFF_RUNNING) kue_stop(sc); } sc->kue_if_flags = ifp->if_flags; error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: kue_setmulti(sc); error = 0; break; default: error = ether_ioctl(ifp, command, data); break; } KUE_UNLOCK(sc); return(error); } Static 
void kue_watchdog(struct ifnet *ifp) { struct kue_softc *sc; struct ue_chain *c; usbd_status stat; sc = ifp->if_softc; KUE_LOCK(sc); ifp->if_oerrors++; printf("kue%d: watchdog timeout\n", sc->kue_unit); c = &sc->kue_cdata.ue_tx_chain[0]; usbd_get_xfer_status(c->ue_xfer, NULL, NULL, NULL, &stat); kue_txeof(c->ue_xfer, c, stat); if (ifp->if_snd.ifq_head != NULL) kue_start(ifp); KUE_UNLOCK(sc); return; } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ Static void kue_stop(struct kue_softc *sc) { usbd_status err; struct ifnet *ifp; KUE_LOCK(sc); ifp = sc->kue_ifp; ifp->if_timer = 0; /* Stop transfers. */ if (sc->kue_ep[KUE_ENDPT_RX] != NULL) { err = usbd_abort_pipe(sc->kue_ep[KUE_ENDPT_RX]); if (err) { printf("kue%d: abort rx pipe failed: %s\n", sc->kue_unit, usbd_errstr(err)); } err = usbd_close_pipe(sc->kue_ep[KUE_ENDPT_RX]); if (err) { printf("kue%d: close rx pipe failed: %s\n", sc->kue_unit, usbd_errstr(err)); } sc->kue_ep[KUE_ENDPT_RX] = NULL; } if (sc->kue_ep[KUE_ENDPT_TX] != NULL) { err = usbd_abort_pipe(sc->kue_ep[KUE_ENDPT_TX]); if (err) { printf("kue%d: abort tx pipe failed: %s\n", sc->kue_unit, usbd_errstr(err)); } err = usbd_close_pipe(sc->kue_ep[KUE_ENDPT_TX]); if (err) { printf("kue%d: close tx pipe failed: %s\n", sc->kue_unit, usbd_errstr(err)); } sc->kue_ep[KUE_ENDPT_TX] = NULL; } if (sc->kue_ep[KUE_ENDPT_INTR] != NULL) { err = usbd_abort_pipe(sc->kue_ep[KUE_ENDPT_INTR]); if (err) { printf("kue%d: abort intr pipe failed: %s\n", sc->kue_unit, usbd_errstr(err)); } err = usbd_close_pipe(sc->kue_ep[KUE_ENDPT_INTR]); if (err) { printf("kue%d: close intr pipe failed: %s\n", sc->kue_unit, usbd_errstr(err)); } sc->kue_ep[KUE_ENDPT_INTR] = NULL; } /* Free RX resources. */ usb_ether_rx_list_free(&sc->kue_cdata); /* Free TX resources. 
*/ usb_ether_tx_list_free(&sc->kue_cdata); ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); KUE_UNLOCK(sc); return; } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ Static void kue_shutdown(device_ptr_t dev) { struct kue_softc *sc; sc = device_get_softc(dev); kue_stop(sc); return; } Index: stable/6/sys/dev/usb/if_rue.c =================================================================== --- stable/6/sys/dev/usb/if_rue.c (revision 149421) +++ stable/6/sys/dev/usb/if_rue.c (revision 149422) @@ -1,1404 +1,1406 @@ /*- * Copyright (c) 2001-2003, Shunsuke Akiyama . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 1997, 1998, 1999, 2000 * Bill Paul . All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * RealTek RTL8150 USB to fast ethernet controller driver. * Datasheet is available from * ftp://ftp.realtek.com.tw/lancard/data_sheet/8150/. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if __FreeBSD_version < 500000 #include #endif #include #include #include #include #include "usbdevs.h" #include #include #include #include /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" #ifdef USB_DEBUG Static int ruedebug = 0; SYSCTL_NODE(_hw_usb, OID_AUTO, rue, CTLFLAG_RW, 0, "USB rue"); SYSCTL_INT(_hw_usb_rue, OID_AUTO, debug, CTLFLAG_RW, &ruedebug, 0, "rue debug level"); #define DPRINTFN(n, x) do { \ if (ruedebug > (n)) \ logprintf x; \ } while (0); #else #define DPRINTFN(n, x) #endif #define DPRINTF(x) DPRINTFN(0, x) /* * Various supported device vendors/products. */ Static struct rue_type rue_devs[] = { { USB_VENDOR_MELCO, USB_PRODUCT_MELCO_LUAKTX }, { USB_VENDOR_REALTEK, USB_PRODUCT_REALTEK_USBKR100 }, { 0, 0 } }; Static int rue_match(device_ptr_t); Static int rue_attach(device_ptr_t); Static int rue_detach(device_ptr_t); Static int rue_encap(struct rue_softc *, struct mbuf *, int); #ifdef RUE_INTR_PIPE Static void rue_intr(usbd_xfer_handle, usbd_private_handle, usbd_status); #endif Static void rue_rxeof(usbd_xfer_handle, usbd_private_handle, usbd_status); Static void rue_txeof(usbd_xfer_handle, usbd_private_handle, usbd_status); Static void rue_tick(void *); Static void rue_rxstart(struct ifnet *); Static void rue_start(struct ifnet *); Static int rue_ioctl(struct ifnet *, u_long, caddr_t); Static void rue_init(void *); Static void rue_stop(struct rue_softc *); Static void rue_watchdog(struct ifnet *); Static void rue_shutdown(device_ptr_t); Static int rue_ifmedia_upd(struct ifnet *); Static void rue_ifmedia_sts(struct ifnet *, struct ifmediareq *); Static int rue_miibus_readreg(device_ptr_t, int, int); Static int rue_miibus_writereg(device_ptr_t, int, int, int); Static void rue_miibus_statchg(device_ptr_t); Static void rue_setmulti(struct 
rue_softc *); Static void rue_reset(struct rue_softc *); Static int rue_read_mem(struct rue_softc *, u_int16_t, void *, u_int16_t); Static int rue_write_mem(struct rue_softc *, u_int16_t, void *, u_int16_t); Static int rue_csr_read_1(struct rue_softc *, int); Static int rue_csr_write_1(struct rue_softc *, int, u_int8_t); Static int rue_csr_read_2(struct rue_softc *, int); Static int rue_csr_write_2(struct rue_softc *, int, u_int16_t); Static int rue_csr_write_4(struct rue_softc *, int, u_int32_t); Static device_method_t rue_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rue_match), DEVMETHOD(device_attach, rue_attach), DEVMETHOD(device_detach, rue_detach), DEVMETHOD(device_shutdown, rue_shutdown), /* Bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, rue_miibus_readreg), DEVMETHOD(miibus_writereg, rue_miibus_writereg), DEVMETHOD(miibus_statchg, rue_miibus_statchg), { 0, 0 } }; Static driver_t rue_driver = { "rue", rue_methods, sizeof(struct rue_softc) }; Static devclass_t rue_devclass; DRIVER_MODULE(rue, uhub, rue_driver, rue_devclass, usbd_driver_load, 0); DRIVER_MODULE(miibus, rue, miibus_driver, miibus_devclass, 0, 0); MODULE_DEPEND(rue, usb, 1, 1, 1); MODULE_DEPEND(rue, ether, 1, 1, 1); MODULE_DEPEND(rue, miibus, 1, 1, 1); #define RUE_SETBIT(sc, reg, x) \ rue_csr_write_1(sc, reg, rue_csr_read_1(sc, reg) | (x)) #define RUE_CLRBIT(sc, reg, x) \ rue_csr_write_1(sc, reg, rue_csr_read_1(sc, reg) & ~(x)) #define RUE_SETBIT_2(sc, reg, x) \ rue_csr_write_2(sc, reg, rue_csr_read_2(sc, reg) | (x)) #define RUE_CLRBIT_2(sc, reg, x) \ rue_csr_write_2(sc, reg, rue_csr_read_2(sc, reg) & ~(x)) Static int rue_read_mem(struct rue_softc *sc, u_int16_t addr, void *buf, u_int16_t len) { usb_device_request_t req; usbd_status err; if (sc->rue_dying) return (0); RUE_LOCK(sc); req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = UR_SET_ADDRESS; 
USETW(req.wValue, addr); USETW(req.wIndex, 0); USETW(req.wLength, len); err = usbd_do_request(sc->rue_udev, &req, buf); RUE_UNLOCK(sc); if (err) { printf("rue%d: control pipe read failed: %s\n", sc->rue_unit, usbd_errstr(err)); return (-1); } return (0); } Static int rue_write_mem(struct rue_softc *sc, u_int16_t addr, void *buf, u_int16_t len) { usb_device_request_t req; usbd_status err; if (sc->rue_dying) return (0); RUE_LOCK(sc); req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = UR_SET_ADDRESS; USETW(req.wValue, addr); USETW(req.wIndex, 0); USETW(req.wLength, len); err = usbd_do_request(sc->rue_udev, &req, buf); RUE_UNLOCK(sc); if (err) { printf("rue%d: control pipe write failed: %s\n", sc->rue_unit, usbd_errstr(err)); return (-1); } return (0); } Static int rue_csr_read_1(struct rue_softc *sc, int reg) { int err; u_int8_t val = 0; err = rue_read_mem(sc, reg, &val, 1); if (err) return (0); return (val); } Static int rue_csr_read_2(struct rue_softc *sc, int reg) { int err; u_int16_t val = 0; uWord w; USETW(w, val); err = rue_read_mem(sc, reg, &w, 2); val = UGETW(w); if (err) return (0); return (val); } Static int rue_csr_write_1(struct rue_softc *sc, int reg, u_int8_t val) { int err; err = rue_write_mem(sc, reg, &val, 1); if (err) return (-1); return (0); } Static int rue_csr_write_2(struct rue_softc *sc, int reg, u_int16_t val) { int err; uWord w; USETW(w, val); err = rue_write_mem(sc, reg, &w, 2); if (err) return (-1); return (0); } Static int rue_csr_write_4(struct rue_softc *sc, int reg, u_int32_t val) { int err; uDWord dw; USETDW(dw, val); err = rue_write_mem(sc, reg, &dw, 4); if (err) return (-1); return (0); } Static int rue_miibus_readreg(device_ptr_t dev, int phy, int reg) { struct rue_softc *sc = USBGETSOFTC(dev); int rval; int ruereg; if (phy != 0) /* RTL8150 supports PHY == 0, only */ return (0); switch (reg) { case MII_BMCR: ruereg = RUE_BMCR; break; case MII_BMSR: ruereg = RUE_BMSR; break; case MII_ANAR: ruereg = RUE_ANAR; break; case 
MII_ANER: ruereg = RUE_AER; break; case MII_ANLPAR: ruereg = RUE_ANLP; break; case MII_PHYIDR1: case MII_PHYIDR2: return (0); break; default: if (RUE_REG_MIN <= reg && reg <= RUE_REG_MAX) { rval = rue_csr_read_1(sc, reg); return (rval); } printf("rue%d: bad phy register\n", sc->rue_unit); return (0); } rval = rue_csr_read_2(sc, ruereg); return (rval); } Static int rue_miibus_writereg(device_ptr_t dev, int phy, int reg, int data) { struct rue_softc *sc = USBGETSOFTC(dev); int ruereg; if (phy != 0) /* RTL8150 supports PHY == 0, only */ return (0); switch (reg) { case MII_BMCR: ruereg = RUE_BMCR; break; case MII_BMSR: ruereg = RUE_BMSR; break; case MII_ANAR: ruereg = RUE_ANAR; break; case MII_ANER: ruereg = RUE_AER; break; case MII_ANLPAR: ruereg = RUE_ANLP; break; case MII_PHYIDR1: case MII_PHYIDR2: return (0); break; default: if (RUE_REG_MIN <= reg && reg <= RUE_REG_MAX) { rue_csr_write_1(sc, reg, data); return (0); } printf("rue%d: bad phy register\n", sc->rue_unit); return (0); } rue_csr_write_2(sc, ruereg, data); return (0); } Static void rue_miibus_statchg(device_ptr_t dev) { /* * When the code below is enabled the card starts doing weird * things after link going from UP to DOWN and back UP. * * Looks like some of register writes below messes up PHY * interface. * * No visible regressions were found after commenting this code * out, so that disable it for good. */ #if 0 struct rue_softc *sc = USBGETSOFTC(dev); struct mii_data *mii = GET_MII(sc); int bmcr; RUE_CLRBIT(sc, RUE_CR, (RUE_CR_RE | RUE_CR_TE)); bmcr = rue_csr_read_2(sc, RUE_BMCR); if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) bmcr |= RUE_BMCR_SPD_SET; else bmcr &= ~RUE_BMCR_SPD_SET; if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) bmcr |= RUE_BMCR_DUPLEX; else bmcr &= ~RUE_BMCR_DUPLEX; rue_csr_write_2(sc, RUE_BMCR, bmcr); RUE_SETBIT(sc, RUE_CR, (RUE_CR_RE | RUE_CR_TE)); #endif } /* * Program the 64-bit multicast hash filter. 
*/
/*
 * Program the RTL8150's 64-bit multicast hash (MAR0/MAR4) from the
 * interface's multicast address list.  Promisc/allmulti opens the
 * filter completely.  Hash index is the top 6 bits of the big-endian
 * CRC32 of each address.
 *
 * NOTE(review): the leading '+' on the IF_ADDR_LOCK/IF_ADDR_UNLOCK
 * lines below is unified-diff residue from upstream r149422 (which
 * added address-list locking); it is preserved verbatim here.
 */
Static void
rue_setmulti(struct rue_softc *sc)
{
    struct ifnet *ifp;
    int h = 0;
    u_int32_t hashes[2] = { 0, 0 };
    struct ifmultiaddr *ifma;
    u_int32_t rxcfg;
    int mcnt = 0;

    ifp = sc->rue_ifp;

    rxcfg = rue_csr_read_2(sc, RUE_RCR);

    if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
        rxcfg |= (RUE_RCR_AAM | RUE_RCR_AAP);
        rxcfg &= ~RUE_RCR_AM;
        rue_csr_write_2(sc, RUE_RCR, rxcfg);
        rue_csr_write_4(sc, RUE_MAR0, 0xFFFFFFFF);
        rue_csr_write_4(sc, RUE_MAR4, 0xFFFFFFFF);
        return;
    }

    /* first, zot all the existing hash bits */
    rue_csr_write_4(sc, RUE_MAR0, 0);
    rue_csr_write_4(sc, RUE_MAR4, 0);

    /* now program new ones */
+ IF_ADDR_LOCK(ifp);
#if __FreeBSD_version >= 500000
    TAILQ_FOREACH (ifma, &ifp->if_multiaddrs, ifma_link)
#else
    LIST_FOREACH (ifma, &ifp->if_multiaddrs, ifma_link)
#endif
    {
        if (ifma->ifma_addr->sa_family != AF_LINK)
            continue;
        /* Top 6 CRC bits select one of 64 hash-filter positions. */
        h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
            ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
        if (h < 32)
            hashes[0] |= (1 << h);
        else
            hashes[1] |= (1 << (h - 32));
        mcnt++;
    }
+ IF_ADDR_UNLOCK(ifp);

    if (mcnt)
        rxcfg |= RUE_RCR_AM;
    else
        rxcfg &= ~RUE_RCR_AM;
    rxcfg &= ~(RUE_RCR_AAM | RUE_RCR_AAP);

    rue_csr_write_2(sc, RUE_RCR, rxcfg);
    rue_csr_write_4(sc, RUE_MAR0, hashes[0]);
    rue_csr_write_4(sc, RUE_MAR4, hashes[1]);
}

/*
 * Soft-reset the chip and poll (up to RUE_TIMEOUT * 500us) for the
 * reset bit to clear; warns on the console if it never does.
 */
Static void
rue_reset(struct rue_softc *sc)
{
    int i;

    rue_csr_write_1(sc, RUE_CR, RUE_CR_SOFT_RST);

    for (i = 0; i < RUE_TIMEOUT; i++) {
        DELAY(500);
        if (!(rue_csr_read_1(sc, RUE_CR) & RUE_CR_SOFT_RST))
            break;
    }
    if (i == RUE_TIMEOUT)
        printf("rue%d: reset never completed!\n", sc->rue_unit);

    /* Wait a little while for the chip to get its brains in order. */
    DELAY(10000);
}

/*
 * Probe for a RTL8150 chip.
 */
USB_MATCH(rue)
{
    USB_MATCH_START(rue, uaa);
    struct rue_type *t;

    if (uaa->iface == NULL)
        return (UMATCH_NONE);

    t = rue_devs;
    while (t->rue_vid) {
        if (uaa->vendor == t->rue_vid &&
            uaa->product == t->rue_did) {
            return (UMATCH_VENDOR_PRODUCT);
        }
        t++;
    }
    return (UMATCH_NONE);
}

/*
 * Attach the interface. Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
*/ USB_ATTACH(rue) { USB_ATTACH_START(rue, sc, uaa); char *devinfo; u_char eaddr[ETHER_ADDR_LEN]; struct ifnet *ifp; usbd_interface_handle iface; usbd_status err; usb_interface_descriptor_t *id; usb_endpoint_descriptor_t *ed; int i; struct rue_type *t; devinfo = malloc(1024, M_USBDEV, M_WAITOK); bzero(sc, sizeof (struct rue_softc)); usbd_devinfo(uaa->device, 0, devinfo); sc->rue_dev = self; sc->rue_udev = uaa->device; sc->rue_unit = device_get_unit(self); if (usbd_set_config_no(sc->rue_udev, RUE_CONFIG_NO, 0)) { printf("rue%d: getting interface handle failed\n", sc->rue_unit); goto error; } err = usbd_device2interface_handle(uaa->device, RUE_IFACE_IDX, &iface); if (err) { printf("rue%d: getting interface handle failed\n", sc->rue_unit); goto error; } sc->rue_iface = iface; t = rue_devs; while (t->rue_vid) { if (uaa->vendor == t->rue_vid && uaa->product == t->rue_did) { sc->rue_info = t; break; } t++; } id = usbd_get_interface_descriptor(sc->rue_iface); usbd_devinfo(uaa->device, 0, devinfo); device_set_desc_copy(self, devinfo); printf("%s: %s\n", USBDEVNAME(self), devinfo); /* Find endpoints */ for (i = 0; i < id->bNumEndpoints; i++) { ed = usbd_interface2endpoint_descriptor(iface, i); if (ed == NULL) { printf("rue%d: couldn't get ep %d\n", sc->rue_unit, i); goto error; } if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) { sc->rue_ed[RUE_ENDPT_RX] = ed->bEndpointAddress; } else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT && UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) { sc->rue_ed[RUE_ENDPT_TX] = ed->bEndpointAddress; } else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && UE_GET_XFERTYPE(ed->bmAttributes) == UE_INTERRUPT) { sc->rue_ed[RUE_ENDPT_INTR] = ed->bEndpointAddress; } } #if __FreeBSD_version >= 500000 mtx_init(&sc->rue_mtx, device_get_nameunit(self), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); #endif RUE_LOCK(sc); /* Reset the adapter */ rue_reset(sc); /* Get station address from the EEPROM */ err = 
rue_read_mem(sc, RUE_EEPROM_IDR0, (caddr_t)&eaddr, ETHER_ADDR_LEN); if (err) { printf("rue%d: couldn't get station address\n", sc->rue_unit); goto error1; } ifp = sc->rue_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { printf("rue%d: can not if_alloc()\n", sc->rue_unit); goto error1; } ifp->if_softc = sc; if_initname(ifp, "rue", sc->rue_unit); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_NEEDSGIANT; ifp->if_ioctl = rue_ioctl; ifp->if_start = rue_start; ifp->if_watchdog = rue_watchdog; ifp->if_init = rue_init; ifp->if_baudrate = 10000000; ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; /* MII setup */ if (mii_phy_probe(self, &sc->rue_miibus, rue_ifmedia_upd, rue_ifmedia_sts)) { printf("rue%d: MII without any PHY!\n", sc->rue_unit); goto error2; } sc->rue_qdat.ifp = ifp; sc->rue_qdat.if_rxstart = rue_rxstart; /* Call MI attach routine */ #if __FreeBSD_version >= 500000 ether_ifattach(ifp, eaddr); #else ether_ifattach(ifp, ETHER_BPF_SUPPORTED); #endif callout_handle_init(&sc->rue_stat_ch); usb_register_netisr(); sc->rue_dying = 0; RUE_UNLOCK(sc); free(devinfo, M_USBDEV); USB_ATTACH_SUCCESS_RETURN; error2: if_free(ifp); error1: RUE_UNLOCK(sc); #if __FreeBSD_version >= 500000 mtx_destroy(&sc->rue_mtx); #endif error: free(devinfo, M_USBDEV); USB_ATTACH_ERROR_RETURN; } Static int rue_detach(device_ptr_t dev) { struct rue_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); RUE_LOCK(sc); ifp = sc->rue_ifp; sc->rue_dying = 1; untimeout(rue_tick, sc, sc->rue_stat_ch); #if __FreeBSD_version >= 500000 ether_ifdetach(ifp); #else ether_ifdetach(ifp, ETHER_BPF_SUPPORTED); #endif if (sc->rue_ep[RUE_ENDPT_TX] != NULL) usbd_abort_pipe(sc->rue_ep[RUE_ENDPT_TX]); if (sc->rue_ep[RUE_ENDPT_RX] != NULL) usbd_abort_pipe(sc->rue_ep[RUE_ENDPT_RX]); #ifdef RUE_INTR_PIPE if (sc->rue_ep[RUE_ENDPT_INTR] != NULL) usbd_abort_pipe(sc->rue_ep[RUE_ENDPT_INTR]); #endif RUE_UNLOCK(sc); #if __FreeBSD_version >= 500000 mtx_destroy(&sc->rue_mtx); #endif return (0); } 
#ifdef RUE_INTR_PIPE
/*
 * Interrupt-endpoint completion handler: the chip periodically reports
 * lost/CRC-error/collision counters which are folded into ifnet stats.
 */
Static void
rue_intr(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status)
{
	struct rue_softc	*sc = priv;
	struct ifnet		*ifp;
	struct rue_intrpkt	*p;

	RUE_LOCK(sc);
	ifp = sc->rue_ifp;

	if (!(ifp->if_flags & IFF_RUNNING)) {
		RUE_UNLOCK(sc);
		return;
	}

	if (status != USBD_NORMAL_COMPLETION) {
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) {
			/* Transfer was torn down; nothing to report. */
			RUE_UNLOCK(sc);
			return;
		}
		printf("rue%d: usb error on intr: %s\n", sc->rue_unit,
		    usbd_errstr(status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall(sc->rue_ep[RUE_ENDPT_INTR]);
		RUE_UNLOCK(sc);
		return;
	}

	usbd_get_xfer_status(xfer, NULL, (void **)&p, NULL, NULL);

	ifp->if_ierrors += p->rue_rxlost_cnt;
	ifp->if_ierrors += p->rue_crcerr_cnt;
	ifp->if_collisions += p->rue_col_cnt;

	RUE_UNLOCK(sc);
}
#endif

/*
 * Allocate a fresh mbuf for the current RX producer slot and restart
 * the bulk-in transfer on it.
 */
Static void
rue_rxstart(struct ifnet *ifp)
{
	struct rue_softc	*sc;
	struct ue_chain	*c;

	sc = ifp->if_softc;
	RUE_LOCK(sc);
	c = &sc->rue_cdata.ue_rx_chain[sc->rue_cdata.ue_rx_prod];

	c->ue_mbuf = usb_ether_newbuf();
	if (c->ue_mbuf == NULL) {
		printf("%s: no memory for rx list "
		    "-- packet dropped!\n", USBDEVNAME(sc->rue_dev));
		ifp->if_ierrors++;
		RUE_UNLOCK(sc);
		return;
	}

	/* Setup new transfer. */
	usbd_setup_xfer(c->ue_xfer, sc->rue_ep[RUE_ENDPT_RX],
	    c, mtod(c->ue_mbuf, char *), UE_BUFSZ, USBD_SHORT_XFER_OK,
	    USBD_NO_TIMEOUT, rue_rxeof);
	usbd_transfer(c->ue_xfer);
	RUE_UNLOCK(sc);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
Static void
rue_rxeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status)
{
	struct ue_chain	*c = priv;
	struct rue_softc	*sc = c->ue_sc;
	struct mbuf		*m;
	struct ifnet		*ifp;
	int			total_len = 0;
	struct rue_rxpkt	r;

	/*
	 * NOTE(review): rue_dying is tested before the lock is taken --
	 * presumably to avoid touching a softc that detach is tearing
	 * down; confirm this cannot race with rue_detach().
	 */
	if (sc->rue_dying)
		return;
	RUE_LOCK(sc);
	ifp = sc->rue_ifp;

	if (!(ifp->if_flags & IFF_RUNNING)) {
		RUE_UNLOCK(sc);
		return;
	}

	if (status != USBD_NORMAL_COMPLETION) {
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) {
			RUE_UNLOCK(sc);
			return;
		}
		/* Rate-limit the error message to avoid console spam. */
		if (usbd_ratecheck(&sc->rue_rx_notice))
			printf("rue%d: usb error on rx: %s\n", sc->rue_unit,
			    usbd_errstr(status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall(sc->rue_ep[RUE_ENDPT_RX]);
		goto done;
	}

	usbd_get_xfer_status(xfer, NULL, NULL, &total_len, NULL);

	if (total_len <= ETHER_CRC_LEN) {
		ifp->if_ierrors++;
		goto done;
	}

	m = c->ue_mbuf;
	/* The chip appends a 4-byte RX status word after the frame data. */
	bcopy(mtod(m, char *) + total_len - 4, (char *)&r, sizeof (r));

	/* Check receive packet was valid or not */
	if ((r.rue_rxstat & RUE_RXSTAT_VALID) == 0) {
		ifp->if_ierrors++;
		goto done;
	}

	/* No errors; receive the packet. */
	total_len -= ETHER_CRC_LEN;

	ifp->if_ipackets++;
	m->m_pkthdr.rcvif = (void *)&sc->rue_qdat;
	m->m_pkthdr.len = m->m_len = total_len;

	/* Put the packet on the special USB input queue. */
	usb_ether_input(m);
	RUE_UNLOCK(sc);

	return;

done:
	/* Setup new transfer. */
	usbd_setup_xfer(xfer, sc->rue_ep[RUE_ENDPT_RX],
	    c, mtod(c->ue_mbuf, char *), UE_BUFSZ,
	    USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, rue_rxeof);
	usbd_transfer(xfer);
	RUE_UNLOCK(sc);
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
Static void
rue_txeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status)
{
	struct ue_chain	*c = priv;
	struct rue_softc	*sc = c->ue_sc;
	struct ifnet		*ifp;
	usbd_status		err;

	RUE_LOCK(sc);
	ifp = sc->rue_ifp;

	if (status != USBD_NORMAL_COMPLETION) {
		if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) {
			RUE_UNLOCK(sc);
			return;
		}
		printf("rue%d: usb error on tx: %s\n", sc->rue_unit,
		    usbd_errstr(status));
		if (status == USBD_STALLED)
			usbd_clear_endpoint_stall(sc->rue_ep[RUE_ENDPT_TX]);
		RUE_UNLOCK(sc);
		return;
	}

	/* Transfer finished: cancel the watchdog and reopen the queue. */
	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;
	usbd_get_xfer_status(c->ue_xfer, NULL, NULL, NULL, &err);

	if (c->ue_mbuf != NULL) {
		c->ue_mbuf->m_pkthdr.rcvif = ifp;
		usb_tx_done(c->ue_mbuf);
		c->ue_mbuf = NULL;
	}

	if (err)
		ifp->if_oerrors++;
	else
		ifp->if_opackets++;

	RUE_UNLOCK(sc);
}

/*
 * Once-a-second timer: drive the MII state machine and kick the
 * transmit queue when the link first comes up.
 */
Static void
rue_tick(void *xsc)
{
	struct rue_softc	*sc = xsc;
	struct ifnet		*ifp;
	struct mii_data		*mii;

	if (sc == NULL)
		return;

	RUE_LOCK(sc);
	ifp = sc->rue_ifp;
	mii = GET_MII(sc);
	if (mii == NULL) {
		RUE_UNLOCK(sc);
		return;
	}

	mii_tick(mii);
	if (!sc->rue_link && mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->rue_link++;
		if (ifp->if_snd.ifq_head != NULL)
			rue_start(ifp);
	}

	/* Re-arm ourselves for one second from now. */
	sc->rue_stat_ch = timeout(rue_tick, sc, hz);

	RUE_UNLOCK(sc);
}

/*
 * Copy an outgoing mbuf chain into the contiguous buffer of TX slot
 * 'idx' and start the bulk-out transfer.  Returns 0 on success or EIO
 * if the transfer could not be started (the adapter is stopped then).
 */
Static int
rue_encap(struct rue_softc *sc, struct mbuf *m, int idx)
{
	int			total_len;
	struct ue_chain	*c;
	usbd_status		err;

	c = &sc->rue_cdata.ue_tx_chain[idx];

	/*
	 * Copy the mbuf data into a contiguous buffer
	 */
	m_copydata(m, 0, m->m_pkthdr.len, c->ue_buf);
	c->ue_mbuf = m;

	total_len = m->m_pkthdr.len;

	/*
	 * This is an undocumented behavior.
	 * RTL8150 chip doesn't send frame length smaller than
	 * RUE_MIN_FRAMELEN (60) byte packet.
	 */
	if (total_len < RUE_MIN_FRAMELEN)
		total_len = RUE_MIN_FRAMELEN;

	usbd_setup_xfer(c->ue_xfer, sc->rue_ep[RUE_ENDPT_TX],
	    c, c->ue_buf, total_len, USBD_FORCE_SHORT_XFER,
	    10000, rue_txeof);

	/* Transmit */
	err = usbd_transfer(c->ue_xfer);
	if (err != USBD_IN_PROGRESS) {
		rue_stop(sc);
		return (EIO);
	}

	sc->rue_cdata.ue_tx_cnt++;

	return (0);
}

/*
 * if_start: dequeue at most one packet and hand it to rue_encap().
 * The driver keeps a single transmit in flight, signalled by
 * IFF_OACTIVE; nothing is sent until the link is up.
 */
Static void
rue_start(struct ifnet *ifp)
{
	struct rue_softc	*sc = ifp->if_softc;
	struct mbuf		*m_head = NULL;

	RUE_LOCK(sc);

	if (!sc->rue_link) {
		RUE_UNLOCK(sc);
		return;
	}

	if (ifp->if_flags & IFF_OACTIVE) {
		RUE_UNLOCK(sc);
		return;
	}

	IF_DEQUEUE(&ifp->if_snd, m_head);
	if (m_head == NULL) {
		RUE_UNLOCK(sc);
		return;
	}

	if (rue_encap(sc, m_head, 0)) {
		/* Transfer failed to start; requeue the packet. */
		IF_PREPEND(&ifp->if_snd, m_head);
		ifp->if_flags |= IFF_OACTIVE;
		RUE_UNLOCK(sc);
		return;
	}

	/*
	 * If there's a BPF listener, bounce a copy of this frame
	 * to him.
	 */
	BPF_MTAP(ifp, m_head);

	ifp->if_flags |= IFF_OACTIVE;

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	RUE_UNLOCK(sc);
}

/*
 * Bring the interface up: reset the chip, program the MAC address,
 * allocate the TX/RX rings, configure the receiver and open the
 * USB pipes.  (First half; continues past this line.)
 */
Static void
rue_init(void *xsc)
{
	struct rue_softc	*sc = xsc;
	struct ifnet		*ifp = sc->rue_ifp;
	struct mii_data		*mii = GET_MII(sc);
	struct ue_chain	*c;
	usbd_status		err;
	int			i;
	int			rxcfg;

	RUE_LOCK(sc);

	if (ifp->if_flags & IFF_RUNNING) {
		RUE_UNLOCK(sc);
		return;
	}

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	rue_reset(sc);

	/* Set MAC address */
	rue_write_mem(sc, RUE_IDR0, IFP2ENADDR(sc->rue_ifp), ETHER_ADDR_LEN);

	/* Init TX ring. */
	if (usb_ether_tx_list_init(sc, &sc->rue_cdata,
	    sc->rue_udev) == ENOBUFS) {
		printf("rue%d: tx list init failed\n", sc->rue_unit);
		RUE_UNLOCK(sc);
		return;
	}

	/* Init RX ring. */
	if (usb_ether_rx_list_init(sc, &sc->rue_cdata,
	    sc->rue_udev) == ENOBUFS) {
		printf("rue%d: rx list init failed\n", sc->rue_unit);
		RUE_UNLOCK(sc);
		return;
	}

#ifdef RUE_INTR_PIPE
	sc->rue_cdata.ue_ibuf = malloc(RUE_INTR_PKTLEN, M_USBDEV, M_NOWAIT);
#endif

	/*
	 * Set the initial TX and RX configuration.
	 */
	rue_csr_write_1(sc, RUE_TCR, RUE_TCR_CONFIG);

	rxcfg = RUE_RCR_CONFIG;

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST)
		rxcfg |= RUE_RCR_AB;
	else
		rxcfg &= ~RUE_RCR_AB;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		rxcfg |= RUE_RCR_AAP;
	else
		rxcfg &= ~RUE_RCR_AAP;

	rue_csr_write_2(sc, RUE_RCR, rxcfg);

	/* Load the multicast filter. */
	rue_setmulti(sc);

	/* Enable RX and TX */
	rue_csr_write_1(sc, RUE_CR, (RUE_CR_TE | RUE_CR_RE | RUE_CR_EP3CLREN));

	mii_mediachg(mii);

	/* Open RX and TX pipes. */
	err = usbd_open_pipe(sc->rue_iface, sc->rue_ed[RUE_ENDPT_RX],
	    USBD_EXCLUSIVE_USE, &sc->rue_ep[RUE_ENDPT_RX]);
	if (err) {
		printf("rue%d: open rx pipe failed: %s\n",
		    sc->rue_unit, usbd_errstr(err));
		RUE_UNLOCK(sc);
		return;
	}
	err = usbd_open_pipe(sc->rue_iface, sc->rue_ed[RUE_ENDPT_TX],
	    USBD_EXCLUSIVE_USE, &sc->rue_ep[RUE_ENDPT_TX]);
	if (err) {
		printf("rue%d: open tx pipe failed: %s\n",
		    sc->rue_unit, usbd_errstr(err));
		RUE_UNLOCK(sc);
		return;
	}

#ifdef RUE_INTR_PIPE
	err = usbd_open_pipe_intr(sc->rue_iface, sc->rue_ed[RUE_ENDPT_INTR],
	    USBD_SHORT_XFER_OK, &sc->rue_ep[RUE_ENDPT_INTR],
	    sc, sc->rue_cdata.ue_ibuf, RUE_INTR_PKTLEN,
	    rue_intr, RUE_INTR_INTERVAL);
	if (err) {
		printf("rue%d: open intr pipe failed: %s\n",
		    sc->rue_unit, usbd_errstr(err));
		RUE_UNLOCK(sc);
		return;
	}
#endif

	/* Start up the receive pipe. */
	for (i = 0; i < UE_RX_LIST_CNT; i++) {
		c = &sc->rue_cdata.ue_rx_chain[i];
		usbd_setup_xfer(c->ue_xfer, sc->rue_ep[RUE_ENDPT_RX],
		    c, mtod(c->ue_mbuf, char *), UE_BUFSZ,
		    USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, rue_rxeof);
		usbd_transfer(c->ue_xfer);
	}

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Start the one-second MII tick. */
	sc->rue_stat_ch = timeout(rue_tick, sc, hz);

	RUE_UNLOCK(sc);
}

/*
 * Set media options.
 */
Static int
rue_ifmedia_upd(struct ifnet *ifp)
{
	struct rue_softc	*sc = ifp->if_softc;
	struct mii_data		*mii = GET_MII(sc);

	sc->rue_link = 0;
	if (mii->mii_instance) {
		struct mii_softc	*miisc;
		/* Reset every attached PHY instance before renegotiating. */
		LIST_FOREACH (miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return (0);
}

/*
 * Report current media status.
 */
Static void
rue_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct rue_softc	*sc = ifp->if_softc;
	struct mii_data		*mii = GET_MII(sc);

	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

/*
 * Handle socket ioctls: interface flags (including promiscuous-mode
 * toggling without a full re-init), multicast list changes and media
 * requests; everything else goes to ether_ioctl().
 */
Static int
rue_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct rue_softc	*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *)data;
	struct mii_data		*mii;
	int			error = 0;

	RUE_LOCK(sc);

	switch (command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->rue_if_flags & IFF_PROMISC)) {
				/* Promiscuous turned on while running. */
				RUE_SETBIT_2(sc, RUE_RCR,
				    (RUE_RCR_AAM | RUE_RCR_AAP));
				rue_setmulti(sc);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->rue_if_flags & IFF_PROMISC) {
				/* Promiscuous turned off while running. */
				RUE_CLRBIT_2(sc, RUE_RCR,
				    (RUE_RCR_AAM | RUE_RCR_AAP));
				rue_setmulti(sc);
			} else if (!(ifp->if_flags & IFF_RUNNING))
				rue_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				rue_stop(sc);
		}
		sc->rue_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		rue_setmulti(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = GET_MII(sc);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	RUE_UNLOCK(sc);

	return (error);
}

/*
 * Transmit watchdog: fires when a queued transfer did not complete
 * within if_timer seconds.  Reap TX slot 0 and restart the queue.
 */
Static void
rue_watchdog(struct ifnet *ifp)
{
	struct rue_softc	*sc = ifp->if_softc;
	struct ue_chain	*c;
	usbd_status		stat;

	RUE_LOCK(sc);

	ifp->if_oerrors++;
	printf("rue%d: watchdog timeout\n", sc->rue_unit);

	c = &sc->rue_cdata.ue_tx_chain[0];
	usbd_get_xfer_status(c->ue_xfer, NULL, NULL, NULL, &stat);
	rue_txeof(c->ue_xfer, c, stat);

	if (ifp->if_snd.ifq_head != NULL)
		rue_start(ifp);

	RUE_UNLOCK(sc);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
Static void
rue_stop(struct rue_softc *sc)
{
	usbd_status	err;
	struct ifnet	*ifp;

	RUE_LOCK(sc);

	ifp = sc->rue_ifp;
	ifp->if_timer = 0;

	/* Disable RX/TX and reset the chip. */
	rue_csr_write_1(sc, RUE_CR, 0x00);
	rue_reset(sc);

	untimeout(rue_tick, sc, sc->rue_stat_ch);

	/* Stop transfers. */
	if (sc->rue_ep[RUE_ENDPT_RX] != NULL) {
		err = usbd_abort_pipe(sc->rue_ep[RUE_ENDPT_RX]);
		if (err) {
			printf("rue%d: abort rx pipe failed: %s\n",
			    sc->rue_unit, usbd_errstr(err));
		}
		err = usbd_close_pipe(sc->rue_ep[RUE_ENDPT_RX]);
		if (err) {
			printf("rue%d: close rx pipe failed: %s\n",
			    sc->rue_unit, usbd_errstr(err));
		}
		sc->rue_ep[RUE_ENDPT_RX] = NULL;
	}

	if (sc->rue_ep[RUE_ENDPT_TX] != NULL) {
		err = usbd_abort_pipe(sc->rue_ep[RUE_ENDPT_TX]);
		if (err) {
			printf("rue%d: abort tx pipe failed: %s\n",
			    sc->rue_unit, usbd_errstr(err));
		}
		err = usbd_close_pipe(sc->rue_ep[RUE_ENDPT_TX]);
		if (err) {
			printf("rue%d: close tx pipe failed: %s\n",
			    sc->rue_unit, usbd_errstr(err));
		}
		sc->rue_ep[RUE_ENDPT_TX] = NULL;
	}

#ifdef RUE_INTR_PIPE
	if (sc->rue_ep[RUE_ENDPT_INTR] != NULL) {
		err = usbd_abort_pipe(sc->rue_ep[RUE_ENDPT_INTR]);
		if (err) {
			printf("rue%d: abort intr pipe failed: %s\n",
			    sc->rue_unit, usbd_errstr(err));
		}
		err = usbd_close_pipe(sc->rue_ep[RUE_ENDPT_INTR]);
		if (err) {
			printf("rue%d: close intr pipe failed: %s\n",
			    sc->rue_unit, usbd_errstr(err));
		}
		sc->rue_ep[RUE_ENDPT_INTR] = NULL;
	}
#endif

	/* Free RX resources. */
	usb_ether_rx_list_free(&sc->rue_cdata);
	/* Free TX resources. */
	usb_ether_tx_list_free(&sc->rue_cdata);

#ifdef RUE_INTR_PIPE
	free(sc->rue_cdata.ue_ibuf, M_USBDEV);
	sc->rue_cdata.ue_ibuf = NULL;
#endif

	sc->rue_link = 0;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	RUE_UNLOCK(sc);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
Static void
rue_shutdown(device_ptr_t dev)
{
	struct rue_softc	*sc;

	sc = device_get_softc(dev);

	/*
	 * NOTE(review): rue_dying is bumped before the lock is taken,
	 * and rue_reset() runs before rue_stop() (which resets again) --
	 * verify both orderings are intentional.
	 */
	sc->rue_dying++;
	RUE_LOCK(sc);
	rue_reset(sc);
	rue_stop(sc);
	RUE_UNLOCK(sc);
}
Index: stable/6/sys/dev/usb/if_udav.c
===================================================================
--- stable/6/sys/dev/usb/if_udav.c	(revision 149421)
+++ stable/6/sys/dev/usb/if_udav.c	(revision 149422)
@@ -1,1943 +1,1945 @@
/*	$NetBSD: if_udav.c,v 1.2 2003/09/04 15:17:38 tsutsui Exp $	*/
/*	$nabe: if_udav.c,v 1.3 2003/08/21 16:57:19 nabe Exp $	*/
/*	$FreeBSD$	*/
/*-
 * Copyright (c) 2003
 *     Shingo WATANABE .  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * DM9601(DAVICOM USB to Ethernet MAC Controller with Integrated 10/100 PHY) * The spec can be found at the following url. * http://www.davicom.com.tw/big5/download/Data%20Sheet/DM9601-DS-P01-930914.pdf */ /* * TODO: * Interrupt Endpoint support * External PHYs * powerhook() support? */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #if defined(__NetBSD__) #include "opt_ns.h" #endif #if defined(__NetBSD__) #include "bpfilter.h" #endif #if defined(__FreeBSD__) #define NBPFILTER 1 #endif #if defined(__NetBSD__) #include "rnd.h" #endif #include #include #include #include #include #include #include #if defined(__FreeBSD__) #include #include #include #endif #if defined(__NetBSD__) #include #endif #if NRND > 0 #include #endif #include #include #include #include #include #include #if NBPFILTER > 0 #include #endif #if defined(__NetBSD__) #ifndef BPF_MTAP #define BPF_MTAP(_ifp, _m) do { \ if ((_ifp)->if_bpf)) { \ bpf_mtap((_ifp)->if_bpf, (_m)) ; \ } \ } while (0) #endif #endif #if defined(__NetBSD__) #include #ifdef INET #include #include #endif /* INET */ #elif defined(__FreeBSD__) /* defined(__NetBSD__) */ #include #include #endif /* defined(__FreeBSD__) */ #if defined(__NetBSD__) #ifdef NS #include #include #endif #endif /* defined (__NetBSD__) */ #include #include #if __FreeBSD_version < 500000 #include #endif #include #include #include #include #include #include "usbdevs.h" #include #include #include #if defined(__FreeBSD__) MODULE_DEPEND(udav, 
usb, 1, 1, 1); MODULE_DEPEND(udav, ether, 1, 1, 1); MODULE_DEPEND(udav, miibus, 1, 1, 1); #endif /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" #if !defined(__FreeBSD__) /* Function declarations */ USB_DECLARE_DRIVER(udav); #endif #if defined(__FreeBSD__) Static int udav_match(device_ptr_t); Static int udav_attach(device_ptr_t); Static int udav_detach(device_ptr_t); Static void udav_shutdown(device_ptr_t); #endif Static int udav_openpipes(struct udav_softc *); Static void udav_start(struct ifnet *); Static int udav_send(struct udav_softc *, struct mbuf *, int); Static void udav_txeof(usbd_xfer_handle, usbd_private_handle, usbd_status); #if defined(__FreeBSD__) Static void udav_rxstart(struct ifnet *ifp); #endif Static void udav_rxeof(usbd_xfer_handle, usbd_private_handle, usbd_status); Static void udav_tick(void *); Static void udav_tick_task(void *); Static int udav_ioctl(struct ifnet *, u_long, caddr_t); Static void udav_stop_task(struct udav_softc *); Static void udav_stop(struct ifnet *, int); Static void udav_watchdog(struct ifnet *); Static int udav_ifmedia_change(struct ifnet *); Static void udav_ifmedia_status(struct ifnet *, struct ifmediareq *); Static void udav_lock_mii(struct udav_softc *); Static void udav_unlock_mii(struct udav_softc *); Static int udav_miibus_readreg(device_ptr_t, int, int); Static void udav_miibus_writereg(device_ptr_t, int, int, int); Static void udav_miibus_statchg(device_ptr_t); #if defined(__NetBSD__) Static int udav_init(struct ifnet *); #elif defined(__FreeBSD__) Static void udav_init(void *); #endif Static void udav_setmulti(struct udav_softc *); Static void udav_reset(struct udav_softc *); Static int udav_csr_read(struct udav_softc *, int, void *, int); Static int udav_csr_write(struct udav_softc *, int, void *, int); Static int udav_csr_read1(struct udav_softc *, int); Static int udav_csr_write1(struct udav_softc *, int, unsigned char); #if 0 Static int udav_mem_read(struct 
udav_softc *, int, void *, int); Static int udav_mem_write(struct udav_softc *, int, void *, int); Static int udav_mem_write1(struct udav_softc *, int, unsigned char); #endif #if defined(__FreeBSD__) Static device_method_t udav_methods[] = { /* Device interface */ DEVMETHOD(device_probe, udav_match), DEVMETHOD(device_attach, udav_attach), DEVMETHOD(device_detach, udav_detach), DEVMETHOD(device_shutdown, udav_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, udav_miibus_readreg), DEVMETHOD(miibus_writereg, udav_miibus_writereg), DEVMETHOD(miibus_statchg, udav_miibus_statchg), { 0, 0 } }; Static driver_t udav_driver = { "udav", udav_methods, sizeof(struct udav_softc) }; Static devclass_t udav_devclass; DRIVER_MODULE(udav, uhub, udav_driver, udav_devclass, usbd_driver_load, 0); DRIVER_MODULE(miibus, udav, miibus_driver, miibus_devclass, 0, 0); #endif /* defined(__FreeBSD__) */ /* Macros */ #ifdef UDAV_DEBUG #define DPRINTF(x) if (udavdebug) logprintf x #define DPRINTFN(n,x) if (udavdebug >= (n)) logprintf x int udavdebug = 0; #else #define DPRINTF(x) #define DPRINTFN(n,x) #endif #define delay(d) DELAY(d) #define UDAV_SETBIT(sc, reg, x) \ udav_csr_write1(sc, reg, udav_csr_read1(sc, reg) | (x)) #define UDAV_CLRBIT(sc, reg, x) \ udav_csr_write1(sc, reg, udav_csr_read1(sc, reg) & ~(x)) static const struct udav_type { struct usb_devno udav_dev; u_int16_t udav_flags; #define UDAV_EXT_PHY 0x0001 } udav_devs [] = { /* Corega USB-TXC */ {{ USB_VENDOR_COREGA, USB_PRODUCT_COREGA_FETHER_USB_TXC }, 0}, #if 0 /* DAVICOM DM9601 Generic? */ /* XXX: The following ids was obtained from the data sheet. 
*/ {{ 0x0a46, 0x9601 }, 0}, #endif }; #define udav_lookup(v, p) ((const struct udav_type *)usb_lookup(udav_devs, v, p)) /* Probe */ USB_MATCH(udav) { USB_MATCH_START(udav, uaa); if (uaa->iface != NULL) return (UMATCH_NONE); return (udav_lookup(uaa->vendor, uaa->product) != NULL ? UMATCH_VENDOR_PRODUCT : UMATCH_NONE); } /* Attach */ USB_ATTACH(udav) { USB_ATTACH_START(udav, sc, uaa); usbd_device_handle dev = uaa->device; usbd_interface_handle iface; usbd_status err; usb_interface_descriptor_t *id; usb_endpoint_descriptor_t *ed; char devinfo[1024]; const char *devname ; struct ifnet *ifp; #if defined(__NetBSD__) struct mii_data *mii; #endif u_char eaddr[ETHER_ADDR_LEN]; int i; #if defined(__NetBSD__) int s; #endif bzero(sc, sizeof(struct udav_softc)); usbd_devinfo(dev, 0, devinfo); USB_ATTACH_SETUP; devname = USBDEVNAME(sc->sc_dev); printf("%s: %s\n", devname, devinfo); /* Move the device into the configured state. */ err = usbd_set_config_no(dev, UDAV_CONFIG_NO, 1); if (err) { printf("%s: setting config no failed\n", devname); goto bad; } usb_init_task(&sc->sc_tick_task, udav_tick_task, sc); lockinit(&sc->sc_mii_lock, PZERO, "udavmii", 0, 0); usb_init_task(&sc->sc_stop_task, (void (*)(void *)) udav_stop_task, sc); /* get control interface */ err = usbd_device2interface_handle(dev, UDAV_IFACE_INDEX, &iface); if (err) { printf("%s: failed to get interface, err=%s\n", devname, usbd_errstr(err)); goto bad; } sc->sc_udev = dev; sc->sc_ctl_iface = iface; sc->sc_flags = udav_lookup(uaa->vendor, uaa->product)->udav_flags; /* get interface descriptor */ id = usbd_get_interface_descriptor(sc->sc_ctl_iface); /* find endpoints */ sc->sc_bulkin_no = sc->sc_bulkout_no = sc->sc_intrin_no = -1; for (i = 0; i < id->bNumEndpoints; i++) { ed = usbd_interface2endpoint_descriptor(sc->sc_ctl_iface, i); if (ed == NULL) { printf("%s: couldn't get endpoint %d\n", devname, i); goto bad; } if ((ed->bmAttributes & UE_XFERTYPE) == UE_BULK && UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN) 
sc->sc_bulkin_no = ed->bEndpointAddress; /* RX */ else if ((ed->bmAttributes & UE_XFERTYPE) == UE_BULK && UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT) sc->sc_bulkout_no = ed->bEndpointAddress; /* TX */ else if ((ed->bmAttributes & UE_XFERTYPE) == UE_INTERRUPT && UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN) sc->sc_intrin_no = ed->bEndpointAddress; /* Status */ } if (sc->sc_bulkin_no == -1 || sc->sc_bulkout_no == -1 || sc->sc_intrin_no == -1) { printf("%s: missing endpoint\n", devname); goto bad; } #if defined(__FreeBSD__) && __FreeBSD_version >= 500000 mtx_init(&sc->sc_mtx, device_get_nameunit(self), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); #endif #if defined(__NetBSD__) s = splnet(); #elif defined(__FreeBSD__) UDAV_LOCK(sc); #endif /* reset the adapter */ udav_reset(sc); /* Get Ethernet Address */ err = udav_csr_read(sc, UDAV_PAR, (void *)eaddr, ETHER_ADDR_LEN); if (err) { printf("%s: read MAC address failed\n", devname); #if defined(__NetBSD__) splx(s); #elif defined(__FreeBSD__) UDAV_UNLOCK(sc); #endif goto bad; } /* Print Ethernet Address */ printf("%s: Ethernet address %s\n", devname, ether_sprintf(eaddr)); /* initialize interface infomation */ #if defined(__FreeBSD__) ifp = GET_IFP(sc) = if_alloc(IFT_ETHER); if (ifp == NULL) { printf("%s: can not if_alloc\n", devname); UDAV_UNLOCK(sc); goto bad; } #else ifp = GET_IFP(sc); #endif ifp->if_softc = sc; ifp->if_mtu = ETHERMTU; #if defined(__NetBSD__) strncpy(ifp->if_xname, devname, IFNAMSIZ); #elif defined(__FreeBSD__) if_initname(ifp, "udav", device_get_unit(self)); #endif ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_NEEDSGIANT; ifp->if_start = udav_start; ifp->if_ioctl = udav_ioctl; ifp->if_watchdog = udav_watchdog; ifp->if_init = udav_init; #if defined(__NetBSD__) ifp->if_stop = udav_stop; #endif #if defined(__FreeBSD__) ifp->if_baudrate = 10000000; ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; #endif #if defined(__NetBSD__) IFQ_SET_READY(&ifp->if_snd); #endif #if defined(__NetBSD__) /* * Do 
ifmedia setup. */ mii = &sc->sc_mii; mii->mii_ifp = ifp; mii->mii_readreg = udav_miibus_readreg; mii->mii_writereg = udav_miibus_writereg; mii->mii_statchg = udav_miibus_statchg; mii->mii_flags = MIIF_AUTOTSLEEP; ifmedia_init(&mii->mii_media, 0, udav_ifmedia_change, udav_ifmedia_status); mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0); if (LIST_FIRST(&mii->mii_phys) == NULL) { ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL); ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE); } else ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); /* attach the interface */ if_attach(ifp); Ether_ifattach(ifp, eaddr); #elif defined(__FreeBSD__) if (mii_phy_probe(self, &sc->sc_miibus, udav_ifmedia_change, udav_ifmedia_status)) { printf("%s: MII without any PHY!\n", USBDEVNAME(sc->sc_dev)); UDAV_UNLOCK(sc); mtx_destroy(&sc->sc_mtx); USB_ATTACH_ERROR_RETURN; } sc->sc_qdat.ifp = ifp; sc->sc_qdat.if_rxstart = udav_rxstart; /* * Call MI attach routine. */ ether_ifattach(ifp, eaddr); #endif #if NRND > 0 rnd_attach_source(&sc->rnd_source, devname, RND_TYPE_NET, 0); #endif usb_callout_init(sc->sc_stat_ch); #if defined(__FreeBSD__) usb_register_netisr(); #endif sc->sc_attached = 1; #if defined(__NetBSD__) splx(s); #elif defined(__FreeBSD__) UDAV_UNLOCK(sc); #endif usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, dev, USBDEV(sc->sc_dev)); USB_ATTACH_SUCCESS_RETURN; bad: sc->sc_dying = 1; USB_ATTACH_ERROR_RETURN; } /* detach */ USB_DETACH(udav) { USB_DETACH_START(udav, sc); struct ifnet *ifp = GET_IFP(sc); #if defined(__NetBSD__) int s; #endif DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); /* Detached before attached finished */ if (!sc->sc_attached) return (0); UDAV_LOCK(sc); usb_uncallout(sc->sc_stat_ch, udav_tick, sc); /* Remove any pending tasks */ usb_rem_task(sc->sc_udev, &sc->sc_tick_task); usb_rem_task(sc->sc_udev, &sc->sc_stop_task); #if defined(__NetBSD__) s = splusb(); #elif defined(__FreeBSD__) UDAV_LOCK(sc); #endif if (--sc->sc_refcnt >= 
0) { /* Wait for processes to go away */ usb_detach_wait(USBDEV(sc->sc_dev)); } if (ifp->if_flags & IFF_RUNNING) udav_stop(GET_IFP(sc), 1); #if NRND > 0 rnd_detach_source(&sc->rnd_source); #endif #if defined(__NetBSD__) mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY); ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY); #endif ether_ifdetach(ifp); #if defined(__NetBSD__) if_detach(ifp); #endif #if defined(__FreeBSD__) if_free(ifp); #endif #ifdef DIAGNOSTIC if (sc->sc_pipe_tx != NULL) printf("%s: detach has active tx endpoint.\n", USBDEVNAME(sc->sc_dev)); if (sc->sc_pipe_rx != NULL) printf("%s: detach has active rx endpoint.\n", USBDEVNAME(sc->sc_dev)); if (sc->sc_pipe_intr != NULL) printf("%s: detach has active intr endpoint.\n", USBDEVNAME(sc->sc_dev)); #endif sc->sc_attached = 0; #if defined(__NetBSD__) splx(s); #elif defined(__FreeBSD__) UDAV_UNLOCK(sc); #endif #if defined(__FreeBSD__) mtx_destroy(&sc->sc_mtx); #endif usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev, USBDEV(sc->sc_dev)); return (0); } #if 0 /* read memory */ Static int udav_mem_read(struct udav_softc *sc, int offset, void *buf, int len) { usb_device_request_t req; usbd_status err; if (sc == NULL) return (0); DPRINTFN(0x200, ("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) return (0); offset &= 0xffff; len &= 0xff; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = UDAV_REQ_MEM_READ; USETW(req.wValue, 0x0000); USETW(req.wIndex, offset); USETW(req.wLength, len); sc->sc_refcnt++; err = usbd_do_request(sc->sc_udev, &req, buf); if (--sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); if (err) { DPRINTF(("%s: %s: read failed. 
off=%04x, err=%d\n", USBDEVNAME(sc->sc_dev), __func__, offset, err)); } return (err); } /* write memory */ Static int udav_mem_write(struct udav_softc *sc, int offset, void *buf, int len) { usb_device_request_t req; usbd_status err; if (sc == NULL) return (0); DPRINTFN(0x200, ("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) return (0); offset &= 0xffff; len &= 0xff; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = UDAV_REQ_MEM_WRITE; USETW(req.wValue, 0x0000); USETW(req.wIndex, offset); USETW(req.wLength, len); sc->sc_refcnt++; err = usbd_do_request(sc->sc_udev, &req, buf); if (--sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); if (err) { DPRINTF(("%s: %s: write failed. off=%04x, err=%d\n", USBDEVNAME(sc->sc_dev), __func__, offset, err)); } return (err); } /* write memory */ Static int udav_mem_write1(struct udav_softc *sc, int offset, unsigned char ch) { usb_device_request_t req; usbd_status err; if (sc == NULL) return (0); DPRINTFN(0x200, ("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) return (0); offset &= 0xffff; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = UDAV_REQ_MEM_WRITE1; USETW(req.wValue, ch); USETW(req.wIndex, offset); USETW(req.wLength, 0x0000); sc->sc_refcnt++; err = usbd_do_request(sc->sc_udev, &req, NULL); if (--sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); if (err) { DPRINTF(("%s: %s: write failed. 
off=%04x, err=%d\n", USBDEVNAME(sc->sc_dev), __func__, offset, err)); } return (err); } #endif /* read register(s) */ Static int udav_csr_read(struct udav_softc *sc, int offset, void *buf, int len) { usb_device_request_t req; usbd_status err; if (sc == NULL) return (0); DPRINTFN(0x200, ("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) return (0); offset &= 0xff; len &= 0xff; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = UDAV_REQ_REG_READ; USETW(req.wValue, 0x0000); USETW(req.wIndex, offset); USETW(req.wLength, len); sc->sc_refcnt++; err = usbd_do_request(sc->sc_udev, &req, buf); if (--sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); if (err) { DPRINTF(("%s: %s: read failed. off=%04x, err=%d\n", USBDEVNAME(sc->sc_dev), __func__, offset, err)); } return (err); } /* write register(s) */ Static int udav_csr_write(struct udav_softc *sc, int offset, void *buf, int len) { usb_device_request_t req; usbd_status err; if (sc == NULL) return (0); DPRINTFN(0x200, ("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) return (0); offset &= 0xff; len &= 0xff; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = UDAV_REQ_REG_WRITE; USETW(req.wValue, 0x0000); USETW(req.wIndex, offset); USETW(req.wLength, len); sc->sc_refcnt++; err = usbd_do_request(sc->sc_udev, &req, buf); if (--sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); if (err) { DPRINTF(("%s: %s: write failed. off=%04x, err=%d\n", USBDEVNAME(sc->sc_dev), __func__, offset, err)); } return (err); } Static int udav_csr_read1(struct udav_softc *sc, int offset) { u_int8_t val = 0; if (sc == NULL) return (0); DPRINTFN(0x200, ("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) return (0); return (udav_csr_read(sc, offset, &val, 1) ? 
0 : val); } /* write a register */ Static int udav_csr_write1(struct udav_softc *sc, int offset, unsigned char ch) { usb_device_request_t req; usbd_status err; if (sc == NULL) return (0); DPRINTFN(0x200, ("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) return (0); offset &= 0xff; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = UDAV_REQ_REG_WRITE1; USETW(req.wValue, ch); USETW(req.wIndex, offset); USETW(req.wLength, 0x0000); sc->sc_refcnt++; err = usbd_do_request(sc->sc_udev, &req, NULL); if (--sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); if (err) { DPRINTF(("%s: %s: write failed. off=%04x, err=%d\n", USBDEVNAME(sc->sc_dev), __func__, offset, err)); } return (err); } #if defined(__NetBSD__) Static int udav_init(struct ifnet *ifp) #elif defined(__FreeBSD__) Static void udav_init(void *xsc) #endif { #if defined(__NetBSD__) struct udav_softc *sc = ifp->if_softc; #elif defined(__FreeBSD__) struct udav_softc *sc = (struct udav_softc *)xsc; struct ifnet *ifp = GET_IFP(sc); #endif struct mii_data *mii = GET_MII(sc); u_char *eaddr; #if defined(__NetBSD__) int s; #endif DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) #if defined(__NetBSD__) return (EIO); #elif defined(__FreeBSD__) return ; #endif #if defined(__NetBSD__) s = splnet(); #elif defined(__FreeBSD__) UDAV_LOCK(sc); #endif /* Cancel pending I/O and free all TX/RX buffers */ udav_stop(ifp, 1); #if defined(__NetBSD__) eaddr = LLADDR(ifp->if_sadl); #elif defined(__FreeBSD__) eaddr = IFP2ENADDR(ifp); #endif udav_csr_write(sc, UDAV_PAR, eaddr, ETHER_ADDR_LEN); /* Initialize network control register */ /* Disable loopback */ UDAV_CLRBIT(sc, UDAV_NCR, UDAV_NCR_LBK0 | UDAV_NCR_LBK1); /* Initialize RX control register */ UDAV_SETBIT(sc, UDAV_RCR, UDAV_RCR_DIS_LONG | UDAV_RCR_DIS_CRC); /* If we want promiscuous mode, accept all physical frames. 
*/ if (ifp->if_flags & IFF_PROMISC) UDAV_SETBIT(sc, UDAV_RCR, UDAV_RCR_ALL|UDAV_RCR_PRMSC); else UDAV_CLRBIT(sc, UDAV_RCR, UDAV_RCR_ALL|UDAV_RCR_PRMSC); /* Initialize transmit ring */ if (usb_ether_tx_list_init(sc, &sc->sc_cdata, sc->sc_udev) == ENOBUFS) { printf("%s: tx list init failed\n", USBDEVNAME(sc->sc_dev)); #if defined(__NetBSD__) splx(s); return (EIO); #elif defined(__FreeBSD__) UDAV_UNLOCK(sc); return ; #endif } /* Initialize receive ring */ if (usb_ether_rx_list_init(sc, &sc->sc_cdata, sc->sc_udev) == ENOBUFS) { printf("%s: rx list init failed\n", USBDEVNAME(sc->sc_dev)); #if defined(__NetBSD__) splx(s); return (EIO); #elif defined(__FreeBSD__) UDAV_UNLOCK(sc); return ; #endif } /* Load the multicast filter */ udav_setmulti(sc); /* Enable RX */ UDAV_SETBIT(sc, UDAV_RCR, UDAV_RCR_RXEN); /* clear POWER_DOWN state of internal PHY */ UDAV_SETBIT(sc, UDAV_GPCR, UDAV_GPCR_GEP_CNTL0); UDAV_CLRBIT(sc, UDAV_GPR, UDAV_GPR_GEPIO0); mii_mediachg(mii); if (sc->sc_pipe_tx == NULL || sc->sc_pipe_rx == NULL) { if (udav_openpipes(sc)) { #if defined(__NetBSD__) splx(s); return (EIO); #elif defined(__FreeBSD__) UDAV_UNLOCK(sc); return ; #endif } } ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; #if defined(__NetBSD__) splx(s); #elif defined(__FreeBSD__) UDAV_UNLOCK(sc); #endif usb_callout(sc->sc_stat_ch, hz, udav_tick, sc); #if defined(__NetBSD__) return (0); #elif defined(__FreeBSD__) return ; #endif } Static void udav_reset(struct udav_softc *sc) { int i; DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) return; /* Select PHY */ #if 1 /* * XXX: force select internal phy. * external phy routines are not tested. 
*/ UDAV_CLRBIT(sc, UDAV_NCR, UDAV_NCR_EXT_PHY); #else if (sc->sc_flags & UDAV_EXT_PHY) { UDAV_SETBIT(sc, UDAV_NCR, UDAV_NCR_EXT_PHY); } else { UDAV_CLRBIT(sc, UDAV_NCR, UDAV_NCR_EXT_PHY); } #endif UDAV_SETBIT(sc, UDAV_NCR, UDAV_NCR_RST); for (i = 0; i < UDAV_TX_TIMEOUT; i++) { if (!(udav_csr_read1(sc, UDAV_NCR) & UDAV_NCR_RST)) break; delay(10); /* XXX */ } delay(10000); /* XXX */ } #if defined(__NetBSD__) || defined(__OpenBSD__) int udav_activate(device_ptr_t self, enum devact act) { struct udav_softc *sc = (struct udav_softc *)self; DPRINTF(("%s: %s: enter, act=%d\n", USBDEVNAME(sc->sc_dev), __func__, act)); switch (act) { case DVACT_ACTIVATE: return (EOPNOTSUPP); break; case DVACT_DEACTIVATE: if_deactivate(&sc->sc_ec.ec_if); sc->sc_dying = 1; break; } return (0); } #endif #define UDAV_BITS 6 #define UDAV_CALCHASH(addr) \ (ether_crc32_le((addr), ETHER_ADDR_LEN) & ((1 << UDAV_BITS) - 1)) Static void udav_setmulti(struct udav_softc *sc) { struct ifnet *ifp; #if defined(__NetBSD__) struct ether_multi *enm; struct ether_multistep step; #elif defined(__FreeBSD__) struct ifmultiaddr *ifma; #endif u_int8_t hashes[8]; int h = 0; DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) return; ifp = GET_IFP(sc); if (ifp->if_flags & IFF_PROMISC) { UDAV_SETBIT(sc, UDAV_RCR, UDAV_RCR_ALL|UDAV_RCR_PRMSC); return; } else if (ifp->if_flags & IFF_ALLMULTI) { #if defined(__NetBSD__) allmulti: #endif ifp->if_flags |= IFF_ALLMULTI; UDAV_SETBIT(sc, UDAV_RCR, UDAV_RCR_ALL); UDAV_CLRBIT(sc, UDAV_RCR, UDAV_RCR_PRMSC); return; } /* first, zot all the existing hash bits */ memset(hashes, 0x00, sizeof(hashes)); hashes[7] |= 0x80; /* broadcast address */ udav_csr_write(sc, UDAV_MAR, hashes, sizeof(hashes)); /* now program new ones */ #if defined(__NetBSD__) ETHER_FIRST_MULTI(step, &sc->sc_ec, enm); while (enm != NULL) { if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) goto allmulti; h = UDAV_CALCHASH(enm->enm_addrlo); hashes[h>>3] |= 1 << (h & 
0x7); ETHER_NEXT_MULTI(step, enm); } #elif defined(__FreeBSD__) + IF_ADDR_LOCK(ifp); #if __FreeBSD_version >= 500000 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) #else LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) #endif { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = UDAV_CALCHASH(LLADDR((struct sockaddr_dl *) ifma->ifma_addr)); hashes[h>>3] |= 1 << (h & 0x7); } + IF_ADDR_UNLOCK(ifp); #endif /* disable all multicast */ ifp->if_flags &= ~IFF_ALLMULTI; UDAV_CLRBIT(sc, UDAV_RCR, UDAV_RCR_ALL); /* write hash value to the register */ udav_csr_write(sc, UDAV_MAR, hashes, sizeof(hashes)); } Static int udav_openpipes(struct udav_softc *sc) { struct ue_chain *c; usbd_status err; int i; int error = 0; if (sc->sc_dying) return (EIO); sc->sc_refcnt++; /* Open RX pipe */ err = usbd_open_pipe(sc->sc_ctl_iface, sc->sc_bulkin_no, USBD_EXCLUSIVE_USE, &sc->sc_pipe_rx); if (err) { printf("%s: open rx pipe failed: %s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(err)); error = EIO; goto done; } /* Open TX pipe */ err = usbd_open_pipe(sc->sc_ctl_iface, sc->sc_bulkout_no, USBD_EXCLUSIVE_USE, &sc->sc_pipe_tx); if (err) { printf("%s: open tx pipe failed: %s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(err)); error = EIO; goto done; } #if 0 /* XXX: interrupt endpoint is not yet supported */ /* Open Interrupt pipe */ err = usbd_open_pipe_intr(sc->sc_ctl_iface, sc->sc_intrin_no, USBD_EXCLUSIVE_USE, &sc->sc_pipe_intr, sc, &sc->sc_cdata.ue_ibuf, UDAV_INTR_PKGLEN, udav_intr, UDAV_INTR_INTERVAL); if (err) { printf("%s: open intr pipe failed: %s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(err)); error = EIO; goto done; } #endif /* Start up the receive pipe. 
*/ for (i = 0; i < UE_RX_LIST_CNT; i++) { c = &sc->sc_cdata.ue_rx_chain[i]; usbd_setup_xfer(c->ue_xfer, sc->sc_pipe_rx, c, c->ue_buf, UE_BUFSZ, USBD_SHORT_XFER_OK | USBD_NO_COPY, USBD_NO_TIMEOUT, udav_rxeof); (void)usbd_transfer(c->ue_xfer); DPRINTF(("%s: %s: start read\n", USBDEVNAME(sc->sc_dev), __func__)); } done: if (--sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); return (error); } Static void udav_start(struct ifnet *ifp) { struct udav_softc *sc = ifp->if_softc; struct mbuf *m_head = NULL; DPRINTF(("%s: %s: enter, link=%d\n", USBDEVNAME(sc->sc_dev), __func__, sc->sc_link)); if (sc->sc_dying) return; if (!sc->sc_link) return; if (ifp->if_flags & IFF_OACTIVE) return; #if defined(__NetBSD__) IFQ_POLL(&ifp->if_snd, m_head); #elif defined(__FreeBSD__) IF_DEQUEUE(&ifp->if_snd, m_head); #endif if (m_head == NULL) return; if (udav_send(sc, m_head, 0)) { #if defined(__FreeBSD__) IF_PREPEND(&ifp->if_snd, m_head); #endif ifp->if_flags |= IFF_OACTIVE; return; } #if defined(__NetBSD__) IFQ_DEQUEUE(&ifp->if_snd, m_head); #endif #if NBPFILTER > 0 BPF_MTAP(ifp, m_head); #endif ifp->if_flags |= IFF_OACTIVE; /* Set a timeout in case the chip goes out to lunch. 
*/ ifp->if_timer = 5; } Static int udav_send(struct udav_softc *sc, struct mbuf *m, int idx) { int total_len; struct ue_chain *c; usbd_status err; DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev),__func__)); c = &sc->sc_cdata.ue_tx_chain[idx]; /* Copy the mbuf data into a contiguous buffer */ /* first 2 bytes are packet length */ m_copydata(m, 0, m->m_pkthdr.len, c->ue_buf + 2); c->ue_mbuf = m; total_len = m->m_pkthdr.len; if (total_len < UDAV_MIN_FRAME_LEN) { memset(c->ue_buf + 2 + total_len, 0, UDAV_MIN_FRAME_LEN - total_len); total_len = UDAV_MIN_FRAME_LEN; } /* Frame length is specified in the first 2bytes of the buffer */ c->ue_buf[0] = (u_int8_t)total_len; c->ue_buf[1] = (u_int8_t)(total_len >> 8); total_len += 2; usbd_setup_xfer(c->ue_xfer, sc->sc_pipe_tx, c, c->ue_buf, total_len, USBD_FORCE_SHORT_XFER | USBD_NO_COPY, UDAV_TX_TIMEOUT, udav_txeof); /* Transmit */ sc->sc_refcnt++; err = usbd_transfer(c->ue_xfer); if (--sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); if (err != USBD_IN_PROGRESS) { printf("%s: udav_send error=%s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(err)); /* Stop the interface */ usb_add_task(sc->sc_udev, &sc->sc_stop_task); return (EIO); } DPRINTF(("%s: %s: send %d bytes\n", USBDEVNAME(sc->sc_dev), __func__, total_len)); sc->sc_cdata.ue_tx_cnt++; return (0); } Static void udav_txeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status) { struct ue_chain *c = priv; struct udav_softc *sc = c->ue_sc; struct ifnet *ifp = GET_IFP(sc); #if defined(__NetBSD__) int s; #endif if (sc->sc_dying) return; #if defined(__NetBSD__) s = splnet(); #elif defined(__FreeBSD__) UDAV_LOCK(sc); #endif DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); ifp->if_timer = 0; ifp->if_flags &= ~IFF_OACTIVE; if (status != USBD_NORMAL_COMPLETION) { if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) { #if defined(__NetBSD__) splx(s); #elif defined(__FreeBSD__) UDAV_UNLOCK(sc); #endif return; } ifp->if_oerrors++; printf("%s: 
usb error on tx: %s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(status)); if (status == USBD_STALLED) { sc->sc_refcnt++; usbd_clear_endpoint_stall(sc->sc_pipe_tx); if (--sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); } #if defined(__NetBSD__) splx(s); #elif defined(__FreeBSD__) UDAV_UNLOCK(sc); #endif return; } ifp->if_opackets++; m_freem(c->ue_mbuf); c->ue_mbuf = NULL; #if defined(__NetBSD__) if (IFQ_IS_EMPTY(&ifp->if_snd) == 0) #elif defined(__FreeBSD__) if ( ifp->if_snd.ifq_head != NULL ) #endif udav_start(ifp); #if defined(__NetBSD__) splx(s); #elif defined(__FreeBSD__) UDAV_UNLOCK(sc); #endif } Static void udav_rxeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status) { struct ue_chain *c = priv; struct udav_softc *sc = c->ue_sc; struct ifnet *ifp = GET_IFP(sc); struct mbuf *m; u_int32_t total_len; u_int8_t *pktstat; #if defined(__NetBSD__) int s; #endif DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev),__func__)); if (sc->sc_dying) return; if (status != USBD_NORMAL_COMPLETION) { if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) return; sc->sc_rx_errs++; if (usbd_ratecheck(&sc->sc_rx_notice)) { printf("%s: %u usb errors on rx: %s\n", USBDEVNAME(sc->sc_dev), sc->sc_rx_errs, usbd_errstr(status)); sc->sc_rx_errs = 0; } if (status == USBD_STALLED) { sc->sc_refcnt++; usbd_clear_endpoint_stall(sc->sc_pipe_rx); if (--sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); } goto done; } usbd_get_xfer_status(xfer, NULL, NULL, &total_len, NULL); /* copy data to mbuf */ m = c->ue_mbuf; memcpy(mtod(m, char *), c->ue_buf, total_len); /* first byte in received data */ pktstat = mtod(m, u_int8_t *); m_adj(m, sizeof(u_int8_t)); DPRINTF(("%s: RX Status: 0x%02x\n", USBDEVNAME(sc->sc_dev), *pktstat)); total_len = UGETW(mtod(m, u_int8_t *)); m_adj(m, sizeof(u_int16_t)); if (*pktstat & UDAV_RSR_LCS) { ifp->if_collisions++; goto done; } if (total_len < sizeof(struct ether_header) || *pktstat & UDAV_RSR_ERR) { ifp->if_ierrors++; goto done; } 
ifp->if_ipackets++; total_len -= ETHER_CRC_LEN; m->m_pkthdr.len = m->m_len = total_len; #if defined(__NetBSD__) m->m_pkthdr.rcvif = ifp; #elif defined(__FreeBSD__) m->m_pkthdr.rcvif = (struct ifnet *)&sc->sc_qdat; #endif #if defined(__NetBSD__) s = splnet(); #elif defined(__FreeBSD__) UDAV_LOCK(sc); #endif #if defined(__NetBSD__) c->ue_mbuf = usb_ether_newbuf(); if (c->ue_mbuf == NULL) { printf("%s: no memory for rx list " "-- packet dropped!\n", USBDEVNAME(sc->sc_dev)); ifp->if_ierrors++; goto done1; } #endif #if NBPFILTER > 0 BPF_MTAP(ifp, m); #endif DPRINTF(("%s: %s: deliver %d\n", USBDEVNAME(sc->sc_dev), __func__, m->m_len)); #if defined(__NetBSD__) IF_INPUT(ifp, m); #endif #if defined(__FreeBSD__) usb_ether_input(m); UDAV_UNLOCK(sc); return ; #endif #if defined(__NetBSD__) done1: splx(s); #elif defined(__FreeBSD__) UDAV_UNLOCK(sc); #endif done: /* Setup new transfer */ usbd_setup_xfer(xfer, sc->sc_pipe_rx, c, c->ue_buf, UE_BUFSZ, USBD_SHORT_XFER_OK | USBD_NO_COPY, USBD_NO_TIMEOUT, udav_rxeof); sc->sc_refcnt++; usbd_transfer(xfer); if (--sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); DPRINTF(("%s: %s: start rx\n", USBDEVNAME(sc->sc_dev), __func__)); } #if 0 Static void udav_intr() { } #endif Static int udav_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct udav_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; struct mii_data *mii; #if defined(__NetBSD__) int s; #endif int error = 0; DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) return (EIO); #if defined(__NetBSD__) s = splnet(); #elif defined(__FreeBSD__) UDAV_LOCK(sc); #endif switch (cmd) { #if defined(__FreeBSD__) case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_PROMISC) { UDAV_SETBIT(sc, UDAV_RCR, UDAV_RCR_ALL|UDAV_RCR_PRMSC); } else if (ifp->if_flags & IFF_RUNNING && !(ifp->if_flags & IFF_PROMISC)) { if (ifp->if_flags & IFF_ALLMULTI) UDAV_CLRBIT(sc, UDAV_RCR, UDAV_RCR_PRMSC); 
else UDAV_CLRBIT(sc, UDAV_RCR, UDAV_RCR_ALL|UDAV_RCR_PRMSC); } else if (!(ifp->if_flags & IFF_RUNNING)) udav_init(sc); } else { if (ifp->if_flags & IFF_RUNNING) udav_stop(ifp, 1); } error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: udav_setmulti(sc); error = 0; break; #endif case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = GET_MII(sc); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); break; default: error = ether_ioctl(ifp, cmd, data); #if defined(__NetBSD__) if (error == ENETRESET) { udav_setmulti(sc); error = 0; } #endif break; } #if defined(__NetBSD__) splx(s); #elif defined(__FreeBSD__) UDAV_UNLOCK(sc); #endif return (error); } Static void udav_watchdog(struct ifnet *ifp) { struct udav_softc *sc = ifp->if_softc; struct ue_chain *c; usbd_status stat; #if defined(__NetBSD__) int s; #endif DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); ifp->if_oerrors++; printf("%s: watchdog timeout\n", USBDEVNAME(sc->sc_dev)); #if defined(__NetBSD__) s = splusb(); #elif defined(__FreeBSD__) UDAV_LOCK(sc) #endif c = &sc->sc_cdata.ue_tx_chain[0]; usbd_get_xfer_status(c->ue_xfer, NULL, NULL, NULL, &stat); udav_txeof(c->ue_xfer, c, stat); #if defined(__NetBSD__) if (IFQ_IS_EMPTY(&ifp->if_snd) == 0) #elif defined(__FreeBSD__) if ( ifp->if_snd.ifq_head != NULL ) #endif udav_start(ifp); #if defined(__NetBSD__) splx(s); #elif defined(__FreeBSD__) UDAV_UNLOCK(sc); #endif } Static void udav_stop_task(struct udav_softc *sc) { udav_stop(GET_IFP(sc), 1); } /* Stop the adapter and free any mbufs allocated to the RX and TX lists. 
*/ Static void udav_stop(struct ifnet *ifp, int disable) { struct udav_softc *sc = ifp->if_softc; usbd_status err; DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); ifp->if_timer = 0; udav_reset(sc); usb_uncallout(sc->sc_stat_ch, udav_tick, sc); /* Stop transfers */ /* RX endpoint */ if (sc->sc_pipe_rx != NULL) { err = usbd_abort_pipe(sc->sc_pipe_rx); if (err) printf("%s: abort rx pipe failed: %s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(err)); err = usbd_close_pipe(sc->sc_pipe_rx); if (err) printf("%s: close rx pipe failed: %s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(err)); sc->sc_pipe_rx = NULL; } /* TX endpoint */ if (sc->sc_pipe_tx != NULL) { err = usbd_abort_pipe(sc->sc_pipe_tx); if (err) printf("%s: abort tx pipe failed: %s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(err)); err = usbd_close_pipe(sc->sc_pipe_tx); if (err) printf("%s: close tx pipe failed: %s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(err)); sc->sc_pipe_tx = NULL; } #if 0 /* XXX: Interrupt endpoint is not yet supported!! */ /* Interrupt endpoint */ if (sc->sc_pipe_intr != NULL) { err = usbd_abort_pipe(sc->sc_pipe_intr); if (err) printf("%s: abort intr pipe failed: %s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(err)); err = usbd_close_pipe(sc->sc_pipe_intr); if (err) printf("%s: close intr pipe failed: %s\n", USBDEVNAME(sc->sc_dev), usbd_errstr(err)); sc->sc_pipe_intr = NULL; } #endif /* Free RX resources. */ usb_ether_rx_list_free(&sc->sc_cdata); /* Free TX resources. 
*/ usb_ether_tx_list_free(&sc->sc_cdata); sc->sc_link = 0; ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); } /* Set media options */ Static int udav_ifmedia_change(struct ifnet *ifp) { struct udav_softc *sc = ifp->if_softc; struct mii_data *mii = GET_MII(sc); DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) return (0); sc->sc_link = 0; if (mii->mii_instance) { struct mii_softc *miisc; for (miisc = LIST_FIRST(&mii->mii_phys); miisc != NULL; miisc = LIST_NEXT(miisc, mii_list)) mii_phy_reset(miisc); } return (mii_mediachg(mii)); } /* Report current media status. */ Static void udav_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr) { struct udav_softc *sc = ifp->if_softc; struct mii_data *mii = GET_MII(sc); DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) return; if ((ifp->if_flags & IFF_RUNNING) == 0) { ifmr->ifm_active = IFM_ETHER | IFM_NONE; ifmr->ifm_status = 0; return; } mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } Static void udav_tick(void *xsc) { struct udav_softc *sc = xsc; if (sc == NULL) return; DPRINTFN(0xff, ("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) return; /* Perform periodic stuff in process context */ usb_add_task(sc->sc_udev, &sc->sc_tick_task); } Static void udav_tick_task(void *xsc) { struct udav_softc *sc = xsc; struct ifnet *ifp; struct mii_data *mii; #if defined(__NetBSD__) int s; #endif if (sc == NULL) return; DPRINTFN(0xff, ("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); if (sc->sc_dying) return; ifp = GET_IFP(sc); mii = GET_MII(sc); if (mii == NULL) return; #if defined(__NetBSD__) s = splnet(); #elif defined(__FreeBSD__) UDAV_LOCK(sc); #endif mii_tick(mii); if (!sc->sc_link) { mii_pollstat(mii); if (mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { DPRINTF(("%s: %s: got link\n", USBDEVNAME(sc->sc_dev), __func__)); sc->sc_link++; #if 
defined(__NetBSD__) if (IFQ_IS_EMPTY(&ifp->if_snd) == 0) #elif defined(__FreeBSD__) if ( ifp->if_snd.ifq_head != NULL ) #endif udav_start(ifp); } } usb_callout(sc->sc_stat_ch, hz, udav_tick, sc); #if defined(__NetBSD__) splx(s); #elif defined(__FreeBSD__) UDAV_UNLOCK(sc); #endif } /* Get exclusive access to the MII registers */ Static void udav_lock_mii(struct udav_softc *sc) { DPRINTFN(0xff, ("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); sc->sc_refcnt++; #if defined(__NetBSD__) lockmgr(&sc->sc_mii_lock, LK_EXCLUSIVE, NULL); #elif defined(__FreeBSD__) lockmgr(&sc->sc_mii_lock, LK_EXCLUSIVE, NULL, NULL); #endif } Static void udav_unlock_mii(struct udav_softc *sc) { DPRINTFN(0xff, ("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); #if defined(__NetBSD__) lockmgr(&sc->sc_mii_lock, LK_RELEASE, NULL); #elif defined(__FreeBSD__) lockmgr(&sc->sc_mii_lock, LK_RELEASE, NULL, NULL); #endif if (--sc->sc_refcnt < 0) usb_detach_wakeup(USBDEV(sc->sc_dev)); } Static int udav_miibus_readreg(device_ptr_t dev, int phy, int reg) { struct udav_softc *sc; u_int8_t val[2]; u_int16_t data16; if (dev == NULL) return (0); sc = USBGETSOFTC(dev); DPRINTFN(0xff, ("%s: %s: enter, phy=%d reg=0x%04x\n", USBDEVNAME(sc->sc_dev), __func__, phy, reg)); if (sc->sc_dying) { #ifdef DIAGNOSTIC printf("%s: %s: dying\n", USBDEVNAME(sc->sc_dev), __func__); #endif return (0); } /* XXX: one PHY only for the internal PHY */ if (phy != 0) { DPRINTFN(0xff, ("%s: %s: phy=%d is not supported\n", USBDEVNAME(sc->sc_dev), __func__, phy)); return (0); } udav_lock_mii(sc); /* select internal PHY and set PHY register address */ udav_csr_write1(sc, UDAV_EPAR, UDAV_EPAR_PHY_ADR0 | (reg & UDAV_EPAR_EROA_MASK)); /* select PHY operation and start read command */ udav_csr_write1(sc, UDAV_EPCR, UDAV_EPCR_EPOS | UDAV_EPCR_ERPRR); /* XXX: should be wait? 
*/ /* end read command */ UDAV_CLRBIT(sc, UDAV_EPCR, UDAV_EPCR_ERPRR); /* retrieve the result from data registers */ udav_csr_read(sc, UDAV_EPDRL, val, 2); udav_unlock_mii(sc); data16 = val[0] | (val[1] << 8); DPRINTFN(0xff, ("%s: %s: phy=%d reg=0x%04x => 0x%04x\n", USBDEVNAME(sc->sc_dev), __func__, phy, reg, data16)); return (data16); } Static void udav_miibus_writereg(device_ptr_t dev, int phy, int reg, int data) { struct udav_softc *sc; u_int8_t val[2]; if (dev == NULL) return; sc = USBGETSOFTC(dev); DPRINTFN(0xff, ("%s: %s: enter, phy=%d reg=0x%04x data=0x%04x\n", USBDEVNAME(sc->sc_dev), __func__, phy, reg, data)); if (sc->sc_dying) { #ifdef DIAGNOSTIC printf("%s: %s: dying\n", USBDEVNAME(sc->sc_dev), __func__); #endif return; } /* XXX: one PHY only for the internal PHY */ if (phy != 0) { DPRINTFN(0xff, ("%s: %s: phy=%d is not supported\n", USBDEVNAME(sc->sc_dev), __func__, phy)); return; } udav_lock_mii(sc); /* select internal PHY and set PHY register address */ udav_csr_write1(sc, UDAV_EPAR, UDAV_EPAR_PHY_ADR0 | (reg & UDAV_EPAR_EROA_MASK)); /* put the value to the data registers */ val[0] = data & 0xff; val[1] = (data >> 8) & 0xff; udav_csr_write(sc, UDAV_EPDRL, val, 2); /* select PHY operation and start write command */ udav_csr_write1(sc, UDAV_EPCR, UDAV_EPCR_EPOS | UDAV_EPCR_ERPRW); /* XXX: should be wait? */ /* end write command */ UDAV_CLRBIT(sc, UDAV_EPCR, UDAV_EPCR_ERPRW); udav_unlock_mii(sc); return; } Static void udav_miibus_statchg(device_ptr_t dev) { #ifdef UDAV_DEBUG struct udav_softc *sc; if (dev == NULL) return; sc = USBGETSOFTC(dev); DPRINTF(("%s: %s: enter\n", USBDEVNAME(sc->sc_dev), __func__)); #endif /* Nothing to do */ } #if defined(__FreeBSD__) /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. 
*/ Static void udav_shutdown(device_ptr_t dev) { struct udav_softc *sc; sc = device_get_softc(dev); udav_stop_task(sc); return; } Static void udav_rxstart(struct ifnet *ifp) { struct udav_softc *sc; struct ue_chain *c; sc = ifp->if_softc; UDAV_LOCK(sc); c = &sc->sc_cdata.ue_rx_chain[sc->sc_cdata.ue_rx_prod]; c->ue_mbuf = usb_ether_newbuf(); if (c->ue_mbuf == NULL) { printf("%s: no memory for rx list " "-- packet dropped!\n", USBDEVNAME(sc->sc_dev)); ifp->if_ierrors++; UDAV_UNLOCK(sc); return; } /* Setup new transfer. */ usbd_setup_xfer(c->ue_xfer, sc->sc_pipe_rx, c, c->ue_buf, UE_BUFSZ, USBD_SHORT_XFER_OK | USBD_NO_COPY, USBD_NO_TIMEOUT, udav_rxeof); usbd_transfer(c->ue_xfer); UDAV_UNLOCK(sc); return; } #endif Index: stable/6/sys/dev/vge/if_vge.c =================================================================== --- stable/6/sys/dev/vge/if_vge.c (revision 149421) +++ stable/6/sys/dev/vge/if_vge.c (revision 149422) @@ -1,2435 +1,2437 @@ /*- * Copyright (c) 2004 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * VIA Networking Technologies VT612x PCI gigabit ethernet NIC driver. * * Written by Bill Paul * Senior Networking Software Engineer * Wind River Systems */ /* * The VIA Networking VT6122 is a 32bit, 33/66Mhz PCI device that * combines a tri-speed ethernet MAC and PHY, with the following * features: * * o Jumbo frame support up to 16K * o Transmit and receive flow control * o IPv4 checksum offload * o VLAN tag insertion and stripping * o TCP large send * o 64-bit multicast hash table filter * o 64 entry CAM filter * o 16K RX FIFO and 48K TX FIFO memory * o Interrupt moderation * * The VT6122 supports up to four transmit DMA queues. The descriptors * in the transmit ring can address up to 7 data fragments; frames which * span more than 7 data buffers must be coalesced, but in general the * BSD TCP/IP stack rarely generates frames more than 2 or 3 fragments * long. The receive descriptors address only a single buffer. * * There are two peculiar design issues with the VT6122. One is that * receive data buffers must be aligned on a 32-bit boundary. 
This is * not a problem where the VT6122 is used as a LOM device in x86-based * systems, but on architectures that generate unaligned access traps, we * have to do some copying. * * The other issue has to do with the way 64-bit addresses are handled. * The DMA descriptors only allow you to specify 48 bits of addressing * information. The remaining 16 bits are specified using one of the * I/O registers. If you only have a 32-bit system, then this isn't * an issue, but if you have a 64-bit system and more than 4GB of * memory, you must have to make sure your network data buffers reside * in the same 48-bit 'segment.' * * Special thanks to Ryan Fu at VIA Networking for providing documentation * and sample NICs for testing. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(vge, pci, 1, 1, 1); MODULE_DEPEND(vge, ether, 1, 1, 1); MODULE_DEPEND(vge, miibus, 1, 1, 1); /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" #include #include #define VGE_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) /* * Various supported device vendors/types and their names. 
*/ static struct vge_type vge_devs[] = { { VIA_VENDORID, VIA_DEVICEID_61XX, "VIA Networking Gigabit Ethernet" }, { 0, 0, NULL } }; static int vge_probe (device_t); static int vge_attach (device_t); static int vge_detach (device_t); static int vge_encap (struct vge_softc *, struct mbuf *, int); static void vge_dma_map_addr (void *, bus_dma_segment_t *, int, int); static void vge_dma_map_rx_desc (void *, bus_dma_segment_t *, int, bus_size_t, int); static void vge_dma_map_tx_desc (void *, bus_dma_segment_t *, int, bus_size_t, int); static int vge_allocmem (device_t, struct vge_softc *); static int vge_newbuf (struct vge_softc *, int, struct mbuf *); static int vge_rx_list_init (struct vge_softc *); static int vge_tx_list_init (struct vge_softc *); #ifdef VGE_FIXUP_RX static __inline void vge_fixup_rx (struct mbuf *); #endif static void vge_rxeof (struct vge_softc *); static void vge_txeof (struct vge_softc *); static void vge_intr (void *); static void vge_tick (void *); static void vge_tx_task (void *, int); static void vge_start (struct ifnet *); static int vge_ioctl (struct ifnet *, u_long, caddr_t); static void vge_init (void *); static void vge_stop (struct vge_softc *); static void vge_watchdog (struct ifnet *); static int vge_suspend (device_t); static int vge_resume (device_t); static void vge_shutdown (device_t); static int vge_ifmedia_upd (struct ifnet *); static void vge_ifmedia_sts (struct ifnet *, struct ifmediareq *); #ifdef VGE_EEPROM static void vge_eeprom_getword (struct vge_softc *, int, u_int16_t *); #endif static void vge_read_eeprom (struct vge_softc *, caddr_t, int, int, int); static void vge_miipoll_start (struct vge_softc *); static void vge_miipoll_stop (struct vge_softc *); static int vge_miibus_readreg (device_t, int, int); static int vge_miibus_writereg (device_t, int, int, int); static void vge_miibus_statchg (device_t); static void vge_cam_clear (struct vge_softc *); static int vge_cam_set (struct vge_softc *, uint8_t *); #if 
__FreeBSD_version < 502113 static uint32_t vge_mchash (uint8_t *); #endif static void vge_setmulti (struct vge_softc *); static void vge_reset (struct vge_softc *); #define VGE_PCI_LOIO 0x10 #define VGE_PCI_LOMEM 0x14 static device_method_t vge_methods[] = { /* Device interface */ DEVMETHOD(device_probe, vge_probe), DEVMETHOD(device_attach, vge_attach), DEVMETHOD(device_detach, vge_detach), DEVMETHOD(device_suspend, vge_suspend), DEVMETHOD(device_resume, vge_resume), DEVMETHOD(device_shutdown, vge_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, vge_miibus_readreg), DEVMETHOD(miibus_writereg, vge_miibus_writereg), DEVMETHOD(miibus_statchg, vge_miibus_statchg), { 0, 0 } }; static driver_t vge_driver = { "vge", vge_methods, sizeof(struct vge_softc) }; static devclass_t vge_devclass; DRIVER_MODULE(vge, pci, vge_driver, vge_devclass, 0, 0); DRIVER_MODULE(vge, cardbus, vge_driver, vge_devclass, 0, 0); DRIVER_MODULE(miibus, vge, miibus_driver, miibus_devclass, 0, 0); #ifdef VGE_EEPROM /* * Read a word of data stored in the EEPROM at address 'addr.' */ static void vge_eeprom_getword(sc, addr, dest) struct vge_softc *sc; int addr; u_int16_t *dest; { register int i; u_int16_t word = 0; /* * Enter EEPROM embedded programming mode. In order to * access the EEPROM at all, we first have to set the * EELOAD bit in the CHIPCFG2 register. */ CSR_SETBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); /* Select the address of the word we want to read */ CSR_WRITE_1(sc, VGE_EEADDR, addr); /* Issue read command */ CSR_SETBIT_1(sc, VGE_EECMD, VGE_EECMD_ERD); /* Wait for the done bit to be set. 
*/ for (i = 0; i < VGE_TIMEOUT; i++) { if (CSR_READ_1(sc, VGE_EECMD) & VGE_EECMD_EDONE) break; } if (i == VGE_TIMEOUT) { device_printf(sc->vge_dev, "EEPROM read timed out\n"); *dest = 0; return; } /* Read the result */ word = CSR_READ_2(sc, VGE_EERDDAT); /* Turn off EEPROM access mode. */ CSR_CLRBIT_1(sc, VGE_EECSR, VGE_EECSR_EMBP/*|VGE_EECSR_ECS*/); CSR_CLRBIT_1(sc, VGE_CHIPCFG2, VGE_CHIPCFG2_EELOAD); *dest = word; return; } #endif /* * Read a sequence of words from the EEPROM. */ static void vge_read_eeprom(sc, dest, off, cnt, swap) struct vge_softc *sc; caddr_t dest; int off; int cnt; int swap; { int i; #ifdef VGE_EEPROM u_int16_t word = 0, *ptr; for (i = 0; i < cnt; i++) { vge_eeprom_getword(sc, off + i, &word); ptr = (u_int16_t *)(dest + (i * 2)); if (swap) *ptr = ntohs(word); else *ptr = word; } #else for (i = 0; i < ETHER_ADDR_LEN; i++) dest[i] = CSR_READ_1(sc, VGE_PAR0 + i); #endif } static void vge_miipoll_stop(sc) struct vge_softc *sc; { int i; CSR_WRITE_1(sc, VGE_MIICMD, 0); for (i = 0; i < VGE_TIMEOUT; i++) { DELAY(1); if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) break; } if (i == VGE_TIMEOUT) device_printf(sc->vge_dev, "failed to idle MII autopoll\n"); return; } static void vge_miipoll_start(sc) struct vge_softc *sc; { int i; /* First, make sure we're idle. */ CSR_WRITE_1(sc, VGE_MIICMD, 0); CSR_WRITE_1(sc, VGE_MIIADDR, VGE_MIIADDR_SWMPL); for (i = 0; i < VGE_TIMEOUT; i++) { DELAY(1); if (CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) break; } if (i == VGE_TIMEOUT) { device_printf(sc->vge_dev, "failed to idle MII autopoll\n"); return; } /* Now enable auto poll mode. */ CSR_WRITE_1(sc, VGE_MIICMD, VGE_MIICMD_MAUTO); /* And make sure it started. 
*/
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIISTS) & VGE_MIISTS_IIDL) == 0)
			break;
	}

	if (i == VGE_TIMEOUT)
		device_printf(sc->vge_dev, "failed to start MII autopoll\n");

	return;
}

/*
 * Read a PHY register through the MII management interface.  MII
 * autopolling is suspended for the duration of the access and
 * restarted afterwards.  Returns the register contents, or 0 if the
 * access timed out or if 'phy' is not the PHY address latched in the
 * VGE_MIICFG register (the chip only talks to that one PHY).
 */
static int
vge_miibus_readreg(dev, phy, reg)
	device_t dev;
	int phy, reg;
{
	struct vge_softc *sc;
	int i;
	u_int16_t rval = 0;

	sc = device_get_softc(dev);
	/* Only the PHY address configured in VGE_MIICFG is reachable. */
	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return(0);

	VGE_LOCK(sc);
	vge_miipoll_stop(sc);

	/* Specify the register we want to read. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Issue read command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_RCMD);

	/* Wait for the read command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_RCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT)
		device_printf(sc->vge_dev, "MII read timed out\n");
	else
		rval = CSR_READ_2(sc, VGE_MIIDATA);

	vge_miipoll_start(sc);
	VGE_UNLOCK(sc);

	return (rval);
}

/*
 * Write a PHY register through the MII management interface.  As with
 * the read path, autopolling is suspended around the access and a
 * 'phy' that does not match VGE_MIICFG is ignored.  Returns 0 on
 * success or EIO if the write command failed to complete in time.
 */
static int
vge_miibus_writereg(dev, phy, reg, data)
	device_t dev;
	int phy, reg, data;
{
	struct vge_softc *sc;
	int i, rval = 0;

	sc = device_get_softc(dev);
	if (phy != (CSR_READ_1(sc, VGE_MIICFG) & 0x1F))
		return(0);

	VGE_LOCK(sc);
	vge_miipoll_stop(sc);

	/* Specify the register we want to write. */
	CSR_WRITE_1(sc, VGE_MIIADDR, reg);

	/* Specify the data we want to write. */
	CSR_WRITE_2(sc, VGE_MIIDATA, data);

	/* Issue write command. */
	CSR_SETBIT_1(sc, VGE_MIICMD, VGE_MIICMD_WCMD);

	/* Wait for the write command bit to self-clear. */
	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(1);
		if ((CSR_READ_1(sc, VGE_MIICMD) & VGE_MIICMD_WCMD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "MII write timed out\n");
		rval = EIO;
	}

	vge_miipoll_start(sc);
	VGE_UNLOCK(sc);

	return (rval);
}

/*
 * Invalidate every entry in the 64-entry CAM address filter (and the
 * VLAN CAM) by clearing all the mask bits, then leave the CAM control
 * pointed back at the multicast-address page.
 */
static void
vge_cam_clear(sc)
	struct vge_softc *sc;
{
	int i;

	/*
	 * Turn off all the mask bits. This tells the chip
	 * that none of the entries in the CAM filter are valid.
	 * desired entries will be enabled as we fill the filter in.
*/ CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK); CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE); for (i = 0; i < 8; i++) CSR_WRITE_1(sc, VGE_CAM0 + i, 0); /* Clear the VLAN filter too. */ CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|VGE_CAMADDR_AVSEL|0); for (i = 0; i < 8; i++) CSR_WRITE_1(sc, VGE_CAM0 + i, 0); CSR_WRITE_1(sc, VGE_CAMADDR, 0); CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); sc->vge_camidx = 0; return; } static int vge_cam_set(sc, addr) struct vge_softc *sc; uint8_t *addr; { int i, error = 0; if (sc->vge_camidx == VGE_CAM_MAXADDRS) return(ENOSPC); /* Select the CAM data page. */ CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMDATA); /* Set the filter entry we want to update and enable writing. */ CSR_WRITE_1(sc, VGE_CAMADDR, VGE_CAMADDR_ENABLE|sc->vge_camidx); /* Write the address to the CAM registers */ for (i = 0; i < ETHER_ADDR_LEN; i++) CSR_WRITE_1(sc, VGE_CAM0 + i, addr[i]); /* Issue a write command. */ CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_WRITE); /* Wake for it to clear. */ for (i = 0; i < VGE_TIMEOUT; i++) { DELAY(1); if ((CSR_READ_1(sc, VGE_CAMCTL) & VGE_CAMCTL_WRITE) == 0) break; } if (i == VGE_TIMEOUT) { device_printf(sc->vge_dev, "setting CAM filter failed\n"); error = EIO; goto fail; } /* Select the CAM mask page. */ CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_CAMMASK); /* Set the mask bit that enables this filter. */ CSR_SETBIT_1(sc, VGE_CAM0 + (sc->vge_camidx/8), 1<<(sc->vge_camidx & 7)); sc->vge_camidx++; fail: /* Turn off access to CAM. 
*/ CSR_WRITE_1(sc, VGE_CAMADDR, 0); CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL); CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR); return (error); } #if __FreeBSD_version < 502113 static uint32_t vge_mchash(addr) uint8_t *addr; { uint32_t crc, carry; int idx, bit; uint8_t data; /* Compute CRC for the address value. */ crc = 0xFFFFFFFF; /* initial value */ for (idx = 0; idx < 6; idx++) { for (data = *addr++, bit = 0; bit < 8; bit++, data >>= 1) { carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01); crc <<= 1; if (carry) crc = (crc ^ 0x04c11db6) | carry; } } return(crc); } #endif /* * Program the multicast filter. We use the 64-entry CAM filter * for perfect filtering. If there's more than 64 multicast addresses, * we use the hash filter insted. */ static void vge_setmulti(sc) struct vge_softc *sc; { struct ifnet *ifp; int error = 0/*, h = 0*/; struct ifmultiaddr *ifma; u_int32_t h, hashes[2] = { 0, 0 }; ifp = sc->vge_ifp; /* First, zot all the multicast entries. */ vge_cam_clear(sc); CSR_WRITE_4(sc, VGE_MAR0, 0); CSR_WRITE_4(sc, VGE_MAR1, 0); /* * If the user wants allmulti or promisc mode, enable reception * of all multicast frames. */ if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { CSR_WRITE_4(sc, VGE_MAR0, 0xFFFFFFFF); CSR_WRITE_4(sc, VGE_MAR1, 0xFFFFFFFF); return; } /* Now program new ones */ + IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; error = vge_cam_set(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); if (error) break; } /* If there were too many addresses, use the hash filter. 
*/
	if (error) {
		/*
		 * CAM overflowed (vge_cam_set returned ENOSPC): fall back
		 * to the 64-bit multicast hash filter instead.
		 */
		vge_cam_clear(sc);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
#if __FreeBSD_version < 502113
			h = vge_mchash(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr)) >> 26;
#else
			h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
			    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
#endif
			/* The 64 hash bits are split across MAR0/MAR1. */
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}

		CSR_WRITE_4(sc, VGE_MAR0, hashes[0]);
		CSR_WRITE_4(sc, VGE_MAR1, hashes[1]);
	}
	IF_ADDR_UNLOCK(ifp);

	return;
}

/*
 * Soft-reset the chip, then force a reload of the chip defaults and
 * station address from the EEPROM.  If the soft reset fails to
 * self-clear, the chip is stopped by force before continuing.
 */
static void
vge_reset(sc)
	struct vge_softc *sc;
{
	register int i;

	/* Request a software reset and wait for it to self-clear. */
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_SOFTRESET);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_CRS1) & VGE_CR1_SOFTRESET) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "soft reset timed out");
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_STOP_FORCE);
		DELAY(2000);
	}

	DELAY(5000);

	/* Trigger an EEPROM reload and wait for it to finish. */
	CSR_SETBIT_1(sc, VGE_EECSR, VGE_EECSR_RELOAD);

	for (i = 0; i < VGE_TIMEOUT; i++) {
		DELAY(5);
		if ((CSR_READ_1(sc, VGE_EECSR) & VGE_EECSR_RELOAD) == 0)
			break;
	}

	if (i == VGE_TIMEOUT) {
		device_printf(sc->vge_dev, "EEPROM reload timed out\n");
		return;
	}

	CSR_CLRBIT_1(sc, VGE_CHIPCFG0, VGE_CHIPCFG0_PACPI);

	return;
}

/*
 * Probe for a VIA gigabit chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
*/
static int
vge_probe(dev)
	device_t dev;
{
	struct vge_type *t;
	struct vge_softc *sc;

	t = vge_devs;
	sc = device_get_softc(dev);

	/* Walk the device table looking for a vendor/device ID match. */
	while (t->vge_name != NULL) {
		if ((pci_get_vendor(dev) == t->vge_vid) &&
		    (pci_get_device(dev) == t->vge_did)) {
			device_set_desc(dev, t->vge_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

/*
 * bus_dmamap_load() callback for an RX buffer: write the (single)
 * DMA segment into the RX descriptor at ctx->vge_idx.  On any
 * failure (load error, too many segments, descriptor still owned by
 * the chip) ctx->vge_maxsegs is zeroed so the caller can detect it.
 */
static void
vge_dma_map_rx_desc(arg, segs, nseg, mapsize, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg;
	bus_size_t mapsize;
	int error;
{
	struct vge_dmaload_arg *ctx;
	struct vge_rx_desc *d = NULL;

	if (error)
		return;

	ctx = arg;

	/* Signal error to caller if there's too many segments */
	if (nseg > ctx->vge_maxsegs) {
		ctx->vge_maxsegs = 0;
		return;
	}

	/*
	 * Map the segment array into descriptors.
	 */

	d = &ctx->sc->vge_ldata.vge_rx_list[ctx->vge_idx];

	/* If this descriptor is still owned by the chip, bail. */
	if (le32toh(d->vge_sts) & VGE_RDSTS_OWN) {
		device_printf(ctx->sc->vge_dev,
		    "tried to map busy descriptor\n");
		ctx->vge_maxsegs = 0;
		return;
	}

	/* 40-bit DMA addressing: low 32 bits plus a 16-bit high word. */
	d->vge_buflen = htole16(VGE_BUFLEN(segs[0].ds_len) | VGE_RXDESC_I);
	d->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr));
	d->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF);
	d->vge_sts = 0;
	d->vge_ctl = 0;

	ctx->vge_maxsegs = 1;

	return;
}

/*
 * bus_dmamap_load_mbuf() callback for a TX frame: scatter the DMA
 * segments into the fragment slots of the TX descriptor at
 * ctx->vge_idx.  ctx->vge_maxsegs is zeroed on failure.
 */
static void
vge_dma_map_tx_desc(arg, segs, nseg, mapsize, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg;
	bus_size_t mapsize;
	int error;
{
	struct vge_dmaload_arg *ctx;
	struct vge_tx_desc *d = NULL;
	struct vge_tx_frag *f;
	int i = 0;

	if (error)
		return;

	ctx = arg;

	/* Signal error to caller if there's too many segments */
	if (nseg > ctx->vge_maxsegs) {
		ctx->vge_maxsegs = 0;
		return;
	}

	/* Map the segment array into descriptors. */
	d = &ctx->sc->vge_ldata.vge_tx_list[ctx->vge_idx];

	/* If this descriptor is still owned by the chip, bail.
*/ if (le32toh(d->vge_sts) & VGE_TDSTS_OWN) { ctx->vge_maxsegs = 0; return; } for (i = 0; i < nseg; i++) { f = &d->vge_frag[i]; f->vge_buflen = htole16(VGE_BUFLEN(segs[i].ds_len)); f->vge_addrlo = htole32(VGE_ADDR_LO(segs[i].ds_addr)); f->vge_addrhi = htole16(VGE_ADDR_HI(segs[i].ds_addr) & 0xFFFF); } /* Argh. This chip does not autopad short frames */ if (ctx->vge_m0->m_pkthdr.len < VGE_MIN_FRAMELEN) { f = &d->vge_frag[i]; f->vge_buflen = htole16(VGE_BUFLEN(VGE_MIN_FRAMELEN - ctx->vge_m0->m_pkthdr.len)); f->vge_addrlo = htole32(VGE_ADDR_LO(segs[0].ds_addr)); f->vge_addrhi = htole16(VGE_ADDR_HI(segs[0].ds_addr) & 0xFFFF); ctx->vge_m0->m_pkthdr.len = VGE_MIN_FRAMELEN; i++; } /* * When telling the chip how many segments there are, we * must use nsegs + 1 instead of just nsegs. Darned if I * know why. */ i++; d->vge_sts = ctx->vge_m0->m_pkthdr.len << 16; d->vge_ctl = ctx->vge_flags|(i << 28)|VGE_TD_LS_NORM; if (ctx->vge_m0->m_pkthdr.len > ETHERMTU + ETHER_HDR_LEN) d->vge_ctl |= VGE_TDCTL_JUMBO; ctx->vge_maxsegs = nseg; return; } /* * Map a single buffer address. */ static void vge_dma_map_addr(arg, segs, nseg, error) void *arg; bus_dma_segment_t *segs; int nseg; int error; { bus_addr_t *addr; if (error) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); addr = arg; *addr = segs->ds_addr; return; } static int vge_allocmem(dev, sc) device_t dev; struct vge_softc *sc; { int error; int nseg; int i; /* * Allocate map for RX mbufs. */ nseg = 32; error = bus_dma_tag_create(sc->vge_parent_tag, ETHER_ALIGN, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * nseg, nseg, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->vge_ldata.vge_mtag); if (error) { device_printf(dev, "could not allocate dma tag\n"); return (ENOMEM); } /* * Allocate map for TX descriptor list. 
*/ error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, VGE_TX_LIST_SZ, 1, VGE_TX_LIST_SZ, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->vge_ldata.vge_tx_list_tag); if (error) { device_printf(dev, "could not allocate dma tag\n"); return (ENOMEM); } /* Allocate DMA'able memory for the TX ring */ error = bus_dmamem_alloc(sc->vge_ldata.vge_tx_list_tag, (void **)&sc->vge_ldata.vge_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->vge_ldata.vge_tx_list_map); if (error) return (ENOMEM); /* Load the map for the TX ring. */ error = bus_dmamap_load(sc->vge_ldata.vge_tx_list_tag, sc->vge_ldata.vge_tx_list_map, sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ, vge_dma_map_addr, &sc->vge_ldata.vge_tx_list_addr, BUS_DMA_NOWAIT); /* Create DMA maps for TX buffers */ for (i = 0; i < VGE_TX_DESC_CNT; i++) { error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0, &sc->vge_ldata.vge_tx_dmamap[i]); if (error) { device_printf(dev, "can't create DMA map for TX\n"); return (ENOMEM); } } /* * Allocate map for RX descriptor list. */ error = bus_dma_tag_create(sc->vge_parent_tag, VGE_RING_ALIGN, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, VGE_TX_LIST_SZ, 1, VGE_TX_LIST_SZ, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->vge_ldata.vge_rx_list_tag); if (error) { device_printf(dev, "could not allocate dma tag\n"); return (ENOMEM); } /* Allocate DMA'able memory for the RX ring */ error = bus_dmamem_alloc(sc->vge_ldata.vge_rx_list_tag, (void **)&sc->vge_ldata.vge_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->vge_ldata.vge_rx_list_map); if (error) return (ENOMEM); /* Load the map for the RX ring. 
*/ error = bus_dmamap_load(sc->vge_ldata.vge_rx_list_tag, sc->vge_ldata.vge_rx_list_map, sc->vge_ldata.vge_rx_list, VGE_TX_LIST_SZ, vge_dma_map_addr, &sc->vge_ldata.vge_rx_list_addr, BUS_DMA_NOWAIT); /* Create DMA maps for RX buffers */ for (i = 0; i < VGE_RX_DESC_CNT; i++) { error = bus_dmamap_create(sc->vge_ldata.vge_mtag, 0, &sc->vge_ldata.vge_rx_dmamap[i]); if (error) { device_printf(dev, "can't create DMA map for RX\n"); return (ENOMEM); } } return (0); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int vge_attach(dev) device_t dev; { u_char eaddr[ETHER_ADDR_LEN]; struct vge_softc *sc; struct ifnet *ifp; int unit, error = 0, rid; sc = device_get_softc(dev); unit = device_get_unit(dev); sc->vge_dev = dev; mtx_init(&sc->vge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); /* * Map control/status registers. */ pci_enable_busmaster(dev); rid = VGE_PCI_LOMEM; sc->vge_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1, RF_ACTIVE); if (sc->vge_res == NULL) { printf ("vge%d: couldn't map ports/memory\n", unit); error = ENXIO; goto fail; } sc->vge_btag = rman_get_bustag(sc->vge_res); sc->vge_bhandle = rman_get_bushandle(sc->vge_res); /* Allocate interrupt */ rid = 0; sc->vge_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE); if (sc->vge_irq == NULL) { printf("vge%d: couldn't map interrupt\n", unit); error = ENXIO; goto fail; } /* Reset the adapter. */ vge_reset(sc); /* * Get station address from the EEPROM. */ vge_read_eeprom(sc, (caddr_t)eaddr, VGE_EE_EADDR, 3, 0); sc->vge_unit = unit; #if __FreeBSD_version < 502113 printf("vge%d: Ethernet address: %6D\n", unit, eaddr, ":"); #endif /* * Allocate the parent bus DMA tag appropriate for PCI. 
*/ #define VGE_NSEG_NEW 32 error = bus_dma_tag_create(NULL, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MAXBSIZE, VGE_NSEG_NEW, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->vge_parent_tag); if (error) goto fail; error = vge_allocmem(dev, sc); if (error) goto fail; ifp = sc->vge_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { printf("vge%d: can not if_alloc()\n", sc->vge_unit); error = ENOSPC; goto fail; } /* Do MII setup */ if (mii_phy_probe(dev, &sc->vge_miibus, vge_ifmedia_upd, vge_ifmedia_sts)) { printf("vge%d: MII without any phy!\n", sc->vge_unit); error = ENXIO; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = vge_ioctl; ifp->if_capabilities = IFCAP_VLAN_MTU; ifp->if_start = vge_start; ifp->if_hwassist = VGE_CSUM_FEATURES; ifp->if_capabilities |= IFCAP_HWCSUM|IFCAP_VLAN_HWTAGGING; #ifdef DEVICE_POLLING #ifdef IFCAP_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif #endif ifp->if_watchdog = vge_watchdog; ifp->if_init = vge_init; ifp->if_baudrate = 1000000000; ifp->if_snd.ifq_maxlen = VGE_IFQ_MAXLEN; ifp->if_capenable = ifp->if_capabilities; TASK_INIT(&sc->vge_txtask, 0, vge_tx_task, ifp); /* * Call MI attach routine. */ ether_ifattach(ifp, eaddr); /* Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->vge_irq, INTR_TYPE_NET|INTR_MPSAFE, vge_intr, sc, &sc->vge_intrhand); if (error) { printf("vge%d: couldn't set up irq\n", unit); ether_ifdetach(ifp); goto fail; } fail: if (error) vge_detach(dev); return (error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. 
It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. */ static int vge_detach(dev) device_t dev; { struct vge_softc *sc; struct ifnet *ifp; int i; sc = device_get_softc(dev); KASSERT(mtx_initialized(&sc->vge_mtx), ("vge mutex not initialized")); ifp = sc->vge_ifp; /* These should only be active if attach succeeded */ if (device_is_attached(dev)) { vge_stop(sc); /* * Force off the IFF_UP flag here, in case someone * still had a BPF descriptor attached to this * interface. If they do, ether_ifattach() will cause * the BPF code to try and clear the promisc mode * flag, which will bubble down to vge_ioctl(), * which will try to call vge_init() again. This will * turn the NIC back on and restart the MII ticker, * which will panic the system when the kernel tries * to invoke the vge_tick() function that isn't there * anymore. */ ifp->if_flags &= ~IFF_UP; ether_ifdetach(ifp); if_free(ifp); } if (sc->vge_miibus) device_delete_child(dev, sc->vge_miibus); bus_generic_detach(dev); if (sc->vge_intrhand) bus_teardown_intr(dev, sc->vge_irq, sc->vge_intrhand); if (sc->vge_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vge_irq); if (sc->vge_res) bus_release_resource(dev, SYS_RES_MEMORY, VGE_PCI_LOMEM, sc->vge_res); /* Unload and free the RX DMA ring memory and map */ if (sc->vge_ldata.vge_rx_list_tag) { bus_dmamap_unload(sc->vge_ldata.vge_rx_list_tag, sc->vge_ldata.vge_rx_list_map); bus_dmamem_free(sc->vge_ldata.vge_rx_list_tag, sc->vge_ldata.vge_rx_list, sc->vge_ldata.vge_rx_list_map); bus_dma_tag_destroy(sc->vge_ldata.vge_rx_list_tag); } /* Unload and free the TX DMA ring memory and map */ if (sc->vge_ldata.vge_tx_list_tag) { bus_dmamap_unload(sc->vge_ldata.vge_tx_list_tag, sc->vge_ldata.vge_tx_list_map); bus_dmamem_free(sc->vge_ldata.vge_tx_list_tag, sc->vge_ldata.vge_tx_list, sc->vge_ldata.vge_tx_list_map); bus_dma_tag_destroy(sc->vge_ldata.vge_tx_list_tag); 
} /* Destroy all the RX and TX buffer maps */ if (sc->vge_ldata.vge_mtag) { for (i = 0; i < VGE_TX_DESC_CNT; i++) bus_dmamap_destroy(sc->vge_ldata.vge_mtag, sc->vge_ldata.vge_tx_dmamap[i]); for (i = 0; i < VGE_RX_DESC_CNT; i++) bus_dmamap_destroy(sc->vge_ldata.vge_mtag, sc->vge_ldata.vge_rx_dmamap[i]); bus_dma_tag_destroy(sc->vge_ldata.vge_mtag); } if (sc->vge_parent_tag) bus_dma_tag_destroy(sc->vge_parent_tag); mtx_destroy(&sc->vge_mtx); return (0); } static int vge_newbuf(sc, idx, m) struct vge_softc *sc; int idx; struct mbuf *m; { struct vge_dmaload_arg arg; struct mbuf *n = NULL; int i, error; if (m == NULL) { n = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (n == NULL) return (ENOBUFS); m = n; } else m->m_data = m->m_ext.ext_buf; #ifdef VGE_FIXUP_RX /* * This is part of an evil trick to deal with non-x86 platforms. * The VIA chip requires RX buffers to be aligned on 32-bit * boundaries, but that will hose non-x86 machines. To get around * this, we leave some empty space at the start of each buffer * and for non-x86 hosts, we copy the buffer back two bytes * to achieve word alignment. This is slightly more efficient * than allocating a new buffer, copying the contents, and * discarding the old buffer. */ m->m_len = m->m_pkthdr.len = MCLBYTES - VGE_ETHER_ALIGN; m_adj(m, VGE_ETHER_ALIGN); #else m->m_len = m->m_pkthdr.len = MCLBYTES; #endif arg.sc = sc; arg.vge_idx = idx; arg.vge_maxsegs = 1; arg.vge_flags = 0; error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, sc->vge_ldata.vge_rx_dmamap[idx], m, vge_dma_map_rx_desc, &arg, BUS_DMA_NOWAIT); if (error || arg.vge_maxsegs != 1) { if (n != NULL) m_freem(n); return (ENOMEM); } /* * Note: the manual fails to document the fact that for * proper opration, the driver needs to replentish the RX * DMA ring 4 descriptors at a time (rather than one at a * time, like most chips). We can allocate the new buffers * but we should not set the OWN bits until we're ready * to hand back 4 of them in one shot. 
*/
#define VGE_RXCHUNK 4
	sc->vge_rx_consumed++;
	if (sc->vge_rx_consumed == VGE_RXCHUNK) {
		/*
		 * Hand a whole chunk of VGE_RXCHUNK descriptors back to
		 * the chip at once.  Slots are consumed sequentially, so
		 * when a chunk completes idx is at least VGE_RXCHUNK - 1
		 * and this backwards walk stays within the ring —
		 * TODO(review): confirm no caller passes out-of-sequence
		 * indices.
		 */
		for (i = idx; i != idx - sc->vge_rx_consumed; i--)
			sc->vge_ldata.vge_rx_list[i].vge_sts |=
			    htole32(VGE_RDSTS_OWN);
		sc->vge_rx_consumed = 0;
	}

	sc->vge_ldata.vge_rx_mbuf[idx] = m;

	bus_dmamap_sync(sc->vge_ldata.vge_mtag,
	    sc->vge_ldata.vge_rx_dmamap[idx],
	    BUS_DMASYNC_PREREAD);

	return (0);
}

/*
 * Initialize the TX descriptor ring: zero the descriptors and the
 * shadow mbuf pointers, flush the ring to the chip, and reset the
 * producer/consumer indices and free-descriptor count.
 */
static int
vge_tx_list_init(sc)
	struct vge_softc *sc;
{
	bzero ((char *)sc->vge_ldata.vge_tx_list, VGE_TX_LIST_SZ);
	bzero ((char *)&sc->vge_ldata.vge_tx_mbuf,
	    (VGE_TX_DESC_CNT * sizeof(struct mbuf *)));

	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
	    sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_PREWRITE);
	sc->vge_ldata.vge_tx_prodidx = 0;
	sc->vge_ldata.vge_tx_considx = 0;
	sc->vge_ldata.vge_tx_free = VGE_TX_DESC_CNT;

	return (0);
}

/*
 * Initialize the RX descriptor ring and allocate an mbuf cluster for
 * every slot via vge_newbuf().  Returns ENOBUFS if any allocation
 * fails, 0 on success.
 */
static int
vge_rx_list_init(sc)
	struct vge_softc *sc;
{
	int i;

	bzero ((char *)sc->vge_ldata.vge_rx_list, VGE_RX_LIST_SZ);
	bzero ((char *)&sc->vge_ldata.vge_rx_mbuf,
	    (VGE_RX_DESC_CNT * sizeof(struct mbuf *)));

	sc->vge_rx_consumed = 0;

	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		if (vge_newbuf(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	}

	/* Flush the RX descriptors */

	bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag,
	    sc->vge_ldata.vge_rx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	sc->vge_ldata.vge_rx_prodidx = 0;
	sc->vge_rx_consumed = 0;
	sc->vge_head = sc->vge_tail = NULL;

	return (0);
}

#ifdef VGE_FIXUP_RX
/*
 * Slide the received data back by ETHER_ALIGN bytes so the payload
 * ends up aligned on strict-alignment machines; this undoes the
 * offset applied in vge_newbuf() (see the comment there).
 */
static __inline void
vge_fixup_rx(m)
	struct mbuf *m;
{
	int i;
	uint16_t *src, *dst;

	src = mtod(m, uint16_t *);
	dst = src - 1;

	/* Copy 16 bits at a time, one extra word to cover an odd tail. */
	for (i = 0; i < (m->m_len / sizeof(uint16_t) + 1); i++)
		*dst++ = *src++;

	m->m_data -= ETHER_ALIGN;

	return;
}
#endif

/*
 * RX handler. We support the reception of jumbo frames that have
 * been fragmented across multiple 2K mbuf cluster buffers.
*/ static void vge_rxeof(sc) struct vge_softc *sc; { struct mbuf *m; struct ifnet *ifp; int i, total_len; int lim = 0; struct vge_rx_desc *cur_rx; u_int32_t rxstat, rxctl; VGE_LOCK_ASSERT(sc); ifp = sc->vge_ifp; i = sc->vge_ldata.vge_rx_prodidx; /* Invalidate the descriptor memory */ bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag, sc->vge_ldata.vge_rx_list_map, BUS_DMASYNC_POSTREAD); while (!VGE_OWN(&sc->vge_ldata.vge_rx_list[i])) { #ifdef DEVICE_POLLING if (ifp->if_flags & IFF_POLLING) { if (sc->rxcycles <= 0) break; sc->rxcycles--; } #endif /* DEVICE_POLLING */ cur_rx = &sc->vge_ldata.vge_rx_list[i]; m = sc->vge_ldata.vge_rx_mbuf[i]; total_len = VGE_RXBYTES(cur_rx); rxstat = le32toh(cur_rx->vge_sts); rxctl = le32toh(cur_rx->vge_ctl); /* Invalidate the RX mbuf and unload its map */ bus_dmamap_sync(sc->vge_ldata.vge_mtag, sc->vge_ldata.vge_rx_dmamap[i], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->vge_ldata.vge_mtag, sc->vge_ldata.vge_rx_dmamap[i]); /* * If the 'start of frame' bit is set, this indicates * either the first fragment in a multi-fragment receive, * or an intermediate fragment. Either way, we want to * accumulate the buffers. */ if (rxstat & VGE_RXPKT_SOF) { m->m_len = MCLBYTES - VGE_ETHER_ALIGN; if (sc->vge_head == NULL) sc->vge_head = sc->vge_tail = m; else { m->m_flags &= ~M_PKTHDR; sc->vge_tail->m_next = m; sc->vge_tail = m; } vge_newbuf(sc, i, NULL); VGE_RX_DESC_INC(i); continue; } /* * Bad/error frames will have the RXOK bit cleared. * However, there's one error case we want to allow: * if a VLAN tagged frame arrives and the chip can't * match it against the CAM filter, it considers this * a 'VLAN CAM filter miss' and clears the 'RXOK' bit. * We don't want to drop the frame though: our VLAN * filtering is done in software. */ if (!(rxstat & VGE_RDSTS_RXOK) && !(rxstat & VGE_RDSTS_VIDM) && !(rxstat & VGE_RDSTS_CSUMERR)) { ifp->if_ierrors++; /* * If this is part of a multi-fragment packet, * discard all the pieces. 
*/ if (sc->vge_head != NULL) { m_freem(sc->vge_head); sc->vge_head = sc->vge_tail = NULL; } vge_newbuf(sc, i, m); VGE_RX_DESC_INC(i); continue; } /* * If allocating a replacement mbuf fails, * reload the current one. */ if (vge_newbuf(sc, i, NULL)) { ifp->if_ierrors++; if (sc->vge_head != NULL) { m_freem(sc->vge_head); sc->vge_head = sc->vge_tail = NULL; } vge_newbuf(sc, i, m); VGE_RX_DESC_INC(i); continue; } VGE_RX_DESC_INC(i); if (sc->vge_head != NULL) { m->m_len = total_len % (MCLBYTES - VGE_ETHER_ALIGN); /* * Special case: if there's 4 bytes or less * in this buffer, the mbuf can be discarded: * the last 4 bytes is the CRC, which we don't * care about anyway. */ if (m->m_len <= ETHER_CRC_LEN) { sc->vge_tail->m_len -= (ETHER_CRC_LEN - m->m_len); m_freem(m); } else { m->m_len -= ETHER_CRC_LEN; m->m_flags &= ~M_PKTHDR; sc->vge_tail->m_next = m; } m = sc->vge_head; sc->vge_head = sc->vge_tail = NULL; m->m_pkthdr.len = total_len - ETHER_CRC_LEN; } else m->m_pkthdr.len = m->m_len = (total_len - ETHER_CRC_LEN); #ifdef VGE_FIXUP_RX vge_fixup_rx(m); #endif ifp->if_ipackets++; m->m_pkthdr.rcvif = ifp; /* Do RX checksumming if enabled */ if (ifp->if_capenable & IFCAP_RXCSUM) { /* Check IP header checksum */ if (rxctl & VGE_RDCTL_IPPKT) m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if (rxctl & VGE_RDCTL_IPCSUMOK) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; /* Check TCP/UDP checksum */ if (rxctl & (VGE_RDCTL_TCPPKT|VGE_RDCTL_UDPPKT) && rxctl & VGE_RDCTL_PROTOCSUMOK) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID|CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } } if (rxstat & VGE_RDSTS_VTAG) VLAN_INPUT_TAG(ifp, m, ntohs((rxctl & VGE_RDCTL_VLANID)), continue); VGE_UNLOCK(sc); (*ifp->if_input)(ifp, m); VGE_LOCK(sc); lim++; if (lim == VGE_RX_DESC_CNT) break; } /* Flush the RX DMA ring */ bus_dmamap_sync(sc->vge_ldata.vge_rx_list_tag, sc->vge_ldata.vge_rx_list_map, BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD); sc->vge_ldata.vge_rx_prodidx = i; CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, lim); 
return; } static void vge_txeof(sc) struct vge_softc *sc; { struct ifnet *ifp; u_int32_t txstat; int idx; ifp = sc->vge_ifp; idx = sc->vge_ldata.vge_tx_considx; /* Invalidate the TX descriptor list */ bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag, sc->vge_ldata.vge_tx_list_map, BUS_DMASYNC_POSTREAD); while (idx != sc->vge_ldata.vge_tx_prodidx) { txstat = le32toh(sc->vge_ldata.vge_tx_list[idx].vge_sts); if (txstat & VGE_TDSTS_OWN) break; m_freem(sc->vge_ldata.vge_tx_mbuf[idx]); sc->vge_ldata.vge_tx_mbuf[idx] = NULL; bus_dmamap_unload(sc->vge_ldata.vge_mtag, sc->vge_ldata.vge_tx_dmamap[idx]); if (txstat & (VGE_TDSTS_EXCESSCOLL|VGE_TDSTS_COLL)) ifp->if_collisions++; if (txstat & VGE_TDSTS_TXERR) ifp->if_oerrors++; else ifp->if_opackets++; sc->vge_ldata.vge_tx_free++; VGE_TX_DESC_INC(idx); } /* No changes made to the TX ring, so no flush needed */ if (idx != sc->vge_ldata.vge_tx_considx) { sc->vge_ldata.vge_tx_considx = idx; ifp->if_flags &= ~IFF_OACTIVE; ifp->if_timer = 0; } /* * If not all descriptors have been released reaped yet, * reload the timer so that we will eventually get another * interrupt that will cause us to re-enter this routine. * This is done in case the transmitter has gone idle. 
*/ if (sc->vge_ldata.vge_tx_free != VGE_TX_DESC_CNT) { CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE); } return; } static void vge_tick(xsc) void *xsc; { struct vge_softc *sc; struct ifnet *ifp; struct mii_data *mii; sc = xsc; ifp = sc->vge_ifp; VGE_LOCK(sc); mii = device_get_softc(sc->vge_miibus); mii_tick(mii); if (sc->vge_link) { if (!(mii->mii_media_status & IFM_ACTIVE)) { sc->vge_link = 0; if_link_state_change(sc->vge_ifp, LINK_STATE_DOWN); } } else { if (mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { sc->vge_link = 1; if_link_state_change(sc->vge_ifp, LINK_STATE_UP); #if __FreeBSD_version < 502114 if (ifp->if_snd.ifq_head != NULL) #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) #endif taskqueue_enqueue(taskqueue_swi, &sc->vge_txtask); } } VGE_UNLOCK(sc); return; } #ifdef DEVICE_POLLING static void vge_poll (struct ifnet *ifp, enum poll_cmd cmd, int count) { struct vge_softc *sc = ifp->if_softc; VGE_LOCK(sc); #ifdef IFCAP_POLLING if (!(ifp->if_capenable & IFCAP_POLLING)) { ether_poll_deregister(ifp); cmd = POLL_DEREGISTER; } #endif if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */ CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS); CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF); CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); goto done; } sc->rxcycles = count; vge_rxeof(sc); vge_txeof(sc); #if __FreeBSD_version < 502114 if (ifp->if_snd.ifq_head != NULL) #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) #endif taskqueue_enqueue(taskqueue_swi, &sc->vge_txtask); if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */ u_int32_t status; status = CSR_READ_4(sc, VGE_ISR); if (status == 0xFFFFFFFF) goto done; if (status) CSR_WRITE_4(sc, VGE_ISR, status); /* * XXX check behaviour on receiver stalls. 
*/ if (status & VGE_ISR_TXDMA_STALL || status & VGE_ISR_RXDMA_STALL) vge_init(sc); if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) { vge_rxeof(sc); ifp->if_ierrors++; CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); } } done: VGE_UNLOCK(sc); } #endif /* DEVICE_POLLING */ static void vge_intr(arg) void *arg; { struct vge_softc *sc; struct ifnet *ifp; u_int32_t status; sc = arg; if (sc->suspended) { return; } VGE_LOCK(sc); ifp = sc->vge_ifp; if (!(ifp->if_flags & IFF_UP)) { VGE_UNLOCK(sc); return; } #ifdef DEVICE_POLLING if (ifp->if_flags & IFF_POLLING) goto done; if ( #ifdef IFCAP_POLLING (ifp->if_capenable & IFCAP_POLLING) && #endif ether_poll_register(vge_poll, ifp)) { /* ok, disable interrupts */ CSR_WRITE_4(sc, VGE_IMR, 0); CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); vge_poll(ifp, 0, 1); goto done; } #endif /* DEVICE_POLLING */ /* Disable interrupts */ CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK); for (;;) { status = CSR_READ_4(sc, VGE_ISR); /* If the card has gone away the read returns 0xffff. 
*/ if (status == 0xFFFFFFFF) break; if (status) CSR_WRITE_4(sc, VGE_ISR, status); if ((status & VGE_INTRS) == 0) break; if (status & (VGE_ISR_RXOK|VGE_ISR_RXOK_HIPRIO)) vge_rxeof(sc); if (status & (VGE_ISR_RXOFLOW|VGE_ISR_RXNODESC)) { vge_rxeof(sc); CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN); CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK); } if (status & (VGE_ISR_TXOK0|VGE_ISR_TIMER0)) vge_txeof(sc); if (status & (VGE_ISR_TXDMA_STALL|VGE_ISR_RXDMA_STALL)) vge_init(sc); if (status & VGE_ISR_LINKSTS) vge_tick(sc); } /* Re-enable interrupts */ CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK); #ifdef DEVICE_POLLING done: #endif VGE_UNLOCK(sc); #if __FreeBSD_version < 502114 if (ifp->if_snd.ifq_head != NULL) #else if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) #endif taskqueue_enqueue(taskqueue_swi, &sc->vge_txtask); return; } static int vge_encap(sc, m_head, idx) struct vge_softc *sc; struct mbuf *m_head; int idx; { struct mbuf *m_new = NULL; struct vge_dmaload_arg arg; bus_dmamap_t map; int error; struct m_tag *mtag; if (sc->vge_ldata.vge_tx_free <= 2) return (EFBIG); arg.vge_flags = 0; if (m_head->m_pkthdr.csum_flags & CSUM_IP) arg.vge_flags |= VGE_TDCTL_IPCSUM; if (m_head->m_pkthdr.csum_flags & CSUM_TCP) arg.vge_flags |= VGE_TDCTL_TCPCSUM; if (m_head->m_pkthdr.csum_flags & CSUM_UDP) arg.vge_flags |= VGE_TDCTL_UDPCSUM; arg.sc = sc; arg.vge_idx = idx; arg.vge_m0 = m_head; arg.vge_maxsegs = VGE_TX_FRAGS; map = sc->vge_ldata.vge_tx_dmamap[idx]; error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map, m_head, vge_dma_map_tx_desc, &arg, BUS_DMA_NOWAIT); if (error && error != EFBIG) { printf("vge%d: can't map mbuf (error %d)\n", sc->vge_unit, error); return (ENOBUFS); } /* Too many segments to map, coalesce into a single mbuf */ if (error || arg.vge_maxsegs == 0) { m_new = m_defrag(m_head, M_DONTWAIT); if (m_new == NULL) return (1); else m_head = m_new; arg.sc = sc; arg.vge_m0 = m_head; arg.vge_idx = idx; arg.vge_maxsegs = 1; error = bus_dmamap_load_mbuf(sc->vge_ldata.vge_mtag, map, 
		    m_head, vge_dma_map_tx_desc, &arg, BUS_DMA_NOWAIT);
		if (error) {
			printf("vge%d: can't map mbuf (error %d)\n",
			    sc->vge_unit, error);
			return (EFBIG);
		}
	}

	sc->vge_ldata.vge_tx_mbuf[idx] = m_head;
	sc->vge_ldata.vge_tx_free--;

	/*
	 * Set up hardware VLAN tagging.
	 */
	mtag = VLAN_OUTPUT_TAG(sc->vge_ifp, m_head);
	if (mtag != NULL)
		sc->vge_ldata.vge_tx_list[idx].vge_ctl |=
		    htole32(htons(VLAN_TAG_VALUE(mtag)) | VGE_TDCTL_VTAG);

	/* Hand the descriptor to the hardware. */
	sc->vge_ldata.vge_tx_list[idx].vge_sts |= htole32(VGE_TDSTS_OWN);

	return (0);
}

/*
 * Taskqueue handler: simply defers to vge_start().
 */
static void
vge_tx_task(arg, npending)
	void *arg;
	int npending;
{
	struct ifnet *ifp;

	ifp = arg;
	vge_start(ifp);

	return;
}

/*
 * Main transmit routine.
 */
static void
vge_start(ifp)
	struct ifnet		*ifp;
{
	struct vge_softc	*sc;
	struct mbuf		*m_head = NULL;
	int			idx, pidx = 0;

	sc = ifp->if_softc;
	VGE_LOCK(sc);

	/* Nothing to do without link or while the ring is busy. */
	if (!sc->vge_link || ifp->if_flags & IFF_OACTIVE) {
		VGE_UNLOCK(sc);
		return;
	}

#if __FreeBSD_version < 502114
	if (ifp->if_snd.ifq_head == NULL) {
#else
	if (IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
#endif
		VGE_UNLOCK(sc);
		return;
	}

	idx = sc->vge_ldata.vge_tx_prodidx;

	/* Previous slot, used to chain descriptors with VGE_TXDESC_Q. */
	pidx = idx - 1;
	if (pidx < 0)
		pidx = VGE_TX_DESC_CNT - 1;

	while (sc->vge_ldata.vge_tx_mbuf[idx] == NULL) {
#if __FreeBSD_version < 502114
		IF_DEQUEUE(&ifp->if_snd, m_head);
#else
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
#endif
		if (m_head == NULL)
			break;

		if (vge_encap(sc, m_head, idx)) {
			/* Ring full: put the packet back and stall the queue. */
#if __FreeBSD_version >= 502114
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
#else
			IF_PREPEND(&ifp->if_snd, m_head);
#endif
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		sc->vge_ldata.vge_tx_list[pidx].vge_frag[0].vge_buflen |=
		    htole16(VGE_TXDESC_Q);

		pidx = idx;
		VGE_TX_DESC_INC(idx);

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	if (idx == sc->vge_ldata.vge_tx_prodidx) {
		VGE_UNLOCK(sc);
		return;
	}

	/* Flush the TX descriptors */
	bus_dmamap_sync(sc->vge_ldata.vge_tx_list_tag,
	    sc->vge_ldata.vge_tx_list_map,
	    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);

	/* Issue a transmit command. */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_WAK0);

	sc->vge_ldata.vge_tx_prodidx = idx;

	/*
	 * Use the countdown timer for interrupt moderation.
	 * 'TX done' interrupts are disabled. Instead, we reset the
	 * countdown timer, which will begin counting until it hits
	 * the value in the SSTIMER register, and then trigger an
	 * interrupt. Each time we set the TIMER0_ENABLE bit, the
	 * the timer count is reloaded. Only when the transmitter
	 * is idle will the timer hit 0 and an interrupt fire.
	 */
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_TIMER0_ENABLE);

	VGE_UNLOCK(sc);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	return;
}

/*
 * Stop, reset and (re)program the chip, then bring the interface up.
 * Called with no assumption about prior state.
 */
static void
vge_init(xsc)
	void			*xsc;
{
	struct vge_softc	*sc = xsc;
	struct ifnet		*ifp = sc->vge_ifp;
	struct mii_data		*mii;
	int			i;

	VGE_LOCK(sc);
	mii = device_get_softc(sc->vge_miibus);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	vge_stop(sc);
	vge_reset(sc);

	/*
	 * Initialize the RX and TX descriptors and mbufs.
	 */
	vge_rx_list_init(sc);
	vge_tx_list_init(sc);

	/* Set our station address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		CSR_WRITE_1(sc, VGE_PAR0 + i, IFP2ENADDR(sc->vge_ifp)[i]);

	/*
	 * Set receive FIFO threshold. Also allow transmission and
	 * reception of VLAN tagged frames.
	 */
	CSR_CLRBIT_1(sc, VGE_RXCFG, VGE_RXCFG_FIFO_THR|VGE_RXCFG_VTAGOPT);
	CSR_SETBIT_1(sc, VGE_RXCFG, VGE_RXFIFOTHR_128BYTES|VGE_VTAG_OPT2);

	/* Set DMA burst length */
	CSR_CLRBIT_1(sc, VGE_DMACFG0, VGE_DMACFG0_BURSTLEN);
	CSR_SETBIT_1(sc, VGE_DMACFG0, VGE_DMABURST_128);

	CSR_SETBIT_1(sc, VGE_TXCFG, VGE_TXCFG_ARB_PRIO|VGE_TXCFG_NONBLK);

	/* Set collision backoff algorithm */
	CSR_CLRBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_CRANDOM|
	    VGE_CHIPCFG1_CAP|VGE_CHIPCFG1_MBA|VGE_CHIPCFG1_BAKOPT);
	CSR_SETBIT_1(sc, VGE_CHIPCFG1, VGE_CHIPCFG1_OFSET);

	/* Disable LPSEL field in priority resolution */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_LPSEL_DIS);

	/*
	 * Load the addresses of the DMA queues into the chip.
	 * Note that we only use one transmit queue.
	 */
	CSR_WRITE_4(sc, VGE_TXDESC_ADDR_LO0,
	    VGE_ADDR_LO(sc->vge_ldata.vge_tx_list_addr));
	CSR_WRITE_2(sc, VGE_TXDESCNUM, VGE_TX_DESC_CNT - 1);

	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO,
	    VGE_ADDR_LO(sc->vge_ldata.vge_rx_list_addr));
	CSR_WRITE_2(sc, VGE_RXDESCNUM, VGE_RX_DESC_CNT - 1);
	CSR_WRITE_2(sc, VGE_RXDESC_RESIDUECNT, VGE_RX_DESC_CNT);

	/* Enable and wake up the RX descriptor queue */
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_RUN);
	CSR_WRITE_1(sc, VGE_RXQCSRS, VGE_RXQCSR_WAK);

	/* Enable the TX descriptor queue */
	CSR_WRITE_2(sc, VGE_TXQCSRS, VGE_TXQCSR_RUN0);

	/* Set up the receive filter -- allow large frames for VLANs. */
	CSR_WRITE_1(sc, VGE_RXCTL, VGE_RXCTL_RX_UCAST|VGE_RXCTL_RX_GIANT);

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC);
	}

	/* Set capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_BCAST);
	}

	/* Set multicast bit to capture multicast frames. */
	if (ifp->if_flags & IFF_MULTICAST) {
		CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_MCAST);
	}

	/* Init the cam filter. */
	vge_cam_clear(sc);

	/* Init the multicast filter. */
	vge_setmulti(sc);

	/* Enable flow control */
	CSR_WRITE_1(sc, VGE_CRS2, 0x8B);

	/* Enable jumbo frame reception (if desired) */

	/* Start the MAC. */
	CSR_WRITE_1(sc, VGE_CRC0, VGE_CR0_STOP);
	CSR_WRITE_1(sc, VGE_CRS1, VGE_CR1_NOPOLL);
	CSR_WRITE_1(sc, VGE_CRS0,
	    VGE_CR0_TX_ENABLE|VGE_CR0_RX_ENABLE|VGE_CR0_START);

	/*
	 * Configure one-shot timer for microsecond
	 * resolution and load it for 500 usecs.
	 */
	CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_TIMER0_RES);
	CSR_WRITE_2(sc, VGE_SSTIMER, 400);

	/*
	 * Configure interrupt moderation for receive. Enable
	 * the holdoff counter and load it, and set the RX
	 * suppression count to the number of descriptors we
	 * want to allow before triggering an interrupt.
	 * The holdoff timer is in units of 20 usecs.
	 */

#ifdef notyet
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_TXINTSUP_DISABLE);
	/* Select the interrupt holdoff timer page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_INTHLDOFF);
	CSR_WRITE_1(sc, VGE_INTHOLDOFF, 10); /* ~200 usecs */

	/* Enable use of the holdoff timer. */
	CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_HOLDOFF);
	CSR_WRITE_1(sc, VGE_INTCTL1, VGE_INTCTL_SC_RELOAD);

	/* Select the RX suppression threshold page. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_RXSUPPTHR);
	CSR_WRITE_1(sc, VGE_RXSUPPTHR, 64); /* interrupt after 64 packets */

	/* Restore the page select bits. */
	CSR_CLRBIT_1(sc, VGE_CAMCTL, VGE_CAMCTL_PAGESEL);
	CSR_SETBIT_1(sc, VGE_CAMCTL, VGE_PAGESEL_MAR);
#endif

#ifdef DEVICE_POLLING
	/*
	 * Disable interrupts if we are polling.
	 */
	if (ifp->if_flags & IFF_POLLING) {
		CSR_WRITE_4(sc, VGE_IMR, 0);
		CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	} else	/* otherwise ... */
#endif /* DEVICE_POLLING */
	{
	/*
	 * Enable interrupts.
	 */
		CSR_WRITE_4(sc, VGE_IMR, VGE_INTRS);
		CSR_WRITE_4(sc, VGE_ISR, 0);
		CSR_WRITE_1(sc, VGE_CRS3, VGE_CR3_INT_GMSK);
	}

	mii_mediachg(mii);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Force a fresh link/flags evaluation. */
	sc->vge_if_flags = 0;
	sc->vge_link = 0;

	VGE_UNLOCK(sc);

	return;
}

/*
 * Set media options.
 */
static int
vge_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct vge_softc *sc;
	struct mii_data *mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->vge_miibus);
	mii_mediachg(mii);

	return (0);
}

/*
 * Report current media status.
*/ static void vge_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct vge_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->vge_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; return; } static void vge_miibus_statchg(dev) device_t dev; { struct vge_softc *sc; struct mii_data *mii; struct ifmedia_entry *ife; sc = device_get_softc(dev); mii = device_get_softc(sc->vge_miibus); ife = mii->mii_media.ifm_cur; /* * If the user manually selects a media mode, we need to turn * on the forced MAC mode bit in the DIAGCTL register. If the * user happens to choose a full duplex mode, we also need to * set the 'force full duplex' bit. This applies only to * 10Mbps and 100Mbps speeds. In autoselect mode, forced MAC * mode is disabled, and in 1000baseT mode, full duplex is * always implied, so we turn on the forced mode bit but leave * the FDX bit cleared. */ switch (IFM_SUBTYPE(ife->ifm_media)) { case IFM_AUTO: CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); break; case IFM_1000_T: CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); break; case IFM_100_TX: case IFM_10_T: CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_MACFORCE); if ((ife->ifm_media & IFM_GMASK) == IFM_FDX) { CSR_SETBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); } else { CSR_CLRBIT_1(sc, VGE_DIAGCTL, VGE_DIAGCTL_FDXFORCE); } break; default: device_printf(dev, "unknown media type: %x\n", IFM_SUBTYPE(ife->ifm_media)); break; } return; } static int vge_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct vge_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int error = 0; switch (command) { case SIOCSIFMTU: if (ifr->ifr_mtu > VGE_JUMBO_MTU) error = EINVAL; ifp->if_mtu = ifr->ifr_mtu; break; case SIOCSIFFLAGS: if (ifp->if_flags 
& IFF_UP) { if (ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->vge_if_flags & IFF_PROMISC)) { CSR_SETBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC); vge_setmulti(sc); } else if (ifp->if_flags & IFF_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->vge_if_flags & IFF_PROMISC) { CSR_CLRBIT_1(sc, VGE_RXCTL, VGE_RXCTL_RX_PROMISC); vge_setmulti(sc); } else vge_init(sc); } else { if (ifp->if_flags & IFF_RUNNING) vge_stop(sc); } sc->vge_if_flags = ifp->if_flags; break; case SIOCADDMULTI: case SIOCDELMULTI: vge_setmulti(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->vge_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: #ifdef IFCAP_POLLING ifp->if_capenable &= ~(IFCAP_HWCSUM | IFCAP_POLLING); #else ifp->if_capenable &= ~(IFCAP_HWCSUM); #endif ifp->if_capenable |= #ifdef IFCAP_POLLING ifr->ifr_reqcap & (IFCAP_HWCSUM | IFCAP_POLLING); #else ifr->ifr_reqcap & (IFCAP_HWCSUM); #endif if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist = VGE_CSUM_FEATURES; else ifp->if_hwassist = 0; if (ifp->if_flags & IFF_RUNNING) vge_init(sc); break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void vge_watchdog(ifp) struct ifnet *ifp; { struct vge_softc *sc; sc = ifp->if_softc; VGE_LOCK(sc); printf("vge%d: watchdog timeout\n", sc->vge_unit); ifp->if_oerrors++; vge_txeof(sc); vge_rxeof(sc); vge_init(sc); VGE_UNLOCK(sc); return; } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. 
 */
static void
vge_stop(sc)
	struct vge_softc		*sc;
{
	register int		i;
	struct ifnet		*ifp;

	VGE_LOCK(sc);
	ifp = sc->vge_ifp;
	ifp->if_timer = 0;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
#ifdef DEVICE_POLLING
	ether_poll_deregister(ifp);
#endif /* DEVICE_POLLING */

	/* Mask interrupts and halt the MAC before touching the rings. */
	CSR_WRITE_1(sc, VGE_CRC3, VGE_CR3_INT_GMSK);
	CSR_WRITE_1(sc, VGE_CRS0, VGE_CR0_STOP);
	CSR_WRITE_4(sc, VGE_ISR, 0xFFFFFFFF);
	CSR_WRITE_2(sc, VGE_TXQCSRC, 0xFFFF);
	CSR_WRITE_1(sc, VGE_RXQCSRC, 0xFF);
	CSR_WRITE_4(sc, VGE_RXDESC_ADDR_LO, 0);

	/* Release any partially reassembled jumbo frame. */
	if (sc->vge_head != NULL) {
		m_freem(sc->vge_head);
		sc->vge_head = sc->vge_tail = NULL;
	}

	/* Free the TX list buffers. */
	for (i = 0; i < VGE_TX_DESC_CNT; i++) {
		if (sc->vge_ldata.vge_tx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->vge_ldata.vge_mtag,
			    sc->vge_ldata.vge_tx_dmamap[i]);
			m_freem(sc->vge_ldata.vge_tx_mbuf[i]);
			sc->vge_ldata.vge_tx_mbuf[i] = NULL;
		}
	}

	/* Free the RX list buffers. */
	for (i = 0; i < VGE_RX_DESC_CNT; i++) {
		if (sc->vge_ldata.vge_rx_mbuf[i] != NULL) {
			bus_dmamap_unload(sc->vge_ldata.vge_mtag,
			    sc->vge_ldata.vge_rx_dmamap[i]);
			m_freem(sc->vge_ldata.vge_rx_mbuf[i]);
			sc->vge_ldata.vge_rx_mbuf[i] = NULL;
		}
	}

	VGE_UNLOCK(sc);

	return;
}

/*
 * Device suspend routine.  Stop the interface and save some PCI
 * settings in case the BIOS doesn't restore them properly on
 * resume.
 */
static int
vge_suspend(dev)
	device_t		dev;
{
	struct vge_softc	*sc;

	sc = device_get_softc(dev);

	vge_stop(sc);

	/* vge_intr() ignores interrupts while this flag is set. */
	sc->suspended = 1;

	return (0);
}

/*
 * Device resume routine.  Restore some PCI settings in case the BIOS
 * doesn't, re-enable busmastering, and restart the interface if
 * appropriate.
 */
static int
vge_resume(dev)
	device_t		dev;
{
	struct vge_softc	*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	ifp = sc->vge_ifp;

	/* reenable busmastering */
	pci_enable_busmaster(dev);
	pci_enable_io(dev, SYS_RES_MEMORY);

	/* reinitialize interface if necessary */
	if (ifp->if_flags & IFF_UP)
		vge_init(sc);

	sc->suspended = 0;

	return (0);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
vge_shutdown(dev)
	device_t		dev;
{
	struct vge_softc		*sc;

	sc = device_get_softc(dev);

	vge_stop(sc);
}
Index: stable/6/sys/dev/wi/if_wi.c
===================================================================
--- stable/6/sys/dev/wi/if_wi.c	(revision 149421)
+++ stable/6/sys/dev/wi/if_wi.c	(revision 149422)
@@ -1,3232 +1,3234 @@
/* $NetBSD: wi.c,v 1.109 2003/01/09 08:52:19 dyoung Exp $ */

/*-
 * Copyright (c) 1997, 1998, 1999
 *	Bill Paul . All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
* * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ /* * Lucent WaveLAN/IEEE 802.11 PCMCIA driver. * * Original FreeBSD driver written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The WaveLAN/IEEE adapter is the second generation of the WaveLAN * from Lucent. Unlike the older cards, the new ones are programmed * entirely via a firmware-driven controller called the Hermes. * Unfortunately, Lucent will not release the Hermes programming manual * without an NDA (if at all). What they do release is an API library * called the HCF (Hardware Control Functions) which is supposed to * do the device-specific operations of a device driver for you. The * publically available version of the HCF library (the 'HCF Light') is * a) extremely gross, b) lacks certain features, particularly support * for 802.11 frames, and c) is contaminated by the GNU Public License. * * This driver does not use the HCF or HCF Light at all. Instead, it * programs the Hermes controller directly, using information gleaned * from the HCF Light code and corresponding documentation. 
* * This driver supports the ISA, PCMCIA and PCI versions of the Lucent * WaveLan cards (based on the Hermes chipset), as well as the newer * Prism 2 chipsets with firmware from Intersil and Symbol. */ #include __FBSDID("$FreeBSD$"); #define WI_HERMES_AUTOINC_WAR /* Work around data write autoinc bug. */ #define WI_HERMES_STATS_WAR /* Work around stats counter bug. */ #define NBPFILTER 1 #include #include #if __FreeBSD_version >= 500033 #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static void wi_start(struct ifnet *); static int wi_reset(struct wi_softc *); static void wi_watchdog(struct ifnet *); static int wi_ioctl(struct ifnet *, u_long, caddr_t); static int wi_media_change(struct ifnet *); static void wi_media_status(struct ifnet *, struct ifmediareq *); static void wi_rx_intr(struct wi_softc *); static void wi_tx_intr(struct wi_softc *); static void wi_tx_ex_intr(struct wi_softc *); static void wi_info_intr(struct wi_softc *); static int wi_get_cfg(struct ifnet *, u_long, caddr_t); static int wi_set_cfg(struct ifnet *, u_long, caddr_t); static int wi_write_txrate(struct wi_softc *); static int wi_write_wep(struct wi_softc *); static int wi_write_multi(struct wi_softc *); static int wi_alloc_fid(struct wi_softc *, int, int *); static void wi_read_nicid(struct wi_softc *); static int wi_write_ssid(struct wi_softc *, int, u_int8_t *, int); static int wi_cmd(struct wi_softc *, int, int, int, int); static int wi_seek_bap(struct wi_softc *, int, int); static int wi_read_bap(struct wi_softc *, int, int, void *, int); static int wi_write_bap(struct wi_softc *, int, int, void *, int); static int wi_mwrite_bap(struct wi_softc *, int, int, struct mbuf *, int); static int wi_read_rid(struct wi_softc *, int, 
		void *, int *);
static int  wi_write_rid(struct wi_softc *, int, void *, int);

static int  wi_newstate(struct ieee80211com *, enum ieee80211_state, int);

static int  wi_scan_ap(struct wi_softc *, u_int16_t, u_int16_t);
static void wi_scan_result(struct wi_softc *, int, int);

static void wi_dump_pkt(struct wi_frame *, struct ieee80211_node *, int rssi);

static int wi_get_debug(struct wi_softc *, struct wi_req *);
static int wi_set_debug(struct wi_softc *, struct wi_req *);

#if __FreeBSD_version >= 500000
/* support to download firmware for symbol CF card */
static int wi_symbol_write_firm(struct wi_softc *, const void *, int,
		const void *, int);
static int wi_symbol_set_hcr(struct wi_softc *, int);
#endif

/*
 * Write a single 16-bit value (converted to little-endian) to RID 'rid'.
 */
static __inline int
wi_write_val(struct wi_softc *sc, int rid, u_int16_t val)
{

	val = htole16(val);
	return wi_write_rid(sc, rid, &val, sizeof(val));
}

SYSCTL_NODE(_hw, OID_AUTO, wi, CTLFLAG_RD, 0, "Wireless driver parameters");

/* Rate-limiting state for TX error messages. */
static	struct timeval lasttxerror;	/* time of last tx error msg */
static	int curtxeps;			/* current tx error msgs/sec */
static	int wi_txerate = 0;		/* tx error rate: max msgs/sec */
SYSCTL_INT(_hw_wi, OID_AUTO, txerate, CTLFLAG_RW, &wi_txerate,
	    0, "max tx error msgs/sec; 0 to disable msgs");

#define	WI_DEBUG
#ifdef WI_DEBUG
static	int wi_debug = 0;
SYSCTL_INT(_hw_wi, OID_AUTO, debug, CTLFLAG_RW, &wi_debug,
	    0, "control debugging printfs");

#define	DPRINTF(X)	if (wi_debug) printf X
#define	DPRINTF2(X)	if (wi_debug > 1) printf X
#define	IFF_DUMPPKTS(_ifp) \
	(((_ifp)->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2))
#else
#define	DPRINTF(X)
#define	DPRINTF2(X)
#define	IFF_DUMPPKTS(_ifp)	0
#endif

#define WI_INTRS	(WI_EV_RX | WI_EV_ALLOC | WI_EV_INFO)

/*
 * Table mapping controller IDs to human-readable names and firmware
 * vendor type; terminated by an all-zero sentinel entry.
 */
struct wi_card_ident wi_card_ident[] = {
	/* CARD_ID			CARD_NAME		FIRM_TYPE */
	{ WI_NIC_LUCENT_ID,		WI_NIC_LUCENT_STR,	WI_LUCENT },
	{ WI_NIC_SONY_ID,		WI_NIC_SONY_STR,	WI_LUCENT },
	{ WI_NIC_LUCENT_EMB_ID,		WI_NIC_LUCENT_EMB_STR,	WI_LUCENT },
	{ WI_NIC_EVB2_ID,		WI_NIC_EVB2_STR,	WI_INTERSIL },
	{ WI_NIC_HWB3763_ID,		WI_NIC_HWB3763_STR,	WI_INTERSIL },
	{ WI_NIC_HWB3163_ID,		WI_NIC_HWB3163_STR,	WI_INTERSIL },
	{ WI_NIC_HWB3163B_ID,		WI_NIC_HWB3163B_STR,	WI_INTERSIL },
	{ WI_NIC_EVB3_ID,		WI_NIC_EVB3_STR,	WI_INTERSIL },
	{ WI_NIC_HWB1153_ID,		WI_NIC_HWB1153_STR,	WI_INTERSIL },
	{ WI_NIC_P2_SST_ID,		WI_NIC_P2_SST_STR,	WI_INTERSIL },
	{ WI_NIC_EVB2_SST_ID,		WI_NIC_EVB2_SST_STR,	WI_INTERSIL },
	{ WI_NIC_3842_EVA_ID,		WI_NIC_3842_EVA_STR,	WI_INTERSIL },
	{ WI_NIC_3842_PCMCIA_AMD_ID,	WI_NIC_3842_PCMCIA_STR,	WI_INTERSIL },
	{ WI_NIC_3842_PCMCIA_SST_ID,	WI_NIC_3842_PCMCIA_STR,	WI_INTERSIL },
	{ WI_NIC_3842_PCMCIA_ATL_ID,	WI_NIC_3842_PCMCIA_STR,	WI_INTERSIL },
	{ WI_NIC_3842_PCMCIA_ATS_ID,	WI_NIC_3842_PCMCIA_STR,	WI_INTERSIL },
	{ WI_NIC_3842_MINI_AMD_ID,	WI_NIC_3842_MINI_STR,	WI_INTERSIL },
	{ WI_NIC_3842_MINI_SST_ID,	WI_NIC_3842_MINI_STR,	WI_INTERSIL },
	{ WI_NIC_3842_MINI_ATL_ID,	WI_NIC_3842_MINI_STR,	WI_INTERSIL },
	{ WI_NIC_3842_MINI_ATS_ID,	WI_NIC_3842_MINI_STR,	WI_INTERSIL },
	{ WI_NIC_3842_PCI_AMD_ID,	WI_NIC_3842_PCI_STR,	WI_INTERSIL },
	{ WI_NIC_3842_PCI_SST_ID,	WI_NIC_3842_PCI_STR,	WI_INTERSIL },
	{ WI_NIC_3842_PCI_ATS_ID,	WI_NIC_3842_PCI_STR,	WI_INTERSIL },
	{ WI_NIC_3842_PCI_ATL_ID,	WI_NIC_3842_PCI_STR,	WI_INTERSIL },
	{ WI_NIC_P3_PCMCIA_AMD_ID,	WI_NIC_P3_PCMCIA_STR,	WI_INTERSIL },
	{ WI_NIC_P3_PCMCIA_SST_ID,	WI_NIC_P3_PCMCIA_STR,	WI_INTERSIL },
	{ WI_NIC_P3_PCMCIA_ATL_ID,	WI_NIC_P3_PCMCIA_STR,	WI_INTERSIL },
	{ WI_NIC_P3_PCMCIA_ATS_ID,	WI_NIC_P3_PCMCIA_STR,	WI_INTERSIL },
	{ WI_NIC_P3_MINI_AMD_ID,	WI_NIC_P3_MINI_STR,	WI_INTERSIL },
	{ WI_NIC_P3_MINI_SST_ID,	WI_NIC_P3_MINI_STR,	WI_INTERSIL },
	{ WI_NIC_P3_MINI_ATL_ID,	WI_NIC_P3_MINI_STR,	WI_INTERSIL },
	{ WI_NIC_P3_MINI_ATS_ID,	WI_NIC_P3_MINI_STR,	WI_INTERSIL },
	{ 0,	NULL,	0 },
};

devclass_t wi_devclass;

/*
 * Device attach: set up the ifnet, interrupt handler, and 802.11 state.
 * NOTE(review): definition continues beyond this view.
 */
int
wi_attach(device_t dev)
{
	struct wi_softc	*sc = device_get_softc(dev);
	struct ieee80211com *ic = &sc->sc_ic;
	struct ifnet *ifp;
	int i, nrates, buflen;
	u_int16_t val;
	u_int8_t ratebuf[2 + IEEE80211_RATE_SIZE];
	struct ieee80211_rateset
*rs; static const u_int8_t empty_macaddr[IEEE80211_ADDR_LEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; int error; ifp = sc->sc_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc\n"); wi_free(dev); return (ENOSPC); } /* * NB: no locking is needed here; don't put it here * unless you can prove it! */ error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, wi_intr, sc, &sc->wi_intrhand); if (error) { device_printf(dev, "bus_setup_intr() failed! (%d)\n", error); wi_free(dev); return (error); } #if __FreeBSD_version >= 500000 mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); #endif sc->sc_firmware_type = WI_NOTYPE; sc->wi_cmd_count = 500; /* Reset the NIC. */ if (wi_reset(sc) != 0) return ENXIO; /* XXX */ /* * Read the station address. * And do it twice. I've seen PRISM-based cards that return * an error when trying to read it the first time, which causes * the probe to fail. */ buflen = IEEE80211_ADDR_LEN; error = wi_read_rid(sc, WI_RID_MAC_NODE, ic->ic_myaddr, &buflen); if (error != 0) { buflen = IEEE80211_ADDR_LEN; error = wi_read_rid(sc, WI_RID_MAC_NODE, ic->ic_myaddr, &buflen); } if (error || IEEE80211_ADDR_EQ(ic->ic_myaddr, empty_macaddr)) { if (error != 0) device_printf(dev, "mac read failed %d\n", error); else device_printf(dev, "mac read failed (all zeros)\n"); wi_free(dev); return (error); } /* Read NIC identification */ wi_read_nicid(sc); ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = wi_ioctl; ifp->if_start = wi_start; ifp->if_watchdog = wi_watchdog; ifp->if_init = wi_init; IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN; IFQ_SET_READY(&ifp->if_snd); ic->ic_ifp = ifp; ic->ic_phytype = IEEE80211_T_DS; ic->ic_opmode = IEEE80211_M_STA; ic->ic_state = IEEE80211_S_INIT; ic->ic_caps = IEEE80211_C_PMGT | IEEE80211_C_WEP /* everyone supports 
WEP */ ; ic->ic_max_aid = WI_MAX_AID; /* * Query the card for available channels and setup the * channel table. We assume these are all 11b channels. */ buflen = sizeof(val); if (wi_read_rid(sc, WI_RID_CHANNEL_LIST, &val, &buflen) != 0) val = htole16(0x1fff); /* assume 1-11 */ KASSERT(val != 0, ("wi_attach: no available channels listed!")); val <<= 1; /* shift for base 1 indices */ for (i = 1; i < 16; i++) { if (!isset((u_int8_t*)&val, i)) continue; ic->ic_channels[i].ic_freq = ieee80211_ieee2mhz(i, IEEE80211_CHAN_B); ic->ic_channels[i].ic_flags = IEEE80211_CHAN_B; } /* * Read the default channel from the NIC. This may vary * depending on the country where the NIC was purchased, so * we can't hard-code a default and expect it to work for * everyone. * * If no channel is specified, let the 802.11 code select. */ buflen = sizeof(val); if (wi_read_rid(sc, WI_RID_OWN_CHNL, &val, &buflen) == 0) { val = le16toh(val); KASSERT(val < IEEE80211_CHAN_MAX && ic->ic_channels[val].ic_flags != 0, ("wi_attach: invalid own channel %u!", val)); ic->ic_ibss_chan = &ic->ic_channels[val]; } else { device_printf(dev, "WI_RID_OWN_CHNL failed, using first channel!\n"); ic->ic_ibss_chan = &ic->ic_channels[0]; } /* * Set flags based on firmware version. 
*/ switch (sc->sc_firmware_type) { case WI_LUCENT: sc->sc_ntxbuf = 1; sc->sc_flags |= WI_FLAGS_HAS_SYSSCALE; #ifdef WI_HERMES_AUTOINC_WAR /* XXX: not confirmed, but never seen for recent firmware */ if (sc->sc_sta_firmware_ver < 40000) { sc->sc_flags |= WI_FLAGS_BUG_AUTOINC; } #endif if (sc->sc_sta_firmware_ver >= 60000) sc->sc_flags |= WI_FLAGS_HAS_MOR; if (sc->sc_sta_firmware_ver >= 60006) { ic->ic_caps |= IEEE80211_C_IBSS; ic->ic_caps |= IEEE80211_C_MONITOR; } sc->sc_ibss_port = htole16(1); sc->sc_min_rssi = WI_LUCENT_MIN_RSSI; sc->sc_max_rssi = WI_LUCENT_MAX_RSSI; sc->sc_dbm_offset = WI_LUCENT_DBM_OFFSET; break; case WI_INTERSIL: sc->sc_ntxbuf = WI_NTXBUF; sc->sc_flags |= WI_FLAGS_HAS_FRAGTHR; sc->sc_flags |= WI_FLAGS_HAS_ROAMING; sc->sc_flags |= WI_FLAGS_HAS_SYSSCALE; /* * Old firmware are slow, so give peace a chance. */ if (sc->sc_sta_firmware_ver < 10000) sc->wi_cmd_count = 5000; if (sc->sc_sta_firmware_ver > 10101) sc->sc_flags |= WI_FLAGS_HAS_DBMADJUST; if (sc->sc_sta_firmware_ver >= 800) { ic->ic_caps |= IEEE80211_C_IBSS; ic->ic_caps |= IEEE80211_C_MONITOR; } /* * version 0.8.3 and newer are the only ones that are known * to currently work. Earlier versions can be made to work, * at least according to the Linux driver. */ if (sc->sc_sta_firmware_ver >= 803) ic->ic_caps |= IEEE80211_C_HOSTAP; sc->sc_ibss_port = htole16(0); sc->sc_min_rssi = WI_PRISM_MIN_RSSI; sc->sc_max_rssi = WI_PRISM_MAX_RSSI; sc->sc_dbm_offset = WI_PRISM_DBM_OFFSET; break; case WI_SYMBOL: sc->sc_ntxbuf = 1; sc->sc_flags |= WI_FLAGS_HAS_DIVERSITY; if (sc->sc_sta_firmware_ver >= 25000) ic->ic_caps |= IEEE80211_C_IBSS; sc->sc_ibss_port = htole16(4); sc->sc_min_rssi = WI_PRISM_MIN_RSSI; sc->sc_max_rssi = WI_PRISM_MAX_RSSI; sc->sc_dbm_offset = WI_PRISM_DBM_OFFSET; break; } /* * Find out if we support WEP on this card. */ buflen = sizeof(val); if (wi_read_rid(sc, WI_RID_WEP_AVAIL, &val, &buflen) == 0 && val != htole16(0)) ic->ic_caps |= IEEE80211_C_WEP; /* Find supported rates. 
*/ buflen = sizeof(ratebuf); rs = &ic->ic_sup_rates[IEEE80211_MODE_11B]; if (wi_read_rid(sc, WI_RID_DATA_RATES, ratebuf, &buflen) == 0) { nrates = le16toh(*(u_int16_t *)ratebuf); if (nrates > IEEE80211_RATE_MAXSIZE) nrates = IEEE80211_RATE_MAXSIZE; rs->rs_nrates = 0; for (i = 0; i < nrates; i++) if (ratebuf[2+i]) rs->rs_rates[rs->rs_nrates++] = ratebuf[2+i]; } else { /* XXX fallback on error? */ rs->rs_nrates = 0; } buflen = sizeof(val); if ((sc->sc_flags & WI_FLAGS_HAS_DBMADJUST) && wi_read_rid(sc, WI_RID_DBM_ADJUST, &val, &buflen) == 0) { sc->sc_dbm_offset = le16toh(val); } sc->sc_max_datalen = 2304; sc->sc_system_scale = 1; sc->sc_cnfauthmode = IEEE80211_AUTH_OPEN; sc->sc_roaming_mode = 1; sc->sc_portnum = WI_DEFAULT_PORT; sc->sc_authtype = WI_DEFAULT_AUTHTYPE; bzero(sc->sc_nodename, sizeof(sc->sc_nodename)); sc->sc_nodelen = sizeof(WI_DEFAULT_NODENAME) - 1; bcopy(WI_DEFAULT_NODENAME, sc->sc_nodename, sc->sc_nodelen); bzero(sc->sc_net_name, sizeof(sc->sc_net_name)); bcopy(WI_DEFAULT_NETNAME, sc->sc_net_name, sizeof(WI_DEFAULT_NETNAME) - 1); /* * Call MI attach routine. */ ieee80211_ifattach(ic); /* override state transition method */ sc->sc_newstate = ic->ic_newstate; ic->ic_newstate = wi_newstate; ieee80211_media_init(ic, wi_media_change, wi_media_status); #if NBPFILTER > 0 bpfattach2(ifp, DLT_IEEE802_11_RADIO, sizeof(struct ieee80211_frame) + sizeof(sc->sc_tx_th), &sc->sc_drvbpf); /* * Initialize constant fields. * XXX make header lengths a multiple of 32-bits so subsequent * headers are properly aligned; this is a kludge to keep * certain applications happy. * * NB: the channel is setup each time we transition to the * RUN state to avoid filling it in for each frame. 
*/ sc->sc_tx_th_len = roundup(sizeof(sc->sc_tx_th), sizeof(u_int32_t)); sc->sc_tx_th.wt_ihdr.it_len = htole16(sc->sc_tx_th_len); sc->sc_tx_th.wt_ihdr.it_present = htole32(WI_TX_RADIOTAP_PRESENT); sc->sc_rx_th_len = roundup(sizeof(sc->sc_rx_th), sizeof(u_int32_t)); sc->sc_rx_th.wr_ihdr.it_len = htole16(sc->sc_rx_th_len); sc->sc_rx_th.wr_ihdr.it_present = htole32(WI_RX_RADIOTAP_PRESENT); #endif if (bootverbose) ieee80211_announce(ic); return (0); } int wi_detach(device_t dev) { struct wi_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->sc_ifp; WI_LOCK_DECL(); WI_LOCK(sc); /* check if device was removed */ sc->wi_gone |= !bus_child_present(dev); wi_stop(ifp, 0); #if NBPFILTER > 0 bpfdetach(ifp); #endif ieee80211_ifdetach(&sc->sc_ic); if_free(sc->sc_ifp); WI_UNLOCK(sc); bus_teardown_intr(dev, sc->irq, sc->wi_intrhand); wi_free(dev); #if __FreeBSD_version >= 500000 mtx_destroy(&sc->sc_mtx); #endif return (0); } #ifdef __NetBSD__ int wi_activate(struct device *self, enum devact act) { struct wi_softc *sc = (struct wi_softc *)self; int rv = 0, s; s = splnet(); switch (act) { case DVACT_ACTIVATE: rv = EOPNOTSUPP; break; case DVACT_DEACTIVATE: if_deactivate(sc->sc_ifp); break; } splx(s); return rv; } void wi_power(struct wi_softc *sc, int why) { struct ifnet *ifp = sc->sc_ifp; int s; s = splnet(); switch (why) { case PWR_SUSPEND: case PWR_STANDBY: wi_stop(ifp, 1); break; case PWR_RESUME: if (ifp->if_flags & IFF_UP) { wi_init(ifp); (void)wi_intr(sc); } break; case PWR_SOFTSUSPEND: case PWR_SOFTSTANDBY: case PWR_SOFTRESUME: break; } splx(s); } #endif /* __NetBSD__ */ void wi_shutdown(device_t dev) { struct wi_softc *sc = device_get_softc(dev); wi_stop(sc->sc_ifp, 1); } void wi_intr(void *arg) { struct wi_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; u_int16_t status; WI_LOCK_DECL(); WI_LOCK(sc); if (sc->wi_gone || !sc->sc_enabled || (ifp->if_flags & IFF_UP) == 0) { CSR_WRITE_2(sc, WI_INT_EN, 0); CSR_WRITE_2(sc, WI_EVENT_ACK, 0xFFFF); WI_UNLOCK(sc); return; } /* 
Disable interrupts. */ CSR_WRITE_2(sc, WI_INT_EN, 0); status = CSR_READ_2(sc, WI_EVENT_STAT); if (status & WI_EV_RX) wi_rx_intr(sc); if (status & WI_EV_ALLOC) wi_tx_intr(sc); if (status & WI_EV_TX_EXC) wi_tx_ex_intr(sc); if (status & WI_EV_INFO) wi_info_intr(sc); if ((ifp->if_flags & IFF_OACTIVE) == 0 && (sc->sc_flags & WI_FLAGS_OUTRANGE) == 0 && !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) wi_start(ifp); /* Re-enable interrupts. */ CSR_WRITE_2(sc, WI_INT_EN, WI_INTRS); WI_UNLOCK(sc); return; } void wi_init(void *arg) { struct wi_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = &sc->sc_ic; struct wi_joinreq join; int i; int error = 0, wasenabled; struct ifaddr *ifa; struct sockaddr_dl *sdl; WI_LOCK_DECL(); WI_LOCK(sc); if (sc->wi_gone) { WI_UNLOCK(sc); return; } if ((wasenabled = sc->sc_enabled)) wi_stop(ifp, 1); wi_reset(sc); /* common 802.11 configuration */ ic->ic_flags &= ~IEEE80211_F_IBSSON; sc->sc_flags &= ~WI_FLAGS_OUTRANGE; switch (ic->ic_opmode) { case IEEE80211_M_STA: wi_write_val(sc, WI_RID_PORTTYPE, WI_PORTTYPE_BSS); break; case IEEE80211_M_IBSS: wi_write_val(sc, WI_RID_PORTTYPE, sc->sc_ibss_port); ic->ic_flags |= IEEE80211_F_IBSSON; break; case IEEE80211_M_AHDEMO: wi_write_val(sc, WI_RID_PORTTYPE, WI_PORTTYPE_ADHOC); break; case IEEE80211_M_HOSTAP: /* * For PRISM cards, override the empty SSID, because in * HostAP mode the controller will lock up otherwise. 
*/ if (sc->sc_firmware_type == WI_INTERSIL && ic->ic_des_esslen == 0) { ic->ic_des_essid[0] = ' '; ic->ic_des_esslen = 1; } wi_write_val(sc, WI_RID_PORTTYPE, WI_PORTTYPE_HOSTAP); break; case IEEE80211_M_MONITOR: if (sc->sc_firmware_type == WI_LUCENT) wi_write_val(sc, WI_RID_PORTTYPE, WI_PORTTYPE_ADHOC); wi_cmd(sc, WI_CMD_DEBUG | (WI_TEST_MONITOR << 8), 0, 0, 0); break; } /* Intersil interprets this RID as joining ESS even in IBSS mode */ if (sc->sc_firmware_type == WI_LUCENT && (ic->ic_flags & IEEE80211_F_IBSSON) && ic->ic_des_esslen > 0) wi_write_val(sc, WI_RID_CREATE_IBSS, 1); else wi_write_val(sc, WI_RID_CREATE_IBSS, 0); wi_write_val(sc, WI_RID_MAX_SLEEP, ic->ic_lintval); wi_write_ssid(sc, WI_RID_DESIRED_SSID, ic->ic_des_essid, ic->ic_des_esslen); wi_write_val(sc, WI_RID_OWN_CHNL, ieee80211_chan2ieee(ic, ic->ic_ibss_chan)); wi_write_ssid(sc, WI_RID_OWN_SSID, ic->ic_des_essid, ic->ic_des_esslen); ifa = ifaddr_byindex(ifp->if_index); sdl = (struct sockaddr_dl *) ifa->ifa_addr; IEEE80211_ADDR_COPY(ic->ic_myaddr, LLADDR(sdl)); wi_write_rid(sc, WI_RID_MAC_NODE, ic->ic_myaddr, IEEE80211_ADDR_LEN); if (ic->ic_caps & IEEE80211_C_PMGT) wi_write_val(sc, WI_RID_PM_ENABLED, (ic->ic_flags & IEEE80211_F_PMGTON) ? 
1 : 0); /* not yet common 802.11 configuration */ wi_write_val(sc, WI_RID_MAX_DATALEN, sc->sc_max_datalen); wi_write_val(sc, WI_RID_RTS_THRESH, ic->ic_rtsthreshold); if (sc->sc_flags & WI_FLAGS_HAS_FRAGTHR) wi_write_val(sc, WI_RID_FRAG_THRESH, ic->ic_fragthreshold); /* driver specific 802.11 configuration */ if (sc->sc_flags & WI_FLAGS_HAS_SYSSCALE) wi_write_val(sc, WI_RID_SYSTEM_SCALE, sc->sc_system_scale); if (sc->sc_flags & WI_FLAGS_HAS_ROAMING) wi_write_val(sc, WI_RID_ROAMING_MODE, sc->sc_roaming_mode); if (sc->sc_flags & WI_FLAGS_HAS_MOR) wi_write_val(sc, WI_RID_MICROWAVE_OVEN, sc->sc_microwave_oven); wi_write_txrate(sc); wi_write_ssid(sc, WI_RID_NODENAME, sc->sc_nodename, sc->sc_nodelen); if (ic->ic_opmode == IEEE80211_M_HOSTAP && sc->sc_firmware_type == WI_INTERSIL) { wi_write_val(sc, WI_RID_OWN_BEACON_INT, ic->ic_lintval); wi_write_val(sc, WI_RID_BASIC_RATE, 0x03); /* 1, 2 */ wi_write_val(sc, WI_RID_SUPPORT_RATE, 0x0f); /* 1, 2, 5.5, 11 */ wi_write_val(sc, WI_RID_DTIM_PERIOD, 1); } /* * Initialize promisc mode. * Being in the Host-AP mode causes a great * deal of pain if primisc mode is set. * Therefore we avoid confusing the firmware * and always reset promisc mode in Host-AP * mode. Host-AP sees all the packets anyway. */ if (ic->ic_opmode != IEEE80211_M_HOSTAP && (ifp->if_flags & IFF_PROMISC) != 0) { wi_write_val(sc, WI_RID_PROMISC, 1); } else { wi_write_val(sc, WI_RID_PROMISC, 0); } /* Configure WEP. */ if (ic->ic_caps & IEEE80211_C_WEP) { sc->sc_cnfauthmode = ic->ic_bss->ni_authmode; wi_write_wep(sc); } /* Set multicast filter. 
*/ wi_write_multi(sc); /* Allocate fids for the card */ if (sc->sc_firmware_type != WI_SYMBOL || !wasenabled) { sc->sc_buflen = IEEE80211_MAX_LEN + sizeof(struct wi_frame); if (sc->sc_firmware_type == WI_SYMBOL) sc->sc_buflen = 1585; /* XXX */ for (i = 0; i < sc->sc_ntxbuf; i++) { error = wi_alloc_fid(sc, sc->sc_buflen, &sc->sc_txd[i].d_fid); if (error) { device_printf(sc->sc_dev, "tx buffer allocation failed (error %u)\n", error); goto out; } sc->sc_txd[i].d_len = 0; } } sc->sc_txcur = sc->sc_txnext = 0; /* Enable desired port */ wi_cmd(sc, WI_CMD_ENABLE | sc->sc_portnum, 0, 0, 0); sc->sc_enabled = 1; ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; if (ic->ic_opmode == IEEE80211_M_AHDEMO || ic->ic_opmode == IEEE80211_M_IBSS || ic->ic_opmode == IEEE80211_M_MONITOR || ic->ic_opmode == IEEE80211_M_HOSTAP) ieee80211_create_ibss(ic, ic->ic_ibss_chan); /* Enable interrupts */ CSR_WRITE_2(sc, WI_INT_EN, WI_INTRS); if (!wasenabled && ic->ic_opmode == IEEE80211_M_HOSTAP && sc->sc_firmware_type == WI_INTERSIL) { /* XXX: some card need to be re-enabled for hostap */ wi_cmd(sc, WI_CMD_DISABLE | WI_PORT0, 0, 0, 0); wi_cmd(sc, WI_CMD_ENABLE | WI_PORT0, 0, 0, 0); } if (ic->ic_opmode == IEEE80211_M_STA && ((ic->ic_flags & IEEE80211_F_DESBSSID) || ic->ic_des_chan != IEEE80211_CHAN_ANYC)) { memset(&join, 0, sizeof(join)); if (ic->ic_flags & IEEE80211_F_DESBSSID) IEEE80211_ADDR_COPY(&join.wi_bssid, ic->ic_des_bssid); if (ic->ic_des_chan != IEEE80211_CHAN_ANYC) join.wi_chan = htole16( ieee80211_chan2ieee(ic, ic->ic_des_chan)); /* Lucent firmware does not support the JOIN RID. 
*/ if (sc->sc_firmware_type != WI_LUCENT) wi_write_rid(sc, WI_RID_JOIN_REQ, &join, sizeof(join)); } WI_UNLOCK(sc); return; out: if (error) { if_printf(ifp, "interface not running\n"); wi_stop(ifp, 1); } WI_UNLOCK(sc); DPRINTF(("wi_init: return %d\n", error)); return; } void wi_stop(struct ifnet *ifp, int disable) { struct wi_softc *sc = ifp->if_softc; struct ieee80211com *ic = &sc->sc_ic; WI_LOCK_DECL(); WI_LOCK(sc); DELAY(100000); ieee80211_new_state(ic, IEEE80211_S_INIT, -1); if (sc->sc_enabled && !sc->wi_gone) { CSR_WRITE_2(sc, WI_INT_EN, 0); wi_cmd(sc, WI_CMD_DISABLE | sc->sc_portnum, 0, 0, 0); if (disable) { #ifdef __NetBSD__ if (sc->sc_disable) (*sc->sc_disable)(sc); #endif sc->sc_enabled = 0; } } else if (sc->wi_gone && disable) /* gone --> not enabled */ sc->sc_enabled = 0; sc->sc_tx_timer = 0; sc->sc_scan_timer = 0; sc->sc_false_syns = 0; sc->sc_naps = 0; ifp->if_flags &= ~(IFF_OACTIVE | IFF_RUNNING); ifp->if_timer = 0; WI_UNLOCK(sc); } static void wi_start(struct ifnet *ifp) { struct wi_softc *sc = ifp->if_softc; struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_node *ni; struct ieee80211_frame *wh; struct ether_header *eh; struct mbuf *m0; struct wi_frame frmhdr; int cur, fid, off, error; WI_LOCK_DECL(); WI_LOCK(sc); if (sc->wi_gone) { WI_UNLOCK(sc); return; } if (sc->sc_flags & WI_FLAGS_OUTRANGE) { WI_UNLOCK(sc); return; } memset(&frmhdr, 0, sizeof(frmhdr)); cur = sc->sc_txnext; for (;;) { IF_POLL(&ic->ic_mgtq, m0); if (m0 != NULL) { if (sc->sc_txd[cur].d_len != 0) { ifp->if_flags |= IFF_OACTIVE; break; } IF_DEQUEUE(&ic->ic_mgtq, m0); /* * Hack! The referenced node pointer is in the * rcvif field of the packet header. This is * placed there by ieee80211_mgmt_output because * we need to hold the reference with the frame * and there's no other way (other than packet * tags which we consider too expensive to use) * to pass it along. 
*/ ni = (struct ieee80211_node *) m0->m_pkthdr.rcvif; m0->m_pkthdr.rcvif = NULL; m_copydata(m0, 4, ETHER_ADDR_LEN * 2, (caddr_t)&frmhdr.wi_ehdr); frmhdr.wi_ehdr.ether_type = 0; wh = mtod(m0, struct ieee80211_frame *); } else { if (ic->ic_state != IEEE80211_S_RUN) break; IFQ_DRV_DEQUEUE(&ifp->if_snd, m0); if (m0 == NULL) break; if (sc->sc_txd[cur].d_len != 0) { IFQ_DRV_PREPEND(&ifp->if_snd, m0); ifp->if_flags |= IFF_OACTIVE; break; } if (m0->m_len < sizeof(struct ether_header) && (m0 = m_pullup(m0, sizeof(struct ether_header))) == NULL) { ifp->if_oerrors++; continue; } eh = mtod(m0, struct ether_header *); ni = ieee80211_find_txnode(ic, eh->ether_dhost); if (ni == NULL) { m_freem(m0); continue; } ifp->if_opackets++; m_copydata(m0, 0, ETHER_HDR_LEN, (caddr_t)&frmhdr.wi_ehdr); #if NBPFILTER > 0 BPF_MTAP(ifp, m0); #endif m0 = ieee80211_encap(ic, m0, ni); if (m0 == NULL) { ifp->if_oerrors++; ieee80211_free_node(ni); continue; } wh = mtod(m0, struct ieee80211_frame *); } #if NBPFILTER > 0 if (ic->ic_rawbpf) bpf_mtap(ic->ic_rawbpf, m0); #endif frmhdr.wi_tx_ctl = htole16(WI_ENC_TX_802_11|WI_TXCNTL_TX_EX); /* XXX check key for SWCRYPT instead of using operating mode */ if (ic->ic_opmode == IEEE80211_M_HOSTAP && (wh->i_fc[1] & IEEE80211_FC1_WEP)) { struct ieee80211_key *k; k = ieee80211_crypto_encap(ic, ni, m0); if (k == NULL) { if (ni != NULL) ieee80211_free_node(ni); m_freem(m0); continue; } frmhdr.wi_tx_ctl |= htole16(WI_TXCNTL_NOCRYPT); } #if NBPFILTER > 0 if (sc->sc_drvbpf) { sc->sc_tx_th.wt_rate = ni->ni_rates.rs_rates[ni->ni_txrate]; bpf_mtap2(sc->sc_drvbpf, &sc->sc_tx_th, sc->sc_tx_th_len, m0); } #endif m_copydata(m0, 0, sizeof(struct ieee80211_frame), (caddr_t)&frmhdr.wi_whdr); m_adj(m0, sizeof(struct ieee80211_frame)); frmhdr.wi_dat_len = htole16(m0->m_pkthdr.len); if (IFF_DUMPPKTS(ifp)) wi_dump_pkt(&frmhdr, NULL, -1); fid = sc->sc_txd[cur].d_fid; off = sizeof(frmhdr); error = wi_write_bap(sc, fid, 0, &frmhdr, sizeof(frmhdr)) != 0 || wi_mwrite_bap(sc, fid, off, m0, 
m0->m_pkthdr.len) != 0; m_freem(m0); if (ni != NULL) ieee80211_free_node(ni); if (error) { ifp->if_oerrors++; continue; } sc->sc_txd[cur].d_len = off; if (sc->sc_txcur == cur) { if (wi_cmd(sc, WI_CMD_TX | WI_RECLAIM, fid, 0, 0)) { if_printf(ifp, "xmit failed\n"); sc->sc_txd[cur].d_len = 0; continue; } sc->sc_tx_timer = 5; ifp->if_timer = 1; } sc->sc_txnext = cur = (cur + 1) % sc->sc_ntxbuf; } WI_UNLOCK(sc); } static int wi_reset(struct wi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; #define WI_INIT_TRIES 3 int i; int error = 0; int tries; /* Symbol firmware cannot be initialized more than once */ if (sc->sc_firmware_type == WI_SYMBOL && sc->sc_reset) return (0); if (sc->sc_firmware_type == WI_SYMBOL) tries = 1; else tries = WI_INIT_TRIES; for (i = 0; i < tries; i++) { if ((error = wi_cmd(sc, WI_CMD_INI, 0, 0, 0)) == 0) break; DELAY(WI_DELAY * 1000); } sc->sc_reset = 1; if (i == tries) { if_printf(ifp, "init failed\n"); return (error); } CSR_WRITE_2(sc, WI_INT_EN, 0); CSR_WRITE_2(sc, WI_EVENT_ACK, 0xFFFF); /* Calibrate timer. 
*/ wi_write_val(sc, WI_RID_TICK_TIME, 8); return (0); #undef WI_INIT_TRIES } static void wi_watchdog(struct ifnet *ifp) { struct wi_softc *sc = ifp->if_softc; ifp->if_timer = 0; if (!sc->sc_enabled) return; if (sc->sc_tx_timer) { if (--sc->sc_tx_timer == 0) { if_printf(ifp, "device timeout\n"); ifp->if_oerrors++; wi_init(ifp->if_softc); return; } ifp->if_timer = 1; } if (sc->sc_scan_timer) { if (--sc->sc_scan_timer <= WI_SCAN_WAIT - WI_SCAN_INQWAIT && sc->sc_firmware_type == WI_INTERSIL) { DPRINTF(("wi_watchdog: inquire scan\n")); wi_cmd(sc, WI_CMD_INQUIRE, WI_INFO_SCAN_RESULTS, 0, 0); } if (sc->sc_scan_timer) ifp->if_timer = 1; } /* TODO: rate control */ ieee80211_watchdog(&sc->sc_ic); } static int wi_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct wi_softc *sc = ifp->if_softc; struct ieee80211com *ic = &sc->sc_ic; struct ifreq *ifr = (struct ifreq *)data; struct ieee80211req *ireq; u_int8_t nodename[IEEE80211_NWID_LEN]; int error = 0; #if __FreeBSD_version >= 500000 struct thread *td = curthread; #else struct proc *td = curproc; /* Little white lie */ #endif struct wi_req wreq; WI_LOCK_DECL(); if (sc->wi_gone) return (ENODEV); switch (cmd) { case SIOCSIFFLAGS: /* * Can't do promisc and hostap at the same time. If all that's * changing is the promisc flag, try to short-circuit a call to * wi_init() by just setting PROMISC in the hardware. 
*/ WI_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (ic->ic_opmode != IEEE80211_M_HOSTAP && ifp->if_flags & IFF_RUNNING) { if (ifp->if_flags & IFF_PROMISC && !(sc->sc_if_flags & IFF_PROMISC)) { wi_write_val(sc, WI_RID_PROMISC, 1); } else if (!(ifp->if_flags & IFF_PROMISC) && sc->sc_if_flags & IFF_PROMISC) { wi_write_val(sc, WI_RID_PROMISC, 0); } else { wi_init(sc); } } else { wi_init(sc); } } else { if (ifp->if_flags & IFF_RUNNING) { wi_stop(ifp, 1); } sc->wi_gone = 0; } sc->sc_if_flags = ifp->if_flags; WI_UNLOCK(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: WI_LOCK(sc); error = wi_write_multi(sc); WI_UNLOCK(sc); break; case SIOCGIFGENERIC: WI_LOCK(sc); error = wi_get_cfg(ifp, cmd, data); WI_UNLOCK(sc); break; case SIOCSIFGENERIC: error = suser(td); if (error == 0) error = wi_set_cfg(ifp, cmd, data); break; case SIOCGPRISM2DEBUG: error = copyin(ifr->ifr_data, &wreq, sizeof(wreq)); if (error) break; if (!(ifp->if_flags & IFF_RUNNING) || sc->sc_firmware_type == WI_LUCENT) { error = EIO; break; } error = wi_get_debug(sc, &wreq); if (error == 0) error = copyout(&wreq, ifr->ifr_data, sizeof(wreq)); break; case SIOCSPRISM2DEBUG: if ((error = suser(td))) return (error); error = copyin(ifr->ifr_data, &wreq, sizeof(wreq)); if (error) break; WI_LOCK(sc); error = wi_set_debug(sc, &wreq); WI_UNLOCK(sc); break; case SIOCG80211: ireq = (struct ieee80211req *) data; if (ireq->i_type == IEEE80211_IOC_STATIONNAME) { ireq->i_len = sc->sc_nodelen + 1; error = copyout(sc->sc_nodename, ireq->i_data, ireq->i_len); break; } goto ioctl_common; case SIOCS80211: ireq = (struct ieee80211req *) data; if (ireq->i_type == IEEE80211_IOC_STATIONNAME) { error = suser(td); if (error) break; if (ireq->i_val != 0 || ireq->i_len > IEEE80211_NWID_LEN) { error = EINVAL; break; } memset(nodename, 0, IEEE80211_NWID_LEN); error = copyin(ireq->i_data, nodename, ireq->i_len); if (error) break; WI_LOCK(sc); if (sc->sc_enabled) { error = wi_write_ssid(sc, WI_RID_NODENAME, nodename, ireq->i_len); } 
if (error == 0) { memcpy(sc->sc_nodename, nodename, IEEE80211_NWID_LEN); sc->sc_nodelen = ireq->i_len; } WI_UNLOCK(sc); break; } goto ioctl_common; default: ioctl_common: WI_LOCK(sc); error = ieee80211_ioctl(ic, cmd, data); if (error == ENETRESET) { if (sc->sc_enabled) wi_init(sc); /* XXX no error return */ error = 0; } WI_UNLOCK(sc); break; } return (error); } static int wi_media_change(struct ifnet *ifp) { struct wi_softc *sc = ifp->if_softc; int error; error = ieee80211_media_change(ifp); if (error == ENETRESET) { if (sc->sc_enabled) wi_init(sc); /* XXX no error return */ error = 0; } return error; } static void wi_media_status(struct ifnet *ifp, struct ifmediareq *imr) { struct wi_softc *sc = ifp->if_softc; struct ieee80211com *ic = &sc->sc_ic; u_int16_t val; int rate, len; if (sc->wi_gone || !sc->sc_enabled) { imr->ifm_active = IFM_IEEE80211 | IFM_NONE; imr->ifm_status = 0; return; } imr->ifm_status = IFM_AVALID; imr->ifm_active = IFM_IEEE80211; if (ic->ic_state == IEEE80211_S_RUN && (sc->sc_flags & WI_FLAGS_OUTRANGE) == 0) imr->ifm_status |= IFM_ACTIVE; len = sizeof(val); if (wi_read_rid(sc, WI_RID_CUR_TX_RATE, &val, &len) == 0 && len == sizeof(val)) { /* convert to 802.11 rate */ val = le16toh(val); rate = val * 2; if (sc->sc_firmware_type == WI_LUCENT) { if (rate == 10) rate = 11; /* 5.5Mbps */ } else { if (rate == 4*2) rate = 11; /* 5.5Mbps */ else if (rate == 8*2) rate = 22; /* 11Mbps */ } } else rate = 0; imr->ifm_active |= ieee80211_rate2media(ic, rate, IEEE80211_MODE_11B); switch (ic->ic_opmode) { case IEEE80211_M_STA: break; case IEEE80211_M_IBSS: imr->ifm_active |= IFM_IEEE80211_ADHOC; break; case IEEE80211_M_AHDEMO: imr->ifm_active |= IFM_IEEE80211_ADHOC | IFM_FLAG0; break; case IEEE80211_M_HOSTAP: imr->ifm_active |= IFM_IEEE80211_HOSTAP; break; case IEEE80211_M_MONITOR: imr->ifm_active |= IFM_IEEE80211_MONITOR; break; } } static void wi_sync_bssid(struct wi_softc *sc, u_int8_t new_bssid[IEEE80211_ADDR_LEN]) { struct ieee80211com *ic = &sc->sc_ic; 
struct ieee80211_node *ni = ic->ic_bss; struct ifnet *ifp = sc->sc_ifp; if (IEEE80211_ADDR_EQ(new_bssid, ni->ni_bssid)) return; DPRINTF(("wi_sync_bssid: bssid %s -> ", ether_sprintf(ni->ni_bssid))); DPRINTF(("%s ?\n", ether_sprintf(new_bssid))); /* In promiscuous mode, the BSSID field is not a reliable * indicator of the firmware's BSSID. Damp spurious * change-of-BSSID indications. */ if ((ifp->if_flags & IFF_PROMISC) != 0 && !ppsratecheck(&sc->sc_last_syn, &sc->sc_false_syns, WI_MAX_FALSE_SYNS)) return; sc->sc_false_syns = MAX(0, sc->sc_false_syns - 1); /* * XXX hack; we should create a new node with the new bssid * and replace the existing ic_bss with it but since we don't * process management frames to collect state we cheat by * reusing the existing node as we know wi_newstate will be * called and it will overwrite the node state. */ ieee80211_sta_join(ic, ieee80211_ref_node(ni)); } static void wi_rx_monitor(struct wi_softc *sc, int fid) { struct ifnet *ifp = sc->sc_ifp; struct wi_frame *rx_frame; struct mbuf *m; int datlen, hdrlen; /* first allocate mbuf for packet storage */ m = m_getcl(M_DONTWAIT, MT_DATA, 0); if (m == NULL) { ifp->if_ierrors++; return; } m->m_pkthdr.rcvif = ifp; /* now read wi_frame first so we know how much data to read */ if (wi_read_bap(sc, fid, 0, mtod(m, caddr_t), sizeof(*rx_frame))) { ifp->if_ierrors++; goto done; } rx_frame = mtod(m, struct wi_frame *); switch ((rx_frame->wi_status & WI_STAT_MAC_PORT) >> 8) { case 7: switch (rx_frame->wi_whdr.i_fc[0] & IEEE80211_FC0_TYPE_MASK) { case IEEE80211_FC0_TYPE_DATA: hdrlen = WI_DATA_HDRLEN; datlen = rx_frame->wi_dat_len + WI_FCS_LEN; break; case IEEE80211_FC0_TYPE_MGT: hdrlen = WI_MGMT_HDRLEN; datlen = rx_frame->wi_dat_len + WI_FCS_LEN; break; case IEEE80211_FC0_TYPE_CTL: /* * prism2 cards don't pass control packets * down properly or consistently, so we'll only * pass down the header. 
*/ hdrlen = WI_CTL_HDRLEN; datlen = 0; break; default: if_printf(ifp, "received packet of unknown type " "on port 7\n"); ifp->if_ierrors++; goto done; } break; case 0: hdrlen = WI_DATA_HDRLEN; datlen = rx_frame->wi_dat_len + WI_FCS_LEN; break; default: if_printf(ifp, "received packet on invalid " "port (wi_status=0x%x)\n", rx_frame->wi_status); ifp->if_ierrors++; goto done; } if (hdrlen + datlen + 2 > MCLBYTES) { if_printf(ifp, "oversized packet received " "(wi_dat_len=%d, wi_status=0x%x)\n", datlen, rx_frame->wi_status); ifp->if_ierrors++; goto done; } if (wi_read_bap(sc, fid, hdrlen, mtod(m, caddr_t) + hdrlen, datlen + 2) == 0) { m->m_pkthdr.len = m->m_len = hdrlen + datlen; ifp->if_ipackets++; BPF_MTAP(ifp, m); /* Handle BPF listeners. */ } else ifp->if_ierrors++; done: m_freem(m); } static void wi_rx_intr(struct wi_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ifnet *ifp = sc->sc_ifp; struct wi_frame frmhdr; struct mbuf *m; struct ieee80211_frame *wh; struct ieee80211_node *ni; int fid, len, off, rssi; u_int8_t dir; u_int16_t status; u_int32_t rstamp; fid = CSR_READ_2(sc, WI_RX_FID); if (sc->wi_debug.wi_monitor) { /* * If we are in monitor mode just * read the data from the device. 
*/ wi_rx_monitor(sc, fid); CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_RX); return; } /* First read in the frame header */ if (wi_read_bap(sc, fid, 0, &frmhdr, sizeof(frmhdr))) { CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_RX); ifp->if_ierrors++; DPRINTF(("wi_rx_intr: read fid %x failed\n", fid)); return; } if (IFF_DUMPPKTS(ifp)) wi_dump_pkt(&frmhdr, NULL, frmhdr.wi_rx_signal); /* * Drop undecryptable or packets with receive errors here */ status = le16toh(frmhdr.wi_status); if (status & WI_STAT_ERRSTAT) { CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_RX); ifp->if_ierrors++; DPRINTF(("wi_rx_intr: fid %x error status %x\n", fid, status)); return; } rssi = frmhdr.wi_rx_signal; rstamp = (le16toh(frmhdr.wi_rx_tstamp0) << 16) | le16toh(frmhdr.wi_rx_tstamp1); len = le16toh(frmhdr.wi_dat_len); off = ALIGN(sizeof(struct ieee80211_frame)); /* * Sometimes the PRISM2.x returns bogusly large frames. Except * in monitor mode, just throw them away. */ if (off + len > MCLBYTES) { if (ic->ic_opmode != IEEE80211_M_MONITOR) { CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_RX); ifp->if_ierrors++; DPRINTF(("wi_rx_intr: oversized packet\n")); return; } else len = 0; } MGETHDR(m, M_DONTWAIT, MT_DATA); if (m == NULL) { CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_RX); ifp->if_ierrors++; DPRINTF(("wi_rx_intr: MGET failed\n")); return; } if (off + len > MHLEN) { MCLGET(m, M_DONTWAIT); if ((m->m_flags & M_EXT) == 0) { CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_RX); m_freem(m); ifp->if_ierrors++; DPRINTF(("wi_rx_intr: MCLGET failed\n")); return; } } m->m_data += off - sizeof(struct ieee80211_frame); memcpy(m->m_data, &frmhdr.wi_whdr, sizeof(struct ieee80211_frame)); wi_read_bap(sc, fid, sizeof(frmhdr), m->m_data + sizeof(struct ieee80211_frame), len); m->m_pkthdr.len = m->m_len = sizeof(struct ieee80211_frame) + len; m->m_pkthdr.rcvif = ifp; CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_RX); wh = mtod(m, struct ieee80211_frame *); if (wh->i_fc[1] & IEEE80211_FC1_WEP) { /* * WEP is decrypted by hardware and the IV * is stripped. 
Clear WEP bit so we don't * try to process it in ieee80211_input. * XXX fix for TKIP, et. al. */ wh->i_fc[1] &= ~IEEE80211_FC1_WEP; } #if NBPFILTER > 0 if (sc->sc_drvbpf) { /* XXX replace divide by table */ sc->sc_rx_th.wr_rate = frmhdr.wi_rx_rate / 5; sc->sc_rx_th.wr_antsignal = frmhdr.wi_rx_signal; sc->sc_rx_th.wr_antnoise = frmhdr.wi_rx_silence; sc->sc_rx_th.wr_flags = 0; if (frmhdr.wi_status & WI_STAT_PCF) sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_CFP; /* XXX IEEE80211_RADIOTAP_F_WEP */ bpf_mtap2(sc->sc_drvbpf, &sc->sc_rx_th, sc->sc_rx_th_len, m); } #endif /* synchronize driver's BSSID with firmware's BSSID */ dir = wh->i_fc[1] & IEEE80211_FC1_DIR_MASK; if (ic->ic_opmode == IEEE80211_M_IBSS && dir == IEEE80211_FC1_DIR_NODS) wi_sync_bssid(sc, wh->i_addr3); /* * Locate the node for sender, track state, and * then pass this node (referenced) up to the 802.11 * layer for its use. */ ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *) wh); /* * Send frame up for processing. */ ieee80211_input(ic, m, ni, rssi, rstamp); /* * The frame may have caused the node to be marked for * reclamation (e.g. in response to a DEAUTH message) * so use free_node here instead of unref_node. */ ieee80211_free_node(ni); } static void wi_tx_ex_intr(struct wi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct wi_frame frmhdr; int fid; fid = CSR_READ_2(sc, WI_TX_CMP_FID); /* Read in the frame header */ if (wi_read_bap(sc, fid, 0, &frmhdr, sizeof(frmhdr)) == 0) { u_int16_t status = le16toh(frmhdr.wi_status); /* * Spontaneous station disconnects appear as xmit * errors. Don't announce them and/or count them * as an output error. 
*/ if ((status & WI_TXSTAT_DISCONNECT) == 0) { if (ppsratecheck(&lasttxerror, &curtxeps, wi_txerate)) { if_printf(ifp, "tx failed"); if (status & WI_TXSTAT_RET_ERR) printf(", retry limit exceeded"); if (status & WI_TXSTAT_AGED_ERR) printf(", max transmit lifetime exceeded"); if (status & WI_TXSTAT_DISCONNECT) printf(", port disconnected"); if (status & WI_TXSTAT_FORM_ERR) printf(", invalid format (data len %u src %6D)", le16toh(frmhdr.wi_dat_len), frmhdr.wi_ehdr.ether_shost, ":"); if (status & ~0xf) printf(", status=0x%x", status); printf("\n"); } ifp->if_oerrors++; } else { DPRINTF(("port disconnected\n")); ifp->if_collisions++; /* XXX */ } } else DPRINTF(("wi_tx_ex_intr: read fid %x failed\n", fid)); CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_TX_EXC); } static void wi_tx_intr(struct wi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; int fid, cur; if (sc->wi_gone) return; fid = CSR_READ_2(sc, WI_ALLOC_FID); CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_ALLOC); cur = sc->sc_txcur; if (sc->sc_txd[cur].d_fid != fid) { if_printf(ifp, "bad alloc %x != %x, cur %d nxt %d\n", fid, sc->sc_txd[cur].d_fid, cur, sc->sc_txnext); return; } sc->sc_tx_timer = 0; sc->sc_txd[cur].d_len = 0; sc->sc_txcur = cur = (cur + 1) % sc->sc_ntxbuf; if (sc->sc_txd[cur].d_len == 0) ifp->if_flags &= ~IFF_OACTIVE; else { if (wi_cmd(sc, WI_CMD_TX | WI_RECLAIM, sc->sc_txd[cur].d_fid, 0, 0)) { if_printf(ifp, "xmit failed\n"); sc->sc_txd[cur].d_len = 0; } else { sc->sc_tx_timer = 5; ifp->if_timer = 1; } } } static void wi_info_intr(struct wi_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ifnet *ifp = sc->sc_ifp; int i, fid, len, off; u_int16_t ltbuf[2]; u_int16_t stat; u_int32_t *ptr; fid = CSR_READ_2(sc, WI_INFO_FID); wi_read_bap(sc, fid, 0, ltbuf, sizeof(ltbuf)); switch (le16toh(ltbuf[1])) { case WI_INFO_LINK_STAT: wi_read_bap(sc, fid, sizeof(ltbuf), &stat, sizeof(stat)); DPRINTF(("wi_info_intr: LINK_STAT 0x%x\n", le16toh(stat))); switch (le16toh(stat)) { case WI_INFO_LINK_STAT_CONNECTED: sc->sc_flags &= 
~WI_FLAGS_OUTRANGE; if (ic->ic_state == IEEE80211_S_RUN && ic->ic_opmode != IEEE80211_M_IBSS) break; /* FALLTHROUGH */ case WI_INFO_LINK_STAT_AP_CHG: ieee80211_new_state(ic, IEEE80211_S_RUN, -1); break; case WI_INFO_LINK_STAT_AP_INR: sc->sc_flags &= ~WI_FLAGS_OUTRANGE; break; case WI_INFO_LINK_STAT_AP_OOR: if (sc->sc_firmware_type == WI_SYMBOL && sc->sc_scan_timer > 0) { if (wi_cmd(sc, WI_CMD_INQUIRE, WI_INFO_HOST_SCAN_RESULTS, 0, 0) != 0) sc->sc_scan_timer = 0; break; } if (ic->ic_opmode == IEEE80211_M_STA) sc->sc_flags |= WI_FLAGS_OUTRANGE; break; case WI_INFO_LINK_STAT_DISCONNECTED: case WI_INFO_LINK_STAT_ASSOC_FAILED: if (ic->ic_opmode == IEEE80211_M_STA) ieee80211_new_state(ic, IEEE80211_S_INIT, -1); break; } break; case WI_INFO_COUNTERS: /* some card versions have a larger stats structure */ len = min(le16toh(ltbuf[0]) - 1, sizeof(sc->sc_stats) / 4); ptr = (u_int32_t *)&sc->sc_stats; off = sizeof(ltbuf); for (i = 0; i < len; i++, off += 2, ptr++) { wi_read_bap(sc, fid, off, &stat, sizeof(stat)); #ifdef WI_HERMES_STATS_WAR if (stat & 0xf000) stat = ~stat; #endif *ptr += stat; } ifp->if_collisions = sc->sc_stats.wi_tx_single_retries + sc->sc_stats.wi_tx_multi_retries + sc->sc_stats.wi_tx_retry_limit; break; case WI_INFO_SCAN_RESULTS: case WI_INFO_HOST_SCAN_RESULTS: wi_scan_result(sc, fid, le16toh(ltbuf[0])); break; default: DPRINTF(("wi_info_intr: got fid %x type %x len %d\n", fid, le16toh(ltbuf[1]), le16toh(ltbuf[0]))); break; } CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_INFO); } static int wi_write_multi(struct wi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; int n; struct ifmultiaddr *ifma; struct wi_mcast mlist; if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { allmulti: memset(&mlist, 0, sizeof(mlist)); return wi_write_rid(sc, WI_RID_MCAST_LIST, &mlist, sizeof(mlist)); } n = 0; + IF_ADDR_LOCK(ifp); #if __FreeBSD_version < 500000 LIST_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { #else TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { #endif 
if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (n >= 16) goto allmulti; IEEE80211_ADDR_COPY(&mlist.wi_mcast[n], (LLADDR((struct sockaddr_dl *)ifma->ifma_addr))); n++; } + IF_ADDR_UNLOCK(ifp); return wi_write_rid(sc, WI_RID_MCAST_LIST, &mlist, IEEE80211_ADDR_LEN * n); } static void wi_read_nicid(struct wi_softc *sc) { struct wi_card_ident *id; char *p; int len; u_int16_t ver[4]; /* getting chip identity */ memset(ver, 0, sizeof(ver)); len = sizeof(ver); wi_read_rid(sc, WI_RID_CARD_ID, ver, &len); device_printf(sc->sc_dev, "using "); sc->sc_firmware_type = WI_NOTYPE; for (id = wi_card_ident; id->card_name != NULL; id++) { if (le16toh(ver[0]) == id->card_id) { printf("%s", id->card_name); sc->sc_firmware_type = id->firm_type; break; } } if (sc->sc_firmware_type == WI_NOTYPE) { if (le16toh(ver[0]) & 0x8000) { printf("Unknown PRISM2 chip"); sc->sc_firmware_type = WI_INTERSIL; } else { printf("Unknown Lucent chip"); sc->sc_firmware_type = WI_LUCENT; } } /* get primary firmware version (Only Prism chips) */ if (sc->sc_firmware_type != WI_LUCENT) { memset(ver, 0, sizeof(ver)); len = sizeof(ver); wi_read_rid(sc, WI_RID_PRI_IDENTITY, ver, &len); sc->sc_pri_firmware_ver = le16toh(ver[2]) * 10000 + le16toh(ver[3]) * 100 + le16toh(ver[1]); } /* get station firmware version */ memset(ver, 0, sizeof(ver)); len = sizeof(ver); wi_read_rid(sc, WI_RID_STA_IDENTITY, ver, &len); sc->sc_sta_firmware_ver = le16toh(ver[2]) * 10000 + le16toh(ver[3]) * 100 + le16toh(ver[1]); if (sc->sc_firmware_type == WI_INTERSIL && (sc->sc_sta_firmware_ver == 10102 || sc->sc_sta_firmware_ver == 20102)) { char ident[12]; memset(ident, 0, sizeof(ident)); len = sizeof(ident); /* value should be the format like "V2.00-11" */ if (wi_read_rid(sc, WI_RID_SYMBOL_IDENTITY, ident, &len) == 0 && *(p = (char *)ident) >= 'A' && p[2] == '.' 
&& p[5] == '-' && p[8] == '\0') { sc->sc_firmware_type = WI_SYMBOL; sc->sc_sta_firmware_ver = (p[1] - '0') * 10000 + (p[3] - '0') * 1000 + (p[4] - '0') * 100 + (p[6] - '0') * 10 + (p[7] - '0'); } } printf("\n"); device_printf(sc->sc_dev, "%s Firmware: ", sc->sc_firmware_type == WI_LUCENT ? "Lucent" : (sc->sc_firmware_type == WI_SYMBOL ? "Symbol" : "Intersil")); if (sc->sc_firmware_type != WI_LUCENT) /* XXX */ printf("Primary (%u.%u.%u), ", sc->sc_pri_firmware_ver / 10000, (sc->sc_pri_firmware_ver % 10000) / 100, sc->sc_pri_firmware_ver % 100); printf("Station (%u.%u.%u)\n", sc->sc_sta_firmware_ver / 10000, (sc->sc_sta_firmware_ver % 10000) / 100, sc->sc_sta_firmware_ver % 100); } static int wi_write_ssid(struct wi_softc *sc, int rid, u_int8_t *buf, int buflen) { struct wi_ssid ssid; if (buflen > IEEE80211_NWID_LEN) return ENOBUFS; memset(&ssid, 0, sizeof(ssid)); ssid.wi_len = htole16(buflen); memcpy(ssid.wi_ssid, buf, buflen); return wi_write_rid(sc, rid, &ssid, sizeof(ssid)); } static int wi_get_cfg(struct ifnet *ifp, u_long cmd, caddr_t data) { struct wi_softc *sc = ifp->if_softc; struct ieee80211com *ic = &sc->sc_ic; struct ifreq *ifr = (struct ifreq *)data; struct wi_req wreq; struct wi_scan_res *res; size_t reslen; int len, n, error, mif, val, off, i; error = copyin(ifr->ifr_data, &wreq, sizeof(wreq)); if (error) return error; len = (wreq.wi_len - 1) * 2; if (len < sizeof(u_int16_t)) return ENOSPC; if (len > sizeof(wreq.wi_val)) len = sizeof(wreq.wi_val); switch (wreq.wi_type) { case WI_RID_IFACE_STATS: memcpy(wreq.wi_val, &sc->sc_stats, sizeof(sc->sc_stats)); if (len < sizeof(sc->sc_stats)) error = ENOSPC; else len = sizeof(sc->sc_stats); break; case WI_RID_ENCRYPTION: case WI_RID_TX_CRYPT_KEY: case WI_RID_DEFLT_CRYPT_KEYS: case WI_RID_TX_RATE: return ieee80211_cfgget(ic, cmd, data); case WI_RID_MICROWAVE_OVEN: if (sc->sc_enabled && (sc->sc_flags & WI_FLAGS_HAS_MOR)) { error = wi_read_rid(sc, wreq.wi_type, wreq.wi_val, &len); break; } wreq.wi_val[0] = 
htole16(sc->sc_microwave_oven); len = sizeof(u_int16_t); break; case WI_RID_DBM_ADJUST: if (sc->sc_enabled && (sc->sc_flags & WI_FLAGS_HAS_DBMADJUST)) { error = wi_read_rid(sc, wreq.wi_type, wreq.wi_val, &len); break; } wreq.wi_val[0] = htole16(sc->sc_dbm_offset); len = sizeof(u_int16_t); break; case WI_RID_ROAMING_MODE: if (sc->sc_enabled && (sc->sc_flags & WI_FLAGS_HAS_ROAMING)) { error = wi_read_rid(sc, wreq.wi_type, wreq.wi_val, &len); break; } wreq.wi_val[0] = htole16(sc->sc_roaming_mode); len = sizeof(u_int16_t); break; case WI_RID_SYSTEM_SCALE: if (sc->sc_enabled && (sc->sc_flags & WI_FLAGS_HAS_SYSSCALE)) { error = wi_read_rid(sc, wreq.wi_type, wreq.wi_val, &len); break; } wreq.wi_val[0] = htole16(sc->sc_system_scale); len = sizeof(u_int16_t); break; case WI_RID_FRAG_THRESH: if (sc->sc_enabled && (sc->sc_flags & WI_FLAGS_HAS_FRAGTHR)) { error = wi_read_rid(sc, wreq.wi_type, wreq.wi_val, &len); break; } wreq.wi_val[0] = htole16(ic->ic_fragthreshold); len = sizeof(u_int16_t); break; case WI_RID_READ_APS: if (ic->ic_opmode == IEEE80211_M_HOSTAP) return ieee80211_cfgget(ic, cmd, data); if (sc->sc_scan_timer > 0) { error = EINPROGRESS; break; } n = sc->sc_naps; if (len < sizeof(n)) { error = ENOSPC; break; } if (len < sizeof(n) + sizeof(struct wi_apinfo) * n) n = (len - sizeof(n)) / sizeof(struct wi_apinfo); len = sizeof(n) + sizeof(struct wi_apinfo) * n; memcpy(wreq.wi_val, &n, sizeof(n)); memcpy((caddr_t)wreq.wi_val + sizeof(n), sc->sc_aps, sizeof(struct wi_apinfo) * n); break; case WI_RID_PRISM2: wreq.wi_val[0] = sc->sc_firmware_type != WI_LUCENT; len = sizeof(u_int16_t); break; case WI_RID_MIF: mif = wreq.wi_val[0]; error = wi_cmd(sc, WI_CMD_READMIF, mif, 0, 0); val = CSR_READ_2(sc, WI_RESP0); wreq.wi_val[0] = val; len = sizeof(u_int16_t); break; case WI_RID_ZERO_CACHE: case WI_RID_PROCFRAME: /* ignore for compatibility */ /* XXX ??? 
*/ break; case WI_RID_READ_CACHE: return ieee80211_cfgget(ic, cmd, data); case WI_RID_SCAN_RES: /* compatibility interface */ if (ic->ic_opmode == IEEE80211_M_HOSTAP) return ieee80211_cfgget(ic, cmd, data); if (sc->sc_scan_timer > 0) { error = EINPROGRESS; break; } n = sc->sc_naps; if (sc->sc_firmware_type == WI_LUCENT) { off = 0; reslen = WI_WAVELAN_RES_SIZE; } else { off = sizeof(struct wi_scan_p2_hdr); reslen = WI_PRISM2_RES_SIZE; } if (len < off + reslen * n) n = (len - off) / reslen; len = off + reslen * n; if (off != 0) { struct wi_scan_p2_hdr *p2 = (struct wi_scan_p2_hdr *)wreq.wi_val; /* * Prepend Prism-specific header. */ if (len < sizeof(struct wi_scan_p2_hdr)) { error = ENOSPC; break; } p2 = (struct wi_scan_p2_hdr *)wreq.wi_val; p2->wi_rsvd = 0; p2->wi_reason = n; /* XXX */ } for (i = 0; i < n; i++, off += reslen) { const struct wi_apinfo *ap = &sc->sc_aps[i]; res = (struct wi_scan_res *)((char *)wreq.wi_val + off); res->wi_chan = ap->channel; res->wi_noise = ap->noise; res->wi_signal = ap->signal; IEEE80211_ADDR_COPY(res->wi_bssid, ap->bssid); res->wi_interval = ap->interval; res->wi_capinfo = ap->capinfo; res->wi_ssid_len = ap->namelen; memcpy(res->wi_ssid, ap->name, IEEE80211_NWID_LEN); if (sc->sc_firmware_type != WI_LUCENT) { /* XXX not saved from Prism cards */ memset(res->wi_srates, 0, sizeof(res->wi_srates)); res->wi_rate = ap->rate; res->wi_rsvd = 0; } } break; default: if (sc->sc_enabled) { error = wi_read_rid(sc, wreq.wi_type, wreq.wi_val, &len); break; } switch (wreq.wi_type) { case WI_RID_MAX_DATALEN: wreq.wi_val[0] = htole16(sc->sc_max_datalen); len = sizeof(u_int16_t); break; case WI_RID_RTS_THRESH: wreq.wi_val[0] = htole16(ic->ic_rtsthreshold); len = sizeof(u_int16_t); break; case WI_RID_CNFAUTHMODE: wreq.wi_val[0] = htole16(sc->sc_cnfauthmode); len = sizeof(u_int16_t); break; case WI_RID_NODENAME: if (len < sc->sc_nodelen + sizeof(u_int16_t)) { error = ENOSPC; break; } len = sc->sc_nodelen + sizeof(u_int16_t); wreq.wi_val[0] = 
htole16((sc->sc_nodelen + 1) / 2); memcpy(&wreq.wi_val[1], sc->sc_nodename, sc->sc_nodelen); break; default: return ieee80211_cfgget(ic, cmd, data); } break; } if (error) return error; wreq.wi_len = (len + 1) / 2 + 1; return copyout(&wreq, ifr->ifr_data, (wreq.wi_len + 1) * 2); } static int wi_set_cfg(struct ifnet *ifp, u_long cmd, caddr_t data) { struct wi_softc *sc = ifp->if_softc; struct ieee80211com *ic = &sc->sc_ic; struct ifreq *ifr = (struct ifreq *)data; struct wi_req wreq; struct mbuf *m; int i, len, error, mif, val; struct ieee80211_rateset *rs; WI_LOCK_DECL(); error = copyin(ifr->ifr_data, &wreq, sizeof(wreq)); if (error) return error; len = wreq.wi_len ? (wreq.wi_len - 1) * 2 : 0; switch (wreq.wi_type) { case WI_RID_DBM_ADJUST: return ENODEV; case WI_RID_NODENAME: if (le16toh(wreq.wi_val[0]) * 2 > len || le16toh(wreq.wi_val[0]) > sizeof(sc->sc_nodename)) { error = ENOSPC; break; } WI_LOCK(sc); if (sc->sc_enabled) error = wi_write_rid(sc, wreq.wi_type, wreq.wi_val, len); if (error == 0) { sc->sc_nodelen = le16toh(wreq.wi_val[0]) * 2; memcpy(sc->sc_nodename, &wreq.wi_val[1], sc->sc_nodelen); } WI_UNLOCK(sc); break; case WI_RID_MICROWAVE_OVEN: case WI_RID_ROAMING_MODE: case WI_RID_SYSTEM_SCALE: case WI_RID_FRAG_THRESH: /* XXX unlocked reads */ if (wreq.wi_type == WI_RID_MICROWAVE_OVEN && (sc->sc_flags & WI_FLAGS_HAS_MOR) == 0) break; if (wreq.wi_type == WI_RID_ROAMING_MODE && (sc->sc_flags & WI_FLAGS_HAS_ROAMING) == 0) break; if (wreq.wi_type == WI_RID_SYSTEM_SCALE && (sc->sc_flags & WI_FLAGS_HAS_SYSSCALE) == 0) break; if (wreq.wi_type == WI_RID_FRAG_THRESH && (sc->sc_flags & WI_FLAGS_HAS_FRAGTHR) == 0) break; /* FALLTHROUGH */ case WI_RID_RTS_THRESH: case WI_RID_CNFAUTHMODE: case WI_RID_MAX_DATALEN: WI_LOCK(sc); if (sc->sc_enabled) { error = wi_write_rid(sc, wreq.wi_type, wreq.wi_val, sizeof(u_int16_t)); if (error != 0) { WI_UNLOCK(sc); break; } } switch (wreq.wi_type) { case WI_RID_FRAG_THRESH: ic->ic_fragthreshold = le16toh(wreq.wi_val[0]); break; case 
WI_RID_RTS_THRESH: ic->ic_rtsthreshold = le16toh(wreq.wi_val[0]); break; case WI_RID_MICROWAVE_OVEN: sc->sc_microwave_oven = le16toh(wreq.wi_val[0]); break; case WI_RID_ROAMING_MODE: sc->sc_roaming_mode = le16toh(wreq.wi_val[0]); break; case WI_RID_SYSTEM_SCALE: sc->sc_system_scale = le16toh(wreq.wi_val[0]); break; case WI_RID_CNFAUTHMODE: sc->sc_cnfauthmode = le16toh(wreq.wi_val[0]); break; case WI_RID_MAX_DATALEN: sc->sc_max_datalen = le16toh(wreq.wi_val[0]); break; } WI_UNLOCK(sc); break; case WI_RID_TX_RATE: WI_LOCK(sc); switch (le16toh(wreq.wi_val[0])) { case 3: ic->ic_fixed_rate = IEEE80211_FIXED_RATE_NONE; break; default: rs = &ic->ic_sup_rates[IEEE80211_MODE_11B]; for (i = 0; i < rs->rs_nrates; i++) { if ((rs->rs_rates[i] & IEEE80211_RATE_VAL) / 2 == le16toh(wreq.wi_val[0])) break; } if (i == rs->rs_nrates) { WI_UNLOCK(sc); return EINVAL; } ic->ic_fixed_rate = i; } if (sc->sc_enabled) error = wi_write_txrate(sc); WI_UNLOCK(sc); break; case WI_RID_SCAN_APS: WI_LOCK(sc); if (sc->sc_enabled && ic->ic_opmode != IEEE80211_M_HOSTAP) error = wi_scan_ap(sc, 0x3fff, 0x000f); WI_UNLOCK(sc); break; case WI_RID_SCAN_REQ: /* compatibility interface */ WI_LOCK(sc); if (sc->sc_enabled && ic->ic_opmode != IEEE80211_M_HOSTAP) error = wi_scan_ap(sc, wreq.wi_val[0], wreq.wi_val[1]); WI_UNLOCK(sc); break; case WI_RID_MGMT_XMIT: WI_LOCK(sc); if (!sc->sc_enabled) error = ENETDOWN; else if (ic->ic_mgtq.ifq_len > 5) error = EAGAIN; else { /* NB: m_devget uses M_DONTWAIT so can hold the lock */ /* XXX wi_len looks in u_int8_t, not in u_int16_t */ m = m_devget((char *)&wreq.wi_val, wreq.wi_len, 0, ifp, NULL); if (m != NULL) IF_ENQUEUE(&ic->ic_mgtq, m); else error = ENOMEM; } WI_UNLOCK(sc); break; case WI_RID_MIF: mif = wreq.wi_val[0]; val = wreq.wi_val[1]; WI_LOCK(sc); error = wi_cmd(sc, WI_CMD_WRITEMIF, mif, val, 0); WI_UNLOCK(sc); break; case WI_RID_PROCFRAME: /* ignore for compatibility */ break; case WI_RID_OWN_SSID: if (le16toh(wreq.wi_val[0]) * 2 > len || 
le16toh(wreq.wi_val[0]) > IEEE80211_NWID_LEN) { error = ENOSPC; break; } WI_LOCK(sc); memset(ic->ic_des_essid, 0, IEEE80211_NWID_LEN); ic->ic_des_esslen = le16toh(wreq.wi_val[0]) * 2; memcpy(ic->ic_des_essid, &wreq.wi_val[1], ic->ic_des_esslen); if (sc->sc_enabled) wi_init(sc); /* XXX no error return */ WI_UNLOCK(sc); break; default: WI_LOCK(sc); if (sc->sc_enabled) error = wi_write_rid(sc, wreq.wi_type, wreq.wi_val, len); if (error == 0) { /* XXX ieee80211_cfgset does a copyin */ error = ieee80211_cfgset(ic, cmd, data); if (error == ENETRESET) { if (sc->sc_enabled) wi_init(sc); error = 0; } } WI_UNLOCK(sc); break; } return error; } static int wi_write_txrate(struct wi_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; int i; u_int16_t rate; if (ic->ic_fixed_rate == IEEE80211_FIXED_RATE_NONE) rate = 0; /* auto */ else rate = (ic->ic_sup_rates[IEEE80211_MODE_11B].rs_rates[ic->ic_fixed_rate] & IEEE80211_RATE_VAL) / 2; /* rate: 0, 1, 2, 5, 11 */ switch (sc->sc_firmware_type) { case WI_LUCENT: switch (rate) { case 0: /* auto == 11mbps auto */ rate = 3; break; /* case 1, 2 map to 1, 2*/ case 5: /* 5.5Mbps -> 4 */ rate = 4; break; case 11: /* 11mbps -> 5 */ rate = 5; break; default: break; } break; default: /* Choose a bit according to this table. * * bit | data rate * ----+------------------- * 0 | 1Mbps * 1 | 2Mbps * 2 | 5.5Mbps * 3 | 11Mbps */ for (i = 8; i > 0; i >>= 1) { if (rate >= i) break; } if (i == 0) rate = 0xf; /* auto */ else rate = i; break; } return wi_write_val(sc, WI_RID_TX_RATE, rate); } static int wi_write_wep(struct wi_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; int error = 0; int i, keylen; u_int16_t val; struct wi_key wkey[IEEE80211_WEP_NKID]; switch (sc->sc_firmware_type) { case WI_LUCENT: val = (ic->ic_flags & IEEE80211_F_PRIVACY) ? 
1 : 0; error = wi_write_val(sc, WI_RID_ENCRYPTION, val); if (error) break; if ((ic->ic_flags & IEEE80211_F_PRIVACY) == 0) break; error = wi_write_val(sc, WI_RID_TX_CRYPT_KEY, ic->ic_def_txkey); if (error) break; memset(wkey, 0, sizeof(wkey)); for (i = 0; i < IEEE80211_WEP_NKID; i++) { keylen = ic->ic_nw_keys[i].wk_keylen; wkey[i].wi_keylen = htole16(keylen); memcpy(wkey[i].wi_keydat, ic->ic_nw_keys[i].wk_key, keylen); } error = wi_write_rid(sc, WI_RID_DEFLT_CRYPT_KEYS, wkey, sizeof(wkey)); break; case WI_INTERSIL: case WI_SYMBOL: if (ic->ic_flags & IEEE80211_F_PRIVACY) { /* * ONLY HWB3163 EVAL-CARD Firmware version * less than 0.8 variant2 * * If promiscuous mode disable, Prism2 chip * does not work with WEP . * It is under investigation for details. * (ichiro@netbsd.org) */ if (sc->sc_firmware_type == WI_INTERSIL && sc->sc_sta_firmware_ver < 802 ) { /* firm ver < 0.8 variant 2 */ wi_write_val(sc, WI_RID_PROMISC, 1); } wi_write_val(sc, WI_RID_CNFAUTHMODE, sc->sc_cnfauthmode); val = PRIVACY_INVOKED | EXCLUDE_UNENCRYPTED; /* * Encryption firmware has a bug for HostAP mode. */ if (sc->sc_firmware_type == WI_INTERSIL && ic->ic_opmode == IEEE80211_M_HOSTAP) val |= HOST_ENCRYPT; } else { wi_write_val(sc, WI_RID_CNFAUTHMODE, IEEE80211_AUTH_OPEN); val = HOST_ENCRYPT | HOST_DECRYPT; } error = wi_write_val(sc, WI_RID_P2_ENCRYPTION, val); if (error) break; if ((val & PRIVACY_INVOKED) == 0) break; error = wi_write_val(sc, WI_RID_P2_TX_CRYPT_KEY, ic->ic_def_txkey); if (error) break; if (val & HOST_DECRYPT) break; /* * It seems that the firmware accept 104bit key only if * all the keys have 104bit length. We get the length of * the transmit key and use it for all other keys. * Perhaps we should use software WEP for such situation. 
		 */
		if (ic->ic_def_txkey != IEEE80211_KEYIX_NONE)
			keylen = ic->ic_nw_keys[ic->ic_def_txkey].wk_keylen;
		else	/* XXX should not happen */
			keylen = IEEE80211_WEP_KEYLEN;
		/* Normalize: all keys written as either 104-bit or 40-bit. */
		if (keylen > IEEE80211_WEP_KEYLEN)
			keylen = 13;	/* 104bit keys */
		else
			keylen = IEEE80211_WEP_KEYLEN;
		for (i = 0; i < IEEE80211_WEP_NKID; i++) {
			error = wi_write_rid(sc, WI_RID_P2_CRYPT_KEY0 + i,
			    ic->ic_nw_keys[i].wk_key, keylen);
			if (error)
				break;
		}
		break;
	}
	return error;
}

/*
 * Issue a command to the Hermes/Prism firmware: wait for the busy bit
 * to clear, load the three parameter registers and the command register,
 * then poll the event status register for command completion.
 *
 * Returns 0 on success, EIO if the firmware reports a command error,
 * ETIMEDOUT if the busy bit or completion event never shows up (an
 * all-ones status read additionally marks the device as gone), or
 * ENODEV if the device was already marked gone.
 *
 * NOTE(review): the static 'count' + panic() pair is a reentrancy
 * tripwire, not real synchronization — callers are expected to
 * serialize access externally.
 */
static int
wi_cmd(struct wi_softc *sc, int cmd, int val0, int val1, int val2)
{
	int i, s = 0;
	static volatile int count  = 0;

	if (sc->wi_gone)
		return (ENODEV);

	if (count > 0)
		panic("Hey partner, hold on there!");
	count++;

	/* wait for the busy bit to clear */
	for (i = sc->wi_cmd_count; i > 0; i--) {	/* 500ms */
		if (!(CSR_READ_2(sc, WI_COMMAND) & WI_CMD_BUSY))
			break;
		DELAY(1*1000);	/* 1ms */
	}
	if (i == 0) {
		device_printf(sc->sc_dev, "wi_cmd: busy bit won't clear.\n" );
		sc->wi_gone = 1;
		count--;
		return(ETIMEDOUT);
	}

	CSR_WRITE_2(sc, WI_PARAM0, val0);
	CSR_WRITE_2(sc, WI_PARAM1, val1);
	CSR_WRITE_2(sc, WI_PARAM2, val2);
	CSR_WRITE_2(sc, WI_COMMAND, cmd);

	if (cmd == WI_CMD_INI) {
		/* XXX: should sleep here. */
		DELAY(100*1000);	/* 100ms delay for init */
	}
	for (i = 0; i < WI_TIMEOUT; i++) {
		/*
		 * Wait for 'command complete' bit to be
		 * set in the event status register.
		 */
		s = CSR_READ_2(sc, WI_EVENT_STAT);
		if (s & WI_EV_CMD) {
			/* Ack the event and read result code. */
			s = CSR_READ_2(sc, WI_STATUS);
			CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_CMD);
			if (s & WI_STAT_CMD_RESULT) {
				count--;
				return(EIO);
			}
			break;
		}
		DELAY(WI_DELAY);
	}

	count--;
	if (i == WI_TIMEOUT) {
		device_printf(sc->sc_dev,
		    "timeout in wi_cmd 0x%04x; event status 0x%04x\n", cmd, s);
		if (s == 0xffff)
			sc->wi_gone = 1;
		return(ETIMEDOUT);
	}
	return (0);
}

/*
 * Position the Buffer Access Path (BAP) at record 'id', offset 'off'.
 * On success the new position is cached in sc_bap_id/sc_bap_off so
 * subsequent sequential accesses can skip the seek.  On timeout or
 * firmware error the cached offset is poisoned with WI_OFF_ERR; an
 * all-ones status read marks the device as gone.
 */
static int
wi_seek_bap(struct wi_softc *sc, int id, int off)
{
	int i, status;

	CSR_WRITE_2(sc, WI_SEL0, id);
	CSR_WRITE_2(sc, WI_OFF0, off);

	for (i = 0; ; i++) {
		status = CSR_READ_2(sc, WI_OFF0);
		if ((status & WI_OFF_BUSY) == 0)
			break;
		if (i == WI_TIMEOUT) {
			device_printf(sc->sc_dev, "timeout in wi_seek to %x/%x\n",
			    id, off);
			sc->sc_bap_off = WI_OFF_ERR;	/* invalidate */
			if (status == 0xffff)
				sc->wi_gone = 1;
			return ETIMEDOUT;
		}
		DELAY(1);
	}
	if (status & WI_OFF_ERR) {
		device_printf(sc->sc_dev, "failed in wi_seek to %x/%x\n",
		    id, off);
		sc->sc_bap_off = WI_OFF_ERR;	/* invalidate */
		return EIO;
	}
	sc->sc_bap_id = id;
	sc->sc_bap_off = off;
	return 0;
}

/*
 * Read 'buflen' bytes from the BAP at (id, off) into 'buf', seeking
 * first only if the cached BAP position does not already match.  The
 * transfer is done in 16-bit words (buflen is rounded up), and the
 * cached offset is advanced past the data read.
 */
static int
wi_read_bap(struct wi_softc *sc, int id, int off, void *buf, int buflen)
{
	u_int16_t *ptr;
	int i, error, cnt;

	if (buflen == 0)
		return 0;
	if (id != sc->sc_bap_id || off != sc->sc_bap_off) {
		if ((error = wi_seek_bap(sc, id, off)) != 0)
			return error;
	}
	cnt = (buflen + 1) / 2;
	ptr = (u_int16_t *)buf;
	for (i = 0; i < cnt; i++)
		*ptr++ = CSR_READ_2(sc, WI_DATA0);
	sc->sc_bap_off += cnt * 2;
	return 0;
}

/*
 * Write 'buflen' bytes from 'buf' to the BAP at (id, off).  Mirrors
 * wi_read_bap: seeks only on a cache miss, transfers 16-bit words, and
 * advances the cached offset.  When WI_HERMES_AUTOINC_WAR is defined
 * the transfer is verified with guard words and retried on mismatch
 * (see the workaround comment below).
 */
static int
wi_write_bap(struct wi_softc *sc, int id, int off, void *buf, int buflen)
{
	u_int16_t *ptr;
	int i, error, cnt;

	if (buflen == 0)
		return 0;

#ifdef WI_HERMES_AUTOINC_WAR
  again:
#endif
	if (id != sc->sc_bap_id || off != sc->sc_bap_off) {
		if ((error = wi_seek_bap(sc, id, off)) != 0)
			return error;
	}
	cnt = (buflen + 1) / 2;
	ptr = (u_int16_t *)buf;
	for (i = 0; i < cnt; i++)
		CSR_WRITE_2(sc, WI_DATA0, ptr[i]);
	sc->sc_bap_off += cnt * 2;

#ifdef WI_HERMES_AUTOINC_WAR
	/*
	 * According to the comments in the HCF Light code, there is a bug
	 * in the Hermes (or possibly in certain Hermes
	 * firmware revisions)
	 * where the chip's internal autoincrement counter gets thrown off
	 * during data writes: the autoincrement is missed, causing one
	 * data word to be overwritten and subsequent words to be written to
	 * the wrong memory locations. The end result is that we could end
	 * up transmitting bogus frames without realizing it. The workaround
	 * for this is to write a couple of extra guard words after the end
	 * of the transfer, then attempt to read them back. If we fail to
	 * locate the guard words where we expect them, we perform the
	 * transfer over again.
	 */
	if ((sc->sc_flags & WI_FLAGS_BUG_AUTOINC) && (id & 0xf000) == 0) {
		CSR_WRITE_2(sc, WI_DATA0, 0x1234);
		CSR_WRITE_2(sc, WI_DATA0, 0x5678);
		wi_seek_bap(sc, id, sc->sc_bap_off);
		sc->sc_bap_off = WI_OFF_ERR;	/* invalidate */
		if (CSR_READ_2(sc, WI_DATA0) != 0x1234 ||
		    CSR_READ_2(sc, WI_DATA0) != 0x5678) {
			device_printf(sc->sc_dev,
			    "detect auto increment bug, try again\n");
			goto again;
		}
	}
#endif
	return 0;
}

/*
 * Write an mbuf chain (up to 'totlen' bytes) to the BAP at (id, off).
 * Aligned, even-length mbufs are written in place; as soon as one mbuf
 * is misaligned or has odd length, the remainder of the chain is
 * flattened into sc_txbuf and written in a single transfer.
 */
static int
wi_mwrite_bap(struct wi_softc *sc, int id, int off, struct mbuf *m0, int totlen)
{
	int error, len;
	struct mbuf *m;

	for (m = m0; m != NULL && totlen > 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;

		len = min(m->m_len, totlen);

		if (((u_long)m->m_data) % 2 != 0 || len % 2 != 0) {
			m_copydata(m, 0, totlen, (caddr_t)&sc->sc_txbuf);
			return wi_write_bap(sc, id, off,
			    (caddr_t)&sc->sc_txbuf, totlen);
		}

		if ((error = wi_write_bap(sc, id, off, m->m_data, len)) != 0)
			return error;

		off += m->m_len;
		totlen -= len;
	}
	return 0;
}

/*
 * Ask the firmware to allocate 'len' bytes of NIC memory, wait for the
 * allocation-complete event, and return the resulting frame id via
 * *idp.  Returns 0, ENOMEM if the allocate command fails, or ETIMEDOUT
 * if the WI_EV_ALLOC event never arrives.
 */
static int
wi_alloc_fid(struct wi_softc *sc, int len, int *idp)
{
	int i;

	if (wi_cmd(sc, WI_CMD_ALLOC_MEM, len, 0, 0)) {
		device_printf(sc->sc_dev,
		    "failed to allocate %d bytes on NIC\n", len);
		return ENOMEM;
	}

	for (i = 0; i < WI_TIMEOUT; i++) {
		if (CSR_READ_2(sc, WI_EVENT_STAT) & WI_EV_ALLOC)
			break;
		DELAY(1);
	}
	if (i == WI_TIMEOUT) {
		device_printf(sc->sc_dev, "timeout in alloc\n");
		return ETIMEDOUT;
	}
	*idp = CSR_READ_2(sc, WI_ALLOC_FID);
	CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_ALLOC);
	return 0;
}

/*
 * Read configuration record 'rid' into 'buf'.  A record starts with a
 * two-word header (length in words including the rid, then the rid
 * itself); the header is validated against the requested rid and the
 * payload length is checked against *buflenp, which is updated to the
 * actual payload size on success.
 */
static int
wi_read_rid(struct wi_softc *sc, int rid, void *buf, int *buflenp)
{
	int error, len;
	u_int16_t ltbuf[2];

	/* Tell the NIC to enter record read mode. */
	error = wi_cmd(sc, WI_CMD_ACCESS | WI_ACCESS_READ, rid, 0, 0);
	if (error)
		return error;

	error = wi_read_bap(sc, rid, 0, ltbuf, sizeof(ltbuf));
	if (error)
		return error;

	if (le16toh(ltbuf[1]) != rid) {
		device_printf(sc->sc_dev, "record read mismatch, rid=%x, got=%x\n",
		    rid, le16toh(ltbuf[1]));
		return EIO;
	}
	len = (le16toh(ltbuf[0]) - 1) * 2;	/* already got rid */
	if (*buflenp < len) {
		device_printf(sc->sc_dev, "record buffer is too small, "
		    "rid=%x, size=%d, len=%d\n",
		    rid, *buflenp, len);
		return ENOSPC;
	}
	*buflenp = len;
	return wi_read_bap(sc, rid, sizeof(ltbuf), buf, len);
}

/*
 * Write configuration record 'rid' from 'buf' (buflen bytes).  Builds
 * the two-word record header, writes header then payload through the
 * BAP, and finally issues an ACCESS/WRITE command to commit the record.
 */
static int
wi_write_rid(struct wi_softc *sc, int rid, void *buf, int buflen)
{
	int error;
	u_int16_t ltbuf[2];

	ltbuf[0] = htole16((buflen + 1) / 2 + 1);	/* includes rid */
	ltbuf[1] = htole16(rid);

	error = wi_write_bap(sc, rid, 0, ltbuf, sizeof(ltbuf));
	if (error)
		return error;
	error = wi_write_bap(sc, rid, sizeof(ltbuf), buf, buflen);
	if (error)
		return error;

	return wi_cmd(sc, WI_CMD_ACCESS | WI_ACCESS_WRITE, rid, 0, 0);
}

/*
 * net80211 state-change hook.  Only INIT and RUN are passed through to
 * the stock net80211 handler (sc_newstate); the intermediate states are
 * short-circuited (see the comment below).
 */
static int
wi_newstate(struct ieee80211com *ic, enum ieee80211_state nstate, int arg)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct wi_softc *sc = ifp->if_softc;
	struct ieee80211_node *ni;
	int buflen;
	u_int16_t val;
	struct wi_ssid ssid;
	u_int8_t old_bssid[IEEE80211_ADDR_LEN];

	DPRINTF(("%s: %s -> %s\n", __func__,
	    ieee80211_state_name[ic->ic_state],
	    ieee80211_state_name[nstate]));

	/*
	 * Internal to the driver the INIT and RUN states are used
	 * so bypass the net80211 state machine for other states.
	 * Beware however that this requires us to do the net80211 state
	 * management that otherwise would be handled for us.
	 */
	switch (nstate) {
	case IEEE80211_S_INIT:
		sc->sc_flags &= ~WI_FLAGS_OUTRANGE;
		return (*sc->sc_newstate)(ic, nstate, arg);

	case IEEE80211_S_SCAN:
	case IEEE80211_S_AUTH:
	case IEEE80211_S_ASSOC:
		ic->ic_state = nstate;	/* NB: skip normal ieee80211 handling */
		break;

	case IEEE80211_S_RUN:
		ni = ic->ic_bss;
		sc->sc_flags &= ~WI_FLAGS_OUTRANGE;
		/* Pull the current BSSID and channel back from the firmware. */
		buflen = IEEE80211_ADDR_LEN;
		IEEE80211_ADDR_COPY(old_bssid, ni->ni_bssid);
		wi_read_rid(sc, WI_RID_CURRENT_BSSID, ni->ni_bssid, &buflen);
		IEEE80211_ADDR_COPY(ni->ni_macaddr, ni->ni_bssid);
		buflen = sizeof(val);
		wi_read_rid(sc, WI_RID_CURRENT_CHAN, &val, &buflen);
		/* XXX validate channel */
		ni->ni_chan = &ic->ic_channels[le16toh(val)];
		ic->ic_ibss_chan = ni->ni_chan;
#if NBPFILTER > 0
		/* Keep the radiotap headers in sync with the new channel. */
		sc->sc_tx_th.wt_chan_freq = sc->sc_rx_th.wr_chan_freq =
			htole16(ni->ni_chan->ic_freq);
		sc->sc_tx_th.wt_chan_flags = sc->sc_rx_th.wr_chan_flags =
			htole16(ni->ni_chan->ic_flags);
#endif
		if (ic->ic_opmode != IEEE80211_M_HOSTAP) {
			/*
			 * XXX hack; unceremoniously clear
			 * IEEE80211_F_DROPUNENC when operating with
			 * wep enabled so we don't drop unencoded frames
			 * at the 802.11 layer. This is necessary because
			 * we must strip the WEP bit from the 802.11 header
			 * before passing frames to ieee80211_input because
			 * the card has already stripped the WEP crypto
			 * header from the packet.
			 */
			if (ic->ic_flags & IEEE80211_F_PRIVACY)
				ic->ic_flags &= ~IEEE80211_F_DROPUNENC;
			/* XXX check return value */
			buflen = sizeof(ssid);
			wi_read_rid(sc, WI_RID_CURRENT_SSID, &ssid, &buflen);
			ni->ni_esslen = le16toh(ssid.wi_len);
			if (ni->ni_esslen > IEEE80211_NWID_LEN)
				ni->ni_esslen = IEEE80211_NWID_LEN;	/*XXX*/
			memcpy(ni->ni_essid, ssid.wi_ssid, ni->ni_esslen);
		}
		return (*sc->sc_newstate)(ic, nstate, arg);
	}
	return 0;
}

/*
 * Kick off an access-point scan, using whichever mechanism the
 * installed firmware flavor supports (Lucent inquire, Intersil scan
 * request record, Symbol broadcast scan record).  On success the scan
 * watchdog (sc_scan_timer / if_timer) is armed so results get reaped.
 * Returns 0, ENXIO if the interface is down, or a wi_write_rid error.
 */
static int
wi_scan_ap(struct wi_softc *sc, u_int16_t chanmask, u_int16_t txrate)
{
	int error = 0;
	u_int16_t val[2];

	if (!sc->sc_enabled)
		return ENXIO;
	switch (sc->sc_firmware_type) {
	case WI_LUCENT:
		(void)wi_cmd(sc, WI_CMD_INQUIRE, WI_INFO_SCAN_RESULTS, 0, 0);
		break;
	case WI_INTERSIL:
		val[0] = htole16(chanmask);	/* channel */
		val[1] = htole16(txrate);	/* tx rate */
		error = wi_write_rid(sc, WI_RID_SCAN_REQ, val, sizeof(val));
		break;
	case WI_SYMBOL:
		/*
		 * XXX only supported on 3.x ?
		 */
		val[0] = BSCAN_BCAST | BSCAN_ONETIME;
		error = wi_write_rid(sc, WI_RID_BCAST_SCAN_REQ,
		    val, sizeof(val[0]));
		break;
	}
	if (error == 0) {
		sc->sc_scan_timer = WI_SCAN_WAIT;
		sc->sc_ifp->if_timer = 1;
		DPRINTF(("wi_scan_ap: start scanning, "
		    "chamask 0x%x txrate 0x%x\n", chanmask, txrate));
	}
	return error;
}

/*
 * Parse the scan results delivered in frame 'fid' ('cnt' words of
 * payload) into the sc_aps[] cache.  The per-entry record size and the
 * presence of a Prism2 scan header depend on the firmware flavor.
 */
static void
wi_scan_result(struct wi_softc *sc, int fid, int cnt)
{
#define N(a)	(sizeof (a) / sizeof (a[0]))
	int i, naps, off, szbuf;
	struct wi_scan_header ws_hdr;	/* Prism2 header */
	struct wi_scan_data_p2 ws_dat;	/* Prism2 scantable*/
	struct wi_apinfo *ap;

	off = sizeof(u_int16_t) * 2;
	memset(&ws_hdr, 0, sizeof(ws_hdr));
	switch (sc->sc_firmware_type) {
	case WI_INTERSIL:
		wi_read_bap(sc, fid, off, &ws_hdr, sizeof(ws_hdr));
		off += sizeof(ws_hdr);
		szbuf = sizeof(struct wi_scan_data_p2);
		break;
	case WI_SYMBOL:
		szbuf = sizeof(struct wi_scan_data_p2) + 6;
		break;
	case WI_LUCENT:
		szbuf = sizeof(struct wi_scan_data);
		break;
	default:
		device_printf(sc->sc_dev,
		    "wi_scan_result: unknown firmware type %u\n",
		    sc->sc_firmware_type);
		naps = 0;
		goto done;
	}
	/* Number of whole result records that fit in the payload. */
	naps = (cnt * 2 + 2 - off) / szbuf;
if (naps > N(sc->sc_aps)) naps = N(sc->sc_aps); sc->sc_naps = naps; /* Read Data */ ap = sc->sc_aps; memset(&ws_dat, 0, sizeof(ws_dat)); for (i = 0; i < naps; i++, ap++) { wi_read_bap(sc, fid, off, &ws_dat, (sizeof(ws_dat) < szbuf ? sizeof(ws_dat) : szbuf)); DPRINTF2(("wi_scan_result: #%d: off %d bssid %s\n", i, off, ether_sprintf(ws_dat.wi_bssid))); off += szbuf; ap->scanreason = le16toh(ws_hdr.wi_reason); memcpy(ap->bssid, ws_dat.wi_bssid, sizeof(ap->bssid)); ap->channel = le16toh(ws_dat.wi_chid); ap->signal = le16toh(ws_dat.wi_signal); ap->noise = le16toh(ws_dat.wi_noise); ap->quality = ap->signal - ap->noise; ap->capinfo = le16toh(ws_dat.wi_capinfo); ap->interval = le16toh(ws_dat.wi_interval); ap->rate = le16toh(ws_dat.wi_rate); ap->namelen = le16toh(ws_dat.wi_namelen); if (ap->namelen > sizeof(ap->name)) ap->namelen = sizeof(ap->name); memcpy(ap->name, ws_dat.wi_name, ap->namelen); } done: /* Done scanning */ sc->sc_scan_timer = 0; DPRINTF(("wi_scan_result: scan complete: ap %d\n", naps)); #undef N } static void wi_dump_pkt(struct wi_frame *wh, struct ieee80211_node *ni, int rssi) { ieee80211_dump_pkt((u_int8_t *) &wh->wi_whdr, sizeof(wh->wi_whdr), ni ? 
ni->ni_rates.rs_rates[ni->ni_txrate] & IEEE80211_RATE_VAL : -1, rssi); printf(" status 0x%x rx_tstamp1 %u rx_tstamp0 0x%u rx_silence %u\n", le16toh(wh->wi_status), le16toh(wh->wi_rx_tstamp1), le16toh(wh->wi_rx_tstamp0), wh->wi_rx_silence); printf(" rx_signal %u rx_rate %u rx_flow %u\n", wh->wi_rx_signal, wh->wi_rx_rate, wh->wi_rx_flow); printf(" tx_rtry %u tx_rate %u tx_ctl 0x%x dat_len %u\n", wh->wi_tx_rtry, wh->wi_tx_rate, le16toh(wh->wi_tx_ctl), le16toh(wh->wi_dat_len)); printf(" ehdr dst %6D src %6D type 0x%x\n", wh->wi_ehdr.ether_dhost, ":", wh->wi_ehdr.ether_shost, ":", wh->wi_ehdr.ether_type); } int wi_alloc(device_t dev, int rid) { struct wi_softc *sc = device_get_softc(dev); if (sc->wi_bus_type != WI_BUS_PCI_NATIVE) { sc->iobase_rid = rid; sc->iobase = bus_alloc_resource(dev, SYS_RES_IOPORT, &sc->iobase_rid, 0, ~0, (1 << 6), rman_make_alignment_flags(1 << 6) | RF_ACTIVE); if (!sc->iobase) { device_printf(dev, "No I/O space?!\n"); return (ENXIO); } sc->wi_io_addr = rman_get_start(sc->iobase); sc->wi_btag = rman_get_bustag(sc->iobase); sc->wi_bhandle = rman_get_bushandle(sc->iobase); } else { sc->mem_rid = rid; sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, RF_ACTIVE); if (!sc->mem) { device_printf(dev, "No Mem space on prism2.5?\n"); return (ENXIO); } sc->wi_btag = rman_get_bustag(sc->mem); sc->wi_bhandle = rman_get_bushandle(sc->mem); } sc->irq_rid = 0; sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE | ((sc->wi_bus_type == WI_BUS_PCCARD) ? 
0 : RF_SHAREABLE)); if (!sc->irq) { wi_free(dev); device_printf(dev, "No irq?!\n"); return (ENXIO); } sc->sc_dev = dev; sc->sc_unit = device_get_unit(dev); return (0); } void wi_free(device_t dev) { struct wi_softc *sc = device_get_softc(dev); if (sc->iobase != NULL) { bus_release_resource(dev, SYS_RES_IOPORT, sc->iobase_rid, sc->iobase); sc->iobase = NULL; } if (sc->irq != NULL) { bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq); sc->irq = NULL; } if (sc->mem != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem); sc->mem = NULL; } return; } static int wi_get_debug(struct wi_softc *sc, struct wi_req *wreq) { int error = 0; wreq->wi_len = 1; switch (wreq->wi_type) { case WI_DEBUG_SLEEP: wreq->wi_len++; wreq->wi_val[0] = sc->wi_debug.wi_sleep; break; case WI_DEBUG_DELAYSUPP: wreq->wi_len++; wreq->wi_val[0] = sc->wi_debug.wi_delaysupp; break; case WI_DEBUG_TXSUPP: wreq->wi_len++; wreq->wi_val[0] = sc->wi_debug.wi_txsupp; break; case WI_DEBUG_MONITOR: wreq->wi_len++; wreq->wi_val[0] = sc->wi_debug.wi_monitor; break; case WI_DEBUG_LEDTEST: wreq->wi_len += 3; wreq->wi_val[0] = sc->wi_debug.wi_ledtest; wreq->wi_val[1] = sc->wi_debug.wi_ledtest_param0; wreq->wi_val[2] = sc->wi_debug.wi_ledtest_param1; break; case WI_DEBUG_CONTTX: wreq->wi_len += 2; wreq->wi_val[0] = sc->wi_debug.wi_conttx; wreq->wi_val[1] = sc->wi_debug.wi_conttx_param0; break; case WI_DEBUG_CONTRX: wreq->wi_len++; wreq->wi_val[0] = sc->wi_debug.wi_contrx; break; case WI_DEBUG_SIGSTATE: wreq->wi_len += 2; wreq->wi_val[0] = sc->wi_debug.wi_sigstate; wreq->wi_val[1] = sc->wi_debug.wi_sigstate_param0; break; case WI_DEBUG_CONFBITS: wreq->wi_len += 2; wreq->wi_val[0] = sc->wi_debug.wi_confbits; wreq->wi_val[1] = sc->wi_debug.wi_confbits_param0; break; default: error = EIO; break; } return (error); } static int wi_set_debug(struct wi_softc *sc, struct wi_req *wreq) { int error = 0; u_int16_t cmd, param0 = 0, param1 = 0; switch (wreq->wi_type) { case WI_DEBUG_RESET: case 
WI_DEBUG_INIT: case WI_DEBUG_CALENABLE: break; case WI_DEBUG_SLEEP: sc->wi_debug.wi_sleep = 1; break; case WI_DEBUG_WAKE: sc->wi_debug.wi_sleep = 0; break; case WI_DEBUG_CHAN: param0 = wreq->wi_val[0]; break; case WI_DEBUG_DELAYSUPP: sc->wi_debug.wi_delaysupp = 1; break; case WI_DEBUG_TXSUPP: sc->wi_debug.wi_txsupp = 1; break; case WI_DEBUG_MONITOR: sc->wi_debug.wi_monitor = 1; break; case WI_DEBUG_LEDTEST: param0 = wreq->wi_val[0]; param1 = wreq->wi_val[1]; sc->wi_debug.wi_ledtest = 1; sc->wi_debug.wi_ledtest_param0 = param0; sc->wi_debug.wi_ledtest_param1 = param1; break; case WI_DEBUG_CONTTX: param0 = wreq->wi_val[0]; sc->wi_debug.wi_conttx = 1; sc->wi_debug.wi_conttx_param0 = param0; break; case WI_DEBUG_STOPTEST: sc->wi_debug.wi_delaysupp = 0; sc->wi_debug.wi_txsupp = 0; sc->wi_debug.wi_monitor = 0; sc->wi_debug.wi_ledtest = 0; sc->wi_debug.wi_ledtest_param0 = 0; sc->wi_debug.wi_ledtest_param1 = 0; sc->wi_debug.wi_conttx = 0; sc->wi_debug.wi_conttx_param0 = 0; sc->wi_debug.wi_contrx = 0; sc->wi_debug.wi_sigstate = 0; sc->wi_debug.wi_sigstate_param0 = 0; break; case WI_DEBUG_CONTRX: sc->wi_debug.wi_contrx = 1; break; case WI_DEBUG_SIGSTATE: param0 = wreq->wi_val[0]; sc->wi_debug.wi_sigstate = 1; sc->wi_debug.wi_sigstate_param0 = param0; break; case WI_DEBUG_CONFBITS: param0 = wreq->wi_val[0]; param1 = wreq->wi_val[1]; sc->wi_debug.wi_confbits = param0; sc->wi_debug.wi_confbits_param0 = param1; break; default: error = EIO; break; } if (error) return (error); cmd = WI_CMD_DEBUG | (wreq->wi_type << 8); error = wi_cmd(sc, cmd, param0, param1, 0); return (error); } #if __FreeBSD_version >= 500000 /* * Special routines to download firmware for Symbol CF card. * XXX: This should be modified generic into any PRISM-2 based card. 
*/ #define WI_SBCF_PDIADDR 0x3100 /* unaligned load little endian */ #define GETLE32(p) ((p)[0] | ((p)[1]<<8) | ((p)[2]<<16) | ((p)[3]<<24)) #define GETLE16(p) ((p)[0] | ((p)[1]<<8)) int wi_symbol_load_firm(struct wi_softc *sc, const void *primsym, int primlen, const void *secsym, int seclen) { uint8_t ebuf[256]; int i; /* load primary code and run it */ wi_symbol_set_hcr(sc, WI_HCR_EEHOLD); if (wi_symbol_write_firm(sc, primsym, primlen, NULL, 0)) return EIO; wi_symbol_set_hcr(sc, WI_HCR_RUN); for (i = 0; ; i++) { if (i == 10) return ETIMEDOUT; tsleep(sc, PWAIT, "wiinit", 1); if (CSR_READ_2(sc, WI_CNTL) == WI_CNTL_AUX_ENA_STAT) break; /* write the magic key value to unlock aux port */ CSR_WRITE_2(sc, WI_PARAM0, WI_AUX_KEY0); CSR_WRITE_2(sc, WI_PARAM1, WI_AUX_KEY1); CSR_WRITE_2(sc, WI_PARAM2, WI_AUX_KEY2); CSR_WRITE_2(sc, WI_CNTL, WI_CNTL_AUX_ENA_CNTL); } /* issue read EEPROM command: XXX copied from wi_cmd() */ CSR_WRITE_2(sc, WI_PARAM0, 0); CSR_WRITE_2(sc, WI_PARAM1, 0); CSR_WRITE_2(sc, WI_PARAM2, 0); CSR_WRITE_2(sc, WI_COMMAND, WI_CMD_READEE); for (i = 0; i < WI_TIMEOUT; i++) { if (CSR_READ_2(sc, WI_EVENT_STAT) & WI_EV_CMD) break; DELAY(1); } CSR_WRITE_2(sc, WI_EVENT_ACK, WI_EV_CMD); CSR_WRITE_2(sc, WI_AUX_PAGE, WI_SBCF_PDIADDR / WI_AUX_PGSZ); CSR_WRITE_2(sc, WI_AUX_OFFSET, WI_SBCF_PDIADDR % WI_AUX_PGSZ); CSR_READ_MULTI_STREAM_2(sc, WI_AUX_DATA, (uint16_t *)ebuf, sizeof(ebuf) / 2); if (GETLE16(ebuf) > sizeof(ebuf)) return EIO; if (wi_symbol_write_firm(sc, secsym, seclen, ebuf + 4, GETLE16(ebuf))) return EIO; return 0; } static int wi_symbol_write_firm(struct wi_softc *sc, const void *buf, int buflen, const void *ebuf, int ebuflen) { const uint8_t *p, *ep, *q, *eq; char *tp; uint32_t addr, id, eid; int i, len, elen, nblk, pdrlen; /* * Parse the header of the firmware image. 
*/ p = buf; ep = p + buflen; while (p < ep && *p++ != ' '); /* FILE: */ while (p < ep && *p++ != ' '); /* filename */ while (p < ep && *p++ != ' '); /* type of the firmware */ nblk = strtoul(p, &tp, 10); p = tp; pdrlen = strtoul(p + 1, &tp, 10); p = tp; while (p < ep && *p++ != 0x1a); /* skip rest of header */ /* * Block records: address[4], length[2], data[length]; */ for (i = 0; i < nblk; i++) { addr = GETLE32(p); p += 4; len = GETLE16(p); p += 2; CSR_WRITE_2(sc, WI_AUX_PAGE, addr / WI_AUX_PGSZ); CSR_WRITE_2(sc, WI_AUX_OFFSET, addr % WI_AUX_PGSZ); CSR_WRITE_MULTI_STREAM_2(sc, WI_AUX_DATA, (const uint16_t *)p, len / 2); p += len; } /* * PDR: id[4], address[4], length[4]; */ for (i = 0; i < pdrlen; ) { id = GETLE32(p); p += 4; i += 4; addr = GETLE32(p); p += 4; i += 4; len = GETLE32(p); p += 4; i += 4; /* replace PDR entry with the values from EEPROM, if any */ for (q = ebuf, eq = q + ebuflen; q < eq; q += elen * 2) { elen = GETLE16(q); q += 2; eid = GETLE16(q); q += 2; elen--; /* elen includes eid */ if (eid == 0) break; if (eid != id) continue; CSR_WRITE_2(sc, WI_AUX_PAGE, addr / WI_AUX_PGSZ); CSR_WRITE_2(sc, WI_AUX_OFFSET, addr % WI_AUX_PGSZ); CSR_WRITE_MULTI_STREAM_2(sc, WI_AUX_DATA, (const uint16_t *)q, len / 2); break; } } return 0; } static int wi_symbol_set_hcr(struct wi_softc *sc, int mode) { uint16_t hcr; CSR_WRITE_2(sc, WI_COR, WI_COR_RESET); tsleep(sc, PWAIT, "wiinit", 1); hcr = CSR_READ_2(sc, WI_HCR); hcr = (hcr & WI_HCR_4WIRE) | (mode & ~WI_HCR_4WIRE); CSR_WRITE_2(sc, WI_HCR, hcr); tsleep(sc, PWAIT, "wiinit", 1); CSR_WRITE_2(sc, WI_COR, WI_COR_IOMODE); tsleep(sc, PWAIT, "wiinit", 1); return 0; } #endif Index: stable/6/sys/dev/wl/if_wl.c =================================================================== --- stable/6/sys/dev/wl/if_wl.c (revision 149421) +++ stable/6/sys/dev/wl/if_wl.c (revision 149422) @@ -1,2648 +1,2650 @@ /*- * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following 
conditions * are met: * 1. Redistributions of source code must retain all copyright * notices, this list of conditions and the following disclaimer. * 2. The names of the authors may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ /* * if_wl.c - original MACH, then BSDI ISA wavelan driver * ported to mach by Anders Klemets * to BSDI by Robert Morris * to FreeBSD by Jim Binkley * to FreeBSD 2.2+ by Michael Smith * * 2.2 update: * Changed interface to match 2.1-2.2 differences. * Implement IRQ selection logic in wlprobe() * Implement PSA updating. * Pruned heading comments for relevance. * Ripped out all the 'interface counters' cruft. * Cut the missing-interrupt timer back to 100ms. * 2.2.1 update: * now supports all multicast mode (mrouted will work), * but unfortunately must do that by going into promiscuous mode * NWID sysctl added so that normally promiscuous mode is NWID-specific * but can be made NWID-inspecific * 7/14/97 jrb * * Work done: * Ported to FreeBSD, got promiscuous mode working with bpfs, * and rewired timer routine. The i82586 will hang occasionally on output * and the watchdog timer will kick it if so and log an entry. * 2 second timeout there. 
Apparently the chip loses an interrupt. * Code borrowed from if_ie.c for watchdog timer. * * The wavelan card is a 2mbit radio modem that emulates ethernet; * i.e., it uses MAC addresses. This should not be a surprise since * it uses an ethernet controller as a major hw item. * It can broadcast, unicast or apparently multicast in a base cell * using an omni-directional antennae that is * about 800 feet around the base cell barring walls and metal. * With directional antennae, it can be used point to point over a mile * or so apparently (haven't tried that). * * There are ISA and pcmcia versions (not supported by this code). * The ISA card has an Intel 82586 lan controller on it. It consists * of 2 pieces of hw, the lan controller (intel) and a radio-modem. * The latter has an extra set of controller registers that has nothing * to do with the i82586 and allows setting and monitoring of radio * signal strength, etc. There is a nvram area called the PSA that * contains a number of setup variables including the IRQ and so-called * NWID or Network ID. The NWID must be set the same for all radio * cards to communicate (unless you are using the ATT/NCR roaming feature * with their access points. There is no support for that here. Roaming * involves a link-layer beacon sent out from the access points. End * stations monitor the signal strength and only use the strongest * access point). This driver assumes that the base ISA port, IRQ, * and NWID are first set in nvram via the dos-side "instconf.exe" utility * supplied with the card. This driver takes the ISA port from * the kernel configuration setup, and then determines the IRQ either * from the kernel config (if an explicit IRQ is set) or from the * PSA on the card if not. * The hw also magically just uses the IRQ set in the nvram. * The NWID is used magically as well by the radio-modem * to determine which packets to keep or throw out. * * sample config: * * device wl0 at isa? port 0x300 net irq ? * * Ifdefs: * 1. 
WLDEBUG. (off) - if turned on enables IFF_DEBUG set via ifconfig debug * 2. MULTICAST (on) - turned on and works up to and including mrouted * 3. WLCACHE (off) - define to turn on a signal strength * (and other metric) cache that is indexed by sender MAC address. * Apps can read this out to learn the remote signal strength of a * sender. Note that it has a switch so that it only stores * broadcast/multicast senders but it could be set to store unicast * too only. Size is hardwired in if_wl_wavelan.h * * one further note: promiscuous mode is a curious thing. In this driver, * promiscuous mode apparently CAN catch ALL packets and ignore the NWID * setting. This is probably more useful in a sense (for snoopers) if * you are interested in all traffic as opposed to if you are interested * in just your own. There is a driver specific sysctl to turn promiscuous * from just promiscuous to wildly promiscuous... * * This driver also knows how to load the synthesizers in the 2.4 Gz * ISA Half-card, Product number 847647476 (USA/FCC IEEE Channel set). * This product consists of a "mothercard" that contains the 82586, * NVRAM that holds the PSA, and the ISA-buss interface custom ASIC. * The radio transceiver is a "daughtercard" called the WaveMODEM which * connects to the mothercard through two single-inline connectors: a * 20-pin connector provides DC-power and modem signals, and a 3-pin * connector which exports the antenna connection. The code herein * loads the receive and transmit synthesizers and the corresponding * transmitter output power value from an EEPROM controlled through * additional registers via the MMC. The EEPROM address selected * are those whose values are preset by the DOS utility programs * provided with the product, and this provides compatible operation * with the DOS Packet Driver software. A future modification will * add the necessary functionality to this driver and to the wlconfig * utility to completely replace the DOS Configuration Utilities. 
* The 2.4 Gz WaveMODEM is described in document number 407-024692/E, * and is available through Lucent Technologies OEM supply channels. * --RAB 1997/06/08. */ #define MULTICAST 1 /* * Olivetti PC586 Mach Ethernet driver v1.0 * Copyright Ing. C. Olivetti & C. S.p.A. 1988, 1989 * All rights reserved. * */ /* Copyright 1988, 1989 by Olivetti Advanced Technology Center, Inc., Cupertino, California. All Rights Reserved Permission to use, copy, modify, and distribute this software and its documentation for any purpose and without fee is hereby granted, provided that the above copyright notice appears in all copies and that both the copyright notice and this permission notice appear in supporting documentation, and that the name of Olivetti not be used in advertising or publicity pertaining to distribution of the software without specific, written prior permission. OLIVETTI DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL OLIVETTI BE LIABLE FOR ANY SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* Copyright 1988, 1989 by Intel Corporation, Santa Clara, California. All Rights Reserved Permission to use, copy, modify, and distribute this software and its documentation for any purpose and without fee is hereby granted, provided that the above copyright notice appears in all copies and that both the copyright notice and this permission notice appear in supporting documentation, and that the name of Intel not be used in advertising or publicity pertaining to distribution of the software without specific, written prior permission. 
INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT, NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /* * NOTE: * by rvb: * 1. The best book on the 82586 is: * LAN Components User's Manual by Intel * The copy I found was dated 1984. This really tells you * what the state machines are doing * 2. In the current design, we only do one write at a time, * though the hardware is capable of chaining and possibly * even batching. The problem is that we only make one * transmit buffer available in sram space. */ #include "opt_wavelan.h" #include "opt_inet.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #endif #include #include /* was 1000 in original, fed to DELAY(x) */ #define DELAYCONST 1000 #include /* Definitions for the Intel chip */ #include #include static char t_packet[ETHERMTU + sizeof(struct ether_header) + sizeof(long)]; struct wl_softc{ struct ifnet *ifp; u_char psa[0x40]; u_char nwid[2]; /* current radio modem nwid */ short base; short unit; int flags; int tbusy; /* flag to determine if xmit is busy */ u_short begin_fd; u_short end_fd; u_short end_rbd; u_short hacr; /* latest host adapter CR command */ short mode; u_char chan24; /* 2.4 Gz: channel number/EEPROM Area # */ u_short freq24; /* 2.4 Gz: resulting frequency */ int rid_ioport; int rid_irq; struct resource *res_ioport; struct resource *res_irq; void *intr_cookie; bus_space_tag_t bt; bus_space_handle_t bh; struct mtx wl_mtx; struct callout_handle watchdog_ch; #ifdef WLCACHE int 
w_sigitems; /* number of cached entries */ /* array of cache entries */ struct w_sigcache w_sigcache[ MAXCACHEITEMS ]; int w_nextcache; /* next free cache entry */ int w_wrapindex; /* next "free" cache entry */ #endif }; #define WL_LOCK(_sc) mtx_lock(&(_sc)->wl_mtx) #define WL_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->wl_mtx, MA_OWNED) #define WL_UNLOCK(_sc) mtx_unlock(&(_sc)->wl_mtx) static int wlprobe(device_t); static int wlattach(device_t); static int wldetach(device_t); static device_method_t wl_methods[] = { DEVMETHOD(device_probe, wlprobe), DEVMETHOD(device_attach, wlattach), DEVMETHOD(device_detach, wldetach), { 0, 0} }; static driver_t wl_driver = { "wl", wl_methods, sizeof (struct wl_softc) }; devclass_t wl_devclass; DRIVER_MODULE(wl, isa, wl_driver, wl_devclass, 0, 0); MODULE_DEPEND(wl, isa, 1, 1, 1); MODULE_DEPEND(wl, ether, 1, 1, 1); static struct isa_pnp_id wl_ids[] = { {0, NULL} }; /* * XXX The Wavelan appears to be prone to dropping stuff if you talk to * it too fast. This disgusting hack inserts a delay after each packet * is queued which helps avoid this behaviour on fast systems. */ static int wl_xmit_delay = 250; SYSCTL_INT(_machdep, OID_AUTO, wl_xmit_delay, CTLFLAG_RW, &wl_xmit_delay, 0, ""); /* * not XXX, but ZZZ (bizarre). * promiscuous mode can be toggled to ignore NWIDs. By default, * it does not. Caution should be exercised about combining * this mode with IFF_ALLMULTI which puts this driver in * promiscuous mode. 
*/ static int wl_ignore_nwid = 0; SYSCTL_INT(_machdep, OID_AUTO, wl_ignore_nwid, CTLFLAG_RW, &wl_ignore_nwid, 0, ""); /* * Emit diagnostics about transmission problems */ static int xmt_watch = 0; SYSCTL_INT(_machdep, OID_AUTO, wl_xmit_watch, CTLFLAG_RW, &xmt_watch, 0, ""); /* * Collect SNR statistics */ static int gathersnr = 0; SYSCTL_INT(_machdep, OID_AUTO, wl_gather_snr, CTLFLAG_RW, &gathersnr, 0, ""); static int wl_allocate_resources(device_t device); static int wl_deallocate_resources(device_t device); static void wlstart(struct ifnet *ifp); static void wlinit(void *xsc); static int wlioctl(struct ifnet *ifp, u_long cmd, caddr_t data); static timeout_t wlwatchdog; static void wlintr(void *arg); static void wlxmt(struct wl_softc *sc, struct mbuf *m); static int wldiag(struct wl_softc *sc); static int wlconfig(struct wl_softc *sc); static int wlcmd(struct wl_softc *sc, char *str); static void wlmmcstat(struct wl_softc *sc); static u_short wlbldru(struct wl_softc *sc); static u_short wlmmcread(u_int base, u_short reg); static void wlinitmmc(struct wl_softc *sc); static int wlhwrst(struct wl_softc *sc); static void wlrustrt(struct wl_softc *sc); static void wlbldcu(struct wl_softc *sc); static int wlack(struct wl_softc *sc); static int wlread(struct wl_softc *sc, u_short fd_p); static void getsnr(struct wl_softc *sc); static void wlrcv(struct wl_softc *sc); static int wlrequeue(struct wl_softc *sc, u_short fd_p); static void wlsftwsleaze(u_short *countp, u_char **mb_pp, struct mbuf **tm_pp, struct wl_softc *sc); static void wlhdwsleaze(u_short *countp, u_char **mb_pp, struct mbuf **tm_pp, struct wl_softc *sc); #ifdef WLDEBUG static void wltbd(struct wl_softc *sc); #endif static void wlgetpsa(int base, u_char *buf); static void wlsetpsa(struct wl_softc *sc); static u_short wlpsacrc(u_char *buf); static void wldump(struct wl_softc *sc); #ifdef WLCACHE static void wl_cache_store(struct wl_softc *, int, struct ether_header *, struct mbuf *); static void 
wl_cache_zero(struct wl_softc *sc); #endif /* array for maping irq numbers to values for the irq parameter register */ static int irqvals[16] = { 0, 0, 0, 0x01, 0x02, 0x04, 0, 0x08, 0, 0, 0x10, 0x20, 0x40, 0, 0, 0x80 }; /* * wlprobe: * * This function "probes" or checks for the WaveLAN board on the bus to * see if it is there. As far as I can tell, the best break between this * routine and the attach code is to simply determine whether the board * is configured in properly. Currently my approach to this is to write * and read a word from the SRAM on the board being probed. If the word * comes back properly then we assume the board is there. The config * code expects to see a successful return from the probe routine before * attach will be called. * * input : address device is mapped to, and unit # being checked * output : a '1' is returned if the board exists, and a 0 otherwise * */ static int wlprobe(device_t device) { struct wl_softc *sc; short base; char *str = "wl%d: board out of range [0..%d]\n"; u_char inbuf[100]; unsigned long junk, oldpri, sirq; int error, irq; error = ISA_PNP_PROBE(device_get_parent(device), device, wl_ids); if (error == ENXIO || error == 0) return (error); sc = device_get_softc(device); error = wl_allocate_resources(device); if (error) goto errexit; base = rman_get_start(sc->res_ioport); /* TBD. not true. 
* regular CMD() will not work, since no softc yet */ #define PCMD(base, hacr) outw((base), (hacr)) oldpri = splimp(); PCMD(base, HACR_RESET); /* reset the board */ DELAY(DELAYCONST); /* >> 4 clocks at 6MHz */ PCMD(base, HACR_RESET); /* reset the board */ DELAY(DELAYCONST); /* >> 4 clocks at 6MHz */ splx(oldpri); /* clear reset command and set PIO#1 in autoincrement mode */ PCMD(base, HACR_DEFAULT); PCMD(base, HACR_DEFAULT); outw(PIOR1(base), 0); /* go to beginning of RAM */ outsw(PIOP1(base), str, strlen(str)/2+1); /* write string */ outw(PIOR1(base), 0); /* rewind */ insw(PIOP1(base), inbuf, strlen(str)/2+1); /* read result */ if (bcmp(str, inbuf, strlen(str))) { error = ENXIO; goto errexit; } sc->chan24 = 0; /* 2.4 Gz: config channel */ sc->freq24 = 0; /* 2.4 Gz: frequency */ /* read the PSA from the board into temporary storage */ wlgetpsa(base, inbuf); /* We read the IRQ value from the PSA on the board. */ for (irq = 15; irq >= 0; irq--) if (irqvals[irq] == inbuf[WLPSA_IRQNO]) break; if ((irq == 0) || (irqvals[irq] == 0)){ printf("wl%d: PSA corrupt (invalid IRQ value)\n", device_get_unit(device)); } else { /* * If the IRQ requested by the PSA is already claimed by another * device, the board won't work, but the user can still access the * driver to change the IRQ. */ if (bus_get_resource(device, SYS_RES_IRQ, 0, &sirq, &junk)) goto errexit; if (irq != (int)sirq) printf("wl%d: board is configured for interrupt %d\n", device_get_unit(device), irq); } wl_deallocate_resources(device); return (0); errexit: wl_deallocate_resources(device); return (error); } /* * wlattach: * * This function attaches a WaveLAN board to the "system". The rest of * runtime structures are initialized here (this routine is called after * a successful probe of the board). Once the ethernet address is read * and stored, the board's ifnet structure is attached and readied. 
* * input : isa_dev structure setup in autoconfig * output : board structs and ifnet is setup * */ static int wlattach(device_t device) { struct wl_softc *sc; short base; int error, i, j; int unit; struct ifnet *ifp; u_char eaddr[6]; sc = device_get_softc(device); ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(device, "can not if_alloc()\n"); return (ENOSPC); } mtx_init(&sc->wl_mtx, device_get_nameunit(device), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); error = wl_allocate_resources(device); if (error) { wl_deallocate_resources(device); return (ENXIO); } base = rman_get_start(sc->res_ioport); unit = device_get_unit(device); #ifdef WLDEBUG printf("wlattach: base %x, unit %d\n", base, unit); #endif sc->base = base; sc->unit = unit; sc->flags = 0; sc->mode = 0; sc->hacr = HACR_RESET; callout_handle_init(&sc->watchdog_ch); CMD(sc); /* reset the board */ DELAY(DELAYCONST); /* >> 4 clocks at 6MHz */ /* clear reset command and set PIO#2 in parameter access mode */ sc->hacr = (HACR_DEFAULT & ~HACR_16BITS); CMD(sc); /* Read the PSA from the board for our later reference */ wlgetpsa(base, sc->psa); /* fetch NWID */ sc->nwid[0] = sc->psa[WLPSA_NWID]; sc->nwid[1] = sc->psa[WLPSA_NWID+1]; /* fetch MAC address - decide which one first */ if (sc->psa[WLPSA_MACSEL] & 1) j = WLPSA_LOCALMAC; else j = WLPSA_UNIMAC; for (i=0; i < WAVELAN_ADDR_SIZE; ++i) eaddr[i] = sc->psa[j + i]; /* enter normal 16 bit mode operation */ sc->hacr = HACR_DEFAULT; CMD(sc); wlinitmmc(sc); outw(PIOR1(base), OFFSET_SCB + 8); /* address of scb_crcerrs */ outw(PIOP1(base), 0); /* clear scb_crcerrs */ outw(PIOP1(base), 0); /* clear scb_alnerrs */ outw(PIOP1(base), 0); /* clear scb_rscerrs */ outw(PIOP1(base), 0); /* clear scb_ovrnerrs */ ifp->if_softc = sc; ifp->if_mtu = WAVELAN_MTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX; #ifdef WLDEBUG ifp->if_flags |= IFF_DEBUG; #endif #if MULTICAST ifp->if_flags |= IFF_MULTICAST; #endif /* MULTICAST */ if_initname(ifp, device_get_name(device), 
device_get_unit(device)); ifp->if_init = wlinit; ifp->if_start = wlstart; ifp->if_ioctl = wlioctl; ifp->if_timer = 0; /* paranoia */ ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; /* no entries ifp->if_watchdog ifp->if_done ifp->if_reset */ ether_ifattach(ifp, eaddr); if_printf(ifp, "NWID 0x%02x%02x", sc->nwid[0], sc->nwid[1]); if (sc->freq24) printf(", Freq %d MHz",sc->freq24); /* 2.4 Gz */ printf("\n"); /* 2.4 Gz */ bus_setup_intr(device, sc->res_irq, INTR_TYPE_NET, wlintr, sc, &sc->intr_cookie); if (bootverbose) wldump(sc); return (0); } static int wldetach(device_t device) { struct wl_softc *sc = device_get_softc(device); device_t parent = device_get_parent(device); struct ifnet *ifp; ifp = sc->ifp; ether_ifdetach(ifp); if_free(ifp); WL_LOCK(sc); /* reset the board */ sc->hacr = HACR_RESET; CMD(sc); sc->hacr = HACR_DEFAULT; CMD(sc); if (sc->intr_cookie != NULL) { BUS_TEARDOWN_INTR(parent, device, sc->res_irq, sc->intr_cookie); sc->intr_cookie = NULL; } bus_generic_detach(device); wl_deallocate_resources(device); WL_UNLOCK(sc); mtx_destroy(&sc->wl_mtx); return (0); } static int wl_allocate_resources(device_t device) { struct wl_softc *sc = device_get_softc(device); int ports = 16; /* Number of ports */ sc->res_ioport = bus_alloc_resource(device, SYS_RES_IOPORT, &sc->rid_ioport, 0ul, ~0ul, ports, RF_ACTIVE); if (sc->res_ioport == NULL) goto errexit; sc->res_irq = bus_alloc_resource_any(device, SYS_RES_IRQ, &sc->rid_irq, RF_SHAREABLE|RF_ACTIVE); if (sc->res_irq == NULL) goto errexit; return (0); errexit: wl_deallocate_resources(device); return (ENXIO); } static int wl_deallocate_resources(device_t device) { struct wl_softc *sc = device_get_softc(device); if (sc->res_irq != 0) { bus_deactivate_resource(device, SYS_RES_IRQ, sc->rid_irq, sc->res_irq); bus_release_resource(device, SYS_RES_IRQ, sc->rid_irq, sc->res_irq); sc->res_irq = 0; } if (sc->res_ioport != 0) { bus_deactivate_resource(device, SYS_RES_IOPORT, sc->rid_ioport, sc->res_ioport); bus_release_resource(device, 
SYS_RES_IOPORT, sc->rid_ioport, sc->res_ioport); sc->res_ioport = 0; } return (0); } /* * Print out interesting information about the 82596. */ static void wldump(struct wl_softc *sc) { int base = sc->base; int i; printf("hasr %04x\n", inw(HASR(base))); printf("scb at %04x:\n ", OFFSET_SCB); outw(PIOR1(base), OFFSET_SCB); for (i = 0; i < 8; i++) printf("%04x ", inw(PIOP1(base))); printf("\n"); printf("cu at %04x:\n ", OFFSET_CU); outw(PIOR1(base), OFFSET_CU); for (i = 0; i < 8; i++) printf("%04x ", inw(PIOP1(base))); printf("\n"); printf("tbd at %04x:\n ", OFFSET_TBD); outw(PIOR1(base), OFFSET_TBD); for (i = 0; i < 4; i++) printf("%04x ", inw(PIOP1(base))); printf("\n"); } /* Initialize the Modem Management Controller */ static void wlinitmmc(struct wl_softc *sc) { int base = sc->base; int configured; int mode = sc->mode; int i; /* 2.4 Gz */ /* enter 8 bit operation */ sc->hacr = (HACR_DEFAULT & ~HACR_16BITS); CMD(sc); configured = sc->psa[WLPSA_CONFIGURED] & 1; /* * Set default modem control parameters. Taken from NCR document * 407-0024326 Rev. A */ MMC_WRITE(MMC_JABBER_ENABLE, 0x01); MMC_WRITE(MMC_ANTEN_SEL, 0x02); MMC_WRITE(MMC_IFS, 0x20); MMC_WRITE(MMC_MOD_DELAY, 0x04); MMC_WRITE(MMC_JAM_TIME, 0x38); MMC_WRITE(MMC_DECAY_PRM, 0x00); /* obsolete ? 
*/ MMC_WRITE(MMC_DECAY_UPDAT_PRM, 0x00); if (!configured) { MMC_WRITE(MMC_LOOPT_SEL, 0x00); if (sc->psa[WLPSA_COMPATNO] & 1) { MMC_WRITE(MMC_THR_PRE_SET, 0x01); /* 0x04 for AT and 0x01 for MCA */ } else { MMC_WRITE(MMC_THR_PRE_SET, 0x04); /* 0x04 for AT and 0x01 for MCA */ } MMC_WRITE(MMC_QUALITY_THR, 0x03); } else { /* use configuration defaults from parameter storage area */ if (sc->psa[WLPSA_NWIDENABLE] & 1) { if ((mode & (MOD_PROM | MOD_ENAL)) && wl_ignore_nwid) { MMC_WRITE(MMC_LOOPT_SEL, 0x40); } else { MMC_WRITE(MMC_LOOPT_SEL, 0x00); } } else { MMC_WRITE(MMC_LOOPT_SEL, 0x40); /* disable network id check */ } MMC_WRITE(MMC_THR_PRE_SET, sc->psa[WLPSA_THRESH]); MMC_WRITE(MMC_QUALITY_THR, sc->psa[WLPSA_QUALTHRESH]); } MMC_WRITE(MMC_FREEZE, 0x00); MMC_WRITE(MMC_ENCR_ENABLE, 0x00); MMC_WRITE(MMC_NETW_ID_L,sc->nwid[1]); /* set NWID */ MMC_WRITE(MMC_NETW_ID_H,sc->nwid[0]); /* enter normal 16 bit mode operation */ sc->hacr = HACR_DEFAULT; CMD(sc); CMD(sc); /* virtualpc1 needs this! */ if (sc->psa[WLPSA_COMPATNO]== /* 2.4 Gz: half-card ver */ WLPSA_COMPATNO_WL24B) { /* 2.4 Gz */ i=sc->chan24<<4; /* 2.4 Gz: position ch # */ MMC_WRITE(MMC_EEADDR,i+0x0f); /* 2.4 Gz: named ch, wc=16 */ MMC_WRITE(MMC_EECTRL,MMC_EECTRL_DWLD+ /* 2.4 Gz: Download Synths */ MMC_EECTRL_EEOP_READ); /* 2.4 Gz: Read EEPROM */ for (i=0; i<1000; ++i) { /* 2.4 Gz: wait for download */ DELAY(40); /* 2.4 Gz */ if ((wlmmcread(base,MMC_EECTRLstat) /* 2.4 Gz: check DWLD and */ &(MMC_EECTRLstat_DWLD /* 2.4 Gz: EEBUSY */ +MMC_EECTRLstat_EEBUSY))==0) /* 2.4 Gz: */ break; /* 2.4 Gz: download finished */ } /* 2.4 Gz */ if (i==1000) printf("wl: synth load failed\n"); /* 2.4 Gz */ MMC_WRITE(MMC_EEADDR,0x61); /* 2.4 Gz: default pwr, wc=2 */ MMC_WRITE(MMC_EECTRL,MMC_EECTRL_DWLD+ /* 2.4 Gz: Download Xmit Pwr */ MMC_EECTRL_EEOP_READ); /* 2.4 Gz: Read EEPROM */ for (i=0; i<1000; ++i) { /* 2.4 Gz: wait for download */ DELAY(40); /* 2.4 Gz */ if ((wlmmcread(base,MMC_EECTRLstat) /* 2.4 Gz: check DWLD and */ 
&(MMC_EECTRLstat_DWLD			/* 2.4 Gz: EEBUSY */
		    +MMC_EECTRLstat_EEBUSY))==0) /* 2.4 Gz: */
			break;			/* 2.4 Gz: download finished */
		}				/* 2.4 Gz */
		if (i==1000)
			printf("wl: xmit pwr load failed\n"); /* 2.4 Gz */
		MMC_WRITE(MMC_ANALCTRL,		/* 2.4 Gz: EXT ant+polarity */
		    MMC_ANALCTRL_ANTPOL +	/* 2.4 Gz: */
		    MMC_ANALCTRL_EXTANT);	/* 2.4 Gz: */
		i=sc->chan24<<4;		/* 2.4 Gz: position ch # */
		MMC_WRITE(MMC_EEADDR,i);	/* 2.4 Gz: get frequency */
		MMC_WRITE(MMC_EECTRL,		/* 2.4 Gz: EEPROM read */
		    MMC_EECTRL_EEOP_READ);	/* 2.4 Gz: */
		DELAY(40);			/* 2.4 Gz */
		i = wlmmcread(base,MMC_EEDATALrv) /* 2.4 Gz: freq val */
		    + (wlmmcread(base,MMC_EEDATAHrv)<<8); /* 2.4 Gz */
		sc->freq24 = (i>>6)+2400;	/* 2.4 Gz: save real freq */
	}
}

/*
 * wlinit:
 *
 *	Another routine that interfaces the "if" layer to this driver.
 *	Simply resets the structures that are used by "upper layers".
 *	As well as calling wlhwrst that does reset the WaveLAN board.
 *
 *	Called as the ifnet if_init handler and internally whenever the
 *	board needs a full restart (mode change, watchdog timeout).
 *
 *	input	: softc pointer for this interface
 *	output	: structures (if structs) and board are reset
 *
 */
static void
wlinit(void *xsc)
{
	struct wl_softc	*sc = xsc;
	struct ifnet	*ifp = sc->ifp;
	int		stat;
	u_long		oldpri;

#ifdef WLDEBUG
	if (sc->ifp->if_flags & IFF_DEBUG)
		printf("wl%d: entered wlinit()\n",sc->unit);
#endif
	/* Serialize against the interrupt handler and raise spl (legacy). */
	WL_LOCK(sc);
	oldpri = splimp();
	/* wlhwrst() returns 1 (TRUE) only when diagnostics and config pass. */
	if ((stat = wlhwrst(sc)) == TRUE) {
		sc->ifp->if_flags |= IFF_RUNNING;	/* same as DSF_RUNNING */
		/*
		 * OACTIVE is used by upper-level routines
		 * and must be set
		 */
		sc->ifp->if_flags &= ~IFF_OACTIVE;	/* same as tbusy below */

		sc->flags |= DSF_RUNNING;
		sc->tbusy = 0;
		/* Board was just reset: any pending xmit watchdog is stale. */
		untimeout(wlwatchdog, sc, sc->watchdog_ch);

		/* Kick the transmit queue now that the board is up. */
		wlstart(ifp);
	} else {
		printf("wl%d init(): trouble resetting board.\n", sc->unit);
	}
	splx(oldpri);
	WL_UNLOCK(sc);
}

/*
 * wlhwrst:
 *
 *	This routine resets the WaveLAN board that corresponds to the
 *	board number passed in.
* * input : board number to do a hardware reset * output : board is reset * */ static int wlhwrst(struct wl_softc *sc) { #ifdef WLDEBUG if (sc->ifp->if_flags & IFF_DEBUG) printf("wl%d: entered wlhwrst()\n", sc->unit); #endif sc->hacr = HACR_RESET; CMD(sc); /* reset the board */ /* clear reset command and set PIO#1 in autoincrement mode */ sc->hacr = HACR_DEFAULT; CMD(sc); #ifdef WLDEBUG if (sc->ifp->if_flags & IFF_DEBUG) wlmmcstat(sc); /* Display MMC registers */ #endif /* WLDEBUG */ wlbldcu(sc); /* set up command unit structures */ if (wldiag(sc) == 0) return(0); if (wlconfig(sc) == 0) return(0); /* * insert code for loopback test here */ wlrustrt(sc); /* start receive unit */ /* enable interrupts */ sc->hacr = (HACR_DEFAULT | HACR_INTRON); CMD(sc); return(1); } /* * wlbldcu: * * This function builds up the command unit structures. It inits * the scp, iscp, scb, cb, tbd, and tbuf. * */ static void wlbldcu(struct wl_softc *sc) { short base = sc->base; scp_t scp; iscp_t iscp; scb_t scb; ac_t cb; tbd_t tbd; int i; bzero(&scp, sizeof(scp)); scp.scp_sysbus = 0; scp.scp_iscp = OFFSET_ISCP; scp.scp_iscp_base = 0; outw(PIOR1(base), OFFSET_SCP); outsw(PIOP1(base), &scp, sizeof(scp_t)/2); bzero(&iscp, sizeof(iscp)); iscp.iscp_busy = 1; iscp.iscp_scb_offset = OFFSET_SCB; iscp.iscp_scb = 0; iscp.iscp_scb_base = 0; outw(PIOR1(base), OFFSET_ISCP); outsw(PIOP1(base), &iscp, sizeof(iscp_t)/2); scb.scb_status = 0; scb.scb_command = SCB_RESET; scb.scb_cbl_offset = OFFSET_CU; scb.scb_rfa_offset = OFFSET_RU; scb.scb_crcerrs = 0; scb.scb_alnerrs = 0; scb.scb_rscerrs = 0; scb.scb_ovrnerrs = 0; outw(PIOR1(base), OFFSET_SCB); outsw(PIOP1(base), &scb, sizeof(scb_t)/2); SET_CHAN_ATTN(sc); outw(PIOR0(base), OFFSET_ISCP + 0); /* address of iscp_busy */ for (i = 1000000; inw(PIOP0(base)) && (i-- > 0); ) continue; if (i <= 0) printf("wl%d bldcu(): iscp_busy timeout.\n", sc->unit); outw(PIOR0(base), OFFSET_SCB + 0); /* address of scb_status */ for (i = STATUS_TRIES; i-- > 0; ) { if 
(inw(PIOP0(base)) == (SCB_SW_CX|SCB_SW_CNA)) break; } if (i <= 0) printf("wl%d bldcu(): not ready after reset.\n", sc->unit); wlack(sc); cb.ac_status = 0; cb.ac_command = AC_CW_EL; /* NOP */ cb.ac_link_offset = OFFSET_CU; outw(PIOR1(base), OFFSET_CU); outsw(PIOP1(base), &cb, 6/2); tbd.act_count = 0; tbd.next_tbd_offset = I82586NULL; tbd.buffer_addr = 0; tbd.buffer_base = 0; outw(PIOR1(base), OFFSET_TBD); outsw(PIOP1(base), &tbd, sizeof(tbd_t)/2); } /* * wlstart: * * send a packet * * input : board number * output : stuff sent to board if any there * */ static void wlstart(struct ifnet *ifp) { struct mbuf *m; struct wl_softc *sc = ifp->if_softc; short base = sc->base; int scb_status, cu_status, scb_command; WL_LOCK(sc); #ifdef WLDEBUG if (sc->ifp->if_flags & IFF_DEBUG) printf("%s: entered wlstart()\n", ifp->if_xname); #endif outw(PIOR1(base), OFFSET_CU); cu_status = inw(PIOP1(base)); outw(PIOR0(base),OFFSET_SCB + 0); /* scb_status */ scb_status = inw(PIOP0(base)); outw(PIOR0(base), OFFSET_SCB + 2); scb_command = inw(PIOP0(base)); /* * don't need OACTIVE check as tbusy here checks to see * if we are already busy */ if (sc->tbusy) { if ((scb_status & 0x0700) == SCB_CUS_IDLE && (cu_status & AC_SW_B) == 0){ sc->tbusy = 0; untimeout(wlwatchdog, sc, sc->watchdog_ch); sc->ifp->if_flags &= ~IFF_OACTIVE; /* * This is probably just a race. The xmt'r is just * became idle but WE have masked interrupts so ... */ #ifdef WLDEBUG printf("%s: CU idle, scb %04x %04x cu %04x\n", ifp->if_xname, scb_status, scb_command, cu_status); #endif if (xmt_watch) printf("!!"); } else { WL_UNLOCK(sc); return; /* genuinely still busy */ } } else if ((scb_status & 0x0700) == SCB_CUS_ACTV || (cu_status & AC_SW_B)){ #ifdef WLDEBUG printf("%s: CU unexpectedly busy; scb %04x cu %04x\n", ifp->if_xname, scb_status, cu_status); #endif if (xmt_watch) printf("%s: busy?!",ifp->if_xname); WL_UNLOCK(sc); return; /* hey, why are we busy? 
*/ } /* get ourselves some data */ ifp = sc->ifp; IF_DEQUEUE(&ifp->if_snd, m); if (m != (struct mbuf *)0) { /* let BPF see it before we commit it */ BPF_MTAP(ifp, m); sc->tbusy++; /* set the watchdog timer so that if the board * fails to interrupt we will restart */ /* try 10 ticks, not very long */ sc->watchdog_ch = timeout(wlwatchdog, sc, 10); sc->ifp->if_flags |= IFF_OACTIVE; sc->ifp->if_opackets++; wlxmt(sc, m); } else { sc->ifp->if_flags &= ~IFF_OACTIVE; } WL_UNLOCK(sc); return; } /* * wlread: * * This routine does the actual copy of data (including ethernet header * structure) from the WaveLAN to an mbuf chain that will be passed up * to the "if" (network interface) layer. NOTE: we currently * don't handle trailer protocols, so if that is needed, it will * (at least in part) be added here. For simplicities sake, this * routine copies the receive buffers from the board into a local (stack) * buffer until the frame has been copied from the board. Once in * the local buffer, the contents are copied to an mbuf chain that * is then enqueued onto the appropriate "if" queue. * * input : board number, and a frame descriptor address * output : the packet is put into an mbuf chain, and passed up * assumes : if any errors occur, packet is "dropped on the floor" * */ static int wlread(struct wl_softc *sc, u_short fd_p) { struct ifnet *ifp = sc->ifp; short base = sc->base; fd_t fd; struct ether_header *eh; struct mbuf *m; rbd_t rbd; u_char *mb_p; u_short mlen, len; u_short bytes_in_msg, bytes_in_mbuf, bytes; WL_LOCK_ASSERT(sc); #ifdef WLDEBUG if (sc->ifp->if_flags & IFF_DEBUG) printf("wl%d: entered wlread()\n", sc->unit); #endif if ((ifp->if_flags & (IFF_UP|IFF_RUNNING)) != (IFF_UP|IFF_RUNNING)) { printf("%s read(): board is not running.\n", ifp->if_xname); sc->hacr &= ~HACR_INTRON; CMD(sc); /* turn off interrupts */ } /* * Collect message size. 
*/ outw(PIOR1(base), fd_p); insw(PIOP1(base), &fd, sizeof(fd_t)/2); if (fd.rbd_offset == I82586NULL) { if (wlhwrst(sc) != TRUE) { sc->hacr &= ~HACR_INTRON; CMD(sc); /* turn off interrupts */ printf("wl%d read(): hwrst trouble.\n", sc->unit); } return 0; } outw(PIOR1(base), fd.rbd_offset); insw(PIOP1(base), &rbd, sizeof(rbd_t)/2); bytes_in_msg = rbd.status & RBD_SW_COUNT; /* * Allocate a cluster'd mbuf to receive the packet. */ m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { if (wlhwrst(sc) != TRUE) { sc->hacr &= ~HACR_INTRON; CMD(sc); /* turn off interrupts */ printf("wl%d read(): hwrst trouble.\n", sc->unit); } return 0; } m->m_pkthdr.len = m->m_len = MCLBYTES; m_adj(m, ETHER_ALIGN); /* align IP header */ /* * Collect the message data. */ mlen = 0; mb_p = mtod(m, u_char *); bytes_in_mbuf = m->m_len; /* Put the ethernet header inside the mbuf. */ bcopy(&fd.destination[0], mb_p, 14); mb_p += 14; mlen += 14; bytes_in_mbuf -= 14; bytes = min(bytes_in_mbuf, bytes_in_msg); for (;;) { if (bytes & 1) { len = bytes + 1; } else { len = bytes; } outw(PIOR1(base), rbd.buffer_addr); insw(PIOP1(base), mb_p, len/2); mlen += bytes; if (bytes > bytes_in_mbuf) { /* XXX something wrong, a packet should fit in 1 cluster */ m_freem(m); printf("wl%d read(): packet too large (%u > %u)\n", sc->unit, bytes, bytes_in_mbuf); if (wlhwrst(sc) != TRUE) { sc->hacr &= ~HACR_INTRON; CMD(sc); /* turn off interrupts */ printf("wl%d read(): hwrst trouble.\n", sc->unit); } return 0; } mb_p += bytes; bytes_in_mbuf -= bytes; bytes_in_msg -= bytes; if (bytes_in_msg == 0) { if (rbd.status & RBD_SW_EOF || rbd.next_rbd_offset == I82586NULL) { break; } outw(PIOR1(base), rbd.next_rbd_offset); insw(PIOP1(base), &rbd, sizeof(rbd_t)/2); bytes_in_msg = rbd.status & RBD_SW_COUNT; } else { rbd.buffer_addr += bytes; } bytes = min(bytes_in_mbuf, bytes_in_msg); } m->m_pkthdr.len = m->m_len = mlen; m->m_pkthdr.rcvif = ifp; /* * If hw is in promiscuous mode (note that I said hardware, not if * IFF_PROMISC 
is set in ifnet flags), then if this is a unicast * packet and the MAC dst is not us, drop it. This check in normally * inside ether_input(), but IFF_MULTI causes hw promisc without * a bpf listener, so this is wrong. * Greg Troxel , 1998-08-07 */ /* * TBD: also discard packets where NWID does not match. * However, there does not appear to be a way to read the nwid * for a received packet. -gdt 1998-08-07 */ /* XXX verify mbuf length */ eh = mtod(m, struct ether_header *); if ( #ifdef WL_USE_IFNET_PROMISC_CHECK /* not defined */ (sc->ifp->if_flags & (IFF_PROMISC|IFF_ALLMULTI)) #else /* hw is in promisc mode if this is true */ (sc->mode & (MOD_PROM | MOD_ENAL)) #endif && (eh->ether_dhost[0] & 1) == 0 && /* !mcast and !bcast */ bcmp(eh->ether_dhost, &IFP2ENADDR(sc->ifp), sizeof(eh->ether_dhost)) != 0 ) { m_freem(m); return 1; } #ifdef WLDEBUG if (sc->ifp->if_flags & IFF_DEBUG) printf("wl%d: wlrecv %u bytes\n", sc->unit, mlen); #endif #ifdef WLCACHE wl_cache_store(sc, base, eh, m); #endif /* * received packet is now in a chain of mbuf's. next step is * to pass the packet upwards. */ WL_UNLOCK(sc); (*ifp->if_input)(ifp, m); WL_LOCK(sc); return 1; } /* * wlioctl: * * This routine processes an ioctl request from the "if" layer * above. 
* * input : pointer the appropriate "if" struct, command, and data * output : based on command appropriate action is taken on the * WaveLAN board(s) or related structures * return : error is returned containing exit conditions * */ static int wlioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct ifreq *ifr = (struct ifreq *)data; struct wl_softc *sc = ifp->if_softc; short base = sc->base; short mode = 0; int opri, error = 0; struct thread *td = curthread; /* XXX */ int irq, irqval, i, isroot; caddr_t up; #ifdef WLCACHE int size; char * cpt; #endif WL_LOCK(sc); #ifdef WLDEBUG if (sc->ifp->if_flags & IFF_DEBUG) printf("%s: entered wlioctl()\n", ifp->if_xname); #endif opri = splimp(); switch (cmd) { case SIOCSIFFLAGS: if (ifp->if_flags & IFF_ALLMULTI) { mode |= MOD_ENAL; } if (ifp->if_flags & IFF_PROMISC) { mode |= MOD_PROM; } if (ifp->if_flags & IFF_LINK0) { mode |= MOD_PROM; } /* * force a complete reset if the recieve multicast/ * promiscuous mode changes so that these take * effect immediately. * */ if (sc->mode != mode) { sc->mode = mode; if (sc->flags & DSF_RUNNING) { sc->flags &= ~DSF_RUNNING; wlinit(sc); } } /* if interface is marked DOWN and still running then * stop it. 
*/ if ((ifp->if_flags & IFF_UP) == 0 && sc->flags & DSF_RUNNING) { printf("%s ioctl(): board is not running\n", ifp->if_xname); sc->flags &= ~DSF_RUNNING; sc->hacr &= ~HACR_INTRON; CMD(sc); /* turn off interrupts */ } /* else if interface is UP and RUNNING, start it */ else if (ifp->if_flags & IFF_UP && (sc->flags & DSF_RUNNING) == 0) { wlinit(sc); } /* if WLDEBUG set on interface, then printf rf-modem regs */ if (ifp->if_flags & IFF_DEBUG) wlmmcstat(sc); break; #if MULTICAST case SIOCADDMULTI: case SIOCDELMULTI: wlinit(sc); break; #endif /* MULTICAST */ /* DEVICE SPECIFIC */ /* copy the PSA out to the caller */ case SIOCGWLPSA: /* pointer to buffer in user space */ up = (void *)ifr->ifr_data; /* work out if they're root */ isroot = (suser(td) == 0); for (i = 0; i < 0x40; i++) { /* don't hand the DES key out to non-root users */ if ((i > WLPSA_DESKEY) && (i < (WLPSA_DESKEY + 8)) && !isroot) continue; if (subyte((up + i), sc->psa[i])) { WL_UNLOCK(sc); return(EFAULT); } } break; /* copy the PSA in from the caller; we only copy _some_ values */ case SIOCSWLPSA: /* root only */ if ((error = suser(td))) break; error = EINVAL; /* assume the worst */ /* pointer to buffer in user space containing data */ up = (void *)ifr->ifr_data; /* check validity of input range */ for (i = 0; i < 0x40; i++) if (fubyte(up + i) < 0) { WL_UNLOCK(sc); return(EFAULT); } /* check IRQ value */ irqval = fubyte(up+WLPSA_IRQNO); for (irq = 15; irq >= 0; irq--) if (irqvals[irq] == irqval) break; if (irq == 0) /* oops */ break; /* new IRQ */ sc->psa[WLPSA_IRQNO] = irqval; /* local MAC */ for (i = 0; i < 6; i++) sc->psa[WLPSA_LOCALMAC+i] = fubyte(up+WLPSA_LOCALMAC+i); /* MAC select */ sc->psa[WLPSA_MACSEL] = fubyte(up+WLPSA_MACSEL); /* default nwid */ sc->psa[WLPSA_NWID] = fubyte(up+WLPSA_NWID); sc->psa[WLPSA_NWID+1] = fubyte(up+WLPSA_NWID+1); error = 0; wlsetpsa(sc); /* update the PSA */ break; /* get the current NWID out of the sc since we stored it there */ case SIOCGWLCNWID: ifr->ifr_data = 
(caddr_t) (sc->nwid[0] << 8 | sc->nwid[1]); break; /* * change the nwid dynamically. This * ONLY changes the radio modem and does not * change the PSA. * * 2 steps: * 1. save in softc "soft registers" * 2. save in radio modem (MMC) */ case SIOCSWLCNWID: /* root only */ if ((error = suser(td))) break; if (!(ifp->if_flags & IFF_UP)) { error = EIO; /* only allowed while up */ } else { /* * soft c nwid shadows radio modem setting */ sc->nwid[0] = (int)ifr->ifr_data >> 8; sc->nwid[1] = (int)ifr->ifr_data & 0xff; MMC_WRITE(MMC_NETW_ID_L,sc->nwid[1]); MMC_WRITE(MMC_NETW_ID_H,sc->nwid[0]); } break; /* copy the EEPROM in 2.4 Gz WaveMODEM out to the caller */ case SIOCGWLEEPROM: /* root only */ if ((error = suser(td))) break; /* pointer to buffer in user space */ up = (void *)ifr->ifr_data; for (i=0x00; i<0x80; ++i) { /* 2.4 Gz: size of EEPROM */ MMC_WRITE(MMC_EEADDR,i); /* 2.4 Gz: get frequency */ MMC_WRITE(MMC_EECTRL, /* 2.4 Gz: EEPROM read */ MMC_EECTRL_EEOP_READ); /* 2.4 Gz: */ DELAY(40); /* 2.4 Gz */ if (subyte(up + 2*i, /* 2.4 Gz: pass low byte of */ wlmmcread(base,MMC_EEDATALrv))) {/* 2.4 Gz: EEPROM word */ WL_UNLOCK(sc); return(EFAULT); /* 2.4 Gz: */ } if (subyte(up + 2*i+1, /* 2.4 Gz: pass hi byte of */ wlmmcread(base,MMC_EEDATALrv))) {/* 2.4 Gz: EEPROM word */ WL_UNLOCK(sc); return(EFAULT); /* 2.4 Gz: */ } } break; #ifdef WLCACHE /* zero (Delete) the wl cache */ case SIOCDWLCACHE: /* root only */ if ((error = suser(td))) break; wl_cache_zero(sc); break; /* read out the number of used cache elements */ case SIOCGWLCITEM: ifr->ifr_data = (caddr_t) sc->w_sigitems; break; /* read out the wl cache */ case SIOCGWLCACHE: /* pointer to buffer in user space */ up = (void *)ifr->ifr_data; cpt = (char *) &sc->w_sigcache[0]; size = sc->w_sigitems * sizeof(struct w_sigcache); for (i = 0; i < size; i++) { if (subyte((up + i), *cpt++)) { WL_UNLOCK(sc); return(EFAULT); } } break; #endif default: error = ether_ioctl(ifp, cmd, data); break; } splx(opri); WL_UNLOCK(sc); return 
(error); } /* * wlwatchdog(): * * Called if the timer set in wlstart expires before an interrupt is received * from the wavelan. It seems to lose interrupts sometimes. * The watchdog routine gets called if the transmitter failed to interrupt * * input : which board is timing out * output : board reset * */ static void wlwatchdog(void *vsc) { struct wl_softc *sc = vsc; int unit = sc->unit; log(LOG_ERR, "wl%d: wavelan device timeout on xmit\n", unit); WL_LOCK(sc); sc->ifp->if_oerrors++; wlinit(sc); WL_UNLOCK(sc); } /* * wlintr: * * This function is the interrupt handler for the WaveLAN * board. This routine will be called whenever either a packet * is received, or a packet has successfully been transfered and * the unit is ready to transmit another packet. * * input : board number that interrupted * output : either a packet is received, or a packet is transfered * */ static void wlintr(void *arg) { struct wl_softc *sc = (struct wl_softc *)arg; short base = sc->base; int ac_status; u_short int_type, int_type1; WL_LOCK(sc); #ifdef WLDEBUG if (sc->ifp->if_flags & IFF_DEBUG) printf("wl%d: wlintr() called\n", sc->unit); #endif if ((int_type = inw(HASR(base))) & HASR_MMC_INTR) { /* handle interrupt from the modem management controler */ /* This will clear the interrupt condition */ (void) wlmmcread(base,MMC_DCE_STATUS); /* ignored for now */ } if (!(int_type & HASR_INTR)){ /* return if no interrupt from 82586 */ /* commented out. jrb. 
it happens when reinit occurs printf("wlintr: int_type %x, dump follows\n", int_type); wldump(unit); */ WL_UNLOCK(sc); return; } if (gathersnr) getsnr(sc); for (;;) { outw(PIOR0(base), OFFSET_SCB + 0); /* get scb status */ int_type = (inw(PIOP0(base)) & SCB_SW_INT); if (int_type == 0) /* no interrupts left */ break; int_type1 = wlack(sc); /* acknowledge interrupt(s) */ /* make sure no bits disappeared (others may appear) */ if ((int_type & int_type1) != int_type) printf("wlack() int bits disappeared : %04x != int_type %04x\n", int_type1, int_type); int_type = int_type1; /* go with the new status */ /* * incoming packet */ if (int_type & SCB_SW_FR) { sc->ifp->if_ipackets++; wlrcv(sc); } /* * receiver not ready */ if (int_type & SCB_SW_RNR) { sc->ifp->if_ierrors++; #ifdef WLDEBUG if (sc->ifp->if_flags & IFF_DEBUG) printf("wl%d intr(): receiver overrun! begin_fd = %x\n", sc->unit, sc->begin_fd); #endif wlrustrt(sc); } /* * CU not ready */ if (int_type & SCB_SW_CNA) { /* * At present, we don't care about CNA's. We * believe they are a side effect of XMT. */ } if (int_type & SCB_SW_CX) { /* * At present, we only request Interrupt for * XMT. 
*/ outw(PIOR1(base), OFFSET_CU); /* get command status */ ac_status = inw(PIOP1(base)); if (xmt_watch) { /* report some anomalies */ if (sc->tbusy == 0) { printf("wl%d: xmt intr but not busy, CU %04x\n", sc->unit, ac_status); } if (ac_status == 0) { printf("wl%d: xmt intr but ac_status == 0\n", sc->unit); } if (ac_status & AC_SW_A) { printf("wl%d: xmt aborted\n", sc->unit); } #ifdef notdef if (ac_status & TC_CARRIER) { printf("wl%d: no carrier\n", sc->unit); } #endif /* notdef */ if (ac_status & TC_CLS) { printf("wl%d: no CTS\n", sc->unit); } if (ac_status & TC_DMA) { printf("wl%d: DMA underrun\n", sc->unit); } if (ac_status & TC_DEFER) { printf("wl%d: xmt deferred\n", sc->unit); } if (ac_status & TC_SQE) { printf("wl%d: heart beat\n", sc->unit); } if (ac_status & TC_COLLISION) { printf("wl%d: too many collisions\n", sc->unit); } } /* if the transmit actually failed, or returned some status */ if ((!(ac_status & AC_SW_OK)) || (ac_status & 0xfff)) { if (ac_status & (TC_COLLISION | TC_CLS | TC_DMA)) { sc->ifp->if_oerrors++; } /* count collisions */ sc->ifp->if_collisions += (ac_status & 0xf); /* if TC_COLLISION set and collision count zero, 16 collisions */ if ((ac_status & 0x20) == 0x20) { sc->ifp->if_collisions += 0x10; } } sc->tbusy = 0; untimeout(wlwatchdog, sc, sc->watchdog_ch); sc->ifp->if_flags &= ~IFF_OACTIVE; wlstart(sc->ifp); } } WL_UNLOCK(sc); return; } /* * wlrcv: * * This routine is called by the interrupt handler to initiate a * packet transfer from the board to the "if" layer above this * driver. This routine checks if a buffer has been successfully * received by the WaveLAN. If so, the routine wlread is called * to do the actual transfer of the board data (including the * ethernet header) into a packet (consisting of an mbuf chain). 
 *
 * input : number of the board to check
 * output : if a packet is available, it is "sent up"
 *
 */
static void
wlrcv(struct wl_softc *sc)
{
	short	base = sc->base;
	u_short	fd_p, status, offset, link_offset;

#ifdef WLDEBUG
	if (sc->ifp->if_flags & IFF_DEBUG)
		printf("wl%d: entered wlrcv()\n", sc->unit);
#endif
	/*
	 * Consume completed frame descriptors (FDs) starting at begin_fd.
	 * begin_fd is re-read on each iteration because the loop body
	 * advances it (to link_offset) once a frame has been handled.
	 */
	for (fd_p = sc->begin_fd; fd_p != I82586NULL; fd_p = sc->begin_fd) {
		outw(PIOR0(base), fd_p + 0);	/* address of status */
		status = inw(PIOP0(base));
		outw(PIOR1(base), fd_p + 4);	/* address of link_offset */
		link_offset = inw(PIOP1(base));
		/*
		 * NOTE(review): this second read of PIOP1 appears to rely on
		 * the data port auto-incrementing to fd_p + 6, yielding the
		 * rbd_offset word -- confirm against the board's PIO model.
		 */
		offset = inw(PIOP1(base));	/* rbd_offset */
		if (status == 0xffff || offset == 0xffff /*I82586NULL*/) {
			/* Shared memory looks corrupt; reset the hardware. */
			if (wlhwrst(sc) != TRUE)
				printf("wl%d rcv(): hwrst ffff trouble.\n",
				    sc->unit);
			return;
		} else if (status & AC_SW_C) {
			/* Descriptor is complete; classify the result. */
			if (status == (RFD_DONE|RFD_RSC)) {
				/* lost one */
#ifdef WLDEBUG
				if (sc->ifp->if_flags & IFF_DEBUG)
					printf("wl%d RCV: RSC %x\n",
					    sc->unit, status);
#endif
				sc->ifp->if_ierrors++;
			} else if (!(status & RFD_OK)) {
				printf("wl%d RCV: !OK %x\n",
				    sc->unit, status);
				sc->ifp->if_ierrors++;
			} else if (status & 0xfff) {	/* can't happen */
				printf("wl%d RCV: ERRs %x\n",
				    sc->unit, status);
				sc->ifp->if_ierrors++;
			} else if (!wlread(sc, fd_p))
				/* wlread() failed (e.g. no mbuf); stop now. */
				return;
			if (!wlrequeue(sc, fd_p)) {
				/* abort on chain error */
				if (wlhwrst(sc) != TRUE)
					printf("wl%d rcv(): hwrst trouble.\n",
					    sc->unit);
				return;
			}
			sc->begin_fd = link_offset;
		} else {
			/* First incomplete descriptor: nothing more to do. */
			break;
		}
	}
	return;
}

/*
 * wlrequeue:
 *
 * This routine puts rbd's used in the last receive back onto the
 * free list for the next receive.
 *
 */
static int
wlrequeue(struct wl_softc *sc, u_short fd_p)
{
	short	base = sc->base;
	fd_t	fd;
	u_short	l_rbdp, f_rbdp, rbd_offset;

	outw(PIOR0(base), fd_p + 6);
	rbd_offset = inw(PIOP0(base));
	if ((f_rbdp = rbd_offset) != I82586NULL) {
		l_rbdp = f_rbdp;
		/*
		 * Walk this frame's RBD chain, clearing each status word,
		 * until the end-of-frame RBD (or a null link) is found.
		 */
		for (;;) {
			outw(PIOR0(base), l_rbdp + 0);	/* address of status */
			if (inw(PIOP0(base)) & RBD_SW_EOF)
				break;
			outw(PIOP0(base), 0);
			outw(PIOR0(base), l_rbdp + 2);	/* next_rbd_offset */
			if ((l_rbdp = inw(PIOP0(base))) == I82586NULL)
				break;
		}
		/* Clear the last RBD's status and terminate the chain. */
		outw(PIOP0(base), 0);
		outw(PIOR0(base), l_rbdp + 2);	/* next_rbd_offset */
		outw(PIOP0(base), I82586NULL);
		outw(PIOR0(base), l_rbdp + 8);	/* address of size */
		outw(PIOP0(base), inw(PIOP0(base)) | AC_CW_EL);
		/* Splice the reclaimed chain in after the current end_rbd. */
		outw(PIOR0(base), sc->end_rbd + 2);
		outw(PIOP0(base), f_rbdp);	/* end_rbd->next_rbd_offset */
		outw(PIOR0(base), sc->end_rbd + 8);	/* size */
		outw(PIOP0(base), inw(PIOP0(base)) & ~AC_CW_EL);
		sc->end_rbd = l_rbdp;
	}

	/* Re-initialize the FD and append it at the tail of the FD list. */
	fd.status = 0;
	fd.command = AC_CW_EL;
	fd.link_offset = I82586NULL;
	fd.rbd_offset = I82586NULL;
	outw(PIOR1(base), fd_p);
	outsw(PIOP1(base), &fd, 8/2);

	outw(PIOR1(base), sc->end_fd + 2);	/* addr of command */
	outw(PIOP1(base), 0);			/* command = 0 */
	outw(PIOP1(base), fd_p);		/* end_fd->link_offset = fd_p */
	sc->end_fd = fd_p;

	return 1;
}

#ifdef WLDEBUG
static int xmt_debug = 0;
#endif /* WLDEBUG */

/*
 * wlxmt:
 *
 * This routine fills in the appropriate registers and memory
 * locations on the WaveLAN board and starts the board off on
 * the transmit.
* * input : pointers to board of interest's softc and the mbuf * output : board memory and registers are set for xfer and attention * */ static void wlxmt(struct wl_softc *sc, struct mbuf *m) { u_short xmtdata_p = OFFSET_TBUF; u_short xmtshort_p; struct mbuf *tm_p = m; struct ether_header *eh_p = mtod(m, struct ether_header *); u_char *mb_p = mtod(m, u_char *) + sizeof(struct ether_header); u_short count = m->m_len - sizeof(struct ether_header); ac_t cb; u_short tbd_p = OFFSET_TBD; u_short len, clen = 0; short base = sc->base; int spin; #ifdef WLDEBUG if (sc->ifp->if_flags & IFF_DEBUG) printf("%s: entered wlxmt()\n", sc->ifp->if_xname); #endif cb.ac_status = 0; cb.ac_command = (AC_CW_EL|AC_TRANSMIT|AC_CW_I); cb.ac_link_offset = I82586NULL; outw(PIOR1(base), OFFSET_CU); outsw(PIOP1(base), &cb, 6/2); outw(PIOP1(base), OFFSET_TBD); /* cb.cmd.transmit.tbd_offset */ outsw(PIOP1(base), eh_p->ether_dhost, WAVELAN_ADDR_SIZE/2); outw(PIOP1(base), eh_p->ether_type); #ifdef WLDEBUG if (sc->ifp->if_flags & IFF_DEBUG) { if (xmt_debug) { printf("XMT mbuf: L%d @%p ", count, (void *)mb_p); printf("ether type %x\n", eh_p->ether_type); } } #endif /* WLDEBUG */ outw(PIOR0(base), OFFSET_TBD); outw(PIOP0(base), 0); /* act_count */ outw(PIOR1(base), OFFSET_TBD + 4); outw(PIOP1(base), xmtdata_p); /* buffer_addr */ outw(PIOP1(base), 0); /* buffer_base */ for (;;) { if (count) { if (clen + count > WAVELAN_MTU) break; if (count & 1) len = count + 1; else len = count; outw(PIOR1(base), xmtdata_p); outsw(PIOP1(base), mb_p, len/2); clen += count; outw(PIOR0(base), tbd_p); /* address of act_count */ outw(PIOP0(base), inw(PIOP0(base)) + count); xmtdata_p += len; if ((tm_p = tm_p->m_next) == (struct mbuf *)0) break; if (count & 1) { /* go to the next descriptor */ outw(PIOR0(base), tbd_p + 2); tbd_p += sizeof (tbd_t); outw(PIOP0(base), tbd_p); /* next_tbd_offset */ outw(PIOR0(base), tbd_p); outw(PIOP0(base), 0); /* act_count */ outw(PIOR1(base), tbd_p + 4); outw(PIOP1(base), xmtdata_p); /* 
buffer_addr */ outw(PIOP1(base), 0); /* buffer_base */ /* at the end -> coallesce remaining mbufs */ if (tbd_p == OFFSET_TBD + (N_TBD-1) * sizeof (tbd_t)) { wlsftwsleaze(&count, &mb_p, &tm_p, sc); continue; } /* next mbuf short -> coallesce as needed */ if ( (tm_p->m_next == (struct mbuf *) 0) || #define HDW_THRESHOLD 55 tm_p->m_len > HDW_THRESHOLD) /* ok */; else { wlhdwsleaze(&count, &mb_p, &tm_p, sc); continue; } } } else if ((tm_p = tm_p->m_next) == (struct mbuf *)0) break; count = tm_p->m_len; mb_p = mtod(tm_p, u_char *); #ifdef WLDEBUG if (sc->ifp->if_flags & IFF_DEBUG) if (xmt_debug) printf("mbuf+ L%d @%p ", count, (void *)mb_p); #endif /* WLDEBUG */ } #ifdef WLDEBUG if (sc->ifp->if_flags & IFF_DEBUG) if (xmt_debug) printf("CLEN = %d\n", clen); #endif /* WLDEBUG */ outw(PIOR0(base), tbd_p); if (clen < ETHERMIN) { outw(PIOP0(base), inw(PIOP0(base)) + ETHERMIN - clen); outw(PIOR1(base), xmtdata_p); for (xmtshort_p = xmtdata_p; clen < ETHERMIN; clen += 2) outw(PIOP1(base), 0); } outw(PIOP0(base), inw(PIOP0(base)) | TBD_SW_EOF); outw(PIOR0(base), tbd_p + 2); outw(PIOP0(base), I82586NULL); #ifdef WLDEBUG if (sc->ifp->if_flags & IFF_DEBUG) { if (xmt_debug) { wltbd(sc); printf("\n"); } } #endif /* WLDEBUG */ outw(PIOR0(base), OFFSET_SCB + 2); /* address of scb_command */ /* * wait for 586 to clear previous command, complain if it takes * too long */ for (spin = 1;;spin = (spin + 1) % 10000) { if (inw(PIOP0(base)) == 0) { /* it's done, we can go */ break; } if ((spin == 0) && xmt_watch) { /* not waking up, and we care */ printf("%s: slow accepting xmit\n", sc->ifp->if_xname); } } outw(PIOP0(base), SCB_CU_STRT); /* new command */ SET_CHAN_ATTN(sc); m_freem(m); /* XXX * Pause to avoid transmit overrun problems. * The required delay tends to vary with platform type, and may be * related to interrupt loss. */ if (wl_xmit_delay) { DELAY(wl_xmit_delay); } return; } /* * wlbldru: * * This function builds the linear linked lists of fd's and * rbd's. 
 * Based on page 4-32 of 1986 Intel microcom handbook.
 *
 */
static u_short
wlbldru(struct wl_softc *sc)
{
	short	base = sc->base;
	fd_t	fd;
	rbd_t	rbd;
	u_short	fd_p = OFFSET_RU;
	u_short	rbd_p = OFFSET_RBD;
	int	i;

	/* Build the linear, EL-terminated list of N_FD frame descriptors. */
	sc->begin_fd = fd_p;
	for (i = 0; i < N_FD; i++) {
		fd.status = 0;
		fd.command = 0;
		fd.link_offset = fd_p + sizeof(fd_t);
		fd.rbd_offset = I82586NULL;
		outw(PIOR1(base), fd_p);
		outsw(PIOP1(base), &fd, 8/2);
		fd_p = fd.link_offset;
	}
	/* Back up to the last FD written and mark it end-of-list. */
	fd_p -= sizeof(fd_t);
	sc->end_fd = fd_p;
	outw(PIOR1(base), fd_p + 2);
	outw(PIOP1(base), AC_CW_EL);	/* command */
	outw(PIOP1(base), I82586NULL);	/* link_offset */
	fd_p = OFFSET_RU;

	/* Attach the RBD list to the first FD, then build the RBDs. */
	outw(PIOR0(base), fd_p + 6);	/* address of rbd_offset */
	outw(PIOP0(base), rbd_p);
	outw(PIOR1(base), rbd_p);
	for (i = 0; i < N_RBD; i++) {
		rbd.status = 0;
		rbd.buffer_addr = rbd_p + sizeof(rbd_t) + 2;
		rbd.buffer_base = 0;
		rbd.size = RCVBUFSIZE;
		if (i != N_RBD-1) {
			/*
			 * NOTE(review): the stride is sizeof(ru_t), not
			 * sizeof(rbd_t) -- presumably descriptor plus its
			 * inline data buffer; confirm against the board
			 * memory layout in the headers.
			 */
			rbd_p += sizeof(ru_t);
			rbd.next_rbd_offset = rbd_p;
		} else {
			/* Last RBD: null link and end-of-list in size word. */
			rbd.next_rbd_offset = I82586NULL;
			rbd.size |= AC_CW_EL;
			sc->end_rbd = rbd_p;
		}
		outsw(PIOP1(base), &rbd, sizeof(rbd_t)/2);
		outw(PIOR1(base), rbd_p);
	}

	return sc->begin_fd;
}

/*
 * wlrustrt:
 *
 * This routine starts the receive unit running.  First checks if the
 * board is actually ready, then the board is instructed to receive
 * packets again.
 *
 */
static void
wlrustrt(struct wl_softc *sc)
{
	short	base = sc->base;
	u_short	rfa;

#ifdef WLDEBUG
	if (sc->ifp->if_flags & IFF_DEBUG)
		printf("wl%d: entered wlrustrt()\n", sc->unit);
#endif
	outw(PIOR0(base), OFFSET_SCB);
	if (inw(PIOP0(base)) & SCB_RUS_READY){
		/* Receive unit is already running; nothing to do. */
		printf("wlrustrt: RUS_READY\n");
		return;
	}

	outw(PIOR0(base), OFFSET_SCB + 2);
	outw(PIOP0(base), SCB_RU_STRT);		/* command */
	/* Rebuild the receive area and hand its head to the controller. */
	rfa = wlbldru(sc);
	outw(PIOR0(base), OFFSET_SCB + 6);	/* address of scb_rfa_offset */
	outw(PIOP0(base), rfa);
	SET_CHAN_ATTN(sc);
	return;
}

/*
 * wldiag:
 *
 * This routine does a 586 op-code number 7, and obtains the
 * diagnose status for the WaveLAN.
* */ static int wldiag(struct wl_softc *sc) { short base = sc->base; short status; #ifdef WLDEBUG if (sc->ifp->if_flags & IFF_DEBUG) printf("wl%d: entered wldiag()\n", sc->unit); #endif outw(PIOR0(base), OFFSET_SCB); status = inw(PIOP0(base)); if (status & SCB_SW_INT) { /* state is 2000 which seems ok printf("wl%d diag(): unexpected initial state %\n", sc->unit, inw(PIOP0(base))); */ wlack(sc); } outw(PIOR1(base), OFFSET_CU); outw(PIOP1(base), 0); /* ac_status */ outw(PIOP1(base), AC_DIAGNOSE|AC_CW_EL);/* ac_command */ if (wlcmd(sc, "diag()") == 0) return 0; outw(PIOR0(base), OFFSET_CU); if (inw(PIOP0(base)) & 0x0800) { printf("wl%d: i82586 Self Test failed!\n", sc->unit); return 0; } return TRUE; } /* * wlconfig: * * This routine does a standard config of the WaveLAN board. * */ static int wlconfig(struct wl_softc *sc) { configure_t configure; short base = sc->base; #if MULTICAST struct ifmultiaddr *ifma; u_char *addrp; int cnt = 0; #endif /* MULTICAST */ #ifdef WLDEBUG if (sc->ifp->if_flags & IFF_DEBUG) printf("wl%d: entered wlconfig()\n", sc->unit); #endif outw(PIOR0(base), OFFSET_SCB); if (inw(PIOP0(base)) & SCB_SW_INT) { /* printf("wl%d config(): unexpected initial state %x\n", sc->unit, inw(PIOP0(base))); */ } wlack(sc); outw(PIOR1(base), OFFSET_CU); outw(PIOP1(base), 0); /* ac_status */ outw(PIOP1(base), AC_CONFIGURE|AC_CW_EL); /* ac_command */ /* jrb hack */ configure.fifolim_bytecnt = 0x080c; configure.addrlen_mode = 0x0600; configure.linprio_interframe = 0x2060; configure.slot_time = 0xf200; configure.hardware = 0x0008; /* tx even w/o CD */ configure.min_frame_len = 0x0040; #if 0 /* This is the configuration block suggested by Marc Meertens * in an e-mail message to John * Ioannidis on 10 Nov 92. 
*/ configure.fifolim_bytecnt = 0x040c; configure.addrlen_mode = 0x0600; configure.linprio_interframe = 0x2060; configure.slot_time = 0xf000; configure.hardware = 0x0008; /* tx even w/o CD */ configure.min_frame_len = 0x0040; #else /* * below is the default board configuration from p2-28 from 586 book */ configure.fifolim_bytecnt = 0x080c; configure.addrlen_mode = 0x2600; configure.linprio_interframe = 0x7820; /* IFS=120, ACS=2 */ configure.slot_time = 0xf00c; /* slottime=12 */ configure.hardware = 0x0008; /* tx even w/o CD */ configure.min_frame_len = 0x0040; #endif if (sc->mode & (MOD_PROM | MOD_ENAL)) configure.hardware |= 1; outw(PIOR1(base), OFFSET_CU + 6); outsw(PIOP1(base), &configure, sizeof(configure_t)/2); if (wlcmd(sc, "config()-configure") == 0) return 0; #if MULTICAST outw(PIOR1(base), OFFSET_CU); outw(PIOP1(base), 0); /* ac_status */ outw(PIOP1(base), AC_MCSETUP|AC_CW_EL); /* ac_command */ outw(PIOR1(base), OFFSET_CU + 8); + IF_ADDR_LOCK(sc->ifp); TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; addrp = LLADDR((struct sockaddr_dl *)ifma->ifma_addr); outw(PIOP1(base), addrp[0] + (addrp[1] << 8)); outw(PIOP1(base), addrp[2] + (addrp[3] << 8)); outw(PIOP1(base), addrp[4] + (addrp[5] << 8)); ++cnt; } + IF_ADDR_UNLOCK(sc->ifp); outw(PIOR1(base), OFFSET_CU + 6); /* mc-cnt */ outw(PIOP1(base), cnt * WAVELAN_ADDR_SIZE); if (wlcmd(sc, "config()-mcaddress") == 0) return 0; #endif /* MULTICAST */ outw(PIOR1(base), OFFSET_CU); outw(PIOP1(base), 0); /* ac_status */ outw(PIOP1(base), AC_IASETUP|AC_CW_EL); /* ac_command */ outw(PIOR1(base), OFFSET_CU + 6); outsw(PIOP1(base), IFP2ENADDR(sc->ifp), WAVELAN_ADDR_SIZE/2); if (wlcmd(sc, "config()-address") == 0) return(0); wlinitmmc(sc); return(1); } /* * wlcmd: * * Set channel attention bit and busy wait until command has * completed. Then acknowledge the command completion. 
 */
static int
wlcmd(struct wl_softc *sc, char *str)
{
	short	base = sc->base;
	int	i;

	/* Issue CU-start and raise channel attention. */
	outw(PIOR0(base), OFFSET_SCB + 2);	/* address of scb_command */
	outw(PIOP0(base), SCB_CU_STRT);
	SET_CHAN_ATTN(sc);

	/* Bounded busy-wait for the command-complete bit. */
	outw(PIOR0(base), OFFSET_CU);
	for (i = 0; i < 0xffff; i++)
		if (inw(PIOP0(base)) & AC_SW_C)
			break;
	if (i == 0xffff || !(inw(PIOP0(base)) & AC_SW_OK)) {
		/*
		 * Timed out, or the command completed without OK status:
		 * identify the failing caller (str) and dump controller
		 * state for diagnosis, then report failure.
		 */
		printf("wl%d: %s failed; status = %d, inw = %x, outw = %x\n",
		    sc->unit, str, inw(PIOP0(base)) & AC_SW_OK,
		    inw(PIOP0(base)), inw(PIOR0(base)));
		outw(PIOR0(base), OFFSET_SCB);
		printf("scb_status %x\n", inw(PIOP0(base)));
		outw(PIOR0(base), OFFSET_SCB+2);
		printf("scb_command %x\n", inw(PIOP0(base)));
		outw(PIOR0(base), OFFSET_SCB+4);
		printf("scb_cbl %x\n", inw(PIOP0(base)));
		outw(PIOR0(base), OFFSET_CU+2);
		printf("cu_cmd %x\n", inw(PIOP0(base)));
		return(0);
	}

	outw(PIOR0(base), OFFSET_SCB);
	if ((inw(PIOP0(base)) & SCB_SW_INT) && (inw(PIOP0(base)) != SCB_SW_CNA)) {
		/*
		printf("wl%d %s: unexpected final state %x\n",
		       sc->unit, str, inw(PIOP0(base)));
		*/
	}
	/* Acknowledge whatever interrupt condition the command left. */
	wlack(sc);
	return(TRUE);
}

/*
 * wlack: if the 82596 wants attention because it has finished
 * sending or receiving a packet, acknowledge its desire and
 * return bits indicating the kind of attention. wlack() returns
 * these bits so that the caller can service exactly the
 * conditions that wlack() acknowledged.
*/ static int wlack(struct wl_softc *sc) { int i; u_short cmd; short base = sc->base; outw(PIOR1(base), OFFSET_SCB); if (!(cmd = (inw(PIOP1(base)) & SCB_SW_INT))) return(0); #ifdef WLDEBUG if (sc->ifp->if_flags & IFF_DEBUG) printf("wl%d: doing a wlack()\n", sc->unit); #endif outw(PIOP1(base), cmd); SET_CHAN_ATTN(sc); outw(PIOR0(base), OFFSET_SCB + 2); /* address of scb_command */ for (i = 1000000; inw(PIOP0(base)) && (i-- > 0); ) continue; if (i < 1) printf("wl%d wlack(): board not accepting command.\n", sc->unit); return(cmd); } #ifdef WLDEBUG static void wltbd(struct wl_softc *sc) { short base = sc->base; u_short tbd_p = OFFSET_TBD; tbd_t tbd; int i = 0; int sum = 0; for (;;) { outw(PIOR1(base), tbd_p); insw(PIOP1(base), &tbd, sizeof(tbd_t)/2); sum += (tbd.act_count & ~TBD_SW_EOF); printf("%d: addr %x, count %d (%d), next %x, base %x\n", i++, tbd.buffer_addr, (tbd.act_count & ~TBD_SW_EOF), sum, tbd.next_tbd_offset, tbd.buffer_base); if (tbd.act_count & TBD_SW_EOF) break; tbd_p = tbd.next_tbd_offset; } } #endif static void wlhdwsleaze(u_short *countp, u_char **mb_pp, struct mbuf **tm_pp, struct wl_softc *sc) { struct mbuf *tm_p = *tm_pp; u_char *mb_p = *mb_pp; u_short count = 0; u_char *cp; int len; /* * can we get a run that will be coallesced or * that terminates before breaking */ do { count += tm_p->m_len; if (tm_p->m_len & 1) break; } while ((tm_p = tm_p->m_next) != (struct mbuf *)0); if ( (tm_p == (struct mbuf *)0) || count > HDW_THRESHOLD) { *countp = (*tm_pp)->m_len; *mb_pp = mtod((*tm_pp), u_char *); return; } /* we need to copy */ tm_p = *tm_pp; mb_p = *mb_pp; count = 0; cp = (u_char *) t_packet; for (;;) { bcopy(mtod(tm_p, u_char *), cp, len = tm_p->m_len); count += len; if (count > HDW_THRESHOLD) break; cp += len; if (tm_p->m_next == (struct mbuf *)0) break; tm_p = tm_p->m_next; } *countp = count; *mb_pp = (u_char *) t_packet; *tm_pp = tm_p; return; } static void wlsftwsleaze(u_short *countp, u_char **mb_pp, struct mbuf **tm_pp, struct wl_softc *sc) { 
struct mbuf *tm_p = *tm_pp; u_short count = 0; u_char *cp = (u_char *) t_packet; int len; /* we need to copy */ for (;;) { bcopy(mtod(tm_p, u_char *), cp, len = tm_p->m_len); count += len; cp += len; if (tm_p->m_next == (struct mbuf *)0) break; tm_p = tm_p->m_next; } *countp = count; *mb_pp = (u_char *) t_packet; *tm_pp = tm_p; return; } static void wlmmcstat(struct wl_softc *sc) { short base = sc->base; u_short tmp; printf("wl%d: DCE_STATUS: 0x%x, ", sc->unit, wlmmcread(base,MMC_DCE_STATUS) & 0x0f); tmp = wlmmcread(base,MMC_CORRECT_NWID_H) << 8; tmp |= wlmmcread(base,MMC_CORRECT_NWID_L); printf("Correct NWID's: %d, ", tmp); tmp = wlmmcread(base,MMC_WRONG_NWID_H) << 8; tmp |= wlmmcread(base,MMC_WRONG_NWID_L); printf("Wrong NWID's: %d\n", tmp); printf("THR_PRE_SET: 0x%x, ", wlmmcread(base,MMC_THR_PRE_SET)); printf("SIGNAL_LVL: %d, SILENCE_LVL: %d\n", wlmmcread(base,MMC_SIGNAL_LVL), wlmmcread(base,MMC_SILENCE_LVL)); printf("SIGN_QUAL: 0x%x, NETW_ID: %x:%x, DES: %d\n", wlmmcread(base,MMC_SIGN_QUAL), wlmmcread(base,MMC_NETW_ID_H), wlmmcread(base,MMC_NETW_ID_L), wlmmcread(base,MMC_DES_AVAIL)); } static u_short wlmmcread(u_int base, u_short reg) { while (inw(HASR(base)) & HASR_MMC_BUSY) continue; outw(MMCR(base),reg << 1); while (inw(HASR(base)) & HASR_MMC_BUSY) continue; return (u_short)inw(MMCR(base)) >> 8; } static void getsnr(struct wl_softc *sc) { MMC_WRITE(MMC_FREEZE,1); /* * SNR retrieval procedure : * * read signal level : wlmmcread(base, MMC_SIGNAL_LVL); * read silence level : wlmmcread(base, MMC_SILENCE_LVL); */ MMC_WRITE(MMC_FREEZE,0); /* * SNR is signal:silence ratio. 
*/ } /* ** wlgetpsa ** ** Reads the psa for the wavelan at (base) into (buf) */ static void wlgetpsa(int base, u_char *buf) { int i; PCMD(base, HACR_DEFAULT & ~HACR_16BITS); PCMD(base, HACR_DEFAULT & ~HACR_16BITS); for (i = 0; i < 0x40; i++) { outw(PIOR2(base), i); buf[i] = inb(PIOP2(base)); } PCMD(base, HACR_DEFAULT); PCMD(base, HACR_DEFAULT); } /* ** wlsetpsa ** ** Writes the psa for wavelan (unit) from the softc back to the ** board. Updates the CRC and sets the CRC OK flag. ** ** Do not call this when the board is operating, as it doesn't ** preserve the hacr. */ static void wlsetpsa(struct wl_softc *sc) { short base = sc->base; int i, oldpri; u_short crc; crc = wlpsacrc(sc->psa); /* calculate CRC of PSA */ sc->psa[WLPSA_CRCLOW] = crc & 0xff; sc->psa[WLPSA_CRCHIGH] = (crc >> 8) & 0xff; sc->psa[WLPSA_CRCOK] = 0x55; /* default to 'bad' until programming complete */ oldpri = splimp(); /* ick, long pause */ PCMD(base, HACR_DEFAULT & ~HACR_16BITS); PCMD(base, HACR_DEFAULT & ~HACR_16BITS); for (i = 0; i < 0x40; i++) { DELAY(DELAYCONST); outw(PIOR2(base),i); /* write param memory */ DELAY(DELAYCONST); outb(PIOP2(base), sc->psa[i]); } DELAY(DELAYCONST); outw(PIOR2(base),WLPSA_CRCOK); /* update CRC flag*/ DELAY(DELAYCONST); sc->psa[WLPSA_CRCOK] = 0xaa; /* OK now */ outb(PIOP2(base), 0xaa); /* all OK */ DELAY(DELAYCONST); PCMD(base, HACR_DEFAULT); PCMD(base, HACR_DEFAULT); splx(oldpri); } /* ** CRC routine provided by Christopher Giordano , ** from original code by Tomi Mikkonen (tomitm@remedy.fi) */ static u_int crc16_table[16] = { 0x0000, 0xCC01, 0xD801, 0x1400, 0xF001, 0x3C00, 0x2800, 0xE401, 0xA001, 0x6C00, 0x7800, 0xB401, 0x5000, 0x9C01, 0x8801, 0x4400 }; static u_short wlpsacrc(u_char *buf) { u_short crc = 0; int i, r1; for (i = 0; i < 0x3d; i++, buf++) { /* lower 4 bits */ r1 = crc16_table[crc & 0xF]; crc = (crc >> 4) & 0x0FFF; crc = crc ^ r1 ^ crc16_table[*buf & 0xF]; /* upper 4 bits */ r1 = crc16_table[crc & 0xF]; crc = (crc >> 4) & 0x0FFF; crc = crc ^ r1 ^ 
crc16_table[(*buf >> 4) & 0xF]; } return(crc); } #ifdef WLCACHE /* * wl_cache_store * * take input packet and cache various radio hw characteristics * indexed by MAC address. * * Some things to think about: * note that no space is malloced. * We might hash the mac address if the cache were bigger. * It is not clear that the cache is big enough. * It is also not clear how big it should be. * The cache is IP-specific. We don't care about that as * we want it to be IP-specific. * The last N recv. packets are saved. This will tend * to reward agents and mobile hosts that beacon. * That is probably fine for mobile ip. */ /* globals for wavelan signal strength cache */ /* this should go into softc structure above. */ /* set true if you want to limit cache items to broadcast/mcast * only packets (not unicast) */ static int wl_cache_mcastonly = 1; SYSCTL_INT(_machdep, OID_AUTO, wl_cache_mcastonly, CTLFLAG_RW, &wl_cache_mcastonly, 0, ""); /* set true if you want to limit cache items to IP packets only */ static int wl_cache_iponly = 1; SYSCTL_INT(_machdep, OID_AUTO, wl_cache_iponly, CTLFLAG_RW, &wl_cache_iponly, 0, ""); /* zero out the cache */ static void wl_cache_zero(struct wl_softc *sc) { bzero(&sc->w_sigcache[0], sizeof(struct w_sigcache) * MAXCACHEITEMS); sc->w_sigitems = 0; sc->w_nextcache = 0; sc->w_wrapindex = 0; } /* store hw signal info in cache. * index is MAC address, but an ip src gets stored too * There are two filters here controllable via sysctl: * throw out unicast (on by default, but can be turned off) * throw out non-ip (on by default, but can be turned off) */ static void wl_cache_store (struct wl_softc *sc, int base, struct ether_header *eh, struct mbuf *m) { #ifdef INET struct ip *ip = NULL; /* Avoid GCC warning */ int i; int signal, silence; int w_insertcache; /* computed index for cache entry storage */ int ipflag = wl_cache_iponly; #endif /* filters: * 1. ip only * 2. configurable filter to throw out unicast packets, * keep multicast only. 
*/ #ifdef INET /* reject if not IP packet */ if ( wl_cache_iponly && (ntohs(eh->ether_type) != 0x800)) { return; } /* check if broadcast or multicast packet. we toss * unicast packets */ if (wl_cache_mcastonly && ((eh->ether_dhost[0] & 1) == 0)) { return; } /* find the ip header. we want to store the ip_src * address. use the mtod macro(in mbuf.h) * to typecast m to struct ip * */ if (ipflag) { ip = mtod(m, struct ip *); } /* do a linear search for a matching MAC address * in the cache table * . MAC address is 6 bytes, * . var w_nextcache holds total number of entries already cached */ for (i = 0; i < sc->w_nextcache; i++) { if (! bcmp(eh->ether_shost, sc->w_sigcache[i].macsrc, 6 )) { /* Match!, * so we already have this entry, * update the data, and LRU age */ break; } } /* did we find a matching mac address? * if yes, then overwrite a previously existing cache entry */ if (i < sc->w_nextcache ) { w_insertcache = i; } /* else, have a new address entry,so * add this new entry, * if table full, then we need to replace entry */ else { /* check for space in cache table * note: w_nextcache also holds number of entries * added in the cache table */ if ( sc->w_nextcache < MAXCACHEITEMS ) { w_insertcache = sc->w_nextcache; sc->w_nextcache++; sc->w_sigitems = sc->w_nextcache; } /* no space found, so simply wrap with wrap index * and "zap" the next entry */ else { if (sc->w_wrapindex == MAXCACHEITEMS) { sc->w_wrapindex = 0; } w_insertcache = sc->w_wrapindex++; } } /* invariant: w_insertcache now points at some slot * in cache. 
*/ if (w_insertcache < 0 || w_insertcache >= MAXCACHEITEMS) { log(LOG_ERR, "wl_cache_store, bad index: %d of [0..%d], gross cache error\n", w_insertcache, MAXCACHEITEMS); return; } /* store items in cache * .ipsrc * .macsrc * .signal (0..63) ,silence (0..63) ,quality (0..15) */ if (ipflag) { sc->w_sigcache[w_insertcache].ipsrc = ip->ip_src.s_addr; } bcopy( eh->ether_shost, sc->w_sigcache[w_insertcache].macsrc, 6); signal = sc->w_sigcache[w_insertcache].signal = wlmmcread(base, MMC_SIGNAL_LVL) & 0x3f; silence = sc->w_sigcache[w_insertcache].silence = wlmmcread(base, MMC_SILENCE_LVL) & 0x3f; sc->w_sigcache[w_insertcache].quality = wlmmcread(base, MMC_SIGN_QUAL) & 0x0f; if (signal > 0) sc->w_sigcache[w_insertcache].snr = signal - silence; else sc->w_sigcache[w_insertcache].snr = 0; #endif /* INET */ } #endif /* WLCACHE */ Index: stable/6/sys/dev/xe/if_xe.c =================================================================== --- stable/6/sys/dev/xe/if_xe.c (revision 149421) +++ stable/6/sys/dev/xe/if_xe.c (revision 149422) @@ -1,1968 +1,1970 @@ /*- * Copyright (c) 1998, 1999, 2003 Scott Mitchell * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Portions of this software were derived from Werner Koch's xirc2ps driver * for Linux under the terms of the following license (from v1.30 of the * xirc2ps driver): * * Copyright (c) 1997 by Werner Koch (dd9jn) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, and the entire permission notice in its entirety, * including the disclaimer of warranties. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote * products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * FreeBSD device driver for Xircom CreditCard PCMCIA Ethernet adapters. The * following cards are currently known to work with the driver: * Xircom CreditCard 10/100 (CE3) * Xircom CreditCard Ethernet + Modem 28 (CEM28) * Xircom CreditCard Ethernet 10/100 + Modem 56 (CEM56) * Xircom RealPort Ethernet 10 * Xircom RealPort Ethernet 10/100 * Xircom RealPort Ethernet 10/100 + Modem 56 (REM56, REM56G) * Intel EtherExpress Pro/100 PC Card Mobile Adapter 16 (Pro/100 M16A) * Compaq Netelligent 10/100 PC Card (CPQ-10/100) * * Some other cards *should* work, but support for them is either broken or in * an unknown state at the moment. I'm always interested in hearing from * people who own any of these cards: * Xircom CreditCard 10Base-T (PS-CE2-10) * Xircom CreditCard Ethernet + ModemII (CEM2) * Xircom CEM28 and CEM33 Ethernet/Modem cards (may be variants of CEM2?) * * Thanks to all who assisted with the development and testing of the driver, * especially: Werner Koch, Duke Kamstra, Duncan Barclay, Jason George, Dru * Nelson, Mike Kephart, Bill Rainey and Douglas Rand. Apologies if I've left * out anyone who deserves a mention here. * * Special thanks to Ade Lovett for both hosting the mailing list and doing * the CEM56/REM56 support code; and the FreeBSD UK Users' Group for hosting * the web pages. 
* * Author email: * Driver web page: http://ukug.uk.freebsd.org/~scott/xe_drv/ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * MII command structure */ struct xe_mii_frame { u_int8_t mii_stdelim; u_int8_t mii_opcode; u_int8_t mii_phyaddr; u_int8_t mii_regaddr; u_int8_t mii_turnaround; u_int16_t mii_data; }; /* * Media autonegotiation progress constants */ #define XE_AUTONEG_NONE 0 /* No autonegotiation in progress */ #define XE_AUTONEG_WAITING 1 /* Waiting for transmitter to go idle */ #define XE_AUTONEG_STARTED 2 /* Waiting for autonegotiation to complete */ #define XE_AUTONEG_100TX 3 /* Trying to force 100baseTX link */ #define XE_AUTONEG_FAIL 4 /* Autonegotiation failed */ /* * Prototypes start here */ static void xe_init (void *xscp); static void xe_start (struct ifnet *ifp); static int xe_ioctl (struct ifnet *ifp, u_long command, caddr_t data); static void xe_watchdog (struct ifnet *ifp); static int xe_media_change (struct ifnet *ifp); static void xe_media_status (struct ifnet *ifp, struct ifmediareq *mrp); static timeout_t xe_setmedia; static void xe_reset (struct xe_softc *scp); static void xe_stop (struct xe_softc *scp); static void xe_enable_intr (struct xe_softc *scp); static void xe_disable_intr (struct xe_softc *scp); static void xe_set_multicast (struct xe_softc *scp); static void xe_set_addr (struct xe_softc *scp, u_int8_t* addr, unsigned idx); static void xe_mchash (struct xe_softc *scp, const uint8_t *addr); static int xe_pio_write_packet (struct xe_softc *scp, struct mbuf *mbp); /* * MII functions */ static void xe_mii_sync (struct xe_softc *scp); static int xe_mii_init (struct xe_softc *scp); static void xe_mii_send (struct xe_softc *scp, u_int32_t bits, int cnt); static int xe_mii_readreg (struct xe_softc *scp, struct xe_mii_frame *frame); static int 
xe_mii_writereg (struct xe_softc *scp, struct xe_mii_frame *frame); static u_int16_t xe_phy_readreg (struct xe_softc *scp, u_int16_t reg); static void xe_phy_writereg (struct xe_softc *scp, u_int16_t reg, u_int16_t data); /* * Debugging functions */ static void xe_mii_dump (struct xe_softc *scp); #if 0 static void xe_reg_dump (struct xe_softc *scp); #endif /* * Debug logging levels - set with hw.xe.debug sysctl * 0 = None * 1 = More hardware details, probe/attach progress * 2 = Most function calls, ioctls and media selection progress * 3 = Everything - interrupts, packets in/out and multicast address setup */ #define XE_DEBUG #ifdef XE_DEBUG /* sysctl vars */ SYSCTL_NODE(_hw, OID_AUTO, xe, CTLFLAG_RD, 0, "if_xe parameters"); int xe_debug = 0; SYSCTL_INT(_hw_xe, OID_AUTO, debug, CTLFLAG_RW, &xe_debug, 0, "if_xe debug level"); #define DEVPRINTF(level, arg) if (xe_debug >= (level)) device_printf arg #define DPRINTF(level, arg) if (xe_debug >= (level)) printf arg #define XE_MII_DUMP(scp) if (xe_debug >= 3) xe_mii_dump(scp) #if 0 #define XE_REG_DUMP(scp) if (xe_debug >= 3) xe_reg_dump(scp) #endif #else #define DEVPRINTF(level, arg) #define DPRINTF(level, arg) #define XE_MII_DUMP(scp) #if 0 #define XE_REG_DUMP(scp) #endif #endif /* * Attach a device. */ int xe_attach (device_t dev) { struct xe_softc *scp = device_get_softc(dev); DEVPRINTF(2, (dev, "attach\n")); /* Initialise stuff... 
*/ scp->dev = dev; scp->ifp = if_alloc(IFT_ETHER); if (scp->ifp == NULL) return ENOSPC; scp->ifm = &scp->ifmedia; scp->autoneg_status = XE_AUTONEG_NONE; /* Initialise the ifnet structure */ scp->ifp->if_softc = scp; if_initname(scp->ifp, device_get_name(dev), device_get_unit(dev)); scp->ifp->if_timer = 0; scp->ifp->if_flags = (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_NEEDSGIANT); scp->ifp->if_linkmib = &scp->mibdata; scp->ifp->if_linkmiblen = sizeof scp->mibdata; scp->ifp->if_start = xe_start; scp->ifp->if_ioctl = xe_ioctl; scp->ifp->if_watchdog = xe_watchdog; scp->ifp->if_init = xe_init; scp->ifp->if_baudrate = 100000000; scp->ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; /* Initialise the ifmedia structure */ ifmedia_init(scp->ifm, 0, xe_media_change, xe_media_status); callout_handle_init(&scp->chand); /* Add supported media types */ if (scp->mohawk) { ifmedia_add(scp->ifm, IFM_ETHER|IFM_100_TX, 0, NULL); ifmedia_add(scp->ifm, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); ifmedia_add(scp->ifm, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL); } ifmedia_add(scp->ifm, IFM_ETHER|IFM_10_T, 0, NULL); if (scp->ce2) ifmedia_add(scp->ifm, IFM_ETHER|IFM_10_2, 0, NULL); ifmedia_add(scp->ifm, IFM_ETHER|IFM_AUTO, 0, NULL); /* Default is to autoselect best supported media type */ ifmedia_set(scp->ifm, IFM_ETHER|IFM_AUTO); /* Get the hardware into a known state */ xe_reset(scp); /* Get hardware version numbers */ XE_SELECT_PAGE(4); scp->version = XE_INB(XE_BOV); if (scp->mohawk) scp->srev = (XE_INB(XE_BOV) & 0x70) >> 4; else scp->srev = (XE_INB(XE_BOV) & 0x30) >> 4; /* Print some useful information */ device_printf(dev, "%s %s, version 0x%02x/0x%02x%s%s\n", scp->vendor, scp->card_type, scp->version, scp->srev, scp->mohawk ? ", 100Mbps capable" : "", scp->modem ? 
", with modem" : ""); if (scp->mohawk) { XE_SELECT_PAGE(0x10); DEVPRINTF(1, (dev, "DingoID=0x%04x, RevisionID=0x%04x, VendorID=0x%04x\n", XE_INW(XE_DINGOID), XE_INW(XE_RevID), XE_INW(XE_VendorID))); } if (scp->ce2) { XE_SELECT_PAGE(0x45); DEVPRINTF(1, (dev, "CE2 version = 0x%#02x\n", XE_INB(XE_REV))); } /* Attach the interface */ ether_ifattach(scp->ifp, scp->enaddr); /* Done */ return 0; } /* * Complete hardware intitialisation and enable output. Exits without doing * anything if there's no address assigned to the card, or if media selection * is in progress (the latter implies we've already run this function). */ static void xe_init(void *xscp) { struct xe_softc *scp = xscp; unsigned i; int s; if (scp->autoneg_status != XE_AUTONEG_NONE) return; DEVPRINTF(2, (scp->dev, "init\n")); s = splimp(); /* Reset transmitter flags */ scp->tx_queued = 0; scp->tx_tpr = 0; scp->tx_timeouts = 0; scp->tx_thres = 64; scp->tx_min = ETHER_MIN_LEN - ETHER_CRC_LEN; scp->ifp->if_timer = 0; /* Soft reset the card */ XE_SELECT_PAGE(0); XE_OUTB(XE_CR, XE_CR_SOFT_RESET); DELAY(40000); XE_OUTB(XE_CR, 0); DELAY(40000); if (scp->mohawk) { /* * set GP1 and GP2 as outputs (bits 2 & 3) * set GP1 low to power on the ML6692 (bit 0) * set GP2 high to power on the 10Mhz chip (bit 1) */ XE_SELECT_PAGE(4); XE_OUTB(XE_GPR0, XE_GPR0_GP2_SELECT|XE_GPR0_GP1_SELECT|XE_GPR0_GP2_OUT); } /* Shut off interrupts */ xe_disable_intr(scp); /* Wait for everything to wake up */ DELAY(500000); /* Check for PHY */ if (scp->mohawk) scp->phy_ok = xe_mii_init(scp); /* Disable 'source insertion' (not sure what that means) */ XE_SELECT_PAGE(0x42); XE_OUTB(XE_SWC0, XE_SWC0_NO_SRC_INSERT); /* Set 8K/24K Tx/Rx buffer split */ if (scp->srev != 1) { XE_SELECT_PAGE(2); XE_OUTW(XE_RBS, 0x2000); } /* Enable early transmit mode on Mohawk/Dingo */ if (scp->mohawk) { XE_SELECT_PAGE(0x03); XE_OUTW(XE_TPT, scp->tx_thres); XE_SELECT_PAGE(0x01); XE_OUTB(XE_ECR, XE_INB(XE_ECR) | XE_ECR_EARLY_TX); } /* Put MAC address in first 'individual 
address' register */ XE_SELECT_PAGE(0x50); for (i = 0; i < 6; i++) XE_OUTB(0x08 + i, IFP2ENADDR(scp->ifp)[scp->mohawk ? 5 - i : i]); /* Set up multicast addresses */ xe_set_multicast(scp); /* Fix the receive data offset -- reset can leave it off-by-one */ XE_SELECT_PAGE(0); XE_OUTW(XE_DO, 0x2000); /* Set interrupt masks */ XE_SELECT_PAGE(1); XE_OUTB(XE_IMR0, XE_IMR0_TX_PACKET | XE_IMR0_MAC_INTR | XE_IMR0_RX_PACKET); /* Set MAC interrupt masks */ XE_SELECT_PAGE(0x40); XE_OUTB(XE_RX0Msk, ~(XE_RX0M_RX_OVERRUN | XE_RX0M_CRC_ERROR | XE_RX0M_ALIGN_ERROR | XE_RX0M_LONG_PACKET)); XE_OUTB(XE_TX0Msk, ~(XE_TX0M_SQE_FAIL | XE_TX0M_LATE_COLLISION | XE_TX0M_TX_UNDERRUN | XE_TX0M_16_COLLISIONS | XE_TX0M_NO_CARRIER)); /* Clear MAC status registers */ XE_SELECT_PAGE(0x40); XE_OUTB(XE_RST0, 0x00); XE_OUTB(XE_TXST0, 0x00); /* Enable receiver and put MAC online */ XE_SELECT_PAGE(0x40); XE_OUTB(XE_CMD0, XE_CMD0_RX_ENABLE|XE_CMD0_ONLINE); /* Set up IMR, enable interrupts */ xe_enable_intr(scp); /* Start media selection */ xe_setmedia(scp); /* Enable output */ scp->ifp->if_flags |= IFF_RUNNING; scp->ifp->if_flags &= ~IFF_OACTIVE; (void)splx(s); } /* * Start output on interface. Should be called at splimp() priority. Check * that the output is idle (ie, IFF_OACTIVE is not set) before calling this * function. If media selection is in progress we set IFF_OACTIVE ourselves * and return immediately. */ static void xe_start(struct ifnet *ifp) { struct xe_softc *scp = ifp->if_softc; struct mbuf *mbp; if (scp->autoneg_status != XE_AUTONEG_NONE) { ifp->if_flags |= IFF_OACTIVE; return; } DEVPRINTF(3, (scp->dev, "start\n")); /* * Loop while there are packets to be sent, and space to send them. */ while (1) { /* Suck a packet off the send queue */ IF_DEQUEUE(&ifp->if_snd, mbp); if (mbp == NULL) { /* * We are using the !OACTIVE flag to indicate to the outside world that * we can accept an additional packet rather than that the transmitter * is _actually_ active. 
Indeed, the transmitter may be active, but if * we haven't filled all the buffers with data then we still want to * accept more. */ ifp->if_flags &= ~IFF_OACTIVE; return; } if (xe_pio_write_packet(scp, mbp) != 0) { /* Push the packet back onto the queue */ IF_PREPEND(&ifp->if_snd, mbp); ifp->if_flags |= IFF_OACTIVE; return; } /* Tap off here if there is a bpf listener */ BPF_MTAP(ifp, mbp); /* In case we don't hear from the card again... */ ifp->if_timer = 5; scp->tx_queued++; m_freem(mbp); } } /* * Process an ioctl request. Adapted from the ed driver. */ static int xe_ioctl (register struct ifnet *ifp, u_long command, caddr_t data) { struct xe_softc *scp; int s, error; scp = ifp->if_softc; error = 0; s = splimp(); switch (command) { case SIOCSIFFLAGS: DEVPRINTF(2, (scp->dev, "ioctl: SIOCSIFFLAGS: 0x%04x\n", ifp->if_flags)); /* * If the interface is marked up and stopped, then start it. If it is * marked down and running, then stop it. */ if (ifp->if_flags & IFF_UP) { if (!(ifp->if_flags & IFF_RUNNING)) { xe_reset(scp); xe_init(scp); } } else { if (ifp->if_flags & IFF_RUNNING) xe_stop(scp); } /* FALL THROUGH (handle changes to PROMISC/ALLMULTI flags) */ case SIOCADDMULTI: case SIOCDELMULTI: DEVPRINTF(2, (scp->dev, "ioctl: SIOC{ADD,DEL}MULTI\n")); /* * Multicast list has (maybe) changed; set the hardware filters * accordingly. */ xe_set_multicast(scp); error = 0; break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: DEVPRINTF(3, (scp->dev, "ioctl: bounce to ifmedia_ioctl\n")); /* * Someone wants to get/set media options. */ error = ifmedia_ioctl(ifp, (struct ifreq *)data, &scp->ifmedia, command); break; default: DEVPRINTF(3, (scp->dev, "ioctl: bounce to ether_ioctl\n")); error = ether_ioctl(ifp, command, data); } (void)splx(s); return error; } /* * Card interrupt handler. * * This function is probably more complicated than it needs to be, as it * attempts to deal with the case where multiple packets get sent between * interrupts. 
This is especially annoying when working out the collision * stats. Not sure whether this case ever really happens or not (maybe on a * slow/heavily loaded machine?) so it's probably best to leave this like it * is. * * Note that the crappy PIO used to get packets on and off the card means that * you will spend a lot of time in this routine -- I can get my P150 to spend * 90% of its time servicing interrupts if I really hammer the network. Could * fix this, but then you'd start dropping/losing packets. The moral of this * story? If you want good network performance _and_ some cycles left over to * get your work done, don't buy a Xircom card. Or convince them to tell me * how to do memory-mapped I/O :) */ static void xe_intr(void *xscp) { struct xe_softc *scp = (struct xe_softc *) xscp; struct ifnet *ifp; u_int8_t psr, isr, esr, rsr, rst0, txst0, txst1, coll; ifp = scp->ifp; /* Disable interrupts */ if (scp->mohawk) XE_OUTB(XE_CR, 0); /* Cache current register page */ psr = XE_INB(XE_PR); /* Read ISR to see what caused this interrupt */ while ((isr = XE_INB(XE_ISR)) != 0) { /* 0xff might mean the card is no longer around */ if (isr == 0xff) { DEVPRINTF(3, (scp->dev, "intr: interrupt received for missing card?\n")); break; } /* Read other status registers */ XE_SELECT_PAGE(0x40); rst0 = XE_INB(XE_RST0); XE_OUTB(XE_RST0, 0); txst0 = XE_INB(XE_TXST0); txst1 = XE_INB(XE_TXST1); coll = txst1 & XE_TXST1_RETRY_COUNT; XE_OUTB(XE_TXST0, 0); XE_OUTB(XE_TXST1, 0); XE_SELECT_PAGE(0); DEVPRINTF(3, (scp->dev, "intr: ISR=0x%02x, RST=0x%02x, TXT=0x%02x%02x, COLL=0x%01x\n", isr, rst0, txst1, txst0, coll)); if (isr & XE_ISR_TX_PACKET) { u_int8_t tpr, sent; /* Update packet count, accounting for rollover */ tpr = XE_INB(XE_TPR); sent = -scp->tx_tpr + tpr; /* Update statistics if we actually sent anything */ if (sent > 0) { scp->tx_tpr = tpr; scp->tx_queued -= sent; ifp->if_opackets += sent; ifp->if_collisions += coll; /* * According to the Xircom manual, Dingo will sometimes manage to 
transmit a packet without triggering an interrupt.
complain about it */ if (txst0 & XE_TXST0_SQE_FAIL) { device_printf(scp->dev, "SQE test failure\n"); ifp->if_oerrors++; scp->mibdata.dot3StatsSQETestErrors++; } /* Packet too long -- what happens to these */ if (rst0 & XE_RST0_LONG_PACKET) { device_printf(scp->dev, "received giant packet\n"); ifp->if_ierrors++; scp->mibdata.dot3StatsFrameTooLongs++; } /* CRC error -- packet dropped */ if (rst0 & XE_RST0_CRC_ERROR) { device_printf(scp->dev, "CRC error\n"); ifp->if_ierrors++; scp->mibdata.dot3StatsFCSErrors++; } } /* Handle received packet(s) */ while ((esr = XE_INB(XE_ESR)) & XE_ESR_FULL_PACKET_RX) { rsr = XE_INB(XE_RSR); DEVPRINTF(3, (scp->dev, "intr: ESR=0x%02x, RSR=0x%02x\n", esr, rsr)); /* Make sure packet is a good one */ if (rsr & XE_RSR_RX_OK) { struct ether_header *ehp; struct mbuf *mbp; u_int16_t len; len = XE_INW(XE_RBC) - ETHER_CRC_LEN; DEVPRINTF(3, (scp->dev, "intr: receive length = %d\n", len)); if (len == 0) { ifp->if_iqdrops++; continue; } /* * Allocate mbuf to hold received packet. If the mbuf header isn't * big enough, we attach an mbuf cluster to hold the packet. Note the * +=2 to align the packet data on a 32-bit boundary, and the +3 to * allow for the possibility of reading one more byte than the actual * packet length (we always read 16-bit words). * XXX - Surely there's a better way to do this alignment? */ MGETHDR(mbp, M_DONTWAIT, MT_DATA); if (mbp == NULL) { ifp->if_iqdrops++; continue; } if (len + 3 > MHLEN) { MCLGET(mbp, M_DONTWAIT); if ((mbp->m_flags & M_EXT) == 0) { m_freem(mbp); ifp->if_iqdrops++; continue; } } mbp->m_data += 2; ehp = mtod(mbp, struct ether_header *); /* * Now get the packet in PIO mode, including the Ethernet header but * omitting the trailing CRC. */ /* * Work around a bug in CE2 cards. There seems to be a problem with * duplicated and extraneous bytes in the receive buffer, but without * any real documentation for the CE2 it's hard to tell for sure. 
* XXX - Needs testing on CE2 hardware */ if (scp->srev == 0) { u_short rhs; XE_SELECT_PAGE(5); rhs = XE_INW(XE_RHSA); XE_SELECT_PAGE(0); rhs += 3; /* Skip control info */ if (rhs >= 0x8000) rhs = 0; if (rhs + len > 0x8000) { int i; for (i = 0; i < len; i++, rhs++) { ((char *)ehp)[i] = XE_INB(XE_EDP); if (rhs == 0x8000) { rhs = 0; i--; } } } else bus_space_read_multi_2(scp->bst, scp->bsh, XE_EDP, (u_int16_t *) ehp, (len + 1) >> 1); } else bus_space_read_multi_2(scp->bst, scp->bsh, XE_EDP, (u_int16_t *) ehp, (len + 1) >> 1); /* Deliver packet to upper layers */ mbp->m_pkthdr.rcvif = ifp; mbp->m_pkthdr.len = mbp->m_len = len; (*ifp->if_input)(ifp, mbp); ifp->if_ipackets++; } /* Packet alignment error -- drop packet */ else if (rsr & XE_RSR_ALIGN_ERROR) { device_printf(scp->dev, "alignment error\n"); scp->mibdata.dot3StatsAlignmentErrors++; ifp->if_ierrors++; } /* Skip to next packet, if there is one */ XE_OUTW(XE_DO, 0x8000); } /* Clear receiver overruns now we have some free buffer space */ if (rst0 & XE_RST0_RX_OVERRUN) { DEVPRINTF(1, (scp->dev, "receive overrun\n")); ifp->if_ierrors++; scp->mibdata.dot3StatsInternalMacReceiveErrors++; XE_OUTB(XE_CR, XE_CR_CLEAR_OVERRUN); } } /* Restore saved page */ XE_SELECT_PAGE(psr); /* Re-enable interrupts */ XE_OUTB(XE_CR, XE_CR_ENABLE_INTR); return; } /* * Device timeout/watchdog routine. Called automatically if we queue a packet * for transmission but don't get an interrupt within a specified timeout * (usually 5 seconds). When this happens we assume the worst and reset the * card. */ static void xe_watchdog(struct ifnet *ifp) { struct xe_softc *scp = ifp->if_softc; device_printf(scp->dev, "watchdog timeout: resetting card\n"); scp->tx_timeouts++; ifp->if_oerrors += scp->tx_queued; xe_stop(scp); xe_reset(scp); xe_init(scp); } /* * Change media selection. 
*/ static int xe_media_change(struct ifnet *ifp) { struct xe_softc *scp = ifp->if_softc; DEVPRINTF(2, (scp->dev, "media_change\n")); if (IFM_TYPE(scp->ifm->ifm_media) != IFM_ETHER) return(EINVAL); /* * Some card/media combos aren't always possible -- filter those out here. */ if ((IFM_SUBTYPE(scp->ifm->ifm_media) == IFM_AUTO || IFM_SUBTYPE(scp->ifm->ifm_media) == IFM_100_TX) && !scp->phy_ok) return (EINVAL); xe_setmedia(scp); return 0; } /* * Return current media selection. */ static void xe_media_status(struct ifnet *ifp, struct ifmediareq *mrp) { struct xe_softc *scp = ifp->if_softc; DEVPRINTF(3, (scp->dev, "media_status\n")); /* XXX - This is clearly wrong. Will fix once I have CE2 working */ mrp->ifm_status = IFM_AVALID | IFM_ACTIVE; mrp->ifm_active = ((struct xe_softc *)ifp->if_softc)->media; return; } /* * Select active media. */ static void xe_setmedia(void *xscp) { struct xe_softc *scp = xscp; u_int16_t bmcr, bmsr, anar, lpar; DEVPRINTF(2, (scp->dev, "setmedia\n")); /* Cancel any pending timeout */ untimeout(xe_setmedia, scp, scp->chand); xe_disable_intr(scp); /* Select media */ scp->media = IFM_ETHER; switch (IFM_SUBTYPE(scp->ifm->ifm_media)) { case IFM_AUTO: /* Autoselect media */ scp->media = IFM_ETHER|IFM_AUTO; /* * Autoselection is really awful. It goes something like this: * * Wait until the transmitter goes idle (2sec timeout). 
* Reset card * IF a 100Mbit PHY exists * Start NWAY autonegotiation (3.5sec timeout) * IF that succeeds * Select 100baseTX or 10baseT, whichever was detected * ELSE * Reset card * IF a 100Mbit PHY exists * Try to force a 100baseTX link (3sec timeout) * IF that succeeds * Select 100baseTX * ELSE * Disable the PHY * ENDIF * ENDIF * ENDIF * ENDIF * IF nothing selected so far * IF a 100Mbit PHY exists * Select 10baseT * ELSE * Select 10baseT or 10base2, whichever is connected * ENDIF * ENDIF */ switch (scp->autoneg_status) { case XE_AUTONEG_NONE: DEVPRINTF(2, (scp->dev, "Waiting for idle transmitter\n")); scp->ifp->if_flags |= IFF_OACTIVE; scp->autoneg_status = XE_AUTONEG_WAITING; /* FALL THROUGH */ case XE_AUTONEG_WAITING: if (scp->tx_queued != 0) { scp->chand = timeout(xe_setmedia, scp, hz/2); return; } if (scp->phy_ok) { DEVPRINTF(2, (scp->dev, "Starting autonegotiation\n")); bmcr = xe_phy_readreg(scp, PHY_BMCR); bmcr &= ~(PHY_BMCR_AUTONEGENBL); xe_phy_writereg(scp, PHY_BMCR, bmcr); anar = xe_phy_readreg(scp, PHY_ANAR); anar &= ~(PHY_ANAR_100BT4|PHY_ANAR_100BTXFULL|PHY_ANAR_10BTFULL); anar |= PHY_ANAR_100BTXHALF|PHY_ANAR_10BTHALF; xe_phy_writereg(scp, PHY_ANAR, anar); bmcr |= PHY_BMCR_AUTONEGENBL|PHY_BMCR_AUTONEGRSTR; xe_phy_writereg(scp, PHY_BMCR, bmcr); scp->autoneg_status = XE_AUTONEG_STARTED; scp->chand = timeout(xe_setmedia, scp, hz * 7/2); return; } else { scp->autoneg_status = XE_AUTONEG_FAIL; } break; case XE_AUTONEG_STARTED: bmsr = xe_phy_readreg(scp, PHY_BMSR); lpar = xe_phy_readreg(scp, PHY_LPAR); if (bmsr & (PHY_BMSR_AUTONEGCOMP|PHY_BMSR_LINKSTAT)) { DEVPRINTF(2, (scp->dev, "Autonegotiation complete!\n")); /* * XXX - Shouldn't have to do this, but (on my hub at least) the * XXX - transmitter won't work after a successful autoneg. So we see * XXX - what the negotiation result was and force that mode. I'm * XXX - sure there is an easy fix for this. 
*/ if (lpar & PHY_LPAR_100BTXHALF) { xe_phy_writereg(scp, PHY_BMCR, PHY_BMCR_SPEEDSEL); XE_MII_DUMP(scp); XE_SELECT_PAGE(2); XE_OUTB(XE_MSR, XE_INB(XE_MSR) | 0x08); scp->media = IFM_ETHER|IFM_100_TX; scp->autoneg_status = XE_AUTONEG_NONE; } else { /* * XXX - Bit of a hack going on in here. * XXX - This is derived from Ken Hughes patch to the Linux driver * XXX - to make it work with 10Mbit _autonegotiated_ links on CE3B * XXX - cards. What's a CE3B and how's it differ from a plain CE3? * XXX - these are the things we need to find out. */ xe_phy_writereg(scp, PHY_BMCR, 0x0000); XE_SELECT_PAGE(2); /* BEGIN HACK */ XE_OUTB(XE_MSR, XE_INB(XE_MSR) | 0x08); XE_SELECT_PAGE(0x42); XE_OUTB(XE_SWC1, 0x80); scp->media = IFM_ETHER|IFM_10_T; scp->autoneg_status = XE_AUTONEG_NONE; /* END HACK */ /*XE_OUTB(XE_MSR, XE_INB(XE_MSR) & ~0x08);*/ /* Disable PHY? */ /*scp->autoneg_status = XE_AUTONEG_FAIL;*/ } } else { DEVPRINTF(2, (scp->dev, "Autonegotiation failed; trying 100baseTX\n")); XE_MII_DUMP(scp); if (scp->phy_ok) { xe_phy_writereg(scp, PHY_BMCR, PHY_BMCR_SPEEDSEL); scp->autoneg_status = XE_AUTONEG_100TX; scp->chand = timeout(xe_setmedia, scp, hz * 3); return; } else { scp->autoneg_status = XE_AUTONEG_FAIL; } } break; case XE_AUTONEG_100TX: (void)xe_phy_readreg(scp, PHY_BMSR); bmsr = xe_phy_readreg(scp, PHY_BMSR); if (bmsr & PHY_BMSR_LINKSTAT) { DEVPRINTF(2, (scp->dev, "Got 100baseTX link!\n")); XE_MII_DUMP(scp); XE_SELECT_PAGE(2); XE_OUTB(XE_MSR, XE_INB(XE_MSR) | 0x08); scp->media = IFM_ETHER|IFM_100_TX; scp->autoneg_status = XE_AUTONEG_NONE; } else { DEVPRINTF(2, (scp->dev, "Autonegotiation failed; disabling PHY\n")); XE_MII_DUMP(scp); xe_phy_writereg(scp, PHY_BMCR, 0x0000); XE_SELECT_PAGE(2); XE_OUTB(XE_MSR, XE_INB(XE_MSR) & ~0x08); /* Disable PHY? */ scp->autoneg_status = XE_AUTONEG_FAIL; } break; } /* * If we got down here _and_ autoneg_status is XE_AUTONEG_FAIL, then * either autonegotiation failed, or never got started to begin with. 
In * either case, select a suitable 10Mbit media and hope it works. We * don't need to reset the card again, since it will have been done * already by the big switch above. */ if (scp->autoneg_status == XE_AUTONEG_FAIL) { DEVPRINTF(2, (scp->dev, "Selecting 10baseX\n")); if (scp->mohawk) { XE_SELECT_PAGE(0x42); XE_OUTB(XE_SWC1, 0x80); scp->media = IFM_ETHER|IFM_10_T; scp->autoneg_status = XE_AUTONEG_NONE; } else { XE_SELECT_PAGE(4); XE_OUTB(XE_GPR0, 4); DELAY(50000); XE_SELECT_PAGE(0x42); XE_OUTB(XE_SWC1, (XE_INB(XE_ESR) & XE_ESR_MEDIA_SELECT) ? 0x80 : 0xc0); scp->media = IFM_ETHER|((XE_INB(XE_ESR) & XE_ESR_MEDIA_SELECT) ? IFM_10_T : IFM_10_2); scp->autoneg_status = XE_AUTONEG_NONE; } } break; /* * If a specific media has been requested, we just reset the card and * select it (one small exception -- if 100baseTX is requested by there is * no PHY, we fall back to 10baseT operation). */ case IFM_100_TX: /* Force 100baseTX */ if (scp->phy_ok) { DEVPRINTF(2, (scp->dev, "Selecting 100baseTX\n")); XE_SELECT_PAGE(0x42); XE_OUTB(XE_SWC1, 0); xe_phy_writereg(scp, PHY_BMCR, PHY_BMCR_SPEEDSEL); XE_SELECT_PAGE(2); XE_OUTB(XE_MSR, XE_INB(XE_MSR) | 0x08); scp->media |= IFM_100_TX; break; } /* FALLTHROUGH */ case IFM_10_T: /* Force 10baseT */ DEVPRINTF(2, (scp->dev, "Selecting 10baseT\n")); if (scp->phy_ok) { xe_phy_writereg(scp, PHY_BMCR, 0x0000); XE_SELECT_PAGE(2); XE_OUTB(XE_MSR, XE_INB(XE_MSR) & ~0x08); /* Disable PHY */ } XE_SELECT_PAGE(0x42); XE_OUTB(XE_SWC1, 0x80); scp->media |= IFM_10_T; break; case IFM_10_2: DEVPRINTF(2, (scp->dev, "Selecting 10base2\n")); XE_SELECT_PAGE(0x42); XE_OUTB(XE_SWC1, 0xc0); scp->media |= IFM_10_2; break; } /* * Finally, the LEDs are set to match whatever media was chosen and the * transmitter is unblocked. 
*/ DEVPRINTF(2, (scp->dev, "Setting LEDs\n")); XE_SELECT_PAGE(2); switch (IFM_SUBTYPE(scp->media)) { case IFM_100_TX: case IFM_10_T: XE_OUTB(XE_LED, 0x3b); if (scp->dingo) XE_OUTB(0x0b, 0x04); /* 100Mbit LED */ break; case IFM_10_2: XE_OUTB(XE_LED, 0x3a); break; } /* Restart output? */ xe_enable_intr(scp); scp->ifp->if_flags &= ~IFF_OACTIVE; xe_start(scp->ifp); } /* * Hard reset (power cycle) the card. */ static void xe_reset(struct xe_softc *scp) { int s; DEVPRINTF(2, (scp->dev, "reset\n")); s = splimp(); /* Power down */ XE_SELECT_PAGE(4); XE_OUTB(XE_GPR1, 0); DELAY(40000); /* Power up again */ if (scp->mohawk) XE_OUTB(XE_GPR1, XE_GPR1_POWER_DOWN); else XE_OUTB(XE_GPR1, XE_GPR1_POWER_DOWN|XE_GPR1_AIC); DELAY(40000); XE_SELECT_PAGE(0); (void)splx(s); } /* * Take interface offline. This is done by powering down the device, which I * assume means just shutting down the transceiver and Ethernet logic. This * requires a _hard_ reset to recover from, as we need to power up again. */ static void xe_stop(struct xe_softc *scp) { int s; DEVPRINTF(2, (scp->dev, "stop\n")); s = splimp(); /* * Shut off interrupts. */ xe_disable_intr(scp); /* * Power down. */ XE_SELECT_PAGE(4); XE_OUTB(XE_GPR1, 0); XE_SELECT_PAGE(0); if (scp->mohawk) { /* * set GP1 and GP2 as outputs (bits 2 & 3) * set GP1 high to power on the ML6692 (bit 0) * set GP2 low to power on the 10Mhz chip (bit 1) */ XE_SELECT_PAGE(4); XE_OUTB(XE_GPR0, XE_GPR0_GP2_SELECT|XE_GPR0_GP1_SELECT|XE_GPR0_GP1_OUT); } /* * ~IFF_RUNNING == interface down. */ scp->ifp->if_flags &= ~IFF_RUNNING; scp->ifp->if_flags &= ~IFF_OACTIVE; scp->ifp->if_timer = 0; (void)splx(s); } /* * Enable interrupts from the card. 
*/ static void xe_enable_intr(struct xe_softc *scp) { DEVPRINTF(2, (scp->dev, "enable_intr\n")); XE_SELECT_PAGE(0); XE_OUTB(XE_CR, XE_CR_ENABLE_INTR); /* Enable interrupts */ if (scp->modem && !scp->dingo) { /* This bit is just magic */ if (!(XE_INB(0x10) & 0x01)) { XE_OUTB(0x10, 0x11); /* Unmask master int enable bit */ } } } /* * Disable interrupts from the card. */ static void xe_disable_intr(struct xe_softc *scp) { DEVPRINTF(2, (scp->dev, "disable_intr\n")); XE_SELECT_PAGE(0); XE_OUTB(XE_CR, 0); /* Disable interrupts */ if (scp->modem && !scp->dingo) { /* More magic */ XE_OUTB(0x10, 0x10); /* Mask the master int enable bit */ } } /* * Set up multicast filter and promiscuous modes. */ static void xe_set_multicast(struct xe_softc *scp) { struct ifnet *ifp; struct ifmultiaddr *maddr; unsigned count, i; DEVPRINTF(2, (scp->dev, "set_multicast\n")); ifp = scp->ifp; XE_SELECT_PAGE(0x42); /* Handle PROMISC flag */ if (ifp->if_flags & IFF_PROMISC) { XE_OUTB(XE_SWC1, XE_INB(XE_SWC1) | XE_SWC1_PROMISCUOUS); return; } else XE_OUTB(XE_SWC1, XE_INB(XE_SWC1) & ~XE_SWC1_PROMISCUOUS); /* Handle ALLMULTI flag */ if (ifp->if_flags & IFF_ALLMULTI) { XE_OUTB(XE_SWC1, XE_INB(XE_SWC1) | XE_SWC1_ALLMULTI); return; } else XE_OUTB(XE_SWC1, XE_INB(XE_SWC1) & ~XE_SWC1_ALLMULTI); /* Iterate over multicast address list */ count = 0; + IF_ADDR_LOCK(ifp); #if __FreeBSD_version < 500000 LIST_FOREACH(maddr, &ifp->if_multiaddrs, ifma_link) { #else TAILQ_FOREACH(maddr, &ifp->if_multiaddrs, ifma_link) { #endif if (maddr->ifma_addr->sa_family != AF_LINK) continue; count++; if (count < 10) /* First 9 use Individual Addresses for exact matching */ xe_set_addr(scp, LLADDR((struct sockaddr_dl *)maddr->ifma_addr), count); else if (scp->mohawk) /* Use hash filter on Mohawk and Dingo */ xe_mchash(scp, LLADDR((struct sockaddr_dl *)maddr->ifma_addr)); else /* Nowhere else to put them on CE2 */ break; } + IF_ADDR_UNLOCK(ifp); DEVPRINTF(2, (scp->dev, "set_multicast: count = %u\n", count)); /* Now do some 
cleanup and enable multicast handling as needed */ if (count == 0) { /* Disable all multicast handling */ XE_SELECT_PAGE(0x42); XE_OUTB(XE_SWC1, XE_INB(XE_SWC1) & ~(XE_SWC1_IA_ENABLE|XE_SWC1_ALLMULTI)); if (scp->mohawk) { XE_SELECT_PAGE(0x02); XE_OUTB(XE_MSR, XE_INB(XE_MSR) & ~XE_MSR_HASH_TABLE); } } else if (count < 10) { /* Full in any unused Individual Addresses with our MAC address */ for (i = count + 1; i < 10; i++) xe_set_addr(scp, (u_int8_t *)(&IFP2ENADDR(scp->ifp)), i); /* Enable Individual Address matching only */ XE_SELECT_PAGE(0x42); XE_OUTB(XE_SWC1, (XE_INB(XE_SWC1) & ~XE_SWC1_ALLMULTI) | XE_SWC1_IA_ENABLE); if (scp->mohawk) { XE_SELECT_PAGE(0x02); XE_OUTB(XE_MSR, XE_INB(XE_MSR) & ~XE_MSR_HASH_TABLE); } } else { if (scp->mohawk) { /* Check whether hash table is full */ XE_SELECT_PAGE(0x58); for (i = 0x08; i < 0x10; i++) if (XE_INB(i) != 0xff) break; if (i == 0x10) { /* Hash table full - enable promiscuous multicast matching */ XE_SELECT_PAGE(0x42); XE_OUTB(XE_SWC1, (XE_INB(XE_SWC1) & ~XE_SWC1_IA_ENABLE) | XE_SWC1_ALLMULTI); XE_SELECT_PAGE(0x02); XE_OUTB(XE_MSR, XE_INB(XE_MSR) & ~XE_MSR_HASH_TABLE); } else { /* Enable hash table and Individual Address matching */ XE_SELECT_PAGE(0x42); XE_OUTB(XE_SWC1, (XE_INB(XE_SWC1) & ~XE_SWC1_ALLMULTI) | XE_SWC1_IA_ENABLE); XE_SELECT_PAGE(0x02); XE_OUTB(XE_MSR, XE_INB(XE_MSR) | XE_MSR_HASH_TABLE); } } else { /* Enable promiscuous multicast matching */ XE_SELECT_PAGE(0x42); XE_OUTB(XE_SWC1, (XE_INB(XE_SWC1) & ~XE_SWC1_IA_ENABLE) | XE_SWC1_ALLMULTI); } } XE_SELECT_PAGE(0); } /* * Copy the Ethernet multicast address in addr to the on-chip registers for * Individual Address idx. Assumes that addr is really a multicast address * and that idx > 0 (slot 0 is always used for the card MAC address). */ static void xe_set_addr(struct xe_softc *scp, u_int8_t* addr, unsigned idx) { u_int8_t page, reg; unsigned i; /* * Individual Addresses are stored in registers 8-F of pages 0x50-0x57. 
IA1 * therefore starts at register 0xE on page 0x50. The expressions below * compute the starting page and register for any IA index > 0. */ --idx; page = 0x50 + idx%4 + idx/4*3; reg = 0x0e - 2 * (idx%4); DEVPRINTF(3, (scp->dev, "set_addr: idx = %u, page = 0x%02x, reg = 0x%02x\n", idx+1, page, reg)); /* * Copy the IA bytes. Note that the byte order is reversed for Mohawk and * Dingo wrt. CE2 hardware. */ XE_SELECT_PAGE(page); for (i = 0; i < 6; i++) { if (i > 0) { DPRINTF(3, (":%02x", addr[i])); } else { DEVPRINTF(3, (scp->dev, "set_addr: %02x", addr[0])); } XE_OUTB(reg, addr[scp->mohawk ? 5 - i : i]); if (++reg == 0x10) { reg = 0x08; XE_SELECT_PAGE(++page); } } DPRINTF(3, ("\n")); } /* * Set the appropriate bit in the multicast hash table for the supplied * Ethernet multicast address addr. Assumes that addr is really a multicast * address. */ static void xe_mchash(struct xe_softc* scp, const uint8_t *addr) { int bit; uint8_t byte, hash; hash = ether_crc32_le(addr, ETHER_ADDR_LEN) & 0x3F; /* Top 3 bits of hash give register - 8, bottom 3 give bit within register */ byte = hash >> 3 | 0x08; bit = 0x01 << (hash & 0x07); DEVPRINTF(3, (scp->dev, "set_hash: hash = 0x%02x, byte = 0x%02x, bit = 0x%02x\n", hash, byte, bit)); XE_SELECT_PAGE(0x58); XE_OUTB(byte, XE_INB(byte) | bit); } /* * Write an outgoing packet to the card using programmed I/O. 
*/ static int xe_pio_write_packet(struct xe_softc *scp, struct mbuf *mbp) { unsigned len, pad; unsigned char wantbyte; u_int8_t *data; u_int8_t savebyte[2]; /* Get total packet length */ if (mbp->m_flags & M_PKTHDR) len = mbp->m_pkthdr.len; else { struct mbuf* mbp2 = mbp; for (len = 0; mbp2 != NULL; len += mbp2->m_len, mbp2 = mbp2->m_next); } DEVPRINTF(3, (scp->dev, "pio_write_packet: len = %u\n", len)); /* Packets < minimum length may need to be padded out */ pad = 0; if (len < scp->tx_min) { pad = scp->tx_min - len; len = scp->tx_min; } /* Check transmit buffer space */ XE_SELECT_PAGE(0); XE_OUTW(XE_TRS, len+2); /* Only effective on rev. 1 CE2 cards */ if ((XE_INW(XE_TSO) & 0x7fff) <= len + 2) return 1; /* Send packet length to card */ XE_OUTW(XE_EDP, len); /* * Write packet to card using PIO (code stolen from the ed driver) */ wantbyte = 0; while (mbp != NULL) { len = mbp->m_len; if (len > 0) { data = mtod(mbp, caddr_t); if (wantbyte) { /* Finish the last word */ savebyte[1] = *data; XE_OUTW(XE_EDP, *(u_short *)savebyte); data++; len--; wantbyte = 0; } if (len > 1) { /* Output contiguous words */ bus_space_write_multi_2(scp->bst, scp->bsh, XE_EDP, (u_int16_t *) data, len >> 1); data += len & ~1; len &= 1; } if (len == 1) { /* Save last byte, if necessary */ savebyte[0] = *data; wantbyte = 1; } } mbp = mbp->m_next; } /* * Send last byte of odd-length packets */ if (wantbyte) XE_OUTB(XE_EDP, savebyte[0]); /* * Can just tell CE3 cards to send; short packets will be padded out with * random cruft automatically. For CE2, manually pad the packet with * garbage; it will be sent when the required number or bytes have been * delivered to the card. 
*/ if (scp->mohawk) XE_OUTB(XE_CR, XE_CR_TX_PACKET | XE_CR_RESTART_TX | XE_CR_ENABLE_INTR); else if (pad > 0) { if (pad & 0x01) XE_OUTB(XE_EDP, 0xaa); pad >>= 1; while (pad > 0) { XE_OUTW(XE_EDP, 0xdead); pad--; } } return 0; } /************************************************************** * * * M I I F U N C T I O N S * * * **************************************************************/ /* * Alternative MII/PHY handling code adapted from the xl driver. It doesn't * seem to work any better than the xirc2_ps stuff, but it's cleaner code. * XXX - this stuff shouldn't be here. It should all be abstracted off to * XXX - some kind of common MII-handling code, shared by all drivers. But * XXX - that's a whole other mission. */ #define XE_MII_SET(x) XE_OUTB(XE_GPR2, (XE_INB(XE_GPR2) | 0x04) | (x)) #define XE_MII_CLR(x) XE_OUTB(XE_GPR2, (XE_INB(XE_GPR2) | 0x04) & ~(x)) /* * Sync the PHYs by setting data bit and strobing the clock 32 times. */ static void xe_mii_sync(struct xe_softc *scp) { register int i; XE_SELECT_PAGE(2); XE_MII_SET(XE_MII_DIR|XE_MII_WRD); for (i = 0; i < 32; i++) { XE_MII_SET(XE_MII_CLK); DELAY(1); XE_MII_CLR(XE_MII_CLK); DELAY(1); } } /* * Look for a MII-compliant PHY. If we find one, reset it. */ static int xe_mii_init(struct xe_softc *scp) { u_int16_t status; status = xe_phy_readreg(scp, PHY_BMSR); if ((status & 0xff00) != 0x7800) { DEVPRINTF(2, (scp->dev, "no PHY found, %0x\n", status)); return 0; } else { DEVPRINTF(2, (scp->dev, "PHY OK!\n")); /* Reset the PHY */ xe_phy_writereg(scp, PHY_BMCR, PHY_BMCR_RESET); DELAY(500); while(xe_phy_readreg(scp, PHY_BMCR) & PHY_BMCR_RESET); XE_MII_DUMP(scp); return 1; } } /* * Clock a series of bits through the MII. 
*/ static void xe_mii_send(struct xe_softc *scp, u_int32_t bits, int cnt) { int i; XE_SELECT_PAGE(2); XE_MII_CLR(XE_MII_CLK); for (i = (0x1 << (cnt - 1)); i; i >>= 1) { if (bits & i) { XE_MII_SET(XE_MII_WRD); } else { XE_MII_CLR(XE_MII_WRD); } DELAY(1); XE_MII_CLR(XE_MII_CLK); DELAY(1); XE_MII_SET(XE_MII_CLK); } } /* * Read an PHY register through the MII. */ static int xe_mii_readreg(struct xe_softc *scp, struct xe_mii_frame *frame) { int i, ack, s; s = splimp(); /* * Set up frame for RX. */ frame->mii_stdelim = XE_MII_STARTDELIM; frame->mii_opcode = XE_MII_READOP; frame->mii_turnaround = 0; frame->mii_data = 0; XE_SELECT_PAGE(2); XE_OUTB(XE_GPR2, 0); /* * Turn on data xmit. */ XE_MII_SET(XE_MII_DIR); xe_mii_sync(scp); /* * Send command/address info. */ xe_mii_send(scp, frame->mii_stdelim, 2); xe_mii_send(scp, frame->mii_opcode, 2); xe_mii_send(scp, frame->mii_phyaddr, 5); xe_mii_send(scp, frame->mii_regaddr, 5); /* Idle bit */ XE_MII_CLR((XE_MII_CLK|XE_MII_WRD)); DELAY(1); XE_MII_SET(XE_MII_CLK); DELAY(1); /* Turn off xmit. */ XE_MII_CLR(XE_MII_DIR); /* Check for ack */ XE_MII_CLR(XE_MII_CLK); DELAY(1); ack = XE_INB(XE_GPR2) & XE_MII_RDD; XE_MII_SET(XE_MII_CLK); DELAY(1); /* * Now try reading data bits. If the ack failed, we still * need to clock through 16 cycles to keep the PHY(s) in sync. */ if (ack) { for(i = 0; i < 16; i++) { XE_MII_CLR(XE_MII_CLK); DELAY(1); XE_MII_SET(XE_MII_CLK); DELAY(1); } goto fail; } for (i = 0x8000; i; i >>= 1) { XE_MII_CLR(XE_MII_CLK); DELAY(1); if (!ack) { if (XE_INB(XE_GPR2) & XE_MII_RDD) frame->mii_data |= i; DELAY(1); } XE_MII_SET(XE_MII_CLK); DELAY(1); } fail: XE_MII_CLR(XE_MII_CLK); DELAY(1); XE_MII_SET(XE_MII_CLK); DELAY(1); splx(s); if (ack) return(1); return(0); } /* * Write to a PHY register through the MII. */ static int xe_mii_writereg(struct xe_softc *scp, struct xe_mii_frame *frame) { int s; s = splimp(); /* * Set up frame for TX. 
*/ frame->mii_stdelim = XE_MII_STARTDELIM; frame->mii_opcode = XE_MII_WRITEOP; frame->mii_turnaround = XE_MII_TURNAROUND; XE_SELECT_PAGE(2); /* * Turn on data output. */ XE_MII_SET(XE_MII_DIR); xe_mii_sync(scp); xe_mii_send(scp, frame->mii_stdelim, 2); xe_mii_send(scp, frame->mii_opcode, 2); xe_mii_send(scp, frame->mii_phyaddr, 5); xe_mii_send(scp, frame->mii_regaddr, 5); xe_mii_send(scp, frame->mii_turnaround, 2); xe_mii_send(scp, frame->mii_data, 16); /* Idle bit. */ XE_MII_SET(XE_MII_CLK); DELAY(1); XE_MII_CLR(XE_MII_CLK); DELAY(1); /* * Turn off xmit. */ XE_MII_CLR(XE_MII_DIR); splx(s); return(0); } /* * Read a register from the PHY. */ static u_int16_t xe_phy_readreg(struct xe_softc *scp, u_int16_t reg) { struct xe_mii_frame frame; bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = 0; frame.mii_regaddr = reg; xe_mii_readreg(scp, &frame); return(frame.mii_data); } /* * Write to a PHY register. */ static void xe_phy_writereg(struct xe_softc *scp, u_int16_t reg, u_int16_t data) { struct xe_mii_frame frame; bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = 0; frame.mii_regaddr = reg; frame.mii_data = data; xe_mii_writereg(scp, &frame); return; } /* * A bit of debugging code. 
*/ static void xe_mii_dump(struct xe_softc *scp) { int i, s; s = splimp(); device_printf(scp->dev, "MII registers: "); for (i = 0; i < 2; i++) { printf(" %d:%04x", i, xe_phy_readreg(scp, i)); } for (i = 4; i < 7; i++) { printf(" %d:%04x", i, xe_phy_readreg(scp, i)); } printf("\n"); (void)splx(s); } #if 0 void xe_reg_dump(struct xe_softc *scp) { int page, i, s; s = splimp(); device_printf(scp->dev, "Common registers: "); for (i = 0; i < 8; i++) { printf(" %2.2x", XE_INB(i)); } printf("\n"); for (page = 0; page <= 8; page++) { device_printf(scp->dev, "Register page %2.2x: ", page); XE_SELECT_PAGE(page); for (i = 8; i < 16; i++) { printf(" %2.2x", XE_INB(i)); } printf("\n"); } for (page = 0x10; page < 0x5f; page++) { if ((page >= 0x11 && page <= 0x3f) || (page == 0x41) || (page >= 0x43 && page <= 0x4f) || (page >= 0x59)) continue; device_printf(scp->dev, "Register page %2.2x: ", page); XE_SELECT_PAGE(page); for (i = 8; i < 16; i++) { printf(" %2.2x", XE_INB(i)); } printf("\n"); } (void)splx(s); } #endif int xe_activate(device_t dev) { struct xe_softc *sc = device_get_softc(dev); int start, err, i; DEVPRINTF(2, (dev, "activate\n")); if (!sc->modem) { sc->port_rid = 0; /* 0 is managed by pccard */ sc->port_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &sc->port_rid, 0, ~0, 16, RF_ACTIVE); } else if (sc->dingo) { /* * Find a 16 byte aligned ioport for the card. 
*/ DEVPRINTF(1, (dev, "Finding an aligned port for RealPort\n")); sc->port_rid = 1; /* 0 is managed by pccard */ start = 0x100; do { sc->port_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &sc->port_rid, start, 0x3ff, 16, RF_ACTIVE); if (sc->port_res == 0) break; /* we failed */ if ((rman_get_start(sc->port_res) & 0xf) == 0) break; /* good */ bus_release_resource(dev, SYS_RES_IOPORT, sc->port_rid, sc->port_res); start = (rman_get_start(sc->port_res) + 15) & ~0xf; } while (1); DEVPRINTF(1, (dev, "RealPort port 0x%0lx, size 0x%0lx\n", bus_get_resource_start(dev, SYS_RES_IOPORT, sc->port_rid), bus_get_resource_count(dev, SYS_RES_IOPORT, sc->port_rid))); } else if (sc->ce2) { /* * Find contiguous I/O port for the Ethernet function on CEM2 and * CEM3 cards. We allocate window 0 wherever pccard has decided * it should be, then find an available window adjacent to it for * the second function. Not sure that both windows are actually * needed. */ DEVPRINTF(1, (dev, "Finding I/O port for CEM2/CEM3\n")); sc->ce2_port_rid = 0; /* 0 is managed by pccard */ sc->ce2_port_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &sc->ce2_port_rid, 0, ~0, 8, RF_ACTIVE); if (!sc->ce2_port_res) { DEVPRINTF(1, (dev, "Cannot allocate I/O port for modem\n")); return ENOMEM; } sc->port_rid = 1; start = bus_get_resource_start(dev, SYS_RES_IOPORT, sc->ce2_port_rid); for (i = 0; i < 2; i++) { start += (i == 0 ? 8 : -24); sc->port_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &sc->port_rid, start, start + 18, 18, RF_ACTIVE); if (sc->port_res == 0) continue; /* Failed, try again if possible */ if (bus_get_resource_start(dev, SYS_RES_IOPORT, sc->port_rid) == start) break; /* Success! 
*/ bus_release_resource(dev, SYS_RES_IOPORT, sc->port_rid, sc->port_res); sc->port_res = 0; } DEVPRINTF(1, (dev, "CEM2/CEM3 port 0x%0lx, size 0x%0lx\n", bus_get_resource_start(dev, SYS_RES_IOPORT, sc->port_rid), bus_get_resource_count(dev, SYS_RES_IOPORT, sc->port_rid))); } if (!sc->port_res) { DEVPRINTF(1, (dev, "Cannot allocate ioport\n")); return ENOMEM; } sc->irq_rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE); if (!sc->irq_res) { DEVPRINTF(1, (dev, "Cannot allocate irq\n")); xe_deactivate(dev); return ENOMEM; } if ((err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET, xe_intr, sc, &sc->intrhand)) != 0) { xe_deactivate(dev); return err; } sc->bst = rman_get_bustag(sc->port_res); sc->bsh = rman_get_bushandle(sc->port_res); return (0); } void xe_deactivate(device_t dev) { struct xe_softc *sc = device_get_softc(dev); DEVPRINTF(2, (dev, "deactivate\n")); xe_disable_intr(sc); if (sc->intrhand) bus_teardown_intr(dev, sc->irq_res, sc->intrhand); sc->intrhand = 0; if (sc->port_res) bus_release_resource(dev, SYS_RES_IOPORT, sc->port_rid, sc->port_res); sc->port_res = 0; if (sc->ce2_port_res) bus_release_resource(dev, SYS_RES_IOPORT, sc->ce2_port_rid, sc->ce2_port_res); sc->ce2_port_res = 0; if (sc->irq_res) bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); sc->irq_res = 0; return; } Index: stable/6/sys/pci/if_dc.c =================================================================== --- stable/6/sys/pci/if_dc.c (revision 149421) +++ stable/6/sys/pci/if_dc.c (revision 149422) @@ -1,3824 +1,3832 @@ /*- * Copyright (c) 1997, 1998, 1999 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * DEC "tulip" clone ethernet driver. 
Supports the DEC/Intel 21143 * series chips and several workalikes including the following: * * Macronix 98713/98715/98725/98727/98732 PMAC (www.macronix.com) * Macronix/Lite-On 82c115 PNIC II (www.macronix.com) * Lite-On 82c168/82c169 PNIC (www.litecom.com) * ASIX Electronics AX88140A (www.asix.com.tw) * ASIX Electronics AX88141 (www.asix.com.tw) * ADMtek AL981 (www.admtek.com.tw) * ADMtek AN985 (www.admtek.com.tw) * Netgear FA511 (www.netgear.com) Appears to be rebadged ADMTek AN985 * Davicom DM9100, DM9102, DM9102A (www.davicom8.com) * Accton EN1217 (www.accton.com) * Xircom X3201 (www.xircom.com) * Abocom FE2500 * Conexant LANfinity (www.conexant.com) * 3Com OfficeConnect 10/100B 3CSOHO100B (www.3com.com) * * Datasheets for the 21143 are available at developer.intel.com. * Datasheets for the clone parts can be found at their respective sites. * (Except for the PNIC; see www.freebsd.org/~wpaul/PNIC/pnic.ps.gz.) * The PNIC II is essentially a Macronix 98715A chip; the only difference * worth noting is that its multicast hash table is only 128 bits wide * instead of 512. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The Intel 21143 is the successor to the DEC 21140. It is basically * the same as the 21140 but with a few new features. The 21143 supports * three kinds of media attachments: * * o MII port, for 10Mbps and 100Mbps support and NWAY * autonegotiation provided by an external PHY. * o SYM port, for symbol mode 100Mbps support. * o 10baseT port. * o AUI/BNC port. * * The 100Mbps SYM port and 10baseT port can be used together in * combination with the internal NWAY support to create a 10/100 * autosensing configuration. * * Note that not all tulip workalikes are handled in this driver: we only * deal with those which are relatively well behaved. The Winbond is * handled separately due to its different register offsets and the * special handling needed for its various bugs. 
The PNIC is handled * here, but I'm not thrilled about it. * * All of the workalike chips use some form of MII transceiver support * with the exception of the Macronix chips, which also have a SYM port. * The ASIX AX88140A is also documented to have a SYM port, but all * the cards I've seen use an MII transceiver, probably because the * AX88140A doesn't support internal NWAY. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define DC_USEIOSPACE #ifdef __alpha__ #define SRM_MEDIA #endif #include #ifdef __sparc64__ #include #include #endif MODULE_DEPEND(dc, pci, 1, 1, 1); MODULE_DEPEND(dc, ether, 1, 1, 1); MODULE_DEPEND(dc, miibus, 1, 1, 1); /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* * Various supported device vendors/types and their names. */ static struct dc_type dc_devs[] = { { DC_VENDORID_DEC, DC_DEVICEID_21143, "Intel 21143 10/100BaseTX" }, { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9009, "Davicom DM9009 10/100BaseTX" }, { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9100, "Davicom DM9100 10/100BaseTX" }, { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102, "Davicom DM9102 10/100BaseTX" }, { DC_VENDORID_DAVICOM, DC_DEVICEID_DM9102, "Davicom DM9102A 10/100BaseTX" }, { DC_VENDORID_ADMTEK, DC_DEVICEID_AL981, "ADMtek AL981 10/100BaseTX" }, { DC_VENDORID_ADMTEK, DC_DEVICEID_AN985, "ADMtek AN985 10/100BaseTX" }, { DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9511, "ADMtek ADM9511 10/100BaseTX" }, { DC_VENDORID_ADMTEK, DC_DEVICEID_ADM9513, "ADMtek ADM9513 10/100BaseTX" }, { DC_VENDORID_ADMTEK, DC_DEVICEID_FA511, "Netgear FA511 10/100BaseTX" }, { DC_VENDORID_ASIX, DC_DEVICEID_AX88140A, "ASIX AX88140A 10/100BaseTX" }, { DC_VENDORID_ASIX, DC_DEVICEID_AX88140A, "ASIX AX88141 10/100BaseTX" }, { DC_VENDORID_MX, DC_DEVICEID_98713, "Macronix 98713 10/100BaseTX" }, { 
DC_VENDORID_MX, DC_DEVICEID_98713, "Macronix 98713A 10/100BaseTX" }, { DC_VENDORID_CP, DC_DEVICEID_98713_CP, "Compex RL100-TX 10/100BaseTX" }, { DC_VENDORID_CP, DC_DEVICEID_98713_CP, "Compex RL100-TX 10/100BaseTX" }, { DC_VENDORID_MX, DC_DEVICEID_987x5, "Macronix 98715/98715A 10/100BaseTX" }, { DC_VENDORID_MX, DC_DEVICEID_987x5, "Macronix 98715AEC-C 10/100BaseTX" }, { DC_VENDORID_MX, DC_DEVICEID_987x5, "Macronix 98725 10/100BaseTX" }, { DC_VENDORID_MX, DC_DEVICEID_98727, "Macronix 98727/98732 10/100BaseTX" }, { DC_VENDORID_LO, DC_DEVICEID_82C115, "LC82C115 PNIC II 10/100BaseTX" }, { DC_VENDORID_LO, DC_DEVICEID_82C168, "82c168 PNIC 10/100BaseTX" }, { DC_VENDORID_LO, DC_DEVICEID_82C168, "82c169 PNIC 10/100BaseTX" }, { DC_VENDORID_ACCTON, DC_DEVICEID_EN1217, "Accton EN1217 10/100BaseTX" }, { DC_VENDORID_ACCTON, DC_DEVICEID_EN2242, "Accton EN2242 MiniPCI 10/100BaseTX" }, { DC_VENDORID_XIRCOM, DC_DEVICEID_X3201, "Xircom X3201 10/100BaseTX" }, { DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500, "Abocom FE2500 10/100BaseTX" }, { DC_VENDORID_ABOCOM, DC_DEVICEID_FE2500MX, "Abocom FE2500MX 10/100BaseTX" }, { DC_VENDORID_CONEXANT, DC_DEVICEID_RS7112, "Conexant LANfinity MiniPCI 10/100BaseTX" }, { DC_VENDORID_HAWKING, DC_DEVICEID_HAWKING_PN672TX, "Hawking CB102 CardBus 10/100" }, { DC_VENDORID_PLANEX, DC_DEVICEID_FNW3602T, "PlaneX FNW-3602-T CardBus 10/100" }, { DC_VENDORID_3COM, DC_DEVICEID_3CSOHOB, "3Com OfficeConnect 10/100B" }, { DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN120, "Microsoft MN-120 CardBus 10/100" }, { DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN130, "Microsoft MN-130 10/100" }, { DC_VENDORID_MICROSOFT, DC_DEVICEID_MSMN130_FAKE, "Microsoft MN-130 10/100" }, { 0, 0, NULL } }; static int dc_probe(device_t); static int dc_attach(device_t); static int dc_detach(device_t); static int dc_suspend(device_t); static int dc_resume(device_t); static struct dc_type *dc_devtype(device_t); static int dc_newbuf(struct dc_softc *, int, int); static int dc_encap(struct dc_softc *, struct mbuf 
**); static void dc_pnic_rx_bug_war(struct dc_softc *, int); static int dc_rx_resync(struct dc_softc *); static void dc_rxeof(struct dc_softc *); static void dc_txeof(struct dc_softc *); static void dc_tick(void *); static void dc_tx_underrun(struct dc_softc *); static void dc_intr(void *); static void dc_start(struct ifnet *); static int dc_ioctl(struct ifnet *, u_long, caddr_t); static void dc_init(void *); static void dc_stop(struct dc_softc *); static void dc_watchdog(struct ifnet *); static void dc_shutdown(device_t); static int dc_ifmedia_upd(struct ifnet *); static void dc_ifmedia_sts(struct ifnet *, struct ifmediareq *); static void dc_delay(struct dc_softc *); static void dc_eeprom_idle(struct dc_softc *); static void dc_eeprom_putbyte(struct dc_softc *, int); static void dc_eeprom_getword(struct dc_softc *, int, u_int16_t *); static void dc_eeprom_getword_pnic(struct dc_softc *, int, u_int16_t *); static void dc_eeprom_getword_xircom(struct dc_softc *, int, u_int16_t *); static void dc_eeprom_width(struct dc_softc *); static void dc_read_eeprom(struct dc_softc *, caddr_t, int, int, int); static void dc_mii_writebit(struct dc_softc *, int); static int dc_mii_readbit(struct dc_softc *); static void dc_mii_sync(struct dc_softc *); static void dc_mii_send(struct dc_softc *, u_int32_t, int); static int dc_mii_readreg(struct dc_softc *, struct dc_mii_frame *); static int dc_mii_writereg(struct dc_softc *, struct dc_mii_frame *); static int dc_miibus_readreg(device_t, int, int); static int dc_miibus_writereg(device_t, int, int, int); static void dc_miibus_statchg(device_t); static void dc_miibus_mediainit(device_t); static void dc_setcfg(struct dc_softc *, int); static uint32_t dc_mchash_le(struct dc_softc *, const uint8_t *); static uint32_t dc_mchash_be(const uint8_t *); static void dc_setfilt_21143(struct dc_softc *); static void dc_setfilt_asix(struct dc_softc *); static void dc_setfilt_admtek(struct dc_softc *); static void dc_setfilt_xircom(struct dc_softc 
*); static void dc_setfilt(struct dc_softc *); static void dc_reset(struct dc_softc *); static int dc_list_rx_init(struct dc_softc *); static int dc_list_tx_init(struct dc_softc *); static void dc_read_srom(struct dc_softc *, int); static void dc_parse_21143_srom(struct dc_softc *); static void dc_decode_leaf_sia(struct dc_softc *, struct dc_eblock_sia *); static void dc_decode_leaf_mii(struct dc_softc *, struct dc_eblock_mii *); static void dc_decode_leaf_sym(struct dc_softc *, struct dc_eblock_sym *); static void dc_apply_fixup(struct dc_softc *, int); static void dc_dma_map_txbuf(void *, bus_dma_segment_t *, int, bus_size_t, int); static void dc_dma_map_rxbuf(void *, bus_dma_segment_t *, int, bus_size_t, int); #ifdef DC_USEIOSPACE #define DC_RES SYS_RES_IOPORT #define DC_RID DC_PCI_CFBIO #else #define DC_RES SYS_RES_MEMORY #define DC_RID DC_PCI_CFBMA #endif static device_method_t dc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, dc_probe), DEVMETHOD(device_attach, dc_attach), DEVMETHOD(device_detach, dc_detach), DEVMETHOD(device_suspend, dc_suspend), DEVMETHOD(device_resume, dc_resume), DEVMETHOD(device_shutdown, dc_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, dc_miibus_readreg), DEVMETHOD(miibus_writereg, dc_miibus_writereg), DEVMETHOD(miibus_statchg, dc_miibus_statchg), DEVMETHOD(miibus_mediainit, dc_miibus_mediainit), { 0, 0 } }; static driver_t dc_driver = { "dc", dc_methods, sizeof(struct dc_softc) }; static devclass_t dc_devclass; #ifdef __i386__ static int dc_quick = 1; SYSCTL_INT(_hw, OID_AUTO, dc_quick, CTLFLAG_RW, &dc_quick, 0, "do not m_devget() in dc driver"); #endif DRIVER_MODULE(dc, cardbus, dc_driver, dc_devclass, 0, 0); DRIVER_MODULE(dc, pci, dc_driver, dc_devclass, 0, 0); DRIVER_MODULE(miibus, dc, miibus_driver, miibus_devclass, 0, 0); #define DC_SETBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, 
CSR_READ_4(sc, reg) | (x)) #define DC_CLRBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x)) #define SIO_SET(x) DC_SETBIT(sc, DC_SIO, (x)) #define SIO_CLR(x) DC_CLRBIT(sc, DC_SIO, (x)) #define IS_MPSAFE 0 static void dc_delay(struct dc_softc *sc) { int idx; for (idx = (300 / 33) + 1; idx > 0; idx--) CSR_READ_4(sc, DC_BUSCTL); } static void dc_eeprom_width(struct dc_softc *sc) { int i; /* Force EEPROM to idle state. */ dc_eeprom_idle(sc); /* Enter EEPROM access mode. */ CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); dc_delay(sc); DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); dc_delay(sc); for (i = 3; i--;) { if (6 & (1 << i)) DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); else DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); } for (i = 1; i <= 12; i++) { DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); if (!(CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT)) { DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); break; } DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); } /* Turn off EEPROM access mode. */ dc_eeprom_idle(sc); if (i < 4 || i > 12) sc->dc_romwidth = 6; else sc->dc_romwidth = i; /* Enter EEPROM access mode. */ CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); dc_delay(sc); DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); dc_delay(sc); /* Turn off EEPROM access mode. 
*/ dc_eeprom_idle(sc); } static void dc_eeprom_idle(struct dc_softc *sc) { int i; CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); dc_delay(sc); DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); dc_delay(sc); for (i = 0; i < 25; i++) { DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); } DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CS); dc_delay(sc); CSR_WRITE_4(sc, DC_SIO, 0x00000000); } /* * Send a read command and address to the EEPROM, check for ACK. */ static void dc_eeprom_putbyte(struct dc_softc *sc, int addr) { int d, i; d = DC_EECMD_READ >> 6; for (i = 3; i--; ) { if (d & (1 << i)) DC_SETBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); else DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_DATAIN); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); } /* * Feed in each bit and strobe the clock. */ for (i = sc->dc_romwidth; i--;) { if (addr & (1 << i)) { SIO_SET(DC_SIO_EE_DATAIN); } else { SIO_CLR(DC_SIO_EE_DATAIN); } dc_delay(sc); SIO_SET(DC_SIO_EE_CLK); dc_delay(sc); SIO_CLR(DC_SIO_EE_CLK); dc_delay(sc); } } /* * Read a word of data stored in the EEPROM at address 'addr.' * The PNIC 82c168/82c169 has its own non-standard way to read * the EEPROM. */ static void dc_eeprom_getword_pnic(struct dc_softc *sc, int addr, u_int16_t *dest) { int i; u_int32_t r; CSR_WRITE_4(sc, DC_PN_SIOCTL, DC_PN_EEOPCODE_READ | addr); for (i = 0; i < DC_TIMEOUT; i++) { DELAY(1); r = CSR_READ_4(sc, DC_SIO); if (!(r & DC_PN_SIOCTL_BUSY)) { *dest = (u_int16_t)(r & 0xFFFF); return; } } } /* * Read a word of data stored in the EEPROM at address 'addr.' * The Xircom X3201 has its own non-standard way to read * the EEPROM, too. 
*/ static void dc_eeprom_getword_xircom(struct dc_softc *sc, int addr, u_int16_t *dest) { SIO_SET(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ); addr *= 2; CSR_WRITE_4(sc, DC_ROM, addr | 0x160); *dest = (u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff; addr += 1; CSR_WRITE_4(sc, DC_ROM, addr | 0x160); *dest |= ((u_int16_t)CSR_READ_4(sc, DC_SIO) & 0xff) << 8; SIO_CLR(DC_SIO_ROMSEL | DC_SIO_ROMCTL_READ); } /* * Read a word of data stored in the EEPROM at address 'addr.' */ static void dc_eeprom_getword(struct dc_softc *sc, int addr, u_int16_t *dest) { int i; u_int16_t word = 0; /* Force EEPROM to idle state. */ dc_eeprom_idle(sc); /* Enter EEPROM access mode. */ CSR_WRITE_4(sc, DC_SIO, DC_SIO_EESEL); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_ROMCTL_READ); dc_delay(sc); DC_CLRBIT(sc, DC_SIO, DC_SIO_EE_CLK); dc_delay(sc); DC_SETBIT(sc, DC_SIO, DC_SIO_EE_CS); dc_delay(sc); /* * Send address of word we want to read. */ dc_eeprom_putbyte(sc, addr); /* * Start reading bits from EEPROM. */ for (i = 0x8000; i; i >>= 1) { SIO_SET(DC_SIO_EE_CLK); dc_delay(sc); if (CSR_READ_4(sc, DC_SIO) & DC_SIO_EE_DATAOUT) word |= i; dc_delay(sc); SIO_CLR(DC_SIO_EE_CLK); dc_delay(sc); } /* Turn off EEPROM access mode. */ dc_eeprom_idle(sc); *dest = word; } /* * Read a sequence of words from the EEPROM. */ static void dc_read_eeprom(struct dc_softc *sc, caddr_t dest, int off, int cnt, int be) { int i; u_int16_t word = 0, *ptr; for (i = 0; i < cnt; i++) { if (DC_IS_PNIC(sc)) dc_eeprom_getword_pnic(sc, off + i, &word); else if (DC_IS_XIRCOM(sc)) dc_eeprom_getword_xircom(sc, off + i, &word); else dc_eeprom_getword(sc, off + i, &word); ptr = (u_int16_t *)(dest + (i * 2)); if (be) *ptr = be16toh(word); else *ptr = le16toh(word); } } /* * The following two routines are taken from the Macronix 98713 * Application Notes pp.19-21. */ /* * Write a bit to the MII bus. 
 */
/*
 * Write a single bit to the MII management bus: drive the data-out pin
 * high or low (while keeping the SROM controller in write mode), then
 * strobe the MII clock once.
 */
static void
dc_mii_writebit(struct dc_softc *sc, int bit)
{

	if (bit)
		CSR_WRITE_4(sc, DC_SIO,
		    DC_SIO_ROMCTL_WRITE | DC_SIO_MII_DATAOUT);
	else
		CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);

	DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
}

/*
 * Read a bit from the MII bus: put the pin in input direction, strobe
 * the clock, then sample the data-in bit.  Returns 0 or 1.
 */
static int
dc_mii_readbit(struct dc_softc *sc)
{

	CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_READ | DC_SIO_MII_DIR);
	CSR_READ_4(sc, DC_SIO);	/* dummy read to flush the direction change */
	DC_SETBIT(sc, DC_SIO, DC_SIO_MII_CLK);
	DC_CLRBIT(sc, DC_SIO, DC_SIO_MII_CLK);
	if (CSR_READ_4(sc, DC_SIO) & DC_SIO_MII_DATAIN)
		return (1);

	return (0);
}

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
static void
dc_mii_sync(struct dc_softc *sc)
{
	int i;

	CSR_WRITE_4(sc, DC_SIO, DC_SIO_ROMCTL_WRITE);

	for (i = 0; i < 32; i++)
		dc_mii_writebit(sc, 1);
}

/*
 * Clock a series of bits through the MII, MSB first (cnt bits of 'bits').
 */
static void
dc_mii_send(struct dc_softc *sc, u_int32_t bits, int cnt)
{
	int i;

	for (i = (0x1 << (cnt - 1)); i; i >>= 1)
		dc_mii_writebit(sc, bits & i);
}

/*
 * Read a PHY register through the MII.  On entry frame->mii_phyaddr and
 * frame->mii_regaddr are set by the caller; on success the register value
 * is returned in frame->mii_data.  Returns 0 on success, 1 if the PHY
 * failed to ack the read.
 */
static int
dc_mii_readreg(struct dc_softc *sc, struct dc_mii_frame *frame)
{
	int i, ack;

	DC_LOCK(sc);

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = DC_MII_STARTDELIM;
	frame->mii_opcode = DC_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/*
	 * Sync the PHYs.
	 */
	dc_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	dc_mii_send(sc, frame->mii_stdelim, 2);
	dc_mii_send(sc, frame->mii_opcode, 2);
	dc_mii_send(sc, frame->mii_phyaddr, 5);
	dc_mii_send(sc, frame->mii_regaddr, 5);

#ifdef notdef
	/* Idle bit */
	dc_mii_writebit(sc, 1);
	dc_mii_writebit(sc, 0);
#endif

	/* Check for ack. */
	ack = dc_mii_readbit(sc);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for (i = 0; i < 16; i++)
			dc_mii_readbit(sc);
		goto fail;
	}

	/* Shift in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		if (!ack) {
			if (dc_mii_readbit(sc))
				frame->mii_data |= i;
		}
	}

fail:

	/* Clock out two idle bits to terminate the transaction. */
	dc_mii_writebit(sc, 0);
	dc_mii_writebit(sc, 0);

	DC_UNLOCK(sc);

	if (ack)
		return (1);
	return (0);
}

/*
 * Write to a PHY register through the MII.  The caller fills in
 * frame->mii_phyaddr, frame->mii_regaddr and frame->mii_data.
 * Always returns 0.
 */
static int
dc_mii_writereg(struct dc_softc *sc, struct dc_mii_frame *frame)
{

	DC_LOCK(sc);
	/*
	 * Set up frame for TX.
	 */
	frame->mii_stdelim = DC_MII_STARTDELIM;
	frame->mii_opcode = DC_MII_WRITEOP;
	frame->mii_turnaround = DC_MII_TURNAROUND;

	/*
	 * Sync the PHYs.
	 */
	dc_mii_sync(sc);

	dc_mii_send(sc, frame->mii_stdelim, 2);
	dc_mii_send(sc, frame->mii_opcode, 2);
	dc_mii_send(sc, frame->mii_phyaddr, 5);
	dc_mii_send(sc, frame->mii_regaddr, 5);
	dc_mii_send(sc, frame->mii_turnaround, 2);
	dc_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	dc_mii_writebit(sc, 0);
	dc_mii_writebit(sc, 0);

	DC_UNLOCK(sc);

	return (0);
}

/*
 * miibus(4) read method.  Dispatches to the chip-appropriate access
 * mechanism: faked registers for non-MII pmode, the PNIC's dedicated
 * MII CSR, Comet (AL981) direct-mapped PHY registers, or the generic
 * bit-bang path via dc_mii_readreg().
 */
static int
dc_miibus_readreg(device_t dev, int phy, int reg)
{
	struct dc_mii_frame frame;
	struct dc_softc *sc;
	int i, rval, phy_reg = 0;

	sc = device_get_softc(dev);
	bzero(&frame, sizeof(frame));

	/*
	 * Note: both the AL981 and AN985 have internal PHYs,
	 * however the AL981 provides direct access to the PHY
	 * registers while the AN985 uses a serial MII interface.
	 * The AN985's MII interface is also buggy in that you
	 * can read from any MII address (0 to 31), but only address 1
	 * behaves normally. To deal with both cases, we pretend
	 * that the PHY is at MII address 1.
	 */
	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
		return (0);

	/*
	 * Note: the ukphy probes of the RS7112 report a PHY at
	 * MII address 0 (possibly HomePNA?) and 1 (ethernet)
	 * so we only respond to correct one.
	 */
	if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
		return (0);

	if (sc->dc_pmode != DC_PMODE_MII) {
		if (phy == (MII_NPHY - 1)) {
			switch (reg) {
			case MII_BMSR:
				/*
				 * Fake something to make the probe
				 * code think there's a PHY here.
				 */
				return (BMSR_MEDIAMASK);
				break;
			case MII_PHYIDR1:
				if (DC_IS_PNIC(sc))
					return (DC_VENDORID_LO);
				return (DC_VENDORID_DEC);
				break;
			case MII_PHYIDR2:
				if (DC_IS_PNIC(sc))
					return (DC_DEVICEID_82C168);
				return (DC_DEVICEID_21143);
				break;
			default:
				return (0);
				break;
			}
		} else
			return (0);
	}

	if (DC_IS_PNIC(sc)) {
		/* PNIC has its own MII access CSR; poll until not busy. */
		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_READ |
		    (phy << 23) | (reg << 18));
		for (i = 0; i < DC_TIMEOUT; i++) {
			DELAY(1);
			rval = CSR_READ_4(sc, DC_PN_MII);
			if (!(rval & DC_PN_MII_BUSY)) {
				rval &= 0xFFFF;
				/* 0xFFFF means no PHY responded. */
				return (rval == 0xFFFF ? 0 : rval);
			}
		}
		return (0);
	}

	if (DC_IS_COMET(sc)) {
		/* AL981: map MII register numbers onto direct CSRs. */
		switch (reg) {
		case MII_BMCR:
			phy_reg = DC_AL_BMCR;
			break;
		case MII_BMSR:
			phy_reg = DC_AL_BMSR;
			break;
		case MII_PHYIDR1:
			phy_reg = DC_AL_VENID;
			break;
		case MII_PHYIDR2:
			phy_reg = DC_AL_DEVID;
			break;
		case MII_ANAR:
			phy_reg = DC_AL_ANAR;
			break;
		case MII_ANLPAR:
			phy_reg = DC_AL_LPAR;
			break;
		case MII_ANER:
			phy_reg = DC_AL_ANER;
			break;
		default:
			printf("dc%d: phy_read: bad phy register %x\n",
			    sc->dc_unit, reg);
			return (0);
			break;
		}

		rval = CSR_READ_4(sc, phy_reg) & 0x0000FFFF;

		if (rval == 0xFFFF)
			return (0);
		return (rval);
	}

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	if (sc->dc_type == DC_TYPE_98713) {
		/* 98713: temporarily clear PORTSEL around the MII access. */
		phy_reg = CSR_READ_4(sc, DC_NETCFG);
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
	}
	dc_mii_readreg(sc, &frame);
	if (sc->dc_type == DC_TYPE_98713)
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);

	return (frame.mii_data);
}

/*
 * miibus(4) write method; same chip dispatch as dc_miibus_readreg().
 */
static int
dc_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct dc_softc *sc;
	struct dc_mii_frame frame;
	int i, phy_reg = 0;

	sc = device_get_softc(dev);
	bzero(&frame, sizeof(frame));

	if (DC_IS_ADMTEK(sc) && phy != DC_ADMTEK_PHYADDR)
		return (0);

	if (DC_IS_CONEXANT(sc) && phy != DC_CONEXANT_PHYADDR)
		return (0);

	if (DC_IS_PNIC(sc)) {
		CSR_WRITE_4(sc, DC_PN_MII, DC_PN_MIIOPCODE_WRITE |
		    (phy << 23) | (reg << 10) | data);
		for (i = 0; i < DC_TIMEOUT; i++) {
			if (!(CSR_READ_4(sc, DC_PN_MII) & DC_PN_MII_BUSY))
				break;
		}
		return (0);
	}

	if (DC_IS_COMET(sc)) {
		switch (reg) {
		case MII_BMCR:
			phy_reg = DC_AL_BMCR;
			break;
		case MII_BMSR:
			phy_reg = DC_AL_BMSR;
			break;
		case MII_PHYIDR1:
			phy_reg = DC_AL_VENID;
			break;
		case MII_PHYIDR2:
			phy_reg = DC_AL_DEVID;
			break;
		case MII_ANAR:
			phy_reg = DC_AL_ANAR;
			break;
		case MII_ANLPAR:
			phy_reg = DC_AL_LPAR;
			break;
		case MII_ANER:
			phy_reg = DC_AL_ANER;
			break;
		default:
			printf("dc%d: phy_write: bad phy register %x\n",
			    sc->dc_unit, reg);
			return (0);
			break;
		}

		CSR_WRITE_4(sc, phy_reg, data);
		return (0);
	}

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	if (sc->dc_type == DC_TYPE_98713) {
		phy_reg = CSR_READ_4(sc, DC_NETCFG);
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg & ~DC_NETCFG_PORTSEL);
	}
	dc_mii_writereg(sc, &frame);
	if (sc->dc_type == DC_TYPE_98713)
		CSR_WRITE_4(sc, DC_NETCFG, phy_reg);

	return (0);
}

/*
 * miibus(4) media status-change callback: reprogram the chip's netconfig
 * register to match the newly negotiated media.  ADMtek parts handle this
 * internally, so nothing to do for them.
 */
static void
dc_miibus_statchg(device_t dev)
{
	struct dc_softc *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;

	sc = device_get_softc(dev);
	if (DC_IS_ADMTEK(sc))
		return;

	mii = device_get_softc(sc->dc_miibus);
	ifm = &mii->mii_media;
	if (DC_IS_DAVICOM(sc) &&
	    IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
		/* HomePNA media is configured by hand, not via the MII. */
		dc_setcfg(sc, ifm->ifm_media);
		sc->dc_if_media = ifm->ifm_media;
	} else {
		dc_setcfg(sc, mii->mii_media_active);
		sc->dc_if_media = mii->mii_media_active;
	}
}

/*
 * Special support for DM9102A cards with HomePNA PHYs. Note:
 * with the Davicom DM9102A/DM9801 eval board that I have, it seems
 * to be impossible to talk to the management interface of the DM9801
 * PHY (its MDIO pin is not connected to anything). Consequently,
 * the driver has to just 'know' about the additional mode and deal
 * with it itself.
*sigh* */ static void dc_miibus_mediainit(device_t dev) { struct dc_softc *sc; struct mii_data *mii; struct ifmedia *ifm; int rev; rev = pci_read_config(dev, DC_PCI_CFRV, 4) & 0xFF; sc = device_get_softc(dev); mii = device_get_softc(sc->dc_miibus); ifm = &mii->mii_media; if (DC_IS_DAVICOM(sc) && rev >= DC_REVISION_DM9102A) ifmedia_add(ifm, IFM_ETHER | IFM_HPNA_1, 0, NULL); } #define DC_BITS_512 9 #define DC_BITS_128 7 #define DC_BITS_64 6 static uint32_t dc_mchash_le(struct dc_softc *sc, const uint8_t *addr) { uint32_t crc; /* Compute CRC for the address value. */ crc = ether_crc32_le(addr, ETHER_ADDR_LEN); /* * The hash table on the PNIC II and the MX98715AEC-C/D/E * chips is only 128 bits wide. */ if (sc->dc_flags & DC_128BIT_HASH) return (crc & ((1 << DC_BITS_128) - 1)); /* The hash table on the MX98715BEC is only 64 bits wide. */ if (sc->dc_flags & DC_64BIT_HASH) return (crc & ((1 << DC_BITS_64) - 1)); /* Xircom's hash filtering table is different (read: weird) */ /* Xircom uses the LEAST significant bits */ if (DC_IS_XIRCOM(sc)) { if ((crc & 0x180) == 0x180) return ((crc & 0x0F) + (crc & 0x70) * 3 + (14 << 4)); else return ((crc & 0x1F) + ((crc >> 1) & 0xF0) * 3 + (12 << 4)); } return (crc & ((1 << DC_BITS_512) - 1)); } /* * Calculate CRC of a multicast group address, return the lower 6 bits. */ static uint32_t dc_mchash_be(const uint8_t *addr) { uint32_t crc; /* Compute CRC for the address value. */ crc = ether_crc32_be(addr, ETHER_ADDR_LEN); /* Return the filter bit position. */ return ((crc >> 26) & 0x0000003F); } /* * 21143-style RX filter setup routine. Filter programming is done by * downloading a special setup frame into the TX engine. 21143, Macronix, * PNIC, PNIC II and Davicom chips are programmed this way. * * We always program the chip using 'hash perfect' mode, i.e. one perfect * address (our node address) and a 512-bit hash filter for multicast * frames. We also sneak the broadcast address into the hash filter since * we need that too. 
 */
static void
dc_setfilt_21143(struct dc_softc *sc)
{
	struct dc_desc *sframe;
	u_int32_t h, *sp;
	struct ifmultiaddr *ifma;
	struct ifnet *ifp;
	int i;

	ifp = sc->dc_ifp;

	/* Claim a TX descriptor for the setup frame. */
	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata->dc_tx_list[i];
	sp = sc->dc_cdata.dc_sbuf;
	bzero(sp, DC_SFRAME_LEN);

	sframe->dc_data = htole32(sc->dc_saddr);
	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);

	/*
	 * Record the setup buffer in the TX chain so the completion
	 * path can recognize and reclaim this slot.
	 */
	sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)sc->dc_cdata.dc_sbuf;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/* Fill the setup frame's hash filter from the multicast list. */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = dc_mchash_le(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}
	IF_ADDR_UNLOCK(ifp);

	if (ifp->if_flags & IFF_BROADCAST) {
		h = dc_mchash_le(sc, ifp->if_broadcastaddr);
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}

	/* Set our MAC address */
	sp[39] = DC_SP_MAC(((u_int16_t *)IFP2ENADDR(sc->dc_ifp))[0]);
	sp[40] = DC_SP_MAC(((u_int16_t *)IFP2ENADDR(sc->dc_ifp))[1]);
	sp[41] = DC_SP_MAC(((u_int16_t *)IFP2ENADDR(sc->dc_ifp))[2]);

	/* Hand the descriptor to the chip and kick the TX engine. */
	sframe->dc_status = htole32(DC_TXSTAT_OWN);
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * The PNIC takes an exceedingly long time to process its
	 * setup frame; wait 10ms after posting the setup frame
	 * before proceeding, just so it has time to swallow its
	 * medicine.
	 */
	DELAY(10000);

	ifp->if_timer = 5;
}

/*
 * ADMtek-style RX filter setup: station address and 64-bit multicast
 * hash are programmed directly via CSRs, no setup frame needed.
 */
static void
dc_setfilt_admtek(struct dc_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };

	ifp = sc->dc_ifp;

	/* Init our MAC address. */
	CSR_WRITE_4(sc, DC_AL_PAR0,
	    *(u_int32_t *)(&IFP2ENADDR(sc->dc_ifp)[0]));
	CSR_WRITE_4(sc, DC_AL_PAR1,
	    *(u_int32_t *)(&IFP2ENADDR(sc->dc_ifp)[4]));

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/* First, zot all the existing hash bits. */
	CSR_WRITE_4(sc, DC_AL_MAR0, 0);
	CSR_WRITE_4(sc, DC_AL_MAR1, 0);

	/*
	 * If we're already in promisc or allmulti mode, we
	 * don't have to bother programming the multicast filter.
	 */
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI))
		return;

	/* Now program new ones. */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/* Centaur (AN985) hashes little-endian, AL981 big-endian. */
		if (DC_IS_CENTAUR(sc))
			h = dc_mchash_le(sc,
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		else
			h = dc_mchash_be(
			    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}
	IF_ADDR_UNLOCK(ifp);

	CSR_WRITE_4(sc, DC_AL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, DC_AL_MAR1, hashes[1]);
}

/*
 * ASIX-style RX filter setup: filter registers are accessed indirectly
 * through an index/data CSR pair.
 */
static void
dc_setfilt_asix(struct dc_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	int h = 0;
	u_int32_t hashes[2] = { 0, 0 };

	ifp = sc->dc_ifp;

	/* Init our MAC address */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA,
	    *(u_int32_t *)(&IFP2ENADDR(sc->dc_ifp)[0]));
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_PAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA,
	    *(u_int32_t *)(&IFP2ENADDR(sc->dc_ifp)[4]));

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/*
	 * The ASIX chip has a special bit to enable reception
	 * of broadcast frames.
	 */
	if (ifp->if_flags & IFF_BROADCAST)
		DC_SETBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_AX_NETCFG_RX_BROAD);

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, 0);

	/*
	 * If we're already in promisc or allmulti mode, we
	 * don't have to bother programming the multicast filter.
	 */
	if (ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI))
		return;

	/* now program new ones */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = dc_mchash_be(
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
	}
	IF_ADDR_UNLOCK(ifp);

	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR0);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[0]);
	CSR_WRITE_4(sc, DC_AX_FILTIDX, DC_AX_FILTIDX_MAR1);
	CSR_WRITE_4(sc, DC_AX_FILTDATA, hashes[1]);
}

/*
 * Xircom-style RX filter setup: like the 21143 this uses a setup frame,
 * but the TX/RX engines must be stopped around the download and the
 * station address lives at the start of the setup buffer.
 */
static void
dc_setfilt_xircom(struct dc_softc *sc)
{
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	struct dc_desc *sframe;
	u_int32_t h, *sp;
	int i;

	ifp = sc->dc_ifp;
	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON));

	/* Claim a TX descriptor for the setup frame. */
	i = sc->dc_cdata.dc_tx_prod;
	DC_INC(sc->dc_cdata.dc_tx_prod, DC_TX_LIST_CNT);
	sc->dc_cdata.dc_tx_cnt++;
	sframe = &sc->dc_ldata->dc_tx_list[i];
	sp = sc->dc_cdata.dc_sbuf;
	bzero(sp, DC_SFRAME_LEN);

	sframe->dc_data = htole32(sc->dc_saddr);
	sframe->dc_ctl = htole32(DC_SFRAME_LEN | DC_TXCTL_SETUP |
	    DC_TXCTL_TLINK | DC_FILTER_HASHPERF | DC_TXCTL_FINT);

	sc->dc_cdata.dc_tx_chain[i] = (struct mbuf *)sc->dc_cdata.dc_sbuf;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_PROMISC);

	if (ifp->if_flags & IFF_ALLMULTI)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);
	else
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_RX_ALLMULTI);

	/* Fill the hash filter from the multicast list. */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = dc_mchash_le(sc,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}
	IF_ADDR_UNLOCK(ifp);

	if (ifp->if_flags & IFF_BROADCAST) {
		h = dc_mchash_le(sc, ifp->if_broadcastaddr);
		sp[h >> 4] |= htole32(1 << (h & 0xF));
	}

	/* Set our MAC address */
	sp[0] = DC_SP_MAC(((u_int16_t *)IFP2ENADDR(sc->dc_ifp))[0]);
	sp[1] = DC_SP_MAC(((u_int16_t *)IFP2ENADDR(sc->dc_ifp))[1]);
	sp[2] = DC_SP_MAC(((u_int16_t *)IFP2ENADDR(sc->dc_ifp))[2]);

	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	ifp->if_flags |= IFF_RUNNING;
	sframe->dc_status = htole32(DC_TXSTAT_OWN);
	CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

	/*
	 * Wait some time...
	 */
	DELAY(1000);

	ifp->if_timer = 5;
}

/*
 * Dispatch to the appropriate RX filter routine for the chip at hand.
 */
static void
dc_setfilt(struct dc_softc *sc)
{

	if (DC_IS_INTEL(sc) || DC_IS_MACRONIX(sc) || DC_IS_PNIC(sc) ||
	    DC_IS_PNICII(sc) || DC_IS_DAVICOM(sc) || DC_IS_CONEXANT(sc))
		dc_setfilt_21143(sc);

	if (DC_IS_ASIX(sc))
		dc_setfilt_asix(sc);

	if (DC_IS_ADMTEK(sc))
		dc_setfilt_admtek(sc);

	if (DC_IS_XIRCOM(sc))
		dc_setfilt_xircom(sc);
}

/*
 * In order to fiddle with the 'full-duplex' and '100Mbps' bits in
 * the netconfig register, we first have to put the transmit and/or
 * receive logic in the idle state.
 */
static void
dc_setcfg(struct dc_softc *sc, int media)
{
	int i, restart = 0, watchdogreg;
	u_int32_t isr;

	if (IFM_SUBTYPE(media) == IFM_NONE)
		return;

	/* Idle the TX/RX engines before touching speed/duplex bits. */
	if (CSR_READ_4(sc, DC_NETCFG) &
	    (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON)) {
		restart = 1;
		DC_CLRBIT(sc, DC_NETCFG,
		    (DC_NETCFG_TX_ON | DC_NETCFG_RX_ON));

		for (i = 0; i < DC_TIMEOUT; i++) {
			isr = CSR_READ_4(sc, DC_ISR);
			if (isr & DC_ISR_TX_IDLE &&
			    ((isr & DC_ISR_RX_STATE) == DC_RXSTATE_STOPPED ||
			    (isr & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT))
				break;
			DELAY(10);
		}

		if (i == DC_TIMEOUT)
			printf("dc%d: failed to force tx and "
			    "rx to idle state\n", sc->dc_unit);
	}

	if (IFM_SUBTYPE(media) == IFM_100_TX) {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			if (DC_IS_INTEL(sc)) {
				/*
				 * There's a write enable bit here that
				 * reads as 1.
				 */
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG,
				    DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS |
			    DC_NETCFG_PORTSEL | DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, (DC_NETCFG_PCS |
				    DC_NETCFG_SCRAMBLER));
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG,
				    DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			/* Symbol (non-MII) mode. */
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_SETBIT(sc, DC_PN_NWAY,
				    DC_PN_NWAY_SPEEDSEL);
			}
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_100_TX | IFM_FDX : IFM_100_TX);
		}
	}

	if (IFM_SUBTYPE(media) == IFM_10_T) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_SPEEDSEL);
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_HEARTBEAT);
		if (sc->dc_pmode == DC_PMODE_MII) {
			/*
			 * There's a write enable bit here that reads
			 * as 1.
			 */
			if (DC_IS_INTEL(sc)) {
				watchdogreg = CSR_READ_4(sc, DC_WATCHDOG);
				watchdogreg &= ~DC_WDOG_CTLWREN;
				watchdogreg |= DC_WDOG_JABBERDIS;
				CSR_WRITE_4(sc, DC_WATCHDOG, watchdogreg);
			} else {
				DC_SETBIT(sc, DC_WATCHDOG,
				    DC_WDOG_JABBERDIS);
			}
			DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_PCS |
			    DC_NETCFG_PORTSEL | DC_NETCFG_SCRAMBLER));
			if (sc->dc_type == DC_TYPE_98713)
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			if (!DC_IS_DAVICOM(sc))
				DC_SETBIT(sc, DC_NETCFG,
				    DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
			if (DC_IS_INTEL(sc))
				dc_apply_fixup(sc, IFM_AUTO);
		} else {
			if (DC_IS_PNIC(sc)) {
				DC_PN_GPIO_CLRBIT(sc, DC_PN_GPIO_SPEEDSEL);
				DC_PN_GPIO_SETBIT(sc, DC_PN_GPIO_100TX_LOOP);
				DC_CLRBIT(sc, DC_PN_NWAY,
				    DC_PN_NWAY_SPEEDSEL);
			}
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PCS);
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_SCRAMBLER);
			if (DC_IS_INTEL(sc)) {
				/*
				 * Reprogram the SIA for 10baseT; the
				 * magic 0x7F3D/0x7F3F values select
				 * full/half duplex with autoneg off.
				 */
				DC_CLRBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL, 0xFFFF);
				if ((media & IFM_GMASK) == IFM_FDX)
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3D);
				else
					DC_SETBIT(sc, DC_10BTCTRL, 0x7F3F);
				DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
				DC_CLRBIT(sc, DC_10BTCTRL,
				    DC_TCTL_AUTONEGENBL);
				dc_apply_fixup(sc,
				    (media & IFM_GMASK) == IFM_FDX ?
				    IFM_10_T | IFM_FDX : IFM_10_T);
				DELAY(20000);
			}
		}
	}

	/*
	 * If this is a Davicom DM9102A card with a DM9801 HomePNA
	 * PHY and we want HomePNA mode, set the portsel bit to turn
	 * on the external MII port.
	 */
	if (DC_IS_DAVICOM(sc)) {
		if (IFM_SUBTYPE(media) == IFM_HPNA_1) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
			sc->dc_link = 1;
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_PORTSEL);
		}
	}

	if ((media & IFM_GMASK) == IFM_FDX) {
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_SETBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	} else {
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_FULLDUPLEX);
		if (sc->dc_pmode == DC_PMODE_SYM && DC_IS_PNIC(sc))
			DC_CLRBIT(sc, DC_PN_NWAY, DC_PN_NWAY_DUPLEX);
	}

	/* Restart TX/RX if we idled them above. */
	if (restart)
		DC_SETBIT(sc, DC_NETCFG,
		    DC_NETCFG_TX_ON | DC_NETCFG_RX_ON);
}

/*
 * Software-reset the chip via the bus control register and reinitialize
 * the interrupt mask, bus control and netconfig CSRs to a clean state.
 */
static void
dc_reset(struct dc_softc *sc)
{
	int i;

	DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);

	for (i = 0; i < DC_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, DC_BUSCTL) & DC_BUSCTL_RESET))
			break;
	}

	/*
	 * Some chips never deassert the reset bit on their own;
	 * clear it by hand and suppress the timeout warning (i = 0).
	 */
	if (DC_IS_ASIX(sc) || DC_IS_ADMTEK(sc) || DC_IS_CONEXANT(sc) ||
	    DC_IS_XIRCOM(sc) || DC_IS_INTEL(sc)) {
		DELAY(10000);
		DC_CLRBIT(sc, DC_BUSCTL, DC_BUSCTL_RESET);
		i = 0;
	}

	if (i == DC_TIMEOUT)
		printf("dc%d: reset never completed!\n", sc->dc_unit);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_BUSCTL, 0x00000000);
	CSR_WRITE_4(sc, DC_NETCFG, 0x00000000);

	/*
	 * Bring the SIA out of reset. In some cases, it looks
	 * like failing to unreset the SIA soon enough gets it
	 * into a state where it will never come out of reset
	 * until we reset the whole chip again.
	 */
	if (DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_SIARESET, DC_SIA_RESET);
		CSR_WRITE_4(sc, DC_10BTCTRL, 0);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}
}

/*
 * Look up the chip type for 'dev' in the dc_devs table by PCI
 * vendor/device ID.  Entries for chips whose behavior varies by PCI
 * revision are stored consecutively; each matching revision check
 * advances to the next (more specific) table entry.  Returns NULL if
 * the device is not one of ours.
 */
static struct dc_type *
dc_devtype(device_t dev)
{
	struct dc_type *t;
	u_int32_t rev;

	t = dc_devs;

	while (t->dc_name != NULL) {
		if ((pci_get_vendor(dev) == t->dc_vid) &&
		    (pci_get_device(dev) == t->dc_did)) {
			/* Check the PCI revision */
			rev = pci_read_config(dev, DC_PCI_CFRV, 4) & 0xFF;
			if (t->dc_did == DC_DEVICEID_98713 &&
			    rev >= DC_REVISION_98713A)
				t++;
			if (t->dc_did == DC_DEVICEID_98713_CP &&
			    rev >= DC_REVISION_98713A)
				t++;
			if (t->dc_did == DC_DEVICEID_987x5 &&
			    rev >= DC_REVISION_98715AEC_C)
				t++;
			if (t->dc_did == DC_DEVICEID_987x5 &&
			    rev >= DC_REVISION_98725)
				t++;
			if (t->dc_did == DC_DEVICEID_AX88140A &&
			    rev >= DC_REVISION_88141)
				t++;
			if (t->dc_did == DC_DEVICEID_82C168 &&
			    rev >= DC_REVISION_82C169)
				t++;
			if (t->dc_did == DC_DEVICEID_DM9102 &&
			    rev >= DC_REVISION_DM9102A)
				t++;
			/*
			 * The Microsoft MN-130 has a device ID of 0x0002,
			 * which happens to be the same as the PNIC 82c168.
			 * To keep dc_attach() from getting confused, we
			 * pretend its ID is something different.
			 * XXX: ideally, dc_attach() should be checking
			 * vendorid+deviceid together to avoid such
			 * collisions.
			 */
			if (t->dc_vid == DC_VENDORID_MICROSOFT &&
			    t->dc_did == DC_DEVICEID_MSMN130)
				t++;
			return (t);
		}
		t++;
	}

	return (NULL);
}

/*
 * Probe for a 21143 or clone chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 * We do a little bit of extra work to identify the exact type of
 * chip. The MX98713 and MX98713A have the same PCI vendor/device ID,
 * but different revision IDs. The same is true for 98715/98715A
 * chips and the 98725, as well as the ASIX and ADMtek chips. In some
 * cases, the exact chip revision affects driver behavior.
 */
static int
dc_probe(device_t dev)
{
	struct dc_type *t;

	t = dc_devtype(dev);

	if (t != NULL) {
		device_set_desc(dev, t->dc_name);
		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}

/*
 * Apply the GPIO reset/control byte sequences recorded for 'media' in
 * the SROM-derived mediainfo list by writing them to the watchdog
 * (GPIO) register.  Silently does nothing if no entry matches.
 */
static void
dc_apply_fixup(struct dc_softc *sc, int media)
{
	struct dc_mediainfo *m;
	u_int8_t *p;
	int i;
	u_int32_t reg;

	m = sc->dc_mi;
	while (m != NULL) {
		if (m->dc_media == media)
			break;
		m = m->dc_next;
	}

	if (m == NULL)
		return;

	for (i = 0, p = m->dc_reset_ptr; i < m->dc_reset_len; i++, p += 2) {
		reg = (p[0] | (p[1] << 8)) << 16;
		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
	}

	for (i = 0, p = m->dc_gp_ptr; i < m->dc_gp_len; i++, p += 2) {
		reg = (p[0] | (p[1] << 8)) << 16;
		CSR_WRITE_4(sc, DC_WATCHDOG, reg);
	}
}

/*
 * Decode a 21143 SROM SIA media block into a dc_mediainfo entry and
 * prepend it to the softc's media list; switches pmode to SIA.
 * NOTE(review): the M_NOWAIT malloc() result is not checked before the
 * dereference below — a failed allocation would panic.
 */
static void
dc_decode_leaf_sia(struct dc_softc *sc, struct dc_eblock_sia *l)
{
	struct dc_mediainfo *m;

	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	switch (l->dc_sia_code & ~DC_SIA_CODE_EXT) {
	case DC_SIA_CODE_10BT:
		m->dc_media = IFM_10_T;
		break;
	case DC_SIA_CODE_10BT_FDX:
		m->dc_media = IFM_10_T | IFM_FDX;
		break;
	case DC_SIA_CODE_10B2:
		m->dc_media = IFM_10_2;
		break;
	case DC_SIA_CODE_10B5:
		m->dc_media = IFM_10_5;
		break;
	default:
		break;
	}

	/*
	 * We need to ignore CSR13, CSR14, CSR15 for SIA mode.
	 * Things apparently already work for cards that do
	 * supply Media Specific Data.
	 */
	if (l->dc_sia_code & DC_SIA_CODE_EXT) {
		m->dc_gp_len = 2;
		m->dc_gp_ptr =
		    (u_int8_t *)&l->dc_un.dc_sia_ext.dc_sia_gpio_ctl;
	} else {
		m->dc_gp_len = 2;
		m->dc_gp_ptr =
		    (u_int8_t *)&l->dc_un.dc_sia_noext.dc_sia_gpio_ctl;
	}

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	sc->dc_pmode = DC_PMODE_SIA;
}

/*
 * Decode a 21143 SROM SYM (symbol/100Mbps) media block; switches pmode
 * to SYM.  Same unchecked-malloc caveat as dc_decode_leaf_sia().
 */
static void
dc_decode_leaf_sym(struct dc_softc *sc, struct dc_eblock_sym *l)
{
	struct dc_mediainfo *m;

	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (l->dc_sym_code == DC_SYM_CODE_100BT)
		m->dc_media = IFM_100_TX;

	if (l->dc_sym_code == DC_SYM_CODE_100BT_FDX)
		m->dc_media = IFM_100_TX | IFM_FDX;

	m->dc_gp_len = 2;
	m->dc_gp_ptr = (u_int8_t *)&l->dc_sym_gpio_ctl;

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;

	sc->dc_pmode = DC_PMODE_SYM;
}

/*
 * Decode a 21143 SROM MII media block: record pointers into the SROM
 * image for the GPIO and reset byte sequences.  Same unchecked-malloc
 * caveat as above.
 */
static void
dc_decode_leaf_mii(struct dc_softc *sc, struct dc_eblock_mii *l)
{
	struct dc_mediainfo *m;
	u_int8_t *p;

	m = malloc(sizeof(struct dc_mediainfo), M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	/* We abuse IFM_AUTO to represent MII. */
	m->dc_media = IFM_AUTO;
	m->dc_gp_len = l->dc_gpr_len;

	p = (u_int8_t *)l;
	p += sizeof(struct dc_eblock_mii);
	m->dc_gp_ptr = p;
	/* The reset sequence (length byte + data) follows the GPIO data. */
	p += 2 * l->dc_gpr_len;
	m->dc_reset_len = *p;
	p++;
	m->dc_reset_ptr = p;

	m->dc_next = sc->dc_mi;
	sc->dc_mi = m;
}

/*
 * Read the entire SROM (2^bits words) into a malloc'd buffer hung off
 * the softc for later parsing.
 * NOTE(review): the M_NOWAIT malloc() result is not checked before use.
 */
static void
dc_read_srom(struct dc_softc *sc, int bits)
{
	int size;

	size = 2 << bits;
	sc->dc_srom = malloc(size, M_DEVBUF, M_NOWAIT);
	dc_read_eeprom(sc, (caddr_t)sc->dc_srom, 0, (size / 2), 0);
}

/*
 * Walk the 21143 SROM info leaf and decode its media blocks via the
 * dc_decode_leaf_* helpers above.
 */
static void
dc_parse_21143_srom(struct dc_softc *sc)
{
	struct dc_leaf_hdr *lhdr;
	struct dc_eblock_hdr *hdr;
	int have_mii, i, loff;
	char *ptr;

	have_mii = 0;
	loff = sc->dc_srom[27];
	lhdr = (struct dc_leaf_hdr *)&(sc->dc_srom[loff]);

	ptr = (char *)lhdr;
	ptr += sizeof(struct dc_leaf_hdr) - 1;
	/*
	 * Look if we got a MII media block.
	 */
	for (i = 0; i < lhdr->dc_mcnt; i++) {
		hdr = (struct dc_eblock_hdr *)ptr;
		if (hdr->dc_type == DC_EBLOCK_MII)
			have_mii++;
		ptr += (hdr->dc_len & 0x7F);
		ptr++;
	}

	/*
	 * Do the same thing again.
Only use SIA and SYM media * blocks if no MII media block is available. */ ptr = (char *)lhdr; ptr += sizeof(struct dc_leaf_hdr) - 1; for (i = 0; i < lhdr->dc_mcnt; i++) { hdr = (struct dc_eblock_hdr *)ptr; switch (hdr->dc_type) { case DC_EBLOCK_MII: dc_decode_leaf_mii(sc, (struct dc_eblock_mii *)hdr); break; case DC_EBLOCK_SIA: if (! have_mii) dc_decode_leaf_sia(sc, (struct dc_eblock_sia *)hdr); break; case DC_EBLOCK_SYM: if (! have_mii) dc_decode_leaf_sym(sc, (struct dc_eblock_sym *)hdr); break; default: /* Don't care. Yet. */ break; } ptr += (hdr->dc_len & 0x7F); ptr++; } } static void dc_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { u_int32_t *paddr; KASSERT(nseg == 1, ("wrong number of segments, should be 1")); paddr = arg; *paddr = segs->ds_addr; } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int dc_attach(device_t dev) { int tmp = 0; u_char eaddr[ETHER_ADDR_LEN]; u_int32_t command; struct dc_softc *sc; struct ifnet *ifp; u_int32_t revision; int unit, error = 0, rid, mac_offset; int i; u_int8_t *mac; sc = device_get_softc(dev); unit = device_get_unit(dev); mtx_init(&sc->dc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); /* * Map control/status registers. */ pci_enable_busmaster(dev); rid = DC_RID; sc->dc_res = bus_alloc_resource_any(dev, DC_RES, &rid, RF_ACTIVE); if (sc->dc_res == NULL) { printf("dc%d: couldn't map ports/memory\n", unit); error = ENXIO; goto fail; } sc->dc_btag = rman_get_bustag(sc->dc_res); sc->dc_bhandle = rman_get_bushandle(sc->dc_res); /* Allocate interrupt. */ rid = 0; sc->dc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->dc_irq == NULL) { printf("dc%d: couldn't map interrupt\n", unit); error = ENXIO; goto fail; } /* Need this info to decide on a chip type. 
*/ sc->dc_info = dc_devtype(dev); revision = pci_read_config(dev, DC_PCI_CFRV, 4) & 0x000000FF; /* Get the eeprom width, but PNIC and XIRCOM have diff eeprom */ if (sc->dc_info->dc_did != DC_DEVICEID_82C168 && sc->dc_info->dc_did != DC_DEVICEID_X3201) dc_eeprom_width(sc); switch (sc->dc_info->dc_did) { case DC_DEVICEID_21143: sc->dc_type = DC_TYPE_21143; sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR; sc->dc_flags |= DC_REDUCED_MII_POLL; /* Save EEPROM contents so we can parse them later. */ dc_read_srom(sc, sc->dc_romwidth); break; case DC_DEVICEID_DM9009: case DC_DEVICEID_DM9100: case DC_DEVICEID_DM9102: sc->dc_type = DC_TYPE_DM9102; sc->dc_flags |= DC_TX_COALESCE | DC_TX_INTR_ALWAYS; sc->dc_flags |= DC_REDUCED_MII_POLL | DC_TX_STORENFWD; sc->dc_flags |= DC_TX_ALIGN; sc->dc_pmode = DC_PMODE_MII; /* Increase the latency timer value. */ command = pci_read_config(dev, DC_PCI_CFLT, 4); command &= 0xFFFF00FF; command |= 0x00008000; pci_write_config(dev, DC_PCI_CFLT, command, 4); break; case DC_DEVICEID_AL981: sc->dc_type = DC_TYPE_AL981; sc->dc_flags |= DC_TX_USE_TX_INTR; sc->dc_flags |= DC_TX_ADMTEK_WAR; sc->dc_pmode = DC_PMODE_MII; dc_read_srom(sc, sc->dc_romwidth); break; case DC_DEVICEID_AN985: case DC_DEVICEID_ADM9511: case DC_DEVICEID_ADM9513: case DC_DEVICEID_FA511: case DC_DEVICEID_FE2500: case DC_DEVICEID_EN2242: case DC_DEVICEID_HAWKING_PN672TX: case DC_DEVICEID_3CSOHOB: case DC_DEVICEID_MSMN120: case DC_DEVICEID_MSMN130_FAKE: /* XXX avoid collision with PNIC*/ sc->dc_type = DC_TYPE_AN985; sc->dc_flags |= DC_64BIT_HASH; sc->dc_flags |= DC_TX_USE_TX_INTR; sc->dc_flags |= DC_TX_ADMTEK_WAR; sc->dc_pmode = DC_PMODE_MII; /* Don't read SROM for - auto-loaded on reset */ break; case DC_DEVICEID_98713: case DC_DEVICEID_98713_CP: if (revision < DC_REVISION_98713A) { sc->dc_type = DC_TYPE_98713; } if (revision >= DC_REVISION_98713A) { sc->dc_type = DC_TYPE_98713A; sc->dc_flags |= DC_21143_NWAY; } sc->dc_flags |= DC_REDUCED_MII_POLL; sc->dc_flags |= DC_TX_POLL | 
DC_TX_USE_TX_INTR; break; case DC_DEVICEID_987x5: case DC_DEVICEID_EN1217: /* * Macronix MX98715AEC-C/D/E parts have only a * 128-bit hash table. We need to deal with these * in the same manner as the PNIC II so that we * get the right number of bits out of the * CRC routine. */ if (revision >= DC_REVISION_98715AEC_C && revision < DC_REVISION_98725) sc->dc_flags |= DC_128BIT_HASH; sc->dc_type = DC_TYPE_987x5; sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR; sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY; break; case DC_DEVICEID_98727: sc->dc_type = DC_TYPE_987x5; sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR; sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY; break; case DC_DEVICEID_82C115: sc->dc_type = DC_TYPE_PNICII; sc->dc_flags |= DC_TX_POLL | DC_TX_USE_TX_INTR | DC_128BIT_HASH; sc->dc_flags |= DC_REDUCED_MII_POLL | DC_21143_NWAY; break; case DC_DEVICEID_82C168: sc->dc_type = DC_TYPE_PNIC; sc->dc_flags |= DC_TX_STORENFWD | DC_TX_INTR_ALWAYS; sc->dc_flags |= DC_PNIC_RX_BUG_WAR; sc->dc_pnic_rx_buf = malloc(DC_RXLEN * 5, M_DEVBUF, M_NOWAIT); if (revision < DC_REVISION_82C169) sc->dc_pmode = DC_PMODE_SYM; break; case DC_DEVICEID_AX88140A: sc->dc_type = DC_TYPE_ASIX; sc->dc_flags |= DC_TX_USE_TX_INTR | DC_TX_INTR_FIRSTFRAG; sc->dc_flags |= DC_REDUCED_MII_POLL; sc->dc_pmode = DC_PMODE_MII; break; case DC_DEVICEID_X3201: sc->dc_type = DC_TYPE_XIRCOM; sc->dc_flags |= DC_TX_INTR_ALWAYS | DC_TX_COALESCE | DC_TX_ALIGN; /* * We don't actually need to coalesce, but we're doing * it to obtain a double word aligned buffer. * The DC_TX_COALESCE flag is required. */ sc->dc_pmode = DC_PMODE_MII; break; case DC_DEVICEID_RS7112: sc->dc_type = DC_TYPE_CONEXANT; sc->dc_flags |= DC_TX_INTR_ALWAYS; sc->dc_flags |= DC_REDUCED_MII_POLL; sc->dc_pmode = DC_PMODE_MII; dc_read_srom(sc, sc->dc_romwidth); break; default: printf("dc%d: unknown device: %x\n", sc->dc_unit, sc->dc_info->dc_did); break; } /* Save the cache line size. 
*/ if (DC_IS_DAVICOM(sc)) sc->dc_cachesize = 0; else sc->dc_cachesize = pci_read_config(dev, DC_PCI_CFLT, 4) & 0xFF; /* Reset the adapter. */ dc_reset(sc); /* Take 21143 out of snooze mode */ if (DC_IS_INTEL(sc) || DC_IS_XIRCOM(sc)) { command = pci_read_config(dev, DC_PCI_CFDD, 4); command &= ~(DC_CFDD_SNOOZE_MODE | DC_CFDD_SLEEP_MODE); pci_write_config(dev, DC_PCI_CFDD, command, 4); } /* * Try to learn something about the supported media. * We know that ASIX and ADMtek and Davicom devices * will *always* be using MII media, so that's a no-brainer. * The tricky ones are the Macronix/PNIC II and the * Intel 21143. */ if (DC_IS_INTEL(sc)) dc_parse_21143_srom(sc); else if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) { if (sc->dc_type == DC_TYPE_98713) sc->dc_pmode = DC_PMODE_MII; else sc->dc_pmode = DC_PMODE_SYM; } else if (!sc->dc_pmode) sc->dc_pmode = DC_PMODE_MII; /* * Get station address from the EEPROM. */ switch(sc->dc_type) { case DC_TYPE_98713: case DC_TYPE_98713A: case DC_TYPE_987x5: case DC_TYPE_PNICII: dc_read_eeprom(sc, (caddr_t)&mac_offset, (DC_EE_NODEADDR_OFFSET / 2), 1, 0); dc_read_eeprom(sc, (caddr_t)&eaddr, (mac_offset / 2), 3, 0); break; case DC_TYPE_PNIC: dc_read_eeprom(sc, (caddr_t)&eaddr, 0, 3, 1); break; case DC_TYPE_DM9102: dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0); #ifdef __sparc64__ /* * If this is an onboard dc(4) the station address read from * the EEPROM is all zero and we have to get it from the fcode. 
*/ for (i = 0; i < ETHER_ADDR_LEN; i++) if (eaddr[i] != 0x00) break; if (i >= ETHER_ADDR_LEN) OF_getetheraddr(dev, eaddr); #endif break; case DC_TYPE_21143: case DC_TYPE_ASIX: dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0); break; case DC_TYPE_AL981: case DC_TYPE_AN985: *(u_int32_t *)(&eaddr[0]) = CSR_READ_4(sc, DC_AL_PAR0); *(u_int16_t *)(&eaddr[4]) = CSR_READ_4(sc, DC_AL_PAR1); break; case DC_TYPE_CONEXANT: bcopy(sc->dc_srom + DC_CONEXANT_EE_NODEADDR, &eaddr, ETHER_ADDR_LEN); break; case DC_TYPE_XIRCOM: /* The MAC comes from the CIS. */ mac = pci_get_ether(dev); if (!mac) { device_printf(dev, "No station address in CIS!\n"); error = ENXIO; goto fail; } bcopy(mac, eaddr, ETHER_ADDR_LEN); break; default: dc_read_eeprom(sc, (caddr_t)&eaddr, DC_EE_NODEADDR, 3, 0); break; } sc->dc_unit = unit; /* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */ error = bus_dma_tag_create(NULL, PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct dc_list_data), 1, sizeof(struct dc_list_data), 0, NULL, NULL, &sc->dc_ltag); if (error) { printf("dc%d: failed to allocate busdma tag\n", unit); error = ENXIO; goto fail; } error = bus_dmamem_alloc(sc->dc_ltag, (void **)&sc->dc_ldata, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->dc_lmap); if (error) { printf("dc%d: failed to allocate DMA safe memory\n", unit); error = ENXIO; goto fail; } error = bus_dmamap_load(sc->dc_ltag, sc->dc_lmap, sc->dc_ldata, sizeof(struct dc_list_data), dc_dma_map_addr, &sc->dc_laddr, BUS_DMA_NOWAIT); if (error) { printf("dc%d: cannot get address of the descriptors\n", unit); error = ENXIO; goto fail; } /* * Allocate a busdma tag and DMA safe memory for the multicast * setup frame. 
*/ error = bus_dma_tag_create(NULL, PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, DC_SFRAME_LEN + DC_MIN_FRAMELEN, 1, DC_SFRAME_LEN + DC_MIN_FRAMELEN, 0, NULL, NULL, &sc->dc_stag); if (error) { printf("dc%d: failed to allocate busdma tag\n", unit); error = ENXIO; goto fail; } error = bus_dmamem_alloc(sc->dc_stag, (void **)&sc->dc_cdata.dc_sbuf, BUS_DMA_NOWAIT, &sc->dc_smap); if (error) { printf("dc%d: failed to allocate DMA safe memory\n", unit); error = ENXIO; goto fail; } error = bus_dmamap_load(sc->dc_stag, sc->dc_smap, sc->dc_cdata.dc_sbuf, DC_SFRAME_LEN, dc_dma_map_addr, &sc->dc_saddr, BUS_DMA_NOWAIT); if (error) { printf("dc%d: cannot get address of the descriptors\n", unit); error = ENXIO; goto fail; } /* Allocate a busdma tag for mbufs. */ error = bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, DC_TX_LIST_CNT, MCLBYTES, 0, NULL, NULL, &sc->dc_mtag); if (error) { printf("dc%d: failed to allocate busdma tag\n", unit); error = ENXIO; goto fail; } /* Create the TX/RX busdma maps. 
*/ for (i = 0; i < DC_TX_LIST_CNT; i++) { error = bus_dmamap_create(sc->dc_mtag, 0, &sc->dc_cdata.dc_tx_map[i]); if (error) { printf("dc%d: failed to init TX ring\n", unit); error = ENXIO; goto fail; } } for (i = 0; i < DC_RX_LIST_CNT; i++) { error = bus_dmamap_create(sc->dc_mtag, 0, &sc->dc_cdata.dc_rx_map[i]); if (error) { printf("dc%d: failed to init RX ring\n", unit); error = ENXIO; goto fail; } } error = bus_dmamap_create(sc->dc_mtag, 0, &sc->dc_sparemap); if (error) { printf("dc%d: failed to init RX ring\n", unit); error = ENXIO; goto fail; } ifp = sc->dc_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { printf("dc%d: can not if_alloc()\n", unit); error = ENOSPC; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); /* XXX: bleah, MTU gets overwritten in ether_ifattach() */ ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; if (!IS_MPSAFE) ifp->if_flags |= IFF_NEEDSGIANT; ifp->if_ioctl = dc_ioctl; ifp->if_start = dc_start; ifp->if_watchdog = dc_watchdog; ifp->if_init = dc_init; ifp->if_baudrate = 10000000; IFQ_SET_MAXLEN(&ifp->if_snd, DC_TX_LIST_CNT - 1); ifp->if_snd.ifq_drv_maxlen = DC_TX_LIST_CNT - 1; IFQ_SET_READY(&ifp->if_snd); /* * Do MII setup. If this is a 21143, check for a PHY on the * MII bus after applying any necessary fixups to twiddle the * GPIO bits. If we don't end up finding a PHY, restore the * old selection (SIA only or SIA/SYM) and attach the dcphy * driver instead. */ if (DC_IS_INTEL(sc)) { dc_apply_fixup(sc, IFM_AUTO); tmp = sc->dc_pmode; sc->dc_pmode = DC_PMODE_MII; } /* * Setup General Purpose port mode and data so the tulip can talk * to the MII. This needs to be done before mii_phy_probe so that * we can actually see them. 
*/ if (DC_IS_XIRCOM(sc)) { CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN | DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); DELAY(10); CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN | DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT); DELAY(10); } error = mii_phy_probe(dev, &sc->dc_miibus, dc_ifmedia_upd, dc_ifmedia_sts); if (error && DC_IS_INTEL(sc)) { sc->dc_pmode = tmp; if (sc->dc_pmode != DC_PMODE_SIA) sc->dc_pmode = DC_PMODE_SYM; sc->dc_flags |= DC_21143_NWAY; mii_phy_probe(dev, &sc->dc_miibus, dc_ifmedia_upd, dc_ifmedia_sts); /* * For non-MII cards, we need to have the 21143 * drive the LEDs. Except there are some systems * like the NEC VersaPro NoteBook PC which have no * LEDs, and twiddling these bits has adverse effects * on them. (I.e. you suddenly can't get a link.) */ if (pci_read_config(dev, DC_PCI_CSID, 4) != 0x80281033) sc->dc_flags |= DC_TULIP_LEDS; error = 0; } if (error) { printf("dc%d: MII without any PHY!\n", sc->dc_unit); goto fail; } if (DC_IS_ADMTEK(sc)) { /* * Set automatic TX underrun recovery for the ADMtek chips */ DC_SETBIT(sc, DC_AL_CR, DC_AL_CR_ATUR); } /* * Tell the upper layer(s) we support long frames. */ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_VLAN_MTU; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif ifp->if_capenable = ifp->if_capabilities; callout_init(&sc->dc_stat_ch, IS_MPSAFE ? CALLOUT_MPSAFE : 0); #ifdef SRM_MEDIA sc->dc_srm_media = 0; /* Remember the SRM console media setting */ if (DC_IS_INTEL(sc)) { command = pci_read_config(dev, DC_PCI_CFDD, 4); command &= ~(DC_CFDD_SNOOZE_MODE | DC_CFDD_SLEEP_MODE); switch ((command >> 8) & 0xff) { case 3: sc->dc_srm_media = IFM_10_T; break; case 4: sc->dc_srm_media = IFM_10_T | IFM_FDX; break; case 5: sc->dc_srm_media = IFM_100_TX; break; case 6: sc->dc_srm_media = IFM_100_TX | IFM_FDX; break; } if (sc->dc_srm_media) sc->dc_srm_media |= IFM_ACTIVE | IFM_ETHER; } #endif /* * Call MI attach routine. 
*/ ether_ifattach(ifp, eaddr); /* Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->dc_irq, INTR_TYPE_NET | (IS_MPSAFE ? INTR_MPSAFE : 0), dc_intr, sc, &sc->dc_intrhand); if (error) { printf("dc%d: couldn't set up irq\n", unit); ether_ifdetach(ifp); if_free(ifp); goto fail; } fail: if (error) dc_detach(dev); return (error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. */ static int dc_detach(device_t dev) { struct dc_softc *sc; struct ifnet *ifp; struct dc_mediainfo *m; int i; sc = device_get_softc(dev); KASSERT(mtx_initialized(&sc->dc_mtx), ("dc mutex not initialized")); DC_LOCK(sc); ifp = sc->dc_ifp; /* These should only be active if attach succeeded */ if (device_is_attached(dev)) { dc_stop(sc); ether_ifdetach(ifp); if_free(ifp); } if (sc->dc_miibus) device_delete_child(dev, sc->dc_miibus); bus_generic_detach(dev); if (sc->dc_intrhand) bus_teardown_intr(dev, sc->dc_irq, sc->dc_intrhand); if (sc->dc_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->dc_irq); if (sc->dc_res) bus_release_resource(dev, DC_RES, DC_RID, sc->dc_res); if (sc->dc_cdata.dc_sbuf != NULL) bus_dmamem_free(sc->dc_stag, sc->dc_cdata.dc_sbuf, sc->dc_smap); if (sc->dc_ldata != NULL) bus_dmamem_free(sc->dc_ltag, sc->dc_ldata, sc->dc_lmap); for (i = 0; i < DC_TX_LIST_CNT; i++) bus_dmamap_destroy(sc->dc_mtag, sc->dc_cdata.dc_tx_map[i]); for (i = 0; i < DC_RX_LIST_CNT; i++) bus_dmamap_destroy(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i]); bus_dmamap_destroy(sc->dc_mtag, sc->dc_sparemap); if (sc->dc_stag) bus_dma_tag_destroy(sc->dc_stag); if (sc->dc_mtag) bus_dma_tag_destroy(sc->dc_mtag); if (sc->dc_ltag) bus_dma_tag_destroy(sc->dc_ltag); free(sc->dc_pnic_rx_buf, M_DEVBUF); while (sc->dc_mi != NULL) { m = sc->dc_mi->dc_next; 
		/* Tail of dc_detach(): free the media-info list and SROM copy. */
		free(sc->dc_mi, M_DEVBUF);
		sc->dc_mi = m;
	}
	free(sc->dc_srom, M_DEVBUF);

	DC_UNLOCK(sc);
	mtx_destroy(&sc->dc_mtx);

	return (0);
}

/*
 * Initialize the transmit descriptors: link every slot to the next
 * (wrapping the last back to slot 0 to form a ring), clear the data
 * pointer, control word and software mbuf pointer of each, then reset
 * the producer/consumer indices and sync the descriptor memory so the
 * chip sees a clean list.  Always returns 0.
 */
static int
dc_list_tx_init(struct dc_softc *sc)
{
	struct dc_chain_data *cd;
	struct dc_list_data *ld;
	int i, nexti;

	cd = &sc->dc_cdata;
	ld = sc->dc_ldata;
	for (i = 0; i < DC_TX_LIST_CNT; i++) {
		/* Chain the descriptors into a closed ring. */
		if (i == DC_TX_LIST_CNT - 1)
			nexti = 0;
		else
			nexti = i + 1;
		ld->dc_tx_list[i].dc_next = htole32(DC_TXDESC(sc, nexti));
		cd->dc_tx_chain[i] = NULL;
		ld->dc_tx_list[i].dc_data = 0;
		ld->dc_tx_list[i].dc_ctl = 0;
	}

	cd->dc_tx_prod = cd->dc_tx_cons = cd->dc_tx_cnt = 0;
	bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them.  Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.  Returns ENOBUFS if any mbuf allocation in
 * dc_newbuf() fails, 0 on success.
 */
static int
dc_list_rx_init(struct dc_softc *sc)
{
	struct dc_chain_data *cd;
	struct dc_list_data *ld;
	int i, nexti;

	cd = &sc->dc_cdata;
	ld = sc->dc_ldata;

	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		if (dc_newbuf(sc, i, 1) != 0)
			return (ENOBUFS);
		/* Close the ring: last descriptor links back to slot 0. */
		if (i == DC_RX_LIST_CNT - 1)
			nexti = 0;
		else
			nexti = i + 1;
		ld->dc_rx_list[i].dc_next = htole32(DC_RXDESC(sc, nexti));
	}

	cd->dc_rx_prod = 0;
	bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	return (0);
}

/*
 * busdma callback for dc_newbuf(): store the single segment's bus address
 * into the RX descriptor selected by dc_rx_cur, or record the load error
 * in dc_rx_err for the caller to pick up.  (Old-style K&R parameter
 * declarations are kept as-is to match the file's existing callbacks.)
 */
static void
dc_dma_map_rxbuf(arg, segs, nseg, mapsize, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg;
	bus_size_t mapsize;
	int error;
{
	struct dc_softc *sc;
	struct dc_desc *c;

	sc = arg;
	c = &sc->dc_ldata->dc_rx_list[sc->dc_cdata.dc_rx_cur];
	if (error) {
		sc->dc_cdata.dc_rx_err = error;
		return;
	}

	KASSERT(nseg == 1, ("wrong number of segments, should be 1"));
	sc->dc_cdata.dc_rx_err = 0;
	c->dc_data = htole32(segs->ds_addr);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 */
static int
dc_newbuf(struct dc_softc *sc, int i, int alloc)
{
	struct mbuf *m_new;
	bus_dmamap_t tmp;
	int error;

	/*
	 * If 'alloc' is set, get a fresh cluster mbuf; otherwise recycle
	 * the mbuf already in slot i, resetting its data pointer to the
	 * start of the cluster.
	 */
	if (alloc) {
		m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m_new == NULL)
			return (ENOBUFS);
	} else {
		m_new = sc->dc_cdata.dc_rx_chain[i];
		m_new->m_data = m_new->m_ext.ext_buf;
	}
	m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	/* Reserve 8 bytes at the front (longword alignment for the chip). */
	m_adj(m_new, sizeof(u_int64_t));

	/*
	 * If this is a PNIC chip, zero the buffer. This is part
	 * of the workaround for the receive bug in the 82c168 and
	 * 82c169 chips.
	 */
	if (sc->dc_flags & DC_PNIC_RX_BUG_WAR)
		bzero(mtod(m_new, char *), m_new->m_len);

	/* No need to remap the mbuf if we're reusing it. */
	if (alloc) {
		/*
		 * Load the new mbuf into the spare map first; only after
		 * the load succeeds do we swap the spare map with the
		 * slot's map, so the slot is never left unmapped on error.
		 */
		sc->dc_cdata.dc_rx_cur = i;
		error = bus_dmamap_load_mbuf(sc->dc_mtag, sc->dc_sparemap,
		    m_new, dc_dma_map_rxbuf, sc, 0);
		if (error) {
			m_freem(m_new);
			return (error);
		}
		if (sc->dc_cdata.dc_rx_err != 0) {
			m_freem(m_new);
			return (sc->dc_cdata.dc_rx_err);
		}
		bus_dmamap_unload(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i]);
		tmp = sc->dc_cdata.dc_rx_map[i];
		sc->dc_cdata.dc_rx_map[i] = sc->dc_sparemap;
		sc->dc_sparemap = tmp;
		sc->dc_cdata.dc_rx_chain[i] = m_new;
	}

	/* Hand the descriptor back to the chip (OWN bit set last). */
	sc->dc_ldata->dc_rx_list[i].dc_ctl = htole32(DC_RXCTL_RLINK | DC_RXLEN);
	sc->dc_ldata->dc_rx_list[i].dc_status = htole32(DC_RXSTAT_OWN);
	bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i],
	    BUS_DMASYNC_PREREAD);
	bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	return (0);
}

/*
 * Grrrrr.
 * The PNIC chip has a terrible bug in it that manifests itself during
 * periods of heavy activity. The exact mode of failure is difficult to
 * pinpoint: sometimes it only happens in promiscuous mode, sometimes it
 * will happen on slow machines. The bug is that sometimes instead of
 * uploading one complete frame during reception, it uploads what looks
 * like the entire contents of its FIFO memory. The frame we want is at
 * the end of the whole mess, but we never know exactly how much data has
 * been uploaded, so salvaging the frame is hard.
 *
 * There is only one way to do it reliably, and it's disgusting.
 * Here's what we know:
 *
 * - We know there will always be somewhere between one and three extra
 *   descriptors uploaded.
 *
 * - We know the desired received frame will always be at the end of the
 *   total data upload.
 *
 * - We know the size of the desired received frame because it will be
 *   provided in the length field of the status word in the last descriptor.
 *
 * Here's what we do:
 *
 * - When we allocate buffers for the receive ring, we bzero() them.
 *   This means that we know that the buffer contents should be all
 *   zeros, except for data uploaded by the chip.
 *
 * - We also force the PNIC chip to upload frames that include the
 *   ethernet CRC at the end.
 *
 * - We gather all of the bogus frame data into a single buffer.
 *
 * - We then position a pointer at the end of this buffer and scan
 *   backwards until we encounter the first non-zero byte of data.
 *   This is the end of the received frame. We know we will encounter
 *   some data at the end of the frame because the CRC will always be
 *   there, so even if the sender transmits a packet of all zeros,
 *   we won't be fooled.
 *
 * - We know the size of the actual received frame, so we subtract
 *   that value from the current pointer location. This brings us
 *   to the start of the actual received packet.
 *
 * - We copy this into an mbuf and pass it on, along with the actual
 *   frame length.
 *
 * The performance hit is tremendous, but it beats dropping frames all
 * the time.
 */

#define	DC_WHOLEFRAME	(DC_RXSTAT_FIRSTFRAG | DC_RXSTAT_LASTFRAG)
static void
dc_pnic_rx_bug_war(struct dc_softc *sc, int idx)
{
	struct dc_desc *cur_rx;
	struct dc_desc *c = NULL;
	struct mbuf *m = NULL;
	unsigned char *ptr;
	int i, total_len;
	u_int32_t rxstat = 0;

	/* Start from the descriptor where the bogus upload began. */
	i = sc->dc_pnic_rx_bug_save;
	cur_rx = &sc->dc_ldata->dc_rx_list[idx];
	ptr = sc->dc_pnic_rx_buf;
	/* The staging buffer holds up to five descriptors' worth of data. */
	bzero(ptr, DC_RXLEN * 5);

	/* Copy all the bytes from the bogus buffers. */
	while (1) {
		c = &sc->dc_ldata->dc_rx_list[i];
		rxstat = le32toh(c->dc_status);
		m = sc->dc_cdata.dc_rx_chain[i];
		bcopy(mtod(m, char *), ptr, DC_RXLEN);
		ptr += DC_RXLEN;
		/* If this is the last buffer, break out. */
		if (i == idx || rxstat & DC_RXSTAT_LASTFRAG)
			break;
		/* Recycle the intermediate buffer and move on. */
		dc_newbuf(sc, i, 0);
		DC_INC(i, DC_RX_LIST_CNT);
	}

	/* Find the length of the actual receive frame. */
	total_len = DC_RXBYTES(rxstat);

	/*
	 * Scan backwards until we hit a non-zero byte.  The CRC at the
	 * end of the frame guarantees non-zero data is present (see the
	 * comment block above).
	 */
	while (*ptr == 0x00)
		ptr--;

	/* Round off. */
	if ((uintptr_t)(ptr) & 0x3)
		ptr -= 1;

	/* Now find the start of the frame. */
	ptr -= total_len;
	if (ptr < sc->dc_pnic_rx_buf)
		ptr = sc->dc_pnic_rx_buf;

	/*
	 * Now copy the salvaged frame to the last mbuf and fake up
	 * the status word to make it look like a successful
	 * frame reception.
	 */
	dc_newbuf(sc, i, 0);
	bcopy(ptr, mtod(m, char *), total_len);
	cur_rx->dc_status = htole32(rxstat | DC_RXSTAT_FIRSTFRAG);
}

/*
 * This routine searches the RX ring for dirty descriptors in the
 * event that the rxeof routine falls out of sync with the chip's
 * current descriptor pointer. This may happen sometimes as a result
 * of a "no RX buffer available" condition that happens when the chip
 * consumes all of the RX buffers before the driver has a chance to
 * process the RX ring. This routine may need to be called more than
 * once to bring the driver back in sync with the chip, however we
 * should still be getting RX DONE interrupts to drive the search
 * for new packets in the RX ring, so we should catch up eventually.
 * Returns 0 if the ring is genuinely empty, EAGAIN after resyncing
 * the producer index (caller should re-run dc_rxeof()).
 */
static int
dc_rx_resync(struct dc_softc *sc)
{
	struct dc_desc *cur_rx;
	int i, pos;

	pos = sc->dc_cdata.dc_rx_prod;

	/* Walk the ring looking for a descriptor the chip has filled. */
	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		cur_rx = &sc->dc_ldata->dc_rx_list[pos];
		if (!(le32toh(cur_rx->dc_status) & DC_RXSTAT_OWN))
			break;
		DC_INC(pos, DC_RX_LIST_CNT);
	}

	/* If the ring really is empty, then just return. */
	if (i == DC_RX_LIST_CNT)
		return (0);

	/* We've fallen behind the chip: catch it.
 */
	sc->dc_cdata.dc_rx_prod = pos;
	return (EAGAIN);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
dc_rxeof(struct dc_softc *sc)
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct dc_desc *cur_rx;
	int i, total_len = 0;
	u_int32_t rxstat;

	DC_LOCK_ASSERT(sc);

	ifp = sc->dc_ifp;
	i = sc->dc_cdata.dc_rx_prod;

	bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, BUS_DMASYNC_POSTREAD);
	/* Process descriptors until we find one the chip still owns. */
	while (!(le32toh(sc->dc_ldata->dc_rx_list[i].dc_status) &
	    DC_RXSTAT_OWN)) {
#ifdef DEVICE_POLLING
		/* In polling mode, honour the per-call packet budget. */
		if (ifp->if_flags & IFF_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif
		cur_rx = &sc->dc_ldata->dc_rx_list[i];
		rxstat = le32toh(cur_rx->dc_status);
		m = sc->dc_cdata.dc_rx_chain[i];
		bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_rx_map[i],
		    BUS_DMASYNC_POSTREAD);
		total_len = DC_RXBYTES(rxstat);

		/*
		 * PNIC receive bug workaround: a frame not contained in a
		 * single descriptor (FIRSTFRAG|LASTFRAG both set) indicates
		 * a bogus FIFO dump; remember where it started and, once
		 * the last fragment arrives, salvage the real frame.
		 */
		if (sc->dc_flags & DC_PNIC_RX_BUG_WAR) {
			if ((rxstat & DC_WHOLEFRAME) != DC_WHOLEFRAME) {
				if (rxstat & DC_RXSTAT_FIRSTFRAG)
					sc->dc_pnic_rx_bug_save = i;
				if ((rxstat & DC_RXSTAT_LASTFRAG) == 0) {
					DC_INC(i, DC_RX_LIST_CNT);
					continue;
				}
				dc_pnic_rx_bug_war(sc, i);
				rxstat = le32toh(cur_rx->dc_status);
				total_len = DC_RXBYTES(rxstat);
			}
		}

		/*
		 * If an error occurs, update stats, clear the
		 * status word and leave the mbuf cluster in place:
		 * it should simply get re-used next time this descriptor
		 * comes up in the ring. However, don't report long
		 * frames as errors since they could be vlans.
		 */
		if ((rxstat & DC_RXSTAT_RXERR)) {
			if (!(rxstat & DC_RXSTAT_GIANT) ||
			    (rxstat & (DC_RXSTAT_CRCERR | DC_RXSTAT_DRIBBLE |
			    DC_RXSTAT_MIIERE | DC_RXSTAT_COLLSEEN |
			    DC_RXSTAT_RUNT | DC_RXSTAT_DE))) {
				ifp->if_ierrors++;
				if (rxstat & DC_RXSTAT_COLLSEEN)
					ifp->if_collisions++;
				dc_newbuf(sc, i, 0);
				if (rxstat & DC_RXSTAT_CRCERR) {
					DC_INC(i, DC_RX_LIST_CNT);
					continue;
				} else {
					/*
					 * Non-CRC errors: reinitialize the
					 * whole chip and bail out of the loop.
					 */
					dc_init(sc);
					return;
				}
			}
		}

		/* No errors; receive the packet. */
		total_len -= ETHER_CRC_LEN;
#ifdef __i386__
		/*
		 * On the x86 we do not have alignment problems, so try to
		 * allocate a new buffer for the receive ring, and pass up
		 * the one where the packet is already, saving the expensive
		 * copy done in m_devget().
		 * If we are on an architecture with alignment problems, or
		 * if the allocation fails, then use m_devget and leave the
		 * existing buffer in the receive ring.
		 */
		if (dc_quick && dc_newbuf(sc, i, 1) == 0) {
			m->m_pkthdr.rcvif = ifp;
			m->m_pkthdr.len = m->m_len = total_len;
			DC_INC(i, DC_RX_LIST_CNT);
		} else
#endif
		{
			struct mbuf *m0;

			m0 = m_devget(mtod(m, char *), total_len,
			    ETHER_ALIGN, ifp, NULL);
			dc_newbuf(sc, i, 0);
			DC_INC(i, DC_RX_LIST_CNT);
			if (m0 == NULL) {
				ifp->if_ierrors++;
				continue;
			}
			m = m0;
		}

		ifp->if_ipackets++;
		/* Drop the softc lock across the call up the stack. */
		DC_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		DC_LOCK(sc);
	}

	sc->dc_cdata.dc_rx_prod = i;
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
dc_txeof(struct dc_softc *sc)
{
	struct dc_desc *cur_tx = NULL;
	struct ifnet *ifp;
	int idx;
	u_int32_t ctl, txstat;

	ifp = sc->dc_ifp;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been transmitted.
	 */
	bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap, BUS_DMASYNC_POSTREAD);
	idx = sc->dc_cdata.dc_tx_cons;
	while (idx != sc->dc_cdata.dc_tx_prod) {

		cur_tx = &sc->dc_ldata->dc_tx_list[idx];
		txstat = le32toh(cur_tx->dc_status);
		ctl = le32toh(cur_tx->dc_ctl);

		/* Chip still owns this descriptor; stop here. */
		if (txstat & DC_TXSTAT_OWN)
			break;

		/* Intermediate fragments and setup frames carry no mbuf. */
		if (!(ctl & DC_TXCTL_LASTFRAG) || ctl & DC_TXCTL_SETUP) {
			if (ctl & DC_TXCTL_SETUP) {
				/*
				 * Yes, the PNIC is so brain damaged
				 * that it will sometimes generate a TX
				 * underrun error while DMAing the RX
				 * filter setup frame. If we detect this,
				 * we have to send the setup frame again,
				 * or else the filter won't be programmed
				 * correctly.
				 */
				if (DC_IS_PNIC(sc)) {
					if (txstat & DC_TXSTAT_ERRSUM)
						dc_setfilt(sc);
				}
				sc->dc_cdata.dc_tx_chain[idx] = NULL;
			}

			sc->dc_cdata.dc_tx_cnt--;
			DC_INC(idx, DC_TX_LIST_CNT);
			continue;
		}

		if (DC_IS_XIRCOM(sc) || DC_IS_CONEXANT(sc)) {
			/*
			 * XXX: Why does my Xircom taunt me so?
			 * For some reason it likes setting the CARRLOST flag
			 * even when the carrier is there. wtf?!?
			 * Who knows, but Conexant chips have the
			 * same problem. Maybe they took lessons
			 * from Xircom.
			 */
			if (/*sc->dc_type == DC_TYPE_21143 &&*/
			    sc->dc_pmode == DC_PMODE_MII &&
			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM |
			    DC_TXSTAT_NOCARRIER)))
				txstat &= ~DC_TXSTAT_ERRSUM;
		} else {
			if (/*sc->dc_type == DC_TYPE_21143 &&*/
			    sc->dc_pmode == DC_PMODE_MII &&
			    ((txstat & 0xFFFF) & ~(DC_TXSTAT_ERRSUM |
			    DC_TXSTAT_NOCARRIER | DC_TXSTAT_CARRLOST)))
				txstat &= ~DC_TXSTAT_ERRSUM;
		}

		if (txstat & DC_TXSTAT_ERRSUM) {
			ifp->if_oerrors++;
			if (txstat & DC_TXSTAT_EXCESSCOLL)
				ifp->if_collisions++;
			if (txstat & DC_TXSTAT_LATECOLL)
				ifp->if_collisions++;
			if (!(txstat & DC_TXSTAT_UNDERRUN)) {
				/* Fatal TX error: reinitialize the chip. */
				dc_init(sc);
				return;
			}
		}

		/* Collision count lives in bits 3.. of the status word. */
		ifp->if_collisions += (txstat & DC_TXSTAT_COLLCNT) >> 3;

		ifp->if_opackets++;
		if (sc->dc_cdata.dc_tx_chain[idx] != NULL) {
			bus_dmamap_sync(sc->dc_mtag,
			    sc->dc_cdata.dc_tx_map[idx],
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->dc_mtag,
			    sc->dc_cdata.dc_tx_map[idx]);
			m_freem(sc->dc_cdata.dc_tx_chain[idx]);
			sc->dc_cdata.dc_tx_chain[idx] = NULL;
		}

		sc->dc_cdata.dc_tx_cnt--;
		DC_INC(idx, DC_TX_LIST_CNT);
	}

	if (idx != sc->dc_cdata.dc_tx_cons) {
		/* Some buffers have been freed. */
		sc->dc_cdata.dc_tx_cons = idx;
		ifp->if_flags &= ~IFF_OACTIVE;
	}
	/* Clear the watchdog when the ring drains; re-arm it otherwise. */
	ifp->if_timer = (sc->dc_cdata.dc_tx_cnt == 0) ?
	    0 : 5;
}

/*
 * Periodic callout: poll the PHY/link state (with chip-specific
 * reduced-polling workarounds) and kick the transmit queue once a
 * link is established.  Reschedules itself via callout_reset().
 */
static void
dc_tick(void *xsc)
{
	struct dc_softc *sc;
	struct mii_data *mii;
	struct ifnet *ifp;
	u_int32_t r;

	sc = xsc;
	DC_LOCK(sc);
	ifp = sc->dc_ifp;
	mii = device_get_softc(sc->dc_miibus);

	if (sc->dc_flags & DC_REDUCED_MII_POLL) {
		if (sc->dc_flags & DC_21143_NWAY) {
			/*
			 * 21143 NWAY: watch the 10BT status register for a
			 * link-speed mismatch and force renegotiation.
			 */
			r = CSR_READ_4(sc, DC_10BTSTAT);
			if (IFM_SUBTYPE(mii->mii_media_active) ==
			    IFM_100_TX && (r & DC_TSTAT_LS100)) {
				sc->dc_link = 0;
				mii_mediachg(mii);
			}
			if (IFM_SUBTYPE(mii->mii_media_active) ==
			    IFM_10_T && (r & DC_TSTAT_LS10)) {
				sc->dc_link = 0;
				mii_mediachg(mii);
			}
			if (sc->dc_link == 0)
				mii_tick(mii);
		} else {
			/*
			 * Only poll the MII when the receiver is idle and
			 * no transmits are pending, to avoid disturbing
			 * the chip.
			 */
			r = CSR_READ_4(sc, DC_ISR);
			if ((r & DC_ISR_RX_STATE) == DC_RXSTATE_WAIT &&
			    sc->dc_cdata.dc_tx_cnt == 0) {
				mii_tick(mii);
				if (!(mii->mii_media_status & IFM_ACTIVE))
					sc->dc_link = 0;
			}
		}
	} else
		mii_tick(mii);

	/*
	 * When the init routine completes, we expect to be able to send
	 * packets right away, and in fact the network code will send a
	 * gratuitous ARP the moment the init routine marks the interface
	 * as running. However, even though the MAC may have been initialized,
	 * there may be a delay of a few seconds before the PHY completes
	 * autonegotiation and the link is brought up. Any transmissions
	 * made during that delay will be lost. Dealing with this is tricky:
	 * we can't just pause in the init routine while waiting for the
	 * PHY to come ready since that would bring the whole system to
	 * a screeching halt for several seconds.
	 *
	 * What we do here is prevent the TX start routine from sending
	 * any packets until a link has been established. After the
	 * interface has been initialized, the tick routine will poll
	 * the state of the PHY until the IFM_ACTIVE flag is set. Until
	 * that time, packets will stay in the send queue, and once the
	 * link comes up, they will be flushed out to the wire.
	 */
	if (!sc->dc_link && mii->mii_media_status & IFM_ACTIVE &&
	    IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) {
		sc->dc_link++;
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			dc_start(ifp);
	}

	/* Poll faster (10x) while an NWAY negotiation is still pending. */
	if (sc->dc_flags & DC_21143_NWAY && !sc->dc_link)
		callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc);
	else
		callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc);

	DC_UNLOCK(sc);
}

/*
 * A transmit underrun has occurred. Back off the transmit threshold,
 * or switch to store and forward mode if we have to.
 */
static void
dc_tx_underrun(struct dc_softc *sc)
{
	u_int32_t isr;
	int i;

	if (DC_IS_DAVICOM(sc))
		dc_init(sc);

	if (DC_IS_INTEL(sc)) {
		/*
		 * The real 21143 requires that the transmitter be idle
		 * in order to change the transmit threshold or store
		 * and forward state.
		 */
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
		for (i = 0; i < DC_TIMEOUT; i++) {
			isr = CSR_READ_4(sc, DC_ISR);
			if (isr & DC_ISR_TX_IDLE)
				break;
			DELAY(10);
		}
		if (i == DC_TIMEOUT) {
			printf("dc%d: failed to force tx to idle state\n",
			    sc->dc_unit);
			dc_init(sc);
		}
	}

	printf("dc%d: TX underrun -- ", sc->dc_unit);
	sc->dc_txthresh += DC_TXTHRESH_INC;
	if (sc->dc_txthresh > DC_TXTHRESH_MAX) {
		printf("using store and forward mode\n");
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
	} else {
		printf("increasing TX threshold\n");
		DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
		DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
	}

	/* Restart the transmitter we stopped above. */
	if (DC_IS_INTEL(sc))
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
}

#ifdef DEVICE_POLLING
static poll_handler_t dc_poll;

/*
 * Polling-mode entry point: run RX/TX completion with a packet budget
 * of 'count', and on POLL_AND_CHECK_STATUS also service the error
 * bits of the interrupt status register.
 */
static void
dc_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct dc_softc *sc = ifp->if_softc;

	if (!(ifp->if_capenable & IFCAP_POLLING)) {
		ether_poll_deregister(ifp);
		cmd = POLL_DEREGISTER;
	}
	if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */
		/* Re-enable interrupts.
 */
		CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
		return;
	}
	DC_LOCK(sc);
	/* rxcycles is the RX packet budget consumed by dc_rxeof(). */
	sc->rxcycles = count;
	dc_rxeof(sc);
	dc_txeof(sc);
	if (!IFQ_IS_EMPTY(&ifp->if_snd) && !(ifp->if_flags & IFF_OACTIVE))
		dc_start(ifp);

	if (cmd == POLL_AND_CHECK_STATUS) { /* also check status register */
		u_int32_t	status;

		status = CSR_READ_4(sc, DC_ISR);
		status &= (DC_ISR_RX_WATDOGTIMEO | DC_ISR_RX_NOBUF |
			DC_ISR_TX_NOBUF | DC_ISR_TX_IDLE | DC_ISR_TX_UNDERRUN |
			DC_ISR_BUS_ERR);
		if (!status) {
			DC_UNLOCK(sc);
			return;
		}
		/* ack what we have */
		CSR_WRITE_4(sc, DC_ISR, status);

		if (status & (DC_ISR_RX_WATDOGTIMEO | DC_ISR_RX_NOBUF)) {
			/* Fold the chip's missed-frame counters into stats. */
			u_int32_t r = CSR_READ_4(sc, DC_FRAMESDISCARDED);
			ifp->if_ierrors += (r & 0xffff) + ((r >> 17) & 0x7ff);

			if (dc_rx_resync(sc))
				dc_rxeof(sc);
		}
		/* restart transmit unit if necessary */
		if (status & DC_ISR_TX_IDLE && sc->dc_cdata.dc_tx_cnt)
			CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

		if (status & DC_ISR_TX_UNDERRUN)
			dc_tx_underrun(sc);

		if (status & DC_ISR_BUS_ERR) {
			printf("dc_poll: dc%d bus error\n", sc->dc_unit);
			dc_reset(sc);
			dc_init(sc);
		}
	}
	DC_UNLOCK(sc);
}
#endif /* DEVICE_POLLING */

/*
 * Interrupt handler: loop acknowledging and servicing status bits
 * (RX done, TX done/idle, underrun, RX watchdog, bus error) until the
 * chip reports no more pending causes, then re-enable interrupts and
 * restart transmission if the send queue is non-empty.
 */
static void
dc_intr(void *arg)
{
	struct dc_softc *sc;
	struct ifnet *ifp;
	u_int32_t status;

	sc = arg;

	if (sc->suspended)
		return;

	/* Cheap pre-check before taking the lock: anything for us? */
	if ((CSR_READ_4(sc, DC_ISR) & DC_INTRS) == 0)
		return;

	DC_LOCK(sc);
	ifp = sc->dc_ifp;
#ifdef DEVICE_POLLING
	if (ifp->if_flags & IFF_POLLING)
		goto done;
	if ((ifp->if_capenable & IFCAP_POLLING) &&
	    ether_poll_register(dc_poll, ifp)) { /* ok, disable interrupts */
		CSR_WRITE_4(sc, DC_IMR, 0x00000000);
		goto done;
	}
#endif

	/* Suppress unwanted interrupts */
	if (!(ifp->if_flags & IFF_UP)) {
		if (CSR_READ_4(sc, DC_ISR) & DC_INTRS)
			dc_stop(sc);
		DC_UNLOCK(sc);
		return;
	}

	/* Disable interrupts.
 */
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);

	/* 0xFFFFFFFF means the card has been removed/powered off. */
	while (((status = CSR_READ_4(sc, DC_ISR)) & DC_INTRS) &&
	    status != 0xFFFFFFFF) {

		CSR_WRITE_4(sc, DC_ISR, status);

		if (status & DC_ISR_RX_OK) {
			int curpkts;
			curpkts = ifp->if_ipackets;
			dc_rxeof(sc);
			/* No progress? We may be out of sync: resync. */
			if (curpkts == ifp->if_ipackets) {
				while (dc_rx_resync(sc))
					dc_rxeof(sc);
			}
		}

		if (status & (DC_ISR_TX_OK | DC_ISR_TX_NOBUF))
			dc_txeof(sc);

		if (status & DC_ISR_TX_IDLE) {
			dc_txeof(sc);
			if (sc->dc_cdata.dc_tx_cnt) {
				DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);
				CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);
			}
		}

		if (status & DC_ISR_TX_UNDERRUN)
			dc_tx_underrun(sc);

		if ((status & DC_ISR_RX_WATDOGTIMEO)
		    || (status & DC_ISR_RX_NOBUF)) {
			int curpkts;
			curpkts = ifp->if_ipackets;
			dc_rxeof(sc);
			if (curpkts == ifp->if_ipackets) {
				while (dc_rx_resync(sc))
					dc_rxeof(sc);
			}
		}

		if (status & DC_ISR_BUS_ERR) {
			dc_reset(sc);
			dc_init(sc);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		dc_start(ifp);

#ifdef DEVICE_POLLING
done:
#endif

	DC_UNLOCK(sc);
}

/*
 * busdma callback for dc_encap(): fill one TX descriptor per DMA
 * segment, chaining them from dc_tx_prod onward.  The OWN bit of the
 * first descriptor is deliberately NOT set here (done by the tail of
 * this function, after the whole chain is built) so the chip never
 * sees a half-constructed frame.  Errors are reported through
 * dc_tx_err.  (K&R parameter style kept to match the file.)
 */
static void
dc_dma_map_txbuf(arg, segs, nseg, mapsize, error)
	void *arg;
	bus_dma_segment_t *segs;
	int nseg;
	bus_size_t mapsize;
	int error;
{
	struct dc_softc *sc;
	struct dc_desc *f;
	int cur, first, frag, i;

	sc = arg;
	if (error) {
		sc->dc_cdata.dc_tx_err = error;
		return;
	}

	first = cur = frag = sc->dc_cdata.dc_tx_prod;
	for (i = 0; i < nseg; i++) {
		/*
		 * ADMtek workaround: a frame may not wrap across the end
		 * of the ring unless it starts exactly there.
		 */
		if ((sc->dc_flags & DC_TX_ADMTEK_WAR) &&
		    (frag == (DC_TX_LIST_CNT - 1)) &&
		    (first != sc->dc_cdata.dc_tx_first)) {
			bus_dmamap_unload(sc->dc_mtag,
			    sc->dc_cdata.dc_tx_map[first]);
			sc->dc_cdata.dc_tx_err = ENOBUFS;
			return;
		}

		f = &sc->dc_ldata->dc_tx_list[frag];
		f->dc_ctl = htole32(DC_TXCTL_TLINK | segs[i].ds_len);
		if (i == 0) {
			/* First descriptor stays un-owned until the end. */
			f->dc_status = 0;
			f->dc_ctl |= htole32(DC_TXCTL_FIRSTFRAG);
		} else
			f->dc_status = htole32(DC_TXSTAT_OWN);
		f->dc_data = htole32(segs[i].ds_addr);
		cur = frag;
		DC_INC(frag, DC_TX_LIST_CNT);
	}

	sc->dc_cdata.dc_tx_err = 0;
	sc->dc_cdata.dc_tx_prod = frag;
	sc->dc_cdata.dc_tx_cnt += nseg;
	/* Mark the last fragment and hang the mbuf off it for dc_txeof(). */
	sc->dc_ldata->dc_tx_list[cur].dc_ctl |= htole32(DC_TXCTL_LASTFRAG);
	sc->dc_cdata.dc_tx_chain[cur] = sc->dc_cdata.dc_tx_mapping;
	/* Choose where to request a TX-done interrupt, per chip quirk. */
	if (sc->dc_flags & DC_TX_INTR_FIRSTFRAG)
		sc->dc_ldata->dc_tx_list[first].dc_ctl |=
		    htole32(DC_TXCTL_FINT);
	if (sc->dc_flags & DC_TX_INTR_ALWAYS)
		sc->dc_ldata->dc_tx_list[cur].dc_ctl |=
		    htole32(DC_TXCTL_FINT);
	if (sc->dc_flags & DC_TX_USE_TX_INTR && sc->dc_cdata.dc_tx_cnt > 64)
		sc->dc_ldata->dc_tx_list[cur].dc_ctl |=
		    htole32(DC_TXCTL_FINT);
	/* Hand the whole chain to the chip last: set OWN on the head. */
	sc->dc_ldata->dc_tx_list[first].dc_status = htole32(DC_TXSTAT_OWN);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.  Returns 0 on success, ENOBUFS
 * when the ring is too full (or m_defrag() fails), or the busdma load
 * error.  On defrag, *m_head is replaced with the compacted chain.
 */
static int
dc_encap(struct dc_softc *sc, struct mbuf **m_head)
{
	struct mbuf *m;
	int error, idx, chainlen = 0;

	/*
	 * If there's no way we can send any packets, return now.
	 */
	if (DC_TX_LIST_CNT - sc->dc_cdata.dc_tx_cnt < 6)
		return (ENOBUFS);

	/*
	 * Count the number of frags in this chain to see if
	 * we need to m_defrag. Since the descriptor list is shared
	 * by all packets, we'll m_defrag long chains so that they
	 * do not use up the entire list, even if they would fit.
	 */
	for (m = *m_head; m != NULL; m = m->m_next)
		chainlen++;

	if ((chainlen > DC_TX_LIST_CNT / 4) ||
	    ((DC_TX_LIST_CNT - (chainlen + sc->dc_cdata.dc_tx_cnt)) < 6)) {
		m = m_defrag(*m_head, M_DONTWAIT);
		if (m == NULL)
			return (ENOBUFS);
		*m_head = m;
	}

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	idx = sc->dc_cdata.dc_tx_prod;
	sc->dc_cdata.dc_tx_mapping = *m_head;
	/* Descriptor filling happens in the dc_dma_map_txbuf callback. */
	error = bus_dmamap_load_mbuf(sc->dc_mtag, sc->dc_cdata.dc_tx_map[idx],
	    *m_head, dc_dma_map_txbuf, sc, 0);
	if (error)
		return (error);
	if (sc->dc_cdata.dc_tx_err != 0)
		return (sc->dc_cdata.dc_tx_err);

	bus_dmamap_sync(sc->dc_mtag, sc->dc_cdata.dc_tx_map[idx],
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(sc->dc_ltag, sc->dc_lmap,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	return (0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */

static void
dc_start(struct ifnet *ifp)
{
	struct dc_softc *sc;
	struct mbuf *m_head = NULL, *m;
	unsigned int queued = 0;
	int idx;

	sc = ifp->if_softc;

	DC_LOCK(sc);

	/*
	 * Hold back transmission until the link is up (see the long
	 * comment in dc_tick()), unless the queue is getting deep.
	 */
	if (!sc->dc_link && ifp->if_snd.ifq_len < 10) {
		DC_UNLOCK(sc);
		return;
	}

	if (ifp->if_flags & IFF_OACTIVE) {
		DC_UNLOCK(sc);
		return;
	}

	idx = sc->dc_cdata.dc_tx_first = sc->dc_cdata.dc_tx_prod;

	while (sc->dc_cdata.dc_tx_chain[idx] == NULL) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Chips with the coalesce/align quirks need the frame in
		 * a single (aligned) buffer; defrag multi-mbuf chains.
		 */
		if (sc->dc_flags & DC_TX_COALESCE &&
		    (m_head->m_next != NULL ||
		     sc->dc_flags & DC_TX_ALIGN)) {
			m = m_defrag(m_head, M_DONTWAIT);
			if (m == NULL) {
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
				ifp->if_flags |= IFF_OACTIVE;
				break;
			} else {
				m_head = m;
			}
		}

		if (dc_encap(sc, &m_head)) {
			/* Ring full: requeue and stall the interface. */
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		idx = sc->dc_cdata.dc_tx_prod;

		queued++;
		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);

		if (sc->dc_flags & DC_TX_ONE) {
			/* Chip can only handle one frame at a time. */
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}
	if (queued > 0) {
		/* Transmit */
		if (!(sc->dc_flags & DC_TX_POLL))
			CSR_WRITE_4(sc, DC_TXSTART, 0xFFFFFFFF);

		/*
		 * Set a timeout in case the chip goes out to lunch.
		 */
		ifp->if_timer = 5;
	}
	DC_UNLOCK(sc);
}

/*
 * (Re)initialize the chip: stop and reset the hardware, program bus,
 * cache and TX-threshold settings, set up the descriptor rings, load
 * the RX filter, enable the transmitter and receiver, and start the
 * periodic tick callout.  Callable as the ifnet if_init hook.
 */
static void
dc_init(void *xsc)
{
	struct dc_softc *sc = xsc;
	struct ifnet *ifp = sc->dc_ifp;
	struct mii_data *mii;

	DC_LOCK(sc);

	mii = device_get_softc(sc->dc_miibus);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	dc_stop(sc);
	dc_reset(sc);

	/*
	 * Set cache alignment and burst length.
	 */
	if (DC_IS_ASIX(sc) || DC_IS_DAVICOM(sc))
		CSR_WRITE_4(sc, DC_BUSCTL, 0);
	else
		CSR_WRITE_4(sc, DC_BUSCTL, DC_BUSCTL_MRME | DC_BUSCTL_MRLE);
	/*
	 * Evenly share the bus between receive and transmit process.
 */
	if (DC_IS_INTEL(sc))
		DC_SETBIT(sc, DC_BUSCTL, DC_BUSCTL_ARBITRATION);

	if (DC_IS_DAVICOM(sc) || DC_IS_INTEL(sc)) {
		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_USECA);
	} else {
		DC_SETBIT(sc, DC_BUSCTL, DC_BURSTLEN_16LONG);
	}

	if (sc->dc_flags & DC_TX_POLL)
		DC_SETBIT(sc, DC_BUSCTL, DC_TXPOLL_1);

	/* Tell the chip the host cache line size (in longwords). */
	switch(sc->dc_cachesize) {
	case 32:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_32LONG);
		break;
	case 16:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_16LONG);
		break;
	case 8:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_8LONG);
		break;
	case 0:
	default:
		DC_SETBIT(sc, DC_BUSCTL, DC_CACHEALIGN_NONE);
		break;
	}

	/*
	 * Use store-and-forward if requested, or if the cut-through
	 * threshold has already been backed off past its maximum by
	 * dc_tx_underrun().
	 */
	if (sc->dc_flags & DC_TX_STORENFWD)
		DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
	else {
		if (sc->dc_txthresh > DC_TXTHRESH_MAX) {
			DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
		} else {
			DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_STORENFWD);
			DC_SETBIT(sc, DC_NETCFG, sc->dc_txthresh);
		}
	}

	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_NO_RXCRC);
	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_BACKOFF);

	if (DC_IS_MACRONIX(sc) || DC_IS_PNICII(sc)) {
		/*
		 * The app notes for the 98713 and 98715A say that
		 * in order to have the chips operate properly, a magic
		 * number must be written to CSR16. Macronix does not
		 * document the meaning of these bits so there's no way
		 * to know exactly what they do. The 98713 has a magic
		 * number all its own; the rest all use a different one.
		 */
		DC_CLRBIT(sc, DC_MX_MAGICPACKET, 0xFFFF0000);
		if (sc->dc_type == DC_TYPE_98713)
			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98713);
		else
			DC_SETBIT(sc, DC_MX_MAGICPACKET, DC_MX_MAGIC_98715);
	}

	if (DC_IS_XIRCOM(sc)) {
		/*
		 * setup General Purpose Port mode and data so the tulip
		 * can talk to the MII.
		 */
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_WRITE_EN | DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
		CSR_WRITE_4(sc, DC_SIAGP, DC_SIAGP_INT1_EN |
		    DC_SIAGP_MD_GP2_OUTPUT | DC_SIAGP_MD_GP0_OUTPUT);
		DELAY(10);
	}

	DC_CLRBIT(sc, DC_NETCFG, DC_NETCFG_TX_THRESH);
	DC_SETBIT(sc, DC_NETCFG, DC_TXTHRESH_MIN);

	/* Init circular RX list.
 */
	if (dc_list_rx_init(sc) == ENOBUFS) {
		printf("dc%d: initialization failed: no "
		    "memory for rx buffers\n", sc->dc_unit);
		dc_stop(sc);
		DC_UNLOCK(sc);
		return;
	}

	/*
	 * Init TX descriptors.
	 */
	dc_list_tx_init(sc);

	/*
	 * Load the address of the RX list.
	 */
	CSR_WRITE_4(sc, DC_RXADDR, DC_RXDESC(sc, 0));
	CSR_WRITE_4(sc, DC_TXADDR, DC_TXDESC(sc, 0));

	/*
	 * Enable interrupts.
	 */
#ifdef DEVICE_POLLING
	/*
	 * ... but only if we are not polling, and make sure they are off in
	 * the case of polling. Some cards (e.g. fxp) turn interrupts on
	 * after a reset.
	 */
	if (ifp->if_flags & IFF_POLLING)
		CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	else
#endif
	CSR_WRITE_4(sc, DC_IMR, DC_INTRS);
	CSR_WRITE_4(sc, DC_ISR, 0xFFFFFFFF);

	/* Enable transmitter. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_TX_ON);

	/*
	 * If this is an Intel 21143 and we're not using the
	 * MII port, program the LED control pins so we get
	 * link and activity indications.
	 */
	if (sc->dc_flags & DC_TULIP_LEDS) {
		CSR_WRITE_4(sc, DC_WATCHDOG,
		    DC_WDOG_CTLWREN | DC_WDOG_LINK | DC_WDOG_ACTIVITY);
		CSR_WRITE_4(sc, DC_WATCHDOG, 0);
	}

	/*
	 * Load the RX/multicast filter. We do this sort of late
	 * because the filter programming scheme on the 21143 and
	 * some clones requires DMAing a setup frame via the TX
	 * engine, and we need the transmitter enabled for that.
	 */
	dc_setfilt(sc);

	/* Enable receiver. */
	DC_SETBIT(sc, DC_NETCFG, DC_NETCFG_RX_ON);
	CSR_WRITE_4(sc, DC_RXSTART, 0xFFFFFFFF);

	mii_mediachg(mii);
	dc_setcfg(sc, sc->dc_if_media);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* Don't start the ticker if this is a homePNA link.
 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_media) == IFM_HPNA_1)
		sc->dc_link = 1;
	else {
		/*
		 * 21143-style NWAY autonegotiation needs a faster
		 * (10 Hz) tick to track the negotiation state machine.
		 */
		if (sc->dc_flags & DC_21143_NWAY)
			callout_reset(&sc->dc_stat_ch, hz/10, dc_tick, sc);
		else
			callout_reset(&sc->dc_stat_ch, hz, dc_tick, sc);
	}

#ifdef SRM_MEDIA
	/* Replay the media setting saved from the SRM console, once. */
	if(sc->dc_srm_media) {
		struct ifreq ifr;

		ifr.ifr_media = sc->dc_srm_media;
		ifmedia_ioctl(ifp, &ifr, &mii->mii_media, SIOCSIFMEDIA);
		sc->dc_srm_media = 0;
	}
#endif
	DC_UNLOCK(sc);
}

/*
 * Set media options.
 */
static int
dc_ifmedia_upd(struct ifnet *ifp)
{
	struct dc_softc *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->dc_miibus);
	mii_mediachg(mii);
	ifm = &mii->mii_media;

	/*
	 * HomePNA on Davicom parts is driven by the chip itself, not
	 * the PHY, so program the chip config directly in that case.
	 */
	if (DC_IS_DAVICOM(sc) &&
	    IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1)
		dc_setcfg(sc, ifm->ifm_media);
	else
		sc->dc_link = 0;

	return (0);
}

/*
 * Report current media status.
 */
static void
dc_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct dc_softc *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->dc_miibus);
	mii_pollstat(mii);
	ifm = &mii->mii_media;
	/*
	 * HomePNA on Davicom bypasses the MII; report the selected
	 * media directly, with no meaningful status bits.
	 */
	if (DC_IS_DAVICOM(sc)) {
		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_HPNA_1) {
			ifmr->ifm_active = ifm->ifm_media;
			ifmr->ifm_status = 0;
			return;
		}
	}
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

/*
 * Handle socket ioctls.  Runs with the softc lock held; media
 * ioctls are forwarded to the MII layer and everything else that
 * is not handled here falls through to ether_ioctl().
 */
static int
dc_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct dc_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0;

	DC_LOCK(sc);

	switch (command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * Only reprogram the RX filter when PROMISC or
			 * ALLMULTI actually changed; otherwise a full
			 * re-init is only needed if we are not running.
			 */
			int need_setfilt = (ifp->if_flags ^ sc->dc_if_flags) &
				(IFF_PROMISC | IFF_ALLMULTI);
			if (ifp->if_flags & IFF_RUNNING) {
				if (need_setfilt)
					dc_setfilt(sc);
			} else {
				sc->dc_txthresh = 0;
				dc_init(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				dc_stop(sc);
		}
		sc->dc_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		dc_setfilt(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->dc_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
#ifdef SRM_MEDIA
		/* A manual media change overrides any saved SRM setting. */
		if (sc->dc_srm_media)
			sc->dc_srm_media = 0;
#endif
		break;
	case SIOCSIFCAP:
		ifp->if_capenable &= ~IFCAP_POLLING;
		ifp->if_capenable |= ifr->ifr_reqcap & IFCAP_POLLING;
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	DC_UNLOCK(sc);

	return (error);
}

/*
 * TX watchdog: the interface failed to complete a transmit within
 * the timeout, so count an output error and reset/restart the chip.
 */
static void
dc_watchdog(struct ifnet *ifp)
{
	struct dc_softc *sc;

	sc = ifp->if_softc;
	DC_LOCK(sc);
	ifp->if_oerrors++;
	printf("dc%d: watchdog timeout\n", sc->dc_unit);

	dc_stop(sc);
	dc_reset(sc);
	dc_init(sc);

	/* Kick the transmitter again if packets are still queued. */
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		dc_start(ifp);
	DC_UNLOCK(sc);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
dc_stop(struct dc_softc *sc)
{
	struct ifnet *ifp;
	struct dc_list_data *ld;
	struct dc_chain_data *cd;
	int i;
	u_int32_t ctl;

	DC_LOCK(sc);

	ifp = sc->dc_ifp;
	ifp->if_timer = 0;
	ld = sc->dc_ldata;
	cd = &sc->dc_cdata;

	callout_stop(&sc->dc_stat_ch);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
#ifdef DEVICE_POLLING
	ether_poll_deregister(ifp);
#endif

	/* Disable RX/TX, mask interrupts and clear the descriptor bases. */
	DC_CLRBIT(sc, DC_NETCFG, (DC_NETCFG_RX_ON | DC_NETCFG_TX_ON));
	CSR_WRITE_4(sc, DC_IMR, 0x00000000);
	CSR_WRITE_4(sc, DC_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, DC_RXADDR, 0x00000000);
	sc->dc_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < DC_RX_LIST_CNT; i++) {
		if (cd->dc_rx_chain[i] != NULL) {
			m_freem(cd->dc_rx_chain[i]);
			cd->dc_rx_chain[i] = NULL;
		}
	}
	bzero(&ld->dc_rx_list, sizeof(ld->dc_rx_list));

	/*
	 * Free the TX list buffers.  Setup frames and non-final
	 * fragments have no mbuf/DMA map to release: just clear the
	 * slot and move on.
	 */
	for (i = 0; i < DC_TX_LIST_CNT; i++) {
		if (cd->dc_tx_chain[i] != NULL) {
			ctl = le32toh(ld->dc_tx_list[i].dc_ctl);
			if ((ctl & DC_TXCTL_SETUP) ||
			    !(ctl & DC_TXCTL_LASTFRAG)) {
				cd->dc_tx_chain[i] = NULL;
				continue;
			}
			bus_dmamap_unload(sc->dc_mtag, cd->dc_tx_map[i]);
			m_freem(cd->dc_tx_chain[i]);
			cd->dc_tx_chain[i] = NULL;
		}
	}
	bzero(&ld->dc_tx_list, sizeof(ld->dc_tx_list));

	DC_UNLOCK(sc);
}

/*
 * Device suspend routine.
Stop the interface and save some PCI * settings in case the BIOS doesn't restore them properly on * resume. */ static int dc_suspend(device_t dev) { struct dc_softc *sc; int s; s = splimp(); sc = device_get_softc(dev); dc_stop(sc); sc->suspended = 1; splx(s); return (0); } /* * Device resume routine. Restore some PCI settings in case the BIOS * doesn't, re-enable busmastering, and restart the interface if * appropriate. */ static int dc_resume(device_t dev) { struct dc_softc *sc; struct ifnet *ifp; int s; s = splimp(); sc = device_get_softc(dev); ifp = sc->dc_ifp; /* reinitialize interface if necessary */ if (ifp->if_flags & IFF_UP) dc_init(sc); sc->suspended = 0; splx(s); return (0); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static void dc_shutdown(device_t dev) { struct dc_softc *sc; sc = device_get_softc(dev); dc_stop(sc); } Index: stable/6/sys/pci/if_de.c =================================================================== --- stable/6/sys/pci/if_de.c (revision 149421) +++ stable/6/sys/pci/if_de.c (revision 149422) @@ -1,5119 +1,5121 @@ /* $NetBSD: if_de.c,v 1.86 1999/06/01 19:17:59 thorpej Exp $ */ /*- * Copyright (c) 1994-1997 Matt Thomas (matt@3am-software.com) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Id: if_de.c,v 1.94 1997/07/03 16:55:07 thomas Exp */ /* * DEC 21040 PCI Ethernet Controller * * Written by Matt Thomas * BPF support code stolen directly from if_ec.c * * This driver supports the DEC DE435 or any other PCI * board which support 21040, 21041, or 21140 (mostly). */ #include __FBSDID("$FreeBSD$"); #define TULIP_HDR_DATA #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #endif #include #include #include #include #include #include /* * Intel CPUs should use I/O mapped access. */ #if defined(__i386__) #define TULIP_IOMAPPED #endif #if 0 /* * This turns on all sort of debugging stuff and make the * driver much larger. */ #define TULIP_DEBUG #endif #if 0 #define TULIP_PERFSTATS #endif #define TULIP_HZ 10 #include /* * This module supports * the DEC 21040 PCI Ethernet Controller. * the DEC 21041 PCI Ethernet Controller. * the DEC 21140 PCI Fast Ethernet Controller. 
*/ static void tulip_addr_filter(tulip_softc_t * const sc); static void tulip_ifinit(void *); static int tulip_ifmedia_change(struct ifnet * const ifp); static void tulip_ifmedia_status(struct ifnet * const ifp, struct ifmediareq *req); static void tulip_ifstart(struct ifnet *ifp); static void tulip_init(tulip_softc_t * const sc); static void tulip_intr_shared(void *arg); static void tulip_intr_normal(void *arg); static void tulip_mii_autonegotiate(tulip_softc_t * const sc, const unsigned phyaddr); static int tulip_mii_map_abilities(tulip_softc_t * const sc, unsigned abilities); static tulip_media_t tulip_mii_phy_readspecific(tulip_softc_t * const sc); static unsigned tulip_mii_readreg(tulip_softc_t * const sc, unsigned devaddr, unsigned regno); static void tulip_mii_writereg(tulip_softc_t * const sc, unsigned devaddr, unsigned regno, unsigned data); static void tulip_reset(tulip_softc_t * const sc); static void tulip_rx_intr(tulip_softc_t * const sc); static int tulip_srom_decode(tulip_softc_t * const sc); static void tulip_start(tulip_softc_t * const sc); static struct mbuf * tulip_txput(tulip_softc_t * const sc, struct mbuf *m); static void tulip_txput_setup(tulip_softc_t * const sc); static void tulip_timeout_callback( void *arg) { tulip_softc_t * const sc = arg; TULIP_PERFSTART(timeout) TULIP_LOCK(sc); sc->tulip_flags &= ~TULIP_TIMEOUTPENDING; sc->tulip_probe_timeout -= 1000 / TULIP_HZ; (sc->tulip_boardsw->bd_media_poll)(sc, TULIP_MEDIAPOLL_TIMER); TULIP_PERFEND(timeout); TULIP_UNLOCK(sc); } static void tulip_timeout( tulip_softc_t * const sc) { TULIP_LOCK_ASSERT(sc); if (sc->tulip_flags & TULIP_TIMEOUTPENDING) return; sc->tulip_flags |= TULIP_TIMEOUTPENDING; callout_reset(&sc->tulip_callout, (hz + TULIP_HZ / 2) / TULIP_HZ, tulip_timeout_callback, sc); } static int tulip_txprobe( tulip_softc_t * const sc) { struct mbuf *m; /* * Before we are sure this is the right media we need * to send a small packet to make sure there's carrier. 
* Strangely, BNC and AUI will "see" receive data if * either is connected so the transmit is the only way * to verify the connectivity. */ TULIP_LOCK_ASSERT(sc); MGETHDR(m, M_DONTWAIT, MT_DATA); if (m == NULL) return 0; /* * Construct a LLC TEST message which will point to ourselves. */ bcopy(IFP2ENADDR(sc->tulip_ifp), mtod(m, struct ether_header *)->ether_dhost, 6); bcopy(IFP2ENADDR(sc->tulip_ifp), mtod(m, struct ether_header *)->ether_shost, 6); mtod(m, struct ether_header *)->ether_type = htons(3); mtod(m, unsigned char *)[14] = 0; mtod(m, unsigned char *)[15] = 0; mtod(m, unsigned char *)[16] = 0xE3; /* LLC Class1 TEST (no poll) */ m->m_len = m->m_pkthdr.len = sizeof(struct ether_header) + 3; /* * send it! */ sc->tulip_cmdmode |= TULIP_CMD_TXRUN; sc->tulip_intrmask |= TULIP_STS_TXINTR; sc->tulip_flags |= TULIP_TXPROBE_ACTIVE; TULIP_CSR_WRITE(sc, csr_command, sc->tulip_cmdmode); TULIP_CSR_WRITE(sc, csr_intr, sc->tulip_intrmask); if ((m = tulip_txput(sc, m)) != NULL) m_freem(m); sc->tulip_probe.probe_txprobes++; return 1; } static void tulip_media_set( tulip_softc_t * const sc, tulip_media_t media) { const tulip_media_info_t *mi = sc->tulip_mediums[media]; TULIP_LOCK_ASSERT(sc); if (mi == NULL) return; /* * If we are switching media, make sure we don't think there's * any stale RX activity */ sc->tulip_flags &= ~TULIP_RXACT; if (mi->mi_type == TULIP_MEDIAINFO_SIA) { TULIP_CSR_WRITE(sc, csr_sia_connectivity, TULIP_SIACONN_RESET); TULIP_CSR_WRITE(sc, csr_sia_tx_rx, mi->mi_sia_tx_rx); if (sc->tulip_features & TULIP_HAVE_SIAGP) { TULIP_CSR_WRITE(sc, csr_sia_general, mi->mi_sia_gp_control|mi->mi_sia_general); DELAY(50); TULIP_CSR_WRITE(sc, csr_sia_general, mi->mi_sia_gp_data|mi->mi_sia_general); } else { TULIP_CSR_WRITE(sc, csr_sia_general, mi->mi_sia_general); } TULIP_CSR_WRITE(sc, csr_sia_connectivity, mi->mi_sia_connectivity); } else if (mi->mi_type == TULIP_MEDIAINFO_GPR) { #define TULIP_GPR_CMDBITS 
(TULIP_CMD_PORTSELECT|TULIP_CMD_PCSFUNCTION|TULIP_CMD_SCRAMBLER|TULIP_CMD_TXTHRSHLDCTL) /* * If the cmdmode bits don't match the currently operating mode, * set the cmdmode appropriately and reset the chip. */ if (((mi->mi_cmdmode ^ TULIP_CSR_READ(sc, csr_command)) & TULIP_GPR_CMDBITS) != 0) { sc->tulip_cmdmode &= ~TULIP_GPR_CMDBITS; sc->tulip_cmdmode |= mi->mi_cmdmode; tulip_reset(sc); } TULIP_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET|sc->tulip_gpinit); DELAY(10); TULIP_CSR_WRITE(sc, csr_gp, (u_int8_t) mi->mi_gpdata); } else if (mi->mi_type == TULIP_MEDIAINFO_SYM) { /* * If the cmdmode bits don't match the currently operating mode, * set the cmdmode appropriately and reset the chip. */ if (((mi->mi_cmdmode ^ TULIP_CSR_READ(sc, csr_command)) & TULIP_GPR_CMDBITS) != 0) { sc->tulip_cmdmode &= ~TULIP_GPR_CMDBITS; sc->tulip_cmdmode |= mi->mi_cmdmode; tulip_reset(sc); } TULIP_CSR_WRITE(sc, csr_sia_general, mi->mi_gpcontrol); TULIP_CSR_WRITE(sc, csr_sia_general, mi->mi_gpdata); } else if (mi->mi_type == TULIP_MEDIAINFO_MII && sc->tulip_probe_state != TULIP_PROBE_INACTIVE) { int idx; if (sc->tulip_features & TULIP_HAVE_SIAGP) { const u_int8_t *dp; dp = &sc->tulip_rombuf[mi->mi_reset_offset]; for (idx = 0; idx < mi->mi_reset_length; idx++, dp += 2) { DELAY(10); TULIP_CSR_WRITE(sc, csr_sia_general, (dp[0] + 256 * dp[1]) << 16); } sc->tulip_phyaddr = mi->mi_phyaddr; dp = &sc->tulip_rombuf[mi->mi_gpr_offset]; for (idx = 0; idx < mi->mi_gpr_length; idx++, dp += 2) { DELAY(10); TULIP_CSR_WRITE(sc, csr_sia_general, (dp[0] + 256 * dp[1]) << 16); } } else { for (idx = 0; idx < mi->mi_reset_length; idx++) { DELAY(10); TULIP_CSR_WRITE(sc, csr_gp, sc->tulip_rombuf[mi->mi_reset_offset + idx]); } sc->tulip_phyaddr = mi->mi_phyaddr; for (idx = 0; idx < mi->mi_gpr_length; idx++) { DELAY(10); TULIP_CSR_WRITE(sc, csr_gp, sc->tulip_rombuf[mi->mi_gpr_offset + idx]); } } if (sc->tulip_flags & TULIP_TRYNWAY) { tulip_mii_autonegotiate(sc, sc->tulip_phyaddr); } else if ((sc->tulip_flags & 
TULIP_DIDNWAY) == 0) { u_int32_t data = tulip_mii_readreg(sc, sc->tulip_phyaddr, PHYREG_CONTROL); data &= ~(PHYCTL_SELECT_100MB|PHYCTL_FULL_DUPLEX|PHYCTL_AUTONEG_ENABLE); sc->tulip_flags &= ~TULIP_DIDNWAY; if (TULIP_IS_MEDIA_FD(media)) data |= PHYCTL_FULL_DUPLEX; if (TULIP_IS_MEDIA_100MB(media)) data |= PHYCTL_SELECT_100MB; tulip_mii_writereg(sc, sc->tulip_phyaddr, PHYREG_CONTROL, data); } } } static void tulip_linkup( tulip_softc_t * const sc, tulip_media_t media) { TULIP_LOCK_ASSERT(sc); if ((sc->tulip_flags & TULIP_LINKUP) == 0) sc->tulip_flags |= TULIP_PRINTLINKUP; sc->tulip_flags |= TULIP_LINKUP; sc->tulip_ifp->if_flags &= ~IFF_OACTIVE; #if 0 /* XXX how does with work with ifmedia? */ if ((sc->tulip_flags & TULIP_DIDNWAY) == 0) { if (sc->tulip_ifp->if_flags & IFF_FULLDUPLEX) { if (TULIP_CAN_MEDIA_FD(media) && sc->tulip_mediums[TULIP_FD_MEDIA_OF(media)] != NULL) media = TULIP_FD_MEDIA_OF(media); } else { if (TULIP_IS_MEDIA_FD(media) && sc->tulip_mediums[TULIP_HD_MEDIA_OF(media)] != NULL) media = TULIP_HD_MEDIA_OF(media); } } #endif if (sc->tulip_media != media) { #ifdef TULIP_DEBUG sc->tulip_dbg.dbg_last_media = sc->tulip_media; #endif sc->tulip_media = media; sc->tulip_flags |= TULIP_PRINTMEDIA; if (TULIP_IS_MEDIA_FD(sc->tulip_media)) { sc->tulip_cmdmode |= TULIP_CMD_FULLDUPLEX; } else if (sc->tulip_chipid != TULIP_21041 || (sc->tulip_flags & TULIP_DIDNWAY) == 0) { sc->tulip_cmdmode &= ~TULIP_CMD_FULLDUPLEX; } } /* * We could set probe_timeout to 0 but setting to 3000 puts this * in one central place and the only matters is tulip_link is * followed by a tulip_timeout. Therefore setting it should not * result in aberrant behavour. */ sc->tulip_probe_timeout = 3000; sc->tulip_probe_state = TULIP_PROBE_INACTIVE; sc->tulip_flags &= ~(TULIP_TXPROBE_ACTIVE|TULIP_TRYNWAY); if (sc->tulip_flags & TULIP_INRESET) { tulip_media_set(sc, sc->tulip_media); } else if (sc->tulip_probe_media != sc->tulip_media) { /* * No reason to change media if we have the right media. 
*/ tulip_reset(sc); } tulip_init(sc); } static void tulip_media_print( tulip_softc_t * const sc) { struct ifnet *ifp = sc->tulip_ifp; TULIP_LOCK_ASSERT(sc); if ((sc->tulip_flags & TULIP_LINKUP) == 0) return; if (sc->tulip_flags & TULIP_PRINTMEDIA) { if_printf(ifp, "enabling %s port\n", tulip_mediums[sc->tulip_media]); sc->tulip_flags &= ~(TULIP_PRINTMEDIA|TULIP_PRINTLINKUP); } else if (sc->tulip_flags & TULIP_PRINTLINKUP) { if_printf(ifp, "link up\n"); sc->tulip_flags &= ~TULIP_PRINTLINKUP; } } #if defined(TULIP_DO_GPR_SENSE) static tulip_media_t tulip_21140_gpr_media_sense( tulip_softc_t * const sc) { struct ifnet *ifp sc->tulip_ifp; tulip_media_t maybe_media = TULIP_MEDIA_UNKNOWN; tulip_media_t last_media = TULIP_MEDIA_UNKNOWN; tulip_media_t media; TULIP_LOCK_ASSERT(sc); /* * If one of the media blocks contained a default media flag, * use that. */ for (media = TULIP_MEDIA_UNKNOWN; media < TULIP_MEDIA_MAX; media++) { const tulip_media_info_t *mi; /* * Media is not supported (or is full-duplex). */ if ((mi = sc->tulip_mediums[media]) == NULL || TULIP_IS_MEDIA_FD(media)) continue; if (mi->mi_type != TULIP_MEDIAINFO_GPR) continue; /* * Remember the media is this is the "default" media. */ if (mi->mi_default && maybe_media == TULIP_MEDIA_UNKNOWN) maybe_media = media; /* * No activity mask? Can't see if it is active if there's no mask. */ if (mi->mi_actmask == 0) continue; /* * Does the activity data match? */ if ((TULIP_CSR_READ(sc, csr_gp) & mi->mi_actmask) != mi->mi_actdata) continue; #if defined(TULIP_DEBUG) if_printf(ifp, "gpr_media_sense: %s: 0x%02x & 0x%02x == 0x%02x\n", tulip_mediums[media], TULIP_CSR_READ(sc, csr_gp) & 0xFF, mi->mi_actmask, mi->mi_actdata); #endif /* * It does! If this is the first media we detected, then * remember this media. If isn't the first, then there were * multiple matches which we equate to no match (since we don't * which to select (if any). 
*/ if (last_media == TULIP_MEDIA_UNKNOWN) { last_media = media; } else if (last_media != media) { last_media = TULIP_MEDIA_UNKNOWN; } } return (last_media != TULIP_MEDIA_UNKNOWN) ? last_media : maybe_media; } #endif /* TULIP_DO_GPR_SENSE */ static tulip_link_status_t tulip_media_link_monitor( tulip_softc_t * const sc) { struct ifnet *ifp = sc->tulip_ifp; const tulip_media_info_t * const mi = sc->tulip_mediums[sc->tulip_media]; tulip_link_status_t linkup = TULIP_LINK_DOWN; TULIP_LOCK_ASSERT(sc); if (mi == NULL) { #if defined(DIAGNOSTIC) || defined(TULIP_DEBUG) panic("tulip_media_link_monitor: %s: botch at line %d\n", tulip_mediums[sc->tulip_media],__LINE__); #else return TULIP_LINK_UNKNOWN; #endif } /* * Have we seen some packets? If so, the link must be good. */ if ((sc->tulip_flags & (TULIP_RXACT|TULIP_LINKUP)) == (TULIP_RXACT|TULIP_LINKUP)) { sc->tulip_flags &= ~TULIP_RXACT; sc->tulip_probe_timeout = 3000; return TULIP_LINK_UP; } sc->tulip_flags &= ~TULIP_RXACT; if (mi->mi_type == TULIP_MEDIAINFO_MII) { u_int32_t status; /* * Read the PHY status register. */ status = tulip_mii_readreg(sc, sc->tulip_phyaddr, PHYREG_STATUS); if (status & PHYSTS_AUTONEG_DONE) { /* * If the PHY has completed autonegotiation, see the if the * remote systems abilities have changed. If so, upgrade or * downgrade as appropriate. */ u_int32_t abilities = tulip_mii_readreg(sc, sc->tulip_phyaddr, PHYREG_AUTONEG_ABILITIES); abilities = (abilities << 6) & status; if (abilities != sc->tulip_abilities) { #if defined(TULIP_DEBUG) loudprintf("%s(phy%d): autonegotiation changed: 0x%04x -> 0x%04x\n", ifp->if_xname, sc->tulip_phyaddr, sc->tulip_abilities, abilities); #endif if (tulip_mii_map_abilities(sc, abilities)) { tulip_linkup(sc, sc->tulip_probe_media); return TULIP_LINK_UP; } /* * if we had selected media because of autonegotiation, * we need to probe for the new media. 
*/ sc->tulip_probe_state = TULIP_PROBE_INACTIVE; if (sc->tulip_flags & TULIP_DIDNWAY) return TULIP_LINK_DOWN; } } /* * The link is now up. If was down, say its back up. */ if ((status & (PHYSTS_LINK_UP|PHYSTS_REMOTE_FAULT)) == PHYSTS_LINK_UP) linkup = TULIP_LINK_UP; } else if (mi->mi_type == TULIP_MEDIAINFO_GPR) { /* * No activity sensor? Assume all's well. */ if (mi->mi_actmask == 0) return TULIP_LINK_UNKNOWN; /* * Does the activity data match? */ if ((TULIP_CSR_READ(sc, csr_gp) & mi->mi_actmask) == mi->mi_actdata) linkup = TULIP_LINK_UP; } else if (mi->mi_type == TULIP_MEDIAINFO_SIA) { /* * Assume non TP ok for now. */ if (!TULIP_IS_MEDIA_TP(sc->tulip_media)) return TULIP_LINK_UNKNOWN; if ((TULIP_CSR_READ(sc, csr_sia_status) & TULIP_SIASTS_LINKFAIL) == 0) linkup = TULIP_LINK_UP; #if defined(TULIP_DEBUG) if (sc->tulip_probe_timeout <= 0) if_printf(ifp, "sia status = 0x%08x\n", TULIP_CSR_READ(sc, csr_sia_status)); #endif } else if (mi->mi_type == TULIP_MEDIAINFO_SYM) { return TULIP_LINK_UNKNOWN; } /* * We will wait for 3 seconds until the link goes into suspect mode. */ if (sc->tulip_flags & TULIP_LINKUP) { if (linkup == TULIP_LINK_UP) sc->tulip_probe_timeout = 3000; if (sc->tulip_probe_timeout > 0) return TULIP_LINK_UP; sc->tulip_flags &= ~TULIP_LINKUP; if_printf(ifp, "link down: cable problem?\n"); } #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_link_downed++; #endif return TULIP_LINK_DOWN; } static void tulip_media_poll( tulip_softc_t * const sc, tulip_mediapoll_event_t event) { struct ifnet *ifp = sc->tulip_ifp; TULIP_LOCK_ASSERT(sc); #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_events[event]++; #endif if (sc->tulip_probe_state == TULIP_PROBE_INACTIVE && event == TULIP_MEDIAPOLL_TIMER) { switch (tulip_media_link_monitor(sc)) { case TULIP_LINK_DOWN: { /* * Link Monitor failed. Probe for new media. */ event = TULIP_MEDIAPOLL_LINKFAIL; break; } case TULIP_LINK_UP: { /* * Check again soon. 
*/ tulip_timeout(sc); return; } case TULIP_LINK_UNKNOWN: { /* * We can't tell so don't bother. */ return; } } } if (event == TULIP_MEDIAPOLL_LINKFAIL) { if (sc->tulip_probe_state == TULIP_PROBE_INACTIVE) { if (TULIP_DO_AUTOSENSE(sc)) { #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_link_failures++; #endif sc->tulip_media = TULIP_MEDIA_UNKNOWN; if (sc->tulip_ifp->if_flags & IFF_UP) tulip_reset(sc); /* restart probe */ } return; } #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_link_pollintrs++; #endif } if (event == TULIP_MEDIAPOLL_START) { sc->tulip_ifp->if_flags |= IFF_OACTIVE; if (sc->tulip_probe_state != TULIP_PROBE_INACTIVE) return; sc->tulip_probe_mediamask = 0; sc->tulip_probe_passes = 0; #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_media_probes++; #endif /* * If the SROM contained an explicit media to use, use it. */ sc->tulip_cmdmode &= ~(TULIP_CMD_RXRUN|TULIP_CMD_FULLDUPLEX); sc->tulip_flags |= TULIP_TRYNWAY|TULIP_PROBE1STPASS; sc->tulip_flags &= ~(TULIP_DIDNWAY|TULIP_PRINTMEDIA|TULIP_PRINTLINKUP); /* * connidx is defaulted to a media_unknown type. */ sc->tulip_probe_media = tulip_srom_conninfo[sc->tulip_connidx].sc_media; if (sc->tulip_probe_media != TULIP_MEDIA_UNKNOWN) { tulip_linkup(sc, sc->tulip_probe_media); tulip_timeout(sc); return; } if (sc->tulip_features & TULIP_HAVE_GPR) { sc->tulip_probe_state = TULIP_PROBE_GPRTEST; sc->tulip_probe_timeout = 2000; } else { sc->tulip_probe_media = TULIP_MEDIA_MAX; sc->tulip_probe_timeout = 0; sc->tulip_probe_state = TULIP_PROBE_MEDIATEST; } } /* * Ignore txprobe failures or spurious callbacks. */ if (event == TULIP_MEDIAPOLL_TXPROBE_FAILED && sc->tulip_probe_state != TULIP_PROBE_MEDIATEST) { sc->tulip_flags &= ~TULIP_TXPROBE_ACTIVE; return; } /* * If we really transmitted a packet, then that's the media we'll use. 
*/ if (event == TULIP_MEDIAPOLL_TXPROBE_OK || event == TULIP_MEDIAPOLL_LINKPASS) { if (event == TULIP_MEDIAPOLL_LINKPASS) { /* XXX Check media status just to be sure */ sc->tulip_probe_media = TULIP_MEDIA_10BASET; #if defined(TULIP_DEBUG) } else { sc->tulip_dbg.dbg_txprobes_ok[sc->tulip_probe_media]++; #endif } tulip_linkup(sc, sc->tulip_probe_media); tulip_timeout(sc); return; } if (sc->tulip_probe_state == TULIP_PROBE_GPRTEST) { #if defined(TULIP_DO_GPR_SENSE) /* * Check for media via the general purpose register. * * Try to sense the media via the GPR. If the same value * occurs 3 times in a row then just use that. */ if (sc->tulip_probe_timeout > 0) { tulip_media_t new_probe_media = tulip_21140_gpr_media_sense(sc); #if defined(TULIP_DEBUG) if_printf(ifp, "media_poll: gpr sensing = %s\n", tulip_mediums[new_probe_media]); #endif if (new_probe_media != TULIP_MEDIA_UNKNOWN) { if (new_probe_media == sc->tulip_probe_media) { if (--sc->tulip_probe_count == 0) tulip_linkup(sc, sc->tulip_probe_media); } else { sc->tulip_probe_count = 10; } } sc->tulip_probe_media = new_probe_media; tulip_timeout(sc); return; } #endif /* TULIP_DO_GPR_SENSE */ /* * Brute force. We cycle through each of the media types * and try to transmit a packet. */ sc->tulip_probe_state = TULIP_PROBE_MEDIATEST; sc->tulip_probe_media = TULIP_MEDIA_MAX; sc->tulip_probe_timeout = 0; tulip_timeout(sc); return; } if (sc->tulip_probe_state != TULIP_PROBE_MEDIATEST && (sc->tulip_features & TULIP_HAVE_MII)) { tulip_media_t old_media = sc->tulip_probe_media; tulip_mii_autonegotiate(sc, sc->tulip_phyaddr); switch (sc->tulip_probe_state) { case TULIP_PROBE_FAILED: case TULIP_PROBE_MEDIATEST: { /* * Try the next media. 
*/ sc->tulip_probe_mediamask |= sc->tulip_mediums[sc->tulip_probe_media]->mi_mediamask; sc->tulip_probe_timeout = 0; #ifdef notyet if (sc->tulip_probe_state == TULIP_PROBE_FAILED) break; if (sc->tulip_probe_media != tulip_mii_phy_readspecific(sc)) break; sc->tulip_probe_timeout = TULIP_IS_MEDIA_TP(sc->tulip_probe_media) ? 2500 : 300; #endif break; } case TULIP_PROBE_PHYAUTONEG: { return; } case TULIP_PROBE_INACTIVE: { /* * Only probe if we autonegotiated a media that hasn't failed. */ sc->tulip_probe_timeout = 0; if (sc->tulip_probe_mediamask & TULIP_BIT(sc->tulip_probe_media)) { sc->tulip_probe_media = old_media; break; } tulip_linkup(sc, sc->tulip_probe_media); tulip_timeout(sc); return; } default: { #if defined(DIAGNOSTIC) || defined(TULIP_DEBUG) panic("tulip_media_poll: botch at line %d\n", __LINE__); #endif break; } } } if (event == TULIP_MEDIAPOLL_TXPROBE_FAILED) { #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_txprobes_failed[sc->tulip_probe_media]++; #endif sc->tulip_flags &= ~TULIP_TXPROBE_ACTIVE; return; } /* * switch to another media if we tried this one enough. */ if (/* event == TULIP_MEDIAPOLL_TXPROBE_FAILED || */ sc->tulip_probe_timeout <= 0) { #if defined(TULIP_DEBUG) if (sc->tulip_probe_media == TULIP_MEDIA_UNKNOWN) { if_printf(ifp, "poll media unknown!\n"); sc->tulip_probe_media = TULIP_MEDIA_MAX; } #endif /* * Find the next media type to check for. Full Duplex * types are not allowed. 
*/ do { sc->tulip_probe_media -= 1; if (sc->tulip_probe_media == TULIP_MEDIA_UNKNOWN) { if (++sc->tulip_probe_passes == 3) { if_printf(ifp, "autosense failed: cable problem?\n"); if ((sc->tulip_ifp->if_flags & IFF_UP) == 0) { sc->tulip_ifp->if_flags &= ~IFF_RUNNING; sc->tulip_probe_state = TULIP_PROBE_INACTIVE; return; } } sc->tulip_flags ^= TULIP_TRYNWAY; /* XXX */ sc->tulip_probe_mediamask = 0; sc->tulip_probe_media = TULIP_MEDIA_MAX - 1; } } while (sc->tulip_mediums[sc->tulip_probe_media] == NULL || (sc->tulip_probe_mediamask & TULIP_BIT(sc->tulip_probe_media)) || TULIP_IS_MEDIA_FD(sc->tulip_probe_media)); #if defined(TULIP_DEBUG) if_printf(ifp, "%s: probing %s\n", event == TULIP_MEDIAPOLL_TXPROBE_FAILED ? "txprobe failed" : "timeout", tulip_mediums[sc->tulip_probe_media]); #endif sc->tulip_probe_timeout = TULIP_IS_MEDIA_TP(sc->tulip_probe_media) ? 2500 : 1000; sc->tulip_probe_state = TULIP_PROBE_MEDIATEST; sc->tulip_probe.probe_txprobes = 0; tulip_reset(sc); tulip_media_set(sc, sc->tulip_probe_media); sc->tulip_flags &= ~TULIP_TXPROBE_ACTIVE; } tulip_timeout(sc); /* * If this is hanging off a phy, we know are doing NWAY and we have * forced the phy to a specific speed. Wait for link up before * before sending a packet. */ switch (sc->tulip_mediums[sc->tulip_probe_media]->mi_type) { case TULIP_MEDIAINFO_MII: { if (sc->tulip_probe_media != tulip_mii_phy_readspecific(sc)) return; break; } case TULIP_MEDIAINFO_SIA: { if (TULIP_IS_MEDIA_TP(sc->tulip_probe_media)) { if (TULIP_CSR_READ(sc, csr_sia_status) & TULIP_SIASTS_LINKFAIL) return; tulip_linkup(sc, sc->tulip_probe_media); #ifdef notyet if (sc->tulip_features & TULIP_HAVE_MII) tulip_timeout(sc); #endif return; } break; } case TULIP_MEDIAINFO_RESET: case TULIP_MEDIAINFO_SYM: case TULIP_MEDIAINFO_NONE: case TULIP_MEDIAINFO_GPR: { break; } } /* * Try to send a packet. 
*/ tulip_txprobe(sc); } static void tulip_media_select( tulip_softc_t * const sc) { TULIP_LOCK_ASSERT(sc); if (sc->tulip_features & TULIP_HAVE_GPR) { TULIP_CSR_WRITE(sc, csr_gp, TULIP_GP_PINSET|sc->tulip_gpinit); DELAY(10); TULIP_CSR_WRITE(sc, csr_gp, sc->tulip_gpdata); } /* * If this board has no media, just return */ if (sc->tulip_features & TULIP_HAVE_NOMEDIA) return; if (sc->tulip_media == TULIP_MEDIA_UNKNOWN) { TULIP_CSR_WRITE(sc, csr_intr, sc->tulip_intrmask); (*sc->tulip_boardsw->bd_media_poll)(sc, TULIP_MEDIAPOLL_START); } else { tulip_media_set(sc, sc->tulip_media); } } static void tulip_21040_mediainfo_init( tulip_softc_t * const sc, tulip_media_t media) { TULIP_LOCK_ASSERT(sc); sc->tulip_cmdmode |= TULIP_CMD_CAPTREFFCT|TULIP_CMD_THRSHLD160 |TULIP_CMD_BACKOFFCTR; sc->tulip_ifp->if_baudrate = 10000000; if (media == TULIP_MEDIA_10BASET || media == TULIP_MEDIA_UNKNOWN) { TULIP_MEDIAINFO_SIA_INIT(sc, &sc->tulip_mediainfo[0], 21040, 10BASET); TULIP_MEDIAINFO_SIA_INIT(sc, &sc->tulip_mediainfo[1], 21040, 10BASET_FD); sc->tulip_intrmask |= TULIP_STS_LINKPASS|TULIP_STS_LINKFAIL; } if (media == TULIP_MEDIA_AUIBNC || media == TULIP_MEDIA_UNKNOWN) { TULIP_MEDIAINFO_SIA_INIT(sc, &sc->tulip_mediainfo[2], 21040, AUIBNC); } if (media == TULIP_MEDIA_UNKNOWN) { TULIP_MEDIAINFO_SIA_INIT(sc, &sc->tulip_mediainfo[3], 21040, EXTSIA); } } static void tulip_21040_media_probe( tulip_softc_t * const sc) { TULIP_LOCK_ASSERT(sc); tulip_21040_mediainfo_init(sc, TULIP_MEDIA_UNKNOWN); return; } static void tulip_21040_10baset_only_media_probe( tulip_softc_t * const sc) { TULIP_LOCK_ASSERT(sc); tulip_21040_mediainfo_init(sc, TULIP_MEDIA_10BASET); tulip_media_set(sc, TULIP_MEDIA_10BASET); sc->tulip_media = TULIP_MEDIA_10BASET; } static void tulip_21040_10baset_only_media_select( tulip_softc_t * const sc) { TULIP_LOCK_ASSERT(sc); sc->tulip_flags |= TULIP_LINKUP; if (sc->tulip_media == TULIP_MEDIA_10BASET_FD) { sc->tulip_cmdmode |= TULIP_CMD_FULLDUPLEX; sc->tulip_flags &= ~TULIP_SQETEST; } 
else { sc->tulip_cmdmode &= ~TULIP_CMD_FULLDUPLEX; sc->tulip_flags |= TULIP_SQETEST; } tulip_media_set(sc, sc->tulip_media); } static void tulip_21040_auibnc_only_media_probe( tulip_softc_t * const sc) { TULIP_LOCK_ASSERT(sc); tulip_21040_mediainfo_init(sc, TULIP_MEDIA_AUIBNC); sc->tulip_flags |= TULIP_SQETEST|TULIP_LINKUP; tulip_media_set(sc, TULIP_MEDIA_AUIBNC); sc->tulip_media = TULIP_MEDIA_AUIBNC; } static void tulip_21040_auibnc_only_media_select( tulip_softc_t * const sc) { TULIP_LOCK_ASSERT(sc); tulip_media_set(sc, TULIP_MEDIA_AUIBNC); sc->tulip_cmdmode &= ~TULIP_CMD_FULLDUPLEX; } static const tulip_boardsw_t tulip_21040_boardsw = { TULIP_21040_GENERIC, tulip_21040_media_probe, tulip_media_select, tulip_media_poll, }; static const tulip_boardsw_t tulip_21040_10baset_only_boardsw = { TULIP_21040_GENERIC, tulip_21040_10baset_only_media_probe, tulip_21040_10baset_only_media_select, NULL, }; static const tulip_boardsw_t tulip_21040_auibnc_only_boardsw = { TULIP_21040_GENERIC, tulip_21040_auibnc_only_media_probe, tulip_21040_auibnc_only_media_select, NULL, }; static void tulip_21041_mediainfo_init( tulip_softc_t * const sc) { tulip_media_info_t * const mi = sc->tulip_mediainfo; TULIP_LOCK_ASSERT(sc); #ifdef notyet if (sc->tulip_revinfo >= 0x20) { TULIP_MEDIAINFO_SIA_INIT(sc, &mi[0], 21041P2, 10BASET); TULIP_MEDIAINFO_SIA_INIT(sc, &mi[1], 21041P2, 10BASET_FD); TULIP_MEDIAINFO_SIA_INIT(sc, &mi[0], 21041P2, AUI); TULIP_MEDIAINFO_SIA_INIT(sc, &mi[1], 21041P2, BNC); return; } #endif TULIP_MEDIAINFO_SIA_INIT(sc, &mi[0], 21041, 10BASET); TULIP_MEDIAINFO_SIA_INIT(sc, &mi[1], 21041, 10BASET_FD); TULIP_MEDIAINFO_SIA_INIT(sc, &mi[2], 21041, AUI); TULIP_MEDIAINFO_SIA_INIT(sc, &mi[3], 21041, BNC); } static void tulip_21041_media_probe( tulip_softc_t * const sc) { TULIP_LOCK_ASSERT(sc); sc->tulip_ifp->if_baudrate = 10000000; sc->tulip_cmdmode |= TULIP_CMD_CAPTREFFCT|TULIP_CMD_ENHCAPTEFFCT |TULIP_CMD_THRSHLD160|TULIP_CMD_BACKOFFCTR; sc->tulip_intrmask |= 
TULIP_STS_LINKPASS|TULIP_STS_LINKFAIL; tulip_21041_mediainfo_init(sc); } static void tulip_21041_media_poll( tulip_softc_t * const sc, const tulip_mediapoll_event_t event) { u_int32_t sia_status; TULIP_LOCK_ASSERT(sc); #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_events[event]++; #endif if (event == TULIP_MEDIAPOLL_LINKFAIL) { if (sc->tulip_probe_state != TULIP_PROBE_INACTIVE || !TULIP_DO_AUTOSENSE(sc)) return; sc->tulip_media = TULIP_MEDIA_UNKNOWN; tulip_reset(sc); /* start probe */ return; } /* * If we've been been asked to start a poll or link change interrupt * restart the probe (and reset the tulip to a known state). */ if (event == TULIP_MEDIAPOLL_START) { sc->tulip_ifp->if_flags |= IFF_OACTIVE; sc->tulip_cmdmode &= ~(TULIP_CMD_FULLDUPLEX|TULIP_CMD_RXRUN); #ifdef notyet if (sc->tulip_revinfo >= 0x20) { sc->tulip_cmdmode |= TULIP_CMD_FULLDUPLEX; sc->tulip_flags |= TULIP_DIDNWAY; } #endif TULIP_CSR_WRITE(sc, csr_command, sc->tulip_cmdmode); sc->tulip_probe_state = TULIP_PROBE_MEDIATEST; sc->tulip_probe_media = TULIP_MEDIA_10BASET; sc->tulip_probe_timeout = TULIP_21041_PROBE_10BASET_TIMEOUT; tulip_media_set(sc, TULIP_MEDIA_10BASET); tulip_timeout(sc); return; } if (sc->tulip_probe_state == TULIP_PROBE_INACTIVE) return; if (event == TULIP_MEDIAPOLL_TXPROBE_OK) { #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_txprobes_ok[sc->tulip_probe_media]++; #endif tulip_linkup(sc, sc->tulip_probe_media); return; } sia_status = TULIP_CSR_READ(sc, csr_sia_status); TULIP_CSR_WRITE(sc, csr_sia_status, sia_status); if ((sia_status & TULIP_SIASTS_LINKFAIL) == 0) { if (sc->tulip_revinfo >= 0x20) { if (sia_status & (PHYSTS_10BASET_FD << (16 - 6))) sc->tulip_probe_media = TULIP_MEDIA_10BASET_FD; } /* * If the link has passed LinkPass, 10baseT is the * proper media to use. */ tulip_linkup(sc, sc->tulip_probe_media); return; } /* * wait for up to 2.4 seconds for the link to reach pass state. * Only then start scanning the other media for activity. 
     * choose media with receive activity over those without.
     */
    if (sc->tulip_probe_media == TULIP_MEDIA_10BASET) {
	if (event != TULIP_MEDIAPOLL_TIMER)
	    return;
	if (sc->tulip_probe_timeout > 0
		&& (sia_status & TULIP_SIASTS_OTHERRXACTIVITY) == 0) {
	    tulip_timeout(sc);
	    return;
	}
	/* 10baseT failed; move on to AUI/BNC, preferring whichever saw RX. */
	sc->tulip_probe_timeout = TULIP_21041_PROBE_AUIBNC_TIMEOUT;
	sc->tulip_flags |= TULIP_WANTRXACT;
	if (sia_status & TULIP_SIASTS_OTHERRXACTIVITY) {
	    sc->tulip_probe_media = TULIP_MEDIA_BNC;
	} else {
	    sc->tulip_probe_media = TULIP_MEDIA_AUI;
	}
	tulip_media_set(sc, sc->tulip_probe_media);
	tulip_timeout(sc);
	return;
    }

    /*
     * If we failed, clear the txprobe active flag.
     */
    if (event == TULIP_MEDIAPOLL_TXPROBE_FAILED)
	sc->tulip_flags &= ~TULIP_TXPROBE_ACTIVE;

    if (event == TULIP_MEDIAPOLL_TIMER) {
	/*
	 * If we've received something, then that's our link!
	 */
	if (sc->tulip_flags & TULIP_RXACT) {
	    tulip_linkup(sc, sc->tulip_probe_media);
	    return;
	}
	/*
	 * if no txprobe active
	 */
	if ((sc->tulip_flags & TULIP_TXPROBE_ACTIVE) == 0
		&& ((sc->tulip_flags & TULIP_WANTRXACT) == 0
		    || (sia_status & TULIP_SIASTS_RXACTIVITY))) {
	    sc->tulip_probe_timeout = TULIP_21041_PROBE_AUIBNC_TIMEOUT;
	    tulip_txprobe(sc);
	    tulip_timeout(sc);
	    return;
	}
	/*
	 * Take 2 passes through before deciding to not
	 * wait for receive activity.  Then take another
	 * two passes before spitting out a warning.
	 */
	if (sc->tulip_probe_timeout <= 0) {
	    if (sc->tulip_flags & TULIP_WANTRXACT) {
		sc->tulip_flags &= ~TULIP_WANTRXACT;
		sc->tulip_probe_timeout = TULIP_21041_PROBE_AUIBNC_TIMEOUT;
	    } else {
		if_printf(sc->tulip_ifp,
		    "autosense failed: cable problem?\n");
		if ((sc->tulip_ifp->if_flags & IFF_UP) == 0) {
		    /* Interface is down: give up the probe entirely. */
		    sc->tulip_ifp->if_flags &= ~IFF_RUNNING;
		    sc->tulip_probe_state = TULIP_PROBE_INACTIVE;
		    return;
		}
	    }
	}
    }

    /*
     * Since this media failed to probe, try the other one.
     */
    sc->tulip_probe_timeout = TULIP_21041_PROBE_AUIBNC_TIMEOUT;
    if (sc->tulip_probe_media == TULIP_MEDIA_AUI) {
	sc->tulip_probe_media = TULIP_MEDIA_BNC;
    } else {
	sc->tulip_probe_media = TULIP_MEDIA_AUI;
    }
    tulip_media_set(sc, sc->tulip_probe_media);
    sc->tulip_flags &= ~TULIP_TXPROBE_ACTIVE;
    tulip_timeout(sc);
}

/* Dispatch table for generic 21041 boards. */
static const tulip_boardsw_t tulip_21041_boardsw = {
    TULIP_21041_GENERIC,
    tulip_21041_media_probe,
    tulip_media_select,
    tulip_21041_media_poll
};

/*
 * Table of known MII PHYs and the vendor-specific status registers
 * that reveal which media was negotiated.  attr_id is the OUI/model
 * part of the PHY id; each mode entry is (regno, mask, value).
 */
static const tulip_phy_attr_t tulip_mii_phy_attrlist[] = {
    { 0x20005c00, 0,		/* 08-00-17 */
      {
	{ 0x19, 0x0040, 0x0040 },	/* 10TX */
	{ 0x19, 0x0040, 0x0000 },	/* 100TX */
      },
#if defined(TULIP_DEBUG)
      "NS DP83840",
#endif
    },
    { 0x0281F400, 0,		/* 00-A0-7D */
      {
	{ 0x12, 0x0010, 0x0000 },	/* 10T */
	{ },				/* 100TX */
	{ 0x12, 0x0010, 0x0010 },	/* 100T4 */
	{ 0x12, 0x0008, 0x0008 },	/* FULL_DUPLEX */
      },
#if defined(TULIP_DEBUG)
      "Seeq 80C240"
#endif
    },
#if 0
    { 0x0015F420, 0,	/* 00-A0-7D */
      {
	{ 0x12, 0x0010, 0x0000 },	/* 10T */
	{ },				/* 100TX */
	{ 0x12, 0x0010, 0x0010 },	/* 100T4 */
	{ 0x12, 0x0008, 0x0008 },	/* FULL_DUPLEX */
      },
#if defined(TULIP_DEBUG)
      "Broadcom BCM5000"
#endif
    },
#endif
    /*
     * NOTE(review): same attr_id as the Seeq entry above; first match
     * wins in tulip_mii_phy_readspecific(), so this entry may be
     * shadowed -- verify the ICS 1890 id.
     */
    { 0x0281F400, 0,		/* 00-A0-BE */
      {
	{ 0x11, 0x8000, 0x0000 },	/* 10T */
	{ 0x11, 0x8000, 0x8000 },	/* 100TX */
	{ },				/* 100T4 */
	{ 0x11, 0x4000, 0x4000 },	/* FULL_DUPLEX */
      },
#if defined(TULIP_DEBUG)
      "ICS 1890"
#endif
    },
    { 0 }
};

/*
 * Ask the PHY's vendor-specific registers which media was negotiated.
 * Returns TULIP_MEDIA_UNKNOWN when the link is down or the PHY is not
 * in the table above.
 */
static tulip_media_t
tulip_mii_phy_readspecific(
    tulip_softc_t * const sc)
{
    const tulip_phy_attr_t *attr;
    u_int16_t data;
    u_int32_t id;
    unsigned idx = 0;
    /* Indexed by mode: 1=10T, 2=100TX, 3=100T4; +4 when full duplex. */
    static const tulip_media_t table[] = {
	TULIP_MEDIA_UNKNOWN,
	TULIP_MEDIA_10BASET,
	TULIP_MEDIA_100BASETX,
	TULIP_MEDIA_100BASET4,
	TULIP_MEDIA_UNKNOWN,
	TULIP_MEDIA_10BASET_FD,
	TULIP_MEDIA_100BASETX_FD,
	TULIP_MEDIA_UNKNOWN
    };

    TULIP_LOCK_ASSERT(sc);

    /*
     * Don't read phy specific registers if link is not up.
     */
    data = tulip_mii_readreg(sc, sc->tulip_phyaddr, PHYREG_STATUS);
    if ((data & (PHYSTS_LINK_UP|PHYSTS_EXTENDED_REGS))
	    != (PHYSTS_LINK_UP|PHYSTS_EXTENDED_REGS))
	return TULIP_MEDIA_UNKNOWN;

    /* Match the PHY id (sans the low revision nibble) against the table. */
    id = (tulip_mii_readreg(sc, sc->tulip_phyaddr, PHYREG_IDLOW) << 16)
	| tulip_mii_readreg(sc, sc->tulip_phyaddr, PHYREG_IDHIGH);
    for (attr = tulip_mii_phy_attrlist;; attr++) {
	if (attr->attr_id == 0)
	    return TULIP_MEDIA_UNKNOWN;	/* end of table: unknown PHY */
	if ((id & ~0x0F) == attr->attr_id)
	    break;
    }

    /* Test 100TX first, then 100T4, then 10T; stop at the first match. */
    if (attr->attr_modes[PHY_MODE_100TX].pm_regno) {
	const tulip_phy_modedata_t * const pm = &attr->attr_modes[PHY_MODE_100TX];
	data = tulip_mii_readreg(sc, sc->tulip_phyaddr, pm->pm_regno);
	if ((data & pm->pm_mask) == pm->pm_value)
	    idx = 2;
    }
    if (idx == 0 && attr->attr_modes[PHY_MODE_100T4].pm_regno) {
	const tulip_phy_modedata_t * const pm = &attr->attr_modes[PHY_MODE_100T4];
	data = tulip_mii_readreg(sc, sc->tulip_phyaddr, pm->pm_regno);
	if ((data & pm->pm_mask) == pm->pm_value)
	    idx = 3;
    }
    if (idx == 0 && attr->attr_modes[PHY_MODE_10T].pm_regno) {
	const tulip_phy_modedata_t * const pm = &attr->attr_modes[PHY_MODE_10T];
	data = tulip_mii_readreg(sc, sc->tulip_phyaddr, pm->pm_regno);
	if ((data & pm->pm_mask) == pm->pm_value)
	    idx = 1;
    }
    /* Add 4 to the index when the PHY reports full duplex. */
    if (idx != 0 && attr->attr_modes[PHY_MODE_FULLDUPLEX].pm_regno) {
	const tulip_phy_modedata_t * const pm = &attr->attr_modes[PHY_MODE_FULLDUPLEX];
	data = tulip_mii_readreg(sc, sc->tulip_phyaddr, pm->pm_regno);
	idx += ((data & pm->pm_mask) == pm->pm_value ?
	    4 : 0);
    }
    return table[idx];
}

/*
 * Return the MII address of the offset'th PHY on the bus, scanning
 * addresses 1..31 first and falling back to address 0; returns
 * TULIP_MII_NOPHY if no such PHY exists.
 */
static unsigned
tulip_mii_get_phyaddr(
    tulip_softc_t * const sc,
    unsigned offset)
{
    unsigned phyaddr;

    TULIP_LOCK_ASSERT(sc);
    for (phyaddr = 1; phyaddr < 32; phyaddr++) {
	unsigned status = tulip_mii_readreg(sc, phyaddr, PHYREG_STATUS);
	/* All-zeros / all-ones / sub-baseline status => nothing there. */
	if (status == 0 || status == 0xFFFF || status < PHYSTS_10BASET)
	    continue;
	if (offset == 0)
	    return phyaddr;
	offset--;
    }
    if (offset == 0) {
	unsigned status = tulip_mii_readreg(sc, 0, PHYREG_STATUS);
	if (status == 0 || status == 0xFFFF || status < PHYSTS_10BASET)
	    return TULIP_MII_NOPHY;
	return 0;
    }
    return TULIP_MII_NOPHY;
}

/*
 * Translate autonegotiation ability bits into a probe media, best
 * media first.  Returns 1 if a media was chosen (probe done), 0 if
 * nothing usable was advertised (fall back to a media test).
 */
static int
tulip_mii_map_abilities(
    tulip_softc_t * const sc,
    unsigned abilities)
{
    TULIP_LOCK_ASSERT(sc);
    sc->tulip_abilities = abilities;
    if (abilities & PHYSTS_100BASETX_FD) {
	sc->tulip_probe_media = TULIP_MEDIA_100BASETX_FD;
    } else if (abilities & PHYSTS_100BASET4) {
	sc->tulip_probe_media = TULIP_MEDIA_100BASET4;
    } else if (abilities & PHYSTS_100BASETX) {
	sc->tulip_probe_media = TULIP_MEDIA_100BASETX;
    } else if (abilities & PHYSTS_10BASET_FD) {
	sc->tulip_probe_media = TULIP_MEDIA_10BASET_FD;
    } else if (abilities & PHYSTS_10BASET) {
	sc->tulip_probe_media = TULIP_MEDIA_10BASET;
    } else {
	sc->tulip_probe_state = TULIP_PROBE_MEDIATEST;
	return 0;
    }
    sc->tulip_probe_state = TULIP_PROBE_INACTIVE;
    return 1;
}

/*
 * Drive IEEE 802.3u autonegotiation on the PHY as a small state
 * machine: reset the PHY, restart negotiation, then collect the
 * negotiated abilities.  Re-entered via tulip_timeout() while waiting.
 */
static void
tulip_mii_autonegotiate(
    tulip_softc_t * const sc,
    const unsigned phyaddr)
{
    struct ifnet *ifp = sc->tulip_ifp;

    TULIP_LOCK_ASSERT(sc);
    switch (sc->tulip_probe_state) {
        case TULIP_PROBE_MEDIATEST:
        case TULIP_PROBE_INACTIVE: {
	    sc->tulip_flags |= TULIP_DIDNWAY;
	    tulip_mii_writereg(sc, phyaddr, PHYREG_CONTROL, PHYCTL_RESET);
	    sc->tulip_probe_timeout = 3000;	/* time budget for PHY reset */
	    sc->tulip_intrmask |= TULIP_STS_ABNRMLINTR|TULIP_STS_NORMALINTR;
	    sc->tulip_probe_state = TULIP_PROBE_PHYRESET;
	}
	/* FALLTHROUGH */
        case TULIP_PROBE_PHYRESET: {
	    u_int32_t status;
	    u_int32_t data = tulip_mii_readreg(sc, phyaddr, PHYREG_CONTROL);
	    if (data & PHYCTL_RESET) {
		/* Reset still in progress; poll again until timeout. */
		if (sc->tulip_probe_timeout > 0) {
		    tulip_timeout(sc);
		    return;
		}
		printf("%s(phy%d): error: reset of PHY never completed!\n",
		    ifp->if_xname, phyaddr);
		sc->tulip_flags &= ~TULIP_TXPROBE_ACTIVE;
		sc->tulip_probe_state = TULIP_PROBE_FAILED;
		sc->tulip_ifp->if_flags &= ~(IFF_UP|IFF_RUNNING);
		return;
	    }
	    status = tulip_mii_readreg(sc, phyaddr, PHYREG_STATUS);
	    if ((status & PHYSTS_CAN_AUTONEG) == 0) {
#if defined(TULIP_DEBUG)
		loudprintf("%s(phy%d): autonegotiation disabled\n",
		    ifp->if_xname, phyaddr);
#endif
		sc->tulip_flags &= ~TULIP_DIDNWAY;
		sc->tulip_probe_state = TULIP_PROBE_MEDIATEST;
		return;
	    }
	    /* Advertise exactly what the PHY claims it can do. */
	    if (tulip_mii_readreg(sc, phyaddr, PHYREG_AUTONEG_ADVERTISEMENT) != ((status >> 6) | 0x01))
		tulip_mii_writereg(sc, phyaddr, PHYREG_AUTONEG_ADVERTISEMENT,
		    (status >> 6) | 0x01);
	    tulip_mii_writereg(sc, phyaddr, PHYREG_CONTROL,
		data|PHYCTL_AUTONEG_RESTART|PHYCTL_AUTONEG_ENABLE);
	    data = tulip_mii_readreg(sc, phyaddr, PHYREG_CONTROL);
#if defined(TULIP_DEBUG)
	    if ((data & PHYCTL_AUTONEG_ENABLE) == 0)
		loudprintf("%s(phy%d): oops: enable autonegotiation failed: 0x%04x\n",
		    ifp->if_xname, phyaddr, data);
	    else
		loudprintf("%s(phy%d): autonegotiation restarted: 0x%04x\n",
		    ifp->if_xname, phyaddr, data);
	    sc->tulip_dbg.dbg_nway_starts++;
#endif
	    sc->tulip_probe_state = TULIP_PROBE_PHYAUTONEG;
	    sc->tulip_probe_timeout = 3000;	/* budget for negotiation */
	}
	/* FALLTHROUGH */
        case TULIP_PROBE_PHYAUTONEG: {
	    u_int32_t status = tulip_mii_readreg(sc, phyaddr, PHYREG_STATUS);
	    u_int32_t data;
	    if ((status & PHYSTS_AUTONEG_DONE) == 0) {
		if (sc->tulip_probe_timeout > 0) {
		    tulip_timeout(sc);
		    return;
		}
#if defined(TULIP_DEBUG)
		loudprintf("%s(phy%d): autonegotiation timeout: sts=0x%04x, ctl=0x%04x\n",
		    ifp->if_xname, phyaddr, status,
		    tulip_mii_readreg(sc, phyaddr, PHYREG_CONTROL));
#endif
		sc->tulip_flags &= ~TULIP_DIDNWAY;
		sc->tulip_probe_state = TULIP_PROBE_MEDIATEST;
		return;
	    }
	    data = tulip_mii_readreg(sc, phyaddr, PHYREG_AUTONEG_ABILITIES);
#if defined(TULIP_DEBUG)
	    loudprintf("%s(phy%d): autonegotiation complete: 0x%04x\n",
		ifp->if_xname, phyaddr, data);
#endif
	    /* Intersect link-partner abilities with our own status bits. */
	    data = (data << 6) & status;
	    if
	    (!tulip_mii_map_abilities(sc, data))
		sc->tulip_flags &= ~TULIP_DIDNWAY;
	    return;
	}
        default: {
#if defined(DIAGNOSTIC)
	    panic("tulip_media_poll: botch at line %d\n", __LINE__);
#endif
	    break;
	}
    }
#if defined(TULIP_DEBUG)
    loudprintf("%s(phy%d): autonegotiation failure: state = %d\n",
	ifp->if_xname, phyaddr, sc->tulip_probe_state);
    sc->tulip_dbg.dbg_nway_failures++;
#endif
}

/*
 * Program the 2114x command register (and SIA/GP state) for the media
 * currently selected or being probed; also sets duplex, transmit
 * threshold, SQE testing and the interface baudrate per media type.
 */
static void
tulip_2114x_media_preset(
    tulip_softc_t * const sc)
{
    const tulip_media_info_t *mi = NULL;
    tulip_media_t media = sc->tulip_media;

    TULIP_LOCK_ASSERT(sc);
    if (sc->tulip_probe_state == TULIP_PROBE_INACTIVE)
	media = sc->tulip_media;
    else
	media = sc->tulip_probe_media;

    sc->tulip_cmdmode &= ~TULIP_CMD_PORTSELECT;
    sc->tulip_flags &= ~TULIP_SQETEST;
    if (media != TULIP_MEDIA_UNKNOWN && media != TULIP_MEDIA_MAX) {
#if defined(TULIP_DEBUG)
	if (media < TULIP_MEDIA_MAX && sc->tulip_mediums[media] != NULL) {
#endif
	    mi = sc->tulip_mediums[media];
	    if (mi->mi_type == TULIP_MEDIAINFO_MII) {
		sc->tulip_cmdmode |= TULIP_CMD_PORTSELECT;
	    } else if (mi->mi_type == TULIP_MEDIAINFO_GPR
		       || mi->mi_type == TULIP_MEDIAINFO_SYM) {
		sc->tulip_cmdmode &= ~TULIP_GPR_CMDBITS;
		sc->tulip_cmdmode |= mi->mi_cmdmode;
	    } else if (mi->mi_type == TULIP_MEDIAINFO_SIA) {
		TULIP_CSR_WRITE(sc, csr_sia_connectivity, TULIP_SIACONN_RESET);
	    }
#if defined(TULIP_DEBUG)
	} else {
	    if_printf(sc->tulip_ifp, "preset: bad media %d!\n", media);
	}
#endif
    }
    switch (media) {
	case TULIP_MEDIA_BNC:
	case TULIP_MEDIA_AUI:
	case TULIP_MEDIA_10BASET: {
	    sc->tulip_cmdmode &= ~TULIP_CMD_FULLDUPLEX;
	    sc->tulip_cmdmode |= TULIP_CMD_TXTHRSHLDCTL;
	    sc->tulip_ifp->if_baudrate = 10000000;
	    sc->tulip_flags |= TULIP_SQETEST;
	    break;
	}
	case TULIP_MEDIA_10BASET_FD: {
	    sc->tulip_cmdmode |= TULIP_CMD_FULLDUPLEX|TULIP_CMD_TXTHRSHLDCTL;
	    sc->tulip_ifp->if_baudrate = 10000000;
	    break;
	}
	case TULIP_MEDIA_100BASEFX:
	case TULIP_MEDIA_100BASET4:
	case TULIP_MEDIA_100BASETX: {
	    sc->tulip_cmdmode &= ~(TULIP_CMD_FULLDUPLEX|TULIP_CMD_TXTHRSHLDCTL);
	    sc->tulip_cmdmode |= TULIP_CMD_PORTSELECT;
	    sc->tulip_ifp->if_baudrate = 100000000;
	    break;
	}
	case TULIP_MEDIA_100BASEFX_FD:
	case TULIP_MEDIA_100BASETX_FD: {
	    sc->tulip_cmdmode |= TULIP_CMD_FULLDUPLEX|TULIP_CMD_PORTSELECT;
	    sc->tulip_cmdmode &= ~TULIP_CMD_TXTHRSHLDCTL;
	    sc->tulip_ifp->if_baudrate = 100000000;
	    break;
	}
	default: {
	    break;
	}
    }
    TULIP_CSR_WRITE(sc, csr_command, sc->tulip_cmdmode);
}

/*
 ********************************************************************
 *  Start of 21140/21140A support which does not use the MII interface
 */

/*
 * Poll handler for boards whose media never needs polling; reaching
 * this indicates a logic error.
 */
static void
tulip_null_media_poll(
    tulip_softc_t * const sc,
    tulip_mediapoll_event_t event)
{
#if defined(TULIP_DEBUG)
    sc->tulip_dbg.dbg_events[event]++;
#endif
#if defined(DIAGNOSTIC)
    if_printf(sc->tulip_ifp, "botch(media_poll) at line %d\n",
	__LINE__);
#endif
}

/*
 * Record one GP-port media entry for a 21140 board.
 */
__inline static void
tulip_21140_mediainit(
    tulip_softc_t * const sc,
    tulip_media_info_t * const mip,
    tulip_media_t const media,
    unsigned gpdata,
    unsigned cmdmode)
{
    TULIP_LOCK_ASSERT(sc);
    sc->tulip_mediums[media] = mip;
    mip->mi_type = TULIP_MEDIAINFO_GPR;
    mip->mi_cmdmode = cmdmode;
    mip->mi_gpdata = gpdata;
}

/*
 * Media probe for the DEC 21140 evaluation board: sense 10 vs 100 from
 * the GP pins and register the four supported media.
 */
static void
tulip_21140_evalboard_media_probe(
    tulip_softc_t * const sc)
{
    tulip_media_info_t *mip = sc->tulip_mediainfo;

    TULIP_LOCK_ASSERT(sc);
    sc->tulip_gpinit = TULIP_GP_EB_PINS;
    sc->tulip_gpdata = TULIP_GP_EB_INIT;
    TULIP_CSR_WRITE(sc, csr_gp, TULIP_GP_EB_PINS);
    TULIP_CSR_WRITE(sc, csr_gp, TULIP_GP_EB_INIT);
    TULIP_CSR_WRITE(sc, csr_command,
	TULIP_CSR_READ(sc, csr_command) | TULIP_CMD_PORTSELECT |
	TULIP_CMD_PCSFUNCTION | TULIP_CMD_SCRAMBLER | TULIP_CMD_MUSTBEONE);
    TULIP_CSR_WRITE(sc, csr_command,
	TULIP_CSR_READ(sc, csr_command) & ~TULIP_CMD_TXTHRSHLDCTL);
    DELAY(1000000);
    /* NOTE(review): OK100 set selects 10baseT here -- the GP bit appears
     * to be active-low on this board; confirm against the board spec. */
    if ((TULIP_CSR_READ(sc, csr_gp) & TULIP_GP_EB_OK100) != 0) {
	sc->tulip_media = TULIP_MEDIA_10BASET;
    } else {
	sc->tulip_media = TULIP_MEDIA_100BASETX;
    }
    tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_10BASET,
			  TULIP_GP_EB_INIT,
			  TULIP_CMD_TXTHRSHLDCTL);
    tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_10BASET_FD,
			  TULIP_GP_EB_INIT,
			  TULIP_CMD_TXTHRSHLDCTL|TULIP_CMD_FULLDUPLEX);
    tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_100BASETX,
			  TULIP_GP_EB_INIT,
			  TULIP_CMD_PORTSELECT|TULIP_CMD_PCSFUNCTION
			      |TULIP_CMD_SCRAMBLER);
    tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_100BASETX_FD,
			  TULIP_GP_EB_INIT,
			  TULIP_CMD_PORTSELECT|TULIP_CMD_PCSFUNCTION
			      |TULIP_CMD_SCRAMBLER|TULIP_CMD_FULLDUPLEX);
}

/* Dispatch table for the DEC 21140 evaluation board. */
static const tulip_boardsw_t tulip_21140_eb_boardsw = {
    TULIP_21140_DEC_EB,
    tulip_21140_evalboard_media_probe,
    tulip_media_select,
    tulip_null_media_poll,
    tulip_2114x_media_preset,
};

/*
 * Media probe for the Accton EN1207: like the eval board but with a
 * BNC port sensed through additional GP bits.
 */
static void
tulip_21140_accton_media_probe(
    tulip_softc_t * const sc)
{
    tulip_media_info_t *mip = sc->tulip_mediainfo;
    unsigned gpdata;

    TULIP_LOCK_ASSERT(sc);
    sc->tulip_gpinit = TULIP_GP_EB_PINS;
    sc->tulip_gpdata = TULIP_GP_EB_INIT;
    TULIP_CSR_WRITE(sc, csr_gp, TULIP_GP_EB_PINS);
    TULIP_CSR_WRITE(sc, csr_gp, TULIP_GP_EB_INIT);
    TULIP_CSR_WRITE(sc, csr_command,
	TULIP_CSR_READ(sc, csr_command) | TULIP_CMD_PORTSELECT |
	TULIP_CMD_PCSFUNCTION | TULIP_CMD_SCRAMBLER | TULIP_CMD_MUSTBEONE);
    TULIP_CSR_WRITE(sc, csr_command,
	TULIP_CSR_READ(sc, csr_command) & ~TULIP_CMD_TXTHRSHLDCTL);
    DELAY(1000000);
    gpdata = TULIP_CSR_READ(sc, csr_gp);
    if ((gpdata & TULIP_GP_EN1207_UTP_INIT) == 0) {
	sc->tulip_media = TULIP_MEDIA_10BASET;
    } else {
	if ((gpdata & TULIP_GP_EN1207_BNC_INIT) == 0) {
	    sc->tulip_media = TULIP_MEDIA_BNC;
	} else {
	    sc->tulip_media = TULIP_MEDIA_100BASETX;
	}
    }
    tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_BNC,
			  TULIP_GP_EN1207_BNC_INIT,
			  TULIP_CMD_TXTHRSHLDCTL);
    tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_10BASET,
			  TULIP_GP_EN1207_UTP_INIT,
			  TULIP_CMD_TXTHRSHLDCTL);
    tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_10BASET_FD,
			  TULIP_GP_EN1207_UTP_INIT,
			  TULIP_CMD_TXTHRSHLDCTL|TULIP_CMD_FULLDUPLEX);
    tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_100BASETX,
			  TULIP_GP_EN1207_100_INIT,
			  TULIP_CMD_PORTSELECT|TULIP_CMD_PCSFUNCTION
			      |TULIP_CMD_SCRAMBLER);
    tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_100BASETX_FD,
			  TULIP_GP_EN1207_100_INIT,
			  TULIP_CMD_PORTSELECT|TULIP_CMD_PCSFUNCTION
			      |TULIP_CMD_SCRAMBLER|TULIP_CMD_FULLDUPLEX);
}

/* Dispatch table for the Accton EN1207. */
static const tulip_boardsw_t tulip_21140_accton_boardsw = {
    TULIP_21140_EN1207,
    tulip_21140_accton_media_probe,
    tulip_media_select,
    tulip_null_media_poll,
    tulip_2114x_media_preset,
};

/*
 * Media probe for the SMC 9332: reset the chip, then watch the GP OK
 * bits for up to a second to decide between 10 and 100 Mb/s.
 */
static void
tulip_21140_smc9332_media_probe(
    tulip_softc_t * const sc)
{
    tulip_media_info_t *mip = sc->tulip_mediainfo;
    int idx, cnt = 0;

    TULIP_LOCK_ASSERT(sc);
    TULIP_CSR_WRITE(sc, csr_command, TULIP_CMD_PORTSELECT|TULIP_CMD_MUSTBEONE);
    TULIP_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET);
    DELAY(10);	/* Wait 10 microseconds (actually 50 PCI cycles but at
		   33MHz that comes to two microseconds but wait a
		   bit longer anyways) */
    TULIP_CSR_WRITE(sc, csr_command, TULIP_CMD_PORTSELECT |
	TULIP_CMD_PCSFUNCTION | TULIP_CMD_SCRAMBLER | TULIP_CMD_MUSTBEONE);
    sc->tulip_gpinit = TULIP_GP_SMC_9332_PINS;
    sc->tulip_gpdata = TULIP_GP_SMC_9332_INIT;
    TULIP_CSR_WRITE(sc, csr_gp, TULIP_GP_SMC_9332_PINS|TULIP_GP_PINSET);
    TULIP_CSR_WRITE(sc, csr_gp, TULIP_GP_SMC_9332_INIT);
    DELAY(200000);
    for (idx = 1000; idx > 0; idx--) {
	u_int32_t csr = TULIP_CSR_READ(sc, csr_gp);
	if ((csr & (TULIP_GP_SMC_9332_OK10|TULIP_GP_SMC_9332_OK100))
		== (TULIP_GP_SMC_9332_OK10|TULIP_GP_SMC_9332_OK100)) {
	    /* Need > 100 consecutive "both OK" reads to call it 100Mb. */
	    if (++cnt > 100)
		break;
	} else if ((csr & TULIP_GP_SMC_9332_OK10) == 0) {
	    break;
	} else {
	    cnt = 0;
	}
	DELAY(1000);
    }
    sc->tulip_media = cnt > 100 ?
	TULIP_MEDIA_100BASETX : TULIP_MEDIA_10BASET;
    tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_100BASETX,
			  TULIP_GP_SMC_9332_INIT,
			  TULIP_CMD_PORTSELECT|TULIP_CMD_PCSFUNCTION
			      |TULIP_CMD_SCRAMBLER);
    tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_100BASETX_FD,
			  TULIP_GP_SMC_9332_INIT,
			  TULIP_CMD_PORTSELECT|TULIP_CMD_PCSFUNCTION
			      |TULIP_CMD_SCRAMBLER|TULIP_CMD_FULLDUPLEX);
    tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_10BASET,
			  TULIP_GP_SMC_9332_INIT,
			  TULIP_CMD_TXTHRSHLDCTL);
    tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_10BASET_FD,
			  TULIP_GP_SMC_9332_INIT,
			  TULIP_CMD_TXTHRSHLDCTL|TULIP_CMD_FULLDUPLEX);
}

/* Dispatch table for the SMC 9332. */
static const tulip_boardsw_t tulip_21140_smc9332_boardsw = {
    TULIP_21140_SMC_9332,
    tulip_21140_smc9332_media_probe,
    tulip_media_select,
    tulip_null_media_poll,
    tulip_2114x_media_preset,
};

/*
 * Media probe for Cogent EM100 boards; the SROM id byte distinguishes
 * the fiber (100baseFX) variant from the TX variant.
 */
static void
tulip_21140_cogent_em100_media_probe(
    tulip_softc_t * const sc)
{
    tulip_media_info_t *mip = sc->tulip_mediainfo;
    u_int32_t cmdmode = TULIP_CSR_READ(sc, csr_command);

    TULIP_LOCK_ASSERT(sc);
    sc->tulip_gpinit = TULIP_GP_EM100_PINS;
    sc->tulip_gpdata = TULIP_GP_EM100_INIT;
    TULIP_CSR_WRITE(sc, csr_gp, TULIP_GP_EM100_PINS);
    TULIP_CSR_WRITE(sc, csr_gp, TULIP_GP_EM100_INIT);

    cmdmode = TULIP_CMD_PORTSELECT|TULIP_CMD_PCSFUNCTION|TULIP_CMD_MUSTBEONE;
    cmdmode &= ~(TULIP_CMD_TXTHRSHLDCTL|TULIP_CMD_SCRAMBLER);
    if (sc->tulip_rombuf[32] == TULIP_COGENT_EM100FX_ID) {
	/* Fiber variant: no scrambler. */
	TULIP_CSR_WRITE(sc, csr_command, cmdmode);
	sc->tulip_media = TULIP_MEDIA_100BASEFX;

	tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_100BASEFX,
			  TULIP_GP_EM100_INIT,
			  TULIP_CMD_PORTSELECT|TULIP_CMD_PCSFUNCTION);
	tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_100BASEFX_FD,
			  TULIP_GP_EM100_INIT,
			  TULIP_CMD_PORTSELECT|TULIP_CMD_PCSFUNCTION
			      |TULIP_CMD_FULLDUPLEX);
    } else {
	TULIP_CSR_WRITE(sc, csr_command, cmdmode|TULIP_CMD_SCRAMBLER);
	sc->tulip_media = TULIP_MEDIA_100BASETX;
	tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_100BASETX,
			  TULIP_GP_EM100_INIT,
			  TULIP_CMD_PORTSELECT|TULIP_CMD_PCSFUNCTION
			      |TULIP_CMD_SCRAMBLER);
	tulip_21140_mediainit(sc, mip++,
			  TULIP_MEDIA_100BASETX_FD,
			  TULIP_GP_EM100_INIT,
			  TULIP_CMD_PORTSELECT|TULIP_CMD_PCSFUNCTION
			      |TULIP_CMD_SCRAMBLER|TULIP_CMD_FULLDUPLEX);
    }
}

/* Dispatch table for Cogent EM100 boards. */
static const tulip_boardsw_t tulip_21140_cogent_em100_boardsw = {
    TULIP_21140_COGENT_EM100,
    tulip_21140_cogent_em100_media_probe,
    tulip_media_select,
    tulip_null_media_poll,
    tulip_2114x_media_preset
};

/*
 * Media probe for the ZNYX ZX34X: sample the link/symbol/signal-detect
 * GP bits for up to a second to pick 10 vs 100 Mb/s.
 */
static void
tulip_21140_znyx_zx34x_media_probe(
    tulip_softc_t * const sc)
{
    tulip_media_info_t *mip = sc->tulip_mediainfo;
    int cnt10 = 0, cnt100 = 0, idx;

    TULIP_LOCK_ASSERT(sc);
    sc->tulip_gpinit = TULIP_GP_ZX34X_PINS;
    sc->tulip_gpdata = TULIP_GP_ZX34X_INIT;
    TULIP_CSR_WRITE(sc, csr_gp, TULIP_GP_ZX34X_PINS);
    TULIP_CSR_WRITE(sc, csr_gp, TULIP_GP_ZX34X_INIT);
    TULIP_CSR_WRITE(sc, csr_command,
	TULIP_CSR_READ(sc, csr_command) | TULIP_CMD_PORTSELECT |
	TULIP_CMD_PCSFUNCTION | TULIP_CMD_SCRAMBLER | TULIP_CMD_MUSTBEONE);
    TULIP_CSR_WRITE(sc, csr_command,
	TULIP_CSR_READ(sc, csr_command) & ~TULIP_CMD_TXTHRSHLDCTL);

    DELAY(200000);
    for (idx = 1000; idx > 0; idx--) {
	u_int32_t csr = TULIP_CSR_READ(sc, csr_gp);
	if ((csr & (TULIP_GP_ZX34X_LNKFAIL|TULIP_GP_ZX34X_SYMDET|TULIP_GP_ZX34X_SIGDET))
		== (TULIP_GP_ZX34X_LNKFAIL|TULIP_GP_ZX34X_SYMDET|TULIP_GP_ZX34X_SIGDET)) {
	    /* Need > 100 consecutive good reads before trusting either. */
	    if (++cnt100 > 100)
		break;
	} else if ((csr & TULIP_GP_ZX34X_LNKFAIL) == 0) {
	    if (++cnt10 > 100)
		break;
	} else {
	    cnt10 = 0;
	    cnt100 = 0;
	}
	DELAY(1000);
    }
    sc->tulip_media = cnt100 > 100 ?
	TULIP_MEDIA_100BASETX : TULIP_MEDIA_10BASET;
    tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_10BASET,
			  TULIP_GP_ZX34X_INIT,
			  TULIP_CMD_TXTHRSHLDCTL);
    tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_10BASET_FD,
			  TULIP_GP_ZX34X_INIT,
			  TULIP_CMD_TXTHRSHLDCTL|TULIP_CMD_FULLDUPLEX);
    tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_100BASETX,
			  TULIP_GP_ZX34X_INIT,
			  TULIP_CMD_PORTSELECT|TULIP_CMD_PCSFUNCTION
			      |TULIP_CMD_SCRAMBLER);
    tulip_21140_mediainit(sc, mip++, TULIP_MEDIA_100BASETX_FD,
			  TULIP_GP_ZX34X_INIT,
			  TULIP_CMD_PORTSELECT|TULIP_CMD_PCSFUNCTION
			      |TULIP_CMD_SCRAMBLER|TULIP_CMD_FULLDUPLEX);
}

/* Dispatch table for the ZNYX ZX34X. */
static const tulip_boardsw_t tulip_21140_znyx_zx34x_boardsw = {
    TULIP_21140_ZNYX_ZX34X,
    tulip_21140_znyx_zx34x_media_probe,
    tulip_media_select,
    tulip_null_media_poll,
    tulip_2114x_media_preset,
};

/*
 * Generic 2114x (new SROM format) probe: just set the mandatory
 * command bits; the SROM decode supplies the media tables.
 */
static void
tulip_2114x_media_probe(
    tulip_softc_t * const sc)
{
    TULIP_LOCK_ASSERT(sc);
    sc->tulip_cmdmode |= TULIP_CMD_MUSTBEONE
	|TULIP_CMD_BACKOFFCTR|TULIP_CMD_THRSHLD72;
}

static const tulip_boardsw_t tulip_2114x_isv_boardsw = {
    TULIP_21140_ISV,
    tulip_2114x_media_probe,
    tulip_media_select,
    tulip_media_poll,
    tulip_2114x_media_preset,
};

/*
 * ******** END of chip-specific handlers. ***********
 */

/*
 * Code the read the SROM and MII bit streams (I2C)
 */
#define EMIT    do { TULIP_CSR_WRITE(sc, csr_srom_mii, csr); DELAY(1); } while (0)

/*
 * Clock the serial EEPROM (SROM) back to its idle state.
 */
static void
tulip_srom_idle(
    tulip_softc_t * const sc)
{
    unsigned bit, csr;

    csr  = SROMSEL ; EMIT;
    csr  = SROMSEL | SROMRD; EMIT;
    csr ^= SROMCS; EMIT;
    csr ^= SROMCLKON; EMIT;

    /*
     * Write 25 cycles of 0 which will force the SROM to be idle.
     */
    for (bit = 3 + SROM_BITWIDTH + 16; bit > 0; bit--) {
	csr ^= SROMCLKOFF; EMIT;	/* clock low; data not valid */
	csr ^= SROMCLKON; EMIT;		/* clock high; data valid */
    }
    csr ^= SROMCLKOFF; EMIT;
    csr ^= SROMCS; EMIT;
    csr  = 0; EMIT;
}

/*
 * Bit-bang the entire SROM contents into sc->tulip_rombuf
 * (stored as little-endian 16-bit words).
 */
static void
tulip_srom_read(
    tulip_softc_t * const sc)
{
    unsigned idx;
    const unsigned bitwidth = SROM_BITWIDTH;
    const unsigned cmdmask = (SROMCMD_RD << bitwidth);
    const unsigned msb = 1 << (bitwidth + 3 - 1);
    unsigned lastidx = (1 << bitwidth) - 1;

    tulip_srom_idle(sc);

    for (idx = 0; idx <= lastidx; idx++) {
	unsigned lastbit, data, bits, bit, csr;
	csr  = SROMSEL ; EMIT;
	csr  = SROMSEL | SROMRD; EMIT;
	csr ^= SROMCSON; EMIT;
	csr ^= SROMCLKON; EMIT;

	/* Shift out the read command plus the word address, MSB first. */
	lastbit = 0;
	for (bits = idx|cmdmask, bit = bitwidth + 3; bit > 0; bit--, bits <<= 1) {
	    const unsigned thisbit = bits & msb;
	    csr ^= SROMCLKOFF; EMIT;	/* clock low; data not valid */
	    if (thisbit != lastbit) {
		csr ^= SROMDOUT; EMIT;	/* clock low; invert data */
	    } else {
		EMIT;
	    }
	    csr ^= SROMCLKON; EMIT;	/* clock high; data valid */
	    lastbit = thisbit;
	}
	csr ^= SROMCLKOFF; EMIT;

	/* Shift in the 16 data bits. */
	for (data = 0, bits = 0; bits < 16; bits++) {
	    data <<= 1;
	    csr ^= SROMCLKON; EMIT;	/* clock high; data valid */
	    data |= TULIP_CSR_READ(sc, csr_srom_mii) & SROMDIN ? 1 : 0;
	    csr ^= SROMCLKOFF; EMIT;	/* clock low; data not valid */
	}
	sc->tulip_rombuf[idx*2] = data & 0xFF;
	sc->tulip_rombuf[idx*2+1] = data >> 8;
	csr  = SROMSEL | SROMRD; EMIT;
	csr  = 0; EMIT;
    }
    tulip_srom_idle(sc);
}

#define MII_EMIT    do { TULIP_CSR_WRITE(sc, csr_srom_mii, csr); DELAY(1); } while (0)

/*
 * Shift 'bits' bits of 'data' out on the MII management interface,
 * MSB first.
 */
static void
tulip_mii_writebits(
    tulip_softc_t * const sc,
    unsigned data,
    unsigned bits)
{
    unsigned msb = 1 << (bits - 1);
    unsigned csr = TULIP_CSR_READ(sc, csr_srom_mii) & (MII_RD|MII_DOUT|MII_CLK);
    unsigned lastbit = (csr & MII_DOUT) ?
	msb : 0;

    TULIP_LOCK_ASSERT(sc);
    csr |= MII_WR; MII_EMIT;		/* clock low; assert write */

    for (; bits > 0; bits--, data <<= 1) {
	const unsigned thisbit = data & msb;
	if (thisbit != lastbit) {
	    csr ^= MII_DOUT; MII_EMIT;	/* clock low; invert data */
	}
	csr ^= MII_CLKON; MII_EMIT;	/* clock high; data valid */
	lastbit = thisbit;
	csr ^= MII_CLKOFF; MII_EMIT;	/* clock low; data not valid */
    }
}

/*
 * Issue the turnaround cycles between the command/address phase and
 * the data phase of an MII management transaction.
 */
static void
tulip_mii_turnaround(
    tulip_softc_t * const sc,
    unsigned cmd)
{
    unsigned csr = TULIP_CSR_READ(sc, csr_srom_mii) & (MII_RD|MII_DOUT|MII_CLK);

    TULIP_LOCK_ASSERT(sc);
    if (cmd == MII_WRCMD) {
	csr |= MII_DOUT; MII_EMIT;	/* clock low; change data */
	csr ^= MII_CLKON; MII_EMIT;	/* clock high; data valid */
	csr ^= MII_CLKOFF; MII_EMIT;	/* clock low; data not valid */
	csr ^= MII_DOUT; MII_EMIT;	/* clock low; change data */
    } else {
	csr |= MII_RD; MII_EMIT;	/* clock low; switch to read */
    }
    csr ^= MII_CLKON; MII_EMIT;		/* clock high; data valid */
    csr ^= MII_CLKOFF; MII_EMIT;	/* clock low; data not valid */
}

/*
 * Shift in and return the 16-bit data phase of an MII read.
 */
static unsigned
tulip_mii_readbits(
    tulip_softc_t * const sc)
{
    unsigned data;
    unsigned csr = TULIP_CSR_READ(sc, csr_srom_mii) & (MII_RD|MII_DOUT|MII_CLK);
    int idx;

    TULIP_LOCK_ASSERT(sc);
    for (idx = 0, data = 0; idx < 16; idx++) {
	data <<= 1;	/* this is NOOP on the first pass through */
	csr ^= MII_CLKON; MII_EMIT;	/* clock high; data valid */
	if (TULIP_CSR_READ(sc, csr_srom_mii) & MII_DIN)
	    data |= 1;
	csr ^= MII_CLKOFF; MII_EMIT;	/* clock low; data not valid */
    }
    csr ^= MII_RD; MII_EMIT;		/* clock low; turn off read */

    return data;
}

/*
 * Read MII register 'regno' of PHY 'devaddr'.
 */
static unsigned
tulip_mii_readreg(
    tulip_softc_t * const sc,
    unsigned devaddr,
    unsigned regno)
{
    unsigned csr = TULIP_CSR_READ(sc, csr_srom_mii) & (MII_RD|MII_DOUT|MII_CLK);
    unsigned data;

    TULIP_LOCK_ASSERT(sc);
    csr &= ~(MII_RD|MII_CLK); MII_EMIT;
    tulip_mii_writebits(sc, MII_PREAMBLE, 32);
    tulip_mii_writebits(sc, MII_RDCMD, 8);
    tulip_mii_writebits(sc, devaddr, 5);
    tulip_mii_writebits(sc, regno, 5);
    tulip_mii_turnaround(sc, MII_RDCMD);

    data =
	tulip_mii_readbits(sc);
#if defined(TULIP_DEBUG)
    sc->tulip_dbg.dbg_phyregs[regno][0] = data;
    sc->tulip_dbg.dbg_phyregs[regno][1]++;
#endif
    return data;
}

/*
 * Write 'data' to MII register 'regno' of PHY 'devaddr'.
 */
static void
tulip_mii_writereg(
    tulip_softc_t * const sc,
    unsigned devaddr,
    unsigned regno,
    unsigned data)
{
    unsigned csr = TULIP_CSR_READ(sc, csr_srom_mii) & (MII_RD|MII_DOUT|MII_CLK);

    TULIP_LOCK_ASSERT(sc);
    csr &= ~(MII_RD|MII_CLK); MII_EMIT;
    tulip_mii_writebits(sc, MII_PREAMBLE, 32);
    tulip_mii_writebits(sc, MII_WRCMD, 8);
    tulip_mii_writebits(sc, devaddr, 5);
    tulip_mii_writebits(sc, regno, 5);
    tulip_mii_turnaround(sc, MII_WRCMD);
    tulip_mii_writebits(sc, data, 16);
#if defined(TULIP_DEBUG)
    sc->tulip_dbg.dbg_phyregs[regno][2] = data;
    sc->tulip_dbg.dbg_phyregs[regno][3]++;
#endif
}

/* Hash a multicast address into the 512-bit setup filter. */
#define	tulip_mchash(mca)	(ether_crc32_le(mca, 6) & 0x1FF)
/* Validate the SROM checksum (CRC-32 over the first 126 bytes). */
#define	tulip_srom_crcok(databuf)	( \
    ((ether_crc32_le(databuf, 126) & 0xFFFFU) ^ 0xFFFFU) == \
     ((databuf)[126] | ((databuf)[127] << 8)))

/*
 * Board-id string for genuine DEC boards (DE450/DE500, from the SROM).
 */
static void
tulip_identify_dec_nic(
    tulip_softc_t * const sc)
{
    TULIP_LOCK_ASSERT(sc);
    strcpy(sc->tulip_boardid, "DEC ");

#define D0	4
    if (sc->tulip_chipid <= TULIP_21040)
	return;
    if (bcmp(sc->tulip_rombuf + 29, "DE500", 5) == 0
	|| bcmp(sc->tulip_rombuf + 29, "DE450", 5) == 0) {
	bcopy(sc->tulip_rombuf + 29, &sc->tulip_boardid[D0], 8);
	sc->tulip_boardid[D0+8] = ' ';
    }
#undef D0
}

/*
 * Identify ZNYX boards from their SROM id blocks (or, failing that,
 * from the MAC address range) and pick the matching board switch.
 */
static void
tulip_identify_znyx_nic(
    tulip_softc_t * const sc)
{
    unsigned id = 0;

    TULIP_LOCK_ASSERT(sc);
    strcpy(sc->tulip_boardid, "ZNYX ZX3XX ");
    if (sc->tulip_chipid == TULIP_21140 || sc->tulip_chipid == TULIP_21140A) {
	unsigned znyx_ptr;
	sc->tulip_boardid[8] = '4';
	znyx_ptr = sc->tulip_rombuf[124] + 256 * sc->tulip_rombuf[125];
	if (znyx_ptr < 26 || znyx_ptr > 116) {
	    /* Pointer out of range: fall back to the generic ZX34X. */
	    sc->tulip_boardsw = &tulip_21140_znyx_zx34x_boardsw;
	    return;
	}
	/* ZX344 = 0010 ..
	   0013FF */
	if (sc->tulip_rombuf[znyx_ptr] == 0x4A
		&& sc->tulip_rombuf[znyx_ptr + 1] == 0x52
		&& sc->tulip_rombuf[znyx_ptr + 2] == 0x01) {
	    /* "JR\x01" signature found: decode the 16-bit board id. */
	    id = sc->tulip_rombuf[znyx_ptr + 5]
		+ 256 * sc->tulip_rombuf[znyx_ptr + 4];
	    if ((id >> 8) == (TULIP_ZNYX_ID_ZX342 >> 8)) {
		sc->tulip_boardid[9] = '2';
		if (id == TULIP_ZNYX_ID_ZX342B) {
		    sc->tulip_boardid[10] = 'B';
		    sc->tulip_boardid[11] = ' ';
		}
		sc->tulip_boardsw = &tulip_21140_znyx_zx34x_boardsw;
	    } else if (id == TULIP_ZNYX_ID_ZX344) {
		sc->tulip_boardid[10] = '4';
		sc->tulip_boardsw = &tulip_21140_znyx_zx34x_boardsw;
	    } else if (id == TULIP_ZNYX_ID_ZX345) {
		sc->tulip_boardid[9] = (sc->tulip_rombuf[19] > 1) ? '8' : '5';
	    } else if (id == TULIP_ZNYX_ID_ZX346) {
		sc->tulip_boardid[9] = '6';
	    } else if (id == TULIP_ZNYX_ID_ZX351) {
		sc->tulip_boardid[8] = '5';
		sc->tulip_boardid[9] = '1';
	    }
	}
	if (id == 0) {
	    /*
	     * Assume it's a ZX342...
	     */
	    sc->tulip_boardsw = &tulip_21140_znyx_zx34x_boardsw;
	}
	return;
    }
    sc->tulip_boardid[8] = '1';
    if (sc->tulip_chipid == TULIP_21041) {
	sc->tulip_boardid[10] = '1';
	return;
    }
    /* 21040 boards: look for the "JR" id block at a fixed offset. */
    if (sc->tulip_rombuf[32] == 0x4A && sc->tulip_rombuf[33] == 0x52) {
	id = sc->tulip_rombuf[37] + 256 * sc->tulip_rombuf[36];
	if (id == TULIP_ZNYX_ID_ZX312T) {
	    sc->tulip_boardid[9] = '2';
	    sc->tulip_boardid[10] = 'T';
	    sc->tulip_boardid[11] = ' ';
	    sc->tulip_boardsw = &tulip_21040_10baset_only_boardsw;
	} else if (id == TULIP_ZNYX_ID_ZX314_INTA) {
	    sc->tulip_boardid[9] = '4';
	    sc->tulip_boardsw = &tulip_21040_10baset_only_boardsw;
	    sc->tulip_features |= TULIP_HAVE_SHAREDINTR|TULIP_HAVE_BASEROM;
	} else if (id == TULIP_ZNYX_ID_ZX314) {
	    sc->tulip_boardid[9] = '4';
	    sc->tulip_boardsw = &tulip_21040_10baset_only_boardsw;
	    sc->tulip_features |= TULIP_HAVE_BASEROM;
	} else if (id == TULIP_ZNYX_ID_ZX315_INTA) {
	    sc->tulip_boardid[9] = '5';
	    sc->tulip_features |= TULIP_HAVE_SHAREDINTR|TULIP_HAVE_BASEROM;
	} else if (id == TULIP_ZNYX_ID_ZX315) {
	    sc->tulip_boardid[9] = '5';
	    sc->tulip_features |= TULIP_HAVE_BASEROM;
	} else {
	    id = 0;
	}
    }
    if (id == 0) {
	if
	    /* No SROM id block: guess the model from the MAC address range. */
	    ((sc->tulip_enaddr[3] & ~3) == 0xF0 && (sc->tulip_enaddr[5] & 2) == 0) {
	    sc->tulip_boardid[9] = '4';
	    sc->tulip_boardsw = &tulip_21040_10baset_only_boardsw;
	    sc->tulip_features |= TULIP_HAVE_SHAREDINTR|TULIP_HAVE_BASEROM;
	} else if ((sc->tulip_enaddr[3] & ~3) == 0xF4 && (sc->tulip_enaddr[5] & 1) == 0) {
	    sc->tulip_boardid[9] = '5';
	    sc->tulip_boardsw = &tulip_21040_boardsw;
	    sc->tulip_features |= TULIP_HAVE_SHAREDINTR|TULIP_HAVE_BASEROM;
	} else if ((sc->tulip_enaddr[3] & ~3) == 0xEC) {
	    sc->tulip_boardid[9] = '2';
	    sc->tulip_boardsw = &tulip_21040_boardsw;
	}
    }
}

/*
 * Identify SMC boards: 9332/9334 (21140) via feature flags, or decode
 * the 8432 family id words from a 21040 SROM.
 */
static void
tulip_identify_smc_nic(
    tulip_softc_t * const sc)
{
    u_int32_t id1, id2, ei;
    int auibnc = 0, utp = 0;
    char *cp;

    TULIP_LOCK_ASSERT(sc);
    strcpy(sc->tulip_boardid, "SMC ");
    if (sc->tulip_chipid == TULIP_21041)
	return;
    if (sc->tulip_chipid != TULIP_21040) {
	if (sc->tulip_boardsw != &tulip_2114x_isv_boardsw) {
	    strcpy(&sc->tulip_boardid[4], "9332DST ");
	    sc->tulip_boardsw = &tulip_21140_smc9332_boardsw;
	} else if (sc->tulip_features & (TULIP_HAVE_BASEROM|TULIP_HAVE_SLAVEDROM)) {
	    strcpy(&sc->tulip_boardid[4], "9334BDT ");
	} else {
	    strcpy(&sc->tulip_boardid[4], "9332BDT ");
	}
	return;
    }
    id1 = sc->tulip_rombuf[0x60] | (sc->tulip_rombuf[0x61] << 8);
    id2 = sc->tulip_rombuf[0x62] | (sc->tulip_rombuf[0x63] << 8);
    ei  = sc->tulip_rombuf[0x66] | (sc->tulip_rombuf[0x67] << 8);

    strcpy(&sc->tulip_boardid[4], "8432");
    cp = &sc->tulip_boardid[8];
    /* Decode model suffix letters and remember which ports exist. */
    if ((id1 & 1) == 0)
	*cp++ = 'B', auibnc = 1;
    if ((id1 & 0xFF) > 0x32)
	*cp++ = 'T', utp = 1;
    if ((id1 & 0x4000) == 0)
	*cp++ = 'A', auibnc = 1;
    if (id2 == 0x15) {
	sc->tulip_boardid[7] = '4';
	*cp++ = '-';
	*cp++ = 'C';
	*cp++ = 'H';
	*cp++ = (ei ?
	    '2' : '1');
    }
    *cp++ = ' ';
    *cp = '\0';
    /* Pick a restricted board switch when only one port type exists. */
    if (utp && !auibnc)
	sc->tulip_boardsw = &tulip_21040_10baset_only_boardsw;
    else if (!utp && auibnc)
	sc->tulip_boardsw = &tulip_21040_auibnc_only_boardsw;
}

/*
 * Identify Cogent (Adaptec) boards from the SROM id byte and note the
 * shared-interrupt quirk of the quad-port EM440TX.
 */
static void
tulip_identify_cogent_nic(
    tulip_softc_t * const sc)
{
    TULIP_LOCK_ASSERT(sc);
    strcpy(sc->tulip_boardid, "Cogent ");
    if (sc->tulip_chipid == TULIP_21140 || sc->tulip_chipid == TULIP_21140A) {
	if (sc->tulip_rombuf[32] == TULIP_COGENT_EM100TX_ID) {
	    strcat(sc->tulip_boardid, "EM100TX ");
	    sc->tulip_boardsw = &tulip_21140_cogent_em100_boardsw;
#if defined(TULIP_COGENT_EM110TX_ID)
	} else if (sc->tulip_rombuf[32] == TULIP_COGENT_EM110TX_ID) {
	    strcat(sc->tulip_boardid, "EM110TX ");
	    sc->tulip_boardsw = &tulip_21140_cogent_em100_boardsw;
#endif
	} else if (sc->tulip_rombuf[32] == TULIP_COGENT_EM100FX_ID) {
	    strcat(sc->tulip_boardid, "EM100FX ");
	    sc->tulip_boardsw = &tulip_21140_cogent_em100_boardsw;
	}
	/*
	 * Magic number (0x24001109U) is the SubVendor (0x2400) and
	 * SubDevId (0x1109) for the ANA6944TX (EM440TX).
	 */
	if (*(u_int32_t *) sc->tulip_rombuf == 0x24001109U
		&& (sc->tulip_features & TULIP_HAVE_BASEROM)) {
	    /*
	     * Cogent (Adaptec) is still mapping all INTs to INTA of
	     * first 21140.  Dumb!  Dumb!
	     */
	    strcat(sc->tulip_boardid, "EM440TX ");
	    sc->tulip_features |= TULIP_HAVE_SHAREDINTR;
	}
    } else if (sc->tulip_chipid == TULIP_21040) {
	sc->tulip_features |= TULIP_HAVE_SHAREDINTR|TULIP_HAVE_BASEROM;
    }
}

/*
 * Identify Accton boards and select the board switch by chip type.
 */
static void
tulip_identify_accton_nic(
    tulip_softc_t * const sc)
{
    TULIP_LOCK_ASSERT(sc);
    strcpy(sc->tulip_boardid, "ACCTON ");
    switch (sc->tulip_chipid) {
	case TULIP_21140A:
	    strcat(sc->tulip_boardid, "EN1207 ");
	    if (sc->tulip_boardsw != &tulip_2114x_isv_boardsw)
		sc->tulip_boardsw = &tulip_21140_accton_boardsw;
	    break;
	case TULIP_21140:
	    strcat(sc->tulip_boardid, "EN1207TX ");
	    if (sc->tulip_boardsw != &tulip_2114x_isv_boardsw)
		sc->tulip_boardsw = &tulip_21140_eb_boardsw;
	    break;
	case TULIP_21040:
	    strcat(sc->tulip_boardid, "EN1203 ");
	    sc->tulip_boardsw = &tulip_21040_boardsw;
	    break;
	case TULIP_21041:
	    strcat(sc->tulip_boardid, "EN1203 ");
	    sc->tulip_boardsw = &tulip_21041_boardsw;
	    break;
	default:
	    sc->tulip_boardsw = &tulip_2114x_isv_boardsw;
	    break;
    }
}

/*
 * Identify Asante boards; 21140 variants may lack a new-format SROM,
 * so synthesize the MII media info by probing the PHY directly.
 */
static void
tulip_identify_asante_nic(
    tulip_softc_t * const sc)
{
    TULIP_LOCK_ASSERT(sc);
    strcpy(sc->tulip_boardid, "Asante ");
    if ((sc->tulip_chipid == TULIP_21140 || sc->tulip_chipid == TULIP_21140A)
	    && sc->tulip_boardsw != &tulip_2114x_isv_boardsw) {
	tulip_media_info_t *mi = sc->tulip_mediainfo;
	int idx;
	/*
	 * The Asante Fast Ethernet doesn't always ship with a valid
	 * new format SROM.  So if isn't in the new format, we cheat
	 * set it up as if we had.
*/ sc->tulip_gpinit = TULIP_GP_ASANTE_PINS; sc->tulip_gpdata = 0; TULIP_CSR_WRITE(sc, csr_gp, TULIP_GP_ASANTE_PINS|TULIP_GP_PINSET); TULIP_CSR_WRITE(sc, csr_gp, TULIP_GP_ASANTE_PHYRESET); DELAY(100); TULIP_CSR_WRITE(sc, csr_gp, 0); mi->mi_type = TULIP_MEDIAINFO_MII; mi->mi_gpr_length = 0; mi->mi_gpr_offset = 0; mi->mi_reset_length = 0; mi->mi_reset_offset = 0;; mi->mi_phyaddr = TULIP_MII_NOPHY; for (idx = 20; idx > 0 && mi->mi_phyaddr == TULIP_MII_NOPHY; idx--) { DELAY(10000); mi->mi_phyaddr = tulip_mii_get_phyaddr(sc, 0); } if (mi->mi_phyaddr == TULIP_MII_NOPHY) { if_printf(sc->tulip_ifp, "can't find phy 0\n"); return; } sc->tulip_features |= TULIP_HAVE_MII; mi->mi_capabilities = PHYSTS_10BASET|PHYSTS_10BASET_FD|PHYSTS_100BASETX|PHYSTS_100BASETX_FD; mi->mi_advertisement = PHYSTS_10BASET|PHYSTS_10BASET_FD|PHYSTS_100BASETX|PHYSTS_100BASETX_FD; mi->mi_full_duplex = PHYSTS_10BASET_FD|PHYSTS_100BASETX_FD; mi->mi_tx_threshold = PHYSTS_10BASET|PHYSTS_10BASET_FD; TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 100BASETX_FD); TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 100BASETX); TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 100BASET4); TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 10BASET_FD); TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 10BASET); mi->mi_phyid = (tulip_mii_readreg(sc, mi->mi_phyaddr, PHYREG_IDLOW) << 16) | tulip_mii_readreg(sc, mi->mi_phyaddr, PHYREG_IDHIGH); sc->tulip_boardsw = &tulip_2114x_isv_boardsw; } } static void tulip_identify_compex_nic( tulip_softc_t * const sc) { TULIP_LOCK_ASSERT(sc); strcpy(sc->tulip_boardid, "COMPEX "); if (sc->tulip_chipid == TULIP_21140A) { int root_unit; tulip_softc_t *root_sc = NULL; strcat(sc->tulip_boardid, "400TX/PCI "); /* * All 4 chips on these boards share an interrupt. This code * copied from tulip_read_macaddr. 
*/ sc->tulip_features |= TULIP_HAVE_SHAREDINTR; for (root_unit = sc->tulip_unit - 1; root_unit >= 0; root_unit--) { root_sc = tulips[root_unit]; if (root_sc == NULL || !(root_sc->tulip_features & TULIP_HAVE_SLAVEDINTR)) break; root_sc = NULL; } if (root_sc != NULL && root_sc->tulip_chipid == sc->tulip_chipid && root_sc->tulip_pci_busno == sc->tulip_pci_busno) { sc->tulip_features |= TULIP_HAVE_SLAVEDINTR; sc->tulip_slaves = root_sc->tulip_slaves; root_sc->tulip_slaves = sc; } else if(sc->tulip_features & TULIP_HAVE_SLAVEDINTR) { printf("\nCannot find master device for %s interrupts", sc->tulip_ifp->if_xname); } } else { strcat(sc->tulip_boardid, "unknown "); } /* sc->tulip_boardsw = &tulip_21140_eb_boardsw; */ return; } static int tulip_srom_decode( tulip_softc_t * const sc) { unsigned idx1, idx2, idx3; const tulip_srom_header_t *shp = (const tulip_srom_header_t *) &sc->tulip_rombuf[0]; const tulip_srom_adapter_info_t *saip = (const tulip_srom_adapter_info_t *) (shp + 1); tulip_srom_media_t srom_media; tulip_media_info_t *mi = sc->tulip_mediainfo; const u_int8_t *dp; u_int32_t leaf_offset, blocks, data; TULIP_LOCK_ASSERT(sc); for (idx1 = 0; idx1 < shp->sh_adapter_count; idx1++, saip++) { if (shp->sh_adapter_count == 1) break; if (saip->sai_device == sc->tulip_pci_devno) break; } /* * Didn't find the right media block for this card. */ if (idx1 == shp->sh_adapter_count) return 0; /* * Save the hardware address. */ bcopy(shp->sh_ieee802_address, sc->tulip_enaddr, 6); /* * If this is a multiple port card, add the adapter index to the last * byte of the hardware address. (if it isn't multiport, adding 0 * won't hurt. 
*/ sc->tulip_enaddr[5] += idx1; leaf_offset = saip->sai_leaf_offset_lowbyte + saip->sai_leaf_offset_highbyte * 256; dp = sc->tulip_rombuf + leaf_offset; sc->tulip_conntype = (tulip_srom_connection_t) (dp[0] + dp[1] * 256); dp += 2; for (idx2 = 0;; idx2++) { if (tulip_srom_conninfo[idx2].sc_type == sc->tulip_conntype || tulip_srom_conninfo[idx2].sc_type == TULIP_SROM_CONNTYPE_NOT_USED) break; } sc->tulip_connidx = idx2; if (sc->tulip_chipid == TULIP_21041) { blocks = *dp++; for (idx2 = 0; idx2 < blocks; idx2++) { tulip_media_t media; data = *dp++; srom_media = (tulip_srom_media_t) (data & 0x3F); for (idx3 = 0; tulip_srom_mediums[idx3].sm_type != TULIP_MEDIA_UNKNOWN; idx3++) { if (tulip_srom_mediums[idx3].sm_srom_type == srom_media) break; } media = tulip_srom_mediums[idx3].sm_type; if (media != TULIP_MEDIA_UNKNOWN) { if (data & TULIP_SROM_21041_EXTENDED) { mi->mi_type = TULIP_MEDIAINFO_SIA; sc->tulip_mediums[media] = mi; mi->mi_sia_connectivity = dp[0] + dp[1] * 256; mi->mi_sia_tx_rx = dp[2] + dp[3] * 256; mi->mi_sia_general = dp[4] + dp[5] * 256; mi++; } else { switch (media) { case TULIP_MEDIA_BNC: { TULIP_MEDIAINFO_SIA_INIT(sc, mi, 21041, BNC); mi++; break; } case TULIP_MEDIA_AUI: { TULIP_MEDIAINFO_SIA_INIT(sc, mi, 21041, AUI); mi++; break; } case TULIP_MEDIA_10BASET: { TULIP_MEDIAINFO_SIA_INIT(sc, mi, 21041, 10BASET); mi++; break; } case TULIP_MEDIA_10BASET_FD: { TULIP_MEDIAINFO_SIA_INIT(sc, mi, 21041, 10BASET_FD); mi++; break; } default: { break; } } } } if (data & TULIP_SROM_21041_EXTENDED) dp += 6; } #ifdef notdef if (blocks == 0) { TULIP_MEDIAINFO_SIA_INIT(sc, mi, 21041, BNC); mi++; TULIP_MEDIAINFO_SIA_INIT(sc, mi, 21041, AUI); mi++; TULIP_MEDIAINFO_SIA_INIT(sc, mi, 21041, 10BASET); mi++; TULIP_MEDIAINFO_SIA_INIT(sc, mi, 21041, 10BASET_FD); mi++; } #endif } else { unsigned length, type; tulip_media_t gp_media = TULIP_MEDIA_UNKNOWN; if (sc->tulip_features & TULIP_HAVE_GPR) sc->tulip_gpinit = *dp++; blocks = *dp++; for (idx2 = 0; idx2 < blocks; idx2++) { const 
u_int8_t *ep; if ((*dp & 0x80) == 0) { length = 4; type = 0; } else { length = (*dp++ & 0x7f) - 1; type = *dp++ & 0x3f; } ep = dp + length; switch (type & 0x3f) { case 0: { /* 21140[A] GPR block */ tulip_media_t media; srom_media = (tulip_srom_media_t)(dp[0] & 0x3f); for (idx3 = 0; tulip_srom_mediums[idx3].sm_type != TULIP_MEDIA_UNKNOWN; idx3++) { if (tulip_srom_mediums[idx3].sm_srom_type == srom_media) break; } media = tulip_srom_mediums[idx3].sm_type; if (media == TULIP_MEDIA_UNKNOWN) break; mi->mi_type = TULIP_MEDIAINFO_GPR; sc->tulip_mediums[media] = mi; mi->mi_gpdata = dp[1]; if (media > gp_media && !TULIP_IS_MEDIA_FD(media)) { sc->tulip_gpdata = mi->mi_gpdata; gp_media = media; } data = dp[2] + dp[3] * 256; mi->mi_cmdmode = TULIP_SROM_2114X_CMDBITS(data); if (data & TULIP_SROM_2114X_NOINDICATOR) { mi->mi_actmask = 0; } else { #if 0 mi->mi_default = (data & TULIP_SROM_2114X_DEFAULT) != 0; #endif mi->mi_actmask = TULIP_SROM_2114X_BITPOS(data); mi->mi_actdata = (data & TULIP_SROM_2114X_POLARITY) ? 0 : mi->mi_actmask; } mi++; break; } case 1: { /* 21140[A] MII block */ const unsigned phyno = *dp++; mi->mi_type = TULIP_MEDIAINFO_MII; mi->mi_gpr_length = *dp++; mi->mi_gpr_offset = dp - sc->tulip_rombuf; dp += mi->mi_gpr_length; mi->mi_reset_length = *dp++; mi->mi_reset_offset = dp - sc->tulip_rombuf; dp += mi->mi_reset_length; /* * Before we probe for a PHY, use the GPR information * to select it. If we don't, it may be inaccessible. */ TULIP_CSR_WRITE(sc, csr_gp, sc->tulip_gpinit|TULIP_GP_PINSET); for (idx3 = 0; idx3 < mi->mi_reset_length; idx3++) { DELAY(10); TULIP_CSR_WRITE(sc, csr_gp, sc->tulip_rombuf[mi->mi_reset_offset + idx3]); } sc->tulip_phyaddr = mi->mi_phyaddr; for (idx3 = 0; idx3 < mi->mi_gpr_length; idx3++) { DELAY(10); TULIP_CSR_WRITE(sc, csr_gp, sc->tulip_rombuf[mi->mi_gpr_offset + idx3]); } /* * At least write something! 
*/ if (mi->mi_reset_length == 0 && mi->mi_gpr_length == 0) TULIP_CSR_WRITE(sc, csr_gp, 0); mi->mi_phyaddr = TULIP_MII_NOPHY; for (idx3 = 20; idx3 > 0 && mi->mi_phyaddr == TULIP_MII_NOPHY; idx3--) { DELAY(10000); mi->mi_phyaddr = tulip_mii_get_phyaddr(sc, phyno); } if (mi->mi_phyaddr == TULIP_MII_NOPHY) { #if defined(TULIP_DEBUG) if_printf(sc->tulip_ifp, "can't find phy %d\n", phyno); #endif break; } sc->tulip_features |= TULIP_HAVE_MII; mi->mi_capabilities = dp[0] + dp[1] * 256; dp += 2; mi->mi_advertisement = dp[0] + dp[1] * 256; dp += 2; mi->mi_full_duplex = dp[0] + dp[1] * 256; dp += 2; mi->mi_tx_threshold = dp[0] + dp[1] * 256; dp += 2; TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 100BASETX_FD); TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 100BASETX); TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 100BASET4); TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 10BASET_FD); TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 10BASET); mi->mi_phyid = (tulip_mii_readreg(sc, mi->mi_phyaddr, PHYREG_IDLOW) << 16) | tulip_mii_readreg(sc, mi->mi_phyaddr, PHYREG_IDHIGH); mi++; break; } case 2: { /* 2114[23] SIA block */ tulip_media_t media; srom_media = (tulip_srom_media_t)(dp[0] & 0x3f); for (idx3 = 0; tulip_srom_mediums[idx3].sm_type != TULIP_MEDIA_UNKNOWN; idx3++) { if (tulip_srom_mediums[idx3].sm_srom_type == srom_media) break; } media = tulip_srom_mediums[idx3].sm_type; if (media == TULIP_MEDIA_UNKNOWN) break; mi->mi_type = TULIP_MEDIAINFO_SIA; sc->tulip_mediums[media] = mi; if (dp[0] & 0x40) { mi->mi_sia_connectivity = dp[1] + dp[2] * 256; mi->mi_sia_tx_rx = dp[3] + dp[4] * 256; mi->mi_sia_general = dp[5] + dp[6] * 256; dp += 6; } else { switch (media) { case TULIP_MEDIA_BNC: { TULIP_MEDIAINFO_SIA_INIT(sc, mi, 21142, BNC); break; } case TULIP_MEDIA_AUI: { TULIP_MEDIAINFO_SIA_INIT(sc, mi, 21142, AUI); break; } case TULIP_MEDIA_10BASET: { TULIP_MEDIAINFO_SIA_INIT(sc, mi, 21142, 10BASET); sc->tulip_intrmask |= TULIP_STS_LINKPASS|TULIP_STS_LINKFAIL; break; } case TULIP_MEDIA_10BASET_FD: { 
TULIP_MEDIAINFO_SIA_INIT(sc, mi, 21142, 10BASET_FD); sc->tulip_intrmask |= TULIP_STS_LINKPASS|TULIP_STS_LINKFAIL; break; } default: { goto bad_media; } } } mi->mi_sia_gp_control = (dp[1] + dp[2] * 256) << 16; mi->mi_sia_gp_data = (dp[3] + dp[4] * 256) << 16; mi++; bad_media: break; } case 3: { /* 2114[23] MII PHY block */ const unsigned phyno = *dp++; const u_int8_t *dp0; mi->mi_type = TULIP_MEDIAINFO_MII; mi->mi_gpr_length = *dp++; mi->mi_gpr_offset = dp - sc->tulip_rombuf; dp += 2 * mi->mi_gpr_length; mi->mi_reset_length = *dp++; mi->mi_reset_offset = dp - sc->tulip_rombuf; dp += 2 * mi->mi_reset_length; dp0 = &sc->tulip_rombuf[mi->mi_reset_offset]; for (idx3 = 0; idx3 < mi->mi_reset_length; idx3++, dp0 += 2) { DELAY(10); TULIP_CSR_WRITE(sc, csr_sia_general, (dp0[0] + 256 * dp0[1]) << 16); } sc->tulip_phyaddr = mi->mi_phyaddr; dp0 = &sc->tulip_rombuf[mi->mi_gpr_offset]; for (idx3 = 0; idx3 < mi->mi_gpr_length; idx3++, dp0 += 2) { DELAY(10); TULIP_CSR_WRITE(sc, csr_sia_general, (dp0[0] + 256 * dp0[1]) << 16); } if (mi->mi_reset_length == 0 && mi->mi_gpr_length == 0) TULIP_CSR_WRITE(sc, csr_sia_general, 0); mi->mi_phyaddr = TULIP_MII_NOPHY; for (idx3 = 20; idx3 > 0 && mi->mi_phyaddr == TULIP_MII_NOPHY; idx3--) { DELAY(10000); mi->mi_phyaddr = tulip_mii_get_phyaddr(sc, phyno); } if (mi->mi_phyaddr == TULIP_MII_NOPHY) { #if defined(TULIP_DEBUG) if_printf(sc->tulip_ifp, "can't find phy %d\n", phyno); #endif break; } sc->tulip_features |= TULIP_HAVE_MII; mi->mi_capabilities = dp[0] + dp[1] * 256; dp += 2; mi->mi_advertisement = dp[0] + dp[1] * 256; dp += 2; mi->mi_full_duplex = dp[0] + dp[1] * 256; dp += 2; mi->mi_tx_threshold = dp[0] + dp[1] * 256; dp += 2; mi->mi_mii_interrupt = dp[0] + dp[1] * 256; dp += 2; TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 100BASETX_FD); TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 100BASETX); TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 100BASET4); TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 10BASET_FD); TULIP_MEDIAINFO_ADD_CAPABILITY(sc, mi, 10BASET); 
mi->mi_phyid = (tulip_mii_readreg(sc, mi->mi_phyaddr, PHYREG_IDLOW) << 16) | tulip_mii_readreg(sc, mi->mi_phyaddr, PHYREG_IDHIGH); mi++; break; } case 4: { /* 21143 SYM block */ tulip_media_t media; srom_media = (tulip_srom_media_t) dp[0]; for (idx3 = 0; tulip_srom_mediums[idx3].sm_type != TULIP_MEDIA_UNKNOWN; idx3++) { if (tulip_srom_mediums[idx3].sm_srom_type == srom_media) break; } media = tulip_srom_mediums[idx3].sm_type; if (media == TULIP_MEDIA_UNKNOWN) break; mi->mi_type = TULIP_MEDIAINFO_SYM; sc->tulip_mediums[media] = mi; mi->mi_gpcontrol = (dp[1] + dp[2] * 256) << 16; mi->mi_gpdata = (dp[3] + dp[4] * 256) << 16; data = dp[5] + dp[6] * 256; mi->mi_cmdmode = TULIP_SROM_2114X_CMDBITS(data); if (data & TULIP_SROM_2114X_NOINDICATOR) { mi->mi_actmask = 0; } else { mi->mi_default = (data & TULIP_SROM_2114X_DEFAULT) != 0; mi->mi_actmask = TULIP_SROM_2114X_BITPOS(data); mi->mi_actdata = (data & TULIP_SROM_2114X_POLARITY) ? 0 : mi->mi_actmask; } if (TULIP_IS_MEDIA_TP(media)) sc->tulip_intrmask |= TULIP_STS_LINKPASS|TULIP_STS_LINKFAIL; mi++; break; } #if 0 case 5: { /* 21143 Reset block */ mi->mi_type = TULIP_MEDIAINFO_RESET; mi->mi_reset_length = *dp++; mi->mi_reset_offset = dp - sc->tulip_rombuf; dp += 2 * mi->mi_reset_length; mi++; break; } #endif default: { } } dp = ep; } } return mi - sc->tulip_mediainfo; } static const struct { void (*vendor_identify_nic)(tulip_softc_t * const sc); unsigned char vendor_oui[3]; } tulip_vendors[] = { { tulip_identify_dec_nic, { 0x08, 0x00, 0x2B } }, { tulip_identify_dec_nic, { 0x00, 0x00, 0xF8 } }, { tulip_identify_smc_nic, { 0x00, 0x00, 0xC0 } }, { tulip_identify_smc_nic, { 0x00, 0xE0, 0x29 } }, { tulip_identify_znyx_nic, { 0x00, 0xC0, 0x95 } }, { tulip_identify_cogent_nic, { 0x00, 0x00, 0x92 } }, { tulip_identify_asante_nic, { 0x00, 0x00, 0x94 } }, { tulip_identify_cogent_nic, { 0x00, 0x00, 0xD1 } }, { tulip_identify_accton_nic, { 0x00, 0x00, 0xE8 } }, { tulip_identify_compex_nic, { 0x00, 0x80, 0x48 } }, { NULL } }; /* * This 
deals with the vagaries of the address roms and the * brain-deadness that various vendors commit in using them. */ static int tulip_read_macaddr( tulip_softc_t * const sc) { unsigned cksum, rom_cksum, idx; u_int32_t csr; unsigned char tmpbuf[8]; static const u_char testpat[] = { 0xFF, 0, 0x55, 0xAA, 0xFF, 0, 0x55, 0xAA }; sc->tulip_connidx = TULIP_SROM_LASTCONNIDX; if (sc->tulip_chipid == TULIP_21040) { TULIP_CSR_WRITE(sc, csr_enetrom, 1); for (idx = 0; idx < sizeof(sc->tulip_rombuf); idx++) { int cnt = 0; while (((csr = TULIP_CSR_READ(sc, csr_enetrom)) & 0x80000000L) && cnt < 10000) cnt++; sc->tulip_rombuf[idx] = csr & 0xFF; } sc->tulip_boardsw = &tulip_21040_boardsw; } else { if (sc->tulip_chipid == TULIP_21041) { /* * Thankfully all 21041's act the same. */ sc->tulip_boardsw = &tulip_21041_boardsw; } else { /* * Assume all 21140 board are compatible with the * DEC 10/100 evaluation board. Not really valid but * it's the best we can do until every one switches to * the new SROM format. */ sc->tulip_boardsw = &tulip_21140_eb_boardsw; } tulip_srom_read(sc); if (tulip_srom_crcok(sc->tulip_rombuf)) { /* * SROM CRC is valid therefore it must be in the * new format. */ sc->tulip_features |= TULIP_HAVE_ISVSROM|TULIP_HAVE_OKSROM; } else if (sc->tulip_rombuf[126] == 0xff && sc->tulip_rombuf[127] == 0xFF) { /* * No checksum is present. See if the SROM id checks out; * the first 18 bytes should be 0 followed by a 1 followed * by the number of adapters (which we don't deal with yet). 
*/ for (idx = 0; idx < 18; idx++) { if (sc->tulip_rombuf[idx] != 0) break; } if (idx == 18 && sc->tulip_rombuf[18] == 1 && sc->tulip_rombuf[19] != 0) sc->tulip_features |= TULIP_HAVE_ISVSROM; } else if (sc->tulip_chipid >= TULIP_21142) { sc->tulip_features |= TULIP_HAVE_ISVSROM; sc->tulip_boardsw = &tulip_2114x_isv_boardsw; } if ((sc->tulip_features & TULIP_HAVE_ISVSROM) && tulip_srom_decode(sc)) { if (sc->tulip_chipid != TULIP_21041) sc->tulip_boardsw = &tulip_2114x_isv_boardsw; /* * If the SROM specifies more than one adapter, tag this as a * BASE rom. */ if (sc->tulip_rombuf[19] > 1) sc->tulip_features |= TULIP_HAVE_BASEROM; if (sc->tulip_boardsw == NULL) return -6; goto check_oui; } } if (bcmp(&sc->tulip_rombuf[0], &sc->tulip_rombuf[16], 8) != 0) { /* * Some folks don't use the standard ethernet rom format * but instead just put the address in the first 6 bytes * of the rom and let the rest be all 0xffs. (Can we say * ZNYX?) (well sometimes they put in a checksum so we'll * start at 8). */ for (idx = 8; idx < 32; idx++) { if (sc->tulip_rombuf[idx] != 0xFF) return -4; } /* * Make sure the address is not multicast or locally assigned * that the OUI is not 00-00-00. */ if ((sc->tulip_rombuf[0] & 3) != 0) return -4; if (sc->tulip_rombuf[0] == 0 && sc->tulip_rombuf[1] == 0 && sc->tulip_rombuf[2] == 0) return -4; bcopy(sc->tulip_rombuf, sc->tulip_enaddr, 6); sc->tulip_features |= TULIP_HAVE_OKROM; goto check_oui; } else { /* * A number of makers of multiport boards (ZNYX and Cogent) * only put on one address ROM on their 21040 boards. So * if the ROM is all zeros (or all 0xFFs), look at the * previous configured boards (as long as they are on the same * PCI bus and the bus number is non-zero) until we find the * master board with address ROM. We then use its address ROM * as the base for this board. (we add our relative board * to the last byte of its address). 
*/ for (idx = 0; idx < sizeof(sc->tulip_rombuf); idx++) { if (sc->tulip_rombuf[idx] != 0 && sc->tulip_rombuf[idx] != 0xFF) break; } if (idx == sizeof(sc->tulip_rombuf)) { int root_unit; tulip_softc_t *root_sc = NULL; for (root_unit = sc->tulip_unit - 1; root_unit >= 0; root_unit--) { root_sc = tulips[root_unit]; if (root_sc == NULL || (root_sc->tulip_features & (TULIP_HAVE_OKROM|TULIP_HAVE_SLAVEDROM)) == TULIP_HAVE_OKROM) break; root_sc = NULL; } if (root_sc != NULL && (root_sc->tulip_features & TULIP_HAVE_BASEROM) && root_sc->tulip_chipid == sc->tulip_chipid && root_sc->tulip_pci_busno == sc->tulip_pci_busno) { sc->tulip_features |= TULIP_HAVE_SLAVEDROM; sc->tulip_boardsw = root_sc->tulip_boardsw; strcpy(sc->tulip_boardid, root_sc->tulip_boardid); if (sc->tulip_boardsw->bd_type == TULIP_21140_ISV) { bcopy(root_sc->tulip_rombuf, sc->tulip_rombuf, sizeof(sc->tulip_rombuf)); if (!tulip_srom_decode(sc)) return -5; } else { bcopy(root_sc->tulip_enaddr, sc->tulip_enaddr, 6); sc->tulip_enaddr[5] += sc->tulip_unit - root_sc->tulip_unit; } /* * Now for a truly disgusting kludge: all 4 21040s on * the ZX314 share the same INTA line so the mapping * setup by the BIOS on the PCI bridge is worthless. * Rather than reprogramming the value in the config * register, we will handle this internally. */ if (root_sc->tulip_features & TULIP_HAVE_SHAREDINTR) { sc->tulip_slaves = root_sc->tulip_slaves; root_sc->tulip_slaves = sc; sc->tulip_features |= TULIP_HAVE_SLAVEDINTR; } return 0; } } } /* * This is the standard DEC address ROM test. 
*/ if (bcmp(&sc->tulip_rombuf[24], testpat, 8) != 0) return -3; tmpbuf[0] = sc->tulip_rombuf[15]; tmpbuf[1] = sc->tulip_rombuf[14]; tmpbuf[2] = sc->tulip_rombuf[13]; tmpbuf[3] = sc->tulip_rombuf[12]; tmpbuf[4] = sc->tulip_rombuf[11]; tmpbuf[5] = sc->tulip_rombuf[10]; tmpbuf[6] = sc->tulip_rombuf[9]; tmpbuf[7] = sc->tulip_rombuf[8]; if (bcmp(&sc->tulip_rombuf[0], tmpbuf, 8) != 0) return -2; bcopy(sc->tulip_rombuf, sc->tulip_enaddr, 6); cksum = *(u_int16_t *) &sc->tulip_enaddr[0]; cksum *= 2; if (cksum > 65535) cksum -= 65535; cksum += *(u_int16_t *) &sc->tulip_enaddr[2]; if (cksum > 65535) cksum -= 65535; cksum *= 2; if (cksum > 65535) cksum -= 65535; cksum += *(u_int16_t *) &sc->tulip_enaddr[4]; if (cksum >= 65535) cksum -= 65535; rom_cksum = *(u_int16_t *) &sc->tulip_rombuf[6]; if (cksum != rom_cksum) return -1; check_oui: /* * Check for various boards based on OUI. Did I say braindead? */ for (idx = 0; tulip_vendors[idx].vendor_identify_nic != NULL; idx++) { if (bcmp(sc->tulip_enaddr, tulip_vendors[idx].vendor_oui, 3) == 0) { (*tulip_vendors[idx].vendor_identify_nic)(sc); break; } } sc->tulip_features |= TULIP_HAVE_OKROM; return 0; } static void tulip_ifmedia_add( tulip_softc_t * const sc) { tulip_media_t media; int medias = 0; TULIP_LOCK_ASSERT(sc); for (media = TULIP_MEDIA_UNKNOWN; media < TULIP_MEDIA_MAX; media++) { if (sc->tulip_mediums[media] != NULL) { ifmedia_add(&sc->tulip_ifmedia, tulip_media_to_ifmedia[media], 0, 0); medias++; } } if (medias == 0) { sc->tulip_features |= TULIP_HAVE_NOMEDIA; ifmedia_add(&sc->tulip_ifmedia, IFM_ETHER | IFM_NONE, 0, 0); ifmedia_set(&sc->tulip_ifmedia, IFM_ETHER | IFM_NONE); } else if (sc->tulip_media == TULIP_MEDIA_UNKNOWN) { ifmedia_add(&sc->tulip_ifmedia, IFM_ETHER | IFM_AUTO, 0, 0); ifmedia_set(&sc->tulip_ifmedia, IFM_ETHER | IFM_AUTO); } else { ifmedia_set(&sc->tulip_ifmedia, tulip_media_to_ifmedia[sc->tulip_media]); sc->tulip_flags |= TULIP_PRINTMEDIA; tulip_linkup(sc, sc->tulip_media); } } static int 
tulip_ifmedia_change( struct ifnet * const ifp) { tulip_softc_t * const sc = (tulip_softc_t *)ifp->if_softc; TULIP_LOCK(sc); sc->tulip_flags |= TULIP_NEEDRESET; sc->tulip_probe_state = TULIP_PROBE_INACTIVE; sc->tulip_media = TULIP_MEDIA_UNKNOWN; if (IFM_SUBTYPE(sc->tulip_ifmedia.ifm_media) != IFM_AUTO) { tulip_media_t media; for (media = TULIP_MEDIA_UNKNOWN; media < TULIP_MEDIA_MAX; media++) { if (sc->tulip_mediums[media] != NULL && sc->tulip_ifmedia.ifm_media == tulip_media_to_ifmedia[media]) { sc->tulip_flags |= TULIP_PRINTMEDIA; sc->tulip_flags &= ~TULIP_DIDNWAY; tulip_linkup(sc, media); TULIP_UNLOCK(sc); return 0; } } } sc->tulip_flags &= ~(TULIP_TXPROBE_ACTIVE|TULIP_WANTRXACT); tulip_reset(sc); tulip_init(sc); TULIP_UNLOCK(sc); return 0; } /* * Media status callback */ static void tulip_ifmedia_status( struct ifnet * const ifp, struct ifmediareq *req) { tulip_softc_t *sc = (tulip_softc_t *)ifp->if_softc; TULIP_LOCK(sc); if (sc->tulip_media == TULIP_MEDIA_UNKNOWN) { TULIP_UNLOCK(sc); return; } req->ifm_status = IFM_AVALID; if (sc->tulip_flags & TULIP_LINKUP) req->ifm_status |= IFM_ACTIVE; req->ifm_active = tulip_media_to_ifmedia[sc->tulip_media]; TULIP_UNLOCK(sc); } static void tulip_addr_filter( tulip_softc_t * const sc) { struct ifmultiaddr *ifma; u_char *addrp; int multicnt; TULIP_LOCK_ASSERT(sc); sc->tulip_flags &= ~(TULIP_WANTHASHPERFECT|TULIP_WANTHASHONLY|TULIP_ALLMULTI); sc->tulip_flags |= TULIP_WANTSETUP|TULIP_WANTTXSTART; sc->tulip_cmdmode &= ~TULIP_CMD_RXRUN; sc->tulip_intrmask &= ~TULIP_STS_RXSTOPPED; #if defined(IFF_ALLMULTI) if (sc->tulip_ifp->if_flags & IFF_ALLMULTI) sc->tulip_flags |= TULIP_ALLMULTI ; #endif multicnt = 0; + IF_ADDR_LOCK(sc->tulip_ifp); TAILQ_FOREACH(ifma, &sc->tulip_ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family == AF_LINK) multicnt++; } if (multicnt > 14) { u_int32_t *sp = sc->tulip_setupdata; unsigned hash; /* * Some early passes of the 21140 have broken implementations of * hash-perfect mode. 
When we get too many multicasts for perfect * filtering with these chips, we need to switch into hash-only * mode (this is better than all-multicast on network with lots * of multicast traffic). */ if (sc->tulip_features & TULIP_HAVE_BROKEN_HASH) sc->tulip_flags |= TULIP_WANTHASHONLY; else sc->tulip_flags |= TULIP_WANTHASHPERFECT; /* * If we have more than 14 multicasts, we have * go into hash perfect mode (512 bit multicast * hash and one perfect hardware). */ bzero(sc->tulip_setupdata, sizeof(sc->tulip_setupdata)); TAILQ_FOREACH(ifma, &sc->tulip_ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; hash = tulip_mchash(LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); #if BYTE_ORDER == BIG_ENDIAN sp[hash >> 4] |= bswap32(1 << (hash & 0xF)); #else sp[hash >> 4] |= 1 << (hash & 0xF); #endif } /* * No reason to use a hash if we are going to be * receiving every multicast. */ if ((sc->tulip_flags & TULIP_ALLMULTI) == 0) { hash = tulip_mchash(sc->tulip_ifp->if_broadcastaddr); #if BYTE_ORDER == BIG_ENDIAN sp[hash >> 4] |= bswap32(1 << (hash & 0xF)); #else sp[hash >> 4] |= 1 << (hash & 0xF); #endif if (sc->tulip_flags & TULIP_WANTHASHONLY) { hash = tulip_mchash(IFP2ENADDR(sc->tulip_ifp)); #if BYTE_ORDER == BIG_ENDIAN sp[hash >> 4] |= bswap32(1 << (hash & 0xF)); #else sp[hash >> 4] |= 1 << (hash & 0xF); #endif } else { #if BYTE_ORDER == BIG_ENDIAN sp[39] = ((u_int16_t *) IFP2ENADDR(sc->tulip_ifp))[0] << 16; sp[40] = ((u_int16_t *) IFP2ENADDR(sc->tulip_ifp))[1] << 16; sp[41] = ((u_int16_t *) IFP2ENADDR(sc->tulip_ifp))[2] << 16; #else sp[39] = ((u_int16_t *) IFP2ENADDR(sc->tulip_ifp))[0]; sp[40] = ((u_int16_t *) IFP2ENADDR(sc->tulip_ifp))[1]; sp[41] = ((u_int16_t *) IFP2ENADDR(sc->tulip_ifp))[2]; #endif } } } if ((sc->tulip_flags & (TULIP_WANTHASHPERFECT|TULIP_WANTHASHONLY)) == 0) { u_int32_t *sp = sc->tulip_setupdata; int idx = 0; if ((sc->tulip_flags & TULIP_ALLMULTI) == 0) { /* * Else can get perfect filtering for 16 addresses. 
*/ TAILQ_FOREACH(ifma, &sc->tulip_ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; addrp = LLADDR((struct sockaddr_dl *)ifma->ifma_addr); #if BYTE_ORDER == BIG_ENDIAN *sp++ = ((u_int16_t *) addrp)[0] << 16; *sp++ = ((u_int16_t *) addrp)[1] << 16; *sp++ = ((u_int16_t *) addrp)[2] << 16; #else *sp++ = ((u_int16_t *) addrp)[0]; *sp++ = ((u_int16_t *) addrp)[1]; *sp++ = ((u_int16_t *) addrp)[2]; #endif idx++; } /* * Add the broadcast address. */ idx++; #if BYTE_ORDER == BIG_ENDIAN *sp++ = 0xFFFF << 16; *sp++ = 0xFFFF << 16; *sp++ = 0xFFFF << 16; #else *sp++ = 0xFFFF; *sp++ = 0xFFFF; *sp++ = 0xFFFF; #endif } /* * Pad the rest with our hardware address */ for (; idx < 16; idx++) { #if BYTE_ORDER == BIG_ENDIAN *sp++ = ((u_int16_t *) IFP2ENADDR(sc->tulip_ifp))[0] << 16; *sp++ = ((u_int16_t *) IFP2ENADDR(sc->tulip_ifp))[1] << 16; *sp++ = ((u_int16_t *) IFP2ENADDR(sc->tulip_ifp))[2] << 16; #else *sp++ = ((u_int16_t *) IFP2ENADDR(sc->tulip_ifp))[0]; *sp++ = ((u_int16_t *) IFP2ENADDR(sc->tulip_ifp))[1]; *sp++ = ((u_int16_t *) IFP2ENADDR(sc->tulip_ifp))[2]; #endif } } + IF_ADDR_UNLOCK(sc->tulip_ifp); #if defined(IFF_ALLMULTI) if (sc->tulip_flags & TULIP_ALLMULTI) sc->tulip_ifp->if_flags |= IFF_ALLMULTI; #endif } static void tulip_reset( tulip_softc_t * const sc) { tulip_ringinfo_t *ri; tulip_desc_t *di; u_int32_t inreset = (sc->tulip_flags & TULIP_INRESET); TULIP_LOCK_ASSERT(sc); /* * Brilliant. Simply brilliant. When switching modes/speeds * on a 2114*, you need to set the appriopriate MII/PCS/SCL/PS * bits in CSR6 and then do a software reset to get the 21140 * to properly reset its internal pathways to the right places. * Grrrr. 
*/ if ((sc->tulip_flags & TULIP_DEVICEPROBE) == 0 && sc->tulip_boardsw->bd_media_preset != NULL) (*sc->tulip_boardsw->bd_media_preset)(sc); TULIP_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET); DELAY(10); /* Wait 10 microseconds (actually 50 PCI cycles but at 33MHz that comes to two microseconds but wait a bit longer anyways) */ if (!inreset) { sc->tulip_flags |= TULIP_INRESET; sc->tulip_flags &= ~(TULIP_NEEDRESET|TULIP_RXBUFSLOW); sc->tulip_ifp->if_flags &= ~IFF_OACTIVE; } #if defined(TULIP_BUS_DMA) && !defined(TULIP_BUS_DMA_NOTX) TULIP_CSR_WRITE(sc, csr_txlist, sc->tulip_txdescmap->dm_segs[0].ds_addr); #else TULIP_CSR_WRITE(sc, csr_txlist, TULIP_KVATOPHYS(sc, &sc->tulip_txinfo.ri_first[0])); #endif #if defined(TULIP_BUS_DMA) && !defined(TULIP_BUS_DMA_NORX) TULIP_CSR_WRITE(sc, csr_rxlist, sc->tulip_rxdescmap->dm_segs[0].ds_addr); #else TULIP_CSR_WRITE(sc, csr_rxlist, TULIP_KVATOPHYS(sc, &sc->tulip_rxinfo.ri_first[0])); #endif TULIP_CSR_WRITE(sc, csr_busmode, (1 << (3 /*pci_max_burst_len*/ + 8)) |TULIP_BUSMODE_CACHE_ALIGN8 |TULIP_BUSMODE_READMULTIPLE |(BYTE_ORDER != LITTLE_ENDIAN ? TULIP_BUSMODE_DESC_BIGENDIAN : 0)); sc->tulip_txtimer = 0; sc->tulip_txq.ifq_maxlen = TULIP_TXDESCS; /* * Free all the mbufs that were on the transmit ring. 
*/ for (;;) { #if defined(TULIP_BUS_DMA) && !defined(TULIP_BUS_DMA_NOTX) bus_dmamap_t map; #endif struct mbuf *m; _IF_DEQUEUE(&sc->tulip_txq, m); if (m == NULL) break; #if defined(TULIP_BUS_DMA) && !defined(TULIP_BUS_DMA_NOTX) map = M_GETCTX(m, bus_dmamap_t); bus_dmamap_unload(sc->tulip_dmatag, map); sc->tulip_txmaps[sc->tulip_txmaps_free++] = map; #endif m_freem(m); } ri = &sc->tulip_txinfo; ri->ri_nextin = ri->ri_nextout = ri->ri_first; ri->ri_free = ri->ri_max; for (di = ri->ri_first; di < ri->ri_last; di++) di->d_status = 0; #if defined(TULIP_BUS_DMA) && !defined(TULIP_BUS_DMA_NOTX) bus_dmamap_sync(sc->tulip_dmatag, sc->tulip_txdescmap, 0, sc->tulip_txdescmap->dm_mapsize, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); #endif /* * We need to collect all the mbufs were on the * receive ring before we reinit it either to put * them back on or to know if we have to allocate * more. */ ri = &sc->tulip_rxinfo; ri->ri_nextin = ri->ri_nextout = ri->ri_first; ri->ri_free = ri->ri_max; for (di = ri->ri_first; di < ri->ri_last; di++) { di->d_status = 0; di->d_length1 = 0; di->d_addr1 = 0; di->d_length2 = 0; di->d_addr2 = 0; } #if defined(TULIP_BUS_DMA) && !defined(TULIP_BUS_DMA_NORX) bus_dmamap_sync(sc->tulip_dmatag, sc->tulip_rxdescmap, 0, sc->tulip_rxdescmap->dm_mapsize, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); #endif for (;;) { #if defined(TULIP_BUS_DMA) && !defined(TULIP_BUS_DMA_NORX) bus_dmamap_t map; #endif struct mbuf *m; _IF_DEQUEUE(&sc->tulip_rxq, m); if (m == NULL) break; #if defined(TULIP_BUS_DMA) && !defined(TULIP_BUS_DMA_NORX) map = M_GETCTX(m, bus_dmamap_t); bus_dmamap_unload(sc->tulip_dmatag, map); sc->tulip_rxmaps[sc->tulip_rxmaps_free++] = map; #endif m_freem(m); } /* * If tulip_reset is being called recurisvely, exit quickly knowing * that when the outer tulip_reset returns all the right stuff will * have happened. 
*/ if (inreset) return; sc->tulip_intrmask |= TULIP_STS_NORMALINTR|TULIP_STS_RXINTR|TULIP_STS_TXINTR |TULIP_STS_ABNRMLINTR|TULIP_STS_SYSERROR|TULIP_STS_TXSTOPPED |TULIP_STS_TXUNDERFLOW|TULIP_STS_TXBABBLE |TULIP_STS_RXSTOPPED; if ((sc->tulip_flags & TULIP_DEVICEPROBE) == 0) (*sc->tulip_boardsw->bd_media_select)(sc); #if defined(TULIP_DEBUG) if ((sc->tulip_flags & TULIP_NEEDRESET) == TULIP_NEEDRESET) if_printf(sc->tulip_ifp, "tulip_reset: additional reset needed?!?\n"); #endif if (bootverbose) tulip_media_print(sc); if (sc->tulip_features & TULIP_HAVE_DUALSENSE) TULIP_CSR_WRITE(sc, csr_sia_status, TULIP_CSR_READ(sc, csr_sia_status)); sc->tulip_flags &= ~(TULIP_DOINGSETUP|TULIP_WANTSETUP|TULIP_INRESET |TULIP_RXACT); tulip_addr_filter(sc); } static void tulip_ifinit( void *arg) { tulip_softc_t *sc = (tulip_softc_t *)arg; TULIP_LOCK(sc); tulip_init(sc); TULIP_UNLOCK(sc); } static void tulip_init( tulip_softc_t * const sc) { if (sc->tulip_ifp->if_flags & IFF_UP) { if ((sc->tulip_ifp->if_flags & IFF_RUNNING) == 0) { /* initialize the media */ tulip_reset(sc); } sc->tulip_ifp->if_flags |= IFF_RUNNING; if (sc->tulip_ifp->if_flags & IFF_PROMISC) { sc->tulip_flags |= TULIP_PROMISC; sc->tulip_cmdmode |= TULIP_CMD_PROMISCUOUS; sc->tulip_intrmask |= TULIP_STS_TXINTR; } else { sc->tulip_flags &= ~TULIP_PROMISC; sc->tulip_cmdmode &= ~TULIP_CMD_PROMISCUOUS; if (sc->tulip_flags & TULIP_ALLMULTI) { sc->tulip_cmdmode |= TULIP_CMD_ALLMULTI; } else { sc->tulip_cmdmode &= ~TULIP_CMD_ALLMULTI; } } sc->tulip_cmdmode |= TULIP_CMD_TXRUN; if ((sc->tulip_flags & (TULIP_TXPROBE_ACTIVE|TULIP_WANTSETUP)) == 0) { tulip_rx_intr(sc); sc->tulip_cmdmode |= TULIP_CMD_RXRUN; sc->tulip_intrmask |= TULIP_STS_RXSTOPPED; } else { sc->tulip_ifp->if_flags |= IFF_OACTIVE; sc->tulip_cmdmode &= ~TULIP_CMD_RXRUN; sc->tulip_intrmask &= ~TULIP_STS_RXSTOPPED; } TULIP_CSR_WRITE(sc, csr_intr, sc->tulip_intrmask); TULIP_CSR_WRITE(sc, csr_command, sc->tulip_cmdmode); if ((sc->tulip_flags & 
(TULIP_WANTSETUP|TULIP_TXPROBE_ACTIVE)) == TULIP_WANTSETUP) tulip_txput_setup(sc); } else { sc->tulip_ifp->if_flags &= ~IFF_RUNNING; tulip_reset(sc); } } static void tulip_rx_intr( tulip_softc_t * const sc) { TULIP_PERFSTART(rxintr) tulip_ringinfo_t * const ri = &sc->tulip_rxinfo; struct ifnet * const ifp = sc->tulip_ifp; int fillok = 1; #if defined(TULIP_DEBUG) int cnt = 0; #endif TULIP_LOCK_ASSERT(sc); for (;;) { TULIP_PERFSTART(rxget) tulip_desc_t *eop = ri->ri_nextin; int total_len = 0, last_offset = 0; struct mbuf *ms = NULL, *me = NULL; int accept = 0; #if defined(TULIP_BUS_DMA) && !defined(TULIP_BUS_DMA_NORX) bus_dmamap_t map; int error; #endif if (fillok && sc->tulip_rxq.ifq_len < TULIP_RXQ_TARGET) goto queue_mbuf; #if defined(TULIP_DEBUG) if (cnt == ri->ri_max) break; #endif /* * If the TULIP has no descriptors, there can't be any receive * descriptors to process. */ if (eop == ri->ri_nextout) break; /* * 90% of the packets will fit in one descriptor. So we optimize * for that case. */ TULIP_RXDESC_POSTSYNC(sc, eop, sizeof(*eop)); if ((((volatile tulip_desc_t *) eop)->d_status & (TULIP_DSTS_OWNER|TULIP_DSTS_RxFIRSTDESC|TULIP_DSTS_RxLASTDESC)) == (TULIP_DSTS_RxFIRSTDESC|TULIP_DSTS_RxLASTDESC)) { _IF_DEQUEUE(&sc->tulip_rxq, ms); me = ms; } else { /* * If still owned by the TULIP, don't touch it. */ if (((volatile tulip_desc_t *) eop)->d_status & TULIP_DSTS_OWNER) break; /* * It is possible (though improbable unless MCLBYTES < 1518) for * a received packet to cross more than one receive descriptor. 
*/ while ((((volatile tulip_desc_t *) eop)->d_status & TULIP_DSTS_RxLASTDESC) == 0) { if (++eop == ri->ri_last) eop = ri->ri_first; TULIP_RXDESC_POSTSYNC(sc, eop, sizeof(*eop)); if (eop == ri->ri_nextout || ((((volatile tulip_desc_t *) eop)->d_status & TULIP_DSTS_OWNER))) { #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_rxintrs++; sc->tulip_dbg.dbg_rxpktsperintr[cnt]++; #endif TULIP_PERFEND(rxget); TULIP_PERFEND(rxintr); return; } total_len++; } /* * Dequeue the first buffer for the start of the packet. Hopefully * this will be the only one we need to dequeue. However, if the * packet consumed multiple descriptors, then we need to dequeue * those buffers and chain to the starting mbuf. All buffers but * the last buffer have the same length so we can set that now. * (we add to last_offset instead of multiplying since we normally * won't go into the loop and thereby saving ourselves from * doing a multiplication by 0 in the normal case). */ _IF_DEQUEUE(&sc->tulip_rxq, ms); for (me = ms; total_len > 0; total_len--) { #if defined(TULIP_BUS_DMA) && !defined(TULIP_BUS_DMA_NORX) map = M_GETCTX(me, bus_dmamap_t); TULIP_RXMAP_POSTSYNC(sc, map); bus_dmamap_unload(sc->tulip_dmatag, map); sc->tulip_rxmaps[sc->tulip_rxmaps_free++] = map; #if defined(DIAGNOSTIC) M_SETCTX(me, NULL); #endif #endif /* TULIP_BUS_DMA */ me->m_len = TULIP_RX_BUFLEN; last_offset += TULIP_RX_BUFLEN; _IF_DEQUEUE(&sc->tulip_rxq, me->m_next); me = me->m_next; } } /* * Now get the size of received packet (minus the CRC). 
*/ total_len = ((eop->d_status >> 16) & 0x7FFF) - 4; if ((sc->tulip_flags & TULIP_RXIGNORE) == 0 && ((eop->d_status & TULIP_DSTS_ERRSUM) == 0)) { me->m_len = total_len - last_offset; #if defined(TULIP_BUS_DMA) && !defined(TULIP_BUS_DMA_NORX) map = M_GETCTX(me, bus_dmamap_t); bus_dmamap_sync(sc->tulip_dmatag, map, 0, me->m_len, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->tulip_dmatag, map); sc->tulip_rxmaps[sc->tulip_rxmaps_free++] = map; #if defined(DIAGNOSTIC) M_SETCTX(me, NULL); #endif #endif /* TULIP_BUS_DMA */ sc->tulip_flags |= TULIP_RXACT; accept = 1; } else { ifp->if_ierrors++; if (eop->d_status & (TULIP_DSTS_RxBADLENGTH|TULIP_DSTS_RxOVERFLOW|TULIP_DSTS_RxWATCHDOG)) { sc->tulip_dot3stats.dot3StatsInternalMacReceiveErrors++; } else { #if defined(TULIP_VERBOSE) const char *error = NULL; #endif if (eop->d_status & TULIP_DSTS_RxTOOLONG) { sc->tulip_dot3stats.dot3StatsFrameTooLongs++; #if defined(TULIP_VERBOSE) error = "frame too long"; #endif } if (eop->d_status & TULIP_DSTS_RxBADCRC) { if (eop->d_status & TULIP_DSTS_RxDRBBLBIT) { sc->tulip_dot3stats.dot3StatsAlignmentErrors++; #if defined(TULIP_VERBOSE) error = "alignment error"; #endif } else { sc->tulip_dot3stats.dot3StatsFCSErrors++; #if defined(TULIP_VERBOSE) error = "bad crc"; #endif } } #if defined(TULIP_VERBOSE) if (error != NULL && (sc->tulip_flags & TULIP_NOMESSAGES) == 0) { if_printf(sc->tulip_ifp, "receive: %6D: %s\n", mtod(ms, u_char *) + 6, ":", error); sc->tulip_flags |= TULIP_NOMESSAGES; } #endif } #if defined(TULIP_BUS_DMA) && !defined(TULIP_BUS_DMA_NORX) map = M_GETCTX(me, bus_dmamap_t); bus_dmamap_unload(sc->tulip_dmatag, map); sc->tulip_rxmaps[sc->tulip_rxmaps_free++] = map; #if defined(DIAGNOSTIC) M_SETCTX(me, NULL); #endif #endif /* TULIP_BUS_DMA */ } #if defined(TULIP_DEBUG) cnt++; #endif ifp->if_ipackets++; if (++eop == ri->ri_last) eop = ri->ri_first; ri->ri_nextin = eop; queue_mbuf: /* * Either we are priming the TULIP with mbufs (m == NULL) * or we are about to 
accept an mbuf for the upper layers * so we need to allocate an mbuf to replace it. If we * can't replace it, send up it anyways. This may cause * us to drop packets in the future but that's better than * being caught in livelock. * * Note that if this packet crossed multiple descriptors * we don't even try to reallocate all the mbufs here. * Instead we rely on the test of the beginning of * the loop to refill for the extra consumed mbufs. */ if (accept || ms == NULL) { struct mbuf *m0; MGETHDR(m0, M_DONTWAIT, MT_DATA); if (m0 != NULL) { #if defined(TULIP_COPY_RXDATA) if (!accept || total_len >= (MHLEN - 2)) { #endif MCLGET(m0, M_DONTWAIT); if ((m0->m_flags & M_EXT) == 0) { m_freem(m0); m0 = NULL; } #if defined(TULIP_COPY_RXDATA) } #endif } if (accept #if defined(TULIP_COPY_RXDATA) && m0 != NULL #endif ) { TULIP_UNLOCK(sc); #if !defined(TULIP_COPY_RXDATA) ms->m_pkthdr.len = total_len; ms->m_pkthdr.rcvif = ifp; (*ifp->if_input)(ifp, ms); #else m0->m_data += 2; /* align data after header */ m_copydata(ms, 0, total_len, mtod(m0, caddr_t)); m0->m_len = m0->m_pkthdr.len = total_len; m0->m_pkthdr.rcvif = ifp; (*ifp->if_input)(ifp, m0); m0 = ms; #endif /* ! TULIP_COPY_RXDATA */ TULIP_LOCK(sc); } ms = m0; } if (ms == NULL) { /* * Couldn't allocate a new buffer. Don't bother * trying to replenish the receive queue. */ fillok = 0; sc->tulip_flags |= TULIP_RXBUFSLOW; #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_rxlowbufs++; #endif TULIP_PERFEND(rxget); continue; } /* * Now give the buffer(s) to the TULIP and save in our * receive queue. 
*/ do { tulip_desc_t * const nextout = ri->ri_nextout; #if defined(TULIP_BUS_DMA) && !defined(TULIP_BUS_DMA_NORX) if (sc->tulip_rxmaps_free > 0) { map = sc->tulip_rxmaps[--sc->tulip_rxmaps_free]; } else { m_freem(ms); sc->tulip_flags |= TULIP_RXBUFSLOW; #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_rxlowbufs++; #endif break; } M_SETCTX(ms, map); error = bus_dmamap_load(sc->tulip_dmatag, map, mtod(ms, void *), TULIP_RX_BUFLEN, NULL, BUS_DMA_NOWAIT); if (error) { if_printf(sc->tulip_ifp, "unable to load rx map, error = %d\n", error); panic("tulip_rx_intr"); /* XXX */ } nextout->d_addr1 = map->dm_segs[0].ds_addr; nextout->d_length1 = map->dm_segs[0].ds_len; if (map->dm_nsegs == 2) { nextout->d_addr2 = map->dm_segs[1].ds_addr; nextout->d_length2 = map->dm_segs[1].ds_len; } else { nextout->d_addr2 = 0; nextout->d_length2 = 0; } TULIP_RXDESC_POSTSYNC(sc, nextout, sizeof(*nextout)); #else /* TULIP_BUS_DMA */ nextout->d_addr1 = TULIP_KVATOPHYS(sc, mtod(ms, caddr_t)); nextout->d_length1 = TULIP_RX_BUFLEN; #endif /* TULIP_BUS_DMA */ nextout->d_status = TULIP_DSTS_OWNER; TULIP_RXDESC_POSTSYNC(sc, nextout, sizeof(u_int32_t)); if (++ri->ri_nextout == ri->ri_last) ri->ri_nextout = ri->ri_first; me = ms->m_next; ms->m_next = NULL; _IF_ENQUEUE(&sc->tulip_rxq, ms); } while ((ms = me) != NULL); if (sc->tulip_rxq.ifq_len >= TULIP_RXQ_TARGET) sc->tulip_flags &= ~TULIP_RXBUFSLOW; TULIP_PERFEND(rxget); } #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_rxintrs++; sc->tulip_dbg.dbg_rxpktsperintr[cnt]++; #endif TULIP_PERFEND(rxintr); } static int tulip_tx_intr( tulip_softc_t * const sc) { TULIP_PERFSTART(txintr) tulip_ringinfo_t * const ri = &sc->tulip_txinfo; struct mbuf *m; int xmits = 0; int descs = 0; TULIP_LOCK_ASSERT(sc); while (ri->ri_free < ri->ri_max) { u_int32_t d_flag; TULIP_TXDESC_POSTSYNC(sc, ri->ri_nextin, sizeof(*ri->ri_nextin)); if (((volatile tulip_desc_t *) ri->ri_nextin)->d_status & TULIP_DSTS_OWNER) break; ri->ri_free++; descs++; d_flag = ri->ri_nextin->d_flag; if (d_flag & 
TULIP_DFLAG_TxLASTSEG) { if (d_flag & TULIP_DFLAG_TxSETUPPKT) { /* * We've just finished processing a setup packet. * Mark that we finished it. If there's not * another pending, startup the TULIP receiver. * Make sure we ack the RXSTOPPED so we won't get * an abormal interrupt indication. */ TULIP_TXMAP_POSTSYNC(sc, sc->tulip_setupmap); sc->tulip_flags &= ~(TULIP_DOINGSETUP|TULIP_HASHONLY); if (ri->ri_nextin->d_flag & TULIP_DFLAG_TxINVRSFILT) sc->tulip_flags |= TULIP_HASHONLY; if ((sc->tulip_flags & (TULIP_WANTSETUP|TULIP_TXPROBE_ACTIVE)) == 0) { tulip_rx_intr(sc); sc->tulip_cmdmode |= TULIP_CMD_RXRUN; sc->tulip_intrmask |= TULIP_STS_RXSTOPPED; TULIP_CSR_WRITE(sc, csr_status, TULIP_STS_RXSTOPPED); TULIP_CSR_WRITE(sc, csr_intr, sc->tulip_intrmask); TULIP_CSR_WRITE(sc, csr_command, sc->tulip_cmdmode); } } else { const u_int32_t d_status = ri->ri_nextin->d_status; _IF_DEQUEUE(&sc->tulip_txq, m); if (m != NULL) { #if defined(TULIP_BUS_DMA) && !defined(TULIP_BUS_DMA_NOTX) bus_dmamap_t map = M_GETCTX(m, bus_dmamap_t); TULIP_TXMAP_POSTSYNC(sc, map); sc->tulip_txmaps[sc->tulip_txmaps_free++] = map; #endif /* TULIP_BUS_DMA */ m_freem(m); #if defined(TULIP_DEBUG) } else { if_printf(sc->tulip_ifp, "tx_intr: failed to dequeue mbuf?!?\n"); #endif } if (sc->tulip_flags & TULIP_TXPROBE_ACTIVE) { tulip_mediapoll_event_t event = TULIP_MEDIAPOLL_TXPROBE_OK; if (d_status & (TULIP_DSTS_TxNOCARR|TULIP_DSTS_TxEXCCOLL)) { #if defined(TULIP_DEBUG) if (d_status & TULIP_DSTS_TxNOCARR) sc->tulip_dbg.dbg_txprobe_nocarr++; if (d_status & TULIP_DSTS_TxEXCCOLL) sc->tulip_dbg.dbg_txprobe_exccoll++; #endif event = TULIP_MEDIAPOLL_TXPROBE_FAILED; } (*sc->tulip_boardsw->bd_media_poll)(sc, event); /* * Escape from the loop before media poll has reset the TULIP! 
*/ break; } else { xmits++; if (d_status & TULIP_DSTS_ERRSUM) { sc->tulip_ifp->if_oerrors++; if (d_status & TULIP_DSTS_TxEXCCOLL) sc->tulip_dot3stats.dot3StatsExcessiveCollisions++; if (d_status & TULIP_DSTS_TxLATECOLL) sc->tulip_dot3stats.dot3StatsLateCollisions++; if (d_status & (TULIP_DSTS_TxNOCARR|TULIP_DSTS_TxCARRLOSS)) sc->tulip_dot3stats.dot3StatsCarrierSenseErrors++; if (d_status & (TULIP_DSTS_TxUNDERFLOW|TULIP_DSTS_TxBABBLE)) sc->tulip_dot3stats.dot3StatsInternalMacTransmitErrors++; if (d_status & TULIP_DSTS_TxUNDERFLOW) sc->tulip_dot3stats.dot3StatsInternalTransmitUnderflows++; if (d_status & TULIP_DSTS_TxBABBLE) sc->tulip_dot3stats.dot3StatsInternalTransmitBabbles++; } else { u_int32_t collisions = (d_status & TULIP_DSTS_TxCOLLMASK) >> TULIP_DSTS_V_TxCOLLCNT; sc->tulip_ifp->if_collisions += collisions; if (collisions == 1) sc->tulip_dot3stats.dot3StatsSingleCollisionFrames++; else if (collisions > 1) sc->tulip_dot3stats.dot3StatsMultipleCollisionFrames++; else if (d_status & TULIP_DSTS_TxDEFERRED) sc->tulip_dot3stats.dot3StatsDeferredTransmissions++; /* * SQE is only valid for 10baseT/BNC/AUI when not * running in full-duplex. In order to speed up the * test, the corresponding bit in tulip_flags needs to * set as well to get us to count SQE Test Errors. */ if (d_status & TULIP_DSTS_TxNOHRTBT & sc->tulip_flags) sc->tulip_dot3stats.dot3StatsSQETestErrors++; } } } } if (++ri->ri_nextin == ri->ri_last) ri->ri_nextin = ri->ri_first; if ((sc->tulip_flags & TULIP_TXPROBE_ACTIVE) == 0) sc->tulip_ifp->if_flags &= ~IFF_OACTIVE; } /* * If nothing left to transmit, disable the timer. * Else if progress, reset the timer back to 2 ticks. 
*/ if (ri->ri_free == ri->ri_max || (sc->tulip_flags & TULIP_TXPROBE_ACTIVE)) sc->tulip_txtimer = 0; else if (xmits > 0) sc->tulip_txtimer = TULIP_TXTIMER; sc->tulip_ifp->if_opackets += xmits; TULIP_PERFEND(txintr); return descs; } static void tulip_print_abnormal_interrupt( tulip_softc_t * const sc, u_int32_t csr) { const char * const *msgp = tulip_status_bits; const char *sep; u_int32_t mask; const char thrsh[] = "72|128\0\0\0" "96|256\0\0\0" "128|512\0\0" "160|1024"; TULIP_LOCK_ASSERT(sc); csr &= (1 << (sizeof(tulip_status_bits)/sizeof(tulip_status_bits[0]))) - 1; if_printf(sc->tulip_ifp, "abnormal interrupt:"); for (sep = " ", mask = 1; mask <= csr; mask <<= 1, msgp++) { if ((csr & mask) && *msgp != NULL) { printf("%s%s", sep, *msgp); if (mask == TULIP_STS_TXUNDERFLOW && (sc->tulip_flags & TULIP_NEWTXTHRESH)) { sc->tulip_flags &= ~TULIP_NEWTXTHRESH; if (sc->tulip_cmdmode & TULIP_CMD_STOREFWD) { printf(" (switching to store-and-forward mode)"); } else { printf(" (raising TX threshold to %s)", &thrsh[9 * ((sc->tulip_cmdmode & TULIP_CMD_THRESHOLDCTL) >> 14)]); } } sep = ", "; } } printf("\n"); } static void tulip_intr_handler( tulip_softc_t * const sc) { TULIP_PERFSTART(intr) u_int32_t csr; TULIP_LOCK_ASSERT(sc); while ((csr = TULIP_CSR_READ(sc, csr_status)) & sc->tulip_intrmask) { TULIP_CSR_WRITE(sc, csr_status, csr); if (csr & TULIP_STS_SYSERROR) { sc->tulip_last_system_error = (csr & TULIP_STS_ERRORMASK) >> TULIP_STS_ERR_SHIFT; if (sc->tulip_flags & TULIP_NOMESSAGES) { sc->tulip_flags |= TULIP_SYSTEMERROR; } else { if_printf(sc->tulip_ifp, "system error: %s\n", tulip_system_errors[sc->tulip_last_system_error]); } sc->tulip_flags |= TULIP_NEEDRESET; sc->tulip_system_errors++; break; } if (csr & (TULIP_STS_LINKPASS|TULIP_STS_LINKFAIL) & sc->tulip_intrmask) { #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_link_intrs++; #endif if (sc->tulip_boardsw->bd_media_poll != NULL) { (*sc->tulip_boardsw->bd_media_poll)(sc, csr & TULIP_STS_LINKFAIL ? 
TULIP_MEDIAPOLL_LINKFAIL : TULIP_MEDIAPOLL_LINKPASS); csr &= ~TULIP_STS_ABNRMLINTR; } tulip_media_print(sc); } if (csr & (TULIP_STS_RXINTR|TULIP_STS_RXNOBUF)) { u_int32_t misses = TULIP_CSR_READ(sc, csr_missed_frames); if (csr & TULIP_STS_RXNOBUF) sc->tulip_dot3stats.dot3StatsMissedFrames += misses & 0xFFFF; /* * Pass 2.[012] of the 21140A-A[CDE] may hang and/or corrupt data * on receive overflows. */ if ((misses & 0x0FFE0000) && (sc->tulip_features & TULIP_HAVE_RXBADOVRFLW)) { sc->tulip_dot3stats.dot3StatsInternalMacReceiveErrors++; /* * Stop the receiver process and spin until it's stopped. * Tell rx_intr to drop the packets it dequeues. */ TULIP_CSR_WRITE(sc, csr_command, sc->tulip_cmdmode & ~TULIP_CMD_RXRUN); while ((TULIP_CSR_READ(sc, csr_status) & TULIP_STS_RXSTOPPED) == 0) ; TULIP_CSR_WRITE(sc, csr_status, TULIP_STS_RXSTOPPED); sc->tulip_flags |= TULIP_RXIGNORE; } tulip_rx_intr(sc); if (sc->tulip_flags & TULIP_RXIGNORE) { /* * Restart the receiver. */ sc->tulip_flags &= ~TULIP_RXIGNORE; TULIP_CSR_WRITE(sc, csr_command, sc->tulip_cmdmode); } } if (csr & TULIP_STS_ABNRMLINTR) { u_int32_t tmp = csr & sc->tulip_intrmask & ~(TULIP_STS_NORMALINTR|TULIP_STS_ABNRMLINTR); if (csr & TULIP_STS_TXUNDERFLOW) { if ((sc->tulip_cmdmode & TULIP_CMD_THRESHOLDCTL) != TULIP_CMD_THRSHLD160) { sc->tulip_cmdmode += TULIP_CMD_THRSHLD96; sc->tulip_flags |= TULIP_NEWTXTHRESH; } else if (sc->tulip_features & TULIP_HAVE_STOREFWD) { sc->tulip_cmdmode |= TULIP_CMD_STOREFWD; sc->tulip_flags |= TULIP_NEWTXTHRESH; } } if (sc->tulip_flags & TULIP_NOMESSAGES) { sc->tulip_statusbits |= tmp; } else { tulip_print_abnormal_interrupt(sc, tmp); sc->tulip_flags |= TULIP_NOMESSAGES; } TULIP_CSR_WRITE(sc, csr_command, sc->tulip_cmdmode); } if (sc->tulip_flags & (TULIP_WANTTXSTART|TULIP_TXPROBE_ACTIVE|TULIP_DOINGSETUP|TULIP_PROMISC)) { tulip_tx_intr(sc); if ((sc->tulip_flags & TULIP_TXPROBE_ACTIVE) == 0) tulip_start(sc); } } if (sc->tulip_flags & TULIP_NEEDRESET) { tulip_reset(sc); tulip_init(sc); } 
TULIP_PERFEND(intr);
}

/*
 * Interrupt handler for adapters that share one interrupt line:
 * walk the chain of slave softcs and service each one under its
 * own lock.
 */
static void
tulip_intr_shared(
    void *arg)
{
    tulip_softc_t * sc = arg;

    for (; sc != NULL; sc = sc->tulip_slaves) {
	TULIP_LOCK(sc);
#if defined(TULIP_DEBUG)
	sc->tulip_dbg.dbg_intrs++;
#endif
	tulip_intr_handler(sc);
	TULIP_UNLOCK(sc);
    }
}

/*
 * Interrupt handler for an adapter with a dedicated interrupt line.
 */
static void
tulip_intr_normal(
    void *arg)
{
    tulip_softc_t * sc = (tulip_softc_t *) arg;

    TULIP_LOCK(sc);
#if defined(TULIP_DEBUG)
    sc->tulip_dbg.dbg_intrs++;
#endif
    tulip_intr_handler(sc);
    TULIP_UNLOCK(sc);
}

/*
 * Recopy an mbuf chain into a minimal contiguous chain (a single
 * header mbuf/cluster when MCLBYTES can hold a full frame).  The
 * original chain is always freed by the common tail of this routine;
 * on allocation failure the partial copy is freed and NULL is
 * returned.
 */
static struct mbuf *
tulip_mbuf_compress(
    struct mbuf *m)
{
    struct mbuf *m0;

#if MCLBYTES >= ETHERMTU + 18
    /* One cluster is enough for any frame: MGETHDR, add a cluster only
     * if the packet will not fit in the header mbuf itself. */
    MGETHDR(m0, M_DONTWAIT, MT_DATA);
    if (m0 != NULL) {
	if (m->m_pkthdr.len > MHLEN) {
	    MCLGET(m0, M_DONTWAIT);
	    if ((m0->m_flags & M_EXT) == 0) {
		m_freem(m);
		m_freem(m0);
		return NULL;
	    }
	}
	m_copydata(m, 0, m->m_pkthdr.len, mtod(m0, caddr_t));
	m0->m_pkthdr.len = m0->m_len = m->m_pkthdr.len;
    }
#else
    /* Clusters are smaller than a frame: build a chain, copying
     * MCLBYTES/MLEN-sized pieces until the whole packet is covered. */
    int mlen = MHLEN;
    int len = m->m_pkthdr.len;
    struct mbuf **mp = &m0;

    while (len > 0) {
	if (mlen == MHLEN) {
	    MGETHDR(*mp, M_DONTWAIT, MT_DATA);
	} else {
	    MGET(*mp, M_DONTWAIT, MT_DATA);
	}
	if (*mp == NULL) {
	    m_freem(m0);
	    m0 = NULL;
	    break;
	}
	if (len > MLEN) {
	    MCLGET(*mp, M_DONTWAIT);
	    if (((*mp)->m_flags & M_EXT) == 0) {
		m_freem(m0);
		m0 = NULL;
		break;
	    }
	    (*mp)->m_len = len <= MCLBYTES ? len : MCLBYTES;
	} else {
	    (*mp)->m_len = len <= mlen ?
len : mlen; } m_copydata(m, m->m_pkthdr.len - len, (*mp)->m_len, mtod((*mp), caddr_t)); len -= (*mp)->m_len; mp = &(*mp)->m_next; mlen = MLEN; } #endif m_freem(m); return m0; } static struct mbuf * tulip_txput( tulip_softc_t * const sc, struct mbuf *m) { TULIP_PERFSTART(txput) tulip_ringinfo_t * const ri = &sc->tulip_txinfo; tulip_desc_t *eop, *nextout; int segcnt, free; u_int32_t d_status; #if defined(TULIP_BUS_DMA) && !defined(TULIP_BUS_DMA_NOTX) bus_dmamap_t map; int error; #else struct mbuf *m0; #endif TULIP_LOCK_ASSERT(sc); #if defined(TULIP_DEBUG) if ((sc->tulip_cmdmode & TULIP_CMD_TXRUN) == 0) { if_printf(sc->tulip_ifp, "txput%s: tx not running\n", (sc->tulip_flags & TULIP_TXPROBE_ACTIVE) ? "(probe)" : ""); sc->tulip_flags |= TULIP_WANTTXSTART; sc->tulip_dbg.dbg_txput_finishes[0]++; goto finish; } #endif /* * Now we try to fill in our transmit descriptors. This is * a bit reminiscent of going on the Ark two by two * since each descriptor for the TULIP can describe * two buffers. So we advance through packet filling * each of the two entries at a time to to fill each * descriptor. Clear the first and last segment bits * in each descriptor (actually just clear everything * but the end-of-ring or chain bits) to make sure * we don't get messed up by previously sent packets. * * We may fail to put the entire packet on the ring if * there is either not enough ring entries free or if the * packet has more than MAX_TXSEG segments. In the former * case we will just wait for the ring to empty. In the * latter case we have to recopy. */ #if !defined(TULIP_BUS_DMA) || defined(TULIP_BUS_DMA_NOTX) again: m0 = m; #endif d_status = 0; eop = nextout = ri->ri_nextout; segcnt = 0; free = ri->ri_free; #if defined(TULIP_BUS_DMA) && !defined(TULIP_BUS_DMA_NOTX) /* * Reclaim some dma maps from if we are out. 
*/ if (sc->tulip_txmaps_free == 0) { #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_no_txmaps++; #endif free += tulip_tx_intr(sc); } if (sc->tulip_txmaps_free > 0) { map = sc->tulip_txmaps[sc->tulip_txmaps_free-1]; } else { sc->tulip_flags |= TULIP_WANTTXSTART; #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_txput_finishes[1]++; #endif goto finish; } error = bus_dmamap_load_mbuf(sc->tulip_dmatag, map, m, BUS_DMA_NOWAIT); if (error != 0) { if (error == EFBIG) { /* * The packet exceeds the number of transmit buffer * entries that we can use for one packet, so we have * to recopy it into one mbuf and then try again. */ m = tulip_mbuf_compress(m); if (m == NULL) { #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_txput_finishes[2]++; #endif goto finish; } error = bus_dmamap_load_mbuf(sc->tulip_dmatag, map, m, BUS_DMA_NOWAIT); } if (error != 0) { if_printf(sc->tulip_ifp, "unable to load tx map, error = %d\n", error); #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_txput_finishes[3]++; #endif goto finish; } } if ((free -= (map->dm_nsegs + 1) / 2) <= 0 /* * See if there's any unclaimed space in the transmit ring. */ && (free += tulip_tx_intr(sc)) <= 0) { /* * There's no more room but since nothing * has been committed at this point, just * show output is active, put back the * mbuf and return. 
*/ sc->tulip_flags |= TULIP_WANTTXSTART; #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_txput_finishes[4]++; #endif bus_dmamap_unload(sc->tulip_dmatag, map); goto finish; } for (; map->dm_nsegs - segcnt > 1; segcnt += 2) { eop = nextout; eop->d_flag &= TULIP_DFLAG_ENDRING|TULIP_DFLAG_CHAIN; eop->d_status = d_status; eop->d_addr1 = map->dm_segs[segcnt].ds_addr; eop->d_length1 = map->dm_segs[segcnt].ds_len; eop->d_addr2 = map->dm_segs[segcnt+1].ds_addr; eop->d_length2 = map->dm_segs[segcnt+1].ds_len; d_status = TULIP_DSTS_OWNER; if (++nextout == ri->ri_last) nextout = ri->ri_first; } if (segcnt < map->dm_nsegs) { eop = nextout; eop->d_flag &= TULIP_DFLAG_ENDRING|TULIP_DFLAG_CHAIN; eop->d_status = d_status; eop->d_addr1 = map->dm_segs[segcnt].ds_addr; eop->d_length1 = map->dm_segs[segcnt].ds_len; eop->d_addr2 = 0; eop->d_length2 = 0; if (++nextout == ri->ri_last) nextout = ri->ri_first; } TULIP_TXMAP_PRESYNC(sc, map); M_SETCTX(m, map); map = NULL; --sc->tulip_txmaps_free; /* commit to using the dmamap */ #else /* !TULIP_BUS_DMA */ do { int len = m0->m_len; caddr_t addr = mtod(m0, caddr_t); unsigned clsize = PAGE_SIZE - (((uintptr_t) addr) & (PAGE_SIZE-1)); while (len > 0) { unsigned slen = min(len, clsize); segcnt++; if (segcnt > TULIP_MAX_TXSEG) { /* * The packet exceeds the number of transmit buffer * entries that we can use for one packet, so we have * recopy it into one mbuf and then try again. */ m = tulip_mbuf_compress(m); if (m == NULL) goto finish; goto again; } if (segcnt & 1) { if (--free == 0) { /* * See if there's any unclaimed space in the * transmit ring. */ if ((free += tulip_tx_intr(sc)) == 0) { /* * There's no more room but since nothing * has been committed at this point, just * show output is active, put back the * mbuf and return. 
*/ sc->tulip_flags |= TULIP_WANTTXSTART; #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_txput_finishes[1]++; #endif goto finish; } } eop = nextout; if (++nextout == ri->ri_last) nextout = ri->ri_first; eop->d_flag &= TULIP_DFLAG_ENDRING|TULIP_DFLAG_CHAIN; eop->d_status = d_status; eop->d_addr1 = TULIP_KVATOPHYS(sc, addr); eop->d_length1 = slen; } else { /* * Fill in second half of descriptor */ eop->d_addr2 = TULIP_KVATOPHYS(sc, addr); eop->d_length2 = slen; } d_status = TULIP_DSTS_OWNER; len -= slen; addr += slen; clsize = PAGE_SIZE; } } while ((m0 = m0->m_next) != NULL); #endif /* TULIP_BUS_DMA */ /* * bounce a copy to the bpf listener, if any. */ BPF_MTAP(sc->tulip_ifp, m); /* * The descriptors have been filled in. Now get ready * to transmit. */ _IF_ENQUEUE(&sc->tulip_txq, m); m = NULL; /* * Make sure the next descriptor after this packet is owned * by us since it may have been set up above if we ran out * of room in the ring. */ nextout->d_status = 0; TULIP_TXDESC_PRESYNC(sc, nextout, sizeof(u_int32_t)); #if !defined(TULIP_BUS_DMA) || defined(TULIP_BUS_DMA_NOTX) /* * If we only used the first segment of the last descriptor, * make sure the second segment will not be used. */ if (segcnt & 1) { eop->d_addr2 = 0; eop->d_length2 = 0; } #endif /* TULIP_BUS_DMA */ /* * Mark the last and first segments, indicate we want a transmit * complete interrupt, and tell it to transmit! */ eop->d_flag |= TULIP_DFLAG_TxLASTSEG|TULIP_DFLAG_TxWANTINTR; /* * Note that ri->ri_nextout is still the start of the packet * and until we set the OWNER bit, we can still back out of * everything we have done. 
*/ ri->ri_nextout->d_flag |= TULIP_DFLAG_TxFIRSTSEG; #if defined(TULIP_BUS_MAP) && !defined(TULIP_BUS_DMA_NOTX) if (eop < ri->ri_nextout) { TULIP_TXDESC_PRESYNC(sc, ri->ri_nextout, (caddr_t) ri->ri_last - (caddr_t) ri->ri_nextout); TULIP_TXDESC_PRESYNC(sc, ri->ri_first, (caddr_t) (eop + 1) - (caddr_t) ri->ri_first); } else { TULIP_TXDESC_PRESYNC(sc, ri->ri_nextout, (caddr_t) (eop + 1) - (caddr_t) ri->ri_nextout); } #endif ri->ri_nextout->d_status = TULIP_DSTS_OWNER; TULIP_TXDESC_PRESYNC(sc, ri->ri_nextout, sizeof(u_int32_t)); /* * This advances the ring for us. */ ri->ri_nextout = nextout; ri->ri_free = free; TULIP_PERFEND(txput); if (sc->tulip_flags & TULIP_TXPROBE_ACTIVE) { TULIP_CSR_WRITE(sc, csr_txpoll, 1); sc->tulip_ifp->if_flags |= IFF_OACTIVE; TULIP_PERFEND(txput); return NULL; } /* * switch back to the single queueing ifstart. */ sc->tulip_flags &= ~TULIP_WANTTXSTART; if (sc->tulip_txtimer == 0) sc->tulip_txtimer = TULIP_TXTIMER; #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_txput_finishes[5]++; #endif /* * If we want a txstart, there must be not enough space in the * transmit ring. So we want to enable transmit done interrupts * so we can immediately reclaim some space. When the transmit * interrupt is posted, the interrupt handler will call tx_intr * to reclaim space and then txstart (since WANTTXSTART is set). * txstart will move the packet into the transmit ring and clear * WANTTXSTART thereby causing TXINTR to be cleared. 
*/ finish: #if defined(TULIP_DEBUG) sc->tulip_dbg.dbg_txput_finishes[6]++; #endif if (sc->tulip_flags & (TULIP_WANTTXSTART|TULIP_DOINGSETUP)) { sc->tulip_ifp->if_flags |= IFF_OACTIVE; if ((sc->tulip_intrmask & TULIP_STS_TXINTR) == 0) { sc->tulip_intrmask |= TULIP_STS_TXINTR; TULIP_CSR_WRITE(sc, csr_intr, sc->tulip_intrmask); } } else if ((sc->tulip_flags & TULIP_PROMISC) == 0) { if (sc->tulip_intrmask & TULIP_STS_TXINTR) { sc->tulip_intrmask &= ~TULIP_STS_TXINTR; TULIP_CSR_WRITE(sc, csr_intr, sc->tulip_intrmask); } } TULIP_CSR_WRITE(sc, csr_txpoll, 1); TULIP_PERFEND(txput); return m; } static void tulip_txput_setup( tulip_softc_t * const sc) { tulip_ringinfo_t * const ri = &sc->tulip_txinfo; tulip_desc_t *nextout; TULIP_LOCK_ASSERT(sc); /* * We will transmit, at most, one setup packet per call to ifstart. */ #if defined(TULIP_DEBUG) if ((sc->tulip_cmdmode & TULIP_CMD_TXRUN) == 0) { if_printf(sc->tulip_ifp, "txput_setup: tx not running\n"); sc->tulip_flags |= TULIP_WANTTXSTART; return; } #endif /* * Try to reclaim some free descriptors.. */ if (ri->ri_free < 2) tulip_tx_intr(sc); if ((sc->tulip_flags & TULIP_DOINGSETUP) || ri->ri_free == 1) { sc->tulip_flags |= TULIP_WANTTXSTART; return; } bcopy(sc->tulip_setupdata, sc->tulip_setupbuf, sizeof(sc->tulip_setupbuf)); /* * Clear WANTSETUP and set DOINGSETUP. Set know that WANTSETUP is * set and DOINGSETUP is clear doing an XOR of the two will DTRT. 
*/ sc->tulip_flags ^= TULIP_WANTSETUP|TULIP_DOINGSETUP; ri->ri_free--; nextout = ri->ri_nextout; nextout->d_flag &= TULIP_DFLAG_ENDRING|TULIP_DFLAG_CHAIN; nextout->d_flag |= TULIP_DFLAG_TxFIRSTSEG|TULIP_DFLAG_TxLASTSEG |TULIP_DFLAG_TxSETUPPKT|TULIP_DFLAG_TxWANTINTR; if (sc->tulip_flags & TULIP_WANTHASHPERFECT) nextout->d_flag |= TULIP_DFLAG_TxHASHFILT; else if (sc->tulip_flags & TULIP_WANTHASHONLY) nextout->d_flag |= TULIP_DFLAG_TxHASHFILT|TULIP_DFLAG_TxINVRSFILT; nextout->d_length2 = 0; nextout->d_addr2 = 0; #if defined(TULIP_BUS_DMA) && !defined(TULIP_BUS_DMA_NOTX) nextout->d_length1 = sc->tulip_setupmap->dm_segs[0].ds_len; nextout->d_addr1 = sc->tulip_setupmap->dm_segs[0].ds_addr; if (sc->tulip_setupmap->dm_nsegs == 2) { nextout->d_length2 = sc->tulip_setupmap->dm_segs[1].ds_len; nextout->d_addr2 = sc->tulip_setupmap->dm_segs[1].ds_addr; } TULIP_TXMAP_PRESYNC(sc, sc->tulip_setupmap); TULIP_TXDESC_PRESYNC(sc, nextout, sizeof(*nextout)); #else nextout->d_length1 = sizeof(sc->tulip_setupbuf); nextout->d_addr1 = TULIP_KVATOPHYS(sc, sc->tulip_setupbuf); #endif /* * Advance the ring for the next transmit packet. */ if (++ri->ri_nextout == ri->ri_last) ri->ri_nextout = ri->ri_first; /* * Make sure the next descriptor is owned by us since it * may have been set up above if we ran out of room in the * ring. 
*/ ri->ri_nextout->d_status = 0; TULIP_TXDESC_PRESYNC(sc, ri->ri_nextout, sizeof(u_int32_t)); nextout->d_status = TULIP_DSTS_OWNER; /* * Flush the ownwership of the current descriptor */ TULIP_TXDESC_PRESYNC(sc, nextout, sizeof(u_int32_t)); TULIP_CSR_WRITE(sc, csr_txpoll, 1); if ((sc->tulip_intrmask & TULIP_STS_TXINTR) == 0) { sc->tulip_intrmask |= TULIP_STS_TXINTR; TULIP_CSR_WRITE(sc, csr_intr, sc->tulip_intrmask); } } static int tulip_ifioctl( struct ifnet * ifp, u_long cmd, caddr_t data) { TULIP_PERFSTART(ifioctl) tulip_softc_t * const sc = (tulip_softc_t *)ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; int error = 0; switch (cmd) { case SIOCSIFFLAGS: { TULIP_LOCK(sc); tulip_addr_filter(sc); /* reinit multicast filter */ tulip_init(sc); TULIP_UNLOCK(sc); break; } case SIOCSIFMEDIA: case SIOCGIFMEDIA: { error = ifmedia_ioctl(ifp, ifr, &sc->tulip_ifmedia, cmd); break; } case SIOCADDMULTI: case SIOCDELMULTI: { /* * Update multicast listeners */ TULIP_LOCK(sc); tulip_addr_filter(sc); /* reset multicast filtering */ tulip_init(sc); TULIP_UNLOCK(sc); error = 0; break; } case SIOCSIFMTU: /* * Set the interface MTU. 
*/ TULIP_LOCK(sc); if (ifr->ifr_mtu > ETHERMTU) { error = EINVAL; break; } ifp->if_mtu = ifr->ifr_mtu; TULIP_UNLOCK(sc); break; #ifdef SIOCGADDRROM case SIOCGADDRROM: { error = copyout(sc->tulip_rombuf, ifr->ifr_data, sizeof(sc->tulip_rombuf)); break; } #endif #ifdef SIOCGCHIPID case SIOCGCHIPID: { ifr->ifr_metric = (int) sc->tulip_chipid; break; } #endif default: { error = ether_ioctl(ifp, cmd, data); break; } } TULIP_PERFEND(ifioctl); return error; } static void tulip_ifstart( struct ifnet * const ifp) { TULIP_PERFSTART(ifstart) tulip_softc_t * const sc = (tulip_softc_t *)ifp->if_softc; if (ifp->if_flags & IFF_RUNNING) { TULIP_LOCK(sc); tulip_start(sc); TULIP_UNLOCK(sc); } TULIP_PERFEND(ifstart); } static void tulip_start(tulip_softc_t * const sc) { struct mbuf *m; TULIP_LOCK_ASSERT(sc); if ((sc->tulip_flags & (TULIP_WANTSETUP|TULIP_TXPROBE_ACTIVE)) == TULIP_WANTSETUP) tulip_txput_setup(sc); while (!IFQ_DRV_IS_EMPTY(&sc->tulip_ifp->if_snd)) { IFQ_DRV_DEQUEUE(&sc->tulip_ifp->if_snd, m); if(m == NULL) break; if ((m = tulip_txput(sc, m)) != NULL) { IFQ_DRV_PREPEND(&sc->tulip_ifp->if_snd, m); break; } } } /* * Even though this routine runs at device spl, it does not break * our use of splnet (splsoftnet under NetBSD) for the majority * of this driver since * if_watcbog is called from if_watchdog which is called from * splsoftclock which is below spl[soft]net. */ static void tulip_ifwatchdog( struct ifnet *ifp) { TULIP_PERFSTART(ifwatchdog) tulip_softc_t * const sc = (tulip_softc_t *)ifp->if_softc; #if defined(TULIP_DEBUG) u_int32_t rxintrs; #endif TULIP_LOCK(sc); #if defined(TULIP_DEBUG) rxintrs = sc->tulip_dbg.dbg_rxintrs - sc->tulip_dbg.dbg_last_rxintrs; if (rxintrs > sc->tulip_dbg.dbg_high_rxintrs_hz) sc->tulip_dbg.dbg_high_rxintrs_hz = rxintrs; sc->tulip_dbg.dbg_last_rxintrs = sc->tulip_dbg.dbg_rxintrs; #endif /* TULIP_DEBUG */ sc->tulip_ifp->if_timer = 1; /* * These should be rare so do a bulk test up front so we can just skip * them if needed. 
*/ if (sc->tulip_flags & (TULIP_SYSTEMERROR|TULIP_RXBUFSLOW|TULIP_NOMESSAGES)) { /* * If the number of receive buffer is low, try to refill */ if (sc->tulip_flags & TULIP_RXBUFSLOW) tulip_rx_intr(sc); if (sc->tulip_flags & TULIP_SYSTEMERROR) { if_printf(sc->tulip_ifp, "%d system errors: last was %s\n", sc->tulip_system_errors, tulip_system_errors[sc->tulip_last_system_error]); } if (sc->tulip_statusbits) { tulip_print_abnormal_interrupt(sc, sc->tulip_statusbits); sc->tulip_statusbits = 0; } sc->tulip_flags &= ~(TULIP_NOMESSAGES|TULIP_SYSTEMERROR); } if (sc->tulip_txtimer) tulip_tx_intr(sc); if (sc->tulip_txtimer && --sc->tulip_txtimer == 0) { if_printf(sc->tulip_ifp, "transmission timeout\n"); if (TULIP_DO_AUTOSENSE(sc)) { sc->tulip_media = TULIP_MEDIA_UNKNOWN; sc->tulip_probe_state = TULIP_PROBE_INACTIVE; sc->tulip_flags &= ~(TULIP_WANTRXACT|TULIP_LINKUP); } tulip_reset(sc); tulip_init(sc); } TULIP_PERFEND(ifwatchdog); TULIP_PERFMERGE(sc, perf_intr_cycles); TULIP_PERFMERGE(sc, perf_ifstart_cycles); TULIP_PERFMERGE(sc, perf_ifioctl_cycles); TULIP_PERFMERGE(sc, perf_ifwatchdog_cycles); TULIP_PERFMERGE(sc, perf_timeout_cycles); TULIP_PERFMERGE(sc, perf_ifstart_one_cycles); TULIP_PERFMERGE(sc, perf_txput_cycles); TULIP_PERFMERGE(sc, perf_txintr_cycles); TULIP_PERFMERGE(sc, perf_rxintr_cycles); TULIP_PERFMERGE(sc, perf_rxget_cycles); TULIP_PERFMERGE(sc, perf_intr); TULIP_PERFMERGE(sc, perf_ifstart); TULIP_PERFMERGE(sc, perf_ifioctl); TULIP_PERFMERGE(sc, perf_ifwatchdog); TULIP_PERFMERGE(sc, perf_timeout); TULIP_PERFMERGE(sc, perf_ifstart_one); TULIP_PERFMERGE(sc, perf_txput); TULIP_PERFMERGE(sc, perf_txintr); TULIP_PERFMERGE(sc, perf_rxintr); TULIP_PERFMERGE(sc, perf_rxget); TULIP_UNLOCK(sc); } /* * All printf's are real as of now! 
*/ #ifdef printf #undef printf #endif static void tulip_attach( tulip_softc_t * const sc) { struct ifnet *ifp; ifp = sc->tulip_ifp = if_alloc(IFT_ETHER); /* XXX: driver name/unit should be set some other way */ if_initname(ifp, "de", sc->tulip_unit); ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST; ifp->if_ioctl = tulip_ifioctl; ifp->if_start = tulip_ifstart; ifp->if_watchdog = tulip_ifwatchdog; ifp->if_timer = 1; ifp->if_init = tulip_ifinit; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); if_printf(ifp, "%s%s pass %d.%d%s\n", sc->tulip_boardid, tulip_chipdescs[sc->tulip_chipid], (sc->tulip_revinfo & 0xF0) >> 4, sc->tulip_revinfo & 0x0F, (sc->tulip_features & (TULIP_HAVE_ISVSROM|TULIP_HAVE_OKSROM)) == TULIP_HAVE_ISVSROM ? " (invalid EESPROM checksum)" : ""); TULIP_LOCK(sc); #if defined(__alpha__) /* * In case the SRM console told us about a bogus media, * we need to check to be safe. */ if (sc->tulip_mediums[sc->tulip_media] == NULL) sc->tulip_media = TULIP_MEDIA_UNKNOWN; #endif (*sc->tulip_boardsw->bd_media_probe)(sc); ifmedia_init(&sc->tulip_ifmedia, 0, tulip_ifmedia_change, tulip_ifmedia_status); sc->tulip_flags &= ~TULIP_DEVICEPROBE; tulip_ifmedia_add(sc); tulip_reset(sc); TULIP_UNLOCK(sc); ether_ifattach(sc->tulip_ifp, sc->tulip_enaddr); } #if defined(TULIP_BUS_DMA) #if !defined(TULIP_BUS_DMA_NOTX) || !defined(TULIP_BUS_DMA_NORX) static int tulip_busdma_allocmem( tulip_softc_t * const sc, size_t size, bus_dmamap_t *map_p, tulip_desc_t **desc_p) { bus_dma_segment_t segs[1]; int nsegs, error; error = bus_dmamem_alloc(sc->tulip_dmatag, size, 1, PAGE_SIZE, segs, sizeof(segs)/sizeof(segs[0]), &nsegs, BUS_DMA_NOWAIT); if (error == 0) { void *desc; error = bus_dmamem_map(sc->tulip_dmatag, segs, nsegs, size, (void *) &desc, BUS_DMA_NOWAIT|BUS_DMA_COHERENT); if (error == 0) { bus_dmamap_t map; error = bus_dmamap_create(sc->tulip_dmatag, size, 1, size, 0, BUS_DMA_NOWAIT, &map); if 
(error == 0) { error = bus_dmamap_load(sc->tulip_dmatag, map, desc, size, NULL, BUS_DMA_NOWAIT); if (error) bus_dmamap_destroy(sc->tulip_dmatag, map); else *map_p = map; } if (error) bus_dmamem_unmap(sc->tulip_dmatag, desc, size); } if (error) bus_dmamem_free(sc->tulip_dmatag, segs, nsegs); else *desc_p = desc; } return error; } #endif static int tulip_busdma_init( tulip_softc_t * const sc) { int error = 0; #if !defined(TULIP_BUS_DMA_NOTX) /* * Allocate dmamap for setup descriptor */ error = bus_dmamap_create(sc->tulip_dmatag, sizeof(sc->tulip_setupbuf), 2, sizeof(sc->tulip_setupbuf), 0, BUS_DMA_NOWAIT, &sc->tulip_setupmap); if (error == 0) { error = bus_dmamap_load(sc->tulip_dmatag, sc->tulip_setupmap, sc->tulip_setupbuf, sizeof(sc->tulip_setupbuf), NULL, BUS_DMA_NOWAIT); if (error) bus_dmamap_destroy(sc->tulip_dmatag, sc->tulip_setupmap); } /* * Allocate space and dmamap for transmit ring */ if (error == 0) { error = tulip_busdma_allocmem(sc, sizeof(tulip_desc_t) * TULIP_TXDESCS, &sc->tulip_txdescmap, &sc->tulip_txdescs); } /* * Allocate dmamaps for each transmit descriptors */ if (error == 0) { while (error == 0 && sc->tulip_txmaps_free < TULIP_TXDESCS) { bus_dmamap_t map; if ((error = TULIP_TXMAP_CREATE(sc, &map)) == 0) sc->tulip_txmaps[sc->tulip_txmaps_free++] = map; } if (error) { while (sc->tulip_txmaps_free > 0) bus_dmamap_destroy(sc->tulip_dmatag, sc->tulip_txmaps[--sc->tulip_txmaps_free]); } } #else if (error == 0) { sc->tulip_txdescs = (tulip_desc_t *) malloc(TULIP_TXDESCS * sizeof(tulip_desc_t), M_DEVBUF, M_NOWAIT); if (sc->tulip_txdescs == NULL) error = ENOMEM; } #endif #if !defined(TULIP_BUS_DMA_NORX) /* * Allocate space and dmamap for receive ring */ if (error == 0) { error = tulip_busdma_allocmem(sc, sizeof(tulip_desc_t) * TULIP_RXDESCS, &sc->tulip_rxdescmap, &sc->tulip_rxdescs); } /* * Allocate dmamaps for each receive descriptors */ if (error == 0) { while (error == 0 && sc->tulip_rxmaps_free < TULIP_RXDESCS) { bus_dmamap_t map; if ((error = 
TULIP_RXMAP_CREATE(sc, &map)) == 0) sc->tulip_rxmaps[sc->tulip_rxmaps_free++] = map; } if (error) { while (sc->tulip_rxmaps_free > 0) bus_dmamap_destroy(sc->tulip_dmatag, sc->tulip_rxmaps[--sc->tulip_rxmaps_free]); } } #else if (error == 0) { sc->tulip_rxdescs = (tulip_desc_t *) malloc(TULIP_RXDESCS * sizeof(tulip_desc_t), M_DEVBUF, M_NOWAIT); if (sc->tulip_rxdescs == NULL) error = ENOMEM; } #endif return error; } #endif /* TULIP_BUS_DMA */ static void tulip_initcsrs( tulip_softc_t * const sc, tulip_csrptr_t csr_base, size_t csr_size) { sc->tulip_csrs.csr_busmode = csr_base + 0 * csr_size; sc->tulip_csrs.csr_txpoll = csr_base + 1 * csr_size; sc->tulip_csrs.csr_rxpoll = csr_base + 2 * csr_size; sc->tulip_csrs.csr_rxlist = csr_base + 3 * csr_size; sc->tulip_csrs.csr_txlist = csr_base + 4 * csr_size; sc->tulip_csrs.csr_status = csr_base + 5 * csr_size; sc->tulip_csrs.csr_command = csr_base + 6 * csr_size; sc->tulip_csrs.csr_intr = csr_base + 7 * csr_size; sc->tulip_csrs.csr_missed_frames = csr_base + 8 * csr_size; sc->tulip_csrs.csr_9 = csr_base + 9 * csr_size; sc->tulip_csrs.csr_10 = csr_base + 10 * csr_size; sc->tulip_csrs.csr_11 = csr_base + 11 * csr_size; sc->tulip_csrs.csr_12 = csr_base + 12 * csr_size; sc->tulip_csrs.csr_13 = csr_base + 13 * csr_size; sc->tulip_csrs.csr_14 = csr_base + 14 * csr_size; sc->tulip_csrs.csr_15 = csr_base + 15 * csr_size; } static void tulip_initring( tulip_softc_t * const sc, tulip_ringinfo_t * const ri, tulip_desc_t *descs, int ndescs) { ri->ri_max = ndescs; ri->ri_first = descs; ri->ri_last = ri->ri_first + ri->ri_max; bzero((caddr_t) ri->ri_first, sizeof(ri->ri_first[0]) * ri->ri_max); ri->ri_last[-1].d_flag = TULIP_DFLAG_ENDRING; } /* * This is the PCI configuration support. 
*/ #define PCI_CBIO PCIR_BAR(0) /* Configuration Base IO Address */ #define PCI_CBMA PCIR_BAR(1) /* Configuration Base Memory Address */ #define PCI_CFDA 0x40 /* Configuration Driver Area */ static int tulip_pci_probe(device_t dev) { const char *name = NULL; if (pci_get_vendor(dev) != DEC_VENDORID) return ENXIO; /* * Some LanMedia WAN cards use the Tulip chip, but they have * their own driver, and we should not recognize them */ if (pci_get_subvendor(dev) == 0x1376) return ENXIO; switch (pci_get_device(dev)) { case CHIPID_21040: name = "Digital 21040 Ethernet"; break; case CHIPID_21041: name = "Digital 21041 Ethernet"; break; case CHIPID_21140: if (pci_get_revid(dev) >= 0x20) name = "Digital 21140A Fast Ethernet"; else name = "Digital 21140 Fast Ethernet"; break; case CHIPID_21142: if (pci_get_revid(dev) >= 0x20) name = "Digital 21143 Fast Ethernet"; else name = "Digital 21142 Fast Ethernet"; break; } if (name) { device_set_desc(dev, name); return BUS_PROBE_LOW_PRIORITY; } return ENXIO; } static int tulip_shutdown(device_t dev) { tulip_softc_t * const sc = device_get_softc(dev); TULIP_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET); DELAY(10); /* Wait 10 microseconds (actually 50 PCI cycles but at 33MHz that comes to two microseconds but wait a bit longer anyways) */ return 0; } static int tulip_pci_attach(device_t dev) { tulip_softc_t *sc; #if defined(__alpha__) tulip_media_t media = TULIP_MEDIA_UNKNOWN; #endif int retval, idx; u_int32_t revinfo, cfdainfo; unsigned csroffset = TULIP_PCI_CSROFFSET; unsigned csrsize = TULIP_PCI_CSRSIZE; tulip_csrptr_t csr_base; tulip_chipid_t chipid = TULIP_CHIPID_UNKNOWN; struct resource *res; int rid, unit; unit = device_get_unit(dev); if (unit >= TULIP_MAX_DEVICES) { device_printf(dev, "not configured; limit of %d reached or exceeded\n", TULIP_MAX_DEVICES); return ENXIO; } revinfo = pci_get_revid(dev); cfdainfo = pci_read_config(dev, PCI_CFDA, 4); /* turn busmaster on in case BIOS doesn't set it */ pci_enable_busmaster(dev); if 
(pci_get_vendor(dev) == DEC_VENDORID) { if (pci_get_device(dev) == CHIPID_21040) chipid = TULIP_21040; else if (pci_get_device(dev) == CHIPID_21041) chipid = TULIP_21041; else if (pci_get_device(dev) == CHIPID_21140) chipid = (revinfo >= 0x20) ? TULIP_21140A : TULIP_21140; else if (pci_get_device(dev) == CHIPID_21142) chipid = (revinfo >= 0x20) ? TULIP_21143 : TULIP_21142; } if (chipid == TULIP_CHIPID_UNKNOWN) return ENXIO; if (chipid == TULIP_21040 && revinfo < 0x20) { device_printf(dev, "not configured; 21040 pass 2.0 required (%d.%d found)\n", revinfo >> 4, revinfo & 0x0f); return ENXIO; } else if (chipid == TULIP_21140 && revinfo < 0x11) { device_printf(dev, "not configured; 21140 pass 1.1 required (%d.%d found)\n", revinfo >> 4, revinfo & 0x0f); return ENXIO; } sc = device_get_softc(dev); sc->tulip_pci_busno = pci_get_bus(dev); sc->tulip_pci_devno = pci_get_slot(dev); sc->tulip_chipid = chipid; sc->tulip_flags |= TULIP_DEVICEPROBE; if (chipid == TULIP_21140 || chipid == TULIP_21140A) sc->tulip_features |= TULIP_HAVE_GPR|TULIP_HAVE_STOREFWD; if (chipid == TULIP_21140A && revinfo <= 0x22) sc->tulip_features |= TULIP_HAVE_RXBADOVRFLW; if (chipid == TULIP_21140) sc->tulip_features |= TULIP_HAVE_BROKEN_HASH; if (chipid != TULIP_21040 && chipid != TULIP_21140) sc->tulip_features |= TULIP_HAVE_POWERMGMT; if (chipid == TULIP_21041 || chipid == TULIP_21142 || chipid == TULIP_21143) { sc->tulip_features |= TULIP_HAVE_DUALSENSE; if (chipid != TULIP_21041 || revinfo >= 0x20) sc->tulip_features |= TULIP_HAVE_SIANWAY; if (chipid != TULIP_21041) sc->tulip_features |= TULIP_HAVE_SIAGP|TULIP_HAVE_RXBADOVRFLW|TULIP_HAVE_STOREFWD; if (chipid != TULIP_21041 && revinfo >= 0x20) sc->tulip_features |= TULIP_HAVE_SIA100; } if (sc->tulip_features & TULIP_HAVE_POWERMGMT && (cfdainfo & (TULIP_CFDA_SLEEP|TULIP_CFDA_SNOOZE))) { cfdainfo &= ~(TULIP_CFDA_SLEEP|TULIP_CFDA_SNOOZE); pci_write_config(dev, PCI_CFDA, cfdainfo, 4); DELAY(11*1000); } #if defined(__alpha__) /* * The Alpha SRM 
console encodes a console set media in the driver * part of the CFDA register. Note that the Multia presents a * problem in that its BNC mode is really EXTSIA. So in that case * force a probe. */ switch ((cfdainfo >> 8) & 0xff) { case 1: media = chipid > TULIP_21040 ? TULIP_MEDIA_AUI : TULIP_MEDIA_AUIBNC; break; case 2: media = chipid > TULIP_21040 ? TULIP_MEDIA_BNC : TULIP_MEDIA_UNKNOWN; break; case 3: media = TULIP_MEDIA_10BASET; break; case 4: media = TULIP_MEDIA_10BASET_FD; break; case 5: media = TULIP_MEDIA_100BASETX; break; case 6: media = TULIP_MEDIA_100BASETX_FD; break; default: media = TULIP_MEDIA_UNKNOWN; break; } #endif sc->tulip_unit = unit; sc->tulip_revinfo = revinfo; #if defined(TULIP_IOMAPPED) rid = PCI_CBIO; res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); #else rid = PCI_CBMA; res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); #endif if (!res) return ENXIO; sc->tulip_csrs_bst = rman_get_bustag(res); sc->tulip_csrs_bsh = rman_get_bushandle(res); csr_base = 0; mtx_init(TULIP_MUTEX(sc), MTX_NETWORK_LOCK, device_get_nameunit(dev), MTX_DEF); callout_init(&sc->tulip_callout, CALLOUT_MPSAFE); tulips[unit] = sc; tulip_initcsrs(sc, csr_base + csroffset, csrsize); #if defined(TULIP_BUS_DMA) if ((retval = tulip_busdma_init(sc)) != 0) { printf("error initing bus_dma: %d\n", retval); mtx_destroy(TULIP_MUTEX(sc)); return ENXIO; } #else sc->tulip_rxdescs = (tulip_desc_t *) malloc(sizeof(tulip_desc_t) * TULIP_RXDESCS, M_DEVBUF, M_NOWAIT); sc->tulip_txdescs = (tulip_desc_t *) malloc(sizeof(tulip_desc_t) * TULIP_TXDESCS, M_DEVBUF, M_NOWAIT); if (sc->tulip_rxdescs == NULL || sc->tulip_txdescs == NULL) { device_printf(dev, "malloc failed\n"); if (sc->tulip_rxdescs) free((caddr_t) sc->tulip_rxdescs, M_DEVBUF); if (sc->tulip_txdescs) free((caddr_t) sc->tulip_txdescs, M_DEVBUF); mtx_destroy(TULIP_MUTEX(sc)); return ENXIO; } #endif tulip_initring(sc, &sc->tulip_rxinfo, sc->tulip_rxdescs, TULIP_RXDESCS); tulip_initring(sc, 
&sc->tulip_txinfo, sc->tulip_txdescs, TULIP_TXDESCS); /* * Make sure there won't be any interrupts or such... */ TULIP_CSR_WRITE(sc, csr_busmode, TULIP_BUSMODE_SWRESET); DELAY(100); /* Wait 10 microseconds (actually 50 PCI cycles but at 33MHz that comes to two microseconds but wait a bit longer anyways) */ TULIP_LOCK(sc); retval = tulip_read_macaddr(sc); TULIP_UNLOCK(sc); if (retval < 0) { device_printf(dev, "can't read ENET ROM (why=%d) (", retval); for (idx = 0; idx < 32; idx++) printf("%02x", sc->tulip_rombuf[idx]); printf("\n"); device_printf(dev, "%s%s pass %d.%d\n", sc->tulip_boardid, tulip_chipdescs[sc->tulip_chipid], (sc->tulip_revinfo & 0xF0) >> 4, sc->tulip_revinfo & 0x0F); device_printf(dev, "address unknown\n"); } else { void (*intr_rtn)(void *) = tulip_intr_normal; if (sc->tulip_features & TULIP_HAVE_SHAREDINTR) intr_rtn = tulip_intr_shared; #if defined(__alpha__) sc->tulip_media = media; #endif tulip_attach(sc); /* Setup interrupt last. */ if ((sc->tulip_features & TULIP_HAVE_SLAVEDINTR) == 0) { void *ih; rid = 0; res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (res == 0 || bus_setup_intr(dev, res, INTR_TYPE_NET | INTR_MPSAFE, intr_rtn, sc, &ih)) { device_printf(dev, "couldn't map interrupt\n"); free((caddr_t) sc->tulip_rxdescs, M_DEVBUF); free((caddr_t) sc->tulip_txdescs, M_DEVBUF); ether_ifdetach(sc->tulip_ifp); if_free(sc->tulip_ifp); mtx_destroy(TULIP_MUTEX(sc)); return ENXIO; } } #if defined(__alpha__) TULIP_LOCK(sc); if (sc->tulip_media != TULIP_MEDIA_UNKNOWN) tulip_linkup(sc, media); TULIP_UNLOCK(sc); #endif } return 0; } static device_method_t tulip_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, tulip_pci_probe), DEVMETHOD(device_attach, tulip_pci_attach), DEVMETHOD(device_shutdown, tulip_shutdown), { 0, 0 } }; static driver_t tulip_pci_driver = { "de", tulip_pci_methods, sizeof(tulip_softc_t), }; static devclass_t tulip_devclass; DRIVER_MODULE(de, pci, tulip_pci_driver, tulip_devclass, 0, 
0); Index: stable/6/sys/pci/if_pcn.c =================================================================== --- stable/6/sys/pci/if_pcn.c (revision 149421) +++ stable/6/sys/pci/if_pcn.c (revision 149422) @@ -1,1449 +1,1451 @@ /*- * Copyright (c) 2000 Berkeley Software Design, Inc. * Copyright (c) 1997, 1998, 1999, 2000 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /* * AMD Am79c972 fast ethernet PCI NIC driver. Datasheets are available * from http://www.amd.com. * * The AMD PCnet/PCI controllers are more advanced and functional * versions of the venerable 7990 LANCE. The PCnet/PCI chips retain * backwards compatibility with the LANCE and thus can be made * to work with older LANCE drivers. This is in fact how the * PCnet/PCI chips were supported in FreeBSD originally. The trouble * is that the PCnet/PCI devices offer several performance enhancements * which can't be exploited in LANCE compatibility mode. Chief among * these enhancements is the ability to perform PCI DMA operations * using 32-bit addressing (which eliminates the need for ISA * bounce-buffering), and special receive buffer alignment (which * allows the receive handler to pass packets to the upper protocol * layers without copying on both the x86 and alpha platforms). */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include #include #include #include #include #define PCN_USEIOSPACE #include MODULE_DEPEND(pcn, pci, 1, 1, 1); MODULE_DEPEND(pcn, ether, 1, 1, 1); MODULE_DEPEND(pcn, miibus, 1, 1, 1); /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* * Various supported device vendors/types and their names. 
*/ static struct pcn_type pcn_devs[] = { { PCN_VENDORID, PCN_DEVICEID_PCNET, "AMD PCnet/PCI 10/100BaseTX" }, { PCN_VENDORID, PCN_DEVICEID_HOME, "AMD PCnet/Home HomePNA" }, { 0, 0, NULL } }; static struct pcn_chipid { u_int32_t id; char * name; } pcn_chipid[] = { { Am79C960, "Am79C960" }, { Am79C961, "Am79C961" }, { Am79C961A, "Am79C961A" }, { Am79C965, "Am79C965" }, { Am79C970, "Am79C970" }, { Am79C970A, "Am79C970A" }, { Am79C971, "Am79C971" }, { Am79C972, "Am79C972" }, { Am79C973, "Am79C973" }, { Am79C978, "Am79C978" }, { Am79C975, "Am79C975" }, { Am79C976, "Am79C976" }, { 0, NULL }, }; static char * pcn_chipid_name(u_int32_t); static u_int32_t pcn_chip_id(device_t); static u_int32_t pcn_csr_read(struct pcn_softc *, int); static u_int16_t pcn_csr_read16(struct pcn_softc *, int); static u_int16_t pcn_bcr_read16(struct pcn_softc *, int); static void pcn_csr_write(struct pcn_softc *, int, int); static u_int32_t pcn_bcr_read(struct pcn_softc *, int); static void pcn_bcr_write(struct pcn_softc *, int, int); static int pcn_probe(device_t); static int pcn_attach(device_t); static int pcn_detach(device_t); static int pcn_newbuf(struct pcn_softc *, int, struct mbuf *); static int pcn_encap(struct pcn_softc *, struct mbuf *, u_int32_t *); static void pcn_rxeof(struct pcn_softc *); static void pcn_txeof(struct pcn_softc *); static void pcn_intr(void *); static void pcn_tick(void *); static void pcn_start(struct ifnet *); static int pcn_ioctl(struct ifnet *, u_long, caddr_t); static void pcn_init(void *); static void pcn_stop(struct pcn_softc *); static void pcn_watchdog(struct ifnet *); static void pcn_shutdown(device_t); static int pcn_ifmedia_upd(struct ifnet *); static void pcn_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int pcn_miibus_readreg(device_t, int, int); static int pcn_miibus_writereg(device_t, int, int, int); static void pcn_miibus_statchg(device_t); static void pcn_setfilt(struct ifnet *); static void pcn_setmulti(struct pcn_softc *); static void 
pcn_reset(struct pcn_softc *); static int pcn_list_rx_init(struct pcn_softc *); static int pcn_list_tx_init(struct pcn_softc *); #ifdef PCN_USEIOSPACE #define PCN_RES SYS_RES_IOPORT #define PCN_RID PCN_PCI_LOIO #else #define PCN_RES SYS_RES_MEMORY #define PCN_RID PCN_PCI_LOMEM #endif static device_method_t pcn_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pcn_probe), DEVMETHOD(device_attach, pcn_attach), DEVMETHOD(device_detach, pcn_detach), DEVMETHOD(device_shutdown, pcn_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, pcn_miibus_readreg), DEVMETHOD(miibus_writereg, pcn_miibus_writereg), DEVMETHOD(miibus_statchg, pcn_miibus_statchg), { 0, 0 } }; static driver_t pcn_driver = { "pcn", pcn_methods, sizeof(struct pcn_softc) }; static devclass_t pcn_devclass; DRIVER_MODULE(pcn, pci, pcn_driver, pcn_devclass, 0, 0); DRIVER_MODULE(miibus, pcn, miibus_driver, miibus_devclass, 0, 0); #define PCN_CSR_SETBIT(sc, reg, x) \ pcn_csr_write(sc, reg, pcn_csr_read(sc, reg) | (x)) #define PCN_CSR_CLRBIT(sc, reg, x) \ pcn_csr_write(sc, reg, pcn_csr_read(sc, reg) & ~(x)) #define PCN_BCR_SETBIT(sc, reg, x) \ pcn_bcr_write(sc, reg, pcn_bcr_read(sc, reg) | (x)) #define PCN_BCR_CLRBIT(sc, reg, x) \ pcn_bcr_write(sc, reg, pcn_bcr_read(sc, reg) & ~(x)) static u_int32_t pcn_csr_read(sc, reg) struct pcn_softc *sc; int reg; { CSR_WRITE_4(sc, PCN_IO32_RAP, reg); return(CSR_READ_4(sc, PCN_IO32_RDP)); } static u_int16_t pcn_csr_read16(sc, reg) struct pcn_softc *sc; int reg; { CSR_WRITE_2(sc, PCN_IO16_RAP, reg); return(CSR_READ_2(sc, PCN_IO16_RDP)); } static void pcn_csr_write(sc, reg, val) struct pcn_softc *sc; int reg; int val; { CSR_WRITE_4(sc, PCN_IO32_RAP, reg); CSR_WRITE_4(sc, PCN_IO32_RDP, val); return; } static u_int32_t pcn_bcr_read(sc, reg) struct pcn_softc *sc; int reg; { CSR_WRITE_4(sc, PCN_IO32_RAP, reg); return(CSR_READ_4(sc, 
PCN_IO32_BDP));		/* tail of pcn_bcr_read(): fetch selected BCR via BDP */
}

/*
 * Read a Bus Configuration Register through the 16-bit (WIO) register
 * window: select the register number via RAP, then read it from BDP.
 * Used while the chip may still be in 16-bit mode (see pcn_chip_id()).
 */
static u_int16_t pcn_bcr_read16(sc, reg)
	struct pcn_softc	*sc;
	int			reg;
{
	CSR_WRITE_2(sc, PCN_IO16_RAP, reg);
	return(CSR_READ_2(sc, PCN_IO16_BDP));
}

/*
 * Write a Bus Configuration Register through the 32-bit (DWIO)
 * register window.
 */
static void pcn_bcr_write(sc, reg, val)
	struct pcn_softc	*sc;
	int			reg;
	int			val;
{
	CSR_WRITE_4(sc, PCN_IO32_RAP, reg);
	CSR_WRITE_4(sc, PCN_IO32_BDP, val);
	return;
}

/*
 * MII bus read: issue a PHY register read through the MIIADDR/MIIDATA
 * BCRs.  Returns 0 for PHY addresses above the first one that
 * responded, and for reads that come back as all ones (no PHY there).
 */
static int pcn_miibus_readreg(dev, phy, reg)
	device_t		dev;
	int			phy, reg;
{
	struct pcn_softc	*sc;
	int			val;

	sc = device_get_softc(dev);

	/* Once a PHY has answered, ignore higher PHY addresses. */
	if (sc->pcn_phyaddr && phy > sc->pcn_phyaddr)
		return(0);

	pcn_bcr_write(sc, PCN_BCR_MIIADDR, reg | (phy << 5));
	val = pcn_bcr_read(sc, PCN_BCR_MIIDATA) & 0xFFFF;
	if (val == 0xFFFF)
		return(0);

	/* Remember the first responding PHY's address. */
	sc->pcn_phyaddr = phy;

	return(val);
}

/*
 * MII bus write: write a PHY register through the MIIADDR/MIIDATA BCRs.
 */
static int pcn_miibus_writereg(dev, phy, reg, data)
	device_t		dev;
	int			phy, reg, data;
{
	struct pcn_softc	*sc;

	sc = device_get_softc(dev);

	pcn_bcr_write(sc, PCN_BCR_MIIADDR, reg | (phy << 5));
	pcn_bcr_write(sc, PCN_BCR_MIIDATA, data);

	return(0);
}

/*
 * MII status change callback: mirror the negotiated duplex setting
 * into the chip's DUPLEX BCR.
 */
static void pcn_miibus_statchg(dev)
	device_t		dev;
{
	struct pcn_softc	*sc;
	struct mii_data		*mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->pcn_miibus);

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
		PCN_BCR_SETBIT(sc, PCN_BCR_DUPLEX, PCN_DUPLEX_FDEN);
	} else {
		PCN_BCR_CLRBIT(sc, PCN_BCR_DUPLEX, PCN_DUPLEX_FDEN);
	}

	return;
}

/*
 * Program the multicast filter (CSRs MAR0..MAR3, a 64-bit hash table
 * of four 16-bit words) from the interface's multicast address list.
 * The chip is suspended (EXTCTL1_SPND) for the duration of the update.
 */
static void pcn_setmulti(sc)
	struct pcn_softc	*sc;
{
	struct ifnet		*ifp;
	struct ifmultiaddr	*ifma;
	u_int32_t		h, i;
	u_int16_t		hashes[4] = { 0, 0, 0, 0 };

	ifp = sc->pcn_ifp;

	PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND);

	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		/* Accept all multicast: set every hash bit and resume. */
		for (i = 0; i < 4; i++)
			pcn_csr_write(sc, PCN_CSR_MAR0 + i, 0xFFFF);
		PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND);
		return;
	}

	/* first, zot all the existing hash bits */
	for (i = 0; i < 4; i++)
		pcn_csr_write(sc, PCN_CSR_MAR0 + i, 0);

	/* now program new ones */
	/*
	 * NOTE(review): the '+' markers below are the unified-diff
	 * additions of this revision — the if_multiaddrs list must be
	 * locked while it is traversed.
	 */
+	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/* Top 6 CRC bits select one of 64 hash-table positions. */
		h = ether_crc32_le(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		hashes[h >> 4] |= 1 << (h & 0xF);
	}
+	IF_ADDR_UNLOCK(ifp);

	for (i = 0; i < 4; i++)
		pcn_csr_write(sc, PCN_CSR_MAR0 + i, hashes[i]);

	PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND);

	return;
}

/*
 * Reset the controller, then leave it in 32-bit (DWIO) mode with
 * software style 3 selected.
 */
static void pcn_reset(sc)
	struct pcn_softc	*sc;
{
	/*
	 * Issue a reset by reading from the RESET register.
	 * Note that we don't know if the chip is operating in
	 * 16-bit or 32-bit mode at this point, so we attempt
	 * to reset the chip both ways. If one fails, the other
	 * will succeed.
	 */
	CSR_READ_2(sc, PCN_IO16_RESET);
	CSR_READ_4(sc, PCN_IO32_RESET);

	/* Wait a little while for the chip to get its brains in order. */
	DELAY(1000);

	/* Select 32-bit (DWIO) mode */
	CSR_WRITE_4(sc, PCN_IO32_RDP, 0);

	/* Select software style 3. */
	pcn_bcr_write(sc, PCN_BCR_SSTYLE, PCN_SWSTYLE_PCNETPCI_BURST);

	return;
}

/*
 * Map a chip ID (as returned by pcn_chip_id()) to a printable name
 * via the pcn_chipid[] table; "Unknown" if it is not listed.
 */
static char * pcn_chipid_name (u_int32_t id)
{
	struct pcn_chipid *p = pcn_chipid;

	while (p->name) {
		if (id == p->id)
			return (p->name);
		p++;
	}
	return ("Unknown");
}

/*
 * Read the chip ID registers, first detecting whether the chip is in
 * 16-bit or 32-bit mode (rationale below).
 */
static u_int32_t pcn_chip_id (device_t dev)
{
	struct pcn_softc	*sc;
	u_int32_t		chip_id;

	sc = device_get_softc(dev);

	/*
	 * Note: we can *NOT* put the chip into
	 * 32-bit mode yet. The lnc driver will only
	 * work in 16-bit mode, and once the chip
	 * goes into 32-bit mode, the only way to
	 * get it out again is with a hardware reset.
	 * So if pcn_probe() is called before the
	 * lnc driver's probe routine, the chip will
	 * be locked into 32-bit operation and the lnc
	 * driver will be unable to attach to it.
	 * Note II: if the chip happens to already
	 * be in 32-bit mode, we still need to check
	 * the chip ID, but first we have to detect
	 * 32-bit mode using only 16-bit operations.
	 * The safest way to do this is to read the
	 * PCI subsystem ID from BCR23/24 and compare
	 * that with the value read from PCI config
	 * space.
*/ chip_id = pcn_bcr_read16(sc, PCN_BCR_PCISUBSYSID); chip_id <<= 16; chip_id |= pcn_bcr_read16(sc, PCN_BCR_PCISUBVENID); /* * Note III: the test for 0x10001000 is a hack to * pacify VMware, who's pseudo-PCnet interface is * broken. Reading the subsystem register from PCI * config space yields 0x00000000 while reading the * same value from I/O space yields 0x10001000. It's * not supposed to be that way. */ if (chip_id == pci_read_config(dev, PCIR_SUBVEND_0, 4) || chip_id == 0x10001000) { /* We're in 16-bit mode. */ chip_id = pcn_csr_read16(sc, PCN_CSR_CHIPID1); chip_id <<= 16; chip_id |= pcn_csr_read16(sc, PCN_CSR_CHIPID0); } else { /* We're in 32-bit mode. */ chip_id = pcn_csr_read(sc, PCN_CSR_CHIPID1); chip_id <<= 16; chip_id |= pcn_csr_read(sc, PCN_CSR_CHIPID0); } return (chip_id); } static struct pcn_type * pcn_match (u_int16_t vid, u_int16_t did) { struct pcn_type *t; t = pcn_devs; while(t->pcn_name != NULL) { if ((vid == t->pcn_vid) && (did == t->pcn_did)) return (t); t++; } return (NULL); } /* * Probe for an AMD chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int pcn_probe(dev) device_t dev; { struct pcn_type *t; struct pcn_softc *sc; int rid; u_int32_t chip_id; t = pcn_match(pci_get_vendor(dev), pci_get_device(dev)); if (t == NULL) return (ENXIO); sc = device_get_softc(dev); /* * Temporarily map the I/O space so we can read the chip ID register. 
*/ rid = PCN_RID; sc->pcn_res = bus_alloc_resource_any(dev, PCN_RES, &rid, RF_ACTIVE); if (sc->pcn_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); return(ENXIO); } sc->pcn_btag = rman_get_bustag(sc->pcn_res); sc->pcn_bhandle = rman_get_bushandle(sc->pcn_res); chip_id = pcn_chip_id(dev); bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res); switch((chip_id >> 12) & PART_MASK) { case Am79C971: case Am79C972: case Am79C973: case Am79C975: case Am79C976: case Am79C978: break; default: return(ENXIO); } device_set_desc(dev, t->pcn_name); return(BUS_PROBE_DEFAULT); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int pcn_attach(dev) device_t dev; { u_int32_t eaddr[2]; struct pcn_softc *sc; struct ifnet *ifp; int unit, error = 0, rid; sc = device_get_softc(dev); unit = device_get_unit(dev); /* Initialize our mutex. */ mtx_init(&sc->pcn_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); /* * Map control/status registers. */ pci_enable_busmaster(dev); /* Retrieve the chip ID */ sc->pcn_type = (pcn_chip_id(dev) >> 12) & PART_MASK; device_printf(dev, "Chip ID %04x (%s)\n", sc->pcn_type, pcn_chipid_name(sc->pcn_type)); rid = PCN_RID; sc->pcn_res = bus_alloc_resource_any(dev, PCN_RES, &rid, RF_ACTIVE); if (sc->pcn_res == NULL) { printf("pcn%d: couldn't map ports/memory\n", unit); error = ENXIO; goto fail; } sc->pcn_btag = rman_get_bustag(sc->pcn_res); sc->pcn_bhandle = rman_get_bushandle(sc->pcn_res); /* Allocate interrupt */ rid = 0; sc->pcn_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->pcn_irq == NULL) { printf("pcn%d: couldn't map interrupt\n", unit); error = ENXIO; goto fail; } /* Reset the adapter. */ pcn_reset(sc); /* * Get station address from the EEPROM. 
*/ eaddr[0] = CSR_READ_4(sc, PCN_IO32_APROM00); eaddr[1] = CSR_READ_4(sc, PCN_IO32_APROM01); sc->pcn_unit = unit; callout_handle_init(&sc->pcn_stat_ch); sc->pcn_ldata = contigmalloc(sizeof(struct pcn_list_data), M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc->pcn_ldata == NULL) { printf("pcn%d: no memory for list buffers!\n", unit); error = ENXIO; goto fail; } bzero(sc->pcn_ldata, sizeof(struct pcn_list_data)); ifp = sc->pcn_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { printf("pcn%d: can not if_alloc()\n", unit); error = ENOSPC; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_NEEDSGIANT; ifp->if_ioctl = pcn_ioctl; ifp->if_start = pcn_start; ifp->if_watchdog = pcn_watchdog; ifp->if_init = pcn_init; ifp->if_baudrate = 10000000; ifp->if_snd.ifq_maxlen = PCN_TX_LIST_CNT - 1; /* * Do MII setup. */ if (mii_phy_probe(dev, &sc->pcn_miibus, pcn_ifmedia_upd, pcn_ifmedia_sts)) { printf("pcn%d: MII without any PHY!\n", sc->pcn_unit); if_free(ifp); error = ENXIO; goto fail; } /* * Call MI attach routine. */ ether_ifattach(ifp, (u_int8_t *) eaddr); /* Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->pcn_irq, INTR_TYPE_NET, pcn_intr, sc, &sc->pcn_intrhand); if (error) { printf("pcn%d: couldn't set up irq\n", unit); ether_ifdetach(ifp); goto fail; } fail: if (error) pcn_detach(dev); return(error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. 
 */
static int pcn_detach(dev)
	device_t		dev;
{
	struct pcn_softc	*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	ifp = sc->pcn_ifp;

	/* Detach is only legal once attach has initialized the mutex. */
	KASSERT(mtx_initialized(&sc->pcn_mtx), ("pcn mutex not initialized"));
	PCN_LOCK(sc);

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		pcn_reset(sc);
		pcn_stop(sc);
		ether_ifdetach(ifp);
		if_free(ifp);
	}
	if (sc->pcn_miibus)
		device_delete_child(dev, sc->pcn_miibus);
	bus_generic_detach(dev);

	/* Release only the resources that were actually acquired. */
	if (sc->pcn_intrhand)
		bus_teardown_intr(dev, sc->pcn_irq, sc->pcn_intrhand);
	if (sc->pcn_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pcn_irq);
	if (sc->pcn_res)
		bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res);

	if (sc->pcn_ldata) {
		contigfree(sc->pcn_ldata, sizeof(struct pcn_list_data),
		    M_DEVBUF);
	}
	PCN_UNLOCK(sc);

	mtx_destroy(&sc->pcn_mtx);

	return(0);
}

/*
 * Initialize the transmit descriptors.
 */
static int pcn_list_tx_init(sc)
	struct pcn_softc	*sc;
{
	struct pcn_list_data	*ld;
	struct pcn_ring_data	*cd;
	int			i;

	cd = &sc->pcn_cdata;
	ld = sc->pcn_ldata;

	/* Clear every TX descriptor and its shadow mbuf pointer. */
	for (i = 0; i < PCN_TX_LIST_CNT; i++) {
		cd->pcn_tx_chain[i] = NULL;
		ld->pcn_tx_list[i].pcn_tbaddr = 0;
		ld->pcn_tx_list[i].pcn_txctl = 0;
		ld->pcn_tx_list[i].pcn_txstat = 0;
	}

	cd->pcn_tx_prod = cd->pcn_tx_cons = cd->pcn_tx_cnt = 0;

	return(0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them.
 */
static int pcn_list_rx_init(sc)
	struct pcn_softc	*sc;
{
	struct pcn_ring_data	*cd;
	int			i;

	cd = &sc->pcn_cdata;

	/* Attach a fresh mbuf cluster to every RX descriptor. */
	for (i = 0; i < PCN_RX_LIST_CNT; i++) {
		if (pcn_newbuf(sc, i, NULL) == ENOBUFS)
			return(ENOBUFS);
	}

	cd->pcn_rx_prod = 0;

	return(0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
*/ static int pcn_newbuf(sc, idx, m) struct pcn_softc *sc; int idx; struct mbuf *m; { struct mbuf *m_new = NULL; struct pcn_rx_desc *c; c = &sc->pcn_ldata->pcn_rx_list[idx]; if (m == NULL) { MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) return(ENOBUFS); MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { m_freem(m_new); return(ENOBUFS); } m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; } else { m_new = m; m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; m_new->m_data = m_new->m_ext.ext_buf; } m_adj(m_new, ETHER_ALIGN); sc->pcn_cdata.pcn_rx_chain[idx] = m_new; c->pcn_rbaddr = vtophys(mtod(m_new, caddr_t)); c->pcn_bufsz = (~(PCN_RXLEN) + 1) & PCN_RXLEN_BUFSZ; c->pcn_bufsz |= PCN_RXLEN_MBO; c->pcn_rxstat = PCN_RXSTAT_STP|PCN_RXSTAT_ENP|PCN_RXSTAT_OWN; return(0); } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. */ static void pcn_rxeof(sc) struct pcn_softc *sc; { struct mbuf *m; struct ifnet *ifp; struct pcn_rx_desc *cur_rx; int i; PCN_LOCK_ASSERT(sc); ifp = sc->pcn_ifp; i = sc->pcn_cdata.pcn_rx_prod; while(PCN_OWN_RXDESC(&sc->pcn_ldata->pcn_rx_list[i])) { cur_rx = &sc->pcn_ldata->pcn_rx_list[i]; m = sc->pcn_cdata.pcn_rx_chain[i]; sc->pcn_cdata.pcn_rx_chain[i] = NULL; /* * If an error occurs, update stats, clear the * status word and leave the mbuf cluster in place: * it should simply get re-used next time this descriptor * comes up in the ring. */ if (cur_rx->pcn_rxstat & PCN_RXSTAT_ERR) { ifp->if_ierrors++; pcn_newbuf(sc, i, m); PCN_INC(i, PCN_RX_LIST_CNT); continue; } if (pcn_newbuf(sc, i, NULL)) { /* Ran out of mbufs; recycle this one. */ pcn_newbuf(sc, i, m); ifp->if_ierrors++; PCN_INC(i, PCN_RX_LIST_CNT); continue; } PCN_INC(i, PCN_RX_LIST_CNT); /* No errors; receive the packet. 
*/ ifp->if_ipackets++; m->m_len = m->m_pkthdr.len = cur_rx->pcn_rxlen - ETHER_CRC_LEN; m->m_pkthdr.rcvif = ifp; PCN_UNLOCK(sc); (*ifp->if_input)(ifp, m); PCN_LOCK(sc); } sc->pcn_cdata.pcn_rx_prod = i; return; } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. */ static void pcn_txeof(sc) struct pcn_softc *sc; { struct pcn_tx_desc *cur_tx = NULL; struct ifnet *ifp; u_int32_t idx; ifp = sc->pcn_ifp; /* * Go through our tx list and free mbufs for those * frames that have been transmitted. */ idx = sc->pcn_cdata.pcn_tx_cons; while (idx != sc->pcn_cdata.pcn_tx_prod) { cur_tx = &sc->pcn_ldata->pcn_tx_list[idx]; if (!PCN_OWN_TXDESC(cur_tx)) break; if (!(cur_tx->pcn_txctl & PCN_TXCTL_ENP)) { sc->pcn_cdata.pcn_tx_cnt--; PCN_INC(idx, PCN_TX_LIST_CNT); continue; } if (cur_tx->pcn_txctl & PCN_TXCTL_ERR) { ifp->if_oerrors++; if (cur_tx->pcn_txstat & PCN_TXSTAT_EXDEF) ifp->if_collisions++; if (cur_tx->pcn_txstat & PCN_TXSTAT_RTRY) ifp->if_collisions++; } ifp->if_collisions += cur_tx->pcn_txstat & PCN_TXSTAT_TRC; ifp->if_opackets++; if (sc->pcn_cdata.pcn_tx_chain[idx] != NULL) { m_freem(sc->pcn_cdata.pcn_tx_chain[idx]); sc->pcn_cdata.pcn_tx_chain[idx] = NULL; } sc->pcn_cdata.pcn_tx_cnt--; PCN_INC(idx, PCN_TX_LIST_CNT); } if (idx != sc->pcn_cdata.pcn_tx_cons) { /* Some buffers have been freed. */ sc->pcn_cdata.pcn_tx_cons = idx; ifp->if_flags &= ~IFF_OACTIVE; } ifp->if_timer = (sc->pcn_cdata.pcn_tx_cnt == 0) ? 
0 : 5; return; } static void pcn_tick(xsc) void *xsc; { struct pcn_softc *sc; struct mii_data *mii; struct ifnet *ifp; sc = xsc; ifp = sc->pcn_ifp; PCN_LOCK(sc); mii = device_get_softc(sc->pcn_miibus); mii_tick(mii); /* link just died */ if (sc->pcn_link & !(mii->mii_media_status & IFM_ACTIVE)) sc->pcn_link = 0; /* link just came up, restart */ if (!sc->pcn_link && mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { sc->pcn_link++; if (ifp->if_snd.ifq_head != NULL) pcn_start(ifp); } sc->pcn_stat_ch = timeout(pcn_tick, sc, hz); PCN_UNLOCK(sc); return; } static void pcn_intr(arg) void *arg; { struct pcn_softc *sc; struct ifnet *ifp; u_int32_t status; sc = arg; ifp = sc->pcn_ifp; /* Suppress unwanted interrupts */ if (!(ifp->if_flags & IFF_UP)) { pcn_stop(sc); return; } PCN_LOCK(sc); CSR_WRITE_4(sc, PCN_IO32_RAP, PCN_CSR_CSR); while ((status = CSR_READ_4(sc, PCN_IO32_RDP)) & PCN_CSR_INTR) { CSR_WRITE_4(sc, PCN_IO32_RDP, status); if (status & PCN_CSR_RINT) pcn_rxeof(sc); if (status & PCN_CSR_TINT) pcn_txeof(sc); if (status & PCN_CSR_ERR) { pcn_init(sc); break; } } if (ifp->if_snd.ifq_head != NULL) pcn_start(ifp); PCN_UNLOCK(sc); return; } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. */ static int pcn_encap(sc, m_head, txidx) struct pcn_softc *sc; struct mbuf *m_head; u_int32_t *txidx; { struct pcn_tx_desc *f = NULL; struct mbuf *m; int frag, cur, cnt = 0; /* * Start packing the mbufs in this chain into * the fragment pointers. Stop when we run out * of fragments or hit the end of the mbuf chain. 
*/
	m = m_head;
	cur = frag = *txidx;

	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len == 0)
			continue;

		/* Always leave at least two free descriptors in the ring. */
		if ((PCN_TX_LIST_CNT -
		    (sc->pcn_cdata.pcn_tx_cnt + cnt)) < 2)
			return(ENOBUFS);
		f = &sc->pcn_ldata->pcn_tx_list[frag];
		/* Buffer length is programmed as a two's-complement value. */
		f->pcn_txctl = (~(m->m_len) + 1) & PCN_TXCTL_BUFSZ;
		f->pcn_txctl |= PCN_TXCTL_MBO;
		f->pcn_tbaddr = vtophys(mtod(m, vm_offset_t));
		/*
		 * Mark the first fragment STP but without OWN: the whole
		 * frame is handed to the chip only after all fragments are
		 * set up (see the *txidx OWN write below).
		 */
		if (cnt == 0)
			f->pcn_txctl |= PCN_TXCTL_STP;
		else
			f->pcn_txctl |= PCN_TXCTL_OWN;
		cur = frag;
		PCN_INC(frag, PCN_TX_LIST_CNT);
		cnt++;
	}

	/* Ran out of descriptors before consuming the whole chain. */
	if (m != NULL)
		return(ENOBUFS);

	sc->pcn_cdata.pcn_tx_chain[cur] = m_head;
	sc->pcn_ldata->pcn_tx_list[cur].pcn_txctl |=
	    PCN_TXCTL_ENP|PCN_TXCTL_ADD_FCS|PCN_TXCTL_MORE_LTINT;
	/* Finally give the first descriptor to the chip. */
	sc->pcn_ldata->pcn_tx_list[*txidx].pcn_txctl |= PCN_TXCTL_OWN;
	sc->pcn_cdata.pcn_tx_cnt += cnt;
	*txidx = frag;

	return(0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
static void
pcn_start(ifp)
	struct ifnet		*ifp;
{
	struct pcn_softc	*sc;
	struct mbuf		*m_head = NULL;
	u_int32_t		idx;

	sc = ifp->if_softc;

	PCN_LOCK(sc);

	/* Nothing to do until the link is up. */
	if (!sc->pcn_link) {
		PCN_UNLOCK(sc);
		return;
	}

	idx = sc->pcn_cdata.pcn_tx_prod;

	if (ifp->if_flags & IFF_OACTIVE) {
		PCN_UNLOCK(sc);
		return;
	}

	while(sc->pcn_cdata.pcn_tx_chain[idx] == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (pcn_encap(sc, m_head, &idx)) {
			/* Ring full: requeue the frame and stall the queue. */
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	/* Transmit */
	sc->pcn_cdata.pcn_tx_prod = idx;
	pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_TX|PCN_CSR_INTEN);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;

	PCN_UNLOCK(sc);

	return;
}

/*
 * Program the promiscuous/broadcast receive filter bits from the
 * interface flags.
 */
static void
pcn_setfilt(ifp)
	struct ifnet		*ifp;
{
	struct pcn_softc	*sc;

	sc = ifp->if_softc;

	/* If we want promiscuous mode, set the allframes bit. */
	if (ifp->if_flags & IFF_PROMISC) {
		PCN_CSR_SETBIT(sc, PCN_CSR_MODE, PCN_MODE_PROMISC);
	} else {
		PCN_CSR_CLRBIT(sc, PCN_CSR_MODE, PCN_MODE_PROMISC);
	}

	/* Set the capture broadcast bit to capture broadcast frames. */
	if (ifp->if_flags & IFF_BROADCAST) {
		PCN_CSR_CLRBIT(sc, PCN_CSR_MODE, PCN_MODE_RXNOBROAD);
	} else {
		PCN_CSR_SETBIT(sc, PCN_CSR_MODE, PCN_MODE_RXNOBROAD);
	}

	return;
}

/*
 * Bring the interface up: reset the chip, program the station address,
 * descriptor rings, receive filters and DMA/feature bits, then start the
 * controller and the one-second tick.
 */
static void
pcn_init(xsc)
	void			*xsc;
{
	struct pcn_softc	*sc = xsc;
	struct ifnet		*ifp = sc->pcn_ifp;
	struct mii_data		*mii = NULL;

	PCN_LOCK(sc);

	/*
	 * Cancel pending I/O and free all RX/TX buffers.
	 */
	pcn_stop(sc);
	pcn_reset(sc);

	mii = device_get_softc(sc->pcn_miibus);

	/* Set MAC address */
	pcn_csr_write(sc, PCN_CSR_PAR0,
	    ((u_int16_t *)IFP2ENADDR(sc->pcn_ifp))[0]);
	pcn_csr_write(sc, PCN_CSR_PAR1,
	    ((u_int16_t *)IFP2ENADDR(sc->pcn_ifp))[1]);
	pcn_csr_write(sc, PCN_CSR_PAR2,
	    ((u_int16_t *)IFP2ENADDR(sc->pcn_ifp))[2]);

	/* Init circular RX list. */
	if (pcn_list_rx_init(sc) == ENOBUFS) {
		printf("pcn%d: initialization failed: no "
		    "memory for rx buffers\n", sc->pcn_unit);
		pcn_stop(sc);
		PCN_UNLOCK(sc);
		return;
	}

	/*
	 * Init tx descriptors.
	 */
	pcn_list_tx_init(sc);

	/* Set up the mode register. */
	pcn_csr_write(sc, PCN_CSR_MODE, PCN_PORT_MII);

	/* Set up RX filter. */
	pcn_setfilt(ifp);

	/*
	 * Load the multicast filter.
	 */
	pcn_setmulti(sc);

	/*
	 * Load the addresses of the RX and TX lists.
	 */
	pcn_csr_write(sc, PCN_CSR_RXADDR0,
	    vtophys(&sc->pcn_ldata->pcn_rx_list[0]) & 0xFFFF);
	pcn_csr_write(sc, PCN_CSR_RXADDR1,
	    (vtophys(&sc->pcn_ldata->pcn_rx_list[0]) >> 16) & 0xFFFF);
	pcn_csr_write(sc, PCN_CSR_TXADDR0,
	    vtophys(&sc->pcn_ldata->pcn_tx_list[0]) & 0xFFFF);
	pcn_csr_write(sc, PCN_CSR_TXADDR1,
	    (vtophys(&sc->pcn_ldata->pcn_tx_list[0]) >> 16) & 0xFFFF);

	/* Set the RX and TX ring sizes (two's-complement encoding). */
	pcn_csr_write(sc, PCN_CSR_RXRINGLEN, (~PCN_RX_LIST_CNT) + 1);
	pcn_csr_write(sc, PCN_CSR_TXRINGLEN, (~PCN_TX_LIST_CNT) + 1);

	/* We're not using the initialization block. */
	pcn_csr_write(sc, PCN_CSR_IAB1, 0);

	/* Enable fast suspend mode. */
	PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL2, PCN_EXTCTL2_FASTSPNDE);

	/*
	 * Enable burst read and write. Also set the no underflow
	 * bit. This will avoid transmit underruns in certain
	 * conditions while still providing decent performance.
	 */
	PCN_BCR_SETBIT(sc, PCN_BCR_BUSCTL, PCN_BUSCTL_NOUFLOW|
	    PCN_BUSCTL_BREAD|PCN_BUSCTL_BWRITE);

	/* Enable graceful recovery from underflow. */
	PCN_CSR_SETBIT(sc, PCN_CSR_IMR, PCN_IMR_DXSUFLO);

	/* Enable auto-padding of short TX frames. */
	PCN_CSR_SETBIT(sc, PCN_CSR_TFEAT, PCN_TFEAT_PAD_TX);

	/* Disable MII autoneg (we handle this ourselves). */
	PCN_BCR_SETBIT(sc, PCN_BCR_MIICTL, PCN_MIICTL_DANAS);

	if (sc->pcn_type == Am79C978)
		pcn_bcr_write(sc, PCN_BCR_PHYSEL,
		    PCN_PHYSEL_PCNET|PCN_PHY_HOMEPNA);

	/* Enable interrupts and start the controller running. */
	pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_INTEN|PCN_CSR_START);

	mii_mediachg(mii);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	sc->pcn_stat_ch = timeout(pcn_tick, sc, hz);

	PCN_UNLOCK(sc);

	return;
}

/*
 * Set media options.
 */
static int
pcn_ifmedia_upd(ifp)
	struct ifnet		*ifp;
{
	struct pcn_softc	*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->pcn_miibus);

	/* Force a fresh link-state determination by pcn_tick(). */
	sc->pcn_link = 0;
	if (mii->mii_instance) {
		struct mii_softc	*miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return(0);
}

/*
 * Report current media status.
*/
static void
pcn_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct pcn_softc	*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->pcn_miibus);

	/* Poll the PHY and copy out its view of the media. */
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

/*
 * Handle socket ioctls.  Promiscuous-mode toggles are applied without a
 * full reinit by suspending the chip, reprogramming the filter, and
 * resuming.
 */
static int
pcn_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct pcn_softc	*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii = NULL;
	int			error = 0;

	PCN_LOCK(sc);

	switch(command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/* Promiscuous mode was just switched on. */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->pcn_if_flags & IFF_PROMISC)) {
				PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1,
				    PCN_EXTCTL1_SPND);
				pcn_setfilt(ifp);
				PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1,
				    PCN_EXTCTL1_SPND);
				pcn_csr_write(sc, PCN_CSR_CSR,
				    PCN_CSR_INTEN|PCN_CSR_START);
			/* Promiscuous mode was just switched off. */
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->pcn_if_flags & IFF_PROMISC) {
				PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1,
				    PCN_EXTCTL1_SPND);
				pcn_setfilt(ifp);
				PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1,
				    PCN_EXTCTL1_SPND);
				pcn_csr_write(sc, PCN_CSR_CSR,
				    PCN_CSR_INTEN|PCN_CSR_START);
			} else if (!(ifp->if_flags & IFF_RUNNING))
				pcn_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				pcn_stop(sc);
		}
		/* Remember the flags so the next toggle can be detected. */
		sc->pcn_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		pcn_setmulti(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->pcn_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	PCN_UNLOCK(sc);

	return(error);
}

/*
 * Transmit watchdog expired: the chip wedged.  Reset and reinitialize,
 * then restart transmission if frames are still queued.
 */
static void
pcn_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct pcn_softc	*sc;

	sc = ifp->if_softc;

	PCN_LOCK(sc);

	ifp->if_oerrors++;
	printf("pcn%d: watchdog timeout\n", sc->pcn_unit);

	pcn_stop(sc);
	pcn_reset(sc);
	pcn_init(sc);

	if (ifp->if_snd.ifq_head != NULL)
		pcn_start(ifp);

	PCN_UNLOCK(sc);

	return;
}

/*
 * Stop the adapter and free
any mbufs allocated to the
 * RX and TX lists.
 */
static void
pcn_stop(sc)
	struct pcn_softc	*sc;
{
	register int		i;
	struct ifnet		*ifp;

	ifp = sc->pcn_ifp;
	PCN_LOCK(sc);
	/* Disarm the transmit watchdog. */
	ifp->if_timer = 0;

	untimeout(pcn_tick, sc, sc->pcn_stat_ch);

	/* Turn off interrupts */
	PCN_CSR_CLRBIT(sc, PCN_CSR_CSR, PCN_CSR_INTEN);
	/* Stop adapter */
	PCN_CSR_SETBIT(sc, PCN_CSR_CSR, PCN_CSR_STOP);
	sc->pcn_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < PCN_RX_LIST_CNT; i++) {
		if (sc->pcn_cdata.pcn_rx_chain[i] != NULL) {
			m_freem(sc->pcn_cdata.pcn_rx_chain[i]);
			sc->pcn_cdata.pcn_rx_chain[i] = NULL;
		}
	}
	bzero((char *)&sc->pcn_ldata->pcn_rx_list,
		sizeof(sc->pcn_ldata->pcn_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < PCN_TX_LIST_CNT; i++) {
		if (sc->pcn_cdata.pcn_tx_chain[i] != NULL) {
			m_freem(sc->pcn_cdata.pcn_tx_chain[i]);
			sc->pcn_cdata.pcn_tx_chain[i] = NULL;
		}
	}
	bzero((char *)&sc->pcn_ldata->pcn_tx_list,
		sizeof(sc->pcn_ldata->pcn_tx_list));

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	PCN_UNLOCK(sc);

	return;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
pcn_shutdown(dev)
	device_t		dev;
{
	struct pcn_softc	*sc;

	sc = device_get_softc(dev);
	PCN_LOCK(sc);
	pcn_reset(sc);
	pcn_stop(sc);
	PCN_UNLOCK(sc);

	return;
}
Index: stable/6/sys/pci/if_rl.c
===================================================================
--- stable/6/sys/pci/if_rl.c	(revision 149421)
+++ stable/6/sys/pci/if_rl.c	(revision 149422)
@@ -1,1808 +1,1810 @@
/*-
 * Copyright (c) 1997, 1998
 *	Bill Paul . All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2.
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * RealTek 8129/8139 PCI NIC driver * * Supports several extremely cheap PCI 10/100 adapters based on * the RealTek chipset. Datasheets can be obtained from * www.realtek.com.tw. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is * probably the worst PCI ethernet controller ever made, with the possible * exception of the FEAST chip made by SMC. The 8139 supports bus-master * DMA, but it has a terrible interface that nullifies any performance * gains that bus-master DMA usually offers. 
* * For transmission, the chip offers a series of four TX descriptor * registers. Each transmit frame must be in a contiguous buffer, aligned * on a longword (32-bit) boundary. This means we almost always have to * do mbuf copies in order to transmit a frame, except in the unlikely * case where a) the packet fits into a single mbuf, and b) the packet * is 32-bit aligned within the mbuf's data area. The presence of only * four descriptor registers means that we can never have more than four * packets queued for transmission at any one time. * * Reception is not much better. The driver has to allocate a single large * buffer area (up to 64K in size) into which the chip will DMA received * frames. Because we don't know where within this region received packets * will begin or end, we have no choice but to copy data from the buffer * area into mbufs in order to pass the packets up to the higher protocol * levels. * * It's impossible given this rotten design to really achieve decent * performance at 100Mbps, unless you happen to have a 400Mhz PII or * some equally overmuscled CPU to drive it. * * On the bright side, the 8139 does have a built-in PHY, although * rather than using an MDIO serial interface like most other NICs, the * PHY registers are directly accessible through the 8139's register * space. The 8139 supports autonegotiation, as well as a 64-bit multicast * filter. * * The 8129 chip is an older version of the 8139 that uses an external PHY * chip. The 8129 has a serial MDIO interface for accessing the MII where * the 8139 lets you directly access the on-board PHY registers. We need * to select which interface to use depending on the chip type. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(rl, pci, 1, 1, 1); MODULE_DEPEND(rl, ether, 1, 1, 1); MODULE_DEPEND(rl, miibus, 1, 1, 1); /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* * Default to using PIO access for this driver. On SMP systems, * there appear to be problems with memory mapped mode: it looks like * doing too many memory mapped access back to back in rapid succession * can hang the bus. I'm inclined to blame this on crummy design/construction * on the part of RealTek. Memory mapped mode does appear to work on * uniprocessor systems though. */ #define RL_USEIOSPACE #include /* * Various supported device vendors/types and their names. */ static struct rl_type rl_devs[] = { { RT_VENDORID, RT_DEVICEID_8129, RL_8129, "RealTek 8129 10/100BaseTX" }, { RT_VENDORID, RT_DEVICEID_8139, RL_8139, "RealTek 8139 10/100BaseTX" }, { RT_VENDORID, RT_DEVICEID_8138, RL_8139, "RealTek 8139 10/100BaseTX CardBus" }, { RT_VENDORID, RT_DEVICEID_8100, RL_8139, "RealTek 8100 10/100BaseTX" }, { ACCTON_VENDORID, ACCTON_DEVICEID_5030, RL_8139, "Accton MPX 5030/5038 10/100BaseTX" }, { DELTA_VENDORID, DELTA_DEVICEID_8139, RL_8139, "Delta Electronics 8139 10/100BaseTX" }, { ADDTRON_VENDORID, ADDTRON_DEVICEID_8139, RL_8139, "Addtron Technolgy 8139 10/100BaseTX" }, { DLINK_VENDORID, DLINK_DEVICEID_530TXPLUS, RL_8139, "D-Link DFE-530TX+ 10/100BaseTX" }, { DLINK_VENDORID, DLINK_DEVICEID_690TXD, RL_8139, "D-Link DFE-690TXD 10/100BaseTX" }, { NORTEL_VENDORID, ACCTON_DEVICEID_5030, RL_8139, "Nortel Networks 10/100BaseTX" }, { COREGA_VENDORID, COREGA_DEVICEID_FETHERCBTXD, RL_8139, "Corega FEther CB-TXD" }, { COREGA_VENDORID, COREGA_DEVICEID_FETHERIICBTXD, RL_8139, "Corega FEtherII CB-TXD" }, { PEPPERCON_VENDORID, PEPPERCON_DEVICEID_ROLF, RL_8139, 
"Peppercon AG ROL-F" }, { PLANEX_VENDORID, PLANEX_DEVICEID_FNW3800TX, RL_8139, "Planex FNW-3800-TX" }, { CP_VENDORID, RT_DEVICEID_8139, RL_8139, "Compaq HNE-300" }, { LEVEL1_VENDORID, LEVEL1_DEVICEID_FPC0106TX, RL_8139, "LevelOne FPC-0106TX" }, { EDIMAX_VENDORID, EDIMAX_DEVICEID_EP4103DL, RL_8139, "Edimax EP-4103DL CardBus" }, { 0, 0, 0, NULL } }; static int rl_attach(device_t); static int rl_detach(device_t); static void rl_dma_map_rxbuf(void *, bus_dma_segment_t *, int, int); static void rl_dma_map_txbuf(void *, bus_dma_segment_t *, int, int); static void rl_eeprom_putbyte(struct rl_softc *, int); static void rl_eeprom_getword(struct rl_softc *, int, uint16_t *); static int rl_encap(struct rl_softc *, struct mbuf * ); static int rl_list_tx_init(struct rl_softc *); static int rl_ifmedia_upd(struct ifnet *); static void rl_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int rl_ioctl(struct ifnet *, u_long, caddr_t); static void rl_intr(void *); static void rl_init(void *); static void rl_init_locked(struct rl_softc *sc); static void rl_mii_send(struct rl_softc *, uint32_t, int); static void rl_mii_sync(struct rl_softc *); static int rl_mii_readreg(struct rl_softc *, struct rl_mii_frame *); static int rl_mii_writereg(struct rl_softc *, struct rl_mii_frame *); static int rl_miibus_readreg(device_t, int, int); static void rl_miibus_statchg(device_t); static int rl_miibus_writereg(device_t, int, int, int); #ifdef DEVICE_POLLING static void rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); static void rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count); #endif static int rl_probe(device_t); static void rl_read_eeprom(struct rl_softc *, uint8_t *, int, int, int); static void rl_reset(struct rl_softc *); static int rl_resume(device_t); static void rl_rxeof(struct rl_softc *); static void rl_setmulti(struct rl_softc *); static void rl_shutdown(device_t); static void rl_start(struct ifnet *); static void rl_start_locked(struct ifnet *); static 
void rl_stop(struct rl_softc *); static int rl_suspend(device_t); static void rl_tick(void *); static void rl_txeof(struct rl_softc *); static void rl_watchdog(struct ifnet *); #ifdef RL_USEIOSPACE #define RL_RES SYS_RES_IOPORT #define RL_RID RL_PCI_LOIO #else #define RL_RES SYS_RES_MEMORY #define RL_RID RL_PCI_LOMEM #endif static device_method_t rl_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rl_probe), DEVMETHOD(device_attach, rl_attach), DEVMETHOD(device_detach, rl_detach), DEVMETHOD(device_suspend, rl_suspend), DEVMETHOD(device_resume, rl_resume), DEVMETHOD(device_shutdown, rl_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, rl_miibus_readreg), DEVMETHOD(miibus_writereg, rl_miibus_writereg), DEVMETHOD(miibus_statchg, rl_miibus_statchg), { 0, 0 } }; static driver_t rl_driver = { "rl", rl_methods, sizeof(struct rl_softc) }; static devclass_t rl_devclass; DRIVER_MODULE(rl, pci, rl_driver, rl_devclass, 0, 0); DRIVER_MODULE(rl, cardbus, rl_driver, rl_devclass, 0, 0); DRIVER_MODULE(miibus, rl, miibus_driver, miibus_devclass, 0, 0); #define EE_SET(x) \ CSR_WRITE_1(sc, RL_EECMD, \ CSR_READ_1(sc, RL_EECMD) | x) #define EE_CLR(x) \ CSR_WRITE_1(sc, RL_EECMD, \ CSR_READ_1(sc, RL_EECMD) & ~x) static void rl_dma_map_rxbuf(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct rl_softc *sc = arg; CSR_WRITE_4(sc, RL_RXADDR, segs->ds_addr & 0xFFFFFFFF); } static void rl_dma_map_txbuf(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct rl_softc *sc = arg; CSR_WRITE_4(sc, RL_CUR_TXADDR(sc), segs->ds_addr & 0xFFFFFFFF); } /* * Send a read command and address to the EEPROM, check for ACK. */ static void rl_eeprom_putbyte(struct rl_softc *sc, int addr) { register int d, i; d = addr | sc->rl_eecmd_read; /* * Feed in each bit and strobe the clock. 
*/
	/* Clock each address/command bit out MSB-first. */
	for (i = 0x400; i; i >>= 1) {
		if (d & i) {
			EE_SET(RL_EE_DATAIN);
		} else {
			EE_CLR(RL_EE_DATAIN);
		}
		DELAY(100);
		EE_SET(RL_EE_CLK);
		DELAY(150);
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
rl_eeprom_getword(struct rl_softc *sc, int addr, uint16_t *dest)
{
	register int		i;
	uint16_t		word = 0;

	/* Enter EEPROM access mode. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);

	/*
	 * Send address of word we want to read.
	 */
	rl_eeprom_putbyte(sc, addr);

	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL);

	/*
	 * Start reading bits from EEPROM (16 bits, MSB-first).
	 */
	for (i = 0x8000; i; i >>= 1) {
		EE_SET(RL_EE_CLK);
		DELAY(100);
		if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT)
			word |= i;
		EE_CLR(RL_EE_CLK);
		DELAY(100);
	}

	/* Turn off EEPROM access mode. */
	CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF);

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM.
 */
static void
rl_read_eeprom(struct rl_softc *sc, uint8_t *dest, int off, int cnt, int swap)
{
	int			i;
	uint16_t		word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		rl_eeprom_getword(sc, off + i, &word);
		ptr = (uint16_t *)(dest + (i * 2));
		/* Optionally byte-swap (EEPROM words are little-endian). */
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}
}

/*
 * MII access routines are provided for the 8129, which
 * doesn't have a built-in PHY. For the 8139, we fake things
 * up by diverting rl_phy_readreg()/rl_phy_writereg() to the
 * direct access PHY registers.
 */
#define MII_SET(x)					\
	CSR_WRITE_1(sc, RL_MII,				\
		CSR_READ_1(sc, RL_MII) | (x))

#define MII_CLR(x)					\
	CSR_WRITE_1(sc, RL_MII,				\
		CSR_READ_1(sc, RL_MII) & ~(x))

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
static void
rl_mii_sync(struct rl_softc *sc)
{
	register int		i;

	MII_SET(RL_MII_DIR|RL_MII_DATAOUT);

	for (i = 0; i < 32; i++) {
		MII_SET(RL_MII_CLK);
		DELAY(1);
		MII_CLR(RL_MII_CLK);
		DELAY(1);
	}
}

/*
 * Clock a series of bits through the MII.
*/
static void
rl_mii_send(struct rl_softc *sc, uint32_t bits, int cnt)
{
	int			i;

	MII_CLR(RL_MII_CLK);

	/* Shift out 'cnt' bits, MSB-first. */
	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			MII_SET(RL_MII_DATAOUT);
		} else {
			MII_CLR(RL_MII_DATAOUT);
		}
		DELAY(1);
		MII_CLR(RL_MII_CLK);
		DELAY(1);
		MII_SET(RL_MII_CLK);
	}
}

/*
 * Read an PHY register through the MII.
 */
static int
rl_mii_readreg(struct rl_softc *sc, struct rl_mii_frame *frame)
{
	int			i, ack;

	RL_LOCK(sc);

	/* Set up frame for RX. */
	frame->mii_stdelim = RL_MII_STARTDELIM;
	frame->mii_opcode = RL_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_2(sc, RL_MII, 0);

	/* Turn on data xmit. */
	MII_SET(RL_MII_DIR);

	rl_mii_sync(sc);

	/* Send command/address info. */
	rl_mii_send(sc, frame->mii_stdelim, 2);
	rl_mii_send(sc, frame->mii_opcode, 2);
	rl_mii_send(sc, frame->mii_phyaddr, 5);
	rl_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	MII_CLR((RL_MII_CLK|RL_MII_DATAOUT));
	DELAY(1);
	MII_SET(RL_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	MII_CLR(RL_MII_DIR);

	/* Check for ack (line pulled low by the PHY on success). */
	MII_CLR(RL_MII_CLK);
	DELAY(1);
	ack = CSR_READ_2(sc, RL_MII) & RL_MII_DATAIN;
	MII_SET(RL_MII_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			MII_CLR(RL_MII_CLK);
			DELAY(1);
			MII_SET(RL_MII_CLK);
			DELAY(1);
		}
		goto fail;
	}

	/* Shift in the 16 data bits, MSB-first. */
	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(RL_MII_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_2(sc, RL_MII) & RL_MII_DATAIN)
				frame->mii_data |= i;
			DELAY(1);
		}
		MII_SET(RL_MII_CLK);
		DELAY(1);
	}

fail:
	MII_CLR(RL_MII_CLK);
	DELAY(1);
	MII_SET(RL_MII_CLK);
	DELAY(1);

	RL_UNLOCK(sc);

	/* Non-zero return indicates the PHY failed to ack. */
	return (ack ? 1 : 0);
}

/*
 * Write to a PHY register through the MII.
 */
static int
rl_mii_writereg(struct rl_softc *sc, struct rl_mii_frame *frame)
{

	RL_LOCK(sc);

	/* Set up frame for TX. */
	frame->mii_stdelim = RL_MII_STARTDELIM;
	frame->mii_opcode = RL_MII_WRITEOP;
	frame->mii_turnaround = RL_MII_TURNAROUND;

	/* Turn on data output.
*/
	MII_SET(RL_MII_DIR);

	rl_mii_sync(sc);

	/* Shift out the whole write frame. */
	rl_mii_send(sc, frame->mii_stdelim, 2);
	rl_mii_send(sc, frame->mii_opcode, 2);
	rl_mii_send(sc, frame->mii_phyaddr, 5);
	rl_mii_send(sc, frame->mii_regaddr, 5);
	rl_mii_send(sc, frame->mii_turnaround, 2);
	rl_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(RL_MII_CLK);
	DELAY(1);
	MII_CLR(RL_MII_CLK);
	DELAY(1);

	/* Turn off xmit. */
	MII_CLR(RL_MII_DIR);

	RL_UNLOCK(sc);

	return (0);
}

/*
 * miibus read-register method.  On the 8139 the PHY registers are mapped
 * directly into chip register space; on the 8129 we bit-bang the MII.
 */
static int
rl_miibus_readreg(device_t dev, int phy, int reg)
{
	struct rl_softc		*sc;
	struct rl_mii_frame	frame;
	uint16_t		rval = 0;
	uint16_t		rl8139_reg = 0;

	sc = device_get_softc(dev);

	if (sc->rl_type == RL_8139) {
		/* Pretend the internal PHY is only at address 0 */
		if (phy) {
			return (0);
		}
		/* Map the MII register onto its direct-access equivalent. */
		switch (reg) {
		case MII_BMCR:
			rl8139_reg = RL_BMCR;
			break;
		case MII_BMSR:
			rl8139_reg = RL_BMSR;
			break;
		case MII_ANAR:
			rl8139_reg = RL_ANAR;
			break;
		case MII_ANER:
			rl8139_reg = RL_ANER;
			break;
		case MII_ANLPAR:
			rl8139_reg = RL_LPAR;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			return (0);
		/*
		 * Allow the rlphy driver to read the media status
		 * register. If we have a link partner which does not
		 * support NWAY, this is the register which will tell
		 * us the results of parallel detection.
		 */
		case RL_MEDIASTAT:
			rval = CSR_READ_1(sc, RL_MEDIASTAT);
			return (rval);
		default:
			if_printf(sc->rl_ifp, "bad phy register\n");
			return (0);
		}
		rval = CSR_READ_2(sc, rl8139_reg);
		return (rval);
	}

	/* 8129: go through the bit-banged MII. */
	bzero((char *)&frame, sizeof(frame));
	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	rl_mii_readreg(sc, &frame);

	return (frame.mii_data);
}

/*
 * miibus write-register method; same 8139 vs. 8129 split as above.
 */
static int
rl_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct rl_softc		*sc;
	struct rl_mii_frame	frame;
	uint16_t		rl8139_reg = 0;

	sc = device_get_softc(dev);

	if (sc->rl_type == RL_8139) {
		/* Pretend the internal PHY is only at address 0 */
		if (phy) {
			return (0);
		}
		switch (reg) {
		case MII_BMCR:
			rl8139_reg = RL_BMCR;
			break;
		case MII_BMSR:
			rl8139_reg = RL_BMSR;
			break;
		case MII_ANAR:
			rl8139_reg = RL_ANAR;
			break;
		case MII_ANER:
			rl8139_reg = RL_ANER;
			break;
		case MII_ANLPAR:
			rl8139_reg = RL_LPAR;
			break;
		case MII_PHYIDR1:
		case MII_PHYIDR2:
			return (0);
			break;
		default:
			if_printf(sc->rl_ifp, "bad phy register\n");
			return (0);
		}
		CSR_WRITE_2(sc, rl8139_reg, data);
		return (0);
	}

	bzero((char *)&frame, sizeof(frame));
	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;
	rl_mii_writereg(sc, &frame);

	return (0);
}

/* Link-state change callback: nothing to do on this hardware. */
static void
rl_miibus_statchg(device_t dev)
{
}

/*
 * Program the 64-bit multicast hash filter.
*/
static void
rl_setmulti(struct rl_softc *sc)
{
	struct ifnet		*ifp = sc->rl_ifp;
	int			h = 0;
	uint32_t		hashes[2] = { 0, 0 };
	struct ifmultiaddr	*ifma;
	uint32_t		rxfilt;
	int			mcnt = 0;

	RL_LOCK_ASSERT(sc);

	rxfilt = CSR_READ_4(sc, RL_RXCFG);

	/* ALLMULTI/PROMISC: accept everything by setting all hash bits. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= RL_RXCFG_RX_MULTI;
		CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
		CSR_WRITE_4(sc, RL_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, RL_MAR4, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, RL_MAR0, 0);
	CSR_WRITE_4(sc, RL_MAR4, 0);

	/* now program new ones */
+	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		/* Top 6 bits of the big-endian CRC select the hash bit. */
		h = ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;
	}
+	IF_ADDR_UNLOCK(ifp);

	if (mcnt)
		rxfilt |= RL_RXCFG_RX_MULTI;
	else
		rxfilt &= ~RL_RXCFG_RX_MULTI;

	CSR_WRITE_4(sc, RL_RXCFG, rxfilt);
	CSR_WRITE_4(sc, RL_MAR0, hashes[0]);
	CSR_WRITE_4(sc, RL_MAR4, hashes[1]);
}

/*
 * Issue a software reset and spin until the chip reports completion.
 */
static void
rl_reset(struct rl_softc *sc)
{
	register int		i;

	RL_LOCK_ASSERT(sc);

	CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET);

	for (i = 0; i < RL_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET))
			break;
	}
	if (i == RL_TIMEOUT)
		if_printf(sc->rl_ifp, "reset never completed!\n");
}

/*
 * Probe for a RealTek 8129/8139 chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
rl_probe(device_t dev)
{
	struct rl_softc		*sc;
	struct rl_type		*t = rl_devs;
	int			rid;
	uint32_t		hwrev;

	sc = device_get_softc(dev);

	while (t->rl_name != NULL) {
		if ((pci_get_vendor(dev) == t->rl_vid) &&
		    (pci_get_device(dev) == t->rl_did)) {
			/*
			 * Temporarily map the I/O space
			 * so we can read the chip ID register.
*/ rid = RL_RID; sc->rl_res = bus_alloc_resource_any(dev, RL_RES, &rid, RF_ACTIVE); if (sc->rl_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); return (ENXIO); } sc->rl_btag = rman_get_bustag(sc->rl_res); sc->rl_bhandle = rman_get_bushandle(sc->rl_res); hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV; bus_release_resource(dev, RL_RES, RL_RID, sc->rl_res); /* Don't attach to 8139C+ or 8169/8110 chips. */ if (hwrev == RL_HWREV_8139CPLUS || (hwrev == RL_HWREV_8169 && t->rl_did == RT_DEVICEID_8169) || hwrev == RL_HWREV_8169S || hwrev == RL_HWREV_8110S) { t++; continue; } device_set_desc(dev, t->rl_name); return (BUS_PROBE_DEFAULT); } t++; } return (ENXIO); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int rl_attach(device_t dev) { uint8_t eaddr[ETHER_ADDR_LEN]; uint16_t as[3]; struct ifnet *ifp; struct rl_softc *sc; struct rl_type *t; int error = 0, i, rid; int unit; uint16_t rl_did = 0; sc = device_get_softc(dev); unit = device_get_unit(dev); mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); pci_enable_busmaster(dev); /* Map control/status registers. */ rid = RL_RID; sc->rl_res = bus_alloc_resource_any(dev, RL_RES, &rid, RF_ACTIVE); if (sc->rl_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); error = ENXIO; goto fail; } #ifdef notdef /* * Detect the Realtek 8139B. For some reason, this chip is very * unstable when left to autoselect the media * The best workaround is to set the device to the required * media type or to set it to the 10 Meg speed. */ if ((rman_get_end(sc->rl_res) - rman_get_start(sc->rl_res)) == 0xFF) device_printf(dev, "Realtek 8139B detected. 
Warning, this may be unstable in autoselect mode\n"); #endif sc->rl_btag = rman_get_bustag(sc->rl_res); sc->rl_bhandle = rman_get_bushandle(sc->rl_res); /* Allocate interrupt */ rid = 0; sc->rl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->rl_irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } /* * Reset the adapter. Only take the lock here as it's needed in * order to call rl_reset(). */ RL_LOCK(sc); rl_reset(sc); RL_UNLOCK(sc); sc->rl_eecmd_read = RL_EECMD_READ_6BIT; rl_read_eeprom(sc, (uint8_t *)&rl_did, 0, 1, 0); if (rl_did != 0x8129) sc->rl_eecmd_read = RL_EECMD_READ_8BIT; /* * Get station address from the EEPROM. */ rl_read_eeprom(sc, (uint8_t *)as, RL_EE_EADDR, 3, 0); for (i = 0; i < 3; i++) { eaddr[(i * 2) + 0] = as[i] & 0xff; eaddr[(i * 2) + 1] = as[i] >> 8; } sc->rl_unit = unit; /* * Now read the exact device type from the EEPROM to find * out if it's an 8129 or 8139. */ rl_read_eeprom(sc, (uint8_t *)&rl_did, RL_EE_PCI_DID, 1, 0); t = rl_devs; sc->rl_type = 0; while(t->rl_name != NULL) { if (rl_did == t->rl_did) { sc->rl_type = t->rl_basetype; break; } t++; } if (sc->rl_type == 0) { device_printf(dev, "unknown device ID: %x\n", rl_did); error = ENXIO; goto fail; } /* * Allocate the parent bus DMA tag appropriate for PCI. */ #define RL_NSEG_NEW 32 error = bus_dma_tag_create(NULL, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MAXBSIZE, RL_NSEG_NEW, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->rl_parent_tag); if (error) goto fail; /* * Now allocate a tag for the DMA descriptor lists. * All of our lists are allocated as a contiguous block * of memory. 
*/ error = bus_dma_tag_create(sc->rl_parent_tag, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ RL_RXBUFLEN + 1518, 1, /* maxsize,nsegments */ BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->rl_tag); if (error) goto fail; /* * Now allocate a chunk of DMA-able memory based on the * tag we just created. */ error = bus_dmamem_alloc(sc->rl_tag, (void **)&sc->rl_cdata.rl_rx_buf, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->rl_cdata.rl_rx_dmamap); if (error) { device_printf(dev, "no memory for list buffers!\n"); bus_dma_tag_destroy(sc->rl_tag); sc->rl_tag = NULL; goto fail; } /* Leave a few bytes before the start of the RX ring buffer. */ sc->rl_cdata.rl_rx_buf_ptr = sc->rl_cdata.rl_rx_buf; sc->rl_cdata.rl_rx_buf += sizeof(uint64_t); ifp = sc->rl_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } /* Do MII setup */ if (mii_phy_probe(dev, &sc->rl_miibus, rl_ifmedia_upd, rl_ifmedia_sts)) { device_printf(dev, "MII without any phy!\n"); error = ENXIO; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = rl_ioctl; ifp->if_start = rl_start; ifp->if_watchdog = rl_watchdog; ifp->if_init = rl_init; ifp->if_baudrate = 10000000; ifp->if_capabilities = IFCAP_VLAN_MTU; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif ifp->if_capenable = ifp->if_capabilities; IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN; IFQ_SET_READY(&ifp->if_snd); callout_handle_init(&sc->rl_stat_ch); /* * Call MI attach routine. 
*/ ether_ifattach(ifp, eaddr); /* Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->rl_irq, INTR_TYPE_NET | INTR_MPSAFE, rl_intr, sc, &sc->rl_intrhand); if (error) { if_printf(ifp, "couldn't set up irq\n"); ether_ifdetach(ifp); if_free(ifp); } fail: if (error) rl_detach(dev); return (error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. */ static int rl_detach(device_t dev) { struct rl_softc *sc; struct ifnet *ifp; int attached; sc = device_get_softc(dev); ifp = sc->rl_ifp; KASSERT(mtx_initialized(&sc->rl_mtx), ("rl mutex not initialized")); attached = device_is_attached(dev); /* These should only be active if attach succeeded */ if (attached) { ether_ifdetach(ifp); if_free(ifp); } RL_LOCK(sc); #if 0 sc->suspended = 1; #endif if (attached) rl_stop(sc); if (sc->rl_miibus) device_delete_child(dev, sc->rl_miibus); bus_generic_detach(dev); if (sc->rl_intrhand) bus_teardown_intr(dev, sc->rl_irq, sc->rl_intrhand); if (sc->rl_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq); if (sc->rl_res) bus_release_resource(dev, RL_RES, RL_RID, sc->rl_res); if (sc->rl_tag) { bus_dmamap_unload(sc->rl_tag, sc->rl_cdata.rl_rx_dmamap); bus_dmamem_free(sc->rl_tag, sc->rl_cdata.rl_rx_buf, sc->rl_cdata.rl_rx_dmamap); bus_dma_tag_destroy(sc->rl_tag); } if (sc->rl_parent_tag) bus_dma_tag_destroy(sc->rl_parent_tag); RL_UNLOCK(sc); mtx_destroy(&sc->rl_mtx); return (0); } /* * Initialize the transmit descriptors. 
*/ static int rl_list_tx_init(struct rl_softc *sc) { struct rl_chain_data *cd; int i; RL_LOCK_ASSERT(sc); cd = &sc->rl_cdata; for (i = 0; i < RL_TX_LIST_CNT; i++) { cd->rl_tx_chain[i] = NULL; CSR_WRITE_4(sc, RL_TXADDR0 + (i * sizeof(uint32_t)), 0x0000000); } sc->rl_cdata.cur_tx = 0; sc->rl_cdata.last_tx = 0; return (0); } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. * * You know there's something wrong with a PCI bus-master chip design * when you have to use m_devget(). * * The receive operation is badly documented in the datasheet, so I'll * attempt to document it here. The driver provides a buffer area and * places its base address in the RX buffer start address register. * The chip then begins copying frames into the RX buffer. Each frame * is preceded by a 32-bit RX status word which specifies the length * of the frame and certain other status bits. Each frame (starting with * the status word) is also 32-bit aligned. The frame length is in the * first 16 bits of the status word; the lower 15 bits correspond with * the 'rx status register' mentioned in the datasheet. * * Note: to make the Alpha happy, the frame payload needs to be aligned * on a 32-bit boundary. To achieve this, we pass RL_ETHER_ALIGN (2 bytes) * as the offset argument to m_devget(). */ static void rl_rxeof(struct rl_softc *sc) { struct mbuf *m; struct ifnet *ifp = sc->rl_ifp; uint8_t *rxbufpos; int total_len = 0; int wrap = 0; uint32_t rxstat; uint16_t cur_rx; uint16_t limit; uint16_t max_bytes, rx_bytes = 0; RL_LOCK_ASSERT(sc); bus_dmamap_sync(sc->rl_tag, sc->rl_cdata.rl_rx_dmamap, BUS_DMASYNC_POSTREAD); cur_rx = (CSR_READ_2(sc, RL_CURRXADDR) + 16) % RL_RXBUFLEN; /* Do not try to read past this point. 
*/ limit = CSR_READ_2(sc, RL_CURRXBUF) % RL_RXBUFLEN; if (limit < cur_rx) max_bytes = (RL_RXBUFLEN - cur_rx) + limit; else max_bytes = limit - cur_rx; while((CSR_READ_1(sc, RL_COMMAND) & RL_CMD_EMPTY_RXBUF) == 0) { #ifdef DEVICE_POLLING if (ifp->if_flags & IFF_POLLING) { if (sc->rxcycles <= 0) break; sc->rxcycles--; } #endif /* DEVICE_POLLING */ rxbufpos = sc->rl_cdata.rl_rx_buf + cur_rx; rxstat = le32toh(*(uint32_t *)rxbufpos); /* * Here's a totally undocumented fact for you. When the * RealTek chip is in the process of copying a packet into * RAM for you, the length will be 0xfff0. If you spot a * packet header with this value, you need to stop. The * datasheet makes absolutely no mention of this and * RealTek should be shot for this. */ if ((uint16_t)(rxstat >> 16) == RL_RXSTAT_UNFINISHED) break; if (!(rxstat & RL_RXSTAT_RXOK)) { ifp->if_ierrors++; rl_init_locked(sc); return; } /* No errors; receive the packet. */ total_len = rxstat >> 16; rx_bytes += total_len + 4; /* * XXX The RealTek chip includes the CRC with every * received frame, and there's no way to turn this * behavior off (at least, I can't find anything in * the manual that explains how to do it) so we have * to trim off the CRC manually. */ total_len -= ETHER_CRC_LEN; /* * Avoid trying to read more bytes than we know * the chip has prepared for us. 
*/ if (rx_bytes > max_bytes) break; rxbufpos = sc->rl_cdata.rl_rx_buf + ((cur_rx + sizeof(uint32_t)) % RL_RXBUFLEN); if (rxbufpos == (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN)) rxbufpos = sc->rl_cdata.rl_rx_buf; wrap = (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN) - rxbufpos; if (total_len > wrap) { m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp, NULL); if (m == NULL) { ifp->if_ierrors++; } else { m_copyback(m, wrap, total_len - wrap, sc->rl_cdata.rl_rx_buf); } cur_rx = (total_len - wrap + ETHER_CRC_LEN); } else { m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp, NULL); if (m == NULL) ifp->if_ierrors++; cur_rx += total_len + 4 + ETHER_CRC_LEN; } /* Round up to 32-bit boundary. */ cur_rx = (cur_rx + 3) & ~3; CSR_WRITE_2(sc, RL_CURRXADDR, cur_rx - 16); if (m == NULL) continue; ifp->if_ipackets++; RL_UNLOCK(sc); (*ifp->if_input)(ifp, m); RL_LOCK(sc); } } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. */ static void rl_txeof(struct rl_softc *sc) { struct ifnet *ifp = sc->rl_ifp; uint32_t txstat; RL_LOCK_ASSERT(sc); /* * Go through our tx list and free mbufs for those * frames that have been uploaded. */ do { if (RL_LAST_TXMBUF(sc) == NULL) break; txstat = CSR_READ_4(sc, RL_LAST_TXSTAT(sc)); if (!(txstat & (RL_TXSTAT_TX_OK| RL_TXSTAT_TX_UNDERRUN|RL_TXSTAT_TXABRT))) break; ifp->if_collisions += (txstat & RL_TXSTAT_COLLCNT) >> 24; bus_dmamap_unload(sc->rl_tag, RL_LAST_DMAMAP(sc)); bus_dmamap_destroy(sc->rl_tag, RL_LAST_DMAMAP(sc)); m_freem(RL_LAST_TXMBUF(sc)); RL_LAST_TXMBUF(sc) = NULL; /* * If there was a transmit underrun, bump the TX threshold. * Make sure not to overflow the 63 * 32byte we can address * with the 6 available bit. 
*/ if ((txstat & RL_TXSTAT_TX_UNDERRUN) && (sc->rl_txthresh < 2016)) sc->rl_txthresh += 32; if (txstat & RL_TXSTAT_TX_OK) ifp->if_opackets++; else { int oldthresh; ifp->if_oerrors++; if ((txstat & RL_TXSTAT_TXABRT) || (txstat & RL_TXSTAT_OUTOFWIN)) CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); oldthresh = sc->rl_txthresh; /* error recovery */ rl_reset(sc); rl_init_locked(sc); /* restore original threshold */ sc->rl_txthresh = oldthresh; return; } RL_INC(sc->rl_cdata.last_tx); ifp->if_flags &= ~IFF_OACTIVE; } while (sc->rl_cdata.last_tx != sc->rl_cdata.cur_tx); if (RL_LAST_TXMBUF(sc) == NULL) ifp->if_timer = 0; else if (ifp->if_timer == 0) ifp->if_timer = 5; } static void rl_tick(void *xsc) { struct rl_softc *sc = xsc; struct mii_data *mii; RL_LOCK(sc); mii = device_get_softc(sc->rl_miibus); mii_tick(mii); sc->rl_stat_ch = timeout(rl_tick, sc, hz); RL_UNLOCK(sc); } #ifdef DEVICE_POLLING static void rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct rl_softc *sc = ifp->if_softc; RL_LOCK(sc); rl_poll_locked(ifp, cmd, count); RL_UNLOCK(sc); } static void rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct rl_softc *sc = ifp->if_softc; RL_LOCK_ASSERT(sc); if (!(ifp->if_capenable & IFCAP_POLLING)) { ether_poll_deregister(ifp); cmd = POLL_DEREGISTER; } if (cmd == POLL_DEREGISTER) { /* Final call; enable interrupts. */ CSR_WRITE_2(sc, RL_IMR, RL_INTRS); return; } sc->rxcycles = count; rl_rxeof(sc); rl_txeof(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) rl_start_locked(ifp); if (cmd == POLL_AND_CHECK_STATUS) { uint16_t status; /* We should also check the status register. */ status = CSR_READ_2(sc, RL_ISR); if (status == 0xffff) return; if (status != 0) CSR_WRITE_2(sc, RL_ISR, status); /* XXX We should check behaviour on receiver stalls. 
*/ if (status & RL_ISR_SYSTEM_ERR) { rl_reset(sc); rl_init_locked(sc); } } } #endif /* DEVICE_POLLING */ static void rl_intr(void *arg) { struct rl_softc *sc = arg; struct ifnet *ifp = sc->rl_ifp; uint16_t status; RL_LOCK(sc); if (sc->suspended) goto done_locked; #ifdef DEVICE_POLLING if (ifp->if_flags & IFF_POLLING) goto done_locked; if ((ifp->if_capenable & IFCAP_POLLING) && ether_poll_register(rl_poll, ifp)) { /* Disable interrupts. */ CSR_WRITE_2(sc, RL_IMR, 0x0000); rl_poll_locked(ifp, 0, 1); goto done_locked; } #endif /* DEVICE_POLLING */ for (;;) { status = CSR_READ_2(sc, RL_ISR); /* If the card has gone away, the read returns 0xffff. */ if (status == 0xffff) break; if (status != 0) CSR_WRITE_2(sc, RL_ISR, status); if ((status & RL_INTRS) == 0) break; if (status & RL_ISR_RX_OK) rl_rxeof(sc); if (status & RL_ISR_RX_ERR) rl_rxeof(sc); if ((status & RL_ISR_TX_OK) || (status & RL_ISR_TX_ERR)) rl_txeof(sc); if (status & RL_ISR_SYSTEM_ERR) { rl_reset(sc); rl_init_locked(sc); } } if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) rl_start_locked(ifp); done_locked: RL_UNLOCK(sc); } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. */ static int rl_encap(struct rl_softc *sc, struct mbuf *m_head) { struct mbuf *m_new = NULL; RL_LOCK_ASSERT(sc); /* * The RealTek is brain damaged and wants longword-aligned * TX buffers, plus we can only have one fragment buffer * per packet. We have to copy pretty much all the time. */ m_new = m_defrag(m_head, M_DONTWAIT); if (m_new == NULL) { m_freem(m_head); return (1); } m_head = m_new; /* Pad frames to at least 60 bytes. */ if (m_head->m_pkthdr.len < RL_MIN_FRAMELEN) { /* * Make security concious people happy: zero out the * bytes in the pad area, since we don't know what * this mbuf cluster buffer's previous user might * have left in it. 
*/ bzero(mtod(m_head, char *) + m_head->m_pkthdr.len, RL_MIN_FRAMELEN - m_head->m_pkthdr.len); m_head->m_pkthdr.len += (RL_MIN_FRAMELEN - m_head->m_pkthdr.len); m_head->m_len = m_head->m_pkthdr.len; } RL_CUR_TXMBUF(sc) = m_head; return (0); } /* * Main transmit routine. */ static void rl_start(struct ifnet *ifp) { struct rl_softc *sc = ifp->if_softc; RL_LOCK(sc); rl_start_locked(ifp); RL_UNLOCK(sc); } static void rl_start_locked(struct ifnet *ifp) { struct rl_softc *sc = ifp->if_softc; struct mbuf *m_head = NULL; RL_LOCK_ASSERT(sc); while (RL_CUR_TXMBUF(sc) == NULL) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; if (rl_encap(sc, m_head)) break; /* Pass a copy of this mbuf chain to the bpf subsystem. */ BPF_MTAP(ifp, RL_CUR_TXMBUF(sc)); /* Transmit the frame. */ bus_dmamap_create(sc->rl_tag, 0, &RL_CUR_DMAMAP(sc)); bus_dmamap_load(sc->rl_tag, RL_CUR_DMAMAP(sc), mtod(RL_CUR_TXMBUF(sc), void *), RL_CUR_TXMBUF(sc)->m_pkthdr.len, rl_dma_map_txbuf, sc, 0); bus_dmamap_sync(sc->rl_tag, RL_CUR_DMAMAP(sc), BUS_DMASYNC_PREREAD); CSR_WRITE_4(sc, RL_CUR_TXSTAT(sc), RL_TXTHRESH(sc->rl_txthresh) | RL_CUR_TXMBUF(sc)->m_pkthdr.len); RL_INC(sc->rl_cdata.cur_tx); /* Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; } /* * We broke out of the loop because all our TX slots are * full. Mark the NIC as busy until it drains some of the * packets from the queue. */ if (RL_CUR_TXMBUF(sc) != NULL) ifp->if_flags |= IFF_OACTIVE; } static void rl_init(void *xsc) { struct rl_softc *sc = xsc; RL_LOCK(sc); rl_init_locked(sc); RL_UNLOCK(sc); } static void rl_init_locked(struct rl_softc *sc) { struct ifnet *ifp = sc->rl_ifp; struct mii_data *mii; uint32_t rxcfg = 0; RL_LOCK_ASSERT(sc); mii = device_get_softc(sc->rl_miibus); /* * Cancel pending I/O and free all RX/TX buffers. */ rl_stop(sc); /* * Init our MAC address. Even though the chipset * documentation doesn't mention it, we need to enter "Config * register write enable" mode to modify the ID registers. 
*/ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); CSR_WRITE_STREAM_4(sc, RL_IDR0, *(uint32_t *)(&IFP2ENADDR(sc->rl_ifp)[0])); CSR_WRITE_STREAM_4(sc, RL_IDR4, *(uint32_t *)(&IFP2ENADDR(sc->rl_ifp)[4])); CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); /* Init the RX buffer pointer register. */ bus_dmamap_load(sc->rl_tag, sc->rl_cdata.rl_rx_dmamap, sc->rl_cdata.rl_rx_buf, RL_RXBUFLEN, rl_dma_map_rxbuf, sc, 0); bus_dmamap_sync(sc->rl_tag, sc->rl_cdata.rl_rx_dmamap, BUS_DMASYNC_PREWRITE); /* Init TX descriptors. */ rl_list_tx_init(sc); /* * Enable transmit and receive. */ CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); /* * Set the initial TX and RX configuration. */ CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG); /* Set the individual bit to receive frames for this host only. */ rxcfg = CSR_READ_4(sc, RL_RXCFG); rxcfg |= RL_RXCFG_RX_INDIV; /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) { rxcfg |= RL_RXCFG_RX_ALLPHYS; CSR_WRITE_4(sc, RL_RXCFG, rxcfg); } else { rxcfg &= ~RL_RXCFG_RX_ALLPHYS; CSR_WRITE_4(sc, RL_RXCFG, rxcfg); } /* Set capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) { rxcfg |= RL_RXCFG_RX_BROAD; CSR_WRITE_4(sc, RL_RXCFG, rxcfg); } else { rxcfg &= ~RL_RXCFG_RX_BROAD; CSR_WRITE_4(sc, RL_RXCFG, rxcfg); } /* Program the multicast filter, if necessary. */ rl_setmulti(sc); #ifdef DEVICE_POLLING /* Disable interrupts if we are polling. */ if (ifp->if_flags & IFF_POLLING) CSR_WRITE_2(sc, RL_IMR, 0); else #endif /* DEVICE_POLLING */ /* Enable interrupts. */ CSR_WRITE_2(sc, RL_IMR, RL_INTRS); /* Set initial TX threshold */ sc->rl_txthresh = RL_TX_THRESH_INIT; /* Start RX/TX process. */ CSR_WRITE_4(sc, RL_MISSEDPKT, 0); /* Enable receiver and transmitter. 
*/ CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); mii_mediachg(mii); CSR_WRITE_1(sc, RL_CFG1, RL_CFG1_DRVLOAD|RL_CFG1_FULLDUPLEX); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; sc->rl_stat_ch = timeout(rl_tick, sc, hz); } /* * Set media options. */ static int rl_ifmedia_upd(struct ifnet *ifp) { struct rl_softc *sc = ifp->if_softc; struct mii_data *mii; mii = device_get_softc(sc->rl_miibus); mii_mediachg(mii); return (0); } /* * Report current media status. */ static void rl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct rl_softc *sc = ifp->if_softc; struct mii_data *mii; mii = device_get_softc(sc->rl_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static int rl_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct ifreq *ifr = (struct ifreq *)data; struct mii_data *mii; struct rl_softc *sc = ifp->if_softc; int error = 0; switch (command) { case SIOCSIFFLAGS: RL_LOCK(sc); if (ifp->if_flags & IFF_UP) { rl_init_locked(sc); } else { if (ifp->if_flags & IFF_RUNNING) rl_stop(sc); } RL_UNLOCK(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: RL_LOCK(sc); rl_setmulti(sc); RL_UNLOCK(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->rl_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: ifp->if_capenable &= ~IFCAP_POLLING; ifp->if_capenable |= ifr->ifr_reqcap & IFCAP_POLLING; break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void rl_watchdog(struct ifnet *ifp) { struct rl_softc *sc = ifp->if_softc; RL_LOCK(sc); if_printf(ifp, "watchdog timeout\n"); ifp->if_oerrors++; rl_txeof(sc); rl_rxeof(sc); rl_init_locked(sc); RL_UNLOCK(sc); } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. 
*/ static void rl_stop(struct rl_softc *sc) { register int i; struct ifnet *ifp = sc->rl_ifp; RL_LOCK_ASSERT(sc); ifp->if_timer = 0; untimeout(rl_tick, sc, sc->rl_stat_ch); ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); #ifdef DEVICE_POLLING ether_poll_deregister(ifp); #endif /* DEVICE_POLLING */ CSR_WRITE_1(sc, RL_COMMAND, 0x00); CSR_WRITE_2(sc, RL_IMR, 0x0000); bus_dmamap_unload(sc->rl_tag, sc->rl_cdata.rl_rx_dmamap); /* * Free the TX list buffers. */ for (i = 0; i < RL_TX_LIST_CNT; i++) { if (sc->rl_cdata.rl_tx_chain[i] != NULL) { bus_dmamap_unload(sc->rl_tag, sc->rl_cdata.rl_tx_dmamap[i]); bus_dmamap_destroy(sc->rl_tag, sc->rl_cdata.rl_tx_dmamap[i]); m_freem(sc->rl_cdata.rl_tx_chain[i]); sc->rl_cdata.rl_tx_chain[i] = NULL; CSR_WRITE_4(sc, RL_TXADDR0 + (i * sizeof(uint32_t)), 0x0000000); } } } /* * Device suspend routine. Stop the interface and save some PCI * settings in case the BIOS doesn't restore them properly on * resume. */ static int rl_suspend(device_t dev) { struct rl_softc *sc; sc = device_get_softc(dev); RL_LOCK(sc); rl_stop(sc); sc->suspended = 1; RL_UNLOCK(sc); return (0); } /* * Device resume routine. Restore some PCI settings in case the BIOS * doesn't, re-enable busmastering, and restart the interface if * appropriate. */ static int rl_resume(device_t dev) { struct rl_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->rl_ifp; RL_LOCK(sc); /* reinitialize interface if necessary */ if (ifp->if_flags & IFF_UP) rl_init_locked(sc); sc->suspended = 0; RL_UNLOCK(sc); return (0); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. 
*/ static void rl_shutdown(device_t dev) { struct rl_softc *sc; sc = device_get_softc(dev); RL_LOCK(sc); rl_stop(sc); RL_UNLOCK(sc); } Index: stable/6/sys/pci/if_sf.c =================================================================== --- stable/6/sys/pci/if_sf.c (revision 149421) +++ stable/6/sys/pci/if_sf.c (revision 149422) @@ -1,1568 +1,1570 @@ /*- * Copyright (c) 1997, 1998, 1999 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Adaptec AIC-6915 "Starfire" PCI fast ethernet driver for FreeBSD. * Programming manual is available from: * http://download.adaptec.com/pdfs/user_guides/aic6915_pg.pdf. * * Written by Bill Paul * Department of Electical Engineering * Columbia University, New York City */ /* * The Adaptec AIC-6915 "Starfire" is a 64-bit 10/100 PCI ethernet * controller designed with flexibility and reducing CPU load in mind. * The Starfire offers high and low priority buffer queues, a * producer/consumer index mechanism and several different buffer * queue and completion queue descriptor types. Any one of a number * of different driver designs can be used, depending on system and * OS requirements. This driver makes use of type0 transmit frame * descriptors (since BSD fragments packets across an mbuf chain) * and two RX buffer queues prioritized on size (one queue for small * frames that will fit into a single mbuf, another with full size * mbuf clusters for everything else). The producer/consumer indexes * and completion queues are also used. * * One downside to the Starfire has to do with alignment: buffer * queues must be aligned on 256-byte boundaries, and receive buffers * must be aligned on longword boundaries. The receive buffer alignment * causes problems on the Alpha platform, where the packet payload * should be longword aligned. There is no simple way around this. 
* * For receive filtering, the Starfire offers 16 perfect filter slots * and a 512-bit hash table. * * The Starfire has no internal transceiver, relying instead on an * external MII-based transceiver. Accessing registers on external * PHYs is done through a special register map rather than with the * usual bitbang MDIO method. * * Acesssing the registers on the Starfire is a little tricky. The * Starfire has a 512K internal register space. When programmed for * PCI memory mapped mode, the entire register space can be accessed * directly. However in I/O space mode, only 256 bytes are directly * mapped into PCI I/O space. The other registers can be accessed * indirectly using the SF_INDIRECTIO_ADDR and SF_INDIRECTIO_DATA * registers inside the 256-byte I/O window. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include #include #include /* "controller miibus0" required. See GENERIC if you get errors here. 
*/ #include "miibus_if.h" #include #include #define SF_USEIOSPACE #include MODULE_DEPEND(sf, pci, 1, 1, 1); MODULE_DEPEND(sf, ether, 1, 1, 1); MODULE_DEPEND(sf, miibus, 1, 1, 1); static struct sf_type sf_devs[] = { { AD_VENDORID, AD_DEVICEID_STARFIRE, "Adaptec AIC-6915 10/100BaseTX" }, { 0, 0, NULL } }; static int sf_probe(device_t); static int sf_attach(device_t); static int sf_detach(device_t); static void sf_intr(void *); static void sf_stats_update(void *); static void sf_rxeof(struct sf_softc *); static void sf_txeof(struct sf_softc *); static int sf_encap(struct sf_softc *, struct sf_tx_bufdesc_type0 *, struct mbuf *); static void sf_start(struct ifnet *); static int sf_ioctl(struct ifnet *, u_long, caddr_t); static void sf_init(void *); static void sf_stop(struct sf_softc *); static void sf_watchdog(struct ifnet *); static void sf_shutdown(device_t); static int sf_ifmedia_upd(struct ifnet *); static void sf_ifmedia_sts(struct ifnet *, struct ifmediareq *); static void sf_reset(struct sf_softc *); static int sf_init_rx_ring(struct sf_softc *); static void sf_init_tx_ring(struct sf_softc *); static int sf_newbuf(struct sf_softc *, struct sf_rx_bufdesc_type0 *, struct mbuf *); static void sf_setmulti(struct sf_softc *); static int sf_setperf(struct sf_softc *, int, caddr_t); static int sf_sethash(struct sf_softc *, caddr_t, int); #ifdef notdef static int sf_setvlan(struct sf_softc *, int, u_int32_t); #endif static u_int8_t sf_read_eeprom(struct sf_softc *, int); static int sf_miibus_readreg(device_t, int, int); static int sf_miibus_writereg(device_t, int, int, int); static void sf_miibus_statchg(device_t); #ifdef DEVICE_POLLING static void sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); static void sf_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count); #endif /* DEVICE_POLLING */ static u_int32_t csr_read_4(struct sf_softc *, int); static void csr_write_4(struct sf_softc *, int, u_int32_t); static void sf_txthresh_adjust(struct sf_softc *); 
#ifdef SF_USEIOSPACE #define SF_RES SYS_RES_IOPORT #define SF_RID SF_PCI_LOIO #else #define SF_RES SYS_RES_MEMORY #define SF_RID SF_PCI_LOMEM #endif static device_method_t sf_methods[] = { /* Device interface */ DEVMETHOD(device_probe, sf_probe), DEVMETHOD(device_attach, sf_attach), DEVMETHOD(device_detach, sf_detach), DEVMETHOD(device_shutdown, sf_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, sf_miibus_readreg), DEVMETHOD(miibus_writereg, sf_miibus_writereg), DEVMETHOD(miibus_statchg, sf_miibus_statchg), { 0, 0 } }; static driver_t sf_driver = { "sf", sf_methods, sizeof(struct sf_softc), }; static devclass_t sf_devclass; DRIVER_MODULE(sf, pci, sf_driver, sf_devclass, 0, 0); DRIVER_MODULE(miibus, sf, miibus_driver, miibus_devclass, 0, 0); #define SF_SETBIT(sc, reg, x) \ csr_write_4(sc, reg, csr_read_4(sc, reg) | (x)) #define SF_CLRBIT(sc, reg, x) \ csr_write_4(sc, reg, csr_read_4(sc, reg) & ~(x)) static u_int32_t csr_read_4(sc, reg) struct sf_softc *sc; int reg; { u_int32_t val; #ifdef SF_USEIOSPACE CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE); val = CSR_READ_4(sc, SF_INDIRECTIO_DATA); #else val = CSR_READ_4(sc, (reg + SF_RMAP_INTREG_BASE)); #endif return(val); } static u_int8_t sf_read_eeprom(sc, reg) struct sf_softc *sc; int reg; { u_int8_t val; val = (csr_read_4(sc, SF_EEADDR_BASE + (reg & 0xFFFFFFFC)) >> (8 * (reg & 3))) & 0xFF; return(val); } static void csr_write_4(sc, reg, val) struct sf_softc *sc; int reg; u_int32_t val; { #ifdef SF_USEIOSPACE CSR_WRITE_4(sc, SF_INDIRECTIO_ADDR, reg + SF_RMAP_INTREG_BASE); CSR_WRITE_4(sc, SF_INDIRECTIO_DATA, val); #else CSR_WRITE_4(sc, (reg + SF_RMAP_INTREG_BASE), val); #endif } /* * Copy the address 'mac' into the perfect RX filter entry at * offset 'idx.' The perfect filter only has 16 entries so do * some sanity tests. 
*/ static int sf_setperf(sc, idx, mac) struct sf_softc *sc; int idx; caddr_t mac; { u_int16_t *p; if (idx < 0 || idx > SF_RXFILT_PERFECT_CNT) return(EINVAL); if (mac == NULL) return(EINVAL); p = (u_int16_t *)mac; csr_write_4(sc, SF_RXFILT_PERFECT_BASE + (idx * SF_RXFILT_PERFECT_SKIP), htons(p[2])); csr_write_4(sc, SF_RXFILT_PERFECT_BASE + (idx * SF_RXFILT_PERFECT_SKIP) + 4, htons(p[1])); csr_write_4(sc, SF_RXFILT_PERFECT_BASE + (idx * SF_RXFILT_PERFECT_SKIP) + 8, htons(p[0])); return(0); } /* * Set the bit in the 512-bit hash table that corresponds to the * specified mac address 'mac.' If 'prio' is nonzero, update the * priority hash table instead of the filter hash table. */ static int sf_sethash(sc, mac, prio) struct sf_softc *sc; caddr_t mac; int prio; { u_int32_t h; if (mac == NULL) return(EINVAL); h = ether_crc32_be(mac, ETHER_ADDR_LEN) >> 23; if (prio) { SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_PRIOOFF + (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF))); } else { SF_SETBIT(sc, SF_RXFILT_HASH_BASE + SF_RXFILT_HASH_ADDROFF + (SF_RXFILT_HASH_SKIP * (h >> 4)), (1 << (h & 0xF))); } return(0); } #ifdef notdef /* * Set a VLAN tag in the receive filter. 
*/ static int sf_setvlan(sc, idx, vlan) struct sf_softc *sc; int idx; u_int32_t vlan; { if (idx < 0 || idx >> SF_RXFILT_HASH_CNT) return(EINVAL); csr_write_4(sc, SF_RXFILT_HASH_BASE + (idx * SF_RXFILT_HASH_SKIP) + SF_RXFILT_HASH_VLANOFF, vlan); return(0); } #endif static int sf_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct sf_softc *sc; int i; u_int32_t val = 0; sc = device_get_softc(dev); for (i = 0; i < SF_TIMEOUT; i++) { val = csr_read_4(sc, SF_PHY_REG(phy, reg)); if (val & SF_MII_DATAVALID) break; } if (i == SF_TIMEOUT) return(0); if ((val & 0x0000FFFF) == 0xFFFF) return(0); return(val & 0x0000FFFF); } static int sf_miibus_writereg(dev, phy, reg, val) device_t dev; int phy, reg, val; { struct sf_softc *sc; int i; int busy; sc = device_get_softc(dev); csr_write_4(sc, SF_PHY_REG(phy, reg), val); for (i = 0; i < SF_TIMEOUT; i++) { busy = csr_read_4(sc, SF_PHY_REG(phy, reg)); if (!(busy & SF_MII_BUSY)) break; } return(0); } static void sf_miibus_statchg(dev) device_t dev; { struct sf_softc *sc; struct mii_data *mii; sc = device_get_softc(dev); mii = device_get_softc(sc->sf_miibus); if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_FULLDUPLEX); csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_FDX); } else { SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_FULLDUPLEX); csr_write_4(sc, SF_BKTOBKIPG, SF_IPGT_HDX); } } static void sf_setmulti(sc) struct sf_softc *sc; { struct ifnet *ifp; int i; struct ifmultiaddr *ifma; u_int8_t dummy[] = { 0, 0, 0, 0, 0, 0 }; ifp = sc->sf_ifp; /* First zot all the existing filters. */ for (i = 1; i < SF_RXFILT_PERFECT_CNT; i++) sf_setperf(sc, i, (char *)&dummy); for (i = SF_RXFILT_HASH_BASE; i < (SF_RXFILT_HASH_MAX + 1); i += 4) csr_write_4(sc, i, 0); SF_CLRBIT(sc, SF_RXFILT, SF_RXFILT_ALLMULTI); /* Now program new ones. 
*/ if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { SF_SETBIT(sc, SF_RXFILT, SF_RXFILT_ALLMULTI); } else { i = 1; + IF_ADDR_LOCK(ifp); TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; /* * Program the first 15 multicast groups * into the perfect filter. For all others, * use the hash table. */ if (i < SF_RXFILT_PERFECT_CNT) { sf_setperf(sc, i, LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); i++; continue; } sf_sethash(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr), 0); } + IF_ADDR_UNLOCK(ifp); } } /* * Set media options. */ static int sf_ifmedia_upd(ifp) struct ifnet *ifp; { struct sf_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->sf_miibus); sc->sf_link = 0; if (mii->mii_instance) { struct mii_softc *miisc; LIST_FOREACH(miisc, &mii->mii_phys, mii_list) mii_phy_reset(miisc); } mii_mediachg(mii); return(0); } /* * Report current media status. */ static void sf_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct sf_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->sf_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static int sf_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct sf_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int error = 0; SF_LOCK(sc); switch(command) { case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->sf_if_flags & IFF_PROMISC)) { SF_SETBIT(sc, SF_RXFILT, SF_RXFILT_PROMISC); } else if (ifp->if_flags & IFF_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->sf_if_flags & IFF_PROMISC) { SF_CLRBIT(sc, SF_RXFILT, SF_RXFILT_PROMISC); } else if (!(ifp->if_flags & IFF_RUNNING)) sf_init(sc); } else { if (ifp->if_flags & IFF_RUNNING) sf_stop(sc); } sc->sf_if_flags 
= ifp->if_flags; error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: sf_setmulti(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->sf_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: ifp->if_capenable &= ~IFCAP_POLLING; ifp->if_capenable |= ifr->ifr_reqcap & IFCAP_POLLING; break; default: error = ether_ioctl(ifp, command, data); break; } SF_UNLOCK(sc); return(error); } static void sf_reset(sc) struct sf_softc *sc; { register int i; csr_write_4(sc, SF_GEN_ETH_CTL, 0); SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); DELAY(1000); SF_CLRBIT(sc, SF_MACCFG_1, SF_MACCFG1_SOFTRESET); SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_RESET); for (i = 0; i < SF_TIMEOUT; i++) { DELAY(10); if (!(csr_read_4(sc, SF_PCI_DEVCFG) & SF_PCIDEVCFG_RESET)) break; } if (i == SF_TIMEOUT) printf("sf%d: reset never completed!\n", sc->sf_unit); /* Wait a little while for the chip to get its brains in order. */ DELAY(1000); } /* * Probe for an Adaptec AIC-6915 chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. * We also check the subsystem ID so that we can identify exactly which * NIC has been found, if possible. 
*/ static int sf_probe(dev) device_t dev; { struct sf_type *t; t = sf_devs; while(t->sf_name != NULL) { if ((pci_get_vendor(dev) == t->sf_vid) && (pci_get_device(dev) == t->sf_did)) { switch((pci_read_config(dev, SF_PCI_SUBVEN_ID, 4) >> 16) & 0xFFFF) { case AD_SUBSYSID_62011_REV0: case AD_SUBSYSID_62011_REV1: device_set_desc(dev, "Adaptec ANA-62011 10/100BaseTX"); return (BUS_PROBE_DEFAULT); case AD_SUBSYSID_62022: device_set_desc(dev, "Adaptec ANA-62022 10/100BaseTX"); return (BUS_PROBE_DEFAULT); case AD_SUBSYSID_62044_REV0: case AD_SUBSYSID_62044_REV1: device_set_desc(dev, "Adaptec ANA-62044 10/100BaseTX"); return (BUS_PROBE_DEFAULT); case AD_SUBSYSID_62020: device_set_desc(dev, "Adaptec ANA-62020 10/100BaseFX"); return (BUS_PROBE_DEFAULT); case AD_SUBSYSID_69011: device_set_desc(dev, "Adaptec ANA-69011 10/100BaseTX"); return (BUS_PROBE_DEFAULT); default: device_set_desc(dev, t->sf_name); return (BUS_PROBE_DEFAULT); break; } } t++; } return(ENXIO); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int sf_attach(dev) device_t dev; { int i; struct sf_softc *sc; struct ifnet *ifp; int unit, rid, error = 0; u_char eaddr[6]; sc = device_get_softc(dev); unit = device_get_unit(dev); mtx_init(&sc->sf_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); /* * Map control/status registers. */ pci_enable_busmaster(dev); rid = SF_RID; sc->sf_res = bus_alloc_resource_any(dev, SF_RES, &rid, RF_ACTIVE); if (sc->sf_res == NULL) { printf ("sf%d: couldn't map ports\n", unit); error = ENXIO; goto fail; } sc->sf_btag = rman_get_bustag(sc->sf_res); sc->sf_bhandle = rman_get_bushandle(sc->sf_res); /* Allocate interrupt */ rid = 0; sc->sf_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->sf_irq == NULL) { printf("sf%d: couldn't map interrupt\n", unit); error = ENXIO; goto fail; } callout_handle_init(&sc->sf_stat_ch); /* Reset the adapter. 
*/ sf_reset(sc); /* * Get station address from the EEPROM. */ for (i = 0; i < ETHER_ADDR_LEN; i++) eaddr[i] = sf_read_eeprom(sc, SF_EE_NODEADDR + ETHER_ADDR_LEN - i); sc->sf_unit = unit; /* Allocate the descriptor queues. */ sc->sf_ldata = contigmalloc(sizeof(struct sf_list_data), M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc->sf_ldata == NULL) { printf("sf%d: no memory for list buffers!\n", unit); error = ENXIO; goto fail; } bzero(sc->sf_ldata, sizeof(struct sf_list_data)); ifp = sc->sf_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { printf("sf%d: can not if_alloc()\n", sc->sf_unit); error = ENOSPC; goto fail; } /* Do MII setup. */ if (mii_phy_probe(dev, &sc->sf_miibus, sf_ifmedia_upd, sf_ifmedia_sts)) { printf("sf%d: MII without any phy!\n", sc->sf_unit); error = ENXIO; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_NEEDSGIANT; ifp->if_ioctl = sf_ioctl; ifp->if_start = sf_start; ifp->if_watchdog = sf_watchdog; ifp->if_init = sf_init; ifp->if_baudrate = 10000000; IFQ_SET_MAXLEN(&ifp->if_snd, SF_TX_DLIST_CNT - 1); ifp->if_snd.ifq_drv_maxlen = SF_TX_DLIST_CNT - 1; IFQ_SET_READY(&ifp->if_snd); #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif /* DEVICE_POLLING */ ifp->if_capenable = ifp->if_capabilities; /* * Call MI attach routine. */ ether_ifattach(ifp, eaddr); /* Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->sf_irq, INTR_TYPE_NET, sf_intr, sc, &sc->sf_intrhand); if (error) { printf("sf%d: couldn't set up irq\n", unit); ether_ifdetach(ifp); if_free(ifp); goto fail; } fail: if (error) sf_detach(dev); return(error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. 
It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. */ static int sf_detach(dev) device_t dev; { struct sf_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); KASSERT(mtx_initialized(&sc->sf_mtx), ("sf mutex not initialized")); SF_LOCK(sc); ifp = sc->sf_ifp; /* These should only be active if attach succeeded */ if (device_is_attached(dev)) { sf_stop(sc); ether_ifdetach(ifp); if_free(ifp); } if (sc->sf_miibus) device_delete_child(dev, sc->sf_miibus); bus_generic_detach(dev); if (sc->sf_intrhand) bus_teardown_intr(dev, sc->sf_irq, sc->sf_intrhand); if (sc->sf_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sf_irq); if (sc->sf_res) bus_release_resource(dev, SF_RES, SF_RID, sc->sf_res); if (sc->sf_ldata) contigfree(sc->sf_ldata, sizeof(struct sf_list_data), M_DEVBUF); SF_UNLOCK(sc); mtx_destroy(&sc->sf_mtx); return(0); } static int sf_init_rx_ring(sc) struct sf_softc *sc; { struct sf_list_data *ld; int i; ld = sc->sf_ldata; bzero((char *)ld->sf_rx_dlist_big, sizeof(struct sf_rx_bufdesc_type0) * SF_RX_DLIST_CNT); bzero((char *)ld->sf_rx_clist, sizeof(struct sf_rx_cmpdesc_type3) * SF_RX_CLIST_CNT); for (i = 0; i < SF_RX_DLIST_CNT; i++) { if (sf_newbuf(sc, &ld->sf_rx_dlist_big[i], NULL) == ENOBUFS) return(ENOBUFS); } return(0); } static void sf_init_tx_ring(sc) struct sf_softc *sc; { struct sf_list_data *ld; int i; ld = sc->sf_ldata; bzero((char *)ld->sf_tx_dlist, sizeof(struct sf_tx_bufdesc_type0) * SF_TX_DLIST_CNT); bzero((char *)ld->sf_tx_clist, sizeof(struct sf_tx_cmpdesc_type0) * SF_TX_CLIST_CNT); for (i = 0; i < SF_TX_DLIST_CNT; i++) ld->sf_tx_dlist[i].sf_id = SF_TX_BUFDESC_ID; for (i = 0; i < SF_TX_CLIST_CNT; i++) ld->sf_tx_clist[i].sf_type = SF_TXCMPTYPE_TX; ld->sf_tx_dlist[SF_TX_DLIST_CNT - 1].sf_end = 1; sc->sf_tx_cnt = 0; } static int sf_newbuf(sc, c, m) struct sf_softc *sc; struct sf_rx_bufdesc_type0 *c; struct mbuf *m; { struct mbuf 
*m_new = NULL; if (m == NULL) { MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) return(ENOBUFS); MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { m_freem(m_new); return(ENOBUFS); } m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; } else { m_new = m; m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; m_new->m_data = m_new->m_ext.ext_buf; } m_adj(m_new, sizeof(u_int64_t)); c->sf_mbuf = m_new; c->sf_addrlo = SF_RX_HOSTADDR(vtophys(mtod(m_new, caddr_t))); c->sf_valid = 1; return(0); } /* * The starfire is programmed to use 'normal' mode for packet reception, * which means we use the consumer/producer model for both the buffer * descriptor queue and the completion descriptor queue. The only problem * with this is that it involves a lot of register accesses: we have to * read the RX completion consumer and producer indexes and the RX buffer * producer index, plus the RX completion consumer and RX buffer producer * indexes have to be updated. It would have been easier if Adaptec had * put each index in a separate register, especially given that the damn * NIC has a 512K register space. * * In spite of all the lovely features that Adaptec crammed into the 6915, * it is marred by one truly stupid design flaw, which is that receive * buffer addresses must be aligned on a longword boundary. This forces * the packet payload to be unaligned, which is suboptimal on the x86 and * completely unuseable on the Alpha. Our only recourse is to copy received * packets into properly aligned buffers before handing them off. 
 */
/*
 * Drain the RX completion queue.  Because the chip forces longword
 * alignment of RX buffers, every received frame is copied (m_devget)
 * into a freshly aligned mbuf before being handed to the stack.
 * Called with the softc lock held; the lock is dropped around
 * if_input() to avoid recursion into the driver.
 */
static void
sf_rxeof(sc)
	struct sf_softc *sc;
{
	struct mbuf *m;
	struct ifnet *ifp;
	struct sf_rx_bufdesc_type0 *desc;
	struct sf_rx_cmpdesc_type3 *cur_rx;
	u_int32_t rxcons, rxprod;
	int cmpprodidx, cmpconsidx, bufprodidx;

	SF_LOCK_ASSERT(sc);

	ifp = sc->sf_ifp;

	/* Snapshot the completion-queue and buffer-queue indexes. */
	rxcons = csr_read_4(sc, SF_CQ_CONSIDX);
	rxprod = csr_read_4(sc, SF_RXDQ_PTR_Q1);
	cmpprodidx = SF_IDX_LO(csr_read_4(sc, SF_CQ_PRODIDX));
	cmpconsidx = SF_IDX_LO(rxcons);
	bufprodidx = SF_IDX_LO(rxprod);

	while (cmpconsidx != cmpprodidx) {
		struct mbuf *m0;

#ifdef DEVICE_POLLING
		/* In polling mode, honor the per-call packet budget. */
		if (ifp->if_flags & IFF_POLLING) {
			if (sc->rxcycles <= 0)
				break;
			sc->rxcycles--;
		}
#endif /* DEVICE_POLLING */

		cur_rx = &sc->sf_ldata->sf_rx_clist[cmpconsidx];
		desc = &sc->sf_ldata->sf_rx_dlist_big[cur_rx->sf_endidx];
		m = desc->sf_mbuf;
		SF_INC(cmpconsidx, SF_RX_CLIST_CNT);
		SF_INC(bufprodidx, SF_RX_DLIST_CNT);

		if (!(cur_rx->sf_status1 & SF_RXSTAT1_OK)) {
			/* Bad frame: count it and recycle the buffer. */
			ifp->if_ierrors++;
			sf_newbuf(sc, desc, m);
			continue;
		}

		/* Copy into an ETHER_ALIGN'ed mbuf (see alignment note above). */
		m0 = m_devget(mtod(m, char *), cur_rx->sf_len, ETHER_ALIGN,
		    ifp, NULL);
		sf_newbuf(sc, desc, m);
		if (m0 == NULL) {
			ifp->if_ierrors++;
			continue;
		}
		m = m0;

		ifp->if_ipackets++;
		/* Drop the lock across the stack input call. */
		SF_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		SF_LOCK(sc);
	}

	/* Acknowledge consumed completions and return buffers to the chip. */
	csr_write_4(sc, SF_CQ_CONSIDX,
	    (rxcons & ~SF_CQ_CONSIDX_RXQ1) | cmpconsidx);
	csr_write_4(sc, SF_RXDQ_PTR_Q1,
	    (rxprod & ~SF_RXDQ_PRODIDX) | bufprodidx);
}

/*
 * Read the transmit status from the completion queue and release
 * mbufs. Note that the buffer descriptor index in the completion
 * descriptor is an offset from the start of the transmit buffer
 * descriptor list in bytes. This is important because the manual
 * gives the impression that it should match the producer/consumer
 * index, which is the offset in 8 byte blocks.
 */
static void
sf_txeof(sc)
	struct sf_softc *sc;
{
	int txcons, cmpprodidx, cmpconsidx;
	struct sf_tx_cmpdesc_type1 *cur_cmp;
	struct sf_tx_bufdesc_type0 *cur_tx;
	struct ifnet *ifp;

	ifp = sc->sf_ifp;

	txcons = csr_read_4(sc, SF_CQ_CONSIDX);
	cmpprodidx = SF_IDX_HI(csr_read_4(sc, SF_CQ_PRODIDX));
	cmpconsidx = SF_IDX_HI(txcons);

	while (cmpconsidx != cmpprodidx) {
		cur_cmp = &sc->sf_ldata->sf_tx_clist[cmpconsidx];
		/* sf_index is a byte offset; >> 7 converts to a dlist slot. */
		cur_tx = &sc->sf_ldata->sf_tx_dlist[cur_cmp->sf_index >> 7];

		if (cur_cmp->sf_txstat & SF_TXSTAT_TX_OK)
			ifp->if_opackets++;
		else {
			/* Bump the TX threshold on FIFO underrun. */
			if (cur_cmp->sf_txstat & SF_TXSTAT_TX_UNDERRUN)
				sf_txthresh_adjust(sc);
			ifp->if_oerrors++;
		}

		sc->sf_tx_cnt--;
		if (cur_tx->sf_mbuf != NULL) {
			m_freem(cur_tx->sf_mbuf);
			cur_tx->sf_mbuf = NULL;
		} else
			break;
		SF_INC(cmpconsidx, SF_TX_CLIST_CNT);
	}

	ifp->if_timer = 0;
	ifp->if_flags &= ~IFF_OACTIVE;

	csr_write_4(sc, SF_CQ_CONSIDX,
	    (txcons & ~SF_CQ_CONSIDX_TXQ) |
	    ((cmpconsidx << 16) & 0xFFFF0000));
}

/*
 * On TX FIFO underrun, raise the transmit start threshold (in units
 * of 4 bytes) until it saturates at 0xFF.
 */
static void
sf_txthresh_adjust(sc)
	struct sf_softc *sc;
{
	u_int32_t txfctl;
	u_int8_t txthresh;

	txfctl = csr_read_4(sc, SF_TX_FRAMCTL);
	txthresh = txfctl & SF_TXFRMCTL_TXTHRESH;
	if (txthresh < 0xFF) {
		txthresh++;
		txfctl &= ~SF_TXFRMCTL_TXTHRESH;
		txfctl |= txthresh;
#ifdef DIAGNOSTIC
		printf("sf%d: tx underrun, increasing "
		    "tx threshold to %d bytes\n",
		    sc->sf_unit, txthresh * 4);
#endif
		csr_write_4(sc, SF_TX_FRAMCTL, txfctl);
	}
}

#ifdef DEVICE_POLLING
/* Locked wrapper around sf_poll_locked() for the polling framework. */
static void
sf_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct sf_softc *sc = ifp->if_softc;

	SF_LOCK(sc);
	sf_poll_locked(ifp, cmd, count);
	SF_UNLOCK(sc);
}

/*
 * Polling worker: services RX/TX rings, restarts transmission, and on
 * POLL_AND_CHECK_STATUS handles abnormal interrupt conditions.  Expects
 * the softc lock to be held.
 */
static void
sf_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct sf_softc *sc = ifp->if_softc;

	SF_LOCK_ASSERT(sc);

	if (!(ifp->if_capenable & IFCAP_POLLING)) {
		/* Polling was disabled via ioctl; fall back to interrupts. */
		ether_poll_deregister(ifp);
		cmd = POLL_DEREGISTER;
	}

	if (cmd == POLL_DEREGISTER) {
		/* Final call, enable interrupts.
*/ csr_write_4(sc, SF_IMR, SF_INTRS); return; } sc->rxcycles = count; sf_rxeof(sc); sf_txeof(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) sf_start(ifp); if (cmd == POLL_AND_CHECK_STATUS) { u_int32_t status; status = csr_read_4(sc, SF_ISR); if (status) csr_write_4(sc, SF_ISR, status); if (status & SF_ISR_TX_LOFIFO) sf_txthresh_adjust(sc); if (status & SF_ISR_ABNORMALINTR) { if (status & SF_ISR_STATSOFLOW) { untimeout(sf_stats_update, sc, sc->sf_stat_ch); sf_stats_update(sc); } else sf_init(sc); } } } #endif /* DEVICE_POLLING */ static void sf_intr(arg) void *arg; { struct sf_softc *sc; struct ifnet *ifp; u_int32_t status; sc = arg; SF_LOCK(sc); ifp = sc->sf_ifp; #ifdef DEVICE_POLLING if (ifp->if_flags & IFF_POLLING) goto done_locked; if ((ifp->if_capenable & IFCAP_POLLING) && ether_poll_register(sf_poll, ifp)) { /* OK, disable interrupts. */ csr_write_4(sc, SF_IMR, 0x00000000); sf_poll_locked(ifp, 0, 1); goto done_locked; } #endif /* DEVICE_POLLING */ if (!(csr_read_4(sc, SF_ISR_SHADOW) & SF_ISR_PCIINT_ASSERTED)) { SF_UNLOCK(sc); return; } /* Disable interrupts. */ csr_write_4(sc, SF_IMR, 0x00000000); for (;;) { status = csr_read_4(sc, SF_ISR); if (status) csr_write_4(sc, SF_ISR, status); if (!(status & SF_INTRS)) break; if (status & SF_ISR_RXDQ1_DMADONE) sf_rxeof(sc); if (status & SF_ISR_TX_TXDONE || status & SF_ISR_TX_DMADONE || status & SF_ISR_TX_QUEUEDONE) sf_txeof(sc); if (status & SF_ISR_TX_LOFIFO) sf_txthresh_adjust(sc); if (status & SF_ISR_ABNORMALINTR) { if (status & SF_ISR_STATSOFLOW) { untimeout(sf_stats_update, sc, sc->sf_stat_ch); sf_stats_update(sc); } else sf_init(sc); } } /* Re-enable interrupts. 
*/ csr_write_4(sc, SF_IMR, SF_INTRS); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) sf_start(ifp); #ifdef DEVICE_POLLING done_locked: #endif /* DEVICE_POLLING */ SF_UNLOCK(sc); } static void sf_init(xsc) void *xsc; { struct sf_softc *sc; struct ifnet *ifp; struct mii_data *mii; int i; sc = xsc; SF_LOCK(sc); ifp = sc->sf_ifp; mii = device_get_softc(sc->sf_miibus); sf_stop(sc); sf_reset(sc); /* Init all the receive filter registers */ for (i = SF_RXFILT_PERFECT_BASE; i < (SF_RXFILT_HASH_MAX + 1); i += 4) csr_write_4(sc, i, 0); /* Empty stats counter registers. */ for (i = 0; i < sizeof(struct sf_stats)/sizeof(u_int32_t); i++) csr_write_4(sc, SF_STATS_BASE + (i + sizeof(u_int32_t)), 0); /* Init our MAC address */ csr_write_4(sc, SF_PAR0, *(u_int32_t *)(&IFP2ENADDR(sc->sf_ifp)[0])); csr_write_4(sc, SF_PAR1, *(u_int32_t *)(&IFP2ENADDR(sc->sf_ifp)[4])); sf_setperf(sc, 0, (caddr_t)&IFP2ENADDR(sc->sf_ifp)); if (sf_init_rx_ring(sc) == ENOBUFS) { printf("sf%d: initialization failed: no " "memory for rx buffers\n", sc->sf_unit); SF_UNLOCK(sc); return; } sf_init_tx_ring(sc); csr_write_4(sc, SF_RXFILT, SF_PERFMODE_NORMAL|SF_HASHMODE_WITHVLAN); /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) { SF_SETBIT(sc, SF_RXFILT, SF_RXFILT_PROMISC); } else { SF_CLRBIT(sc, SF_RXFILT, SF_RXFILT_PROMISC); } if (ifp->if_flags & IFF_BROADCAST) { SF_SETBIT(sc, SF_RXFILT, SF_RXFILT_BROAD); } else { SF_CLRBIT(sc, SF_RXFILT, SF_RXFILT_BROAD); } /* * Load the multicast filter. */ sf_setmulti(sc); /* Init the completion queue indexes */ csr_write_4(sc, SF_CQ_CONSIDX, 0); csr_write_4(sc, SF_CQ_PRODIDX, 0); /* Init the RX completion queue */ csr_write_4(sc, SF_RXCQ_CTL_1, vtophys(sc->sf_ldata->sf_rx_clist) & SF_RXCQ_ADDR); SF_SETBIT(sc, SF_RXCQ_CTL_1, SF_RXCQTYPE_3); /* Init RX DMA control. */ SF_SETBIT(sc, SF_RXDMA_CTL, SF_RXDMA_REPORTBADPKTS); /* Init the RX buffer descriptor queue. 
*/ csr_write_4(sc, SF_RXDQ_ADDR_Q1, vtophys(sc->sf_ldata->sf_rx_dlist_big)); csr_write_4(sc, SF_RXDQ_CTL_1, (MCLBYTES << 16) | SF_DESCSPACE_16BYTES); csr_write_4(sc, SF_RXDQ_PTR_Q1, SF_RX_DLIST_CNT - 1); /* Init the TX completion queue */ csr_write_4(sc, SF_TXCQ_CTL, vtophys(sc->sf_ldata->sf_tx_clist) & SF_RXCQ_ADDR); /* Init the TX buffer descriptor queue. */ csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, vtophys(sc->sf_ldata->sf_tx_dlist)); SF_SETBIT(sc, SF_TX_FRAMCTL, SF_TXFRMCTL_CPLAFTERTX); csr_write_4(sc, SF_TXDQ_CTL, SF_TXBUFDESC_TYPE0|SF_TXMINSPACE_128BYTES|SF_TXSKIPLEN_8BYTES); SF_SETBIT(sc, SF_TXDQ_CTL, SF_TXDQCTL_NODMACMP); /* Enable autopadding of short TX frames. */ SF_SETBIT(sc, SF_MACCFG_1, SF_MACCFG1_AUTOPAD); #ifdef DEVICE_POLLING /* Disable interrupts if we are polling. */ if (ifp->if_flags & IFF_POLLING) csr_write_4(sc, SF_IMR, 0x00000000); else #endif /* DEVICE_POLLING */ /* Enable interrupts. */ csr_write_4(sc, SF_IMR, SF_INTRS); SF_SETBIT(sc, SF_PCI_DEVCFG, SF_PCIDEVCFG_INTR_ENB); /* Enable the RX and TX engines. 
*/ SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_RX_ENB|SF_ETHCTL_RXDMA_ENB); SF_SETBIT(sc, SF_GEN_ETH_CTL, SF_ETHCTL_TX_ENB|SF_ETHCTL_TXDMA_ENB); /*mii_mediachg(mii);*/ sf_ifmedia_upd(ifp); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; sc->sf_stat_ch = timeout(sf_stats_update, sc, hz); SF_UNLOCK(sc); } static int sf_encap(sc, c, m_head) struct sf_softc *sc; struct sf_tx_bufdesc_type0 *c; struct mbuf *m_head; { int frag = 0; struct sf_frag *f = NULL; struct mbuf *m; m = m_head; for (m = m_head, frag = 0; m != NULL; m = m->m_next) { if (m->m_len != 0) { if (frag == SF_MAXFRAGS) break; f = &c->sf_frags[frag]; if (frag == 0) f->sf_pktlen = m_head->m_pkthdr.len; f->sf_fraglen = m->m_len; f->sf_addr = vtophys(mtod(m, vm_offset_t)); frag++; } } if (m != NULL) { struct mbuf *m_new = NULL; MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { printf("sf%d: no memory for tx list\n", sc->sf_unit); return(1); } if (m_head->m_pkthdr.len > MHLEN) { MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { m_freem(m_new); printf("sf%d: no memory for tx list\n", sc->sf_unit); return(1); } } m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t)); m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len; m_freem(m_head); m_head = m_new; f = &c->sf_frags[0]; f->sf_fraglen = f->sf_pktlen = m_head->m_pkthdr.len; f->sf_addr = vtophys(mtod(m_head, caddr_t)); frag = 1; } c->sf_mbuf = m_head; c->sf_id = SF_TX_BUFDESC_ID; c->sf_fragcnt = frag; c->sf_intr = 1; c->sf_caltcp = 0; c->sf_crcen = 1; return(0); } static void sf_start(ifp) struct ifnet *ifp; { struct sf_softc *sc; struct sf_tx_bufdesc_type0 *cur_tx = NULL; struct mbuf *m_head = NULL; int i, txprod; sc = ifp->if_softc; SF_LOCK(sc); if (!sc->sf_link && ifp->if_snd.ifq_len < 10) { SF_UNLOCK(sc); return; } if (ifp->if_flags & IFF_OACTIVE) { SF_UNLOCK(sc); return; } txprod = csr_read_4(sc, SF_TXDQ_PRODIDX); i = SF_IDX_HI(txprod) >> 4; if (sc->sf_ldata->sf_tx_dlist[i].sf_mbuf != NULL) { printf("sf%d: TX ring 
full, resetting\n", sc->sf_unit); sf_init(sc); txprod = csr_read_4(sc, SF_TXDQ_PRODIDX); i = SF_IDX_HI(txprod) >> 4; } while(sc->sf_ldata->sf_tx_dlist[i].sf_mbuf == NULL) { if (sc->sf_tx_cnt >= (SF_TX_DLIST_CNT - 5)) { ifp->if_flags |= IFF_OACTIVE; cur_tx = NULL; break; } IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; cur_tx = &sc->sf_ldata->sf_tx_dlist[i]; if (sf_encap(sc, cur_tx, m_head)) { IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; cur_tx = NULL; break; } /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, m_head); SF_INC(i, SF_TX_DLIST_CNT); sc->sf_tx_cnt++; /* * Don't get the TX DMA queue get too full. */ if (sc->sf_tx_cnt > 64) break; } if (cur_tx == NULL) { SF_UNLOCK(sc); return; } /* Transmit */ csr_write_4(sc, SF_TXDQ_PRODIDX, (txprod & ~SF_TXDQ_PRODIDX_HIPRIO) | ((i << 20) & 0xFFFF0000)); ifp->if_timer = 5; SF_UNLOCK(sc); } static void sf_stop(sc) struct sf_softc *sc; { int i; struct ifnet *ifp; SF_LOCK(sc); ifp = sc->sf_ifp; untimeout(sf_stats_update, sc, sc->sf_stat_ch); #ifdef DEVICE_POLLING ether_poll_deregister(ifp); #endif /* DEVICE_POLLING */ csr_write_4(sc, SF_GEN_ETH_CTL, 0); csr_write_4(sc, SF_CQ_CONSIDX, 0); csr_write_4(sc, SF_CQ_PRODIDX, 0); csr_write_4(sc, SF_RXDQ_ADDR_Q1, 0); csr_write_4(sc, SF_RXDQ_CTL_1, 0); csr_write_4(sc, SF_RXDQ_PTR_Q1, 0); csr_write_4(sc, SF_TXCQ_CTL, 0); csr_write_4(sc, SF_TXDQ_ADDR_HIPRIO, 0); csr_write_4(sc, SF_TXDQ_CTL, 0); sf_reset(sc); sc->sf_link = 0; for (i = 0; i < SF_RX_DLIST_CNT; i++) { if (sc->sf_ldata->sf_rx_dlist_big[i].sf_mbuf != NULL) { m_freem(sc->sf_ldata->sf_rx_dlist_big[i].sf_mbuf); sc->sf_ldata->sf_rx_dlist_big[i].sf_mbuf = NULL; } } for (i = 0; i < SF_TX_DLIST_CNT; i++) { if (sc->sf_ldata->sf_tx_dlist[i].sf_mbuf != NULL) { m_freem(sc->sf_ldata->sf_tx_dlist[i].sf_mbuf); sc->sf_ldata->sf_tx_dlist[i].sf_mbuf = NULL; } } ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE); SF_UNLOCK(sc); } /* * Note: it is important that this 
function not be interrupted. We * use a two-stage register access scheme: if we are interrupted in * between setting the indirect address register and reading from the * indirect data register, the contents of the address register could * be changed out from under us. */ static void sf_stats_update(xsc) void *xsc; { struct sf_softc *sc; struct ifnet *ifp; struct mii_data *mii; struct sf_stats stats; u_int32_t *ptr; int i; sc = xsc; SF_LOCK(sc); ifp = sc->sf_ifp; mii = device_get_softc(sc->sf_miibus); ptr = (u_int32_t *)&stats; for (i = 0; i < sizeof(stats)/sizeof(u_int32_t); i++) ptr[i] = csr_read_4(sc, SF_STATS_BASE + (i + sizeof(u_int32_t))); for (i = 0; i < sizeof(stats)/sizeof(u_int32_t); i++) csr_write_4(sc, SF_STATS_BASE + (i + sizeof(u_int32_t)), 0); ifp->if_collisions += stats.sf_tx_single_colls + stats.sf_tx_multi_colls + stats.sf_tx_excess_colls; mii_tick(mii); if (!sc->sf_link && mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { sc->sf_link++; if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) sf_start(ifp); } sc->sf_stat_ch = timeout(sf_stats_update, sc, hz); SF_UNLOCK(sc); } static void sf_watchdog(ifp) struct ifnet *ifp; { struct sf_softc *sc; sc = ifp->if_softc; SF_LOCK(sc); ifp->if_oerrors++; printf("sf%d: watchdog timeout\n", sc->sf_unit); sf_stop(sc); sf_reset(sc); sf_init(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) sf_start(ifp); SF_UNLOCK(sc); } static void sf_shutdown(dev) device_t dev; { struct sf_softc *sc; sc = device_get_softc(dev); sf_stop(sc); } Index: stable/6/sys/pci/if_sis.c =================================================================== --- stable/6/sys/pci/if_sis.c (revision 149421) +++ stable/6/sys/pci/if_sis.c (revision 149422) @@ -1,2297 +1,2301 @@ /*- * Copyright (c) 2005 Poul-Henning Kamp * Copyright (c) 1997, 1998, 1999 * Bill Paul . All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * SiS 900/SiS 7016 fast ethernet PCI NIC driver. Datasheets are * available from http://www.sis.com.tw. * * This driver also supports the NatSemi DP83815. Datasheets are * available from http://www.national.com. 
* * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The SiS 900 is a fairly simple chip. It uses bus master DMA with * simple TX and RX descriptors of 3 longwords in size. The receiver * has a single perfect filter entry for the station address and a * 128-bit multicast hash table. The SiS 900 has a built-in MII-based * transceiver while the 7016 requires an external transceiver chip. * Both chips offer the standard bit-bang MII interface as well as * an enchanced PHY interface which simplifies accessing MII registers. * * The only downside to this chipset is that RX descriptors must be * longword aligned. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define SIS_USEIOSPACE #include MODULE_DEPEND(sis, pci, 1, 1, 1); MODULE_DEPEND(sis, ether, 1, 1, 1); MODULE_DEPEND(sis, miibus, 1, 1, 1); /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* * Various supported device vendors/types and their names. 
*/ static struct sis_type sis_devs[] = { { SIS_VENDORID, SIS_DEVICEID_900, "SiS 900 10/100BaseTX" }, { SIS_VENDORID, SIS_DEVICEID_7016, "SiS 7016 10/100BaseTX" }, { NS_VENDORID, NS_DEVICEID_DP83815, "NatSemi DP8381[56] 10/100BaseTX" }, { 0, 0, NULL } }; static int sis_detach(device_t); static void sis_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int sis_ifmedia_upd(struct ifnet *); static void sis_init(void *); static void sis_initl(struct sis_softc *); static void sis_intr(void *); static int sis_ioctl(struct ifnet *, u_long, caddr_t); static int sis_newbuf(struct sis_softc *, struct sis_desc *, struct mbuf *); static void sis_start(struct ifnet *); static void sis_startl(struct ifnet *); static void sis_stop(struct sis_softc *); static void sis_watchdog(struct ifnet *); #ifdef SIS_USEIOSPACE #define SIS_RES SYS_RES_IOPORT #define SIS_RID SIS_PCI_LOIO #else #define SIS_RES SYS_RES_MEMORY #define SIS_RID SIS_PCI_LOMEM #endif #define SIS_SETBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, \ CSR_READ_4(sc, reg) | (x)) #define SIS_CLRBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, \ CSR_READ_4(sc, reg) & ~(x)) #define SIO_SET(x) \ CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) | x) #define SIO_CLR(x) \ CSR_WRITE_4(sc, SIS_EECTL, CSR_READ_4(sc, SIS_EECTL) & ~x) static void sis_dma_map_desc_next(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct sis_desc *r; r = arg; r->sis_next = segs->ds_addr; } static void sis_dma_map_desc_ptr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct sis_desc *r; r = arg; r->sis_ptr = segs->ds_addr; } static void sis_dma_map_ring(void *arg, bus_dma_segment_t *segs, int nseg, int error) { u_int32_t *p; p = arg; *p = segs->ds_addr; } /* * Routine to reverse the bits in a word. Stolen almost * verbatim from /usr/games/fortune. 
 */
/* Bit-reverse a 16-bit word (swap 1-, 2-, 4-, then 8-bit groups). */
static uint16_t
sis_reverse(uint16_t n)
{
	n = ((n >> 1) & 0x5555) | ((n << 1) & 0xaaaa);
	n = ((n >> 2) & 0x3333) | ((n << 2) & 0xcccc);
	n = ((n >> 4) & 0x0f0f) | ((n << 4) & 0xf0f0);
	n = ((n >> 8) & 0x00ff) | ((n << 8) & 0xff00);

	return(n);
}

/* Spin for ~300ns worth of CSR reads to pace EEPROM bit-banging. */
static void
sis_delay(struct sis_softc *sc)
{
	int idx;

	for (idx = (300 / 33) + 1; idx > 0; idx--)
		CSR_READ_4(sc, SIS_CSR);
}

/*
 * Force the serial EEPROM into the idle state by clocking it with
 * chip-select asserted, then deselecting it.
 */
static void
sis_eeprom_idle(struct sis_softc *sc)
{
	int i;

	SIO_SET(SIS_EECTL_CSEL);
	sis_delay(sc);
	SIO_SET(SIS_EECTL_CLK);
	sis_delay(sc);

	for (i = 0; i < 25; i++) {
		SIO_CLR(SIS_EECTL_CLK);
		sis_delay(sc);
		SIO_SET(SIS_EECTL_CLK);
		sis_delay(sc);
	}

	SIO_CLR(SIS_EECTL_CLK);
	sis_delay(sc);
	SIO_CLR(SIS_EECTL_CSEL);
	sis_delay(sc);
	CSR_WRITE_4(sc, SIS_EECTL, 0x00000000);
}

/*
 * Send a read command and address to the EEPROM, check for ACK.
 */
static void
sis_eeprom_putbyte(struct sis_softc *sc, int addr)
{
	int d, i;

	d = addr | SIS_EECMD_READ;

	/*
	 * Feed in each bit and stobe the clock.
	 */
	for (i = 0x400; i; i >>= 1) {
		if (d & i) {
			SIO_SET(SIS_EECTL_DIN);
		} else {
			SIO_CLR(SIS_EECTL_DIN);
		}
		sis_delay(sc);
		SIO_SET(SIS_EECTL_CLK);
		sis_delay(sc);
		SIO_CLR(SIS_EECTL_CLK);
		sis_delay(sc);
	}
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 */
static void
sis_eeprom_getword(struct sis_softc *sc, int addr, uint16_t *dest)
{
	int i;
	u_int16_t word = 0;

	/* Force EEPROM to idle state. */
	sis_eeprom_idle(sc);

	/* Enter EEPROM access mode. */
	sis_delay(sc);
	SIO_CLR(SIS_EECTL_CLK);
	sis_delay(sc);
	SIO_SET(SIS_EECTL_CSEL);
	sis_delay(sc);

	/*
	 * Send address of word we want to read.
	 */
	sis_eeprom_putbyte(sc, addr);

	/*
	 * Start reading bits from EEPROM.
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(SIS_EECTL_CLK);
		sis_delay(sc);
		if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECTL_DOUT)
			word |= i;
		sis_delay(sc);
		SIO_CLR(SIS_EECTL_CLK);
		sis_delay(sc);
	}

	/* Turn off EEPROM access mode. */
	sis_eeprom_idle(sc);

	*dest = word;
}

/*
 * Read a sequence of words from the EEPROM.
*/ static void sis_read_eeprom(struct sis_softc *sc, caddr_t dest, int off, int cnt, int swap) { int i; u_int16_t word = 0, *ptr; for (i = 0; i < cnt; i++) { sis_eeprom_getword(sc, off + i, &word); ptr = (u_int16_t *)(dest + (i * 2)); if (swap) *ptr = ntohs(word); else *ptr = word; } } #if defined(__i386__) || defined(__amd64__) static device_t sis_find_bridge(device_t dev) { devclass_t pci_devclass; device_t *pci_devices; int pci_count = 0; device_t *pci_children; int pci_childcount = 0; device_t *busp, *childp; device_t child = NULL; int i, j; if ((pci_devclass = devclass_find("pci")) == NULL) return(NULL); devclass_get_devices(pci_devclass, &pci_devices, &pci_count); for (i = 0, busp = pci_devices; i < pci_count; i++, busp++) { pci_childcount = 0; device_get_children(*busp, &pci_children, &pci_childcount); for (j = 0, childp = pci_children; j < pci_childcount; j++, childp++) { if (pci_get_vendor(*childp) == SIS_VENDORID && pci_get_device(*childp) == 0x0008) { child = *childp; goto done; } } } done: free(pci_devices, M_TEMP); free(pci_children, M_TEMP); return(child); } static void sis_read_cmos(struct sis_softc *sc, device_t dev, caddr_t dest, int off, int cnt) { device_t bridge; u_int8_t reg; int i; bus_space_tag_t btag; bridge = sis_find_bridge(dev); if (bridge == NULL) return; reg = pci_read_config(bridge, 0x48, 1); pci_write_config(bridge, 0x48, reg|0x40, 1); /* XXX */ #if defined(__i386__) btag = I386_BUS_SPACE_IO; #elif defined(__amd64__) btag = AMD64_BUS_SPACE_IO; #endif for (i = 0; i < cnt; i++) { bus_space_write_1(btag, 0x0, 0x70, i + off); *(dest + i) = bus_space_read_1(btag, 0x0, 0x71); } pci_write_config(bridge, 0x48, reg & ~0x40, 1); return; } static void sis_read_mac(struct sis_softc *sc, device_t dev, caddr_t dest) { u_int32_t filtsave, csrsave; filtsave = CSR_READ_4(sc, SIS_RXFILT_CTL); csrsave = CSR_READ_4(sc, SIS_CSR); CSR_WRITE_4(sc, SIS_CSR, SIS_CSR_RELOAD | filtsave); CSR_WRITE_4(sc, SIS_CSR, 0); CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave & 
~SIS_RXFILTCTL_ENABLE); CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0); ((u_int16_t *)dest)[0] = CSR_READ_2(sc, SIS_RXFILT_DATA); CSR_WRITE_4(sc, SIS_RXFILT_CTL,SIS_FILTADDR_PAR1); ((u_int16_t *)dest)[1] = CSR_READ_2(sc, SIS_RXFILT_DATA); CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2); ((u_int16_t *)dest)[2] = CSR_READ_2(sc, SIS_RXFILT_DATA); CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave); CSR_WRITE_4(sc, SIS_CSR, csrsave); return; } #endif /* * Sync the PHYs by setting data bit and strobing the clock 32 times. */ static void sis_mii_sync(struct sis_softc *sc) { int i; SIO_SET(SIS_MII_DIR|SIS_MII_DATA); for (i = 0; i < 32; i++) { SIO_SET(SIS_MII_CLK); DELAY(1); SIO_CLR(SIS_MII_CLK); DELAY(1); } } /* * Clock a series of bits through the MII. */ static void sis_mii_send(struct sis_softc *sc, uint32_t bits, int cnt) { int i; SIO_CLR(SIS_MII_CLK); for (i = (0x1 << (cnt - 1)); i; i >>= 1) { if (bits & i) { SIO_SET(SIS_MII_DATA); } else { SIO_CLR(SIS_MII_DATA); } DELAY(1); SIO_CLR(SIS_MII_CLK); DELAY(1); SIO_SET(SIS_MII_CLK); } } /* * Read an PHY register through the MII. */ static int sis_mii_readreg(struct sis_softc *sc, struct sis_mii_frame *frame) { int i, ack, s; s = splimp(); /* * Set up frame for RX. */ frame->mii_stdelim = SIS_MII_STARTDELIM; frame->mii_opcode = SIS_MII_READOP; frame->mii_turnaround = 0; frame->mii_data = 0; /* * Turn on data xmit. */ SIO_SET(SIS_MII_DIR); sis_mii_sync(sc); /* * Send command/address info. */ sis_mii_send(sc, frame->mii_stdelim, 2); sis_mii_send(sc, frame->mii_opcode, 2); sis_mii_send(sc, frame->mii_phyaddr, 5); sis_mii_send(sc, frame->mii_regaddr, 5); /* Idle bit */ SIO_CLR((SIS_MII_CLK|SIS_MII_DATA)); DELAY(1); SIO_SET(SIS_MII_CLK); DELAY(1); /* Turn off xmit. */ SIO_CLR(SIS_MII_DIR); /* Check for ack */ SIO_CLR(SIS_MII_CLK); DELAY(1); ack = CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA; SIO_SET(SIS_MII_CLK); DELAY(1); /* * Now try reading data bits. 
If the ack failed, we still * need to clock through 16 cycles to keep the PHY(s) in sync. */ if (ack) { for(i = 0; i < 16; i++) { SIO_CLR(SIS_MII_CLK); DELAY(1); SIO_SET(SIS_MII_CLK); DELAY(1); } goto fail; } for (i = 0x8000; i; i >>= 1) { SIO_CLR(SIS_MII_CLK); DELAY(1); if (!ack) { if (CSR_READ_4(sc, SIS_EECTL) & SIS_MII_DATA) frame->mii_data |= i; DELAY(1); } SIO_SET(SIS_MII_CLK); DELAY(1); } fail: SIO_CLR(SIS_MII_CLK); DELAY(1); SIO_SET(SIS_MII_CLK); DELAY(1); splx(s); if (ack) return(1); return(0); } /* * Write to a PHY register through the MII. */ static int sis_mii_writereg(struct sis_softc *sc, struct sis_mii_frame *frame) { int s; s = splimp(); /* * Set up frame for TX. */ frame->mii_stdelim = SIS_MII_STARTDELIM; frame->mii_opcode = SIS_MII_WRITEOP; frame->mii_turnaround = SIS_MII_TURNAROUND; /* * Turn on data output. */ SIO_SET(SIS_MII_DIR); sis_mii_sync(sc); sis_mii_send(sc, frame->mii_stdelim, 2); sis_mii_send(sc, frame->mii_opcode, 2); sis_mii_send(sc, frame->mii_phyaddr, 5); sis_mii_send(sc, frame->mii_regaddr, 5); sis_mii_send(sc, frame->mii_turnaround, 2); sis_mii_send(sc, frame->mii_data, 16); /* Idle bit. */ SIO_SET(SIS_MII_CLK); DELAY(1); SIO_CLR(SIS_MII_CLK); DELAY(1); /* * Turn off xmit. */ SIO_CLR(SIS_MII_DIR); splx(s); return(0); } static int sis_miibus_readreg(device_t dev, int phy, int reg) { struct sis_softc *sc; struct sis_mii_frame frame; sc = device_get_softc(dev); if (sc->sis_type == SIS_TYPE_83815) { if (phy != 0) return(0); /* * The NatSemi chip can take a while after * a reset to come ready, during which the BMSR * returns a value of 0. This is *never* supposed * to happen: some of the BMSR bits are meant to * be hardwired in the on position, and this can * confuse the miibus code a bit during the probe * and attach phase. So we make an effort to check * for this condition and wait for it to clear. 
*/ if (!CSR_READ_4(sc, NS_BMSR)) DELAY(1000); return CSR_READ_4(sc, NS_BMCR + (reg * 4)); } /* * Chipsets < SIS_635 seem not to be able to read/write * through mdio. Use the enhanced PHY access register * again for them. */ if (sc->sis_type == SIS_TYPE_900 && sc->sis_rev < SIS_REV_635) { int i, val = 0; if (phy != 0) return(0); CSR_WRITE_4(sc, SIS_PHYCTL, (phy << 11) | (reg << 6) | SIS_PHYOP_READ); SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS); for (i = 0; i < SIS_TIMEOUT; i++) { if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS)) break; } if (i == SIS_TIMEOUT) { printf("sis%d: PHY failed to come ready\n", sc->sis_unit); return(0); } val = (CSR_READ_4(sc, SIS_PHYCTL) >> 16) & 0xFFFF; if (val == 0xFFFF) return(0); return(val); } else { bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; sis_mii_readreg(sc, &frame); return(frame.mii_data); } } static int sis_miibus_writereg(device_t dev, int phy, int reg, int data) { struct sis_softc *sc; struct sis_mii_frame frame; sc = device_get_softc(dev); if (sc->sis_type == SIS_TYPE_83815) { if (phy != 0) return(0); CSR_WRITE_4(sc, NS_BMCR + (reg * 4), data); return(0); } /* * Chipsets < SIS_635 seem not to be able to read/write * through mdio. Use the enhanced PHY access register * again for them. 
*/ if (sc->sis_type == SIS_TYPE_900 && sc->sis_rev < SIS_REV_635) { int i; if (phy != 0) return(0); CSR_WRITE_4(sc, SIS_PHYCTL, (data << 16) | (phy << 11) | (reg << 6) | SIS_PHYOP_WRITE); SIS_SETBIT(sc, SIS_PHYCTL, SIS_PHYCTL_ACCESS); for (i = 0; i < SIS_TIMEOUT; i++) { if (!(CSR_READ_4(sc, SIS_PHYCTL) & SIS_PHYCTL_ACCESS)) break; } if (i == SIS_TIMEOUT) printf("sis%d: PHY failed to come ready\n", sc->sis_unit); } else { bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; frame.mii_data = data; sis_mii_writereg(sc, &frame); } return(0); } static void sis_miibus_statchg(device_t dev) { struct sis_softc *sc; sc = device_get_softc(dev); SIS_LOCK_ASSERT(sc); sis_initl(sc); } static uint32_t sis_mchash(struct sis_softc *sc, const uint8_t *addr) { uint32_t crc; /* Compute CRC for the address value. */ crc = ether_crc32_be(addr, ETHER_ADDR_LEN); /* * return the filter bit position * * The NatSemi chip has a 512-bit filter, which is * different than the SiS, so we special-case it. */ if (sc->sis_type == SIS_TYPE_83815) return (crc >> 23); else if (sc->sis_rev >= SIS_REV_635 || sc->sis_rev == SIS_REV_900B) return (crc >> 24); else return (crc >> 25); } static void sis_setmulti_ns(struct sis_softc *sc) { struct ifnet *ifp; struct ifmultiaddr *ifma; u_int32_t h = 0, i, filtsave; int bit, index; ifp = sc->sis_ifp; if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { SIS_CLRBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH); SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI); return; } /* * We have to explicitly enable the multicast hash table * on the NatSemi chip if we want to use it, which we do. 
*/ SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_MCHASH); SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLMULTI); filtsave = CSR_READ_4(sc, SIS_RXFILT_CTL); /* first, zot all the existing hash bits */ for (i = 0; i < 32; i++) { CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + (i*2)); CSR_WRITE_4(sc, SIS_RXFILT_DATA, 0); } + IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = sis_mchash(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); index = h >> 3; bit = h & 0x1F; CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_FMEM_LO + index); if (bit > 0xF) bit -= 0x10; SIS_SETBIT(sc, SIS_RXFILT_DATA, (1 << bit)); } + IF_ADDR_UNLOCK(ifp); CSR_WRITE_4(sc, SIS_RXFILT_CTL, filtsave); return; } static void sis_setmulti_sis(struct sis_softc *sc) { struct ifnet *ifp; struct ifmultiaddr *ifma; u_int32_t h, i, n, ctl; u_int16_t hashes[16]; ifp = sc->sis_ifp; /* hash table size */ if (sc->sis_rev >= SIS_REV_635 || sc->sis_rev == SIS_REV_900B) n = 16; else n = 8; ctl = CSR_READ_4(sc, SIS_RXFILT_CTL) & SIS_RXFILTCTL_ENABLE; if (ifp->if_flags & IFF_BROADCAST) ctl |= SIS_RXFILTCTL_BROAD; if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { ctl |= SIS_RXFILTCTL_ALLMULTI; if (ifp->if_flags & IFF_PROMISC) ctl |= SIS_RXFILTCTL_BROAD|SIS_RXFILTCTL_ALLPHYS; for (i = 0; i < n; i++) hashes[i] = ~0; } else { for (i = 0; i < n; i++) hashes[i] = 0; i = 0; + IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = sis_mchash(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); hashes[h >> 4] |= 1 << (h & 0xf); i++; } + IF_ADDR_UNLOCK(ifp); if (i > n) { ctl |= SIS_RXFILTCTL_ALLMULTI; for (i = 0; i < n; i++) hashes[i] = ~0; } } for (i = 0; i < n; i++) { CSR_WRITE_4(sc, SIS_RXFILT_CTL, (4 + i) << 16); CSR_WRITE_4(sc, SIS_RXFILT_DATA, hashes[i]); } CSR_WRITE_4(sc, SIS_RXFILT_CTL, ctl); } static void sis_reset(struct sis_softc *sc) { int i; 
SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RESET); for (i = 0; i < SIS_TIMEOUT; i++) { if (!(CSR_READ_4(sc, SIS_CSR) & SIS_CSR_RESET)) break; } if (i == SIS_TIMEOUT) printf("sis%d: reset never completed\n", sc->sis_unit); /* Wait a little while for the chip to get its brains in order. */ DELAY(1000); /* * If this is a NetSemi chip, make sure to clear * PME mode. */ if (sc->sis_type == SIS_TYPE_83815) { CSR_WRITE_4(sc, NS_CLKRUN, NS_CLKRUN_PMESTS); CSR_WRITE_4(sc, NS_CLKRUN, 0); } return; } /* * Probe for an SiS chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int sis_probe(device_t dev) { struct sis_type *t; t = sis_devs; while(t->sis_name != NULL) { if ((pci_get_vendor(dev) == t->sis_vid) && (pci_get_device(dev) == t->sis_did)) { device_set_desc(dev, t->sis_name); return (BUS_PROBE_DEFAULT); } t++; } return(ENXIO); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int sis_attach(device_t dev) { u_char eaddr[ETHER_ADDR_LEN]; struct sis_softc *sc; struct ifnet *ifp; int unit, error = 0, rid, waittime = 0; waittime = 0; sc = device_get_softc(dev); unit = device_get_unit(dev); sc->sis_self = dev; mtx_init(&sc->sis_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); if (pci_get_device(dev) == SIS_DEVICEID_900) sc->sis_type = SIS_TYPE_900; if (pci_get_device(dev) == SIS_DEVICEID_7016) sc->sis_type = SIS_TYPE_7016; if (pci_get_vendor(dev) == NS_VENDORID) sc->sis_type = SIS_TYPE_83815; sc->sis_rev = pci_read_config(dev, PCIR_REVID, 1); /* * Map control/status registers. 
*/ pci_enable_busmaster(dev); rid = SIS_RID; sc->sis_res = bus_alloc_resource_any(dev, SIS_RES, &rid, RF_ACTIVE); if (sc->sis_res == NULL) { printf("sis%d: couldn't map ports/memory\n", unit); error = ENXIO; goto fail; } sc->sis_btag = rman_get_bustag(sc->sis_res); sc->sis_bhandle = rman_get_bushandle(sc->sis_res); /* Allocate interrupt */ rid = 0; sc->sis_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->sis_irq == NULL) { printf("sis%d: couldn't map interrupt\n", unit); error = ENXIO; goto fail; } /* Reset the adapter. */ sis_reset(sc); if (sc->sis_type == SIS_TYPE_900 && (sc->sis_rev == SIS_REV_635 || sc->sis_rev == SIS_REV_900B)) { SIO_SET(SIS_CFG_RND_CNT); SIO_SET(SIS_CFG_PERR_DETECT); } /* * Get station address from the EEPROM. */ switch (pci_get_vendor(dev)) { case NS_VENDORID: sc->sis_srr = CSR_READ_4(sc, NS_SRR); /* We can't update the device description, so spew */ if (sc->sis_srr == NS_SRR_15C) device_printf(dev, "Silicon Revision: DP83815C\n"); else if (sc->sis_srr == NS_SRR_15D) device_printf(dev, "Silicon Revision: DP83815D\n"); else if (sc->sis_srr == NS_SRR_16A) device_printf(dev, "Silicon Revision: DP83816A\n"); else device_printf(dev, "Silicon Revision %x\n", sc->sis_srr); /* * Reading the MAC address out of the EEPROM on * the NatSemi chip takes a bit more work than * you'd expect. The address spans 4 16-bit words, * with the first word containing only a single bit. * You have to shift everything over one bit to * get it aligned properly. Also, the bits are * stored backwards (the LSB is really the MSB, * and so on) so you have to reverse them in order * to get the MAC address into the form we want. * Why? Who the hell knows. */ { u_int16_t tmp[4]; sis_read_eeprom(sc, (caddr_t)&tmp, NS_EE_NODEADDR, 4, 0); /* Shift everything over one bit. */ tmp[3] = tmp[3] >> 1; tmp[3] |= tmp[2] << 15; tmp[2] = tmp[2] >> 1; tmp[2] |= tmp[1] << 15; tmp[1] = tmp[1] >> 1; tmp[1] |= tmp[0] << 15; /* Now reverse all the bits. 
*/ tmp[3] = sis_reverse(tmp[3]); tmp[2] = sis_reverse(tmp[2]); tmp[1] = sis_reverse(tmp[1]); bcopy((char *)&tmp[1], eaddr, ETHER_ADDR_LEN); } break; case SIS_VENDORID: default: #if defined(__i386__) || defined(__amd64__) /* * If this is a SiS 630E chipset with an embedded * SiS 900 controller, we have to read the MAC address * from the APC CMOS RAM. Our method for doing this * is very ugly since we have to reach out and grab * ahold of hardware for which we cannot properly * allocate resources. This code is only compiled on * the i386 architecture since the SiS 630E chipset * is for x86 motherboards only. Note that there are * a lot of magic numbers in this hack. These are * taken from SiS's Linux driver. I'd like to replace * them with proper symbolic definitions, but that * requires some datasheets that I don't have access * to at the moment. */ if (sc->sis_rev == SIS_REV_630S || sc->sis_rev == SIS_REV_630E || sc->sis_rev == SIS_REV_630EA1) sis_read_cmos(sc, dev, (caddr_t)&eaddr, 0x9, 6); else if (sc->sis_rev == SIS_REV_635 || sc->sis_rev == SIS_REV_630ET) sis_read_mac(sc, dev, (caddr_t)&eaddr); else if (sc->sis_rev == SIS_REV_96x) { /* Allow to read EEPROM from LAN. It is shared * between a 1394 controller and the NIC and each * time we access it, we need to set SIS_EECMD_REQ. */ SIO_SET(SIS_EECMD_REQ); for (waittime = 0; waittime < SIS_TIMEOUT; waittime++) { /* Force EEPROM to idle state. */ sis_eeprom_idle(sc); if (CSR_READ_4(sc, SIS_EECTL) & SIS_EECMD_GNT) { sis_read_eeprom(sc, (caddr_t)&eaddr, SIS_EE_NODEADDR, 3, 0); break; } DELAY(1); } /* * Set SIS_EECTL_CLK to high, so a other master * can operate on the i2c bus. 
*/ SIO_SET(SIS_EECTL_CLK); /* Refuse EEPROM access by LAN */ SIO_SET(SIS_EECMD_DONE); } else #endif sis_read_eeprom(sc, (caddr_t)&eaddr, SIS_EE_NODEADDR, 3, 0); break; } sc->sis_unit = unit; if (debug_mpsafenet) callout_init(&sc->sis_stat_ch, CALLOUT_MPSAFE); else callout_init(&sc->sis_stat_ch, 0); /* * Allocate the parent bus DMA tag appropriate for PCI. */ #define SIS_NSEG_NEW 32 error = bus_dma_tag_create(NULL, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MAXBSIZE, SIS_NSEG_NEW, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sis_parent_tag); if (error) goto fail; /* * Now allocate a tag for the DMA descriptor lists and a chunk * of DMA-able memory based on the tag. Also obtain the physical * addresses of the RX and TX ring, which we'll need later. * All of our lists are allocated as a contiguous block * of memory. 
*/ error = bus_dma_tag_create(sc->sis_parent_tag, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ SIS_RX_LIST_SZ, 1, /* maxsize,nsegments */ BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 0, /* flags */ busdma_lock_mutex, /* lockfunc */ &Giant, /* lockarg */ &sc->sis_rx_tag); if (error) goto fail; error = bus_dmamem_alloc(sc->sis_rx_tag, (void **)&sc->sis_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->sis_rx_dmamap); if (error) { printf("sis%d: no memory for rx list buffers!\n", unit); bus_dma_tag_destroy(sc->sis_rx_tag); sc->sis_rx_tag = NULL; goto fail; } error = bus_dmamap_load(sc->sis_rx_tag, sc->sis_rx_dmamap, &(sc->sis_rx_list[0]), sizeof(struct sis_desc), sis_dma_map_ring, &sc->sis_rx_paddr, 0); if (error) { printf("sis%d: cannot get address of the rx ring!\n", unit); bus_dmamem_free(sc->sis_rx_tag, sc->sis_rx_list, sc->sis_rx_dmamap); bus_dma_tag_destroy(sc->sis_rx_tag); sc->sis_rx_tag = NULL; goto fail; } error = bus_dma_tag_create(sc->sis_parent_tag, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ SIS_TX_LIST_SZ, 1, /* maxsize,nsegments */ BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 0, /* flags */ busdma_lock_mutex, /* lockfunc */ &Giant, /* lockarg */ &sc->sis_tx_tag); if (error) goto fail; error = bus_dmamem_alloc(sc->sis_tx_tag, (void **)&sc->sis_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->sis_tx_dmamap); if (error) { printf("sis%d: no memory for tx list buffers!\n", unit); bus_dma_tag_destroy(sc->sis_tx_tag); sc->sis_tx_tag = NULL; goto fail; } error = bus_dmamap_load(sc->sis_tx_tag, sc->sis_tx_dmamap, &(sc->sis_tx_list[0]), sizeof(struct sis_desc), sis_dma_map_ring, &sc->sis_tx_paddr, 0); if (error) { printf("sis%d: cannot get address of the tx ring!\n", unit); bus_dmamem_free(sc->sis_tx_tag, sc->sis_tx_list, sc->sis_tx_dmamap); 
bus_dma_tag_destroy(sc->sis_tx_tag); sc->sis_tx_tag = NULL; goto fail; } error = bus_dma_tag_create(sc->sis_parent_tag, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, 1, /* maxsize,nsegments */ BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 0, /* flags */ busdma_lock_mutex, /* lockfunc */ &Giant, /* lockarg */ &sc->sis_tag); if (error) goto fail; /* * Obtain the physical addresses of the RX and TX * rings which we'll need later in the init routine. */ ifp = sc->sis_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { printf("sis%d: can not if_alloc()\n", sc->sis_unit); error = ENOSPC; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = sis_ioctl; ifp->if_start = sis_start; ifp->if_watchdog = sis_watchdog; ifp->if_init = sis_init; ifp->if_baudrate = 10000000; IFQ_SET_MAXLEN(&ifp->if_snd, SIS_TX_LIST_CNT - 1); ifp->if_snd.ifq_drv_maxlen = SIS_TX_LIST_CNT - 1; IFQ_SET_READY(&ifp->if_snd); /* * Do MII setup. */ if (mii_phy_probe(dev, &sc->sis_miibus, sis_ifmedia_upd, sis_ifmedia_sts)) { printf("sis%d: MII without any PHY!\n", sc->sis_unit); if_free(ifp); error = ENXIO; goto fail; } /* * Call MI attach routine. */ ether_ifattach(ifp, eaddr); /* * Tell the upper layer(s) we support long frames. 
*/ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_VLAN_MTU; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif ifp->if_capenable = ifp->if_capabilities; /* Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->sis_irq, INTR_TYPE_NET | INTR_MPSAFE, sis_intr, sc, &sc->sis_intrhand); if (error) { printf("sis%d: couldn't set up irq\n", unit); ether_ifdetach(ifp); if_free(ifp); goto fail; } fail: if (error) sis_detach(dev); return(error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. */ static int sis_detach(device_t dev) { struct sis_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); KASSERT(mtx_initialized(&sc->sis_mtx), ("sis mutex not initialized")); SIS_LOCK(sc); ifp = sc->sis_ifp; /* These should only be active if attach succeeded. 
*/ if (device_is_attached(dev)) { sis_reset(sc); sis_stop(sc); ether_ifdetach(ifp); if_free(ifp); } if (sc->sis_miibus) device_delete_child(dev, sc->sis_miibus); bus_generic_detach(dev); if (sc->sis_intrhand) bus_teardown_intr(dev, sc->sis_irq, sc->sis_intrhand); if (sc->sis_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sis_irq); if (sc->sis_res) bus_release_resource(dev, SIS_RES, SIS_RID, sc->sis_res); if (sc->sis_rx_tag) { bus_dmamap_unload(sc->sis_rx_tag, sc->sis_rx_dmamap); bus_dmamem_free(sc->sis_rx_tag, sc->sis_rx_list, sc->sis_rx_dmamap); bus_dma_tag_destroy(sc->sis_rx_tag); } if (sc->sis_tx_tag) { bus_dmamap_unload(sc->sis_tx_tag, sc->sis_tx_dmamap); bus_dmamem_free(sc->sis_tx_tag, sc->sis_tx_list, sc->sis_tx_dmamap); bus_dma_tag_destroy(sc->sis_tx_tag); } if (sc->sis_parent_tag) bus_dma_tag_destroy(sc->sis_parent_tag); if (sc->sis_tag) bus_dma_tag_destroy(sc->sis_tag); SIS_UNLOCK(sc); mtx_destroy(&sc->sis_mtx); return(0); } /* * Initialize the TX and RX descriptors and allocate mbufs for them. Note that * we arrange the descriptors in a closed ring, so that the last descriptor * points back to the first. 
*/ static int sis_ring_init(struct sis_softc *sc) { int i, error; struct sis_desc *dp; dp = &sc->sis_tx_list[0]; for (i = 0; i < SIS_TX_LIST_CNT; i++, dp++) { if (i == (SIS_TX_LIST_CNT - 1)) dp->sis_nextdesc = &sc->sis_tx_list[0]; else dp->sis_nextdesc = dp + 1; bus_dmamap_load(sc->sis_tx_tag, sc->sis_tx_dmamap, dp->sis_nextdesc, sizeof(struct sis_desc), sis_dma_map_desc_next, dp, 0); dp->sis_mbuf = NULL; dp->sis_ptr = 0; dp->sis_ctl = 0; } sc->sis_tx_prod = sc->sis_tx_cons = sc->sis_tx_cnt = 0; bus_dmamap_sync(sc->sis_tx_tag, sc->sis_tx_dmamap, BUS_DMASYNC_PREWRITE); dp = &sc->sis_rx_list[0]; for (i = 0; i < SIS_RX_LIST_CNT; i++, dp++) { error = sis_newbuf(sc, dp, NULL); if (error) return(error); if (i == (SIS_RX_LIST_CNT - 1)) dp->sis_nextdesc = &sc->sis_rx_list[0]; else dp->sis_nextdesc = dp + 1; bus_dmamap_load(sc->sis_rx_tag, sc->sis_rx_dmamap, dp->sis_nextdesc, sizeof(struct sis_desc), sis_dma_map_desc_next, dp, 0); } bus_dmamap_sync(sc->sis_rx_tag, sc->sis_rx_dmamap, BUS_DMASYNC_PREWRITE); sc->sis_rx_pdsc = &sc->sis_rx_list[0]; return(0); } /* * Initialize an RX descriptor and attach an MBUF cluster. */ static int sis_newbuf(struct sis_softc *sc, struct sis_desc *c, struct mbuf *m) { if (c == NULL) return(EINVAL); if (m == NULL) { m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return(ENOBUFS); } else m->m_data = m->m_ext.ext_buf; c->sis_mbuf = m; c->sis_ctl = SIS_RXLEN; bus_dmamap_create(sc->sis_tag, 0, &c->sis_map); bus_dmamap_load(sc->sis_tag, c->sis_map, mtod(m, void *), MCLBYTES, sis_dma_map_desc_ptr, c, 0); bus_dmamap_sync(sc->sis_tag, c->sis_map, BUS_DMASYNC_PREREAD); return(0); } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. 
*/ static void sis_rxeof(struct sis_softc *sc) { struct mbuf *m; struct ifnet *ifp; struct sis_desc *cur_rx; int total_len = 0; u_int32_t rxstat; SIS_LOCK_ASSERT(sc); ifp = sc->sis_ifp; for(cur_rx = sc->sis_rx_pdsc; SIS_OWNDESC(cur_rx); cur_rx = cur_rx->sis_nextdesc) { #ifdef DEVICE_POLLING if (ifp->if_flags & IFF_POLLING) { if (sc->rxcycles <= 0) break; sc->rxcycles--; } #endif /* DEVICE_POLLING */ rxstat = cur_rx->sis_rxstat; bus_dmamap_sync(sc->sis_tag, cur_rx->sis_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sis_tag, cur_rx->sis_map); bus_dmamap_destroy(sc->sis_tag, cur_rx->sis_map); m = cur_rx->sis_mbuf; cur_rx->sis_mbuf = NULL; total_len = SIS_RXBYTES(cur_rx); /* * If an error occurs, update stats, clear the * status word and leave the mbuf cluster in place: * it should simply get re-used next time this descriptor * comes up in the ring. */ if (!(rxstat & SIS_CMDSTS_PKT_OK)) { ifp->if_ierrors++; if (rxstat & SIS_RXSTAT_COLL) ifp->if_collisions++; sis_newbuf(sc, cur_rx, m); continue; } /* No errors; receive the packet. */ #if defined(__i386__) || defined(__amd64__) /* * On the x86 we do not have alignment problems, so try to * allocate a new buffer for the receive ring, and pass up * the one where the packet is already, saving the expensive * copy done in m_devget(). * If we are on an architecture with alignment problems, or * if the allocation fails, then use m_devget and leave the * existing buffer in the receive ring. */ if (sis_newbuf(sc, cur_rx, NULL) == 0) m->m_pkthdr.len = m->m_len = total_len; else #endif { struct mbuf *m0; m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, ifp, NULL); sis_newbuf(sc, cur_rx, m); if (m0 == NULL) { ifp->if_ierrors++; continue; } m = m0; } ifp->if_ipackets++; m->m_pkthdr.rcvif = ifp; SIS_UNLOCK(sc); (*ifp->if_input)(ifp, m); SIS_LOCK(sc); } sc->sis_rx_pdsc = cur_rx; } static void sis_rxeoc(struct sis_softc *sc) { SIS_LOCK_ASSERT(sc); sis_rxeof(sc); sis_initl(sc); } /* * A frame was downloaded to the chip. 
It's safe for us to clean up * the list buffers. */ static void sis_txeof(struct sis_softc *sc) { struct ifnet *ifp; u_int32_t idx; SIS_LOCK_ASSERT(sc); ifp = sc->sis_ifp; /* * Go through our tx list and free mbufs for those * frames that have been transmitted. */ for (idx = sc->sis_tx_cons; sc->sis_tx_cnt > 0; sc->sis_tx_cnt--, SIS_INC(idx, SIS_TX_LIST_CNT) ) { struct sis_desc *cur_tx = &sc->sis_tx_list[idx]; if (SIS_OWNDESC(cur_tx)) break; if (cur_tx->sis_ctl & SIS_CMDSTS_MORE) continue; if (!(cur_tx->sis_ctl & SIS_CMDSTS_PKT_OK)) { ifp->if_oerrors++; if (cur_tx->sis_txstat & SIS_TXSTAT_EXCESSCOLLS) ifp->if_collisions++; if (cur_tx->sis_txstat & SIS_TXSTAT_OUTOFWINCOLL) ifp->if_collisions++; } ifp->if_collisions += (cur_tx->sis_txstat & SIS_TXSTAT_COLLCNT) >> 16; ifp->if_opackets++; if (cur_tx->sis_mbuf != NULL) { m_freem(cur_tx->sis_mbuf); cur_tx->sis_mbuf = NULL; bus_dmamap_unload(sc->sis_tag, cur_tx->sis_map); bus_dmamap_destroy(sc->sis_tag, cur_tx->sis_map); } } if (idx != sc->sis_tx_cons) { /* we freed up some buffers */ sc->sis_tx_cons = idx; ifp->if_flags &= ~IFF_OACTIVE; } ifp->if_timer = (sc->sis_tx_cnt == 0) ? 
0 : 5; return; } static void sis_tick(void *xsc) { struct sis_softc *sc; struct mii_data *mii; struct ifnet *ifp; sc = xsc; SIS_LOCK(sc); sc->in_tick = 1; ifp = sc->sis_ifp; mii = device_get_softc(sc->sis_miibus); mii_tick(mii); if (!sc->sis_link && mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { sc->sis_link++; if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) sis_startl(ifp); } callout_reset(&sc->sis_stat_ch, hz, sis_tick, sc); sc->in_tick = 0; SIS_UNLOCK(sc); } #ifdef DEVICE_POLLING static poll_handler_t sis_poll; static void sis_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct sis_softc *sc = ifp->if_softc; SIS_LOCK(sc); if (!(ifp->if_capenable & IFCAP_POLLING)) { ether_poll_deregister(ifp); cmd = POLL_DEREGISTER; } if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */ CSR_WRITE_4(sc, SIS_IER, 1); goto done; } /* * On the sis, reading the status register also clears it. * So before returning to intr mode we must make sure that all * possible pending sources of interrupts have been served. * In practice this means run to completion the *eof routines, * and then call the interrupt routine */ sc->rxcycles = count; sis_rxeof(sc); sis_txeof(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) sis_startl(ifp); if (sc->rxcycles > 0 || cmd == POLL_AND_CHECK_STATUS) { u_int32_t status; /* Reading the ISR register clears all interrupts. 
*/ status = CSR_READ_4(sc, SIS_ISR); if (status & (SIS_ISR_RX_ERR|SIS_ISR_RX_OFLOW)) sis_rxeoc(sc); if (status & (SIS_ISR_RX_IDLE)) SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE); if (status & SIS_ISR_SYSERR) { sis_reset(sc); sis_initl(sc); } } done: SIS_UNLOCK(sc); } #endif /* DEVICE_POLLING */ static void sis_intr(void *arg) { struct sis_softc *sc; struct ifnet *ifp; u_int32_t status; sc = arg; ifp = sc->sis_ifp; if (sc->sis_stopped) /* Most likely shared interrupt */ return; SIS_LOCK(sc); #ifdef DEVICE_POLLING if (ifp->if_flags & IFF_POLLING) goto done; if ((ifp->if_capenable & IFCAP_POLLING) && ether_poll_register(sis_poll, ifp)) { /* ok, disable interrupts */ CSR_WRITE_4(sc, SIS_IER, 0); goto done; } #endif /* DEVICE_POLLING */ /* Disable interrupts. */ CSR_WRITE_4(sc, SIS_IER, 0); for (;;) { SIS_LOCK_ASSERT(sc); /* Reading the ISR register clears all interrupts. */ status = CSR_READ_4(sc, SIS_ISR); if ((status & SIS_INTRS) == 0) break; if (status & (SIS_ISR_TX_DESC_OK | SIS_ISR_TX_ERR | SIS_ISR_TX_OK | SIS_ISR_TX_IDLE) ) sis_txeof(sc); if (status & (SIS_ISR_RX_DESC_OK|SIS_ISR_RX_OK|SIS_ISR_RX_IDLE)) sis_rxeof(sc); if (status & (SIS_ISR_RX_ERR | SIS_ISR_RX_OFLOW)) sis_rxeoc(sc); if (status & (SIS_ISR_RX_IDLE)) SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE); if (status & SIS_ISR_SYSERR) { sis_reset(sc); sis_initl(sc); } } /* Re-enable interrupts. */ CSR_WRITE_4(sc, SIS_IER, 1); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) sis_startl(ifp); #ifdef DEVICE_POLLING done: #endif /* DEVICE_POLLING */ SIS_UNLOCK(sc); } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. */ static int sis_encap(struct sis_softc *sc, struct mbuf **m_head, uint32_t *txidx) { struct sis_desc *f = NULL; struct mbuf *m; int frag, cur, cnt = 0, chainlen = 0; /* * If there's no way we can send any packets, return now. 
*/ if (SIS_TX_LIST_CNT - sc->sis_tx_cnt < 2) return (ENOBUFS); /* * Count the number of frags in this chain to see if * we need to m_defrag. Since the descriptor list is shared * by all packets, we'll m_defrag long chains so that they * do not use up the entire list, even if they would fit. */ for (m = *m_head; m != NULL; m = m->m_next) chainlen++; if ((chainlen > SIS_TX_LIST_CNT / 4) || ((SIS_TX_LIST_CNT - (chainlen + sc->sis_tx_cnt)) < 2)) { m = m_defrag(*m_head, M_DONTWAIT); if (m == NULL) return (ENOBUFS); *m_head = m; } /* * Start packing the mbufs in this chain into * the fragment pointers. Stop when we run out * of fragments or hit the end of the mbuf chain. */ cur = frag = *txidx; for (m = *m_head; m != NULL; m = m->m_next) { if (m->m_len != 0) { if ((SIS_TX_LIST_CNT - (sc->sis_tx_cnt + cnt)) < 2) return(ENOBUFS); f = &sc->sis_tx_list[frag]; f->sis_ctl = SIS_CMDSTS_MORE | m->m_len; bus_dmamap_create(sc->sis_tag, 0, &f->sis_map); bus_dmamap_load(sc->sis_tag, f->sis_map, mtod(m, void *), m->m_len, sis_dma_map_desc_ptr, f, 0); bus_dmamap_sync(sc->sis_tag, f->sis_map, BUS_DMASYNC_PREREAD); if (cnt != 0) f->sis_ctl |= SIS_CMDSTS_OWN; cur = frag; SIS_INC(frag, SIS_TX_LIST_CNT); cnt++; } } if (m != NULL) return(ENOBUFS); sc->sis_tx_list[cur].sis_mbuf = *m_head; sc->sis_tx_list[cur].sis_ctl &= ~SIS_CMDSTS_MORE; sc->sis_tx_list[*txidx].sis_ctl |= SIS_CMDSTS_OWN; sc->sis_tx_cnt += cnt; *txidx = frag; return(0); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit lists. We also save a * copy of the pointers since the transmit list fragment pointers are * physical addresses. 
*/ static void sis_start(struct ifnet *ifp) { struct sis_softc *sc; sc = ifp->if_softc; SIS_LOCK(sc); sis_startl(ifp); SIS_UNLOCK(sc); } static void sis_startl(struct ifnet *ifp) { struct sis_softc *sc; struct mbuf *m_head = NULL; u_int32_t idx, queued = 0; sc = ifp->if_softc; SIS_LOCK_ASSERT(sc); if (!sc->sis_link) return; idx = sc->sis_tx_prod; if (ifp->if_flags & IFF_OACTIVE) return; while(sc->sis_tx_list[idx].sis_mbuf == NULL) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; if (sis_encap(sc, &m_head, &idx)) { IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; break; } queued++; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, m_head); } if (queued) { /* Transmit */ sc->sis_tx_prod = idx; SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_ENABLE); /* * Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; } } static void sis_init(void *xsc) { struct sis_softc *sc = xsc; SIS_LOCK(sc); sis_initl(sc); SIS_UNLOCK(sc); } static void sis_initl(struct sis_softc *sc) { struct ifnet *ifp = sc->sis_ifp; struct mii_data *mii; SIS_LOCK_ASSERT(sc); /* * Cancel pending I/O and free all RX/TX buffers. */ sis_stop(sc); sc->sis_stopped = 0; #ifdef notyet if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr >= NS_SRR_16A) { /* * Configure 400usec of interrupt holdoff. This is based * on emperical tests on a Soekris 4801. 
*/ CSR_WRITE_4(sc, NS_IHR, 0x100 | 4); } #endif mii = device_get_softc(sc->sis_miibus); /* Set MAC address */ if (sc->sis_type == SIS_TYPE_83815) { CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR0); CSR_WRITE_4(sc, SIS_RXFILT_DATA, ((u_int16_t *)IFP2ENADDR(sc->sis_ifp))[0]); CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR1); CSR_WRITE_4(sc, SIS_RXFILT_DATA, ((u_int16_t *)IFP2ENADDR(sc->sis_ifp))[1]); CSR_WRITE_4(sc, SIS_RXFILT_CTL, NS_FILTADDR_PAR2); CSR_WRITE_4(sc, SIS_RXFILT_DATA, ((u_int16_t *)IFP2ENADDR(sc->sis_ifp))[2]); } else { CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR0); CSR_WRITE_4(sc, SIS_RXFILT_DATA, ((u_int16_t *)IFP2ENADDR(sc->sis_ifp))[0]); CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR1); CSR_WRITE_4(sc, SIS_RXFILT_DATA, ((u_int16_t *)IFP2ENADDR(sc->sis_ifp))[1]); CSR_WRITE_4(sc, SIS_RXFILT_CTL, SIS_FILTADDR_PAR2); CSR_WRITE_4(sc, SIS_RXFILT_DATA, ((u_int16_t *)IFP2ENADDR(sc->sis_ifp))[2]); } /* Init circular TX/RX lists. */ if (sis_ring_init(sc) != 0) { printf("sis%d: initialization failed: no " "memory for rx buffers\n", sc->sis_unit); sis_stop(sc); return; } /* * Short Cable Receive Errors (MP21.E) * also: Page 78 of the DP83815 data sheet (september 2002 version) * recommends the following register settings "for optimum * performance." for rev 15C. The driver from NS also sets * the PHY_CR register for later versions. 
*/ if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr <= NS_SRR_15D) { CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001); CSR_WRITE_4(sc, NS_PHY_CR, 0x189C); if (sc->sis_srr == NS_SRR_15C) { /* set val for c2 */ CSR_WRITE_4(sc, NS_PHY_TDATA, 0x0000); /* load/kill c2 */ CSR_WRITE_4(sc, NS_PHY_DSPCFG, 0x5040); /* rais SD off, from 4 to c */ CSR_WRITE_4(sc, NS_PHY_SDCFG, 0x008C); } CSR_WRITE_4(sc, NS_PHY_PAGE, 0); } /* * For the NatSemi chip, we have to explicitly enable the * reception of ARP frames, as well as turn on the 'perfect * match' filter where we store the station address, otherwise * we won't receive unicasts meant for this host. */ if (sc->sis_type == SIS_TYPE_83815) { SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_ARP); SIS_SETBIT(sc, SIS_RXFILT_CTL, NS_RXFILTCTL_PERFECT); } /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) { SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS); } else { SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ALLPHYS); } /* * Set the capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) { SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD); } else { SIS_CLRBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_BROAD); } /* * Load the multicast filter. */ if (sc->sis_type == SIS_TYPE_83815) sis_setmulti_ns(sc); else sis_setmulti_sis(sc); /* Turn the receive filter on */ SIS_SETBIT(sc, SIS_RXFILT_CTL, SIS_RXFILTCTL_ENABLE); /* * Load the address of the RX and TX lists. */ CSR_WRITE_4(sc, SIS_RX_LISTPTR, sc->sis_rx_paddr); CSR_WRITE_4(sc, SIS_TX_LISTPTR, sc->sis_tx_paddr); /* SIS_CFG_EDB_MASTER_EN indicates the EDB bus is used instead of * the PCI bus. When this bit is set, the Max DMA Burst Size * for TX/RX DMA should be no larger than 16 double words. 
*/ if (CSR_READ_4(sc, SIS_CFG) & SIS_CFG_EDB_MASTER_EN) { CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG64); } else { CSR_WRITE_4(sc, SIS_RX_CFG, SIS_RXCFG256); } /* Accept Long Packets for VLAN support */ SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_JABBER); /* Set TX configuration */ if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T) { CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_10); } else { CSR_WRITE_4(sc, SIS_TX_CFG, SIS_TXCFG_100); } /* Set full/half duplex mode. */ if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { SIS_SETBIT(sc, SIS_TX_CFG, (SIS_TXCFG_IGN_HBEAT|SIS_TXCFG_IGN_CARR)); SIS_SETBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS); } else { SIS_CLRBIT(sc, SIS_TX_CFG, (SIS_TXCFG_IGN_HBEAT|SIS_TXCFG_IGN_CARR)); SIS_CLRBIT(sc, SIS_RX_CFG, SIS_RXCFG_RX_TXPKTS); } if (sc->sis_type == SIS_TYPE_83816) { /* * MPII03.D: Half Duplex Excessive Collisions. * Also page 49 in 83816 manual */ SIS_SETBIT(sc, SIS_TX_CFG, SIS_TXCFG_MPII03D); } if (sc->sis_type == SIS_TYPE_83815 && sc->sis_srr < NS_SRR_16A && IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) { uint32_t reg; /* * Short Cable Receive Errors (MP21.E) */ CSR_WRITE_4(sc, NS_PHY_PAGE, 0x0001); reg = CSR_READ_4(sc, NS_PHY_DSPCFG) & 0xfff; CSR_WRITE_4(sc, NS_PHY_DSPCFG, reg | 0x1000); DELAY(100000); reg = CSR_READ_4(sc, NS_PHY_TDATA) & 0xff; if ((reg & 0x0080) == 0 || (reg > 0xd8 && reg <= 0xff)) { device_printf(sc->sis_self, "Applying short cable fix (reg=%x)\n", reg); CSR_WRITE_4(sc, NS_PHY_TDATA, 0x00e8); reg = CSR_READ_4(sc, NS_PHY_DSPCFG); SIS_SETBIT(sc, NS_PHY_DSPCFG, reg | 0x20); } CSR_WRITE_4(sc, NS_PHY_PAGE, 0); } /* * Enable interrupts. */ CSR_WRITE_4(sc, SIS_IMR, SIS_INTRS); #ifdef DEVICE_POLLING /* * ... only enable interrupts if we are not polling, make sure * they are off otherwise. */ if (ifp->if_flags & IFF_POLLING) CSR_WRITE_4(sc, SIS_IER, 0); else #endif /* DEVICE_POLLING */ CSR_WRITE_4(sc, SIS_IER, 1); /* Enable receiver and transmitter. 
 */
	/* (tail of sis_initl) un-gate TX/RX and kick the receiver. */
	SIS_CLRBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE|SIS_CSR_RX_DISABLE);
	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_RX_ENABLE);

#ifdef notdef
	mii_mediachg(mii);
#endif

	/* Mark the interface up and schedule the periodic stats tick,
	 * unless we were called from sis_tick itself. */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	if (!sc->in_tick)
		callout_reset(&sc->sis_stat_ch, hz, sis_tick, sc);
}

/*
 * Set media options: reset every PHY instance, then let the MII
 * layer program the newly selected media.
 */
static int
sis_ifmedia_upd(struct ifnet *ifp)
{
	struct sis_softc	*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->sis_miibus);
	/* Force link state down; sis_miibus_statchg will raise it again. */
	sc->sis_link = 0;
	if (mii->mii_instance) {
		struct mii_softc	*miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}
	mii_mediachg(mii);

	return(0);
}

/*
 * Report current media status by polling the PHY through miibus.
 */
static void
sis_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct sis_softc	*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->sis_miibus);
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;
}

/*
 * Interface ioctl handler.  Handles flag changes, multicast list
 * updates, media selection and capability toggles; everything else
 * is delegated to ether_ioctl().
 * NOTE(review): the IFF_UP path calls sis_init() unconditionally,
 * i.e. even when the interface is already running — presumably to
 * pick up flag changes such as IFF_PROMISC; confirm against the
 * full driver before relying on that.
 */
static int
sis_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct sis_softc	*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	struct mii_data		*mii;
	int			error = 0;

	switch(command) {
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			sis_init(sc);
		} else if (ifp->if_flags & IFF_RUNNING) {
			SIS_LOCK(sc);
			sis_stop(sc);
			SIS_UNLOCK(sc);
		}
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Reprogram the hardware multicast filter under the lock. */
		SIS_LOCK(sc);
		if (sc->sis_type == SIS_TYPE_83815)
			sis_setmulti_ns(sc);
		else
			sis_setmulti_sis(sc);
		SIS_UNLOCK(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		mii = device_get_softc(sc->sis_miibus);
		SIS_LOCK(sc);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		SIS_UNLOCK(sc);
		break;
	case SIOCSIFCAP:
		/* Only the polling capability is toggleable. */
		ifp->if_capenable &= ~IFCAP_POLLING;
		ifp->if_capenable |= ifr->ifr_reqcap & IFCAP_POLLING;
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return(error);
}

/*
 * Transmit watchdog: fires when a queued packet has not completed
 * within the if_timer interval.  Resets and reinitializes the chip.
 */
static void
sis_watchdog(struct ifnet *ifp)
{
	struct sis_softc	*sc;

	sc = ifp->if_softc;

	SIS_LOCK(sc);
	if (sc->sis_stopped) {
		SIS_UNLOCK(sc);
		return;
	}

	ifp->if_oerrors++;
	printf("sis%d: watchdog timeout\n", sc->sis_unit);

	/* Full stop/reset/reinit cycle, then restart any pending TX. */
	sis_stop(sc);
	sis_reset(sc);
	sis_initl(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		sis_startl(ifp);

	SIS_UNLOCK(sc);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.  Caller must hold the softc lock
 * (asserted below); idempotent via sc->sis_stopped.
 */
static void
sis_stop(struct sis_softc *sc)
{
	int		i;
	struct ifnet	*ifp;
	struct sis_desc	*dp;

	if (sc->sis_stopped)
		return;
	SIS_LOCK_ASSERT(sc);
	ifp = sc->sis_ifp;
	ifp->if_timer = 0;

	callout_stop(&sc->sis_stat_ch);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
#ifdef DEVICE_POLLING
	ether_poll_deregister(ifp);
#endif
	/* Mask and ack interrupts, halt DMA, clear list pointers. */
	CSR_WRITE_4(sc, SIS_IER, 0);
	CSR_WRITE_4(sc, SIS_IMR, 0);
	CSR_READ_4(sc, SIS_ISR); /* clear any interrupts already pending */
	SIS_SETBIT(sc, SIS_CSR, SIS_CSR_TX_DISABLE|SIS_CSR_RX_DISABLE);
	DELAY(1000);
	CSR_WRITE_4(sc, SIS_TX_LISTPTR, 0);
	CSR_WRITE_4(sc, SIS_RX_LISTPTR, 0);

	sc->sis_link = 0;

	/*
	 * Free data in the RX lists.
	 */
	dp = &sc->sis_rx_list[0];
	for (i = 0; i < SIS_RX_LIST_CNT; i++, dp++) {
		if (dp->sis_mbuf == NULL)
			continue;
		bus_dmamap_unload(sc->sis_tag, dp->sis_map);
		bus_dmamap_destroy(sc->sis_tag, dp->sis_map);
		m_freem(dp->sis_mbuf);
		dp->sis_mbuf = NULL;
	}
	bzero(sc->sis_rx_list, SIS_RX_LIST_SZ);

	/*
	 * Free the TX list buffers.
	 */
	dp = &sc->sis_tx_list[0];
	for (i = 0; i < SIS_TX_LIST_CNT; i++, dp++) {
		if (dp->sis_mbuf == NULL)
			continue;
		bus_dmamap_unload(sc->sis_tag, dp->sis_map);
		bus_dmamap_destroy(sc->sis_tag, dp->sis_map);
		m_freem(dp->sis_mbuf);
		dp->sis_mbuf = NULL;
	}
	bzero(sc->sis_tx_list, SIS_TX_LIST_SZ);

	sc->sis_stopped = 1;
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
sis_shutdown(device_t dev)
{
	struct sis_softc	*sc;

	sc = device_get_softc(dev);
	SIS_LOCK(sc);
	sis_reset(sc);
	sis_stop(sc);
	SIS_UNLOCK(sc);
}

/* Newbus method table: device, bus and MII glue for the sis(4) driver. */
static device_method_t sis_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sis_probe),
	DEVMETHOD(device_attach,	sis_attach),
	DEVMETHOD(device_detach,	sis_detach),
	DEVMETHOD(device_shutdown,	sis_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	sis_miibus_readreg),
	DEVMETHOD(miibus_writereg,	sis_miibus_writereg),
	DEVMETHOD(miibus_statchg,	sis_miibus_statchg),

	{ 0, 0 }
};

static driver_t sis_driver = {
	"sis",
	sis_methods,
	sizeof(struct sis_softc)
};

static devclass_t sis_devclass;

DRIVER_MODULE(sis, pci, sis_driver, sis_devclass, 0, 0);
DRIVER_MODULE(miibus, sis, miibus_driver, miibus_devclass, 0, 0);

Index: stable/6/sys/pci/if_sk.c
===================================================================
--- stable/6/sys/pci/if_sk.c	(revision 149421)
+++ stable/6/sys/pci/if_sk.c	(revision 149422)
@@ -1,3002 +1,3004 @@
/* $OpenBSD: if_sk.c,v 2.33 2003/08/12 05:23:06 nate Exp $ */
/*-
 * Copyright (c) 1997, 1998, 1999, 2000
 *	Bill Paul . All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4.
Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (c) 2003 Nathan L. Binkert * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /* * SysKonnect SK-NET gigabit ethernet driver for FreeBSD. Supports * the SK-984x series adapters, both single port and dual port. 
* References: * The XaQti XMAC II datasheet, * http://www.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf * The SysKonnect GEnesis manual, http://www.syskonnect.com * * Note: XaQti has been aquired by Vitesse, and Vitesse does not have the * XMAC II datasheet online. I have put my copy at people.freebsd.org as a * convenience to others until Vitesse corrects this problem: * * http://people.freebsd.org/~wpaul/SysKonnect/xmacii_datasheet_rev_c_9-29.pdf * * Written by Bill Paul * Department of Electrical Engineering * Columbia University, New York City */ /* * The SysKonnect gigabit ethernet adapters consist of two main * components: the SysKonnect GEnesis controller chip and the XaQti Corp. * XMAC II gigabit ethernet MAC. The XMAC provides all of the MAC * components and a PHY while the GEnesis controller provides a PCI * interface with DMA support. Each card may have between 512K and * 2MB of SRAM on board depending on the configuration. * * The SysKonnect GEnesis controller can have either one or two XMAC * chips connected to it, allowing single or dual port NIC configurations. * SysKonnect has the distinction of being the only vendor on the market * with a dual port gigabit ethernet NIC. The GEnesis provides dual FIFOs, * dual DMA queues, packet/MAC/transmit arbiters and direct access to the * XMAC registers. This driver takes advantage of these features to allow * both XMACs to operate as independent interfaces. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include #include #include #include #include #include #if 0 #define SK_USEIOSPACE #endif #include #include #include MODULE_DEPEND(sk, pci, 1, 1, 1); MODULE_DEPEND(sk, ether, 1, 1, 1); MODULE_DEPEND(sk, miibus, 1, 1, 1); /* "controller miibus0" required. See GENERIC if you get errors here. 
*/ #include "miibus_if.h" #ifndef lint static const char rcsid[] = "$FreeBSD$"; #endif static struct sk_type sk_devs[] = { { VENDORID_SK, DEVICEID_SK_V1, "SysKonnect Gigabit Ethernet (V1.0)" }, { VENDORID_SK, DEVICEID_SK_V2, "SysKonnect Gigabit Ethernet (V2.0)" }, { VENDORID_MARVELL, DEVICEID_SK_V2, "Marvell Gigabit Ethernet" }, { VENDORID_MARVELL, DEVICEID_BELKIN_5005, "Belkin F5D5005 Gigabit Ethernet" }, { VENDORID_3COM, DEVICEID_3COM_3C940, "3Com 3C940 Gigabit Ethernet" }, { VENDORID_LINKSYS, DEVICEID_LINKSYS_EG1032, "Linksys EG1032 Gigabit Ethernet" }, { VENDORID_DLINK, DEVICEID_DLINK_DGE530T, "D-Link DGE-530T Gigabit Ethernet" }, { 0, 0, NULL } }; static int skc_probe(device_t); static int skc_attach(device_t); static int skc_detach(device_t); static void skc_shutdown(device_t); static int sk_detach(device_t); static int sk_probe(device_t); static int sk_attach(device_t); static void sk_tick(void *); static void sk_intr(void *); static void sk_intr_xmac(struct sk_if_softc *); static void sk_intr_bcom(struct sk_if_softc *); static void sk_intr_yukon(struct sk_if_softc *); static void sk_rxeof(struct sk_if_softc *); static void sk_txeof(struct sk_if_softc *); static int sk_encap(struct sk_if_softc *, struct mbuf *, u_int32_t *); static void sk_start(struct ifnet *); static int sk_ioctl(struct ifnet *, u_long, caddr_t); static void sk_init(void *); static void sk_init_xmac(struct sk_if_softc *); static void sk_init_yukon(struct sk_if_softc *); static void sk_stop(struct sk_if_softc *); static void sk_watchdog(struct ifnet *); static int sk_ifmedia_upd(struct ifnet *); static void sk_ifmedia_sts(struct ifnet *, struct ifmediareq *); static void sk_reset(struct sk_softc *); static int sk_newbuf(struct sk_if_softc *, struct sk_chain *, struct mbuf *); static int sk_alloc_jumbo_mem(struct sk_if_softc *); static void sk_free_jumbo_mem(struct sk_if_softc *); static void *sk_jalloc(struct sk_if_softc *); static void sk_jfree(void *, void *); static int 
sk_init_rx_ring(struct sk_if_softc *); static void sk_init_tx_ring(struct sk_if_softc *); static u_int32_t sk_win_read_4(struct sk_softc *, int); static u_int16_t sk_win_read_2(struct sk_softc *, int); static u_int8_t sk_win_read_1(struct sk_softc *, int); static void sk_win_write_4(struct sk_softc *, int, u_int32_t); static void sk_win_write_2(struct sk_softc *, int, u_int32_t); static void sk_win_write_1(struct sk_softc *, int, u_int32_t); static u_int8_t sk_vpd_readbyte(struct sk_softc *, int); static void sk_vpd_read_res(struct sk_softc *, struct vpd_res *, int); static void sk_vpd_read(struct sk_softc *); static int sk_miibus_readreg(device_t, int, int); static int sk_miibus_writereg(device_t, int, int, int); static void sk_miibus_statchg(device_t); static int sk_xmac_miibus_readreg(struct sk_if_softc *, int, int); static int sk_xmac_miibus_writereg(struct sk_if_softc *, int, int, int); static void sk_xmac_miibus_statchg(struct sk_if_softc *); static int sk_marv_miibus_readreg(struct sk_if_softc *, int, int); static int sk_marv_miibus_writereg(struct sk_if_softc *, int, int, int); static void sk_marv_miibus_statchg(struct sk_if_softc *); static uint32_t sk_xmchash(const uint8_t *); static uint32_t sk_gmchash(const uint8_t *); static void sk_setfilt(struct sk_if_softc *, caddr_t, int); static void sk_setmulti(struct sk_if_softc *); static void sk_setpromisc(struct sk_if_softc *); static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high); static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS); #ifdef SK_USEIOSPACE #define SK_RES SYS_RES_IOPORT #define SK_RID SK_PCI_LOIO #else #define SK_RES SYS_RES_MEMORY #define SK_RID SK_PCI_LOMEM #endif /* * Note that we have newbus methods for both the GEnesis controller * itself and the XMAC(s). The XMACs are children of the GEnesis, and * the miibus code is a child of the XMACs. We need to do it this way * so that the miibus drivers can access the PHY registers on the * right PHY. 
 * It's not quite what I had in mind, but it's the only
 * design that achieves the desired effect.
 */
/* Newbus glue for the GEnesis controller (skc) ... */
static device_method_t skc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		skc_probe),
	DEVMETHOD(device_attach,	skc_attach),
	DEVMETHOD(device_detach,	skc_detach),
	DEVMETHOD(device_shutdown,	skc_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	{ 0, 0 }
};

static driver_t skc_driver = {
	"skc",
	skc_methods,
	sizeof(struct sk_softc)
};

static devclass_t skc_devclass;

/* ... and for the per-port XMAC children (sk), which also parent miibus. */
static device_method_t sk_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sk_probe),
	DEVMETHOD(device_attach,	sk_attach),
	DEVMETHOD(device_detach,	sk_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	sk_miibus_readreg),
	DEVMETHOD(miibus_writereg,	sk_miibus_writereg),
	DEVMETHOD(miibus_statchg,	sk_miibus_statchg),

	{ 0, 0 }
};

static driver_t sk_driver = {
	"sk",
	sk_methods,
	sizeof(struct sk_if_softc)
};

static devclass_t sk_devclass;

DRIVER_MODULE(sk, pci, skc_driver, skc_devclass, 0, 0);
DRIVER_MODULE(sk, skc, sk_driver, sk_devclass, 0, 0);
DRIVER_MODULE(miibus, sk, miibus_driver, miibus_devclass, 0, 0);

/*
 * Read-modify-write helpers for CSR and window-mapped registers.
 * NOTE(review): the `x' argument is expanded unparenthesized, so
 * callers must not pass low-precedence expressions (e.g. a|b is fine
 * only because | binds tighter than nothing here; a ? b : c would
 * misparse).  All in-tree call sites pass simple masks.
 */
#define SK_SETBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | x)

#define SK_CLRBIT(sc, reg, x)		\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~x)

#define SK_WIN_SETBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) | x)

#define SK_WIN_CLRBIT_4(sc, reg, x)	\
	sk_win_write_4(sc, reg, sk_win_read_4(sc, reg) & ~x)

#define SK_WIN_SETBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) | x)

#define SK_WIN_CLRBIT_2(sc, reg, x)	\
	sk_win_write_2(sc, reg, sk_win_read_2(sc, reg) & ~x)

/*
 * Windowed register read: in I/O-space mode select the window via
 * SK_RAP first; in memory-mapped mode the register is directly
 * addressable.
 */
static u_int32_t
sk_win_read_4(sc, reg)
	struct sk_softc		*sc;
	int			reg;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return(CSR_READ_4(sc, SK_WIN_BASE + SK_REG(reg)));
#else
	return(CSR_READ_4(sc, reg));
#endif
}

/* 16-bit variant of the windowed read. */
static u_int16_t
sk_win_read_2(sc, reg)
	struct sk_softc		*sc;
	int			reg;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return(CSR_READ_2(sc, SK_WIN_BASE + SK_REG(reg)));
#else
	return(CSR_READ_2(sc, reg));
#endif
}

/* 8-bit variant of the windowed read. */
static u_int8_t
sk_win_read_1(sc, reg)
	struct sk_softc		*sc;
	int			reg;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	return(CSR_READ_1(sc, SK_WIN_BASE + SK_REG(reg)));
#else
	return(CSR_READ_1(sc, reg));
#endif
}

/* 32-bit windowed write; window select via SK_RAP in I/O-space mode. */
static void
sk_win_write_4(sc, reg, val)
	struct sk_softc		*sc;
	int			reg;
	u_int32_t		val;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_4(sc, SK_WIN_BASE + SK_REG(reg), val);
#else
	CSR_WRITE_4(sc, reg, val);
#endif
	return;
}

/* 16-bit windowed write. */
static void
sk_win_write_2(sc, reg, val)
	struct sk_softc		*sc;
	int			reg;
	u_int32_t		val;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_2(sc, SK_WIN_BASE + SK_REG(reg), val);
#else
	CSR_WRITE_2(sc, reg, val);
#endif
	return;
}

/* 8-bit windowed write. */
static void
sk_win_write_1(sc, reg, val)
	struct sk_softc		*sc;
	int			reg;
	u_int32_t		val;
{
#ifdef SK_USEIOSPACE
	CSR_WRITE_4(sc, SK_RAP, SK_WIN(reg));
	CSR_WRITE_1(sc, SK_WIN_BASE + SK_REG(reg), val);
#else
	CSR_WRITE_1(sc, reg, val);
#endif
	return;
}

/*
 * The VPD EEPROM contains Vital Product Data, as suggested in
 * the PCI 2.1 specification. The VPD data is separared into areas
 * denoted by resource IDs. The SysKonnect VPD contains an ID string
 * resource (the name of the adapter), a read-only area resource
 * containing various key/data fields and a read/write area which
 * can be used to store asset management information or log messages.
 * We read the ID string and read-only into buffers attached to
 * the controller softc structure for later use. At the moment,
 * we only use the ID string during skc_attach().
 */
/*
 * Read one byte from the VPD EEPROM; polls the VPD flag for up to
 * SK_TIMEOUT iterations and returns 0 on timeout (indistinguishable
 * from a legitimate 0x00 data byte — callers tolerate this).
 */
static u_int8_t
sk_vpd_readbyte(sc, addr)
	struct sk_softc		*sc;
	int			addr;
{
	int			i;

	sk_win_write_2(sc, SK_PCI_REG(SK_PCI_VPD_ADDR), addr);
	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (sk_win_read_2(sc,
		    SK_PCI_REG(SK_PCI_VPD_ADDR)) & SK_VPD_FLAG)
			break;
	}

	if (i == SK_TIMEOUT)
		return(0);

	return(sk_win_read_1(sc, SK_PCI_REG(SK_PCI_VPD_DATA)));
}

/* Read a VPD resource header (struct vpd_res) byte-by-byte at `addr'. */
static void
sk_vpd_read_res(sc, res, addr)
	struct sk_softc		*sc;
	struct vpd_res		*res;
	int			addr;
{
	int			i;
	u_int8_t		*ptr;

	ptr = (u_int8_t *)res;
	for (i = 0; i < sizeof(struct vpd_res); i++)
		ptr[i] = sk_vpd_readbyte(sc, i + addr);

	return;
}

/*
 * Read the VPD ID-string and read-only resources into softc buffers.
 * Frees any previously read copies first.
 * NOTE(review): the malloc() of sk_vpd_readonly near the bottom is
 * not checked for NULL before the copy loop dereferences it — with
 * M_NOWAIT this can fault under memory pressure; worth fixing in the
 * real tree.
 */
static void
sk_vpd_read(sc)
	struct sk_softc		*sc;
{
	int			pos = 0, i;
	struct vpd_res		res;

	if (sc->sk_vpd_prodname != NULL)
		free(sc->sk_vpd_prodname, M_DEVBUF);
	if (sc->sk_vpd_readonly != NULL)
		free(sc->sk_vpd_readonly, M_DEVBUF);
	sc->sk_vpd_prodname = NULL;
	sc->sk_vpd_readonly = NULL;
	sc->sk_vpd_readonly_len = 0;

	sk_vpd_read_res(sc, &res, pos);

	/*
	 * Bail out quietly if the eeprom appears to be missing or empty.
	 */
	if (res.vr_id == 0xff && res.vr_len == 0xff && res.vr_pad == 0xff)
		return;

	if (res.vr_id != VPD_RES_ID) {
		printf("skc%d: bad VPD resource id: expected %x got %x\n",
		    sc->sk_unit, VPD_RES_ID, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->sk_vpd_prodname = malloc(res.vr_len + 1, M_DEVBUF, M_NOWAIT);
	if (sc->sk_vpd_prodname != NULL) {
		for (i = 0; i < res.vr_len; i++)
			sc->sk_vpd_prodname[i] = sk_vpd_readbyte(sc, i + pos);
		sc->sk_vpd_prodname[i] = '\0';
	}
	pos += res.vr_len;

	sk_vpd_read_res(sc, &res, pos);

	if (res.vr_id != VPD_RES_READ) {
		printf("skc%d: bad VPD resource id: expected %x got %x\n",
		    sc->sk_unit, VPD_RES_READ, res.vr_id);
		return;
	}

	pos += sizeof(res);
	sc->sk_vpd_readonly = malloc(res.vr_len, M_DEVBUF, M_NOWAIT);
	for (i = 0; i < res.vr_len; i++)
		sc->sk_vpd_readonly[i] = sk_vpd_readbyte(sc, i + pos);
	sc->sk_vpd_readonly_len = res.vr_len;

	return;
}

/*
 * miibus read dispatcher: route to the XMAC (GEnesis) or Marvell
 * (Yukon family) PHY access routine based on controller type.
 */
static int
sk_miibus_readreg(dev, phy, reg)
	device_t		dev;
	int			phy, reg;
{
	struct sk_if_softc	*sc_if;

	sc_if = device_get_softc(dev);
	switch(sc_if->sk_softc->sk_type) {
	case SK_GENESIS:
		return(sk_xmac_miibus_readreg(sc_if, phy, reg));
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		return(sk_marv_miibus_readreg(sc_if, phy, reg));
	}

	return(0);
}

/* miibus write dispatcher; same routing as the read path above. */
static int
sk_miibus_writereg(dev, phy, reg, val)
	device_t		dev;
	int			phy, reg, val;
{
	struct sk_if_softc	*sc_if;

	sc_if = device_get_softc(dev);
	switch(sc_if->sk_softc->sk_type) {
	case SK_GENESIS:
		return(sk_xmac_miibus_writereg(sc_if, phy, reg, val));
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		return(sk_marv_miibus_writereg(sc_if, phy, reg, val));
	}

	return(0);
}

/* miibus link-state-change dispatcher. */
static void
sk_miibus_statchg(dev)
	device_t		dev;
{
	struct sk_if_softc	*sc_if;

	sc_if = device_get_softc(dev);
	switch(sc_if->sk_softc->sk_type) {
	case SK_GENESIS:
		sk_xmac_miibus_statchg(sc_if);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		sk_marv_miibus_statchg(sc_if);
		break;
	}

	return;
}

/*
 * Read a PHY register through the XMAC.  The internal XMAC PHY only
 * answers at address 0; external GMII PHYs additionally require
 * polling XM_MMUCMD for data-ready.
 */
static int
sk_xmac_miibus_readreg(sc_if, phy, reg)
	struct sk_if_softc	*sc_if;
	int			phy, reg;
{
	int			i;

	if (sc_if->sk_phytype == SK_PHYTYPE_XMAC && phy != 0)
		return(0);

	SK_IF_LOCK(sc_if);
	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	SK_XM_READ_2(sc_if, XM_PHY_DATA);
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		for (i = 0; i < SK_TIMEOUT; i++) {
			DELAY(1);
			if (SK_XM_READ_2(sc_if, XM_MMUCMD) &
			    XM_MMUCMD_PHYDATARDY)
				break;
		}

		if (i == SK_TIMEOUT) {
			printf("sk%d: phy failed to come ready\n",
			    sc_if->sk_unit);
			SK_IF_UNLOCK(sc_if);
			return(0);
		}
	}
	DELAY(1);
	i = SK_XM_READ_2(sc_if, XM_PHY_DATA);
	SK_IF_UNLOCK(sc_if);
	return(i);
}

/*
 * Write a PHY register through the XMAC: wait for not-busy, write
 * the data, then wait for the write to drain.
 */
static int
sk_xmac_miibus_writereg(sc_if, phy, reg, val)
	struct sk_if_softc	*sc_if;
	int			phy, reg, val;
{
	int			i;

	SK_IF_LOCK(sc_if);
	SK_XM_WRITE_2(sc_if, XM_PHY_ADDR, reg|(phy << 8));
	for (i = 0; i < SK_TIMEOUT; i++) {
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	if (i == SK_TIMEOUT) {
		printf("sk%d: phy failed to come ready\n", sc_if->sk_unit);
		SK_IF_UNLOCK(sc_if);
		return(ETIMEDOUT);
	}

	SK_XM_WRITE_2(sc_if, XM_PHY_DATA, val);
	for (i = 0; i < SK_TIMEOUT;
	    i++) {
		DELAY(1);
		if (!(SK_XM_READ_2(sc_if, XM_MMUCMD) & XM_MMUCMD_PHYBUSY))
			break;
	}

	SK_IF_UNLOCK(sc_if);

	if (i == SK_TIMEOUT)
		printf("sk%d: phy write timed out\n", sc_if->sk_unit);

	return(0);
}

/*
 * XMAC link-state change: for external GMII PHYs, mirror the MII
 * duplex setting into the XMAC's MMU command register.
 */
static void
sk_xmac_miibus_statchg(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct mii_data		*mii;

	mii = device_get_softc(sc_if->sk_miibus);
	SK_IF_LOCK(sc_if);
	/*
	 * If this is a GMII PHY, manually set the XMAC's
	 * duplex mode accordingly.
	 */
	if (sc_if->sk_phytype != SK_PHYTYPE_XMAC) {
		if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) {
			SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
		} else {
			SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_GMIIFDX);
		}
	}
	SK_IF_UNLOCK(sc_if);

	return;
}

/*
 * Read a PHY register through the Yukon's SMI interface.  Only PHY
 * address 0 with a Marvell copper/fiber PHY type is valid.
 */
static int
sk_marv_miibus_readreg(sc_if, phy, reg)
	struct sk_if_softc	*sc_if;
	int			phy, reg;
{
	u_int16_t		val;
	int			i;

	if (phy != 0 ||
	    (sc_if->sk_phytype != SK_PHYTYPE_MARV_COPPER &&
	    sc_if->sk_phytype != SK_PHYTYPE_MARV_FIBER)) {
		return(0);
	}

	SK_IF_LOCK(sc_if);
	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
	    YU_SMICR_REGAD(reg) | YU_SMICR_OP_READ);

	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		val = SK_YU_READ_2(sc_if, YUKON_SMICR);
		if (val & YU_SMICR_READ_VALID)
			break;
	}

	if (i == SK_TIMEOUT) {
		printf("sk%d: phy failed to come ready\n",
		    sc_if->sk_unit);
		SK_IF_UNLOCK(sc_if);
		return(0);
	}

	val = SK_YU_READ_2(sc_if, YUKON_SMIDR);
	SK_IF_UNLOCK(sc_if);

	return(val);
}

/*
 * Write a PHY register through the Yukon's SMI interface.
 * NOTE(review): the completion poll tests for YU_SMICR_BUSY being
 * SET rather than cleared — presumably matching the vendor's
 * original logic; confirm against the Yukon datasheet.
 */
static int
sk_marv_miibus_writereg(sc_if, phy, reg, val)
	struct sk_if_softc	*sc_if;
	int			phy, reg, val;
{
	int			i;

	SK_IF_LOCK(sc_if);
	SK_YU_WRITE_2(sc_if, YUKON_SMIDR, val);
	SK_YU_WRITE_2(sc_if, YUKON_SMICR, YU_SMICR_PHYAD(phy) |
	    YU_SMICR_REGAD(reg) | YU_SMICR_OP_WRITE);

	for (i = 0; i < SK_TIMEOUT; i++) {
		DELAY(1);
		if (SK_YU_READ_2(sc_if, YUKON_SMICR) & YU_SMICR_BUSY)
			break;
	}

	SK_IF_UNLOCK(sc_if);

	return(0);
}

/* Marvell PHYs need no MAC-side action on a link-state change. */
static void
sk_marv_miibus_statchg(sc_if)
	struct sk_if_softc	*sc_if;
{
	return;
}

#define HASH_BITS		6

/* XMAC multicast hash: low HASH_BITS of the inverted little-endian CRC. */
static u_int32_t
sk_xmchash(addr)
	const uint8_t *addr;
{
	uint32_t crc;

	/* Compute CRC for the address value.
	 */
	crc = ether_crc32_le(addr, ETHER_ADDR_LEN);

	return (~crc & ((1 << HASH_BITS) - 1));
}

/* gmchash is just a big endian crc */
static u_int32_t
sk_gmchash(addr)
	const uint8_t *addr;
{
	uint32_t crc;

	/* Compute CRC for the address value. */
	crc = ether_crc32_be(addr, ETHER_ADDR_LEN);

	return (crc & ((1 << HASH_BITS) - 1));
}

/*
 * Program one entry of the XMAC's perfect (exact-match) RX filter
 * with a 6-byte MAC address, written as three 16-bit halves.
 */
static void
sk_setfilt(sc_if, addr, slot)
	struct sk_if_softc	*sc_if;
	caddr_t			addr;
	int			slot;
{
	int			base;

	base = XM_RXFILT_ENTRY(slot);

	SK_XM_WRITE_2(sc_if, base, *(u_int16_t *)(&addr[0]));
	SK_XM_WRITE_2(sc_if, base + 2, *(u_int16_t *)(&addr[2]));
	SK_XM_WRITE_2(sc_if, base + 4, *(u_int16_t *)(&addr[4]));

	return;
}

/*
 * Rebuild the hardware multicast filter from if_multiaddrs: clear
 * all existing perfect/hash entries, then program the new set.
 * This revision's change: the if_multiaddrs walk is now protected
 * by IF_ADDR_LOCK (the `+' lines below are the patch additions).
 */
static void
sk_setmulti(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct ifnet		*ifp = sc_if->sk_ifp;
	u_int32_t		hashes[2] = { 0, 0 };
	int			h = 0, i;
	struct ifmultiaddr	*ifma;
	u_int8_t		dummy[] = { 0, 0, 0, 0, 0 ,0 };

	/* First, zot all the existing filters. */
	switch(sc->sk_type) {
	case SK_GENESIS:
		for (i = 1; i < XM_RXFILT_MAX; i++)
			sk_setfilt(sc_if, (caddr_t)&dummy, i);

		SK_XM_WRITE_4(sc_if, XM_MAR0, 0);
		SK_XM_WRITE_4(sc_if, XM_MAR2, 0);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, 0);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, 0);
		break;
	}

	/* Now program new ones. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		hashes[0] = 0xFFFFFFFF;
		hashes[1] = 0xFFFFFFFF;
	} else {
		i = 1;
+		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead,
		    ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_LINK)
				continue;
			/*
			 * Program the first XM_RXFILT_MAX multicast groups
			 * into the perfect filter. For all others,
			 * use the hash table.
			 */
			if (sc->sk_type == SK_GENESIS && i < XM_RXFILT_MAX) {
				sk_setfilt(sc_if,
			LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i);
				i++;
				continue;
			}

			switch(sc->sk_type) {
			case SK_GENESIS:
				h = sk_xmchash(
					LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
				break;
			case SK_YUKON:
			case SK_YUKON_LITE:
			case SK_YUKON_LP:
				h = sk_gmchash(
					LLADDR((struct sockaddr_dl *)ifma->ifma_addr));
				break;
			}
			if (h < 32)
				hashes[0] |= (1 << h);
			else
				hashes[1] |= (1 << (h - 32));
		}
+		IF_ADDR_UNLOCK(ifp);
	}

	/* Finally, push the computed hash words into the MAC. */
	switch(sc->sk_type) {
	case SK_GENESIS:
		SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_HASH|
			       XM_MODE_RX_USE_PERFECT);
		SK_XM_WRITE_4(sc_if, XM_MAR0, hashes[0]);
		SK_XM_WRITE_4(sc_if, XM_MAR2, hashes[1]);
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		SK_YU_WRITE_2(sc_if, YUKON_MCAH1, hashes[0] & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH2, (hashes[0] >> 16) & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH3, hashes[1] & 0xffff);
		SK_YU_WRITE_2(sc_if, YUKON_MCAH4, (hashes[1] >> 16) & 0xffff);
		break;
	}

	return;
}

/*
 * Reflect IFF_PROMISC into the MAC.  Note the Yukon logic is
 * inverted relative to GEnesis: promiscuity is achieved by CLEARING
 * the unicast/multicast filter-enable bits.
 */
static void
sk_setpromisc(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc = sc_if->sk_softc;
	struct ifnet		*ifp = sc_if->sk_ifp;

	switch(sc->sk_type) {
	case SK_GENESIS:
		if (ifp->if_flags & IFF_PROMISC) {
			SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
		} else {
			SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_PROMISC);
		}
		break;
	case SK_YUKON:
	case SK_YUKON_LITE:
	case SK_YUKON_LP:
		if (ifp->if_flags & IFF_PROMISC) {
			SK_YU_CLRBIT_2(sc_if, YUKON_RCR,
			    YU_RCR_UFLEN | YU_RCR_MUFLEN);
		} else {
			SK_YU_SETBIT_2(sc_if, YUKON_RCR,
			    YU_RCR_UFLEN | YU_RCR_MUFLEN);
		}
		break;
	}

	return;
}

/*
 * Initialize the RX descriptor ring: allocate a jumbo mbuf for each
 * slot and link the descriptors into a circular list (physical
 * addresses via vtophys).  Returns ENOBUFS if any allocation fails.
 */
static int
sk_init_rx_ring(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_chain_data	*cd = &sc_if->sk_cdata;
	struct sk_ring_data	*rd = sc_if->sk_rdata;
	int			i;

	bzero((char *)rd->sk_rx_ring,
	    sizeof(struct sk_rx_desc) * SK_RX_RING_CNT);

	for (i = 0; i < SK_RX_RING_CNT; i++) {
		cd->sk_rx_chain[i].sk_desc = &rd->sk_rx_ring[i];
		if (sk_newbuf(sc_if, &cd->sk_rx_chain[i], NULL) == ENOBUFS)
			return(ENOBUFS);
		if (i == (SK_RX_RING_CNT - 1)) {
			/* Last slot wraps back to slot 0. */
			cd->sk_rx_chain[i].sk_next =
			    &cd->sk_rx_chain[0];
			rd->sk_rx_ring[i].sk_next =
			    vtophys(&rd->sk_rx_ring[0]);
		} else {
			cd->sk_rx_chain[i].sk_next =
			    &cd->sk_rx_chain[i + 1];
			rd->sk_rx_ring[i].sk_next =
			    vtophys(&rd->sk_rx_ring[i + 1]);
		}
	}

	sc_if->sk_cdata.sk_rx_prod = 0;
	sc_if->sk_cdata.sk_rx_cons = 0;

	return(0);
}

/*
 * Initialize the TX descriptor ring as a circular list; no buffers
 * are attached until packets are queued.
 */
static void
sk_init_tx_ring(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_chain_data	*cd = &sc_if->sk_cdata;
	struct sk_ring_data	*rd = sc_if->sk_rdata;
	int			i;

	bzero((char *)sc_if->sk_rdata->sk_tx_ring,
	    sizeof(struct sk_tx_desc) * SK_TX_RING_CNT);

	for (i = 0; i < SK_TX_RING_CNT; i++) {
		cd->sk_tx_chain[i].sk_desc = &rd->sk_tx_ring[i];
		if (i == (SK_TX_RING_CNT - 1)) {
			cd->sk_tx_chain[i].sk_next =
			    &cd->sk_tx_chain[0];
			rd->sk_tx_ring[i].sk_next =
			    vtophys(&rd->sk_tx_ring[0]);
		} else {
			cd->sk_tx_chain[i].sk_next =
			    &cd->sk_tx_chain[i + 1];
			rd->sk_tx_ring[i].sk_next =
			    vtophys(&rd->sk_tx_ring[i + 1]);
		}
	}

	sc_if->sk_cdata.sk_tx_prod = 0;
	sc_if->sk_cdata.sk_tx_cons = 0;
	sc_if->sk_cdata.sk_tx_cnt = 0;

	return;
}

/*
 * Attach a jumbo buffer to an RX descriptor.  If `m' is NULL a new
 * mbuf header is allocated and backed by a buffer from the private
 * jumbo pool; otherwise the caller's mbuf is recycled in place.
 */
static int
sk_newbuf(sc_if, c, m)
	struct sk_if_softc	*sc_if;
	struct sk_chain		*c;
	struct mbuf		*m;
{
	struct mbuf		*m_new = NULL;
	struct sk_rx_desc	*r;

	if (m == NULL) {
		caddr_t			*buf = NULL;

		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return(ENOBUFS);

		/* Allocate the jumbo buffer */
		buf = sk_jalloc(sc_if);
		if (buf == NULL) {
			m_freem(m_new);
#ifdef SK_VERBOSE
			printf("sk%d: jumbo allocation failed "
			    "-- packet dropped!\n", sc_if->sk_unit);
#endif
			return(ENOBUFS);
		}

		/* Attach the buffer to the mbuf */
		MEXTADD(m_new, buf, SK_JLEN, sk_jfree,
		    (struct sk_if_softc *)sc_if, 0, EXT_NET_DRV);
		m_new->m_data = (void *)buf;
		m_new->m_pkthdr.len = m_new->m_len = SK_JLEN;
	} else {
		/*
		 * We're re-using a previously allocated mbuf;
		 * be sure to re-init pointers and lengths to
		 * default values.
		 */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = SK_JLEN;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/*
	 * Adjust alignment so packet payload begins on a
	 * longword boundary.
Mandatory for Alpha, useful on * x86 too. */ m_adj(m_new, ETHER_ALIGN); r = c->sk_desc; c->sk_mbuf = m_new; r->sk_data_lo = vtophys(mtod(m_new, caddr_t)); r->sk_ctl = m_new->m_len | SK_RXSTAT; return(0); } /* * Allocate jumbo buffer storage. The SysKonnect adapters support * "jumbograms" (9K frames), although SysKonnect doesn't currently * use them in their drivers. In order for us to use them, we need * large 9K receive buffers, however standard mbuf clusters are only * 2048 bytes in size. Consequently, we need to allocate and manage * our own jumbo buffer pool. Fortunately, this does not require an * excessive amount of additional code. */ static int sk_alloc_jumbo_mem(sc_if) struct sk_if_softc *sc_if; { caddr_t ptr; register int i; struct sk_jpool_entry *entry; /* Grab a big chunk o' storage. */ sc_if->sk_cdata.sk_jumbo_buf = contigmalloc(SK_JMEM, M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc_if->sk_cdata.sk_jumbo_buf == NULL) { printf("sk%d: no memory for jumbo buffers!\n", sc_if->sk_unit); return(ENOBUFS); } mtx_init(&sc_if->sk_jlist_mtx, "sk_jlist_mtx", NULL, MTX_DEF); SLIST_INIT(&sc_if->sk_jfree_listhead); SLIST_INIT(&sc_if->sk_jinuse_listhead); /* * Now divide it up into 9K pieces and save the addresses * in an array. */ ptr = sc_if->sk_cdata.sk_jumbo_buf; for (i = 0; i < SK_JSLOTS; i++) { sc_if->sk_cdata.sk_jslots[i] = ptr; ptr += SK_JLEN; entry = malloc(sizeof(struct sk_jpool_entry), M_DEVBUF, M_NOWAIT); if (entry == NULL) { sk_free_jumbo_mem(sc_if); sc_if->sk_cdata.sk_jumbo_buf = NULL; printf("sk%d: no memory for jumbo " "buffer queue!\n", sc_if->sk_unit); return(ENOBUFS); } entry->slot = i; SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead, entry, jpool_entries); } return(0); } static void sk_free_jumbo_mem(sc_if) struct sk_if_softc *sc_if; { struct sk_jpool_entry *entry; SK_JLIST_LOCK(sc_if); /* We cannot release external mbuf storage while in use. 
 */
	if (!SLIST_EMPTY(&sc_if->sk_jinuse_listhead)) {
		printf("sk%d: will leak jumbo buffer memory!\n",
		    sc_if->sk_unit);
		SK_JLIST_UNLOCK(sc_if);
		return;
	}

	/* Drain the free list; each entry was malloc'ed individually. */
	while (!SLIST_EMPTY(&sc_if->sk_jfree_listhead)) {
		entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);
		SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
		free(entry, M_DEVBUF);
	}

	SK_JLIST_UNLOCK(sc_if);

	mtx_destroy(&sc_if->sk_jlist_mtx);

	contigfree(sc_if->sk_cdata.sk_jumbo_buf, SK_JMEM, M_DEVBUF);

	return;
}

/*
 * Allocate a jumbo buffer.
 * Pop an entry off the free list, move it to the in-use list and hand
 * back the corresponding 9K region from the slot array.  Returns NULL
 * when the pool is exhausted.  The pool mutex covers both list
 * manipulations.
 */
static void *
sk_jalloc(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_jpool_entry	*entry;

	SK_JLIST_LOCK(sc_if);

	entry = SLIST_FIRST(&sc_if->sk_jfree_listhead);

	if (entry == NULL) {
#ifdef SK_VERBOSE
		printf("sk%d: no free jumbo buffers\n", sc_if->sk_unit);
#endif
		SK_JLIST_UNLOCK(sc_if);
		return(NULL);
	}

	SLIST_REMOVE_HEAD(&sc_if->sk_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc_if->sk_jinuse_listhead, entry, jpool_entries);

	SK_JLIST_UNLOCK(sc_if);

	return(sc_if->sk_cdata.sk_jslots[entry->slot]);
}

/*
 * Release a jumbo buffer.
 * This is the external-storage free routine registered via MEXTADD in
 * sk_newbuf(); 'buf' is the jumbo region and 'args' carries the
 * interface softc.  The slot index is recomputed from the buffer's
 * offset within the pool, and a bookkeeping entry is moved back from
 * the in-use list to the free list.
 */
static void
sk_jfree(buf, args)
	void			*buf;
	void			*args;
{
	struct sk_if_softc	*sc_if;
	int			i;
	struct sk_jpool_entry	*entry;

	/* Extract the softc struct pointer. */
	sc_if = (struct sk_if_softc *)args;

	if (sc_if == NULL)
		panic("sk_jfree: didn't get softc pointer!");

	SK_JLIST_LOCK(sc_if);

	/* calculate the slot this buffer belongs to */
	i = ((vm_offset_t)buf
	     - (vm_offset_t)sc_if->sk_cdata.sk_jumbo_buf) / SK_JLEN;

	if ((i < 0) || (i >= SK_JSLOTS))
		panic("sk_jfree: asked to free buffer that we don't manage!");

	/*
	 * NOTE(review): the entry popped here is whatever happens to
	 * head the in-use list, not necessarily the entry originally
	 * assigned to slot 'i'; its slot number is simply rewritten.
	 * Entries are interchangeable tokens, so this is consistent
	 * with how sk_jalloc() hands them out.
	 */
	entry = SLIST_FIRST(&sc_if->sk_jinuse_listhead);
	if (entry == NULL)
		panic("sk_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc_if->sk_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc_if->sk_jfree_listhead,
	    entry, jpool_entries);
	/* sk_free_jumbo_mem() may be sleeping for the pool to drain. */
	if (SLIST_EMPTY(&sc_if->sk_jinuse_listhead))
		wakeup(sc_if);

	SK_JLIST_UNLOCK(sc_if);

	return;
}

/*
 * Set media options.
 */
static int
sk_ifmedia_upd(ifp)
	struct ifnet		*ifp;
{
	struct sk_if_softc	*sc_if = ifp->if_softc;
	struct mii_data		*mii;

	mii = device_get_softc(sc_if->sk_miibus);
	/* Reinitialize the interface, then kick the PHY. */
	sk_init(sc_if);
	mii_mediachg(mii);

	return(0);
}

/*
 * Report current media status.
 */
static void
sk_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct sk_if_softc	*sc_if;
	struct mii_data		*mii;

	sc_if = ifp->if_softc;
	mii = device_get_softc(sc_if->sk_miibus);

	/* Ask the PHY layer for up-to-date link/media state. */
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

/*
 * Interface ioctl handler: MTU changes, up/down and promiscuous flag
 * transitions, multicast filter updates and media requests.  Anything
 * unrecognized is passed to the generic ether_ioctl() handler.
 */
static int
sk_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct sk_if_softc	*sc_if = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			error = 0;
	struct mii_data		*mii;

	switch(command) {
	case SIOCSIFMTU:
		/*
		 * NOTE(review): only the upper bound (SK_JUMBO_MTU) is
		 * validated -- no minimum-MTU check -- and no softc
		 * lock is held across the reinit; confirm against
		 * later revisions of this driver.
		 */
		if (ifr->ifr_mtu > SK_JUMBO_MTU)
			error = EINVAL;
		else {
			ifp->if_mtu = ifr->ifr_mtu;
			/* Force a full reinit so the new MTU is programmed. */
			ifp->if_flags &= ~IFF_RUNNING;
			sk_init(sc_if);
		}
		break;
	case SIOCSIFFLAGS:
		SK_IF_LOCK(sc_if);
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING) {
				/*
				 * Only reprogram the filters when the
				 * PROMISC bit actually changed.
				 */
				if ((ifp->if_flags ^ sc_if->sk_if_flags)
				    & IFF_PROMISC) {
					sk_setpromisc(sc_if);
					sk_setmulti(sc_if);
				}
			} else
				sk_init(sc_if);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				sk_stop(sc_if);
		}
		sc_if->sk_if_flags = ifp->if_flags;
		SK_IF_UNLOCK(sc_if);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			SK_IF_LOCK(sc_if);
			sk_setmulti(sc_if);
			SK_IF_UNLOCK(sc_if);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* Media requests are delegated to the MII layer. */
		mii = device_get_softc(sc_if->sk_miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return(error);
}

/*
 * Probe for a SysKonnect GEnesis chip. Check the PCI vendor and device
 * IDs against our list and return a device name if we find a match.
 */
static int
skc_probe(dev)
	device_t		dev;
{
	struct sk_softc		*sc;
	struct sk_type		*t = sk_devs;

	sc = device_get_softc(dev);

	/* Walk the supported-device table looking for a PCI id match. */
	while(t->sk_name != NULL) {
		if ((pci_get_vendor(dev) == t->sk_vid) &&
		    (pci_get_device(dev) == t->sk_did)) {
			device_set_desc(dev, t->sk_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return(ENXIO);
}

/*
 * Force the GEnesis into reset, then bring it out of reset.
 * Also unresets the packet arbiter (GEnesis only), enables the RAM
 * interface and programs the interrupt moderation timer from the
 * sk_int_mod tunable.
 */
static void
sk_reset(sc)
	struct sk_softc		*sc;
{
	/* Assert software + master reset; Yukon also resets the link. */
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_RESET);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_RESET);
	if (SK_YUKON_FAMILY(sc->sk_type))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_SET);

	/* Deassert in the same order, with settle delays between. */
	DELAY(1000);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_SW_UNRESET);
	DELAY(2);
	CSR_WRITE_2(sc, SK_CSR, SK_CSR_MASTER_UNRESET);
	if (SK_YUKON_FAMILY(sc->sk_type))
		CSR_WRITE_2(sc, SK_LINK_CTRL, SK_LINK_RESET_CLEAR);

	if (sc->sk_type == SK_GENESIS) {
		/* Configure packet arbiter */
		sk_win_write_2(sc, SK_PKTARB_CTL, SK_PKTARBCTL_UNRESET);
		sk_win_write_2(sc, SK_RXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA1_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_RXPA2_TINIT, SK_PKTARB_TIMEOUT);
		sk_win_write_2(sc, SK_TXPA2_TINIT, SK_PKTARB_TIMEOUT);
	}

	/* Enable RAM interface */
	sk_win_write_4(sc, SK_RAMCTL, SK_RAMCTL_UNRESET);

	/*
	 * Configure interrupt moderation. The moderation timer
	 * defers interrupts specified in the interrupt moderation
	 * timer mask based on the timeout specified in the interrupt
	 * moderation timer init register. Each bit in the timer
	 * register represents 18.825ns, so to specify a timeout in
	 * microseconds, we have to multiply by 54.
	 */
	printf("skc%d: interrupt moderation is %d us\n",
	    sc->sk_unit, sc->sk_int_mod);
	sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod));
	sk_win_write_4(sc, SK_IMMR, SK_ISR_TX1_S_EOF|SK_ISR_TX2_S_EOF|
	    SK_ISR_RX1_EOF|SK_ISR_RX2_EOF);
	sk_win_write_1(sc, SK_IMTIMERCTL, SK_IMCTL_START);

	return;
}

/*
 * Probe routine for the per-port logical interfaces created by
 * skc_attach(); identifies the MAC type from the parent softc.
 */
static int
sk_probe(dev)
	device_t		dev;
{
	struct sk_softc		*sc;

	sc = device_get_softc(device_get_parent(dev));

	/*
	 * Not much to do here.
We always know there will be * at least one XMAC present, and if there are two, * skc_attach() will create a second device instance * for us. */ switch (sc->sk_type) { case SK_GENESIS: device_set_desc(dev, "XaQti Corp. XMAC II"); break; case SK_YUKON: case SK_YUKON_LITE: case SK_YUKON_LP: device_set_desc(dev, "Marvell Semiconductor, Inc. Yukon"); break; } return (BUS_PROBE_DEFAULT); } /* * Each XMAC chip is attached as a separate logical IP interface. * Single port cards will have only one logical interface of course. */ static int sk_attach(dev) device_t dev; { struct sk_softc *sc; struct sk_if_softc *sc_if; struct ifnet *ifp; int i, port, error; u_char eaddr[6]; if (dev == NULL) return(EINVAL); error = 0; sc_if = device_get_softc(dev); sc = device_get_softc(device_get_parent(dev)); port = *(int *)device_get_ivars(dev); sc_if->sk_dev = dev; sc_if->sk_unit = device_get_unit(dev); sc_if->sk_port = port; sc_if->sk_softc = sc; sc->sk_if[port] = sc_if; if (port == SK_PORT_A) sc_if->sk_tx_bmu = SK_BMU_TXS_CSR0; if (port == SK_PORT_B) sc_if->sk_tx_bmu = SK_BMU_TXS_CSR1; /* Allocate the descriptor queues. */ sc_if->sk_rdata = contigmalloc(sizeof(struct sk_ring_data), M_DEVBUF, M_NOWAIT, M_ZERO, 0xffffffff, PAGE_SIZE, 0); if (sc_if->sk_rdata == NULL) { printf("sk%d: no memory for list buffers!\n", sc_if->sk_unit); error = ENOMEM; goto fail; } /* Try to allocate memory for jumbo buffers. 
*/ if (sk_alloc_jumbo_mem(sc_if)) { printf("sk%d: jumbo buffer allocation failed\n", sc_if->sk_unit); error = ENOMEM; goto fail; } ifp = sc_if->sk_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { printf("sk%d: can not if_alloc()\n", sc_if->sk_unit); error = ENOSPC; goto fail; } ifp->if_softc = sc_if; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = sk_ioctl; ifp->if_start = sk_start; ifp->if_watchdog = sk_watchdog; ifp->if_init = sk_init; ifp->if_baudrate = 1000000000; IFQ_SET_MAXLEN(&ifp->if_snd, SK_TX_RING_CNT - 1); ifp->if_snd.ifq_drv_maxlen = SK_TX_RING_CNT - 1; IFQ_SET_READY(&ifp->if_snd); callout_handle_init(&sc_if->sk_tick_ch); /* * Get station address for this interface. Note that * dual port cards actually come with three station * addresses: one for each port, plus an extra. The * extra one is used by the SysKonnect driver software * as a 'virtual' station address for when both ports * are operating in failover mode. Currently we don't * use this extra address. */ SK_LOCK(sc); for (i = 0; i < ETHER_ADDR_LEN; i++) eaddr[i] = sk_win_read_1(sc, SK_MAC0_0 + (port * 8) + i); /* * Set up RAM buffer addresses. The NIC will have a certain * amount of SRAM on it, somewhere between 512K and 2MB. We * need to divide this up a) between the transmitter and * receiver and b) between the two XMACs, if this is a * dual port NIC. Our algotithm is to divide up the memory * evenly so that everyone gets a fair share. 
*/ if (sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC) { u_int32_t chunk, val; chunk = sc->sk_ramsize / 2; val = sc->sk_rboff / sizeof(u_int64_t); sc_if->sk_rx_ramstart = val; val += (chunk / sizeof(u_int64_t)); sc_if->sk_rx_ramend = val - 1; sc_if->sk_tx_ramstart = val; val += (chunk / sizeof(u_int64_t)); sc_if->sk_tx_ramend = val - 1; } else { u_int32_t chunk, val; chunk = sc->sk_ramsize / 4; val = (sc->sk_rboff + (chunk * 2 * sc_if->sk_port)) / sizeof(u_int64_t); sc_if->sk_rx_ramstart = val; val += (chunk / sizeof(u_int64_t)); sc_if->sk_rx_ramend = val - 1; sc_if->sk_tx_ramstart = val; val += (chunk / sizeof(u_int64_t)); sc_if->sk_tx_ramend = val - 1; } /* Read and save PHY type and set PHY address */ sc_if->sk_phytype = sk_win_read_1(sc, SK_EPROM1) & 0xF; switch(sc_if->sk_phytype) { case SK_PHYTYPE_XMAC: sc_if->sk_phyaddr = SK_PHYADDR_XMAC; break; case SK_PHYTYPE_BCOM: sc_if->sk_phyaddr = SK_PHYADDR_BCOM; break; case SK_PHYTYPE_MARV_COPPER: sc_if->sk_phyaddr = SK_PHYADDR_MARV; break; default: printf("skc%d: unsupported PHY type: %d\n", sc->sk_unit, sc_if->sk_phytype); error = ENODEV; SK_UNLOCK(sc); if_free(ifp); goto fail; } /* * Call MI attach routine. Can't hold locks when calling into ether_*. */ SK_UNLOCK(sc); ether_ifattach(ifp, eaddr); SK_LOCK(sc); /* * Do miibus setup. */ switch (sc->sk_type) { case SK_GENESIS: sk_init_xmac(sc_if); break; case SK_YUKON: case SK_YUKON_LITE: case SK_YUKON_LP: sk_init_yukon(sc_if); break; } SK_UNLOCK(sc); if (mii_phy_probe(dev, &sc_if->sk_miibus, sk_ifmedia_upd, sk_ifmedia_sts)) { printf("skc%d: no PHY found!\n", sc_if->sk_unit); ether_ifdetach(ifp); if_free(ifp); error = ENXIO; goto fail; } fail: if (error) { /* Access should be ok even though lock has been dropped */ sc->sk_if[port] = NULL; sk_detach(dev); } return(error); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. 
*/ static int skc_attach(dev) device_t dev; { struct sk_softc *sc; int unit, error = 0, rid, *port; uint8_t skrs; char *pname, *revstr; sc = device_get_softc(dev); unit = device_get_unit(dev); mtx_init(&sc->sk_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); /* * Map control/status registers. */ pci_enable_busmaster(dev); rid = SK_RID; sc->sk_res = bus_alloc_resource_any(dev, SK_RES, &rid, RF_ACTIVE); if (sc->sk_res == NULL) { printf("sk%d: couldn't map ports/memory\n", unit); error = ENXIO; goto fail; } sc->sk_btag = rman_get_bustag(sc->sk_res); sc->sk_bhandle = rman_get_bushandle(sc->sk_res); sc->sk_type = sk_win_read_1(sc, SK_CHIPVER); sc->sk_rev = (sk_win_read_1(sc, SK_CONFIG) >> 4) & 0xf; /* Bail out if chip is not recognized. */ if (sc->sk_type != SK_GENESIS && !SK_YUKON_FAMILY(sc->sk_type)) { printf("skc%d: unknown device: chipver=%02x, rev=%x\n", unit, sc->sk_type, sc->sk_rev); error = ENXIO; goto fail; } /* Allocate interrupt */ rid = 0; sc->sk_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->sk_irq == NULL) { printf("skc%d: couldn't map interrupt\n", unit); error = ENXIO; goto fail; } SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "int_mod", CTLTYPE_INT|CTLFLAG_RW, &sc->sk_int_mod, 0, sysctl_hw_sk_int_mod, "I", "SK interrupt moderation"); /* Pull in device tunables. */ sc->sk_int_mod = SK_IM_DEFAULT; error = resource_int_value(device_get_name(dev), unit, "int_mod", &sc->sk_int_mod); if (error == 0) { if (sc->sk_int_mod < SK_IM_MIN || sc->sk_int_mod > SK_IM_MAX) { printf("skc%d: int_mod value out of range; " "using default: %d\n", unit, SK_IM_DEFAULT); sc->sk_int_mod = SK_IM_DEFAULT; } } /* Reset the adapter. */ sk_reset(sc); sc->sk_unit = unit; /* Read and save vital product data from EEPROM. 
*/ sk_vpd_read(sc); skrs = sk_win_read_1(sc, SK_EPROM0); if (sc->sk_type == SK_GENESIS) { /* Read and save RAM size and RAMbuffer offset */ switch(skrs) { case SK_RAMSIZE_512K_64: sc->sk_ramsize = 0x80000; sc->sk_rboff = SK_RBOFF_0; break; case SK_RAMSIZE_1024K_64: sc->sk_ramsize = 0x100000; sc->sk_rboff = SK_RBOFF_80000; break; case SK_RAMSIZE_1024K_128: sc->sk_ramsize = 0x100000; sc->sk_rboff = SK_RBOFF_0; break; case SK_RAMSIZE_2048K_128: sc->sk_ramsize = 0x200000; sc->sk_rboff = SK_RBOFF_0; break; default: printf("skc%d: unknown ram size: %d\n", sc->sk_unit, sk_win_read_1(sc, SK_EPROM0)); error = ENXIO; goto fail; } } else { /* SK_YUKON_FAMILY */ if (skrs == 0x00) sc->sk_ramsize = 0x20000; else sc->sk_ramsize = skrs * (1<<12); sc->sk_rboff = SK_RBOFF_0; } /* Read and save physical media type */ switch(sk_win_read_1(sc, SK_PMDTYPE)) { case SK_PMD_1000BASESX: sc->sk_pmd = IFM_1000_SX; break; case SK_PMD_1000BASELX: sc->sk_pmd = IFM_1000_LX; break; case SK_PMD_1000BASECX: sc->sk_pmd = IFM_1000_CX; break; case SK_PMD_1000BASETX: sc->sk_pmd = IFM_1000_T; break; default: printf("skc%d: unknown media type: 0x%x\n", sc->sk_unit, sk_win_read_1(sc, SK_PMDTYPE)); error = ENXIO; goto fail; } /* Determine whether to name it with VPD PN or just make it up. * Marvell Yukon VPD PN seems to freqently be bogus. */ switch (pci_get_device(dev)) { case DEVICEID_SK_V1: case DEVICEID_BELKIN_5005: case DEVICEID_3COM_3C940: case DEVICEID_LINKSYS_EG1032: case DEVICEID_DLINK_DGE530T: /* Stay with VPD PN. */ pname = sc->sk_vpd_prodname; break; case DEVICEID_SK_V2: /* YUKON VPD PN might bear no resemblance to reality. */ switch (sc->sk_type) { case SK_GENESIS: /* Stay with VPD PN. 
*/ pname = sc->sk_vpd_prodname; break; case SK_YUKON: pname = "Marvell Yukon Gigabit Ethernet"; break; case SK_YUKON_LITE: pname = "Marvell Yukon Lite Gigabit Ethernet"; break; case SK_YUKON_LP: pname = "Marvell Yukon LP Gigabit Ethernet"; break; default: pname = "Marvell Yukon (Unknown) Gigabit Ethernet"; break; } /* Yukon Lite Rev. A0 needs special test. */ if (sc->sk_type == SK_YUKON || sc->sk_type == SK_YUKON_LP) { u_int32_t far; u_int8_t testbyte; /* Save flash address register before testing. */ far = sk_win_read_4(sc, SK_EP_ADDR); sk_win_write_1(sc, SK_EP_ADDR+0x03, 0xff); testbyte = sk_win_read_1(sc, SK_EP_ADDR+0x03); if (testbyte != 0x00) { /* Yukon Lite Rev. A0 detected. */ sc->sk_type = SK_YUKON_LITE; sc->sk_rev = SK_YUKON_LITE_REV_A0; /* Restore flash address register. */ sk_win_write_4(sc, SK_EP_ADDR, far); } } break; default: device_printf(dev, "unknown device: vendor=%04x, device=%04x, " "chipver=%02x, rev=%x\n", pci_get_vendor(dev), pci_get_device(dev), sc->sk_type, sc->sk_rev); error = ENXIO; goto fail; } if (sc->sk_type == SK_YUKON_LITE) { switch (sc->sk_rev) { case SK_YUKON_LITE_REV_A0: revstr = "A0"; break; case SK_YUKON_LITE_REV_A1: revstr = "A1"; break; case SK_YUKON_LITE_REV_A3: revstr = "A3"; break; default: revstr = ""; break; } } else { revstr = ""; } /* Announce the product name and more VPD data if there. */ device_printf(dev, "%s rev. %s(0x%x)\n", pname != NULL ? 
pname : "", revstr, sc->sk_rev); if (bootverbose) { if (sc->sk_vpd_readonly != NULL && sc->sk_vpd_readonly_len != 0) { char buf[256]; char *dp = sc->sk_vpd_readonly; uint16_t l, len = sc->sk_vpd_readonly_len; while (len >= 3) { if ((*dp == 'P' && *(dp+1) == 'N') || (*dp == 'E' && *(dp+1) == 'C') || (*dp == 'M' && *(dp+1) == 'N') || (*dp == 'S' && *(dp+1) == 'N')) { l = 0; while (l < *(dp+2)) { buf[l] = *(dp+3+l); ++l; } buf[l] = '\0'; device_printf(dev, "%c%c: %s\n", *dp, *(dp+1), buf); len -= (3 + l); dp += (3 + l); } else { len -= (3 + *(dp+2)); dp += (3 + *(dp+2)); } } } device_printf(dev, "chip ver = 0x%02x\n", sc->sk_type); device_printf(dev, "chip rev = 0x%02x\n", sc->sk_rev); device_printf(dev, "SK_EPROM0 = 0x%02x\n", skrs); device_printf(dev, "SRAM size = 0x%06x\n", sc->sk_ramsize); } sc->sk_devs[SK_PORT_A] = device_add_child(dev, "sk", -1); if (sc->sk_devs[SK_PORT_A] == NULL) { device_printf(dev, "failed to add child for PORT_A\n"); error = ENXIO; goto fail; } port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT); if (port == NULL) { device_printf(dev, "failed to allocate memory for " "ivars of PORT_A\n"); error = ENXIO; goto fail; } *port = SK_PORT_A; device_set_ivars(sc->sk_devs[SK_PORT_A], port); if (!(sk_win_read_1(sc, SK_CONFIG) & SK_CONFIG_SINGLEMAC)) { sc->sk_devs[SK_PORT_B] = device_add_child(dev, "sk", -1); if (sc->sk_devs[SK_PORT_B] == NULL) { device_printf(dev, "failed to add child for PORT_B\n"); error = ENXIO; goto fail; } port = malloc(sizeof(int), M_DEVBUF, M_NOWAIT); if (port == NULL) { device_printf(dev, "failed to allocate memory for " "ivars of PORT_B\n"); error = ENXIO; goto fail; } *port = SK_PORT_B; device_set_ivars(sc->sk_devs[SK_PORT_B], port); } /* Turn on the 'driver is loaded' LED. 
*/ CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_ON); bus_generic_attach(dev); /* Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->sk_irq, INTR_TYPE_NET|INTR_MPSAFE, sk_intr, sc, &sc->sk_intrhand); if (error) { printf("skc%d: couldn't set up irq\n", unit); goto fail; } fail: if (error) skc_detach(dev); return(error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. */ static int sk_detach(dev) device_t dev; { struct sk_if_softc *sc_if; struct ifnet *ifp; sc_if = device_get_softc(dev); KASSERT(mtx_initialized(&sc_if->sk_softc->sk_mtx), ("sk mutex not initialized in sk_detach")); SK_IF_LOCK(sc_if); ifp = sc_if->sk_ifp; /* These should only be active if attach_xmac succeeded */ if (device_is_attached(dev)) { sk_stop(sc_if); /* Can't hold locks while calling detach */ SK_IF_UNLOCK(sc_if); ether_ifdetach(ifp); if_free(ifp); SK_IF_LOCK(sc_if); } /* * We're generally called from skc_detach() which is using * device_delete_child() to get to here. It's already trashed * miibus for us, so don't do it here or we'll panic. 
*/ /* if (sc_if->sk_miibus != NULL) device_delete_child(dev, sc_if->sk_miibus); */ bus_generic_detach(dev); if (sc_if->sk_cdata.sk_jumbo_buf != NULL) sk_free_jumbo_mem(sc_if); if (sc_if->sk_rdata != NULL) { contigfree(sc_if->sk_rdata, sizeof(struct sk_ring_data), M_DEVBUF); } SK_IF_UNLOCK(sc_if); return(0); } static int skc_detach(dev) device_t dev; { struct sk_softc *sc; sc = device_get_softc(dev); KASSERT(mtx_initialized(&sc->sk_mtx), ("sk mutex not initialized")); if (device_is_alive(dev)) { if (sc->sk_devs[SK_PORT_A] != NULL) { free(device_get_ivars(sc->sk_devs[SK_PORT_A]), M_DEVBUF); device_delete_child(dev, sc->sk_devs[SK_PORT_A]); } if (sc->sk_devs[SK_PORT_B] != NULL) { free(device_get_ivars(sc->sk_devs[SK_PORT_B]), M_DEVBUF); device_delete_child(dev, sc->sk_devs[SK_PORT_B]); } bus_generic_detach(dev); } if (sc->sk_vpd_prodname != NULL) free(sc->sk_vpd_prodname, M_DEVBUF); if (sc->sk_vpd_readonly != NULL) free(sc->sk_vpd_readonly, M_DEVBUF); if (sc->sk_intrhand) bus_teardown_intr(dev, sc->sk_irq, sc->sk_intrhand); if (sc->sk_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sk_irq); if (sc->sk_res) bus_release_resource(dev, SK_RES, SK_RID, sc->sk_res); mtx_destroy(&sc->sk_mtx); return(0); } static int sk_encap(sc_if, m_head, txidx) struct sk_if_softc *sc_if; struct mbuf *m_head; u_int32_t *txidx; { struct sk_tx_desc *f = NULL; struct mbuf *m; u_int32_t frag, cur, cnt = 0; SK_IF_LOCK_ASSERT(sc_if); m = m_head; cur = frag = *txidx; /* * Start packing the mbufs in this chain into * the fragment pointers. Stop when we run out * of fragments or hit the end of the mbuf chain. 
*/ for (m = m_head; m != NULL; m = m->m_next) { if (m->m_len != 0) { if ((SK_TX_RING_CNT - (sc_if->sk_cdata.sk_tx_cnt + cnt)) < 2) return(ENOBUFS); f = &sc_if->sk_rdata->sk_tx_ring[frag]; f->sk_data_lo = vtophys(mtod(m, vm_offset_t)); f->sk_ctl = m->m_len | SK_OPCODE_DEFAULT; if (cnt == 0) f->sk_ctl |= SK_TXCTL_FIRSTFRAG; else f->sk_ctl |= SK_TXCTL_OWN; cur = frag; SK_INC(frag, SK_TX_RING_CNT); cnt++; } } if (m != NULL) return(ENOBUFS); sc_if->sk_rdata->sk_tx_ring[cur].sk_ctl |= SK_TXCTL_LASTFRAG|SK_TXCTL_EOF_INTR; sc_if->sk_cdata.sk_tx_chain[cur].sk_mbuf = m_head; sc_if->sk_rdata->sk_tx_ring[*txidx].sk_ctl |= SK_TXCTL_OWN; sc_if->sk_cdata.sk_tx_cnt += cnt; *txidx = frag; return(0); } static void sk_start(ifp) struct ifnet *ifp; { struct sk_softc *sc; struct sk_if_softc *sc_if; struct mbuf *m_head = NULL; u_int32_t idx; sc_if = ifp->if_softc; sc = sc_if->sk_softc; SK_IF_LOCK(sc_if); idx = sc_if->sk_cdata.sk_tx_prod; while(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf == NULL) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* * Pack the data into the transmit ring. If we * don't have room, set the OACTIVE flag and wait * for the NIC to drain the ring. */ if (sk_encap(sc_if, m_head, &idx)) { IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_flags |= IFF_OACTIVE; break; } /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, m_head); } /* Transmit */ if (idx != sc_if->sk_cdata.sk_tx_prod) { sc_if->sk_cdata.sk_tx_prod = idx; CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START); /* Set a timeout in case the chip goes out to lunch. 
 */
		ifp->if_timer = 5;
	}
	SK_IF_UNLOCK(sc_if);

	return;
}

/*
 * Transmit watchdog: fires when if_timer (armed in sk_start) expires
 * before sk_txeof() clears it.  Reports the hang and reinitializes
 * the interface.
 * NOTE(review): no if_oerrors++ is counted here and no lock is taken
 * before calling sk_init(); worth confirming against later driver
 * revisions.
 */
static void
sk_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct sk_if_softc	*sc_if;

	sc_if = ifp->if_softc;

	printf("sk%d: watchdog timeout\n", sc_if->sk_unit);
	/* Clearing IFF_RUNNING makes sk_init() perform a full reinit. */
	ifp->if_flags &= ~IFF_RUNNING;
	sk_init(sc_if);

	return;
}

/*
 * System-shutdown hook: quiesce the controller so it stops DMA.
 */
static void
skc_shutdown(dev)
	device_t		dev;
{
	struct sk_softc		*sc;

	sc = device_get_softc(dev);
	SK_LOCK(sc);

	/* Turn off the 'driver is loaded' LED. */
	CSR_WRITE_2(sc, SK_LED, SK_LED_GREEN_OFF);

	/*
	 * Reset the GEnesis controller. Doing this should also
	 * assert the resets on the attached XMAC(s).
	 */
	sk_reset(sc);
	SK_UNLOCK(sc);

	return;
}

/*
 * Receive-completion handler: walk the RX ring from the producer
 * index, handing completed frames to the network stack, until a
 * descriptor still owned by the NIC (SK_RXCTL_OWN set) is reached.
 * Called from sk_intr() with the softc lock held (asserted below).
 */
static void
sk_rxeof(sc_if)
	struct sk_if_softc	*sc_if;
{
	struct sk_softc		*sc;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct sk_chain		*cur_rx;
	int			total_len = 0;
	int			i;
	u_int32_t		rxstat;

	sc = sc_if->sk_softc;
	ifp = sc_if->sk_ifp;
	i = sc_if->sk_cdata.sk_rx_prod;
	cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];

	SK_LOCK_ASSERT(sc);

	while(!(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl & SK_RXCTL_OWN)) {

		cur_rx = &sc_if->sk_cdata.sk_rx_chain[i];
		rxstat = sc_if->sk_rdata->sk_rx_ring[i].sk_xmac_rxstat;
		/* Detach the mbuf from the slot before advancing. */
		m = cur_rx->sk_mbuf;
		cur_rx->sk_mbuf = NULL;
		total_len = SK_RXBYTES(sc_if->sk_rdata->sk_rx_ring[i].sk_ctl);
		SK_INC(i, SK_RX_RING_CNT);

		if (rxstat & XM_RXSTAT_ERRFRAME) {
			/* Bad frame: count the error, recycle the buffer. */
			ifp->if_ierrors++;
			sk_newbuf(sc_if, cur_rx, m);
			continue;
		}

		/*
		 * Try to allocate a new jumbo buffer. If that
		 * fails, copy the packet to mbufs and put the
		 * jumbo buffer back in the ring so it can be
		 * re-used. If allocating mbufs fails, then we
		 * have to drop the packet.
*/ if (sk_newbuf(sc_if, cur_rx, NULL) == ENOBUFS) { struct mbuf *m0; m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, ifp, NULL); sk_newbuf(sc_if, cur_rx, m); if (m0 == NULL) { printf("sk%d: no receive buffers " "available -- packet dropped!\n", sc_if->sk_unit); ifp->if_ierrors++; continue; } m = m0; } else { m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = total_len; } ifp->if_ipackets++; SK_UNLOCK(sc); (*ifp->if_input)(ifp, m); SK_LOCK(sc); } sc_if->sk_cdata.sk_rx_prod = i; return; } static void sk_txeof(sc_if) struct sk_if_softc *sc_if; { struct sk_softc *sc; struct sk_tx_desc *cur_tx; struct ifnet *ifp; u_int32_t idx; sc = sc_if->sk_softc; ifp = sc_if->sk_ifp; /* * Go through our tx ring and free mbufs for those * frames that have been sent. */ idx = sc_if->sk_cdata.sk_tx_cons; while(idx != sc_if->sk_cdata.sk_tx_prod) { cur_tx = &sc_if->sk_rdata->sk_tx_ring[idx]; if (cur_tx->sk_ctl & SK_TXCTL_OWN) break; if (cur_tx->sk_ctl & SK_TXCTL_LASTFRAG) ifp->if_opackets++; if (sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf != NULL) { m_freem(sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf); sc_if->sk_cdata.sk_tx_chain[idx].sk_mbuf = NULL; } sc_if->sk_cdata.sk_tx_cnt--; SK_INC(idx, SK_TX_RING_CNT); } if (sc_if->sk_cdata.sk_tx_cnt == 0) { ifp->if_timer = 0; } else /* nudge chip to keep tx ring moving */ CSR_WRITE_4(sc, sc_if->sk_tx_bmu, SK_TXBMU_TX_START); if (sc_if->sk_cdata.sk_tx_cnt < SK_TX_RING_CNT - 2) ifp->if_flags &= ~IFF_OACTIVE; sc_if->sk_cdata.sk_tx_cons = idx; } static void sk_tick(xsc_if) void *xsc_if; { struct sk_if_softc *sc_if; struct mii_data *mii; struct ifnet *ifp; int i; sc_if = xsc_if; SK_IF_LOCK(sc_if); ifp = sc_if->sk_ifp; mii = device_get_softc(sc_if->sk_miibus); if (!(ifp->if_flags & IFF_UP)) { SK_IF_UNLOCK(sc_if); return; } if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { sk_intr_bcom(sc_if); SK_IF_UNLOCK(sc_if); return; } /* * According to SysKonnect, the correct way to verify that * the link has come back up is to poll bit 0 of the GPIO * register three 
times. This pin has the signal from the * link_sync pin connected to it; if we read the same link * state 3 times in a row, we know the link is up. */ for (i = 0; i < 3; i++) { if (SK_XM_READ_2(sc_if, XM_GPIO) & XM_GPIO_GP0_SET) break; } if (i != 3) { sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); SK_IF_UNLOCK(sc_if); return; } /* Turn the GP0 interrupt back on. */ SK_XM_CLRBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); SK_XM_READ_2(sc_if, XM_ISR); mii_tick(mii); untimeout(sk_tick, sc_if, sc_if->sk_tick_ch); SK_IF_UNLOCK(sc_if); return; } static void sk_intr_bcom(sc_if) struct sk_if_softc *sc_if; { struct mii_data *mii; struct ifnet *ifp; int status; mii = device_get_softc(sc_if->sk_miibus); ifp = sc_if->sk_ifp; SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); /* * Read the PHY interrupt register to make sure * we clear any pending interrupts. */ status = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_ISR); if (!(ifp->if_flags & IFF_RUNNING)) { sk_init_xmac(sc_if); return; } if (status & (BRGPHY_ISR_LNK_CHG|BRGPHY_ISR_AN_PR)) { int lstat; lstat = sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_AUXSTS); if (!(lstat & BRGPHY_AUXSTS_LINK) && sc_if->sk_link) { mii_mediachg(mii); /* Turn off the link LED. */ SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF); sc_if->sk_link = 0; } else if (status & BRGPHY_ISR_LNK_CHG) { sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_IMR, 0xFF00); mii_tick(mii); sc_if->sk_link = 1; /* Turn on the link LED. */ SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON|SK_LINKLED_LINKSYNC_OFF| SK_LINKLED_BLINK_OFF); } else { mii_tick(mii); sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); } } SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); return; } static void sk_intr_xmac(sc_if) struct sk_if_softc *sc_if; { struct sk_softc *sc; u_int16_t status; sc = sc_if->sk_softc; status = SK_XM_READ_2(sc_if, XM_ISR); /* * Link has gone down. 
Start MII tick timeout to * watch for link resync. */ if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) { if (status & XM_ISR_GP0_SET) { SK_XM_SETBIT_2(sc_if, XM_IMR, XM_IMR_GP0_SET); sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); } if (status & XM_ISR_AUTONEG_DONE) { sc_if->sk_tick_ch = timeout(sk_tick, sc_if, hz); } } if (status & XM_IMR_TX_UNDERRUN) SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_TXFIFO); if (status & XM_IMR_RX_OVERRUN) SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_FLUSH_RXFIFO); status = SK_XM_READ_2(sc_if, XM_ISR); return; } static void sk_intr_yukon(sc_if) struct sk_if_softc *sc_if; { int status; status = SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR); return; } static void sk_intr(xsc) void *xsc; { struct sk_softc *sc = xsc; struct sk_if_softc *sc_if0 = NULL, *sc_if1 = NULL; struct ifnet *ifp0 = NULL, *ifp1 = NULL; u_int32_t status; SK_LOCK(sc); sc_if0 = sc->sk_if[SK_PORT_A]; sc_if1 = sc->sk_if[SK_PORT_B]; if (sc_if0 != NULL) ifp0 = sc_if0->sk_ifp; if (sc_if1 != NULL) ifp1 = sc_if1->sk_ifp; for (;;) { status = CSR_READ_4(sc, SK_ISSR); if (!(status & sc->sk_intrmask)) break; /* Handle receive interrupts first. */ if (status & SK_ISR_RX1_EOF) { sk_rxeof(sc_if0); CSR_WRITE_4(sc, SK_BMU_RX_CSR0, SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); } if (status & SK_ISR_RX2_EOF) { sk_rxeof(sc_if1); CSR_WRITE_4(sc, SK_BMU_RX_CSR1, SK_RXBMU_CLR_IRQ_EOF|SK_RXBMU_RX_START); } /* Then transmit interrupts. */ if (status & SK_ISR_TX1_S_EOF) { sk_txeof(sc_if0); CSR_WRITE_4(sc, SK_BMU_TXS_CSR0, SK_TXBMU_CLR_IRQ_EOF); } if (status & SK_ISR_TX2_S_EOF) { sk_txeof(sc_if1); CSR_WRITE_4(sc, SK_BMU_TXS_CSR1, SK_TXBMU_CLR_IRQ_EOF); } /* Then MAC interrupts. 
*/ if (status & SK_ISR_MAC1 && ifp0->if_flags & IFF_RUNNING) { if (sc->sk_type == SK_GENESIS) sk_intr_xmac(sc_if0); else sk_intr_yukon(sc_if0); } if (status & SK_ISR_MAC2 && ifp1->if_flags & IFF_RUNNING) { if (sc->sk_type == SK_GENESIS) sk_intr_xmac(sc_if1); else sk_intr_yukon(sc_if1); } if (status & SK_ISR_EXTERNAL_REG) { if (ifp0 != NULL && sc_if0->sk_phytype == SK_PHYTYPE_BCOM) sk_intr_bcom(sc_if0); if (ifp1 != NULL && sc_if1->sk_phytype == SK_PHYTYPE_BCOM) sk_intr_bcom(sc_if1); } } CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); if (ifp0 != NULL && !IFQ_DRV_IS_EMPTY(&ifp0->if_snd)) sk_start(ifp0); if (ifp1 != NULL && !IFQ_DRV_IS_EMPTY(&ifp1->if_snd)) sk_start(ifp1); SK_UNLOCK(sc); return; } static void sk_init_xmac(sc_if) struct sk_if_softc *sc_if; { struct sk_softc *sc; struct ifnet *ifp; struct sk_bcom_hack bhack[] = { { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, { 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 }, { 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 }, { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 }, { 0, 0 } }; sc = sc_if->sk_softc; ifp = sc_if->sk_ifp; /* Unreset the XMAC. */ SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_UNRESET); DELAY(1000); /* Reset the XMAC's internal state. */ SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC); /* Save the XMAC II revision */ sc_if->sk_xmac_rev = XM_XMAC_REV(SK_XM_READ_4(sc_if, XM_DEVID)); /* * Perform additional initialization for external PHYs, * namely for the 1000baseTX cards that use the XMAC's * GMII mode. */ if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { int i = 0; u_int32_t val; /* Take PHY out of reset. */ val = sk_win_read_4(sc, SK_GPIO); if (sc_if->sk_port == SK_PORT_A) val |= SK_GPIO_DIR0|SK_GPIO_DAT0; else val |= SK_GPIO_DIR2|SK_GPIO_DAT2; sk_win_write_4(sc, SK_GPIO, val); /* Enable GMII mode on the XMAC. 
*/ SK_XM_SETBIT_2(sc_if, XM_HWCFG, XM_HWCFG_GMIIMODE); sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_BMCR, BRGPHY_BMCR_RESET); DELAY(10000); sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, BRGPHY_MII_IMR, 0xFFF0); /* * Early versions of the BCM5400 apparently have * a bug that requires them to have their reserved * registers initialized to some magic values. I don't * know what the numbers do, I'm just the messenger. */ if (sk_xmac_miibus_readreg(sc_if, SK_PHYADDR_BCOM, 0x03) == 0x6041) { while(bhack[i].reg) { sk_xmac_miibus_writereg(sc_if, SK_PHYADDR_BCOM, bhack[i].reg, bhack[i].val); i++; } } } /* Set station address */ SK_XM_WRITE_2(sc_if, XM_PAR0, *(u_int16_t *)(&IFP2ENADDR(sc_if->sk_ifp)[0])); SK_XM_WRITE_2(sc_if, XM_PAR1, *(u_int16_t *)(&IFP2ENADDR(sc_if->sk_ifp)[2])); SK_XM_WRITE_2(sc_if, XM_PAR2, *(u_int16_t *)(&IFP2ENADDR(sc_if->sk_ifp)[4])); SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_USE_STATION); if (ifp->if_flags & IFF_BROADCAST) { SK_XM_CLRBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD); } else { SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_NOBROAD); } /* We don't need the FCS appended to the packet. */ SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_STRIPFCS); /* We want short frames padded to 60 bytes. */ SK_XM_SETBIT_2(sc_if, XM_TXCMD, XM_TXCMD_AUTOPAD); /* * Enable the reception of all error frames. This is is * a necessary evil due to the design of the XMAC. The * XMAC's receive FIFO is only 8K in size, however jumbo * frames can be up to 9000 bytes in length. When bad * frame filtering is enabled, the XMAC's RX FIFO operates * in 'store and forward' mode. For this to work, the * entire frame has to fit into the FIFO, but that means * that jumbo frames larger than 8192 bytes will be * truncated. Disabling all bad frame filtering causes * the RX FIFO to operate in streaming mode, in which * case the XMAC will start transfering frames out of the * RX FIFO as soon as the FIFO threshold is reached. 
*/ SK_XM_SETBIT_4(sc_if, XM_MODE, XM_MODE_RX_BADFRAMES| XM_MODE_RX_GIANTS|XM_MODE_RX_RUNTS|XM_MODE_RX_CRCERRS| XM_MODE_RX_INRANGELEN); if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) SK_XM_SETBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK); else SK_XM_CLRBIT_2(sc_if, XM_RXCMD, XM_RXCMD_BIGPKTOK); /* * Bump up the transmit threshold. This helps hold off transmit * underruns when we're blasting traffic from both ports at once. */ SK_XM_WRITE_2(sc_if, XM_TX_REQTHRESH, SK_XM_TX_FIFOTHRESH); /* Set promiscuous mode */ sk_setpromisc(sc_if); /* Set multicast filter */ sk_setmulti(sc_if); /* Clear and enable interrupts */ SK_XM_READ_2(sc_if, XM_ISR); if (sc_if->sk_phytype == SK_PHYTYPE_XMAC) SK_XM_WRITE_2(sc_if, XM_IMR, XM_INTRS); else SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF); /* Configure MAC arbiter */ switch(sc_if->sk_xmac_rev) { case XM_XMAC_REV_B2: sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_B2); sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_B2); sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_B2); sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_B2); sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_B2); sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_B2); sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_B2); sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_B2); sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2); break; case XM_XMAC_REV_C1: sk_win_write_1(sc, SK_RCINIT_RX1, SK_RCINIT_XMAC_C1); sk_win_write_1(sc, SK_RCINIT_TX1, SK_RCINIT_XMAC_C1); sk_win_write_1(sc, SK_RCINIT_RX2, SK_RCINIT_XMAC_C1); sk_win_write_1(sc, SK_RCINIT_TX2, SK_RCINIT_XMAC_C1); sk_win_write_1(sc, SK_MINIT_RX1, SK_MINIT_XMAC_C1); sk_win_write_1(sc, SK_MINIT_TX1, SK_MINIT_XMAC_C1); sk_win_write_1(sc, SK_MINIT_RX2, SK_MINIT_XMAC_C1); sk_win_write_1(sc, SK_MINIT_TX2, SK_MINIT_XMAC_C1); sk_win_write_1(sc, SK_RECOVERY_CTL, SK_RECOVERY_XMAC_B2); break; default: break; } sk_win_write_2(sc, SK_MACARB_CTL, SK_MACARBCTL_UNRESET|SK_MACARBCTL_FASTOE_OFF); sc_if->sk_link = 1; return; } static 
void sk_init_yukon(sc_if) struct sk_if_softc *sc_if; { u_int32_t phy; u_int16_t reg; struct sk_softc *sc; struct ifnet *ifp; int i; sc = sc_if->sk_softc; ifp = sc_if->sk_ifp; if (sc->sk_type == SK_YUKON_LITE && sc->sk_rev == SK_YUKON_LITE_REV_A3) { /* Take PHY out of reset. */ sk_win_write_4(sc, SK_GPIO, (sk_win_read_4(sc, SK_GPIO) | SK_GPIO_DIR9) & ~SK_GPIO_DAT9); } /* GMAC and GPHY Reset */ SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, SK_GPHY_RESET_SET); SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET); DELAY(1000); SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_CLEAR); SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_RESET_SET); DELAY(1000); phy = SK_GPHY_INT_POL_HI | SK_GPHY_DIS_FC | SK_GPHY_DIS_SLEEP | SK_GPHY_ENA_XC | SK_GPHY_ANEG_ALL | SK_GPHY_ENA_PAUSE; switch(sc_if->sk_softc->sk_pmd) { case IFM_1000_SX: case IFM_1000_LX: phy |= SK_GPHY_FIBER; break; case IFM_1000_CX: case IFM_1000_T: phy |= SK_GPHY_COPPER; break; } SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_SET); DELAY(1000); SK_IF_WRITE_4(sc_if, 0, SK_GPHY_CTRL, phy | SK_GPHY_RESET_CLEAR); SK_IF_WRITE_4(sc_if, 0, SK_GMAC_CTRL, SK_GMAC_LOOP_OFF | SK_GMAC_PAUSE_ON | SK_GMAC_RESET_CLEAR); /* unused read of the interrupt source register */ SK_IF_READ_2(sc_if, 0, SK_GMAC_ISR); reg = SK_YU_READ_2(sc_if, YUKON_PAR); /* MIB Counter Clear Mode set */ reg |= YU_PAR_MIB_CLR; SK_YU_WRITE_2(sc_if, YUKON_PAR, reg); /* MIB Counter Clear Mode clear */ reg &= ~YU_PAR_MIB_CLR; SK_YU_WRITE_2(sc_if, YUKON_PAR, reg); /* receive control reg */ SK_YU_WRITE_2(sc_if, YUKON_RCR, YU_RCR_CRCR); /* transmit parameter register */ SK_YU_WRITE_2(sc_if, YUKON_TPR, YU_TPR_JAM_LEN(0x3) | YU_TPR_JAM_IPG(0xb) | YU_TPR_JAM2DATA_IPG(0x1a) ); /* serial mode register */ reg = YU_SMR_DATA_BLIND(0x1c) | YU_SMR_MFL_VLAN | YU_SMR_IPG_DATA(0x1e); if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN)) reg |= YU_SMR_MFL_JUMBO; SK_YU_WRITE_2(sc_if, YUKON_SMR, reg); /* Setup Yukon's address */ for (i = 0; i < 3; i++) { /* Write 
Source Address 1 (unicast filter) */ SK_YU_WRITE_2(sc_if, YUKON_SAL1 + i * 4, IFP2ENADDR(sc_if->sk_ifp)[i * 2] | IFP2ENADDR(sc_if->sk_ifp)[i * 2 + 1] << 8); } for (i = 0; i < 3; i++) { reg = sk_win_read_2(sc_if->sk_softc, SK_MAC1_0 + i * 2 + sc_if->sk_port * 8); SK_YU_WRITE_2(sc_if, YUKON_SAL2 + i * 4, reg); } /* Set promiscuous mode */ sk_setpromisc(sc_if); /* Set multicast filter */ sk_setmulti(sc_if); /* enable interrupt mask for counter overflows */ SK_YU_WRITE_2(sc_if, YUKON_TIMR, 0); SK_YU_WRITE_2(sc_if, YUKON_RIMR, 0); SK_YU_WRITE_2(sc_if, YUKON_TRIMR, 0); /* Configure RX MAC FIFO */ SK_IF_WRITE_1(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_CLEAR); SK_IF_WRITE_4(sc_if, 0, SK_RXMF1_CTRL_TEST, SK_RFCTL_OPERATION_ON); /* Configure TX MAC FIFO */ SK_IF_WRITE_1(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_CLEAR); SK_IF_WRITE_4(sc_if, 0, SK_TXMF1_CTRL_TEST, SK_TFCTL_OPERATION_ON); } /* * Note that to properly initialize any part of the GEnesis chip, * you first have to take it out of reset mode. */ static void sk_init(xsc) void *xsc; { struct sk_if_softc *sc_if = xsc; struct sk_softc *sc; struct ifnet *ifp; struct mii_data *mii; u_int16_t reg; u_int32_t imr; SK_IF_LOCK(sc_if); ifp = sc_if->sk_ifp; sc = sc_if->sk_softc; mii = device_get_softc(sc_if->sk_miibus); if (ifp->if_flags & IFF_RUNNING) { SK_IF_UNLOCK(sc_if); return; } /* Cancel pending I/O and free all RX/TX buffers. 
*/ sk_stop(sc_if); if (sc->sk_type == SK_GENESIS) { /* Configure LINK_SYNC LED */ SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_ON); SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_ON); /* Configure RX LED */ SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_START); /* Configure TX LED */ SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_TXLEDCTL_COUNTER_START); } /* Configure I2C registers */ /* Configure XMAC(s) */ switch (sc->sk_type) { case SK_GENESIS: sk_init_xmac(sc_if); break; case SK_YUKON: case SK_YUKON_LITE: case SK_YUKON_LP: sk_init_yukon(sc_if); break; } mii_mediachg(mii); if (sc->sk_type == SK_GENESIS) { /* Configure MAC FIFOs */ SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_UNRESET); SK_IF_WRITE_4(sc_if, 0, SK_RXF1_END, SK_FIFO_END); SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_ON); SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_UNRESET); SK_IF_WRITE_4(sc_if, 0, SK_TXF1_END, SK_FIFO_END); SK_IF_WRITE_4(sc_if, 0, SK_TXF1_CTL, SK_FIFO_ON); } /* Configure transmit arbiter(s) */ SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_ON|SK_TXARCTL_FSYNC_ON); /* Configure RAMbuffers */ SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_UNRESET); SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_START, sc_if->sk_rx_ramstart); SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_WR_PTR, sc_if->sk_rx_ramstart); SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_RD_PTR, sc_if->sk_rx_ramstart); SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_END, sc_if->sk_rx_ramend); SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_ON); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_UNRESET); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_STORENFWD_ON); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_START, sc_if->sk_tx_ramstart); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_WR_PTR, sc_if->sk_tx_ramstart); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_RD_PTR, sc_if->sk_tx_ramstart); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_END, sc_if->sk_tx_ramend); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_ON); /* Configure BMUs */ SK_IF_WRITE_4(sc_if, 0, 
SK_RXQ1_BMU_CSR, SK_RXBMU_ONLINE); SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_LO, vtophys(&sc_if->sk_rdata->sk_rx_ring[0])); SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_CURADDR_HI, 0); SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_ONLINE); SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_LO, vtophys(&sc_if->sk_rdata->sk_tx_ring[0])); SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_CURADDR_HI, 0); /* Init descriptors */ if (sk_init_rx_ring(sc_if) == ENOBUFS) { printf("sk%d: initialization failed: no " "memory for rx buffers\n", sc_if->sk_unit); sk_stop(sc_if); SK_IF_UNLOCK(sc_if); return; } sk_init_tx_ring(sc_if); /* Set interrupt moderation if changed via sysctl. */ /* SK_LOCK(sc); */ imr = sk_win_read_4(sc, SK_IMTIMERINIT); if (imr != SK_IM_USECS(sc->sk_int_mod)) { sk_win_write_4(sc, SK_IMTIMERINIT, SK_IM_USECS(sc->sk_int_mod)); printf("skc%d: interrupt moderation is %d us\n", sc->sk_unit, sc->sk_int_mod); } /* SK_UNLOCK(sc); */ /* Configure interrupt handling */ CSR_READ_4(sc, SK_ISSR); if (sc_if->sk_port == SK_PORT_A) sc->sk_intrmask |= SK_INTRS1; else sc->sk_intrmask |= SK_INTRS2; sc->sk_intrmask |= SK_ISR_EXTERNAL_REG; CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); /* Start BMUs. 
*/ SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_RX_START); switch(sc->sk_type) { case SK_GENESIS: /* Enable XMACs TX and RX state machines */ SK_XM_CLRBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_IGNPAUSE); SK_XM_SETBIT_2(sc_if, XM_MMUCMD, XM_MMUCMD_TX_ENB|XM_MMUCMD_RX_ENB); break; case SK_YUKON: case SK_YUKON_LITE: case SK_YUKON_LP: reg = SK_YU_READ_2(sc_if, YUKON_GPCR); reg |= YU_GPCR_TXEN | YU_GPCR_RXEN; reg &= ~(YU_GPCR_SPEED_EN | YU_GPCR_DPLX_EN); SK_YU_WRITE_2(sc_if, YUKON_GPCR, reg); } ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; SK_IF_UNLOCK(sc_if); return; } static void sk_stop(sc_if) struct sk_if_softc *sc_if; { int i; struct sk_softc *sc; struct ifnet *ifp; SK_IF_LOCK(sc_if); sc = sc_if->sk_softc; ifp = sc_if->sk_ifp; untimeout(sk_tick, sc_if, sc_if->sk_tick_ch); if (sc_if->sk_phytype == SK_PHYTYPE_BCOM) { u_int32_t val; /* Put PHY back into reset. */ val = sk_win_read_4(sc, SK_GPIO); if (sc_if->sk_port == SK_PORT_A) { val |= SK_GPIO_DIR0; val &= ~SK_GPIO_DAT0; } else { val |= SK_GPIO_DIR2; val &= ~SK_GPIO_DAT2; } sk_win_write_4(sc, SK_GPIO, val); } /* Turn off various components of this interface. 
*/ SK_XM_SETBIT_2(sc_if, XM_GPIO, XM_GPIO_RESETMAC); switch (sc->sk_type) { case SK_GENESIS: SK_IF_WRITE_2(sc_if, 0, SK_TXF1_MACCTL, SK_TXMACCTL_XMAC_RESET); SK_IF_WRITE_4(sc_if, 0, SK_RXF1_CTL, SK_FIFO_RESET); break; case SK_YUKON: case SK_YUKON_LITE: case SK_YUKON_LP: SK_IF_WRITE_1(sc_if,0, SK_RXMF1_CTRL_TEST, SK_RFCTL_RESET_SET); SK_IF_WRITE_1(sc_if,0, SK_TXMF1_CTRL_TEST, SK_TFCTL_RESET_SET); break; } SK_IF_WRITE_4(sc_if, 0, SK_RXQ1_BMU_CSR, SK_RXBMU_OFFLINE); SK_IF_WRITE_4(sc_if, 0, SK_RXRB1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF); SK_IF_WRITE_4(sc_if, 1, SK_TXQS1_BMU_CSR, SK_TXBMU_OFFLINE); SK_IF_WRITE_4(sc_if, 1, SK_TXRBS1_CTLTST, SK_RBCTL_RESET|SK_RBCTL_OFF); SK_IF_WRITE_1(sc_if, 0, SK_TXAR1_COUNTERCTL, SK_TXARCTL_OFF); SK_IF_WRITE_1(sc_if, 0, SK_RXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP); SK_IF_WRITE_1(sc_if, 0, SK_TXLED1_CTL, SK_RXLEDCTL_COUNTER_STOP); SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_OFF); SK_IF_WRITE_1(sc_if, 0, SK_LINKLED1_CTL, SK_LINKLED_LINKSYNC_OFF); /* Disable interrupts */ if (sc_if->sk_port == SK_PORT_A) sc->sk_intrmask &= ~SK_INTRS1; else sc->sk_intrmask &= ~SK_INTRS2; CSR_WRITE_4(sc, SK_IMR, sc->sk_intrmask); SK_XM_READ_2(sc_if, XM_ISR); SK_XM_WRITE_2(sc_if, XM_IMR, 0xFFFF); /* Free RX and TX mbufs still in the queues. 
*/ for (i = 0; i < SK_RX_RING_CNT; i++) { if (sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf != NULL) { m_freem(sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf); sc_if->sk_cdata.sk_rx_chain[i].sk_mbuf = NULL; } } for (i = 0; i < SK_TX_RING_CNT; i++) { if (sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf != NULL) { m_freem(sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf); sc_if->sk_cdata.sk_tx_chain[i].sk_mbuf = NULL; } } ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE); SK_IF_UNLOCK(sc_if); return; } static int sysctl_int_range(SYSCTL_HANDLER_ARGS, int low, int high) { int error, value; if (!arg1) return (EINVAL); value = *(int *)arg1; error = sysctl_handle_int(oidp, &value, 0, req); if (error || !req->newptr) return (error); if (value < low || value > high) return (EINVAL); *(int *)arg1 = value; return (0); } static int sysctl_hw_sk_int_mod(SYSCTL_HANDLER_ARGS) { return (sysctl_int_range(oidp, arg1, arg2, req, SK_IM_MIN, SK_IM_MAX)); } Index: stable/6/sys/pci/if_ste.c =================================================================== --- stable/6/sys/pci/if_ste.c (revision 149421) +++ stable/6/sys/pci/if_ste.c (revision 149422) @@ -1,1701 +1,1703 @@ /*- * Copyright (c) 1997, 1998, 1999 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. 
Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include #include #include #include #include /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" #define STE_USEIOSPACE #include MODULE_DEPEND(ste, pci, 1, 1, 1); MODULE_DEPEND(ste, ether, 1, 1, 1); MODULE_DEPEND(ste, miibus, 1, 1, 1); /* * Various supported device vendors/types and their names. 
*/ static struct ste_type ste_devs[] = { { ST_VENDORID, ST_DEVICEID_ST201, "Sundance ST201 10/100BaseTX" }, { DL_VENDORID, DL_DEVICEID_DL10050, "D-Link DL10050 10/100BaseTX" }, { 0, 0, NULL } }; static int ste_probe(device_t); static int ste_attach(device_t); static int ste_detach(device_t); static void ste_init(void *); static void ste_intr(void *); static void ste_rxeoc(struct ste_softc *); static void ste_rxeof(struct ste_softc *); static void ste_txeoc(struct ste_softc *); static void ste_txeof(struct ste_softc *); static void ste_stats_update(void *); static void ste_stop(struct ste_softc *); static void ste_reset(struct ste_softc *); static int ste_ioctl(struct ifnet *, u_long, caddr_t); static int ste_encap(struct ste_softc *, struct ste_chain *, struct mbuf *); static void ste_start(struct ifnet *); static void ste_watchdog(struct ifnet *); static void ste_shutdown(device_t); static int ste_newbuf(struct ste_softc *, struct ste_chain_onefrag *, struct mbuf *); static int ste_ifmedia_upd(struct ifnet *); static void ste_ifmedia_sts(struct ifnet *, struct ifmediareq *); static void ste_mii_sync(struct ste_softc *); static void ste_mii_send(struct ste_softc *, u_int32_t, int); static int ste_mii_readreg(struct ste_softc *, struct ste_mii_frame *); static int ste_mii_writereg(struct ste_softc *, struct ste_mii_frame *); static int ste_miibus_readreg(device_t, int, int); static int ste_miibus_writereg(device_t, int, int, int); static void ste_miibus_statchg(device_t); static int ste_eeprom_wait(struct ste_softc *); static int ste_read_eeprom(struct ste_softc *, caddr_t, int, int, int); static void ste_wait(struct ste_softc *); static void ste_setmulti(struct ste_softc *); static int ste_init_rx_list(struct ste_softc *); static void ste_init_tx_list(struct ste_softc *); #ifdef STE_USEIOSPACE #define STE_RES SYS_RES_IOPORT #define STE_RID STE_PCI_LOIO #else #define STE_RES SYS_RES_MEMORY #define STE_RID STE_PCI_LOMEM #endif static device_method_t ste_methods[] = { 
/* Device interface */
	DEVMETHOD(device_probe,		ste_probe),
	DEVMETHOD(device_attach,	ste_attach),
	DEVMETHOD(device_detach,	ste_detach),
	DEVMETHOD(device_shutdown,	ste_shutdown),

	/* bus interface */
	DEVMETHOD(bus_print_child,	bus_generic_print_child),
	DEVMETHOD(bus_driver_added,	bus_generic_driver_added),

	/* MII interface */
	DEVMETHOD(miibus_readreg,	ste_miibus_readreg),
	DEVMETHOD(miibus_writereg,	ste_miibus_writereg),
	DEVMETHOD(miibus_statchg,	ste_miibus_statchg),

	{ 0, 0 }
};

static driver_t ste_driver = {
	"ste",
	ste_methods,
	sizeof(struct ste_softc)
};

static devclass_t ste_devclass;

DRIVER_MODULE(ste, pci, ste_driver, ste_devclass, 0, 0);
DRIVER_MODULE(miibus, ste, miibus_driver, miibus_devclass, 0, 0);

SYSCTL_NODE(_hw, OID_AUTO, ste, CTLFLAG_RD, 0, "if_ste parameters");

/*
 * Count of RX-ring resynchronizations performed by ste_rxeoc();
 * exported read/write as sysctl hw.ste.rxsyncs.
 */
static int ste_rxsyncs;
SYSCTL_INT(_hw_ste, OID_AUTO, rxsyncs, CTLFLAG_RW, &ste_rxsyncs, 0, "");

/* Read-modify-write helpers to set/clear bits in 4/2/1-byte registers. */
#define STE_SETBIT4(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x))

#define STE_CLRBIT4(sc, reg, x)				\
	CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x))

#define STE_SETBIT2(sc, reg, x)				\
	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) | (x))

#define STE_CLRBIT2(sc, reg, x)				\
	CSR_WRITE_2(sc, reg, CSR_READ_2(sc, reg) & ~(x))

#define STE_SETBIT1(sc, reg, x)				\
	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) | (x))

#define STE_CLRBIT1(sc, reg, x)				\
	CSR_WRITE_1(sc, reg, CSR_READ_1(sc, reg) & ~(x))

/* Bit-bang MII helpers; both rely on a variable named `sc' in scope. */
#define MII_SET(x)	STE_SETBIT1(sc, STE_PHYCTL, x)
#define MII_CLR(x)	STE_CLRBIT1(sc, STE_PHYCTL, x)

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
static void
ste_mii_sync(sc)
	struct ste_softc	*sc;
{
	register int		i;

	/* Drive MDIO high with the bus in output mode, then clock 32x. */
	MII_SET(STE_PHYCTL_MDIR|STE_PHYCTL_MDATA);

	for (i = 0; i < 32; i++) {
		MII_SET(STE_PHYCTL_MCLK);
		DELAY(1);
		MII_CLR(STE_PHYCTL_MCLK);
		DELAY(1);
	}

	return;
}

/*
 * Clock a series of bits through the MII.
*/
static void
ste_mii_send(sc, bits, cnt)
	struct ste_softc	*sc;
	u_int32_t		bits;
	int			cnt;
{
	int			i;

	/* Shift out `cnt' bits of `bits', most-significant bit first. */
	MII_CLR(STE_PHYCTL_MCLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			MII_SET(STE_PHYCTL_MDATA);
		} else {
			MII_CLR(STE_PHYCTL_MDATA);
		}
		DELAY(1);
		MII_CLR(STE_PHYCTL_MCLK);
		DELAY(1);
		MII_SET(STE_PHYCTL_MCLK);
	}
}

/*
 * Read a PHY register through the MII.
 * Returns 1 if the PHY did not ack the address phase, 0 on success;
 * on success the result is left in frame->mii_data.
 */
static int
ste_mii_readreg(sc, frame)
	struct ste_softc	*sc;
	struct ste_mii_frame	*frame;
{
	int			i, ack;

	STE_LOCK(sc);

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = STE_MII_STARTDELIM;
	frame->mii_opcode = STE_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_2(sc, STE_PHYCTL, 0);

	/*
	 * Turn on data xmit.
	 */
	MII_SET(STE_PHYCTL_MDIR);

	ste_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	ste_mii_send(sc, frame->mii_stdelim, 2);
	ste_mii_send(sc, frame->mii_opcode, 2);
	ste_mii_send(sc, frame->mii_phyaddr, 5);
	ste_mii_send(sc, frame->mii_regaddr, 5);

	/* Turn off xmit. */
	MII_CLR(STE_PHYCTL_MDIR);

	/* Idle bit */
	MII_CLR((STE_PHYCTL_MCLK|STE_PHYCTL_MDATA));
	DELAY(1);
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);

	/* Check for ack */
	MII_CLR(STE_PHYCTL_MCLK);
	DELAY(1);
	ack = CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA;
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			MII_CLR(STE_PHYCTL_MCLK);
			DELAY(1);
			MII_SET(STE_PHYCTL_MCLK);
			DELAY(1);
		}
		goto fail;
	}

	/* Clock in 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(STE_PHYCTL_MCLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_2(sc, STE_PHYCTL) & STE_PHYCTL_MDATA)
				frame->mii_data |= i;
			DELAY(1);
		}
		MII_SET(STE_PHYCTL_MCLK);
		DELAY(1);
	}

fail:

	MII_CLR(STE_PHYCTL_MCLK);
	DELAY(1);
	MII_SET(STE_PHYCTL_MCLK);
	DELAY(1);

	STE_UNLOCK(sc);

	if (ack)
		return(1);
	return(0);
}

/*
 * Write to a PHY register through the MII.
 */
static int
ste_mii_writereg(sc, frame)
	struct ste_softc	*sc;
	struct ste_mii_frame	*frame;
{
	STE_LOCK(sc);

	/*
	 * Set up frame for TX.
*/ frame->mii_stdelim = STE_MII_STARTDELIM; frame->mii_opcode = STE_MII_WRITEOP; frame->mii_turnaround = STE_MII_TURNAROUND; /* * Turn on data output. */ MII_SET(STE_PHYCTL_MDIR); ste_mii_sync(sc); ste_mii_send(sc, frame->mii_stdelim, 2); ste_mii_send(sc, frame->mii_opcode, 2); ste_mii_send(sc, frame->mii_phyaddr, 5); ste_mii_send(sc, frame->mii_regaddr, 5); ste_mii_send(sc, frame->mii_turnaround, 2); ste_mii_send(sc, frame->mii_data, 16); /* Idle bit. */ MII_SET(STE_PHYCTL_MCLK); DELAY(1); MII_CLR(STE_PHYCTL_MCLK); DELAY(1); /* * Turn off xmit. */ MII_CLR(STE_PHYCTL_MDIR); STE_UNLOCK(sc); return(0); } static int ste_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct ste_softc *sc; struct ste_mii_frame frame; sc = device_get_softc(dev); if ( sc->ste_one_phy && phy != 0 ) return (0); bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; ste_mii_readreg(sc, &frame); return(frame.mii_data); } static int ste_miibus_writereg(dev, phy, reg, data) device_t dev; int phy, reg, data; { struct ste_softc *sc; struct ste_mii_frame frame; sc = device_get_softc(dev); bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; frame.mii_data = data; ste_mii_writereg(sc, &frame); return(0); } static void ste_miibus_statchg(dev) device_t dev; { struct ste_softc *sc; struct mii_data *mii; sc = device_get_softc(dev); STE_LOCK(sc); mii = device_get_softc(sc->ste_miibus); if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { STE_SETBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX); } else { STE_CLRBIT2(sc, STE_MACCTL0, STE_MACCTL0_FULLDUPLEX); } STE_UNLOCK(sc); return; } static int ste_ifmedia_upd(ifp) struct ifnet *ifp; { struct ste_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->ste_miibus); sc->ste_link = 0; if (mii->mii_instance) { struct mii_softc *miisc; LIST_FOREACH(miisc, &mii->mii_phys, mii_list) mii_phy_reset(miisc); } mii_mediachg(mii); return(0); } static void 
ste_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct ste_softc	*sc;
	struct mii_data		*mii;

	sc = ifp->if_softc;
	mii = device_get_softc(sc->ste_miibus);

	/* Refresh PHY state and report it back through the ifmediareq. */
	mii_pollstat(mii);
	ifmr->ifm_active = mii->mii_media_active;
	ifmr->ifm_status = mii->mii_media_status;

	return;
}

/*
 * Busy-wait until a pending DMA halt completes, giving up (with a
 * console warning) after STE_TIMEOUT polls of the DMA control register.
 */
static void
ste_wait(sc)
	struct ste_softc	*sc;
{
	register int		i;

	for (i = 0; i < STE_TIMEOUT; i++) {
		if (!(CSR_READ_4(sc, STE_DMACTL) &
		    STE_DMACTL_DMA_HALTINPROG))
			break;
	}

	if (i == STE_TIMEOUT)
		printf("ste%d: command never completed!\n", sc->ste_unit);

	return;
}

/*
 * The EEPROM is slow: give it time to come ready after issuing
 * it a command.  Returns 1 if the EEPROM never came ready, else 0.
 */
static int
ste_eeprom_wait(sc)
	struct ste_softc	*sc;
{
	int			i;

	DELAY(1000);

	/* Poll the busy bit up to 100 times, 1ms apart. */
	for (i = 0; i < 100; i++) {
		if (CSR_READ_2(sc, STE_EEPROM_CTL) & STE_EECTL_BUSY)
			DELAY(1000);
		else
			break;
	}

	if (i == 100) {
		printf("ste%d: eeprom failed to come ready\n", sc->ste_unit);
		return(1);
	}

	return(0);
}

/*
 * Read a sequence of words from the EEPROM. Note that ethernet address
 * data is stored in the EEPROM in network byte order.
 * `dest' receives `cnt' 16-bit words starting at EEPROM word `off';
 * when `swap' is nonzero each word is byte-swapped via ntohs().
 * Returns 0 on success, 1 on EEPROM timeout.
 */
static int
ste_read_eeprom(sc, dest, off, cnt, swap)
	struct ste_softc	*sc;
	caddr_t			dest;
	int			off;
	int			cnt;
	int			swap;
{
	int			err = 0, i;
	u_int16_t		word = 0, *ptr;

	if (ste_eeprom_wait(sc))
		return(1);

	for (i = 0; i < cnt; i++) {
		CSR_WRITE_2(sc, STE_EEPROM_CTL, STE_EEOPCODE_READ | (off + i));
		err = ste_eeprom_wait(sc);
		if (err)
			break;
		word = CSR_READ_2(sc, STE_EEPROM_DATA);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return(err ?
1 : 0); } static void ste_setmulti(sc) struct ste_softc *sc; { struct ifnet *ifp; int h = 0; u_int32_t hashes[2] = { 0, 0 }; struct ifmultiaddr *ifma; ifp = sc->ste_ifp; if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_ALLMULTI); STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_MULTIHASH); return; } /* first, zot all the existing hash bits */ CSR_WRITE_2(sc, STE_MAR0, 0); CSR_WRITE_2(sc, STE_MAR1, 0); CSR_WRITE_2(sc, STE_MAR2, 0); CSR_WRITE_2(sc, STE_MAR3, 0); /* now program new ones */ + IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) & 0x3F; if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); } + IF_ADDR_UNLOCK(ifp); CSR_WRITE_2(sc, STE_MAR0, hashes[0] & 0xFFFF); CSR_WRITE_2(sc, STE_MAR1, (hashes[0] >> 16) & 0xFFFF); CSR_WRITE_2(sc, STE_MAR2, hashes[1] & 0xFFFF); CSR_WRITE_2(sc, STE_MAR3, (hashes[1] >> 16) & 0xFFFF); STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_ALLMULTI); STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_MULTIHASH); return; } #ifdef DEVICE_POLLING static poll_handler_t ste_poll; static void ste_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct ste_softc *sc = ifp->if_softc; STE_LOCK(sc); if (!(ifp->if_capenable & IFCAP_POLLING)) { ether_poll_deregister(ifp); cmd = POLL_DEREGISTER; } if (cmd == POLL_DEREGISTER) { /* final call, enable interrupts */ CSR_WRITE_2(sc, STE_IMR, STE_INTRS); goto done; } sc->rxcycles = count; if (cmd == POLL_AND_CHECK_STATUS) ste_rxeoc(sc); ste_rxeof(sc); ste_txeof(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) ste_start(ifp); if (cmd == POLL_AND_CHECK_STATUS) { u_int16_t status; status = CSR_READ_2(sc, STE_ISR_ACK); if (status & STE_ISR_TX_DONE) ste_txeoc(sc); if (status & STE_ISR_STATS_OFLOW) { untimeout(ste_stats_update, sc, sc->ste_stat_ch); ste_stats_update(sc); } if (status & STE_ISR_LINKEVENT) 
mii_pollstat(device_get_softc(sc->ste_miibus));
		/* Host error: the only recovery is a full reset + re-init. */
		if (status & STE_ISR_HOSTERR) {
			ste_reset(sc);
			ste_init(sc);
		}
	}
done:
	STE_UNLOCK(sc);
}
#endif /* DEVICE_POLLING */

/*
 * Interrupt handler.  Loops acking and servicing interrupt causes from
 * STE_ISR_ACK until none of the enabled STE_INTRS bits remain set, then
 * re-enables the interrupt mask and kicks the transmit queue.
 */
static void
ste_intr(xsc)
	void			*xsc;
{
	struct ste_softc	*sc;
	struct ifnet		*ifp;
	u_int16_t		status;

	sc = xsc;
	STE_LOCK(sc);
	ifp = sc->ste_ifp;

#ifdef DEVICE_POLLING
	/* When polling is active the poll routine does all the work. */
	if (ifp->if_flags & IFF_POLLING)
		goto done;
	if ((ifp->if_capenable & IFCAP_POLLING) &&
	    ether_poll_register(ste_poll, ifp)) { /* ok, disable interrupts */
		CSR_WRITE_2(sc, STE_IMR, 0);
		ste_poll(ifp, 0, 1);
		goto done;
	}
#endif /* DEVICE_POLLING */

	/* See if this is really our interrupt. */
	if (!(CSR_READ_2(sc, STE_ISR) & STE_ISR_INTLATCH)) {
		STE_UNLOCK(sc);
		return;
	}

	for (;;) {
		/* Reading STE_ISR_ACK also acknowledges the causes read. */
		status = CSR_READ_2(sc, STE_ISR_ACK);

		if (!(status & STE_INTRS))
			break;

		if (status & STE_ISR_RX_DMADONE) {
			ste_rxeoc(sc);
			ste_rxeof(sc);
		}

		if (status & STE_ISR_TX_DMADONE)
			ste_txeof(sc);

		if (status & STE_ISR_TX_DONE)
			ste_txeoc(sc);

		if (status & STE_ISR_STATS_OFLOW) {
			untimeout(ste_stats_update, sc, sc->ste_stat_ch);
			ste_stats_update(sc);
		}

		if (status & STE_ISR_LINKEVENT)
			mii_pollstat(device_get_softc(sc->ste_miibus));

		if (status & STE_ISR_HOSTERR) {
			ste_reset(sc);
			ste_init(sc);
		}
	}

	/* Re-enable interrupts */
	CSR_WRITE_2(sc, STE_IMR, STE_INTRS);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		ste_start(ifp);

#ifdef DEVICE_POLLING
done:
#endif /* DEVICE_POLLING */
	STE_UNLOCK(sc);

	return;
}

/*
 * Resynchronize the software RX head with the chip: if the current head
 * descriptor shows no status, walk the ring looking for the first
 * completed descriptor and adopt it as the new head.
 */
static void
ste_rxeoc(struct ste_softc *sc)
{
	struct ste_chain_onefrag *cur_rx;

	STE_LOCK_ASSERT(sc);

	if (sc->ste_cdata.ste_rx_head->ste_ptr->ste_status == 0) {
		cur_rx = sc->ste_cdata.ste_rx_head;
		do {
			cur_rx = cur_rx->ste_next;
			/* If the ring is empty, just return. */
			if (cur_rx == sc->ste_cdata.ste_rx_head)
				return;
		} while (cur_rx->ste_ptr->ste_status == 0);
		if (sc->ste_cdata.ste_rx_head->ste_ptr->ste_status == 0) {
			/* We've fallen behind the chip: catch it.
*/ sc->ste_cdata.ste_rx_head = cur_rx; ++ste_rxsyncs; } } } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. */ static void ste_rxeof(sc) struct ste_softc *sc; { struct mbuf *m; struct ifnet *ifp; struct ste_chain_onefrag *cur_rx; int total_len = 0, count=0; u_int32_t rxstat; STE_LOCK_ASSERT(sc); ifp = sc->ste_ifp; while((rxstat = sc->ste_cdata.ste_rx_head->ste_ptr->ste_status) & STE_RXSTAT_DMADONE) { #ifdef DEVICE_POLLING if (ifp->if_flags & IFF_POLLING) { if (sc->rxcycles <= 0) break; sc->rxcycles--; } #endif /* DEVICE_POLLING */ if ((STE_RX_LIST_CNT - count) < 3) { break; } cur_rx = sc->ste_cdata.ste_rx_head; sc->ste_cdata.ste_rx_head = cur_rx->ste_next; /* * If an error occurs, update stats, clear the * status word and leave the mbuf cluster in place: * it should simply get re-used next time this descriptor * comes up in the ring. */ if (rxstat & STE_RXSTAT_FRAME_ERR) { ifp->if_ierrors++; cur_rx->ste_ptr->ste_status = 0; continue; } /* * If there error bit was not set, the upload complete * bit should be set which means we have a valid packet. * If not, something truly strange has happened. */ if (!(rxstat & STE_RXSTAT_DMADONE)) { printf("ste%d: bad receive status -- packet dropped\n", sc->ste_unit); ifp->if_ierrors++; cur_rx->ste_ptr->ste_status = 0; continue; } /* No errors; receive the packet. */ m = cur_rx->ste_mbuf; total_len = cur_rx->ste_ptr->ste_status & STE_RXSTAT_FRAMELEN; /* * Try to conjure up a new mbuf cluster. If that * fails, it means we have an out of memory condition and * should leave the buffer in place and continue. This will * result in a lost packet, but there's little else we * can do in this situation. 
*/ if (ste_newbuf(sc, cur_rx, NULL) == ENOBUFS) { ifp->if_ierrors++; cur_rx->ste_ptr->ste_status = 0; continue; } m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = total_len; ifp->if_ipackets++; STE_UNLOCK(sc); (*ifp->if_input)(ifp, m); STE_LOCK(sc); cur_rx->ste_ptr->ste_status = 0; count++; } return; } static void ste_txeoc(sc) struct ste_softc *sc; { u_int8_t txstat; struct ifnet *ifp; ifp = sc->ste_ifp; while ((txstat = CSR_READ_1(sc, STE_TX_STATUS)) & STE_TXSTATUS_TXDONE) { if (txstat & STE_TXSTATUS_UNDERRUN || txstat & STE_TXSTATUS_EXCESSCOLLS || txstat & STE_TXSTATUS_RECLAIMERR) { ifp->if_oerrors++; printf("ste%d: transmission error: %x\n", sc->ste_unit, txstat); ste_reset(sc); ste_init(sc); if (txstat & STE_TXSTATUS_UNDERRUN && sc->ste_tx_thresh < STE_PACKET_SIZE) { sc->ste_tx_thresh += STE_MIN_FRAMELEN; printf("ste%d: tx underrun, increasing tx" " start threshold to %d bytes\n", sc->ste_unit, sc->ste_tx_thresh); } CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh); CSR_WRITE_2(sc, STE_TX_RECLAIM_THRESH, (STE_PACKET_SIZE >> 4)); } ste_init(sc); CSR_WRITE_2(sc, STE_TX_STATUS, txstat); } return; } static void ste_txeof(sc) struct ste_softc *sc; { struct ste_chain *cur_tx; struct ifnet *ifp; int idx; ifp = sc->ste_ifp; idx = sc->ste_cdata.ste_tx_cons; while(idx != sc->ste_cdata.ste_tx_prod) { cur_tx = &sc->ste_cdata.ste_tx_chain[idx]; if (!(cur_tx->ste_ptr->ste_ctl & STE_TXCTL_DMADONE)) break; m_freem(cur_tx->ste_mbuf); cur_tx->ste_mbuf = NULL; ifp->if_flags &= ~IFF_OACTIVE; ifp->if_opackets++; STE_INC(idx, STE_TX_LIST_CNT); } sc->ste_cdata.ste_tx_cons = idx; if (idx == sc->ste_cdata.ste_tx_prod) ifp->if_timer = 0; } static void ste_stats_update(xsc) void *xsc; { struct ste_softc *sc; struct ifnet *ifp; struct mii_data *mii; sc = xsc; STE_LOCK(sc); ifp = sc->ste_ifp; mii = device_get_softc(sc->ste_miibus); ifp->if_collisions += CSR_READ_1(sc, STE_LATE_COLLS) + CSR_READ_1(sc, STE_MULTI_COLLS) + CSR_READ_1(sc, STE_SINGLE_COLLS); if (!sc->ste_link) { 
mii_pollstat(mii); if (mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { sc->ste_link++; /* * we don't get a call-back on re-init so do it * otherwise we get stuck in the wrong link state */ ste_miibus_statchg(sc->ste_dev); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) ste_start(ifp); } } sc->ste_stat_ch = timeout(ste_stats_update, sc, hz); STE_UNLOCK(sc); return; } /* * Probe for a Sundance ST201 chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int ste_probe(dev) device_t dev; { struct ste_type *t; t = ste_devs; while(t->ste_name != NULL) { if ((pci_get_vendor(dev) == t->ste_vid) && (pci_get_device(dev) == t->ste_did)) { device_set_desc(dev, t->ste_name); return (BUS_PROBE_DEFAULT); } t++; } return(ENXIO); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int ste_attach(dev) device_t dev; { struct ste_softc *sc; struct ifnet *ifp; int unit, error = 0, rid; u_char eaddr[6]; sc = device_get_softc(dev); unit = device_get_unit(dev); sc->ste_dev = dev; /* * Only use one PHY since this chip reports multiple * Note on the DFE-550 the PHY is at 1 on the DFE-580 * it is at 0 & 1. It is rev 0x12. */ if (pci_get_vendor(dev) == DL_VENDORID && pci_get_device(dev) == DL_DEVICEID_DL10050 && pci_get_revid(dev) == 0x12 ) sc->ste_one_phy = 1; mtx_init(&sc->ste_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); /* * Map control/status registers. 
*/ pci_enable_busmaster(dev); rid = STE_RID; sc->ste_res = bus_alloc_resource_any(dev, STE_RES, &rid, RF_ACTIVE); if (sc->ste_res == NULL) { printf ("ste%d: couldn't map ports/memory\n", unit); error = ENXIO; goto fail; } sc->ste_btag = rman_get_bustag(sc->ste_res); sc->ste_bhandle = rman_get_bushandle(sc->ste_res); /* Allocate interrupt */ rid = 0; sc->ste_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->ste_irq == NULL) { printf("ste%d: couldn't map interrupt\n", unit); error = ENXIO; goto fail; } callout_handle_init(&sc->ste_stat_ch); /* Reset the adapter. */ ste_reset(sc); /* * Get station address from the EEPROM. */ if (ste_read_eeprom(sc, eaddr, STE_EEADDR_NODE0, 3, 0)) { printf("ste%d: failed to read station address\n", unit); error = ENXIO;; goto fail; } sc->ste_unit = unit; /* Allocate the descriptor queues. */ sc->ste_ldata = contigmalloc(sizeof(struct ste_list_data), M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc->ste_ldata == NULL) { printf("ste%d: no memory for list buffers!\n", unit); error = ENXIO; goto fail; } bzero(sc->ste_ldata, sizeof(struct ste_list_data)); ifp = sc->ste_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { printf("ste%d: can not if_alloc()\n", sc->ste_unit); error = ENOSPC; goto fail; } /* Do MII setup. */ if (mii_phy_probe(dev, &sc->ste_miibus, ste_ifmedia_upd, ste_ifmedia_sts)) { printf("ste%d: MII without any phy!\n", sc->ste_unit); error = ENXIO; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_NEEDSGIANT; ifp->if_ioctl = ste_ioctl; ifp->if_start = ste_start; ifp->if_watchdog = ste_watchdog; ifp->if_init = ste_init; ifp->if_baudrate = 10000000; IFQ_SET_MAXLEN(&ifp->if_snd, STE_TX_LIST_CNT - 1); ifp->if_snd.ifq_drv_maxlen = STE_TX_LIST_CNT - 1; IFQ_SET_READY(&ifp->if_snd); sc->ste_tx_thresh = STE_TXSTART_THRESH; /* * Call MI attach routine. 
*/ ether_ifattach(ifp, eaddr); /* * Tell the upper layer(s) we support long frames. */ ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_VLAN_MTU; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif ifp->if_capenable = ifp->if_capabilities; /* Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->ste_irq, INTR_TYPE_NET, ste_intr, sc, &sc->ste_intrhand); if (error) { printf("ste%d: couldn't set up irq\n", unit); ether_ifdetach(ifp); if_free(ifp); goto fail; } fail: if (error) ste_detach(dev); return(error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. */ static int ste_detach(dev) device_t dev; { struct ste_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); KASSERT(mtx_initialized(&sc->ste_mtx), ("ste mutex not initialized")); STE_LOCK(sc); ifp = sc->ste_ifp; /* These should only be active if attach succeeded */ if (device_is_attached(dev)) { ste_stop(sc); ether_ifdetach(ifp); if_free(ifp); } if (sc->ste_miibus) device_delete_child(dev, sc->ste_miibus); bus_generic_detach(dev); if (sc->ste_intrhand) bus_teardown_intr(dev, sc->ste_irq, sc->ste_intrhand); if (sc->ste_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ste_irq); if (sc->ste_res) bus_release_resource(dev, STE_RES, STE_RID, sc->ste_res); if (sc->ste_ldata) { contigfree(sc->ste_ldata, sizeof(struct ste_list_data), M_DEVBUF); } STE_UNLOCK(sc); mtx_destroy(&sc->ste_mtx); return(0); } static int ste_newbuf(sc, c, m) struct ste_softc *sc; struct ste_chain_onefrag *c; struct mbuf *m; { struct mbuf *m_new = NULL; if (m == NULL) { MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) return(ENOBUFS); MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { m_freem(m_new); 
return(ENOBUFS); } m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; } else { m_new = m; m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; m_new->m_data = m_new->m_ext.ext_buf; } m_adj(m_new, ETHER_ALIGN); c->ste_mbuf = m_new; c->ste_ptr->ste_status = 0; c->ste_ptr->ste_frag.ste_addr = vtophys(mtod(m_new, caddr_t)); c->ste_ptr->ste_frag.ste_len = (1536 + ETHER_VLAN_ENCAP_LEN) | STE_FRAG_LAST; return(0); } static int ste_init_rx_list(sc) struct ste_softc *sc; { struct ste_chain_data *cd; struct ste_list_data *ld; int i; cd = &sc->ste_cdata; ld = sc->ste_ldata; for (i = 0; i < STE_RX_LIST_CNT; i++) { cd->ste_rx_chain[i].ste_ptr = &ld->ste_rx_list[i]; if (ste_newbuf(sc, &cd->ste_rx_chain[i], NULL) == ENOBUFS) return(ENOBUFS); if (i == (STE_RX_LIST_CNT - 1)) { cd->ste_rx_chain[i].ste_next = &cd->ste_rx_chain[0]; ld->ste_rx_list[i].ste_next = vtophys(&ld->ste_rx_list[0]); } else { cd->ste_rx_chain[i].ste_next = &cd->ste_rx_chain[i + 1]; ld->ste_rx_list[i].ste_next = vtophys(&ld->ste_rx_list[i + 1]); } ld->ste_rx_list[i].ste_status = 0; } cd->ste_rx_head = &cd->ste_rx_chain[0]; return(0); } static void ste_init_tx_list(sc) struct ste_softc *sc; { struct ste_chain_data *cd; struct ste_list_data *ld; int i; cd = &sc->ste_cdata; ld = sc->ste_ldata; for (i = 0; i < STE_TX_LIST_CNT; i++) { cd->ste_tx_chain[i].ste_ptr = &ld->ste_tx_list[i]; cd->ste_tx_chain[i].ste_ptr->ste_next = 0; cd->ste_tx_chain[i].ste_ptr->ste_ctl = 0; cd->ste_tx_chain[i].ste_phys = vtophys(&ld->ste_tx_list[i]); if (i == (STE_TX_LIST_CNT - 1)) cd->ste_tx_chain[i].ste_next = &cd->ste_tx_chain[0]; else cd->ste_tx_chain[i].ste_next = &cd->ste_tx_chain[i + 1]; } cd->ste_tx_prod = 0; cd->ste_tx_cons = 0; return; } static void ste_init(xsc) void *xsc; { struct ste_softc *sc; int i; struct ifnet *ifp; sc = xsc; STE_LOCK(sc); ifp = sc->ste_ifp; ste_stop(sc); /* Init our MAC address */ for (i = 0; i < ETHER_ADDR_LEN; i++) { CSR_WRITE_1(sc, STE_PAR0 + i, IFP2ENADDR(sc->ste_ifp)[i]); } /* Init RX list */ if 
(ste_init_rx_list(sc) == ENOBUFS) { printf("ste%d: initialization failed: no " "memory for RX buffers\n", sc->ste_unit); ste_stop(sc); STE_UNLOCK(sc); return; } /* Set RX polling interval */ CSR_WRITE_1(sc, STE_RX_DMAPOLL_PERIOD, 64); /* Init TX descriptors */ ste_init_tx_list(sc); /* Set the TX freethresh value */ CSR_WRITE_1(sc, STE_TX_DMABURST_THRESH, STE_PACKET_SIZE >> 8); /* Set the TX start threshold for best performance. */ CSR_WRITE_2(sc, STE_TX_STARTTHRESH, sc->ste_tx_thresh); /* Set the TX reclaim threshold. */ CSR_WRITE_1(sc, STE_TX_RECLAIM_THRESH, (STE_PACKET_SIZE >> 4)); /* Set up the RX filter. */ CSR_WRITE_1(sc, STE_RX_MODE, STE_RXMODE_UNICAST); /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) { STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC); } else { STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC); } /* Set capture broadcast bit to accept broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) { STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_BROADCAST); } else { STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_BROADCAST); } ste_setmulti(sc); /* Load the address of the RX list. 
*/ STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL); ste_wait(sc); CSR_WRITE_4(sc, STE_RX_DMALIST_PTR, vtophys(&sc->ste_ldata->ste_rx_list[0])); STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL); STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_RXDMA_UNSTALL); /* Set TX polling interval (defer until we TX first packet */ CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 0); /* Load address of the TX list */ STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL); ste_wait(sc); CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, 0); STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL); STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL); ste_wait(sc); sc->ste_tx_prev = NULL; /* Enable receiver and transmitter */ CSR_WRITE_2(sc, STE_MACCTL0, 0); CSR_WRITE_2(sc, STE_MACCTL1, 0); STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_ENABLE); STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_ENABLE); /* Enable stats counters. */ STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_ENABLE); CSR_WRITE_2(sc, STE_ISR, 0xFFFF); #ifdef DEVICE_POLLING /* Disable interrupts if we are polling. */ if (ifp->if_flags & IFF_POLLING) CSR_WRITE_2(sc, STE_IMR, 0); else #endif /* DEVICE_POLLING */ /* Enable interrupts. 
*/ CSR_WRITE_2(sc, STE_IMR, STE_INTRS); /* Accept VLAN length packets */ CSR_WRITE_2(sc, STE_MAX_FRAMELEN, ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN); ste_ifmedia_upd(ifp); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; sc->ste_stat_ch = timeout(ste_stats_update, sc, hz); STE_UNLOCK(sc); return; } static void ste_stop(sc) struct ste_softc *sc; { int i; struct ifnet *ifp; STE_LOCK(sc); ifp = sc->ste_ifp; untimeout(ste_stats_update, sc, sc->ste_stat_ch); ifp->if_flags &= ~(IFF_RUNNING|IFF_OACTIVE); #ifdef DEVICE_POLLING ether_poll_deregister(ifp); #endif /* DEVICE_POLLING */ CSR_WRITE_2(sc, STE_IMR, 0); STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_TX_DISABLE); STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_RX_DISABLE); STE_SETBIT2(sc, STE_MACCTL1, STE_MACCTL1_STATS_DISABLE); STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL); STE_SETBIT2(sc, STE_DMACTL, STE_DMACTL_RXDMA_STALL); ste_wait(sc); /* * Try really hard to stop the RX engine or under heavy RX * data chip will write into de-allocated memory. 
*/ ste_reset(sc); sc->ste_link = 0; for (i = 0; i < STE_RX_LIST_CNT; i++) { if (sc->ste_cdata.ste_rx_chain[i].ste_mbuf != NULL) { m_freem(sc->ste_cdata.ste_rx_chain[i].ste_mbuf); sc->ste_cdata.ste_rx_chain[i].ste_mbuf = NULL; } } for (i = 0; i < STE_TX_LIST_CNT; i++) { if (sc->ste_cdata.ste_tx_chain[i].ste_mbuf != NULL) { m_freem(sc->ste_cdata.ste_tx_chain[i].ste_mbuf); sc->ste_cdata.ste_tx_chain[i].ste_mbuf = NULL; } } bzero(sc->ste_ldata, sizeof(struct ste_list_data)); STE_UNLOCK(sc); return; } static void ste_reset(sc) struct ste_softc *sc; { int i; STE_SETBIT4(sc, STE_ASICCTL, STE_ASICCTL_GLOBAL_RESET|STE_ASICCTL_RX_RESET| STE_ASICCTL_TX_RESET|STE_ASICCTL_DMA_RESET| STE_ASICCTL_FIFO_RESET|STE_ASICCTL_NETWORK_RESET| STE_ASICCTL_AUTOINIT_RESET|STE_ASICCTL_HOST_RESET| STE_ASICCTL_EXTRESET_RESET); DELAY(100000); for (i = 0; i < STE_TIMEOUT; i++) { if (!(CSR_READ_4(sc, STE_ASICCTL) & STE_ASICCTL_RESET_BUSY)) break; } if (i == STE_TIMEOUT) printf("ste%d: global reset never completed\n", sc->ste_unit); return; } static int ste_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct ste_softc *sc; struct ifreq *ifr; struct mii_data *mii; int error = 0; sc = ifp->if_softc; STE_LOCK(sc); ifr = (struct ifreq *)data; switch(command) { case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->ste_if_flags & IFF_PROMISC)) { STE_SETBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC); } else if (ifp->if_flags & IFF_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->ste_if_flags & IFF_PROMISC) { STE_CLRBIT1(sc, STE_RX_MODE, STE_RXMODE_PROMISC); } if (ifp->if_flags & IFF_RUNNING && (ifp->if_flags ^ sc->ste_if_flags) & IFF_ALLMULTI) ste_setmulti(sc); if (!(ifp->if_flags & IFF_RUNNING)) { sc->ste_tx_thresh = STE_TXSTART_THRESH; ste_init(sc); } } else { if (ifp->if_flags & IFF_RUNNING) ste_stop(sc); } sc->ste_if_flags = ifp->if_flags; error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: 
ste_setmulti(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->ste_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: ifp->if_capenable &= ~IFCAP_POLLING; ifp->if_capenable |= ifr->ifr_reqcap & IFCAP_POLLING; break; default: error = ether_ioctl(ifp, command, data); break; } STE_UNLOCK(sc); return(error); } static int ste_encap(sc, c, m_head) struct ste_softc *sc; struct ste_chain *c; struct mbuf *m_head; { int frag = 0; struct ste_frag *f = NULL; struct mbuf *m; struct ste_desc *d; d = c->ste_ptr; d->ste_ctl = 0; encap_retry: for (m = m_head, frag = 0; m != NULL; m = m->m_next) { if (m->m_len != 0) { if (frag == STE_MAXFRAGS) break; f = &d->ste_frags[frag]; f->ste_addr = vtophys(mtod(m, vm_offset_t)); f->ste_len = m->m_len; frag++; } } if (m != NULL) { struct mbuf *mn; /* * We ran out of segments. We have to recopy this * mbuf chain first. Bail out if we can't get the * new buffers. */ mn = m_defrag(m_head, M_DONTWAIT); if (mn == NULL) { m_freem(m_head); return ENOMEM; } m_head = mn; goto encap_retry; } c->ste_mbuf = m_head; d->ste_frags[frag - 1].ste_len |= STE_FRAG_LAST; d->ste_ctl = 1; return(0); } static void ste_start(ifp) struct ifnet *ifp; { struct ste_softc *sc; struct mbuf *m_head = NULL; struct ste_chain *cur_tx; int idx; sc = ifp->if_softc; STE_LOCK(sc); if (!sc->ste_link) { STE_UNLOCK(sc); return; } if (ifp->if_flags & IFF_OACTIVE) { STE_UNLOCK(sc); return; } idx = sc->ste_cdata.ste_tx_prod; while(sc->ste_cdata.ste_tx_chain[idx].ste_mbuf == NULL) { /* * We cannot re-use the last (free) descriptor; * the chip may not have read its ste_next yet. 
*/ if (STE_NEXT(idx, STE_TX_LIST_CNT) == sc->ste_cdata.ste_tx_cons) { ifp->if_flags |= IFF_OACTIVE; break; } IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; cur_tx = &sc->ste_cdata.ste_tx_chain[idx]; if (ste_encap(sc, cur_tx, m_head) != 0) break; cur_tx->ste_ptr->ste_next = 0; if (sc->ste_tx_prev == NULL) { cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1; /* Load address of the TX list */ STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_STALL); ste_wait(sc); CSR_WRITE_4(sc, STE_TX_DMALIST_PTR, vtophys(&sc->ste_ldata->ste_tx_list[0])); /* Set TX polling interval to start TX engine */ CSR_WRITE_1(sc, STE_TX_DMAPOLL_PERIOD, 64); STE_SETBIT4(sc, STE_DMACTL, STE_DMACTL_TXDMA_UNSTALL); ste_wait(sc); }else{ cur_tx->ste_ptr->ste_ctl = STE_TXCTL_DMAINTR | 1; sc->ste_tx_prev->ste_ptr->ste_next = cur_tx->ste_phys; } sc->ste_tx_prev = cur_tx; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, cur_tx->ste_mbuf); STE_INC(idx, STE_TX_LIST_CNT); ifp->if_timer = 5; } sc->ste_cdata.ste_tx_prod = idx; STE_UNLOCK(sc); return; } static void ste_watchdog(ifp) struct ifnet *ifp; { struct ste_softc *sc; sc = ifp->if_softc; STE_LOCK(sc); ifp->if_oerrors++; printf("ste%d: watchdog timeout\n", sc->ste_unit); ste_txeoc(sc); ste_txeof(sc); ste_rxeoc(sc); ste_rxeof(sc); ste_reset(sc); ste_init(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) ste_start(ifp); STE_UNLOCK(sc); return; } static void ste_shutdown(dev) device_t dev; { struct ste_softc *sc; sc = device_get_softc(dev); ste_stop(sc); return; } Index: stable/6/sys/pci/if_ti.c =================================================================== --- stable/6/sys/pci/if_ti.c (revision 149421) +++ stable/6/sys/pci/if_ti.c (revision 149422) @@ -1,3548 +1,3550 @@ /*- * Copyright (c) 1997, 1998, 1999 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ /* * Alteon Networks Tigon PCI gigabit ethernet driver for FreeBSD. * Manuals, sample driver and firmware source kits are available * from http://www.alteon.com/support/openkits. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The Alteon Networks Tigon chip contains an embedded R4000 CPU, * gigabit MAC, dual DMA channels and a PCI interface unit. NICs * using the Tigon may have anywhere from 512K to 2MB of SRAM. 
The
 * Tigon supports hardware IP, TCP and UDP checksumming, multicast
 * filtering and jumbo (9014 byte) frames. The hardware is largely
 * controlled by firmware, which must be loaded into the NIC during
 * initialization.
 *
 * The Tigon 2 contains 2 R4000 CPUs and requires a newer firmware
 * revision, which supports new features such as extended commands,
 * extended jumbo receive ring descriptors and a mini receive ring.
 *
 * Alteon Networks is to be commended for releasing such a vast amount
 * of development material for the Tigon NIC without requiring an NDA
 * (although they really should have done it a long time ago). With
 * any luck, the other vendors will finally wise up and follow Alteon's
 * stellar example.
 *
 * The firmware for the Tigon 1 and 2 NICs is compiled directly into
 * this driver by #including it as a C header file. This bloats the
 * driver somewhat, but it's the easiest method considering that the
 * driver code and firmware code need to be kept in sync. The source
 * for the firmware is not provided with the FreeBSD distribution since
 * compiling it requires a GNU toolchain targeted for mips-sgi-irix5.3.
 *
 * The following people deserve special thanks:
 * - Terry Murphy of 3Com, for providing a 3c985 Tigon 1 board
 *   for testing
 * - Raymond Lee of Netgear, for providing a pair of Netgear
 *   GA620 Tigon 2 boards for testing
 * - Ulf Zimmermann, for bringing the GA260 to my attention and
 *   convincing me to write this driver.
 * - Andrew Gallatin for providing FreeBSD/Alpha support.
*/ #include __FBSDID("$FreeBSD$"); #include "opt_ti.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include /* #define TI_PRIVATE_JUMBOS */ #if !defined(TI_PRIVATE_JUMBOS) #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #endif /* !TI_PRIVATE_JUMBOS */ #include #include #include #include #include #include #define TI_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_IP_FRAGS) /* * We can only turn on header splitting if we're using extended receive * BDs. */ #if defined(TI_JUMBO_HDRSPLIT) && defined(TI_PRIVATE_JUMBOS) #error "options TI_JUMBO_HDRSPLIT and TI_PRIVATE_JUMBOS are mutually exclusive" #endif /* TI_JUMBO_HDRSPLIT && TI_JUMBO_HDRSPLIT */ struct ti_softc *tis[8]; typedef enum { TI_SWAP_HTON, TI_SWAP_NTOH } ti_swap_type; /* * Various supported device vendors/types and their names. 
*/ static struct ti_type ti_devs[] = { { ALT_VENDORID, ALT_DEVICEID_ACENIC, "Alteon AceNIC 1000baseSX Gigabit Ethernet" }, { ALT_VENDORID, ALT_DEVICEID_ACENIC_COPPER, "Alteon AceNIC 1000baseT Gigabit Ethernet" }, { TC_VENDORID, TC_DEVICEID_3C985, "3Com 3c985-SX Gigabit Ethernet" }, { NG_VENDORID, NG_DEVICEID_GA620, "Netgear GA620 1000baseSX Gigabit Ethernet" }, { NG_VENDORID, NG_DEVICEID_GA620T, "Netgear GA620 1000baseT Gigabit Ethernet" }, { SGI_VENDORID, SGI_DEVICEID_TIGON, "Silicon Graphics Gigabit Ethernet" }, { DEC_VENDORID, DEC_DEVICEID_FARALLON_PN9000SX, "Farallon PN9000SX Gigabit Ethernet" }, { 0, 0, NULL } }; static d_open_t ti_open; static d_close_t ti_close; static d_ioctl_t ti_ioctl2; static struct cdevsw ti_cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT, .d_open = ti_open, .d_close = ti_close, .d_ioctl = ti_ioctl2, .d_name = "ti", }; static int ti_probe(device_t); static int ti_attach(device_t); static int ti_detach(device_t); static void ti_txeof(struct ti_softc *); static void ti_rxeof(struct ti_softc *); static void ti_stats_update(struct ti_softc *); static int ti_encap(struct ti_softc *, struct mbuf *, u_int32_t *); static void ti_intr(void *); static void ti_start(struct ifnet *); static int ti_ioctl(struct ifnet *, u_long, caddr_t); static void ti_init(void *); static void ti_init2(struct ti_softc *); static void ti_stop(struct ti_softc *); static void ti_watchdog(struct ifnet *); static void ti_shutdown(device_t); static int ti_ifmedia_upd(struct ifnet *); static void ti_ifmedia_sts(struct ifnet *, struct ifmediareq *); static u_int32_t ti_eeprom_putbyte(struct ti_softc *, int); static u_int8_t ti_eeprom_getbyte(struct ti_softc *, int, u_int8_t *); static int ti_read_eeprom(struct ti_softc *, caddr_t, int, int); static void ti_add_mcast(struct ti_softc *, struct ether_addr *); static void ti_del_mcast(struct ti_softc *, struct ether_addr *); static void ti_setmulti(struct ti_softc *); static void ti_mem(struct ti_softc *, u_int32_t, 
u_int32_t, caddr_t); static int ti_copy_mem(struct ti_softc *, u_int32_t, u_int32_t, caddr_t, int, int); static int ti_copy_scratch(struct ti_softc *, u_int32_t, u_int32_t, caddr_t, int, int, int); static int ti_bcopy_swap(const void *, void *, size_t, ti_swap_type); static void ti_loadfw(struct ti_softc *); static void ti_cmd(struct ti_softc *, struct ti_cmd_desc *); static void ti_cmd_ext(struct ti_softc *, struct ti_cmd_desc *, caddr_t, int); static void ti_handle_events(struct ti_softc *); #ifdef TI_PRIVATE_JUMBOS static int ti_alloc_jumbo_mem(struct ti_softc *); static void *ti_jalloc(struct ti_softc *); static void ti_jfree(void *, void *); #endif /* TI_PRIVATE_JUMBOS */ static int ti_newbuf_std(struct ti_softc *, int, struct mbuf *); static int ti_newbuf_mini(struct ti_softc *, int, struct mbuf *); static int ti_newbuf_jumbo(struct ti_softc *, int, struct mbuf *); static int ti_init_rx_ring_std(struct ti_softc *); static void ti_free_rx_ring_std(struct ti_softc *); static int ti_init_rx_ring_jumbo(struct ti_softc *); static void ti_free_rx_ring_jumbo(struct ti_softc *); static int ti_init_rx_ring_mini(struct ti_softc *); static void ti_free_rx_ring_mini(struct ti_softc *); static void ti_free_tx_ring(struct ti_softc *); static int ti_init_tx_ring(struct ti_softc *); static int ti_64bitslot_war(struct ti_softc *); static int ti_chipinit(struct ti_softc *); static int ti_gibinit(struct ti_softc *); #ifdef TI_JUMBO_HDRSPLIT static __inline void ti_hdr_split (struct mbuf *top, int hdr_len, int pkt_len, int idx); #endif /* TI_JUMBO_HDRSPLIT */ static device_method_t ti_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ti_probe), DEVMETHOD(device_attach, ti_attach), DEVMETHOD(device_detach, ti_detach), DEVMETHOD(device_shutdown, ti_shutdown), { 0, 0 } }; static driver_t ti_driver = { "ti", ti_methods, sizeof(struct ti_softc) }; static devclass_t ti_devclass; DRIVER_MODULE(ti, pci, ti_driver, ti_devclass, 0, 0); MODULE_DEPEND(ti, pci, 1, 1, 1); 
MODULE_DEPEND(ti, ether, 1, 1, 1); /* * Send an instruction or address to the EEPROM, check for ACK. */ static u_int32_t ti_eeprom_putbyte(sc, byte) struct ti_softc *sc; int byte; { register int i, ack = 0; /* * Make sure we're in TX mode. */ TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN); /* * Feed in each bit and stobe the clock. */ for (i = 0x80; i; i >>= 1) { if (byte & i) { TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT); } else { TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT); } DELAY(1); TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); DELAY(1); TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); } /* * Turn off TX mode. */ TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN); /* * Check for ack. */ TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); ack = CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN; TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); return (ack); } /* * Read a byte of data stored in the EEPROM at address 'addr.' * We have to send two address bytes since the EEPROM can hold * more than 256 bytes of data. */ static u_int8_t ti_eeprom_getbyte(sc, addr, dest) struct ti_softc *sc; int addr; u_int8_t *dest; { register int i; u_int8_t byte = 0; EEPROM_START; /* * Send write control code to EEPROM. */ if (ti_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) { printf("ti%d: failed to send write command, status: %x\n", sc->ti_unit, CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); return (1); } /* * Send first byte of address of byte we want to read. */ if (ti_eeprom_putbyte(sc, (addr >> 8) & 0xFF)) { printf("ti%d: failed to send address, status: %x\n", sc->ti_unit, CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); return (1); } /* * Send second byte address of byte we want to read. */ if (ti_eeprom_putbyte(sc, addr & 0xFF)) { printf("ti%d: failed to send address, status: %x\n", sc->ti_unit, CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); return (1); } EEPROM_STOP; EEPROM_START; /* * Send read control code to EEPROM. 
*/ if (ti_eeprom_putbyte(sc, EEPROM_CTL_READ)) { printf("ti%d: failed to send read command, status: %x\n", sc->ti_unit, CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); return (1); } /* * Start reading bits from EEPROM. */ TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN); for (i = 0x80; i; i >>= 1) { TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); DELAY(1); if (CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN) byte |= i; TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); DELAY(1); } EEPROM_STOP; /* * No ACK generated for read, so just return byte. */ *dest = byte; return (0); } /* * Read a sequence of bytes from the EEPROM. */ static int ti_read_eeprom(sc, dest, off, cnt) struct ti_softc *sc; caddr_t dest; int off; int cnt; { int err = 0, i; u_int8_t byte = 0; for (i = 0; i < cnt; i++) { err = ti_eeprom_getbyte(sc, off + i, &byte); if (err) break; *(dest + i) = byte; } return (err ? 1 : 0); } /* * NIC memory access function. Can be used to either clear a section * of NIC local memory or (if buf is non-NULL) copy data into it. 
*/ static void ti_mem(sc, addr, len, buf) struct ti_softc *sc; u_int32_t addr, len; caddr_t buf; { int segptr, segsize, cnt; caddr_t ti_winbase, ptr; segptr = addr; cnt = len; ti_winbase = (caddr_t)(sc->ti_vhandle + TI_WINDOW); ptr = buf; while (cnt) { if (cnt < TI_WINLEN) segsize = cnt; else segsize = TI_WINLEN - (segptr % TI_WINLEN); CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1))); if (buf == NULL) bzero((char *)ti_winbase + (segptr & (TI_WINLEN - 1)), segsize); else { bcopy((char *)ptr, (char *)ti_winbase + (segptr & (TI_WINLEN - 1)), segsize); ptr += segsize; } segptr += segsize; cnt -= segsize; } } static int ti_copy_mem(sc, tigon_addr, len, buf, useraddr, readdata) struct ti_softc *sc; u_int32_t tigon_addr, len; caddr_t buf; int useraddr, readdata; { int segptr, segsize, cnt; caddr_t ptr; u_int32_t origwin; u_int8_t tmparray[TI_WINLEN], tmparray2[TI_WINLEN]; int resid, segresid; int first_pass; /* * At the moment, we don't handle non-aligned cases, we just bail. * If this proves to be a problem, it will be fixed. */ if ((readdata == 0) && (tigon_addr & 0x3)) { printf("ti%d: ti_copy_mem: tigon address %#x isn't " "word-aligned\n", sc->ti_unit, tigon_addr); printf("ti%d: ti_copy_mem: unaligned writes aren't yet " "supported\n", sc->ti_unit); return (EINVAL); } segptr = tigon_addr & ~0x3; segresid = tigon_addr - segptr; /* * This is the non-aligned amount left over that we'll need to * copy. */ resid = len & 0x3; /* Add in the left over amount at the front of the buffer */ resid += segresid; cnt = len & ~0x3; /* * If resid + segresid is >= 4, add multiples of 4 to the count and * decrease the residual by that much. */ cnt += resid & ~0x3; resid -= resid & ~0x3; ptr = buf; first_pass = 1; /* * Make sure we aren't interrupted while we're changing the window * pointer. */ TI_LOCK(sc); /* * Save the old window base value. 
*/ origwin = CSR_READ_4(sc, TI_WINBASE); while (cnt) { bus_size_t ti_offset; if (cnt < TI_WINLEN) segsize = cnt; else segsize = TI_WINLEN - (segptr % TI_WINLEN); CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1))); ti_offset = TI_WINDOW + (segptr & (TI_WINLEN -1)); if (readdata) { bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle, ti_offset, (u_int32_t *)tmparray, segsize >> 2); if (useraddr) { /* * Yeah, this is a little on the kludgy * side, but at least this code is only * used for debugging. */ ti_bcopy_swap(tmparray, tmparray2, segsize, TI_SWAP_NTOH); if (first_pass) { copyout(&tmparray2[segresid], ptr, segsize - segresid); first_pass = 0; } else copyout(tmparray2, ptr, segsize); } else { if (first_pass) { ti_bcopy_swap(tmparray, tmparray2, segsize, TI_SWAP_NTOH); bcopy(&tmparray2[segresid], ptr, segsize - segresid); first_pass = 0; } else ti_bcopy_swap(tmparray, ptr, segsize, TI_SWAP_NTOH); } } else { if (useraddr) { copyin(ptr, tmparray2, segsize); ti_bcopy_swap(tmparray2, tmparray, segsize, TI_SWAP_HTON); } else ti_bcopy_swap(ptr, tmparray, segsize, TI_SWAP_HTON); bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle, ti_offset, (u_int32_t *)tmparray, segsize >> 2); } segptr += segsize; ptr += segsize; cnt -= segsize; } /* * Handle leftover, non-word-aligned bytes. */ if (resid != 0) { u_int32_t tmpval, tmpval2; bus_size_t ti_offset; /* * Set the segment pointer. */ CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1))); ti_offset = TI_WINDOW + (segptr & (TI_WINLEN - 1)); /* * First, grab whatever is in our source/destination. * We'll obviously need this for reads, but also for * writes, since we'll be doing read/modify/write. */ bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle, ti_offset, &tmpval, 1); /* * Next, translate this from little-endian to big-endian * (at least on i386 boxes). 
*/ tmpval2 = ntohl(tmpval); if (readdata) { /* * If we're reading, just copy the leftover number * of bytes from the host byte order buffer to * the user's buffer. */ if (useraddr) copyout(&tmpval2, ptr, resid); else bcopy(&tmpval2, ptr, resid); } else { /* * If we're writing, first copy the bytes to be * written into the network byte order buffer, * leaving the rest of the buffer with whatever was * originally in there. Then, swap the bytes * around into host order and write them out. * * XXX KDM the read side of this has been verified * to work, but the write side of it has not been * verified. So user beware. */ if (useraddr) copyin(ptr, &tmpval2, resid); else bcopy(ptr, &tmpval2, resid); tmpval = htonl(tmpval2); bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle, ti_offset, &tmpval, 1); } } CSR_WRITE_4(sc, TI_WINBASE, origwin); TI_UNLOCK(sc); return (0); } static int ti_copy_scratch(sc, tigon_addr, len, buf, useraddr, readdata, cpu) struct ti_softc *sc; u_int32_t tigon_addr, len; caddr_t buf; int useraddr, readdata; int cpu; { u_int32_t segptr; int cnt; u_int32_t tmpval, tmpval2; caddr_t ptr; /* * At the moment, we don't handle non-aligned cases, we just bail. * If this proves to be a problem, it will be fixed. */ if (tigon_addr & 0x3) { printf("ti%d: ti_copy_scratch: tigon address %#x isn't " "word-aligned\n", sc->ti_unit, tigon_addr); return (EINVAL); } if (len & 0x3) { printf("ti%d: ti_copy_scratch: transfer length %d isn't " "word-aligned\n", sc->ti_unit, len); return (EINVAL); } segptr = tigon_addr; cnt = len; ptr = buf; TI_LOCK(sc); while (cnt) { CSR_WRITE_4(sc, CPU_REG(TI_SRAM_ADDR, cpu), segptr); if (readdata) { tmpval2 = CSR_READ_4(sc, CPU_REG(TI_SRAM_DATA, cpu)); tmpval = ntohl(tmpval2); /* * Note: I've used this debugging interface * extensively with Alteon's 12.3.15 firmware, * compiled with GCC 2.7.2.1 and binutils 2.9.1. 
* * When you compile the firmware without * optimization, which is necessary sometimes in * order to properly step through it, you sometimes * read out a bogus value of 0xc0017c instead of * whatever was supposed to be in that scratchpad * location. That value is on the stack somewhere, * but I've never been able to figure out what was * causing the problem. * * The address seems to pop up in random places, * often not in the same place on two subsequent * reads. * * In any case, the underlying data doesn't seem * to be affected, just the value read out. * * KDM, 3/7/2000 */ if (tmpval2 == 0xc0017c) printf("ti%d: found 0xc0017c at %#x " "(tmpval2)\n", sc->ti_unit, segptr); if (tmpval == 0xc0017c) printf("ti%d: found 0xc0017c at %#x " "(tmpval)\n", sc->ti_unit, segptr); if (useraddr) copyout(&tmpval, ptr, 4); else bcopy(&tmpval, ptr, 4); } else { if (useraddr) copyin(ptr, &tmpval2, 4); else bcopy(ptr, &tmpval2, 4); tmpval = htonl(tmpval2); CSR_WRITE_4(sc, CPU_REG(TI_SRAM_DATA, cpu), tmpval); } cnt -= 4; segptr += 4; ptr += 4; } TI_UNLOCK(sc); return (0); } static int ti_bcopy_swap(src, dst, len, swap_type) const void *src; void *dst; size_t len; ti_swap_type swap_type; { const u_int8_t *tmpsrc; u_int8_t *tmpdst; size_t tmplen; if (len & 0x3) { printf("ti_bcopy_swap: length %zd isn't 32-bit aligned\n", len); return (-1); } tmpsrc = src; tmpdst = dst; tmplen = len; while (tmplen) { if (swap_type == TI_SWAP_NTOH) *(u_int32_t *)tmpdst = ntohl(*(const u_int32_t *)tmpsrc); else *(u_int32_t *)tmpdst = htonl(*(const u_int32_t *)tmpsrc); tmpsrc += 4; tmpdst += 4; tmplen -= 4; } return (0); } /* * Load firmware image into the NIC. Check that the firmware revision * is acceptable and see if we want the firmware for the Tigon 1 or * Tigon 2. 
 */
static void
ti_loadfw(sc)
	struct ti_softc *sc;
{
	switch (sc->ti_hwrev) {
	case TI_HWREV_TIGON:
		/* Refuse firmware the driver headers weren't built against. */
		if (tigonFwReleaseMajor != TI_FIRMWARE_MAJOR ||
		    tigonFwReleaseMinor != TI_FIRMWARE_MINOR ||
		    tigonFwReleaseFix != TI_FIRMWARE_FIX) {
			printf("ti%d: firmware revision mismatch; want "
			    "%d.%d.%d, got %d.%d.%d\n", sc->ti_unit,
			    TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR,
			    TI_FIRMWARE_FIX, tigonFwReleaseMajor,
			    tigonFwReleaseMinor, tigonFwReleaseFix);
			return;
		}
		/* Copy each firmware section into NIC SRAM. */
		ti_mem(sc, tigonFwTextAddr, tigonFwTextLen,
		    (caddr_t)tigonFwText);
		ti_mem(sc, tigonFwDataAddr, tigonFwDataLen,
		    (caddr_t)tigonFwData);
		ti_mem(sc, tigonFwRodataAddr, tigonFwRodataLen,
		    (caddr_t)tigonFwRodata);
		/* BSS/SBSS have no image; a NULL source zeroes the range. */
		ti_mem(sc, tigonFwBssAddr, tigonFwBssLen, NULL);
		ti_mem(sc, tigonFwSbssAddr, tigonFwSbssLen, NULL);
		CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigonFwStartAddr);
		break;
	case TI_HWREV_TIGON_II:
		if (tigon2FwReleaseMajor != TI_FIRMWARE_MAJOR ||
		    tigon2FwReleaseMinor != TI_FIRMWARE_MINOR ||
		    tigon2FwReleaseFix != TI_FIRMWARE_FIX) {
			printf("ti%d: firmware revision mismatch; want "
			    "%d.%d.%d, got %d.%d.%d\n", sc->ti_unit,
			    TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR,
			    TI_FIRMWARE_FIX, tigon2FwReleaseMajor,
			    tigon2FwReleaseMinor, tigon2FwReleaseFix);
			return;
		}
		ti_mem(sc, tigon2FwTextAddr, tigon2FwTextLen,
		    (caddr_t)tigon2FwText);
		ti_mem(sc, tigon2FwDataAddr, tigon2FwDataLen,
		    (caddr_t)tigon2FwData);
		ti_mem(sc, tigon2FwRodataAddr, tigon2FwRodataLen,
		    (caddr_t)tigon2FwRodata);
		ti_mem(sc, tigon2FwBssAddr, tigon2FwBssLen, NULL);
		ti_mem(sc, tigon2FwSbssAddr, tigon2FwSbssLen, NULL);
		CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigon2FwStartAddr);
		break;
	default:
		printf("ti%d: can't load firmware: unknown hardware rev\n",
		    sc->ti_unit);
		break;
	}
}

/*
 * Send the NIC a command via the command ring.
 */
static void
ti_cmd(sc, cmd)
	struct ti_softc *sc;
	struct ti_cmd_desc *cmd;
{
	u_int32_t index;

	/* Command ring lives in shared memory; bail if not mapped yet. */
	if (sc->ti_rdata->ti_cmd_ring == NULL)
		return;

	index = sc->ti_cmd_saved_prodidx;
	CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(u_int32_t *)(cmd));
	TI_INC(index, TI_CMD_RING_CNT);
	/* Bump the producer mailbox so the firmware sees the command. */
	CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index);
	sc->ti_cmd_saved_prodidx = index;
}

/*
 * Send the NIC an extended command. The 'len' parameter specifies the
 * number of command slots to include after the initial command.
 */
static void
ti_cmd_ext(sc, cmd, arg, len)
	struct ti_softc *sc;
	struct ti_cmd_desc *cmd;
	caddr_t arg;
	int len;
{
	u_int32_t index;
	register int i;

	if (sc->ti_rdata->ti_cmd_ring == NULL)
		return;

	index = sc->ti_cmd_saved_prodidx;
	CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(u_int32_t *)(cmd));
	TI_INC(index, TI_CMD_RING_CNT);
	/* Copy the extension words into the following ring slots. */
	for (i = 0; i < len; i++) {
		CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4),
		    *(u_int32_t *)(&arg[i * 4]));
		TI_INC(index, TI_CMD_RING_CNT);
	}
	CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index);
	sc->ti_cmd_saved_prodidx = index;
}

/*
 * Handle events that have triggered interrupts.
 */
static void
ti_handle_events(sc)
	struct ti_softc *sc;
{
	struct ti_event_desc *e;

	if (sc->ti_rdata->ti_event_ring == NULL)
		return;

	/* Drain every event between our consumer index and the producer. */
	while (sc->ti_ev_saved_considx != sc->ti_ev_prodidx.ti_idx) {
		e = &sc->ti_rdata->ti_event_ring[sc->ti_ev_saved_considx];
		switch (e->ti_event) {
		case TI_EV_LINKSTAT_CHANGED:
			sc->ti_linkstat = e->ti_code;
			if (e->ti_code == TI_EV_CODE_LINK_UP)
				printf("ti%d: 10/100 link up\n", sc->ti_unit);
			else if (e->ti_code == TI_EV_CODE_GIG_LINK_UP)
				printf("ti%d: gigabit link up\n", sc->ti_unit);
			else if (e->ti_code == TI_EV_CODE_LINK_DOWN)
				printf("ti%d: link down\n", sc->ti_unit);
			break;
		case TI_EV_ERROR:
			if (e->ti_code == TI_EV_CODE_ERR_INVAL_CMD)
				printf("ti%d: invalid command\n", sc->ti_unit);
			else if (e->ti_code == TI_EV_CODE_ERR_UNIMP_CMD)
				printf("ti%d: unknown command\n", sc->ti_unit);
			else if (e->ti_code == TI_EV_CODE_ERR_BADCFG)
				printf("ti%d: bad config data\n", sc->ti_unit);
			break;
		case TI_EV_FIRMWARE_UP:
			/* Firmware has booted; finish interface init. */
			ti_init2(sc);
			break;
		case TI_EV_STATS_UPDATED:
			ti_stats_update(sc);
			break;
		case TI_EV_RESET_JUMBO_RING:
		case TI_EV_MCAST_UPDATED:
			/* Who cares. */
			break;
		default:
			printf("ti%d: unknown event: %d\n", sc->ti_unit,
			    e->ti_event);
			break;
		}
		/* Advance the consumer index. */
		TI_INC(sc->ti_ev_saved_considx, TI_EVENT_RING_CNT);
		CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX,
		    sc->ti_ev_saved_considx);
	}
}

#ifdef TI_PRIVATE_JUMBOS

/*
 * Memory management for the jumbo receive ring is a pain in the
 * butt. We need to allocate at least 9018 bytes of space per frame,
 * _and_ it has to be contiguous (unless you use the extended
 * jumbo descriptor format). Using malloc() all the time won't
 * work: malloc() allocates memory in powers of two, which means we
 * would end up wasting a considerable amount of space by allocating
 * 9K chunks. We don't have a jumbo mbuf cluster pool. Thus, we have
 * to do our own memory management.
 *
 * The driver needs to allocate a contiguous chunk of memory at boot
 * time. We then chop this up ourselves into 9K pieces and use them
 * as external mbuf storage.
 *
 * One issue here is how much memory to allocate. The jumbo ring has
 * 256 slots in it, but at 9K per slot than can consume over 2MB of
 * RAM. This is a bit much, especially considering we also need
 * RAM for the standard ring and mini ring (on the Tigon 2). To
 * save space, we only actually allocate enough memory for 64 slots
 * by default, which works out to between 500 and 600K. This can
 * be tuned by changing a #define in if_tireg.h.
 */
static int
ti_alloc_jumbo_mem(sc)
	struct ti_softc *sc;
{
	caddr_t ptr;
	register int i;
	struct ti_jpool_entry *entry;

	/* Grab a big chunk o' storage. */
	sc->ti_cdata.ti_jumbo_buf = contigmalloc(TI_JMEM, M_DEVBUF, M_NOWAIT,
	    0, 0xffffffff, PAGE_SIZE, 0);

	if (sc->ti_cdata.ti_jumbo_buf == NULL) {
		printf("ti%d: no memory for jumbo buffers!\n", sc->ti_unit);
		return (ENOBUFS);
	}

	SLIST_INIT(&sc->ti_jfree_listhead);
	SLIST_INIT(&sc->ti_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->ti_cdata.ti_jumbo_buf;
	for (i = 0; i < TI_JSLOTS; i++) {
		sc->ti_cdata.ti_jslots[i] = ptr;
		ptr += TI_JLEN;
		entry = malloc(sizeof(struct ti_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			/* Unwind: release the whole pool on any failure. */
			contigfree(sc->ti_cdata.ti_jumbo_buf, TI_JMEM,
			    M_DEVBUF);
			sc->ti_cdata.ti_jumbo_buf = NULL;
			printf("ti%d: no memory for jumbo "
			    "buffer queue!\n", sc->ti_unit);
			return (ENOBUFS);
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc->ti_jfree_listhead,
		    entry, jpool_entries);
	}

	return (0);
}

/*
 * Allocate a jumbo buffer.  Moves the pool entry from the free list
 * to the in-use list and returns the slot's KVA, or NULL if the pool
 * is exhausted.
 */
static void *ti_jalloc(sc)
	struct ti_softc *sc;
{
	struct ti_jpool_entry *entry;

	entry = SLIST_FIRST(&sc->ti_jfree_listhead);

	if (entry == NULL) {
		printf("ti%d: no free jumbo buffers\n", sc->ti_unit);
		return (NULL);
	}

	SLIST_REMOVE_HEAD(&sc->ti_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->ti_jinuse_listhead, entry, jpool_entries);
	return (sc->ti_cdata.ti_jslots[entry->slot]);
}

/*
 * Release a jumbo buffer.
 */
static void
ti_jfree(buf, args)
	void *buf;
	void *args;
{
	struct ti_softc *sc;
	int i;
	struct ti_jpool_entry *entry;

	/* Extract the softc struct pointer. */
	sc = (struct ti_softc *)args;

	if (sc == NULL)
		panic("ti_jfree: didn't get softc pointer!");

	/* calculate the slot this buffer belongs to */
	i = ((vm_offset_t)buf
	    - (vm_offset_t)sc->ti_cdata.ti_jumbo_buf) / TI_JLEN;

	if ((i < 0) || (i >= TI_JSLOTS))
		panic("ti_jfree: asked to free buffer that we don't manage!");

	/*
	 * NOTE(review): any entry from the in-use list is recycled here
	 * (its slot field is overwritten), so entries are not matched to
	 * specific buffers -- only the counts matter.
	 */
	entry = SLIST_FIRST(&sc->ti_jinuse_listhead);
	if (entry == NULL)
		panic("ti_jfree: buffer not in use!");
	entry->slot = i;
	SLIST_REMOVE_HEAD(&sc->ti_jinuse_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->ti_jfree_listhead, entry, jpool_entries);
}
#endif /* TI_PRIVATE_JUMBOS */

/*
 * Initialize a standard receive ring descriptor.  If m is NULL a new
 * cluster mbuf is allocated; otherwise the caller's mbuf is recycled.
 */
static int
ti_newbuf_std(sc, i, m)
	struct ti_softc *sc;
	int i;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	struct ti_rx_desc *r;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL)
			return (ENOBUFS);
		MCLGET(m_new, M_DONTWAIT);
		if (!(m_new->m_flags & M_EXT)) {
			m_freem(m_new);
			return (ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
	} else {
		/* Recycle the caller's mbuf: reset data to cluster start. */
		m_new = m;
		m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;
		m_new->m_data = m_new->m_ext.ext_buf;
	}

	/* Keep the IP header 32-bit aligned within the frame. */
	m_adj(m_new, ETHER_ALIGN);
	sc->ti_cdata.ti_rx_std_chain[i] = m_new;
	r = &sc->ti_rdata->ti_rx_std_ring[i];
	TI_HOSTADDR(r->ti_addr) = vtophys(mtod(m_new, caddr_t));
	r->ti_type = TI_BDTYPE_RECV_BD;
	r->ti_flags = 0;
	if (sc->ti_ifp->if_hwassist)
		r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
	r->ti_len = m_new->m_len;
	r->ti_idx = i;

	return (0);
}

/*
 * Initialize a mini receive ring descriptor. This only applies to
 * the Tigon 2.
 */
static int
ti_newbuf_mini(sc, i, m)
	struct ti_softc *sc;
	int i;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	struct ti_rx_desc *r;

	if (m == NULL) {
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return (ENOBUFS);
		}
		m_new->m_len = m_new->m_pkthdr.len = MHLEN;
	} else {
		/* Recycle: mini buffers use the mbuf's internal storage. */
		m_new = m;
		m_new->m_data = m_new->m_pktdat;
		m_new->m_len = m_new->m_pkthdr.len = MHLEN;
	}

	m_adj(m_new, ETHER_ALIGN);
	r = &sc->ti_rdata->ti_rx_mini_ring[i];
	sc->ti_cdata.ti_rx_mini_chain[i] = m_new;
	TI_HOSTADDR(r->ti_addr) = vtophys(mtod(m_new, caddr_t));
	r->ti_type = TI_BDTYPE_RECV_BD;
	r->ti_flags = TI_BDFLAG_MINI_RING;
	if (sc->ti_ifp->if_hwassist)
		r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM;
	r->ti_len = m_new->m_len;
	r->ti_idx = i;

	return (0);
}

#ifdef TI_PRIVATE_JUMBOS

/*
 * Initialize a jumbo receive ring descriptor. This allocates
 * a jumbo buffer from the pool managed internally by the driver.
 */
static int
ti_newbuf_jumbo(sc, i, m)
	struct ti_softc *sc;
	int i;
	struct mbuf *m;
{
	struct mbuf *m_new = NULL;
	struct ti_rx_desc *r;

	if (m == NULL) {
		caddr_t *buf = NULL;

		/* Allocate the mbuf. */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			return (ENOBUFS);
		}

		/* Allocate the jumbo buffer */
		buf = ti_jalloc(sc);
		if (buf == NULL) {
			m_freem(m_new);
			printf("ti%d: jumbo allocation failed "
			    "-- packet dropped!\n", sc->ti_unit);
			return (ENOBUFS);
		}

		/* Attach the buffer to the mbuf. */
		m_new->m_data = (void *) buf;
		m_new->m_len = m_new->m_pkthdr.len = TI_JUMBO_FRAMELEN;
		/* ti_jfree() returns the slot to the pool on last unref. */
		MEXTADD(m_new, buf, TI_JUMBO_FRAMELEN, ti_jfree,
		    (struct ti_softc *)sc, 0, EXT_NET_DRV);
	} else {
		m_new = m;
		m_new->m_data = m_new->m_ext.ext_buf;
		m_new->m_ext.ext_size = TI_JUMBO_FRAMELEN;
	}

	m_adj(m_new, ETHER_ALIGN);
	/* Set up the descriptor.
*/ r = &sc->ti_rdata->ti_rx_jumbo_ring[i]; sc->ti_cdata.ti_rx_jumbo_chain[i] = m_new; TI_HOSTADDR(r->ti_addr) = vtophys(mtod(m_new, caddr_t)); r->ti_type = TI_BDTYPE_RECV_JUMBO_BD; r->ti_flags = TI_BDFLAG_JUMBO_RING; if (sc->ti_ifp->if_hwassist) r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM | TI_BDFLAG_IP_CKSUM; r->ti_len = m_new->m_len; r->ti_idx = i; return (0); } #else #include #if (PAGE_SIZE == 4096) #define NPAYLOAD 2 #else #define NPAYLOAD 1 #endif #define TCP_HDR_LEN (52 + sizeof(struct ether_header)) #define UDP_HDR_LEN (28 + sizeof(struct ether_header)) #define NFS_HDR_LEN (UDP_HDR_LEN) static int HDR_LEN = TCP_HDR_LEN; /* * Initialize a jumbo receive ring descriptor. This allocates * a jumbo buffer from the pool managed internally by the driver. */ static int ti_newbuf_jumbo(sc, idx, m_old) struct ti_softc *sc; int idx; struct mbuf *m_old; { struct mbuf *cur, *m_new = NULL; struct mbuf *m[3] = {NULL, NULL, NULL}; struct ti_rx_desc_ext *r; vm_page_t frame; static int color; /* 1 extra buf to make nobufs easy*/ struct sf_buf *sf[3] = {NULL, NULL, NULL}; int i; if (m_old != NULL) { m_new = m_old; cur = m_old->m_next; for (i = 0; i <= NPAYLOAD; i++){ m[i] = cur; cur = cur->m_next; } } else { /* Allocate the mbufs. 
 */
		MGETHDR(m_new, M_DONTWAIT, MT_DATA);
		if (m_new == NULL) {
			printf("ti%d: mbuf allocation failed "
			    "-- packet dropped!\n", sc->ti_unit);
			goto nobufs;
		}

		/* Last chain slot is a regular cluster mbuf. */
		MGET(m[NPAYLOAD], M_DONTWAIT, MT_DATA);
		if (m[NPAYLOAD] == NULL) {
			printf("ti%d: cluster mbuf allocation failed "
			    "-- packet dropped!\n", sc->ti_unit);
			goto nobufs;
		}
		MCLGET(m[NPAYLOAD], M_DONTWAIT);
		if ((m[NPAYLOAD]->m_flags & M_EXT) == 0) {
			printf("ti%d: mbuf allocation failed "
			    "-- packet dropped!\n", sc->ti_unit);
			goto nobufs;
		}
		m[NPAYLOAD]->m_len = MCLBYTES;

		/* Back each payload mbuf with a wired, sf_buf-mapped page. */
		for (i = 0; i < NPAYLOAD; i++){
			MGET(m[i], M_DONTWAIT, MT_DATA);
			if (m[i] == NULL) {
				printf("ti%d: mbuf allocation failed "
				    "-- packet dropped!\n", sc->ti_unit);
				goto nobufs;
			}
			frame = vm_page_alloc(NULL, color++,
			    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
			    VM_ALLOC_WIRED);
			if (frame == NULL) {
				printf("ti%d: buffer allocation failed "
				    "-- packet dropped!\n", sc->ti_unit);
				printf(" index %d page %d\n", idx, i);
				goto nobufs;
			}
			sf[i] = sf_buf_alloc(frame, SFB_NOWAIT);
			if (sf[i] == NULL) {
				/* Undo the page allocation before bailing. */
				vm_page_lock_queues();
				vm_page_unwire(frame, 0);
				vm_page_free(frame);
				vm_page_unlock_queues();
				printf("ti%d: buffer allocation failed "
				    "-- packet dropped!\n", sc->ti_unit);
				printf(" index %d page %d\n", idx, i);
				goto nobufs;
			}
		}
		for (i = 0; i < NPAYLOAD; i++){
			/* Attach the buffer to the mbuf. */
			m[i]->m_data = (void *)sf_buf_kva(sf[i]);
			m[i]->m_len = PAGE_SIZE;
			MEXTADD(m[i], sf_buf_kva(sf[i]), PAGE_SIZE,
			    sf_buf_mext, sf[i], 0, EXT_DISPOSABLE);
			m[i]->m_next = m[i+1];
		}
		/* link the buffers to the header */
		m_new->m_next = m[0];
		m_new->m_data += ETHER_ALIGN;
		if (sc->ti_hdrsplit)
			m_new->m_len = MHLEN - ETHER_ALIGN;
		else
			m_new->m_len = HDR_LEN;
		m_new->m_pkthdr.len = NPAYLOAD * PAGE_SIZE + m_new->m_len;
	}

	/* Set up the descriptor.
	 */
	r = &sc->ti_rdata->ti_rx_jumbo_ring[idx];
	sc->ti_cdata.ti_rx_jumbo_chain[idx] = m_new;
	TI_HOSTADDR(r->ti_addr0) = vtophys(mtod(m_new, caddr_t));
	r->ti_len0 = m_new->m_len;

	TI_HOSTADDR(r->ti_addr1) = vtophys(mtod(m[0], caddr_t));
	r->ti_len1 = PAGE_SIZE;

	TI_HOSTADDR(r->ti_addr2) = vtophys(mtod(m[1], caddr_t));
	r->ti_len2 = m[1]->m_ext.ext_size; /* could be PAGE_SIZE or MCLBYTES */

	if (PAGE_SIZE == 4096) {
		TI_HOSTADDR(r->ti_addr3) = vtophys(mtod(m[2], caddr_t));
		r->ti_len3 = MCLBYTES;
	} else {
		r->ti_len3 = 0;
	}

	r->ti_type = TI_BDTYPE_RECV_JUMBO_BD;

	r->ti_flags = TI_BDFLAG_JUMBO_RING|TI_RCB_FLAG_USE_EXT_RX_BD;

	if (sc->ti_ifp->if_hwassist)
		r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM|TI_BDFLAG_IP_CKSUM;

	r->ti_idx = idx;

	return (0);

nobufs:

	/*
	 * Warning! :
	 * This can only be called before the mbufs are strung together.
	 * If the mbufs are strung together, m_freem() will free the chain,
	 * so that the later mbufs will be freed multiple times.
	 */
	if (m_new)
		m_freem(m_new);

	for (i = 0; i < 3; i++) {
		if (m[i])
			m_freem(m[i]);
		if (sf[i])
			sf_buf_mext((void *)sf_buf_kva(sf[i]), sf[i]);
	}
	return (ENOBUFS);
}
#endif

/*
 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster,
 * that's 1MB or memory, which is a lot. For now, we fill only the first
 * 256 ring entries and hope that our CPU is fast enough to keep up with
 * the NIC.
 */
static int
ti_init_rx_ring_std(sc)
	struct ti_softc *sc;
{
	register int i;
	/*
	 * NOTE(review): 'cmd' looks unused here, but the TI_UPDATE_*
	 * macros may expand to reference it -- verify before removing.
	 */
	struct ti_cmd_desc cmd;

	for (i = 0; i < TI_SSLOTS; i++) {
		if (ti_newbuf_std(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	};

	/* Tell the NIC how far we filled the ring. */
	TI_UPDATE_STDPROD(sc, i - 1);
	sc->ti_std = i - 1;

	return (0);
}

static void
ti_free_rx_ring_std(sc)
	struct ti_softc *sc;
{
	register int i;

	for (i = 0; i < TI_STD_RX_RING_CNT; i++) {
		if (sc->ti_cdata.ti_rx_std_chain[i] != NULL) {
			m_freem(sc->ti_cdata.ti_rx_std_chain[i]);
			sc->ti_cdata.ti_rx_std_chain[i] = NULL;
		}
		bzero((char *)&sc->ti_rdata->ti_rx_std_ring[i],
		    sizeof(struct ti_rx_desc));
	}
}

static int
ti_init_rx_ring_jumbo(sc)
	struct ti_softc *sc;
{
	register int i;
	struct ti_cmd_desc cmd;

	for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
		if (ti_newbuf_jumbo(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	};

	TI_UPDATE_JUMBOPROD(sc, i - 1);
	sc->ti_jumbo = i - 1;

	return (0);
}

static void
ti_free_rx_ring_jumbo(sc)
	struct ti_softc *sc;
{
	register int i;

	for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) {
		if (sc->ti_cdata.ti_rx_jumbo_chain[i] != NULL) {
			m_freem(sc->ti_cdata.ti_rx_jumbo_chain[i]);
			sc->ti_cdata.ti_rx_jumbo_chain[i] = NULL;
		}
		/*
		 * NOTE(review): this zeroes sizeof(struct ti_rx_desc)
		 * per slot; in the !TI_PRIVATE_JUMBOS build the jumbo
		 * ring holds ti_rx_desc_ext entries -- confirm the
		 * descriptor size matches the ring's element type.
		 */
		bzero((char *)&sc->ti_rdata->ti_rx_jumbo_ring[i],
		    sizeof(struct ti_rx_desc));
	}
}

static int
ti_init_rx_ring_mini(sc)
	struct ti_softc *sc;
{
	register int i;

	for (i = 0; i < TI_MSLOTS; i++) {
		if (ti_newbuf_mini(sc, i, NULL) == ENOBUFS)
			return (ENOBUFS);
	};

	TI_UPDATE_MINIPROD(sc, i - 1);
	sc->ti_mini = i - 1;

	return (0);
}

static void
ti_free_rx_ring_mini(sc)
	struct ti_softc *sc;
{
	register int i;

	for (i = 0; i < TI_MINI_RX_RING_CNT; i++) {
		if (sc->ti_cdata.ti_rx_mini_chain[i] != NULL) {
			m_freem(sc->ti_cdata.ti_rx_mini_chain[i]);
			sc->ti_cdata.ti_rx_mini_chain[i] = NULL;
		}
		bzero((char *)&sc->ti_rdata->ti_rx_mini_ring[i],
		    sizeof(struct ti_rx_desc));
	}
}

static void
ti_free_tx_ring(sc)
	struct ti_softc *sc;
{
	register int i;

	if (sc->ti_rdata->ti_tx_ring == NULL)
		return;

	for (i = 0; i < TI_TX_RING_CNT; i++) {
		if (sc->ti_cdata.ti_tx_chain[i] != NULL) {
			m_freem(sc->ti_cdata.ti_tx_chain[i]);
			sc->ti_cdata.ti_tx_chain[i] = NULL;
		}
		bzero((char *)&sc->ti_rdata->ti_tx_ring[i],
		    sizeof(struct ti_tx_desc));
	}
}

static int
ti_init_tx_ring(sc)
	struct ti_softc *sc;
{
	sc->ti_txcnt = 0;
	sc->ti_tx_saved_considx = 0;
	CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, 0);
	return (0);
}

/*
 * The Tigon 2 firmware has a new way to add/delete multicast addresses,
 * but we have to support the old way too so that Tigon 1 cards will
 * work.
 */
static void
ti_add_mcast(sc, addr)
	struct ti_softc *sc;
	struct ether_addr *addr;
{
	struct ti_cmd_desc cmd;
	u_int16_t *m;
	u_int32_t ext[2] = {0, 0};

	m = (u_int16_t *)&addr->octet[0];

	switch (sc->ti_hwrev) {
	case TI_HWREV_TIGON:
		/* Tigon 1: write the address into dedicated registers. */
		CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0]));
		CSR_WRITE_4(sc, TI_GCR_MAR1,
		    (htons(m[1]) << 16) | htons(m[2]));
		TI_DO_CMD(TI_CMD_ADD_MCAST_ADDR, 0, 0);
		break;
	case TI_HWREV_TIGON_II:
		/* Tigon 2: pass the address via an extended command. */
		ext[0] = htons(m[0]);
		ext[1] = (htons(m[1]) << 16) | htons(m[2]);
		TI_DO_CMD_EXT(TI_CMD_EXT_ADD_MCAST, 0, 0, (caddr_t)&ext, 2);
		break;
	default:
		printf("ti%d: unknown hwrev\n", sc->ti_unit);
		break;
	}
}

static void
ti_del_mcast(sc, addr)
	struct ti_softc *sc;
	struct ether_addr *addr;
{
	struct ti_cmd_desc cmd;
	u_int16_t *m;
	u_int32_t ext[2] = {0, 0};

	m = (u_int16_t *)&addr->octet[0];

	switch (sc->ti_hwrev) {
	case TI_HWREV_TIGON:
		CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0]));
		CSR_WRITE_4(sc, TI_GCR_MAR1,
		    (htons(m[1]) << 16) | htons(m[2]));
		TI_DO_CMD(TI_CMD_DEL_MCAST_ADDR, 0, 0);
		break;
	case TI_HWREV_TIGON_II:
		ext[0] = htons(m[0]);
		ext[1] = (htons(m[1]) << 16) | htons(m[2]);
		TI_DO_CMD_EXT(TI_CMD_EXT_DEL_MCAST, 0, 0, (caddr_t)&ext, 2);
		break;
	default:
		printf("ti%d: unknown hwrev\n", sc->ti_unit);
		break;
	}
}

/*
 * Configure the Tigon's multicast address filter.
 *
 * The actual multicast table management is a bit of a pain, thanks to
 * slight brain damage on the part of both Alteon and us.
With our * multicast code, we are only alerted when the multicast address table * changes and at that point we only have the current list of addresses: * we only know the current state, not the previous state, so we don't * actually know what addresses were removed or added. The firmware has * state, but we can't get our grubby mits on it, and there is no 'delete * all multicast addresses' command. Hence, we have to maintain our own * state so we know what addresses have been programmed into the NIC at * any given time. */ static void ti_setmulti(sc) struct ti_softc *sc; { struct ifnet *ifp; struct ifmultiaddr *ifma; struct ti_cmd_desc cmd; struct ti_mc_entry *mc; u_int32_t intrs; ifp = sc->ti_ifp; if (ifp->if_flags & IFF_ALLMULTI) { TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_ENB, 0); return; } else { TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_DIS, 0); } /* Disable interrupts. */ intrs = CSR_READ_4(sc, TI_MB_HOSTINTR); CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); /* First, zot all the existing filters. */ while (SLIST_FIRST(&sc->ti_mc_listhead) != NULL) { mc = SLIST_FIRST(&sc->ti_mc_listhead); ti_del_mcast(sc, &mc->mc_addr); SLIST_REMOVE_HEAD(&sc->ti_mc_listhead, mc_entries); free(mc, M_DEVBUF); } /* Now program new ones. */ + IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; mc = malloc(sizeof(struct ti_mc_entry), M_DEVBUF, M_NOWAIT); if (mc == NULL) { if_printf(ifp, "no memory for mcast filter entry\n"); continue; } bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), (char *)&mc->mc_addr, ETHER_ADDR_LEN); SLIST_INSERT_HEAD(&sc->ti_mc_listhead, mc, mc_entries); ti_add_mcast(sc, &mc->mc_addr); } + IF_ADDR_UNLOCK(ifp); /* Re-enable interrupts. */ CSR_WRITE_4(sc, TI_MB_HOSTINTR, intrs); } /* * Check to see if the BIOS has configured us for a 64 bit slot when * we aren't actually in one. 
 * If we detect this condition, we can work
 * around it on the Tigon 2 by setting a bit in the PCI state register,
 * but for the Tigon 1 we must give up and abort the interface attach.
 */
static int
ti_64bitslot_war(sc)
	struct ti_softc *sc;
{
	if (!(CSR_READ_4(sc, TI_PCI_STATE) & TI_PCISTATE_32BIT_BUS)) {
		/* Probe with a test pattern at 0x600/0x604. */
		CSR_WRITE_4(sc, 0x600, 0);
		CSR_WRITE_4(sc, 0x604, 0);
		CSR_WRITE_4(sc, 0x600, 0x5555AAAA);
		if (CSR_READ_4(sc, 0x604) == 0x5555AAAA) {
			if (sc->ti_hwrev == TI_HWREV_TIGON)
				return (EINVAL);
			else {
				TI_SETBIT(sc, TI_PCI_STATE,
				    TI_PCISTATE_32BIT_BUS);
				return (0);
			}
		}
	}

	return (0);
}

/*
 * Do endian, PCI and DMA initialization. Also check the on-board ROM
 * self-test results.
 */
static int
ti_chipinit(sc)
	struct ti_softc *sc;
{
	u_int32_t cacheline;
	u_int32_t pci_writemax = 0;
	u_int32_t hdrsplit;

	/* Initialize link to down state. */
	sc->ti_linkstat = TI_EV_CODE_LINK_DOWN;

	if (sc->ti_ifp->if_capenable & IFCAP_HWCSUM)
		sc->ti_ifp->if_hwassist = TI_CSUM_FEATURES;
	else
		sc->ti_ifp->if_hwassist = 0;

	/* Set endianness before we access any non-PCI registers. */
#if BYTE_ORDER == BIG_ENDIAN
	CSR_WRITE_4(sc, TI_MISC_HOST_CTL,
	    TI_MHC_BIGENDIAN_INIT | (TI_MHC_BIGENDIAN_INIT << 24));
#else
	CSR_WRITE_4(sc, TI_MISC_HOST_CTL,
	    TI_MHC_LITTLEENDIAN_INIT | (TI_MHC_LITTLEENDIAN_INIT << 24));
#endif

	/* Check the ROM failed bit to see if self-tests passed. */
	if (CSR_READ_4(sc, TI_CPU_STATE) & TI_CPUSTATE_ROMFAIL) {
		printf("ti%d: board self-diagnostics failed!\n", sc->ti_unit);
		return (ENODEV);
	}

	/* Halt the CPU. */
	TI_SETBIT(sc, TI_CPU_STATE, TI_CPUSTATE_HALT);

	/* Figure out the hardware revision. */
	switch (CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_CHIP_REV_MASK) {
	case TI_REV_TIGON_I:
		sc->ti_hwrev = TI_HWREV_TIGON;
		break;
	case TI_REV_TIGON_II:
		sc->ti_hwrev = TI_HWREV_TIGON_II;
		break;
	default:
		printf("ti%d: unsupported chip revision\n", sc->ti_unit);
		return (ENODEV);
	}

	/* Do special setup for Tigon 2.
	 */
	if (sc->ti_hwrev == TI_HWREV_TIGON_II) {
		TI_SETBIT(sc, TI_CPU_CTL_B, TI_CPUSTATE_HALT);
		TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_SRAM_BANK_512K);
		TI_SETBIT(sc, TI_MISC_CONF, TI_MCR_SRAM_SYNCHRONOUS);
	}

	/*
	 * We don't have firmware source for the Tigon 1, so Tigon 1 boards
	 * can't do header splitting.
	 */
#ifdef TI_JUMBO_HDRSPLIT
	if (sc->ti_hwrev != TI_HWREV_TIGON)
		sc->ti_hdrsplit = 1;
	else
		printf("ti%d: can't do header splitting on a Tigon I board\n",
		    sc->ti_unit);
#endif /* TI_JUMBO_HDRSPLIT */

	/* Set up the PCI state register. */
	CSR_WRITE_4(sc, TI_PCI_STATE, TI_PCI_READ_CMD|TI_PCI_WRITE_CMD);
	if (sc->ti_hwrev == TI_HWREV_TIGON_II) {
		TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_USE_MEM_RD_MULT);
	}

	/* Clear the read/write max DMA parameters. */
	TI_CLRBIT(sc, TI_PCI_STATE, (TI_PCISTATE_WRITE_MAXDMA|
	    TI_PCISTATE_READ_MAXDMA));

	/* Get cache line size. */
	cacheline = CSR_READ_4(sc, TI_PCI_BIST) & 0xFF;

	/*
	 * If the system has set enabled the PCI memory write
	 * and invalidate command in the command register, set
	 * the write max parameter accordingly. This is necessary
	 * to use MWI with the Tigon 2.
	 */
	if (CSR_READ_4(sc, TI_PCI_CMDSTAT) & PCIM_CMD_MWIEN) {
		switch (cacheline) {
		case 1:
		case 4:
		case 8:
		case 16:
		case 32:
		case 64:
			break;
		default:
			/* Disable PCI memory write and invalidate. */
			if (bootverbose)
				printf("ti%d: cache line size %d not "
				    "supported; disabling PCI MWI\n",
				    sc->ti_unit, cacheline);
			CSR_WRITE_4(sc, TI_PCI_CMDSTAT, CSR_READ_4(sc,
			    TI_PCI_CMDSTAT) & ~PCIM_CMD_MWIEN);
			break;
		}
	}

#ifdef __brokenalpha__
	/*
	 * From the Alteon sample driver:
	 * Must insure that we do not cross an 8K (bytes) boundary
	 * for DMA reads. Our highest limit is 1K bytes. This is a
	 * restriction on some ALPHA platforms with early revision
	 * 21174 PCI chipsets, such as the AlphaPC 164lx
	 */
	TI_SETBIT(sc, TI_PCI_STATE, pci_writemax|TI_PCI_READMAX_1024);
#else
	TI_SETBIT(sc, TI_PCI_STATE, pci_writemax);
#endif

	/* This sets the min dma param all the way up (0xff).
	 */
	TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_MINDMA);

	if (sc->ti_hdrsplit)
		hdrsplit = TI_OPMODE_JUMBO_HDRSPLIT;
	else
		hdrsplit = 0;

	/* Configure DMA variables. */
#if BYTE_ORDER == BIG_ENDIAN
	CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_BD |
	    TI_OPMODE_BYTESWAP_DATA | TI_OPMODE_WORDSWAP_BD |
	    TI_OPMODE_WARN_ENB | TI_OPMODE_FATAL_ENB |
	    TI_OPMODE_DONT_FRAG_JUMBO | hdrsplit);
#else /* BYTE_ORDER */
	CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_DATA|
	    TI_OPMODE_WORDSWAP_BD|TI_OPMODE_DONT_FRAG_JUMBO|
	    TI_OPMODE_WARN_ENB|TI_OPMODE_FATAL_ENB | hdrsplit);
#endif /* BYTE_ORDER */

	/*
	 * Only allow 1 DMA channel to be active at a time.
	 * I don't think this is a good idea, but without it
	 * the firmware racks up lots of nicDmaReadRingFull
	 * errors. This is not compatible with hardware checksums.
	 */
	if (sc->ti_ifp->if_hwassist == 0)
		TI_SETBIT(sc, TI_GCR_OPMODE, TI_OPMODE_1_DMA_ACTIVE);

	/* Recommended settings from Tigon manual. */
	CSR_WRITE_4(sc, TI_GCR_DMA_WRITECFG, TI_DMA_STATE_THRESH_8W);
	CSR_WRITE_4(sc, TI_GCR_DMA_READCFG, TI_DMA_STATE_THRESH_8W);

	if (ti_64bitslot_war(sc)) {
		printf("ti%d: bios thinks we're in a 64 bit slot, "
		    "but we aren't", sc->ti_unit);
		return (EINVAL);
	}

	return (0);
}

#define	TI_RD_OFF(x)	offsetof(struct ti_ring_data, x)

/*
 * Initialize the general information block and firmware, and
 * start the CPU(s) running.
 */
static int
ti_gibinit(sc)
	struct ti_softc *sc;
{
	struct ti_rcb *rcb;
	int i;
	struct ifnet *ifp;
	uint32_t rdphys;

	ifp = sc->ti_ifp;
	rdphys = sc->ti_rdata_phys;

	/* Disable interrupts for now. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);

	/*
	 * Tell the chip where to find the general information block.
	 * While this struct could go into >4GB memory, we allocate it in a
	 * single slab with the other descriptors, and those don't seem to
	 * support being located in a 64-bit region.
	 */
	CSR_WRITE_4(sc, TI_GCR_GENINFO_HI, 0);
	CSR_WRITE_4(sc, TI_GCR_GENINFO_LO, rdphys + TI_RD_OFF(ti_info));

	/* Load the firmware into SRAM.
 */
	ti_loadfw(sc);

	/* Set up the contents of the general info and ring control blocks. */

	/* Set up the event ring and producer pointer. */
	rcb = &sc->ti_rdata->ti_info.ti_ev_rcb;

	TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_event_ring);
	rcb->ti_flags = 0;
	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_ev_prodidx_ptr) =
	    rdphys + TI_RD_OFF(ti_ev_prodidx_r);
	sc->ti_ev_prodidx.ti_idx = 0;
	CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, 0);
	sc->ti_ev_saved_considx = 0;

	/* Set up the command ring and producer mailbox. */
	rcb = &sc->ti_rdata->ti_info.ti_cmd_rcb;

	/* The command ring is mapped directly in NIC shared memory. */
	sc->ti_rdata->ti_cmd_ring =
	    (struct ti_cmd_desc *)(sc->ti_vhandle + TI_GCR_CMDRING);
	TI_HOSTADDR(rcb->ti_hostaddr) = TI_GCR_NIC_ADDR(TI_GCR_CMDRING);
	rcb->ti_flags = 0;
	rcb->ti_max_len = 0;
	for (i = 0; i < TI_CMD_RING_CNT; i++) {
		CSR_WRITE_4(sc, TI_GCR_CMDRING + (i * 4), 0);
	}
	CSR_WRITE_4(sc, TI_GCR_CMDCONS_IDX, 0);
	CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, 0);
	sc->ti_cmd_saved_prodidx = 0;

	/*
	 * Assign the address of the stats refresh buffer.
	 * We re-use the current stats buffer for this to
	 * conserve memory.
	 */
	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_refresh_stats_ptr) =
	    rdphys + TI_RD_OFF(ti_info.ti_stats);

	/* Set up the standard receive ring. */
	rcb = &sc->ti_rdata->ti_info.ti_std_rx_rcb;
	TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_rx_std_ring);
	rcb->ti_max_len = TI_FRAMELEN;
	rcb->ti_flags = 0;
	if (sc->ti_ifp->if_hwassist)
		rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
		    TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
	rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;

	/* Set up the jumbo receive ring.
	 */
	rcb = &sc->ti_rdata->ti_info.ti_jumbo_rx_rcb;
	TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_rx_jumbo_ring);

#ifdef TI_PRIVATE_JUMBOS
	rcb->ti_max_len = TI_JUMBO_FRAMELEN;
	rcb->ti_flags = 0;
#else
	/* sf_buf path: page-sized segments, extended rx descriptors. */
	rcb->ti_max_len = PAGE_SIZE;
	rcb->ti_flags = TI_RCB_FLAG_USE_EXT_RX_BD;
#endif
	if (sc->ti_ifp->if_hwassist)
		rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
		    TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
	rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;

	/*
	 * Set up the mini ring. Only activated on the
	 * Tigon 2 but the slot in the config block is
	 * still there on the Tigon 1.
	 */
	rcb = &sc->ti_rdata->ti_info.ti_mini_rx_rcb;
	TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_rx_mini_ring);
	rcb->ti_max_len = MHLEN - ETHER_ALIGN;
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		rcb->ti_flags = TI_RCB_FLAG_RING_DISABLED;
	else
		rcb->ti_flags = 0;
	if (sc->ti_ifp->if_hwassist)
		rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
		    TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
	rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;

	/*
	 * Set up the receive return ring.
	 */
	rcb = &sc->ti_rdata->ti_info.ti_return_rcb;
	TI_HOSTADDR(rcb->ti_hostaddr) = rdphys + TI_RD_OFF(ti_rx_return_ring);
	rcb->ti_flags = 0;
	rcb->ti_max_len = TI_RETURN_RING_CNT;
	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_return_prodidx_ptr) =
	    rdphys + TI_RD_OFF(ti_return_prodidx_r);

	/*
	 * Set up the tx ring. Note: for the Tigon 2, we have the option
	 * of putting the transmit ring in the host's address space and
	 * letting the chip DMA it instead of leaving the ring in the NIC's
	 * memory and accessing it through the shared memory region. We
	 * do this for the Tigon 2, but it doesn't work on the Tigon 1,
	 * so we have to revert to the shared memory scheme if we detect
	 * a Tigon 1 chip.
	 */
	CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE);
	if (sc->ti_hwrev == TI_HWREV_TIGON) {
		sc->ti_rdata->ti_tx_ring_nic =
		    (struct ti_tx_desc *)(sc->ti_vhandle + TI_WINDOW);
	}
	bzero((char *)sc->ti_rdata->ti_tx_ring,
	    TI_TX_RING_CNT * sizeof(struct ti_tx_desc));
	rcb = &sc->ti_rdata->ti_info.ti_tx_rcb;
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		rcb->ti_flags = 0;
	else
		rcb->ti_flags = TI_RCB_FLAG_HOST_RING;
	rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
	if (sc->ti_ifp->if_hwassist)
		rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM |
		    TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
	rcb->ti_max_len = TI_TX_RING_CNT;
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		TI_HOSTADDR(rcb->ti_hostaddr) = TI_TX_RING_BASE;
	else
		TI_HOSTADDR(rcb->ti_hostaddr) = rdphys +
		    TI_RD_OFF(ti_tx_ring);
	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_tx_considx_ptr) =
	    rdphys + TI_RD_OFF(ti_tx_considx_r);

	/* Set up tuneables */
#if 0
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS,
		    (sc->ti_rx_coal_ticks / 10));
	else
#endif
		CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, sc->ti_rx_coal_ticks);
	CSR_WRITE_4(sc, TI_GCR_TX_COAL_TICKS, sc->ti_tx_coal_ticks);
	CSR_WRITE_4(sc, TI_GCR_STAT_TICKS, sc->ti_stat_ticks);
	CSR_WRITE_4(sc, TI_GCR_RX_MAX_COAL_BD, sc->ti_rx_max_coal_bds);
	CSR_WRITE_4(sc, TI_GCR_TX_MAX_COAL_BD, sc->ti_tx_max_coal_bds);
	CSR_WRITE_4(sc, TI_GCR_TX_BUFFER_RATIO, sc->ti_tx_buf_ratio);

	/* Turn interrupts on. */
	CSR_WRITE_4(sc, TI_GCR_MASK_INTRS, 0);
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);

	/* Start CPU. */
	TI_CLRBIT(sc, TI_CPU_STATE, (TI_CPUSTATE_HALT|TI_CPUSTATE_STEP));

	return (0);
}

/*
 * busdma load callback: record the single physical segment address of
 * the ring-data slab.
 */
static void
ti_rdata_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct ti_softc *sc;

	sc = arg;

	if (error || nseg != 1)
		return;

	/*
	 * All of the Tigon data structures need to live at <4GB. This
	 * cast is fine since busdma was told about this constraint.
	 */
	sc->ti_rdata_phys = (uint32_t)segs[0].ds_addr;
	return;
}

/*
 * Probe for a Tigon chip.
 * Check the PCI vendor and device IDs
 * against our list and return its name if we find a match.
 */
static int
ti_probe(dev)
	device_t dev;
{
	struct ti_type *t;

	t = ti_devs;

	while (t->ti_name != NULL) {
		if ((pci_get_vendor(dev) == t->ti_vid) &&
		    (pci_get_device(dev) == t->ti_did)) {
			device_set_desc(dev, t->ti_name);
			return (BUS_PROBE_DEFAULT);
		}
		t++;
	}

	return (ENXIO);
}

/*
 * Attach routine: map registers and interrupt, initialize the chip,
 * read the station address, allocate DMA resources, and register the
 * network interface.  On any failure 'error' is set and the fail:
 * path lets ti_detach() release whatever was allocated.
 */
static int
ti_attach(dev)
	device_t dev;
{
	struct ifnet *ifp;
	struct ti_softc *sc;
	int unit, error = 0, rid;
	u_char eaddr[6];

	sc = device_get_softc(dev);
	unit = device_get_unit(dev);
	sc->ti_unit = unit;

	mtx_init(&sc->ti_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF | MTX_RECURSE);
	ifmedia_init(&sc->ifmedia, IFM_IMASK, ti_ifmedia_upd, ti_ifmedia_sts);
	ifp = sc->ti_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		printf("ti%d: can not if_alloc()\n", sc->ti_unit);
		error = ENOSPC;
		goto fail;
	}
	sc->ti_ifp->if_capabilities = IFCAP_HWCSUM |
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU;
	sc->ti_ifp->if_capenable = sc->ti_ifp->if_capabilities;

	/*
	 * Map control/status registers.
	 */
	pci_enable_busmaster(dev);

	rid = TI_PCI_LOMEM;
	sc->ti_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE|PCI_RF_DENSE);

	if (sc->ti_res == NULL) {
		printf ("ti%d: couldn't map memory\n", unit);
		error = ENXIO;
		goto fail;
	}

	sc->ti_btag = rman_get_bustag(sc->ti_res);
	sc->ti_bhandle = rman_get_bushandle(sc->ti_res);
	sc->ti_vhandle = (vm_offset_t)rman_get_virtual(sc->ti_res);

	/* Allocate interrupt */
	rid = 0;

	sc->ti_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);

	if (sc->ti_irq == NULL) {
		printf("ti%d: couldn't map interrupt\n", unit);
		error = ENXIO;
		goto fail;
	}

	if (ti_chipinit(sc)) {
		printf("ti%d: chip initialization failed\n", sc->ti_unit);
		error = ENXIO;
		goto fail;
	}

	/* Zero out the NIC's on-board SRAM. */
	ti_mem(sc, 0x2000, 0x100000 - 0x2000, NULL);

	/* Init again -- zeroing memory may have clobbered some registers.
	 */
	if (ti_chipinit(sc)) {
		printf("ti%d: chip initialization failed\n", sc->ti_unit);
		error = ENXIO;
		goto fail;
	}

	/*
	 * Get station address from the EEPROM. Note: the manual states
	 * that the MAC address is at offset 0x8c, however the data is
	 * stored as two longwords (since that's how it's loaded into
	 * the NIC). This means the MAC address is actually preceded
	 * by two zero bytes. We need to skip over those.
	 */
	if (ti_read_eeprom(sc, eaddr,
	    TI_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) {
		printf("ti%d: failed to read station address\n", unit);
		error = ENXIO;
		goto fail;
	}

	/* Allocate the general information block and ring buffers. */
	if (bus_dma_tag_create(NULL,			/* parent */
		1, 0,			/* algnmnt, boundary */
		BUS_SPACE_MAXADDR,	/* lowaddr */
		BUS_SPACE_MAXADDR,	/* highaddr */
		NULL, NULL,		/* filter, filterarg */
		BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
		0,			/* nsegments */
		BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
		0,			/* flags */
		NULL, NULL,		/* lockfunc, lockarg */
		&sc->ti_parent_dmat) != 0) {
		printf("ti%d: Failed to allocate parent dmat\n", sc->ti_unit);
		error = ENOMEM;
		goto fail;
	}

	/* Ring data must sit below 4GB; see ti_rdata_cb(). */
	if (bus_dma_tag_create(sc->ti_parent_dmat,	/* parent */
		PAGE_SIZE, 0,		/* algnmnt, boundary */
		BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
		BUS_SPACE_MAXADDR,	/* highaddr */
		NULL, NULL,		/* filter, filterarg */
		sizeof(struct ti_ring_data),	/* maxsize */
		1,			/* nsegments */
		sizeof(struct ti_ring_data),	/* maxsegsize */
		0,			/* flags */
		NULL, NULL,		/* lockfunc, lockarg */
		&sc->ti_rdata_dmat) != 0) {
		printf("ti%d: Failed to allocate rdata dmat\n", sc->ti_unit);
		error = ENOMEM;
		goto fail;
	}

	if (bus_dmamem_alloc(sc->ti_rdata_dmat, (void**)&sc->ti_rdata,
	    BUS_DMA_NOWAIT, &sc->ti_rdata_dmamap) != 0) {
		printf("ti%d: Failed to allocate rdata memory\n",
		    sc->ti_unit);
		error = ENOMEM;
		goto fail;
	}

	if (bus_dmamap_load(sc->ti_rdata_dmat, sc->ti_rdata_dmamap,
	    sc->ti_rdata, sizeof(struct ti_ring_data), ti_rdata_cb,
	    sc, BUS_DMA_NOWAIT) != 0) {
		printf("ti%d: Failed to load rdata segments\n", sc->ti_unit);
		error = ENOMEM;
		goto fail;
	}

	bzero(sc->ti_rdata, sizeof(struct ti_ring_data));

	/* Try to allocate memory for jumbo buffers. */
#ifdef TI_PRIVATE_JUMBOS
	if (ti_alloc_jumbo_mem(sc)) {
		printf("ti%d: jumbo buffer allocation failed\n", sc->ti_unit);
		error = ENXIO;
		goto fail;
	}
#endif

	/*
	 * We really need a better way to tell a 1000baseTX card
	 * from a 1000baseSX one, since in theory there could be
	 * OEMed 1000baseTX cards from lame vendors who aren't
	 * clever enough to change the PCI ID. For the moment
	 * though, the AceNIC is the only copper card available.
	 */
	if (pci_get_vendor(dev) == ALT_VENDORID &&
	    pci_get_device(dev) == ALT_DEVICEID_ACENIC_COPPER)
		sc->ti_copper = 1;
	/* Ok, it's not the only copper card available. */
	if (pci_get_vendor(dev) == NG_VENDORID &&
	    pci_get_device(dev) == NG_DEVICEID_GA620T)
		sc->ti_copper = 1;

	/* Set default tuneable values. */
	sc->ti_stat_ticks = 2 * TI_TICKS_PER_SEC;
#if 0
	sc->ti_rx_coal_ticks = TI_TICKS_PER_SEC / 5000;
#endif
	sc->ti_rx_coal_ticks = 170;
	sc->ti_tx_coal_ticks = TI_TICKS_PER_SEC / 500;
	sc->ti_rx_max_coal_bds = 64;
#if 0
	sc->ti_tx_max_coal_bds = 128;
#endif
	sc->ti_tx_max_coal_bds = 32;
	sc->ti_tx_buf_ratio = 21;

	/* Set up ifnet structure */
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST |
	    IFF_NEEDSGIANT;
	tis[unit] = sc;
	ifp->if_ioctl = ti_ioctl;
	ifp->if_start = ti_start;
	ifp->if_watchdog = ti_watchdog;
	ifp->if_init = ti_init;
	ifp->if_mtu = ETHERMTU;
	ifp->if_snd.ifq_maxlen = TI_TX_RING_CNT - 1;

	/* Set up ifmedia support. */
	if (sc->ti_copper) {
		/*
		 * Copper cards allow manual 10/100 mode selection,
		 * but not manual 1000baseTX mode selection. Why?
		 * Because currently there's no way to specify the
		 * master/slave setting through the firmware interface,
		 * so Alteon decided to just bag it and handle it
		 * via autonegotiation.
		 */
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
		ifmedia_add(&sc->ifmedia,
		    IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
		ifmedia_add(&sc->ifmedia,
		    IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_T, 0, NULL);
		ifmedia_add(&sc->ifmedia,
		    IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
	} else {
		/* Fiber cards don't support 10/100 modes. */
		ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL);
		ifmedia_add(&sc->ifmedia,
		    IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL);
	}
	ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_AUTO);

	/*
	 * We're assuming here that card initialization is a sequential
	 * thing. If it isn't, multiple cards probing at the same time
	 * could stomp on the list of softcs here.
	 */

	/* Register the device */
	sc->dev = make_dev(&ti_cdevsw, sc->ti_unit, UID_ROOT, GID_OPERATOR,
	    0600, "ti%d", sc->ti_unit);
	sc->dev->si_drv1 = sc;

	/*
	 * Call MI attach routine.
	 */
	ether_ifattach(ifp, eaddr);

	/* Hook interrupt last to avoid having to lock softc */
	error = bus_setup_intr(dev, sc->ti_irq, INTR_TYPE_NET,
	    ti_intr, sc, &sc->ti_intrhand);

	if (error) {
		printf("ti%d: couldn't set up irq\n", unit);
		ether_ifdetach(ifp);
		if_free(ifp);
		goto fail;
	}

fail:
	if (sc && error)
		ti_detach(dev);

	return (error);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
*/ static int ti_detach(dev) device_t dev; { struct ti_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); if (sc->dev) destroy_dev(sc->dev); KASSERT(mtx_initialized(&sc->ti_mtx), ("ti mutex not initialized")); TI_LOCK(sc); ifp = sc->ti_ifp; /* These should only be active if attach succeeded */ if (device_is_attached(dev)) { ti_stop(sc); ether_ifdetach(ifp); if_free(ifp); bus_generic_detach(dev); } ifmedia_removeall(&sc->ifmedia); if (sc->ti_rdata) bus_dmamem_free(sc->ti_rdata_dmat, sc->ti_rdata, sc->ti_rdata_dmamap); if (sc->ti_rdata_dmat) bus_dma_tag_destroy(sc->ti_rdata_dmat); if (sc->ti_parent_dmat) bus_dma_tag_destroy(sc->ti_parent_dmat); if (sc->ti_intrhand) bus_teardown_intr(dev, sc->ti_irq, sc->ti_intrhand); if (sc->ti_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ti_irq); if (sc->ti_res) { bus_release_resource(dev, SYS_RES_MEMORY, TI_PCI_LOMEM, sc->ti_res); } #ifdef TI_PRIVATE_JUMBOS if (sc->ti_cdata.ti_jumbo_buf) contigfree(sc->ti_cdata.ti_jumbo_buf, TI_JMEM, M_DEVBUF); #endif if (sc->ti_rdata) contigfree(sc->ti_rdata, sizeof(struct ti_ring_data), M_DEVBUF); TI_UNLOCK(sc); mtx_destroy(&sc->ti_mtx); return (0); } #ifdef TI_JUMBO_HDRSPLIT /* * If hdr_len is 0, that means that header splitting wasn't done on * this packet for some reason. The two most likely reasons are that * the protocol isn't a supported protocol for splitting, or this * packet had a fragment offset that wasn't 0. * * The header length, if it is non-zero, will always be the length of * the headers on the packet, but that length could be longer than the * first mbuf. So we take the minimum of the two as the actual * length. 
 */
static __inline void
ti_hdr_split(struct mbuf *top, int hdr_len, int pkt_len, int idx)
{
	int i = 0;
	int lengths[4] = {0, 0, 0, 0};
	struct mbuf *m, *mp;

	/* Trim the first mbuf down to the header length, if split. */
	if (hdr_len != 0)
		top->m_len = min(hdr_len, top->m_len);
	pkt_len -= top->m_len;
	lengths[i++] = top->m_len;

	/* Trim the remaining mbufs so the chain totals pkt_len bytes. */
	mp = top;
	for (m = top->m_next; m && pkt_len; m = m->m_next) {
		m->m_len = m->m_ext.ext_size = min(m->m_len, pkt_len);
		pkt_len -= m->m_len;
		lengths[i++] = m->m_len;
		mp = m;
	}

#if 0
	if (hdr_len != 0)
		printf("got split packet: ");
	else
		printf("got non-split packet: ");
	printf("%d,%d,%d,%d = %d\n", lengths[0], lengths[1], lengths[2],
	    lengths[3],
	    lengths[0] + lengths[1] + lengths[2] + lengths[3]);
#endif

	if (pkt_len)
		panic("header splitting didn't");

	/* Free any unused mbufs past the end of the packet data. */
	if (m) {
		m_freem(m);
		mp->m_next = NULL;
	}
	if (mp->m_next != NULL)
		panic("ti_hdr_split: last mbuf in chain should be null");
}
#endif /* TI_JUMBO_HDRSPLIT */

/*
 * Frame reception handling. This is called if there's a frame
 * on the receive return list.
 *
 * Note: we have to be able to handle three possibilities here:
 * 1) the frame is from the mini receive ring (can only happen
 *    on Tigon 2 boards)
 * 2) the frame is from the jumbo receive ring
 * 3) the frame is from the standard receive ring
 */
static void
ti_rxeof(sc)
	struct ti_softc		*sc;
{
	struct ifnet		*ifp;
	struct ti_cmd_desc	cmd;

	TI_LOCK_ASSERT(sc);

	ifp = sc->ti_ifp;

	/* Drain the return ring up to the firmware's producer index. */
	while (sc->ti_rx_saved_considx != sc->ti_return_prodidx.ti_idx) {
		struct ti_rx_desc	*cur_rx;
		u_int32_t		rxidx;
		struct mbuf		*m = NULL;
		u_int16_t		vlan_tag = 0;
		int			have_tag = 0;

		cur_rx =
		    &sc->ti_rdata->ti_rx_return_ring[sc->ti_rx_saved_considx];
		rxidx = cur_rx->ti_idx;
		TI_INC(sc->ti_rx_saved_considx, TI_RETURN_RING_CNT);

		if (cur_rx->ti_flags & TI_BDFLAG_VLAN_TAG) {
			have_tag = 1;
			vlan_tag = cur_rx->ti_vlan_tag & 0xfff;
		}

		if (cur_rx->ti_flags & TI_BDFLAG_JUMBO_RING) {
			TI_INC(sc->ti_jumbo, TI_JUMBO_RX_RING_CNT);
			m = sc->ti_cdata.ti_rx_jumbo_chain[rxidx];
			sc->ti_cdata.ti_rx_jumbo_chain[rxidx] = NULL;
			/* On error or buffer shortage, recycle the mbuf. */
			if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
				ifp->if_ierrors++;
				ti_newbuf_jumbo(sc, sc->ti_jumbo, m);
				continue;
			}
			if (ti_newbuf_jumbo(sc, sc->ti_jumbo, NULL)
			    == ENOBUFS) {
				ifp->if_ierrors++;
				ti_newbuf_jumbo(sc, sc->ti_jumbo, m);
				continue;
			}
#ifdef TI_PRIVATE_JUMBOS
			m->m_len = cur_rx->ti_len;
#else /* TI_PRIVATE_JUMBOS */
#ifdef TI_JUMBO_HDRSPLIT
			if (sc->ti_hdrsplit)
				ti_hdr_split(m, TI_HOSTADDR(cur_rx->ti_addr),
				    cur_rx->ti_len, rxidx);
			else
#endif /* TI_JUMBO_HDRSPLIT */
				m_adj(m, cur_rx->ti_len - m->m_pkthdr.len);
#endif /* TI_PRIVATE_JUMBOS */
		} else if (cur_rx->ti_flags & TI_BDFLAG_MINI_RING) {
			TI_INC(sc->ti_mini, TI_MINI_RX_RING_CNT);
			m = sc->ti_cdata.ti_rx_mini_chain[rxidx];
			sc->ti_cdata.ti_rx_mini_chain[rxidx] = NULL;
			if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
				ifp->if_ierrors++;
				ti_newbuf_mini(sc, sc->ti_mini, m);
				continue;
			}
			if (ti_newbuf_mini(sc, sc->ti_mini, NULL)
			    == ENOBUFS) {
				ifp->if_ierrors++;
				ti_newbuf_mini(sc, sc->ti_mini, m);
				continue;
			}
			m->m_len = cur_rx->ti_len;
		} else {
			TI_INC(sc->ti_std, TI_STD_RX_RING_CNT);
			m = sc->ti_cdata.ti_rx_std_chain[rxidx];
			sc->ti_cdata.ti_rx_std_chain[rxidx] = NULL;
			if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
				ifp->if_ierrors++;
				ti_newbuf_std(sc, sc->ti_std, m);
				continue;
			}
			if (ti_newbuf_std(sc, sc->ti_std, NULL) == ENOBUFS) {
				ifp->if_ierrors++;
				ti_newbuf_std(sc, sc->ti_std, m);
				continue;
			}
			m->m_len = cur_rx->ti_len;
		}

		m->m_pkthdr.len = cur_rx->ti_len;
		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

		/* Pass the chip's checksum results up to the stack. */
		if (ifp->if_hwassist) {
			m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
			    CSUM_DATA_VALID;
			if ((cur_rx->ti_ip_cksum ^ 0xffff) == 0)
				m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
			m->m_pkthdr.csum_data = cur_rx->ti_tcp_udp_cksum;
		}

		/*
		 * If we received a packet with a vlan tag,
		 * tag it before passing the packet upward.
		 */
		if (have_tag)
			VLAN_INPUT_TAG(ifp, m, vlan_tag, continue);
		/* Drop the lock around if_input to avoid recursion. */
		TI_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		TI_LOCK(sc);
	}

	/* Only necessary on the Tigon 1. */
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX,
		    sc->ti_rx_saved_considx);

	TI_UPDATE_STDPROD(sc, sc->ti_std);
	TI_UPDATE_MINIPROD(sc, sc->ti_mini);
	TI_UPDATE_JUMBOPROD(sc, sc->ti_jumbo);
}

/*
 * Transmit completion: reclaim mbufs for descriptors the NIC has
 * consumed and clear OACTIVE if any descriptor was freed.
 */
static void
ti_txeof(sc)
	struct ti_softc		*sc;
{
	struct ti_tx_desc	*cur_tx = NULL;
	struct ifnet		*ifp;

	ifp = sc->ti_ifp;

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->ti_tx_saved_considx != sc->ti_tx_considx.ti_idx) {
		u_int32_t		idx = 0;

		idx = sc->ti_tx_saved_considx;
		if (sc->ti_hwrev == TI_HWREV_TIGON) {
			/*
			 * Tigon 1: the TX ring lives in NIC memory and is
			 * accessed through a 128-descriptor window; select
			 * the window containing idx first.
			 */
			if (idx > 383)
				CSR_WRITE_4(sc, TI_WINBASE,
				    TI_TX_RING_BASE + 6144);
			else if (idx > 255)
				CSR_WRITE_4(sc, TI_WINBASE,
				    TI_TX_RING_BASE + 4096);
			else if (idx > 127)
				CSR_WRITE_4(sc, TI_WINBASE,
				    TI_TX_RING_BASE + 2048);
			else
				CSR_WRITE_4(sc, TI_WINBASE,
				    TI_TX_RING_BASE);
			cur_tx = &sc->ti_rdata->ti_tx_ring_nic[idx % 128];
		} else
			cur_tx = &sc->ti_rdata->ti_tx_ring[idx];
		if (cur_tx->ti_flags & TI_BDFLAG_END)
			ifp->if_opackets++;
		if (sc->ti_cdata.ti_tx_chain[idx] != NULL) {
			m_freem(sc->ti_cdata.ti_tx_chain[idx]);
			sc->ti_cdata.ti_tx_chain[idx] = NULL;
		}
		sc->ti_txcnt--;
		TI_INC(sc->ti_tx_saved_considx, TI_TX_RING_CNT);
		ifp->if_timer = 0;
	}

	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;
}

/*
 * Interrupt handler: ack the interrupt, service RX/TX rings and
 * firmware events, then re-enable interrupts and kick the transmitter.
 */
static void
ti_intr(xsc)
	void			*xsc;
{
	struct ti_softc		*sc;
	struct ifnet		*ifp;

	sc = xsc;
	TI_LOCK(sc);
	ifp = sc->ti_ifp;

/*#ifdef notdef*/
	/* Avoid this for now -- checking this register is expensive. */
	/* Make sure this is really our interrupt. */
	if (!(CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_INTSTATE)) {
		TI_UNLOCK(sc);
		return;
	}
/*#endif*/

	/* Ack interrupt and stop others from occurring. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);

	if (ifp->if_flags & IFF_RUNNING) {
		/* Check RX return ring producer/consumer */
		ti_rxeof(sc);

		/* Check TX ring producer/consumer */
		ti_txeof(sc);
	}

	ti_handle_events(sc);

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);

	if (ifp->if_flags & IFF_RUNNING && ifp->if_snd.ifq_head != NULL)
		ti_start(ifp);

	TI_UNLOCK(sc);
}

/*
 * Fold the firmware-maintained collision counters into if_collisions.
 * The subtraction makes the assignment incremental across calls.
 */
static void
ti_stats_update(sc)
	struct ti_softc		*sc;
{
	struct ifnet		*ifp;

	ifp = sc->ti_ifp;

	ifp->if_collisions +=
	    (sc->ti_rdata->ti_info.ti_stats.dot3StatsSingleCollisionFrames +
	    sc->ti_rdata->ti_info.ti_stats.dot3StatsMultipleCollisionFrames +
	    sc->ti_rdata->ti_info.ti_stats.dot3StatsExcessiveCollisions +
	    sc->ti_rdata->ti_info.ti_stats.dot3StatsLateCollisions) -
	    ifp->if_collisions;
}

/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 */
static int
ti_encap(sc, m_head, txidx)
	struct ti_softc		*sc;
	struct mbuf		*m_head;
	u_int32_t		*txidx;
{
	struct ti_tx_desc	*f = NULL;
	struct mbuf		*m;
	u_int32_t		frag, cur, cnt = 0;
	u_int16_t		csum_flags = 0;
	struct m_tag		*mtag;

	m = m_head;
	cur = frag = *txidx;

	/* Translate mbuf checksum/fragment flags to descriptor flags. */
	if (m_head->m_pkthdr.csum_flags) {
		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			csum_flags |= TI_BDFLAG_IP_CKSUM;
		if (m_head->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))
			csum_flags |= TI_BDFLAG_TCP_UDP_CKSUM;
		if (m_head->m_flags & M_LASTFRAG)
			csum_flags |= TI_BDFLAG_IP_FRAG_END;
		else if (m_head->m_flags & M_FRAG)
			csum_flags |= TI_BDFLAG_IP_FRAG;
	}

	mtag = VLAN_OUTPUT_TAG(sc->ti_ifp, m);

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	for (m = m_head; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if (sc->ti_hwrev == TI_HWREV_TIGON) {
				/* Tigon 1: select the NIC-memory window. */
				if (frag > 383)
					CSR_WRITE_4(sc, TI_WINBASE,
					    TI_TX_RING_BASE + 6144);
				else if (frag > 255)
					CSR_WRITE_4(sc, TI_WINBASE,
					    TI_TX_RING_BASE + 4096);
				else if (frag > 127)
					CSR_WRITE_4(sc, TI_WINBASE,
					    TI_TX_RING_BASE + 2048);
				else
					CSR_WRITE_4(sc, TI_WINBASE,
					    TI_TX_RING_BASE);
				f = &sc->ti_rdata->ti_tx_ring_nic[frag % 128];
			} else
				f = &sc->ti_rdata->ti_tx_ring[frag];
			if (sc->ti_cdata.ti_tx_chain[frag] != NULL)
				break;
			TI_HOSTADDR(f->ti_addr) =
			    vtophys(mtod(m, vm_offset_t));
			f->ti_len = m->m_len;
			f->ti_flags = csum_flags;

			if (mtag != NULL) {
				f->ti_flags |= TI_BDFLAG_VLAN_TAG;
				f->ti_vlan_tag = VLAN_TAG_VALUE(mtag) & 0xfff;
			} else {
				f->ti_vlan_tag = 0;
			}

			/*
			 * Sanity check: avoid coming within 16 descriptors
			 * of the end of the ring.
			 */
			if ((TI_TX_RING_CNT - (sc->ti_txcnt + cnt)) < 16)
				return (ENOBUFS);
			cur = frag;
			TI_INC(frag, TI_TX_RING_CNT);
			cnt++;
		}
	}

	if (m != NULL)
		return (ENOBUFS);

	if (frag == sc->ti_tx_saved_considx)
		return (ENOBUFS);

	/* Mark the final descriptor of the frame. */
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		sc->ti_rdata->ti_tx_ring_nic[cur % 128].ti_flags |=
		    TI_BDFLAG_END;
	else
		sc->ti_rdata->ti_tx_ring[cur].ti_flags |= TI_BDFLAG_END;
	sc->ti_cdata.ti_tx_chain[cur] = m_head;
	sc->ti_txcnt += cnt;

	*txidx = frag;

	return (0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 */
static void
ti_start(ifp)
	struct ifnet		*ifp;
{
	struct ti_softc		*sc;
	struct mbuf		*m_head = NULL;
	u_int32_t		prodidx = 0;

	sc = ifp->if_softc;
	TI_LOCK(sc);

	prodidx = CSR_READ_4(sc, TI_MB_SENDPROD_IDX);

	while (sc->ti_cdata.ti_tx_chain[prodidx] == NULL) {
		IF_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * XXX
		 * safety overkill. If this is a fragmented packet chain
		 * with delayed TCP/UDP checksums, then only encapsulate
		 * it if we have enough descriptors to handle the entire
		 * chain at once.
		 * (paranoia -- may not actually be needed)
		 */
		if (m_head->m_flags & M_FIRSTFRAG &&
		    m_head->m_pkthdr.csum_flags & (CSUM_DELAY_DATA)) {
			if ((TI_TX_RING_CNT - sc->ti_txcnt) <
			    m_head->m_pkthdr.csum_data + 16) {
				IF_PREPEND(&ifp->if_snd, m_head);
				ifp->if_flags |= IFF_OACTIVE;
				break;
			}
		}

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (ti_encap(sc, m_head, &prodidx)) {
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
		BPF_MTAP(ifp, m_head);
	}

	/* Transmit */
	CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, prodidx);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
	TI_UNLOCK(sc);
}

/*
 * Bring the interface up: stop any pending I/O, then rebuild the
 * general info block, ring control blocks and restart the firmware.
 */
static void
ti_init(xsc)
	void			*xsc;
{
	struct ti_softc		*sc = xsc;

	/* Cancel pending I/O and flush buffers. */
	ti_stop(sc);

	TI_LOCK(sc);
	/* Init the gen info block, ring control blocks and firmware. */
	if (ti_gibinit(sc)) {
		printf("ti%d: initialization failure\n", sc->ti_unit);
		TI_UNLOCK(sc);
		return;
	}

	TI_UNLOCK(sc);
}

/*
 * Second-stage init, run once the firmware is up: program MTU, MAC
 * address, promiscuous mode, the RX/TX rings and link negotiation.
 */
static void
ti_init2(sc)
	struct ti_softc		*sc;
{
	struct ti_cmd_desc	cmd;
	struct ifnet		*ifp;
	u_int16_t		*m;
	struct ifmedia		*ifm;
	int			tmp;

	ifp = sc->ti_ifp;

	/* Specify MTU and interface index. */
	CSR_WRITE_4(sc, TI_GCR_IFINDEX, sc->ti_unit);
	CSR_WRITE_4(sc, TI_GCR_IFMTU, ifp->if_mtu +
	    ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
	TI_DO_CMD(TI_CMD_UPDATE_GENCOM, 0, 0);

	/* Load our MAC address. */
	m = (u_int16_t *)&IFP2ENADDR(sc->ti_ifp)[0];
	CSR_WRITE_4(sc, TI_GCR_PAR0, htons(m[0]));
	CSR_WRITE_4(sc, TI_GCR_PAR1, (htons(m[1]) << 16) | htons(m[2]));
	TI_DO_CMD(TI_CMD_SET_MAC_ADDR, 0, 0);

	/* Enable or disable promiscuous mode as needed. */
	if (ifp->if_flags & IFF_PROMISC) {
		TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_ENB, 0);
	} else {
		TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_DIS, 0);
	}

	/* Program multicast filter.
 */
	ti_setmulti(sc);

	/*
	 * If this is a Tigon 1, we should tell the
	 * firmware to use software packet filtering.
	 */
	if (sc->ti_hwrev == TI_HWREV_TIGON) {
		TI_DO_CMD(TI_CMD_FDR_FILTERING, TI_CMD_CODE_FILT_ENB, 0);
	}

	/* Init RX ring. */
	ti_init_rx_ring_std(sc);

	/* Init jumbo RX ring. */
	if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN))
		ti_init_rx_ring_jumbo(sc);

	/*
	 * If this is a Tigon 2, we can also configure the
	 * mini ring.
	 */
	if (sc->ti_hwrev == TI_HWREV_TIGON_II)
		ti_init_rx_ring_mini(sc);

	CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX, 0);
	sc->ti_rx_saved_considx = 0;

	/* Init TX ring. */
	ti_init_tx_ring(sc);

	/* Tell firmware we're alive. */
	TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_UP, 0);

	/* Enable host interrupts. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Make sure to set media properly. We have to do this
	 * here since we have to issue commands in order to set
	 * the link negotiation and we can't issue commands until
	 * the firmware is running.
	 */
	ifm = &sc->ifmedia;
	tmp = ifm->ifm_media;
	ifm->ifm_media = ifm->ifm_cur->ifm_media;
	ti_ifmedia_upd(ifp);
	ifm->ifm_media = tmp;
}

/*
 * Set media options.
 */
static int
ti_ifmedia_upd(ifp)
	struct ifnet		*ifp;
{
	struct ti_softc		*sc;
	struct ifmedia		*ifm;
	struct ti_cmd_desc	cmd;
	u_int32_t		flowctl;

	sc = ifp->if_softc;
	ifm = &sc->ifmedia;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	flowctl = 0;

	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		/*
		 * Transmit flow control doesn't work on the Tigon 1.
		 */
		flowctl = TI_GLNK_RX_FLOWCTL_Y;

		/*
		 * Transmit flow control can also cause problems on the
		 * Tigon 2, apparently with both the copper and fiber
		 * boards. The symptom is that the interface will just
		 * hang. This was reproduced with Alteon 180 switches.
		 */
#if 0
		if (sc->ti_hwrev != TI_HWREV_TIGON)
			flowctl |= TI_GLNK_TX_FLOWCTL_Y;
#endif

		CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB|
		    TI_GLNK_FULL_DUPLEX| flowctl |
		    TI_GLNK_AUTONEGENB|TI_GLNK_ENB);

		flowctl = TI_LNK_RX_FLOWCTL_Y;
#if 0
		if (sc->ti_hwrev != TI_HWREV_TIGON)
			flowctl |= TI_LNK_TX_FLOWCTL_Y;
#endif

		CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_100MB|TI_LNK_10MB|
		    TI_LNK_FULL_DUPLEX|TI_LNK_HALF_DUPLEX| flowctl |
		    TI_LNK_AUTONEGENB|TI_LNK_ENB);
		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
		    TI_CMD_CODE_NEGOTIATE_BOTH, 0);
		break;
	case IFM_1000_SX:
	case IFM_1000_T:
		flowctl = TI_GLNK_RX_FLOWCTL_Y;
#if 0
		if (sc->ti_hwrev != TI_HWREV_TIGON)
			flowctl |= TI_GLNK_TX_FLOWCTL_Y;
#endif

		CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB|
		    flowctl |TI_GLNK_ENB);
		CSR_WRITE_4(sc, TI_GCR_LINK, 0);
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
			TI_SETBIT(sc, TI_GCR_GLINK, TI_GLNK_FULL_DUPLEX);
		}
		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
		    TI_CMD_CODE_NEGOTIATE_GIGABIT, 0);
		break;
	case IFM_100_FX:
	case IFM_10_FL:
	case IFM_100_TX:
	case IFM_10_T:
		flowctl = TI_LNK_RX_FLOWCTL_Y;
#if 0
		if (sc->ti_hwrev != TI_HWREV_TIGON)
			flowctl |= TI_LNK_TX_FLOWCTL_Y;
#endif

		CSR_WRITE_4(sc, TI_GCR_GLINK, 0);
		CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_ENB|TI_LNK_PREF|flowctl);
		if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_FX ||
		    IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_100MB);
		} else {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_10MB);
		}
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_FULL_DUPLEX);
		} else {
			TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_HALF_DUPLEX);
		}
		TI_DO_CMD(TI_CMD_LINK_NEGOTIATION,
		    TI_CMD_CODE_NEGOTIATE_10_100, 0);
		break;
	}

	return (0);
}

/*
 * Report current media status.
 */
static void
ti_ifmedia_sts(ifp, ifmr)
	struct ifnet		*ifp;
	struct ifmediareq	*ifmr;
{
	struct ti_softc		*sc;
	u_int32_t		media = 0;

	sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	/* Link down: report no active media. */
	if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN)
		return;

	ifmr->ifm_status |= IFM_ACTIVE;

	if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP) {
		media = CSR_READ_4(sc, TI_GCR_GLINK_STAT);
		if (sc->ti_copper)
			ifmr->ifm_active |= IFM_1000_T;
		else
			ifmr->ifm_active |= IFM_1000_SX;
		if (media & TI_GLNK_FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
	} else if (sc->ti_linkstat == TI_EV_CODE_LINK_UP) {
		media = CSR_READ_4(sc, TI_GCR_LINK_STAT);
		if (sc->ti_copper) {
			if (media & TI_LNK_100MB)
				ifmr->ifm_active |= IFM_100_TX;
			if (media & TI_LNK_10MB)
				ifmr->ifm_active |= IFM_10_T;
		} else {
			if (media & TI_LNK_100MB)
				ifmr->ifm_active |= IFM_100_FX;
			if (media & TI_LNK_10MB)
				ifmr->ifm_active |= IFM_10_FL;
		}
		if (media & TI_LNK_FULL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		if (media & TI_LNK_HALF_DUPLEX)
			ifmr->ifm_active |= IFM_HDX;
	}
}

/*
 * Standard ifnet ioctl entry point: MTU, flags, multicast, media
 * and capability changes; everything else goes to ether_ioctl().
 */
static int
ti_ioctl(ifp, command, data)
	struct ifnet		*ifp;
	u_long			command;
	caddr_t			data;
{
	struct ti_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			mask, error = 0;
	struct ti_cmd_desc	cmd;

	TI_LOCK(sc);

	switch (command) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu > TI_JUMBO_MTU)
			error = EINVAL;
		else {
			ifp->if_mtu = ifr->ifr_mtu;
			ti_init(sc);
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->ti_if_flags & IFF_PROMISC)) {
				TI_DO_CMD(TI_CMD_SET_PROMISC_MODE,
				    TI_CMD_CODE_PROMISC_ENB, 0);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->ti_if_flags & IFF_PROMISC) {
				TI_DO_CMD(TI_CMD_SET_PROMISC_MODE,
				    TI_CMD_CODE_PROMISC_DIS, 0);
			} else
				ti_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING) {
				ti_stop(sc);
			}
		}
		sc->ti_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (ifp->if_flags & IFF_RUNNING) {
			ti_setmulti(sc);
			error = 0;
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command);
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_HWCSUM) {
			/* Toggle hardware checksum; requires a re-init. */
			if (IFCAP_HWCSUM & ifp->if_capenable)
				ifp->if_capenable &= ~IFCAP_HWCSUM;
			else
				ifp->if_capenable |= IFCAP_HWCSUM;
			if (ifp->if_flags & IFF_RUNNING)
				ti_init(sc);
		}
		error = 0;
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	TI_UNLOCK(sc);

	return (error);
}

/*
 * Character-device open: mark the softc as being debugged so the
 * watchdog stays quiet while the chip is halted.
 */
static int
ti_open(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct ti_softc	*sc;

	sc = dev->si_drv1;
	if (sc == NULL)
		return (ENODEV);

	TI_LOCK(sc);
	sc->ti_flags |= TI_FLAG_DEBUGING;
	TI_UNLOCK(sc);

	return (0);
}

/*
 * Character-device close: clear the debugging flag set by ti_open().
 */
static int
ti_close(struct cdev *dev, int flag, int fmt, struct thread *td)
{
	struct ti_softc	*sc;

	sc = dev->si_drv1;
	if (sc == NULL)
		return (ENODEV);

	TI_LOCK(sc);
	sc->ti_flags &= ~TI_FLAG_DEBUGING;
	TI_UNLOCK(sc);

	return (0);
}

/*
 * This ioctl routine goes along with the Tigon character device.
 */
static int
ti_ioctl2(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
    struct thread *td)
{
	int error;
	struct ti_softc *sc;

	sc = dev->si_drv1;
	if (sc == NULL)
		return (ENODEV);

	error = 0;

	switch (cmd) {
	case TIIOCGETSTATS:
	{
		struct ti_stats *outstats;

		outstats = (struct ti_stats *)addr;

		/* Snapshot of the firmware-maintained statistics block. */
		bcopy(&sc->ti_rdata->ti_info.ti_stats, outstats,
		    sizeof(struct ti_stats));
		break;
	}
	case TIIOCGETPARAMS:
	{
		struct ti_params	*params;

		params = (struct ti_params *)addr;

		/* Report the current coalescing tuneables. */
		params->ti_stat_ticks = sc->ti_stat_ticks;
		params->ti_rx_coal_ticks = sc->ti_rx_coal_ticks;
		params->ti_tx_coal_ticks = sc->ti_tx_coal_ticks;
		params->ti_rx_max_coal_bds = sc->ti_rx_max_coal_bds;
		params->ti_tx_max_coal_bds = sc->ti_tx_max_coal_bds;
		params->ti_tx_buf_ratio = sc->ti_tx_buf_ratio;
		params->param_mask = TI_PARAM_ALL;

		error = 0;
		break;
	}
	case TIIOCSETPARAMS:
	{
		struct ti_params *params;

		params = (struct ti_params *)addr;

		/* Apply only the tuneables selected in param_mask. */
		if (params->param_mask & TI_PARAM_STAT_TICKS) {
			sc->ti_stat_ticks = params->ti_stat_ticks;
			CSR_WRITE_4(sc, TI_GCR_STAT_TICKS, sc->ti_stat_ticks);
		}

		if (params->param_mask & TI_PARAM_RX_COAL_TICKS) {
			sc->ti_rx_coal_ticks = params->ti_rx_coal_ticks;
			CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS,
			    sc->ti_rx_coal_ticks);
		}

		if (params->param_mask & TI_PARAM_TX_COAL_TICKS) {
			sc->ti_tx_coal_ticks = params->ti_tx_coal_ticks;
			CSR_WRITE_4(sc, TI_GCR_TX_COAL_TICKS,
			    sc->ti_tx_coal_ticks);
		}

		if (params->param_mask & TI_PARAM_RX_COAL_BDS) {
			sc->ti_rx_max_coal_bds = params->ti_rx_max_coal_bds;
			CSR_WRITE_4(sc, TI_GCR_RX_MAX_COAL_BD,
			    sc->ti_rx_max_coal_bds);
		}

		if (params->param_mask & TI_PARAM_TX_COAL_BDS) {
			sc->ti_tx_max_coal_bds = params->ti_tx_max_coal_bds;
			CSR_WRITE_4(sc, TI_GCR_TX_MAX_COAL_BD,
			    sc->ti_tx_max_coal_bds);
		}

		if (params->param_mask & TI_PARAM_TX_BUF_RATIO) {
			sc->ti_tx_buf_ratio = params->ti_tx_buf_ratio;
			CSR_WRITE_4(sc, TI_GCR_TX_BUFFER_RATIO,
			    sc->ti_tx_buf_ratio);
		}

		error = 0;
		break;
	}
	case TIIOCSETTRACE: {
		ti_trace_type	trace_type;

		trace_type = *(ti_trace_type *)addr;

		/*
		 * Set tracing to whatever the user asked for. Setting
		 * this register to 0 should have the effect of disabling
		 * tracing.
		 */
		CSR_WRITE_4(sc, TI_GCR_NIC_TRACING, trace_type);

		error = 0;
		break;
	}
	case TIIOCGETTRACE: {
		struct ti_trace_buf	*trace_buf;
		u_int32_t		trace_start, cur_trace_ptr, trace_len;

		trace_buf = (struct ti_trace_buf *)addr;
		trace_start = CSR_READ_4(sc, TI_GCR_NICTRACE_START);
		cur_trace_ptr = CSR_READ_4(sc, TI_GCR_NICTRACE_PTR);
		trace_len = CSR_READ_4(sc, TI_GCR_NICTRACE_LEN);

#if 0
		printf("ti%d: trace_start = %#x, cur_trace_ptr = %#x, "
		       "trace_len = %d\n", sc->ti_unit, trace_start,
		       cur_trace_ptr, trace_len);
		printf("ti%d: trace_buf->buf_len = %d\n", sc->ti_unit,
		       trace_buf->buf_len);
#endif

		/* Copy the NIC trace buffer out to user space. */
		error = ti_copy_mem(sc, trace_start, min(trace_len,
		    trace_buf->buf_len), (caddr_t)trace_buf->buf, 1, 1);

		if (error == 0) {
			trace_buf->fill_len = min(trace_len,
			    trace_buf->buf_len);
			if (cur_trace_ptr < trace_start)
				trace_buf->cur_trace_ptr =
					trace_start - cur_trace_ptr;
			else
				trace_buf->cur_trace_ptr =
					cur_trace_ptr - trace_start;
		} else
			trace_buf->fill_len = 0;

		break;
	}

	/*
	 * For debugging, five ioctls are needed:
	 * ALT_ATTACH
	 * ALT_READ_TG_REG
	 * ALT_WRITE_TG_REG
	 * ALT_READ_TG_MEM
	 * ALT_WRITE_TG_MEM
	 */
	case ALT_ATTACH:
		/*
		 * From what I can tell, Alteon's Solaris Tigon driver
		 * only has one character device, so you have to attach
		 * to the Tigon board you're interested in. This seems
		 * like a not-so-good way to do things, since unless you
		 * subsequently specify the unit number of the device
		 * you're interested in in every ioctl, you'll only be
		 * able to debug one board at a time.
		 */
		error = 0;
		break;
	case ALT_READ_TG_MEM:
	case ALT_WRITE_TG_MEM:
	{
		struct tg_mem	*mem_param;
		u_int32_t	sram_end, scratch_end;

		mem_param = (struct tg_mem *)addr;

		if (sc->ti_hwrev == TI_HWREV_TIGON) {
			sram_end = TI_END_SRAM_I;
			scratch_end = TI_END_SCRATCH_I;
		} else {
			sram_end = TI_END_SRAM_II;
			scratch_end = TI_END_SCRATCH_II;
		}

		/*
		 * For now, we'll only handle accessing regular SRAM,
		 * nothing else.
		 */
		if ((mem_param->tgAddr >= TI_BEG_SRAM) &&
		    ((mem_param->tgAddr + mem_param->len) <= sram_end)) {
			/*
			 * In this instance, we always copy to/from user
			 * space, so the user space argument is set to 1.
			 */
			error = ti_copy_mem(sc, mem_param->tgAddr,
			    mem_param->len, mem_param->userAddr, 1,
			    (cmd == ALT_READ_TG_MEM) ? 1 : 0);
		} else if ((mem_param->tgAddr >= TI_BEG_SCRATCH) &&
		    (mem_param->tgAddr <= scratch_end)) {
			error = ti_copy_scratch(sc, mem_param->tgAddr,
			    mem_param->len, mem_param->userAddr, 1,
			    (cmd == ALT_READ_TG_MEM) ? 1 : 0, TI_PROCESSOR_A);
		} else if ((mem_param->tgAddr >= TI_BEG_SCRATCH_B_DEBUG) &&
		    (mem_param->tgAddr <= TI_BEG_SCRATCH_B_DEBUG)) {
			/*
			 * NOTE(review): both bounds compare against
			 * TI_BEG_SCRATCH_B_DEBUG, so only that single
			 * address can match; the upper bound looks like
			 * it was meant to be an end-of-range constant.
			 * Verify against the Tigon register map before
			 * changing.
			 */
			if (sc->ti_hwrev == TI_HWREV_TIGON) {
				printf("ti%d: invalid memory range for "
				       "Tigon I\n", sc->ti_unit);
				error = EINVAL;
				break;
			}
			error = ti_copy_scratch(sc, mem_param->tgAddr -
			    TI_SCRATCH_DEBUG_OFF, mem_param->len,
			    mem_param->userAddr, 1,
			    (cmd == ALT_READ_TG_MEM) ? 1 : 0, TI_PROCESSOR_B);
		} else {
			printf("ti%d: memory address %#x len %d is out of "
			       "supported range\n", sc->ti_unit,
			        mem_param->tgAddr, mem_param->len);
			error = EINVAL;
		}

		break;
	}
	case ALT_READ_TG_REG:
	case ALT_WRITE_TG_REG:
	{
		struct tg_reg	*regs;
		u_int32_t	tmpval;

		regs = (struct tg_reg *)addr;

		/*
		 * Make sure the address in question isn't out of range.
		 */
		if (regs->addr > TI_REG_MAX) {
			error = EINVAL;
			break;
		}
		if (cmd == ALT_READ_TG_REG) {
			bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle,
			    regs->addr, &tmpval, 1);
			regs->data = ntohl(tmpval);
#if 0
			if ((regs->addr == TI_CPU_STATE) ||
			    (regs->addr == TI_CPU_CTL_B)) {
				printf("ti%d: register %#x = %#x\n",
				       sc->ti_unit, regs->addr, tmpval);
			}
#endif
		} else {
			tmpval = htonl(regs->data);
			bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle,
			    regs->addr, &tmpval, 1);
		}

		break;
	}
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}

/*
 * Watchdog: if the TX timer expires (chip wedged), reset and
 * reinitialize the interface -- unless the debug device is open,
 * in which case the chip is deliberately stopped.
 */
static void
ti_watchdog(ifp)
	struct ifnet		*ifp;
{
	struct ti_softc		*sc;

	sc = ifp->if_softc;
	TI_LOCK(sc);

	/*
	 * When we're debugging, the chip is often stopped for long periods
	 * of time, and that would normally cause the watchdog timer to fire.
	 * Since that impedes debugging, we don't want to do that.
	 */
	if (sc->ti_flags & TI_FLAG_DEBUGING) {
		TI_UNLOCK(sc);
		return;
	}

	printf("ti%d: watchdog timeout -- resetting\n", sc->ti_unit);
	ti_stop(sc);
	ti_init(sc);

	ifp->if_oerrors++;
	TI_UNLOCK(sc);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
ti_stop(sc)
	struct ti_softc		*sc;
{
	struct ifnet		*ifp;
	struct ti_cmd_desc	cmd;

	TI_LOCK(sc);

	ifp = sc->ti_ifp;

	/* Disable host interrupts. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);
	/*
	 * Tell firmware we're shutting down.
	 */
	TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_DOWN, 0);

	/* Halt and reinitialize. */
	ti_chipinit(sc);
	ti_mem(sc, 0x2000, 0x100000 - 0x2000, NULL);
	ti_chipinit(sc);

	/* Free the RX lists. */
	ti_free_rx_ring_std(sc);

	/* Free jumbo RX list. */
	ti_free_rx_ring_jumbo(sc);

	/* Free mini RX list. */
	ti_free_rx_ring_mini(sc);

	/* Free TX buffers.
 */
	ti_free_tx_ring(sc);

	/* Reset the ring indexes so the next init starts clean. */
	sc->ti_ev_prodidx.ti_idx = 0;
	sc->ti_return_prodidx.ti_idx = 0;
	sc->ti_tx_considx.ti_idx = 0;
	sc->ti_tx_saved_considx = TI_TXCONS_UNSET;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	TI_UNLOCK(sc);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
ti_shutdown(dev)
	device_t		dev;
{
	struct ti_softc		*sc;

	sc = device_get_softc(dev);
	TI_LOCK(sc);
	ti_chipinit(sc);
	TI_UNLOCK(sc);
}
Index: stable/6/sys/pci/if_tl.c
===================================================================
--- stable/6/sys/pci/if_tl.c	(revision 149421)
+++ stable/6/sys/pci/if_tl.c	(revision 149422)
@@ -1,2354 +1,2356 @@
/*-
 * Copyright (c) 1997, 1998
 *	Bill Paul . All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Bill Paul.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Texas Instruments ThunderLAN driver for FreeBSD 2.2.6 and 3.x. * Supports many Compaq PCI NICs based on the ThunderLAN ethernet controller, * the National Semiconductor DP83840A physical interface and the * Microchip Technology 24Cxx series serial EEPROM. * * Written using the following four documents: * * Texas Instruments ThunderLAN Programmer's Guide (www.ti.com) * National Semiconductor DP83840A data sheet (www.national.com) * Microchip Technology 24C02C data sheet (www.microchip.com) * Micro Linear ML6692 100BaseTX only PHY data sheet (www.microlinear.com) * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * Some notes about the ThunderLAN: * * The ThunderLAN controller is a single chip containing PCI controller * logic, approximately 3K of on-board SRAM, a LAN controller, and media * independent interface (MII) bus. The MII allows the ThunderLAN chip to * control up to 32 different physical interfaces (PHYs). The ThunderLAN * also has a built-in 10baseT PHY, allowing a single ThunderLAN controller * to act as a complete ethernet interface. * * Other PHYs may be attached to the ThunderLAN; the Compaq 10/100 cards * use a National Semiconductor DP83840A PHY that supports 10 or 100Mb/sec * in full or half duplex. Some of the Compaq Deskpro machines use a * Level 1 LXT970 PHY with the same capabilities. 
Certain Olicom adapters
 * use a Micro Linear ML6692 100BaseTX only PHY, which can be used in
 * concert with the ThunderLAN's internal PHY to provide full 10/100
 * support. This is cheaper than using a standalone external PHY for both
 * 10/100 modes and letting the ThunderLAN's internal PHY go to waste.
 * A serial EEPROM is also attached to the ThunderLAN chip to provide
 * power-up default register settings and for storing the adapter's
 * station address. Although not supported by this driver, the ThunderLAN
 * chip can also be connected to token ring PHYs.
 *
 * The ThunderLAN has a set of registers which can be used to issue
 * commands, acknowledge interrupts, and to manipulate other internal
 * registers on its DIO bus. The primary registers can be accessed
 * using either programmed I/O (inb/outb) or via PCI memory mapping,
 * depending on how the card is configured during the PCI probing
 * phase. It is even possible to have both PIO and memory mapped
 * access turned on at the same time.
 *
 * Frame reception and transmission with the ThunderLAN chip is done
 * using frame 'lists.' A list structure looks more or less like this:
 *
 * struct tl_frag {
 *	u_int32_t fragment_address;
 *	u_int32_t fragment_size;
 * };
 * struct tl_list {
 *	u_int32_t forward_pointer;
 *	u_int16_t cstat;
 *	u_int16_t frame_size;
 *	struct tl_frag fragments[10];
 * };
 *
 * The forward pointer in the list header can be either a 0 or the address
 * of another list, which allows several lists to be linked together. Each
 * list contains up to 10 fragment descriptors. This means the chip allows
 * ethernet frames to be broken up into up to 10 chunks for transfer to
 * and from the SRAM. Note that the forward pointer and fragment buffer
 * addresses are physical memory addresses, not virtual. Note also that
 * a single ethernet frame can not span lists: if the host wants to
 * transmit a frame and the frame data is split up over more than 10
 * buffers, the frame has to be collapsed before it can be transmitted.
 *
 * To receive frames, the driver sets up a number of lists and populates
 * the fragment descriptors, then it sends an RX GO command to the chip.
 * When a frame is received, the chip will DMA it into the memory regions
 * specified by the fragment descriptors and then trigger an RX 'end of
 * frame interrupt' when done. The driver may choose to use only one
 * fragment per list; this may result in slightly less efficient use
 * of memory in exchange for improving performance.
 *
 * To transmit frames, the driver again sets up lists and fragment
 * descriptors, only this time the buffers contain frame data that
 * is to be DMA'ed into the chip instead of out of it. Once the chip
 * has transferred the data into its on-board SRAM, it will trigger a
 * TX 'end of frame' interrupt. It will also generate an 'end of channel'
 * interrupt when it reaches the end of the list.
 */

/*
 * Some notes about this driver:
 *
 * The ThunderLAN chip provides a couple of different ways to organize
 * reception, transmission and interrupt handling. The simplest approach
 * is to use one list each for transmission and reception. In this mode,
 * the ThunderLAN will generate two interrupts for every received frame
 * (one RX EOF and one RX EOC) and two for each transmitted frame (one
 * TX EOF and one TX EOC). This may make the driver simpler but it hurts
 * performance to have to handle so many interrupts.
 *
 * Initially I wanted to create a circular list of receive buffers so
 * that the ThunderLAN chip would think there was an infinitely long
 * receive channel and never deliver an RXEOC interrupt. However this
 * doesn't work correctly under heavy load: while the manual says the
 * chip will trigger an RXEOF interrupt each time a frame is copied into
 * memory, you can't count on the chip waiting around for you to acknowledge
 * the interrupt before it starts trying to DMA the next frame.
The result
 * is that the chip might traverse the entire circular list and then wrap
 * around before you have a chance to do anything about it. Consequently,
 * the receive list is terminated (with a 0 in the forward pointer in the
 * last element). Each time an RXEOF interrupt arrives, the used list
 * is shifted to the end of the list. This gives the appearance of an
 * infinitely large RX chain so long as the driver doesn't fall behind
 * the chip and allow all of the lists to be filled up.
 *
 * If all the lists are filled, the adapter will deliver an RX 'end of
 * channel' interrupt when it hits the 0 forward pointer at the end of
 * the chain. The RXEOC handler then cleans out the RX chain and resets
 * the list head pointer in the ch_parm register and restarts the receiver.
 *
 * For frame transmission, it is possible to program the ThunderLAN's
 * transmit interrupt threshold so that the chip can acknowledge multiple
 * lists with only a single TX EOF interrupt. This allows the driver to
 * queue several frames in one shot, and only have to handle a total
 * of two interrupts (one TX EOF and one TX EOC) no matter how many frames
 * are transmitted. Frame transmission is done directly out of the
 * mbufs passed to the tl_start() routine via the interface send queue.
 * The driver simply sets up the fragment descriptors in the transmit
 * lists to point to the mbuf data regions and sends a TX GO command.
 *
 * Note that since the RX and TX lists themselves are always used
 * only by the driver, they are malloc()ed once at driver initialization
 * time and never free()ed.
 *
 * Also, in order to remain as platform independent as possible, this
 * driver uses memory mapped register access to manipulate the card
 * as opposed to programmed I/O. This avoids the use of the inb/outb
 * (and related) instructions which are specific to the i386 platform.
* * Using these techniques, this driver achieves very high performance * by minimizing the amount of interrupts generated during large * transfers and by completely avoiding buffer copies. Frame transfer * to and from the ThunderLAN chip is performed entirely by the chip * itself thereby reducing the load on the host CPU. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include #include #include #include #include /* * Default to using PIO register access mode to pacify certain * laptop docking stations with built-in ThunderLAN chips that * don't seem to handle memory mapped mode properly. */ #define TL_USEIOSPACE #include MODULE_DEPEND(tl, pci, 1, 1, 1); MODULE_DEPEND(tl, ether, 1, 1, 1); MODULE_DEPEND(tl, miibus, 1, 1, 1); /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* * Various supported device vendors/types and their names. 
*/ static struct tl_type tl_devs[] = { { TI_VENDORID, TI_DEVICEID_THUNDERLAN, "Texas Instruments ThunderLAN" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10, "Compaq Netelligent 10" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100, "Compaq Netelligent 10/100" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_PROLIANT, "Compaq Netelligent 10/100 Proliant" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_DUAL, "Compaq Netelligent 10/100 Dual Port" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_INTEGRATED, "Compaq NetFlex-3/P Integrated" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P, "Compaq NetFlex-3/P" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETFLEX_3P_BNC, "Compaq NetFlex 3/P w/ BNC" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_EMBEDDED, "Compaq Netelligent 10/100 TX Embedded UTP" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_T2_UTP_COAX, "Compaq Netelligent 10 T/2 PCI UTP/Coax" }, { COMPAQ_VENDORID, COMPAQ_DEVICEID_NETEL_10_100_TX_UTP, "Compaq Netelligent 10/100 TX UTP" }, { OLICOM_VENDORID, OLICOM_DEVICEID_OC2183, "Olicom OC-2183/2185" }, { OLICOM_VENDORID, OLICOM_DEVICEID_OC2325, "Olicom OC-2325" }, { OLICOM_VENDORID, OLICOM_DEVICEID_OC2326, "Olicom OC-2326 10/100 TX UTP" }, { 0, 0, NULL } }; static int tl_probe(device_t); static int tl_attach(device_t); static int tl_detach(device_t); static int tl_intvec_rxeoc(void *, u_int32_t); static int tl_intvec_txeoc(void *, u_int32_t); static int tl_intvec_txeof(void *, u_int32_t); static int tl_intvec_rxeof(void *, u_int32_t); static int tl_intvec_adchk(void *, u_int32_t); static int tl_intvec_netsts(void *, u_int32_t); static int tl_newbuf(struct tl_softc *, struct tl_chain_onefrag *); static void tl_stats_update(void *); static int tl_encap(struct tl_softc *, struct tl_chain *, struct mbuf *); static void tl_intr(void *); static void tl_start(struct ifnet *); static int tl_ioctl(struct ifnet *, u_long, caddr_t); static void tl_init(void *); static void tl_stop(struct tl_softc *); static void 
tl_watchdog(struct ifnet *); static void tl_shutdown(device_t); static int tl_ifmedia_upd(struct ifnet *); static void tl_ifmedia_sts(struct ifnet *, struct ifmediareq *); static u_int8_t tl_eeprom_putbyte(struct tl_softc *, int); static u_int8_t tl_eeprom_getbyte(struct tl_softc *, int, u_int8_t *); static int tl_read_eeprom(struct tl_softc *, caddr_t, int, int); static void tl_mii_sync(struct tl_softc *); static void tl_mii_send(struct tl_softc *, u_int32_t, int); static int tl_mii_readreg(struct tl_softc *, struct tl_mii_frame *); static int tl_mii_writereg(struct tl_softc *, struct tl_mii_frame *); static int tl_miibus_readreg(device_t, int, int); static int tl_miibus_writereg(device_t, int, int, int); static void tl_miibus_statchg(device_t); static void tl_setmode(struct tl_softc *, int); static uint32_t tl_mchash(const uint8_t *); static void tl_setmulti(struct tl_softc *); static void tl_setfilt(struct tl_softc *, caddr_t, int); static void tl_softreset(struct tl_softc *, int); static void tl_hardreset(device_t); static int tl_list_rx_init(struct tl_softc *); static int tl_list_tx_init(struct tl_softc *); static u_int8_t tl_dio_read8(struct tl_softc *, int); static u_int16_t tl_dio_read16(struct tl_softc *, int); static u_int32_t tl_dio_read32(struct tl_softc *, int); static void tl_dio_write8(struct tl_softc *, int, int); static void tl_dio_write16(struct tl_softc *, int, int); static void tl_dio_write32(struct tl_softc *, int, int); static void tl_dio_setbit(struct tl_softc *, int, int); static void tl_dio_clrbit(struct tl_softc *, int, int); static void tl_dio_setbit16(struct tl_softc *, int, int); static void tl_dio_clrbit16(struct tl_softc *, int, int); #ifdef TL_USEIOSPACE #define TL_RES SYS_RES_IOPORT #define TL_RID TL_PCI_LOIO #else #define TL_RES SYS_RES_MEMORY #define TL_RID TL_PCI_LOMEM #endif static device_method_t tl_methods[] = { /* Device interface */ DEVMETHOD(device_probe, tl_probe), DEVMETHOD(device_attach, tl_attach), 
DEVMETHOD(device_detach, tl_detach), DEVMETHOD(device_shutdown, tl_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, tl_miibus_readreg), DEVMETHOD(miibus_writereg, tl_miibus_writereg), DEVMETHOD(miibus_statchg, tl_miibus_statchg), { 0, 0 } }; static driver_t tl_driver = { "tl", tl_methods, sizeof(struct tl_softc) }; static devclass_t tl_devclass; DRIVER_MODULE(tl, pci, tl_driver, tl_devclass, 0, 0); DRIVER_MODULE(miibus, tl, miibus_driver, miibus_devclass, 0, 0); static u_int8_t tl_dio_read8(sc, reg) struct tl_softc *sc; int reg; { CSR_WRITE_2(sc, TL_DIO_ADDR, reg); return(CSR_READ_1(sc, TL_DIO_DATA + (reg & 3))); } static u_int16_t tl_dio_read16(sc, reg) struct tl_softc *sc; int reg; { CSR_WRITE_2(sc, TL_DIO_ADDR, reg); return(CSR_READ_2(sc, TL_DIO_DATA + (reg & 3))); } static u_int32_t tl_dio_read32(sc, reg) struct tl_softc *sc; int reg; { CSR_WRITE_2(sc, TL_DIO_ADDR, reg); return(CSR_READ_4(sc, TL_DIO_DATA + (reg & 3))); } static void tl_dio_write8(sc, reg, val) struct tl_softc *sc; int reg; int val; { CSR_WRITE_2(sc, TL_DIO_ADDR, reg); CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), val); return; } static void tl_dio_write16(sc, reg, val) struct tl_softc *sc; int reg; int val; { CSR_WRITE_2(sc, TL_DIO_ADDR, reg); CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), val); return; } static void tl_dio_write32(sc, reg, val) struct tl_softc *sc; int reg; int val; { CSR_WRITE_2(sc, TL_DIO_ADDR, reg); CSR_WRITE_4(sc, TL_DIO_DATA + (reg & 3), val); return; } static void tl_dio_setbit(sc, reg, bit) struct tl_softc *sc; int reg; int bit; { u_int8_t f; CSR_WRITE_2(sc, TL_DIO_ADDR, reg); f = CSR_READ_1(sc, TL_DIO_DATA + (reg & 3)); f |= bit; CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f); return; } static void tl_dio_clrbit(sc, reg, bit) struct tl_softc *sc; int reg; int bit; { u_int8_t f; CSR_WRITE_2(sc, TL_DIO_ADDR, reg); f = CSR_READ_1(sc, TL_DIO_DATA + (reg 
& 3)); f &= ~bit; CSR_WRITE_1(sc, TL_DIO_DATA + (reg & 3), f); return; } static void tl_dio_setbit16(sc, reg, bit) struct tl_softc *sc; int reg; int bit; { u_int16_t f; CSR_WRITE_2(sc, TL_DIO_ADDR, reg); f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)); f |= bit; CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f); return; } static void tl_dio_clrbit16(sc, reg, bit) struct tl_softc *sc; int reg; int bit; { u_int16_t f; CSR_WRITE_2(sc, TL_DIO_ADDR, reg); f = CSR_READ_2(sc, TL_DIO_DATA + (reg & 3)); f &= ~bit; CSR_WRITE_2(sc, TL_DIO_DATA + (reg & 3), f); return; } /* * Send an instruction or address to the EEPROM, check for ACK. */ static u_int8_t tl_eeprom_putbyte(sc, byte) struct tl_softc *sc; int byte; { register int i, ack = 0; /* * Make sure we're in TX mode. */ tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ETXEN); /* * Feed in each bit and stobe the clock. */ for (i = 0x80; i; i >>= 1) { if (byte & i) { tl_dio_setbit(sc, TL_NETSIO, TL_SIO_EDATA); } else { tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_EDATA); } DELAY(1); tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK); DELAY(1); tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK); } /* * Turn off TX mode. */ tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN); /* * Check for ack. */ tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK); ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA; tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK); return(ack); } /* * Read a byte of data stored in the EEPROM at address 'addr.' */ static u_int8_t tl_eeprom_getbyte(sc, addr, dest) struct tl_softc *sc; int addr; u_int8_t *dest; { register int i; u_int8_t byte = 0; struct ifnet *ifp = sc->tl_ifp; tl_dio_write8(sc, TL_NETSIO, 0); EEPROM_START; /* * Send write control code to EEPROM. */ if (tl_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) { if_printf(ifp, "failed to send write command, status: %x\n", tl_dio_read8(sc, TL_NETSIO)); return(1); } /* * Send address of byte we want to read. 
*/ if (tl_eeprom_putbyte(sc, addr)) { if_printf(ifp, "failed to send address, status: %x\n", tl_dio_read8(sc, TL_NETSIO)); return(1); } EEPROM_STOP; EEPROM_START; /* * Send read control code to EEPROM. */ if (tl_eeprom_putbyte(sc, EEPROM_CTL_READ)) { if_printf(ifp, "failed to send write command, status: %x\n", tl_dio_read8(sc, TL_NETSIO)); return(1); } /* * Start reading bits from EEPROM. */ tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ETXEN); for (i = 0x80; i; i >>= 1) { tl_dio_setbit(sc, TL_NETSIO, TL_SIO_ECLOK); DELAY(1); if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_EDATA) byte |= i; tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_ECLOK); DELAY(1); } EEPROM_STOP; /* * No ACK generated for read, so just return byte. */ *dest = byte; return(0); } /* * Read a sequence of bytes from the EEPROM. */ static int tl_read_eeprom(sc, dest, off, cnt) struct tl_softc *sc; caddr_t dest; int off; int cnt; { int err = 0, i; u_int8_t byte = 0; for (i = 0; i < cnt; i++) { err = tl_eeprom_getbyte(sc, off + i, &byte); if (err) break; *(dest + i) = byte; } return(err ? 1 : 0); } static void tl_mii_sync(sc) struct tl_softc *sc; { register int i; tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN); for (i = 0; i < 32; i++) { tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); } return; } static void tl_mii_send(sc, bits, cnt) struct tl_softc *sc; u_int32_t bits; int cnt; { int i; for (i = (0x1 << (cnt - 1)); i; i >>= 1) { tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); if (bits & i) { tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MDATA); } else { tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MDATA); } tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); } } static int tl_mii_readreg(sc, frame) struct tl_softc *sc; struct tl_mii_frame *frame; { int i, ack; int minten = 0; TL_LOCK(sc); tl_mii_sync(sc); /* * Set up frame for RX. */ frame->mii_stdelim = TL_MII_STARTDELIM; frame->mii_opcode = TL_MII_READOP; frame->mii_turnaround = 0; frame->mii_data = 0; /* * Turn off MII interrupt by forcing MINTEN low. 
*/ minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN; if (minten) { tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN); } /* * Turn on data xmit. */ tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN); /* * Send command/address info. */ tl_mii_send(sc, frame->mii_stdelim, 2); tl_mii_send(sc, frame->mii_opcode, 2); tl_mii_send(sc, frame->mii_phyaddr, 5); tl_mii_send(sc, frame->mii_regaddr, 5); /* * Turn off xmit. */ tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN); /* Idle bit */ tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); /* Check for ack */ tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); ack = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA; /* Complete the cycle */ tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); /* * Now try reading data bits. If the ack failed, we still * need to clock through 16 cycles to keep the PHYs in sync. */ if (ack) { for(i = 0; i < 16; i++) { tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); } goto fail; } for (i = 0x8000; i; i >>= 1) { tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); if (!ack) { if (tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MDATA) frame->mii_data |= i; } tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); } fail: tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); /* Reenable interrupts */ if (minten) { tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN); } TL_UNLOCK(sc); if (ack) return(1); return(0); } static int tl_mii_writereg(sc, frame) struct tl_softc *sc; struct tl_mii_frame *frame; { int minten; TL_LOCK(sc); tl_mii_sync(sc); /* * Set up frame for TX. */ frame->mii_stdelim = TL_MII_STARTDELIM; frame->mii_opcode = TL_MII_WRITEOP; frame->mii_turnaround = TL_MII_TURNAROUND; /* * Turn off MII interrupt by forcing MINTEN low. */ minten = tl_dio_read8(sc, TL_NETSIO) & TL_SIO_MINTEN; if (minten) { tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MINTEN); } /* * Turn on data output. 
*/ tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MTXEN); tl_mii_send(sc, frame->mii_stdelim, 2); tl_mii_send(sc, frame->mii_opcode, 2); tl_mii_send(sc, frame->mii_phyaddr, 5); tl_mii_send(sc, frame->mii_regaddr, 5); tl_mii_send(sc, frame->mii_turnaround, 2); tl_mii_send(sc, frame->mii_data, 16); tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MCLK); tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MCLK); /* * Turn off xmit. */ tl_dio_clrbit(sc, TL_NETSIO, TL_SIO_MTXEN); /* Reenable interrupts */ if (minten) tl_dio_setbit(sc, TL_NETSIO, TL_SIO_MINTEN); TL_UNLOCK(sc); return(0); } static int tl_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct tl_softc *sc; struct tl_mii_frame frame; sc = device_get_softc(dev); bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; tl_mii_readreg(sc, &frame); return(frame.mii_data); } static int tl_miibus_writereg(dev, phy, reg, data) device_t dev; int phy, reg, data; { struct tl_softc *sc; struct tl_mii_frame frame; sc = device_get_softc(dev); bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; frame.mii_data = data; tl_mii_writereg(sc, &frame); return(0); } static void tl_miibus_statchg(dev) device_t dev; { struct tl_softc *sc; struct mii_data *mii; sc = device_get_softc(dev); TL_LOCK(sc); mii = device_get_softc(sc->tl_miibus); if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX); } else { tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX); } TL_UNLOCK(sc); return; } /* * Set modes for bitrate devices. 
*/ static void tl_setmode(sc, media) struct tl_softc *sc; int media; { if (IFM_SUBTYPE(media) == IFM_10_5) tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD1); if (IFM_SUBTYPE(media) == IFM_10_T) { tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD1); if ((media & IFM_GMASK) == IFM_FDX) { tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_MTXD3); tl_dio_setbit(sc, TL_NETCMD, TL_CMD_DUPLEX); } else { tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_MTXD3); tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_DUPLEX); } } return; } /* * Calculate the hash of a MAC address for programming the multicast hash * table. This hash is simply the address split into 6-bit chunks * XOR'd, e.g. * byte: 000000|00 1111|1111 22|222222|333333|33 4444|4444 55|555555 * bit: 765432|10 7654|3210 76|543210|765432|10 7654|3210 76|543210 * Bytes 0-2 and 3-5 are symmetrical, so are folded together. Then * the folded 24-bit value is split into 6-bit portions and XOR'd. */ static uint32_t tl_mchash(addr) const uint8_t *addr; { int t; t = (addr[0] ^ addr[3]) << 16 | (addr[1] ^ addr[4]) << 8 | (addr[2] ^ addr[5]); return ((t >> 18) ^ (t >> 12) ^ (t >> 6) ^ t) & 0x3f; } /* * The ThunderLAN has a perfect MAC address filter in addition to * the multicast hash filter. The perfect filter can be programmed * with up to four MAC addresses. The first one is always used to * hold the station address, which leaves us free to use the other * three for multicast addresses. */ static void tl_setfilt(sc, addr, slot) struct tl_softc *sc; caddr_t addr; int slot; { int i; u_int16_t regaddr; regaddr = TL_AREG0_B5 + (slot * ETHER_ADDR_LEN); for (i = 0; i < ETHER_ADDR_LEN; i++) tl_dio_write8(sc, regaddr + i, *(addr + i)); return; } /* * XXX In FreeBSD 3.0, multicast addresses are managed using a doubly * linked list. This is fine, except addresses are added from the head * end of the list. 
We want to arrange for 224.0.0.1 (the "all hosts") * group to always be in the perfect filter, but as more groups are added, * the 224.0.0.1 entry (which is always added first) gets pushed down * the list and ends up at the tail. So after 3 or 4 multicast groups * are added, the all-hosts entry gets pushed out of the perfect filter * and into the hash table. * * Because the multicast list is a doubly-linked list as opposed to a * circular queue, we don't have the ability to just grab the tail of * the list and traverse it backwards. Instead, we have to traverse * the list once to find the tail, then traverse it again backwards to * update the multicast filter. */ static void tl_setmulti(sc) struct tl_softc *sc; { struct ifnet *ifp; u_int32_t hashes[2] = { 0, 0 }; int h, i; struct ifmultiaddr *ifma; u_int8_t dummy[] = { 0, 0, 0, 0, 0 ,0 }; ifp = sc->tl_ifp; /* First, zot all the existing filters. */ for (i = 1; i < 4; i++) tl_setfilt(sc, (caddr_t)&dummy, i); tl_dio_write32(sc, TL_HASH1, 0); tl_dio_write32(sc, TL_HASH2, 0); /* Now program new ones. */ if (ifp->if_flags & IFF_ALLMULTI) { hashes[0] = 0xFFFFFFFF; hashes[1] = 0xFFFFFFFF; } else { i = 1; + IF_ADDR_LOCK(ifp); TAILQ_FOREACH_REVERSE(ifma, &ifp->if_multiaddrs, ifmultihead, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; /* * Program the first three multicast groups * into the perfect filter. For all others, * use the hash table. */ if (i < 4) { tl_setfilt(sc, LLADDR((struct sockaddr_dl *)ifma->ifma_addr), i); i++; continue; } h = tl_mchash( LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); } + IF_ADDR_UNLOCK(ifp); } tl_dio_write32(sc, TL_HASH1, hashes[0]); tl_dio_write32(sc, TL_HASH2, hashes[1]); return; } /* * This routine is recommended by the ThunderLAN manual to insure that * the internal PHY is powered up correctly. 
It also recommends a one * second pause at the end to 'wait for the clocks to start' but in my * experience this isn't necessary. */ static void tl_hardreset(dev) device_t dev; { struct tl_softc *sc; int i; u_int16_t flags; sc = device_get_softc(dev); tl_mii_sync(sc); flags = BMCR_LOOP|BMCR_ISO|BMCR_PDOWN; for (i = 0; i < MII_NPHY; i++) tl_miibus_writereg(dev, i, MII_BMCR, flags); tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_ISO); DELAY(50000); tl_miibus_writereg(dev, 31, MII_BMCR, BMCR_LOOP|BMCR_ISO); tl_mii_sync(sc); while(tl_miibus_readreg(dev, 31, MII_BMCR) & BMCR_RESET); DELAY(50000); return; } static void tl_softreset(sc, internal) struct tl_softc *sc; int internal; { u_int32_t cmd, dummy, i; /* Assert the adapter reset bit. */ CMD_SET(sc, TL_CMD_ADRST); /* Turn off interrupts */ CMD_SET(sc, TL_CMD_INTSOFF); /* First, clear the stats registers. */ for (i = 0; i < 5; i++) dummy = tl_dio_read32(sc, TL_TXGOODFRAMES); /* Clear Areg and Hash registers */ for (i = 0; i < 8; i++) tl_dio_write32(sc, TL_AREG0_B5, 0x00000000); /* * Set up Netconfig register. Enable one channel and * one fragment mode. */ tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_ONECHAN|TL_CFG_ONEFRAG); if (internal && !sc->tl_bitrate) { tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN); } else { tl_dio_clrbit16(sc, TL_NETCONFIG, TL_CFG_PHYEN); } /* Handle cards with bitrate devices. */ if (sc->tl_bitrate) tl_dio_setbit16(sc, TL_NETCONFIG, TL_CFG_BITRATE); /* * Load adapter irq pacing timer and tx threshold. * We make the transmit threshold 1 initially but we may * change that later. */ cmd = CSR_READ_4(sc, TL_HOSTCMD); cmd |= TL_CMD_NES; cmd &= ~(TL_CMD_RT|TL_CMD_EOC|TL_CMD_ACK_MASK|TL_CMD_CHSEL_MASK); CMD_PUT(sc, cmd | (TL_CMD_LDTHR | TX_THR)); CMD_PUT(sc, cmd | (TL_CMD_LDTMR | 0x00000003)); /* Unreset the MII */ tl_dio_setbit(sc, TL_NETSIO, TL_SIO_NMRST); /* Take the adapter out of reset */ tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NRESET|TL_CMD_NWRAP); /* Wait for things to settle down a little. 
*/ DELAY(500); return; } /* * Probe for a ThunderLAN chip. Check the PCI vendor and device IDs * against our list and return its name if we find a match. */ static int tl_probe(dev) device_t dev; { struct tl_type *t; t = tl_devs; while(t->tl_name != NULL) { if ((pci_get_vendor(dev) == t->tl_vid) && (pci_get_device(dev) == t->tl_did)) { device_set_desc(dev, t->tl_name); return (BUS_PROBE_DEFAULT); } t++; } return(ENXIO); } static int tl_attach(dev) device_t dev; { int i; u_int16_t did, vid; struct tl_type *t; struct ifnet *ifp; struct tl_softc *sc; int unit, error = 0, rid; u_char eaddr[6]; vid = pci_get_vendor(dev); did = pci_get_device(dev); sc = device_get_softc(dev); unit = device_get_unit(dev); t = tl_devs; while(t->tl_name != NULL) { if (vid == t->tl_vid && did == t->tl_did) break; t++; } if (t->tl_name == NULL) { device_printf(dev, "unknown device!?\n"); return (ENXIO); } mtx_init(&sc->tl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); /* * Map control/status registers. */ pci_enable_busmaster(dev); #ifdef TL_USEIOSPACE rid = TL_PCI_LOIO; sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); /* * Some cards have the I/O and memory mapped address registers * reversed. Try both combinations before giving up. */ if (sc->tl_res == NULL) { rid = TL_PCI_LOMEM; sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); } #else rid = TL_PCI_LOMEM; sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->tl_res == NULL) { rid = TL_PCI_LOIO; sc->tl_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); } #endif if (sc->tl_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); error = ENXIO; goto fail; } sc->tl_btag = rman_get_bustag(sc->tl_res); sc->tl_bhandle = rman_get_bushandle(sc->tl_res); #ifdef notdef /* * The ThunderLAN manual suggests jacking the PCI latency * timer all the way up to its maximum value. 
I'm not sure * if this is really necessary, but what the manual wants, * the manual gets. */ command = pci_read_config(dev, TL_PCI_LATENCY_TIMER, 4); command |= 0x0000FF00; pci_write_config(dev, TL_PCI_LATENCY_TIMER, command, 4); #endif /* Allocate interrupt */ rid = 0; sc->tl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->tl_irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } /* * Now allocate memory for the TX and RX lists. */ sc->tl_ldata = contigmalloc(sizeof(struct tl_list_data), M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc->tl_ldata == NULL) { device_printf(dev, "no memory for list buffers!\n"); error = ENXIO; goto fail; } bzero(sc->tl_ldata, sizeof(struct tl_list_data)); sc->tl_dinfo = t; if (t->tl_vid == COMPAQ_VENDORID || t->tl_vid == TI_VENDORID) sc->tl_eeaddr = TL_EEPROM_EADDR; if (t->tl_vid == OLICOM_VENDORID) sc->tl_eeaddr = TL_EEPROM_EADDR_OC; /* Reset the adapter. */ tl_softreset(sc, 1); tl_hardreset(dev); tl_softreset(sc, 1); /* * Get station address from the EEPROM. */ if (tl_read_eeprom(sc, eaddr, sc->tl_eeaddr, ETHER_ADDR_LEN)) { device_printf(dev, "failed to read station address\n"); error = ENXIO; goto fail; } /* * XXX Olicom, in its desire to be different from the * rest of the world, has done strange things with the * encoding of the station address in the EEPROM. First * of all, they store the address at offset 0xF8 rather * than at 0x83 like the ThunderLAN manual suggests. * Second, they store the address in three 16-bit words in * network byte order, as opposed to storing it sequentially * like all the other ThunderLAN cards. In order to get * the station address in a form that matches what the Olicom * diagnostic utility specifies, we have to byte-swap each * word. To make things even more confusing, neither 00:00:28 * nor 00:00:24 appear in the IEEE OUI database. 
*/ if (sc->tl_dinfo->tl_vid == OLICOM_VENDORID) { for (i = 0; i < ETHER_ADDR_LEN; i += 2) { u_int16_t *p; p = (u_int16_t *)&eaddr[i]; *p = ntohs(*p); } } ifp = sc->tl_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_NEEDSGIANT; ifp->if_ioctl = tl_ioctl; ifp->if_start = tl_start; ifp->if_watchdog = tl_watchdog; ifp->if_init = tl_init; ifp->if_mtu = ETHERMTU; ifp->if_snd.ifq_maxlen = TL_TX_LIST_CNT - 1; callout_handle_init(&sc->tl_stat_ch); /* Reset the adapter again. */ tl_softreset(sc, 1); tl_hardreset(dev); tl_softreset(sc, 1); /* * Do MII setup. If no PHYs are found, then this is a * bitrate ThunderLAN chip that only supports 10baseT * and AUI/BNC. */ if (mii_phy_probe(dev, &sc->tl_miibus, tl_ifmedia_upd, tl_ifmedia_sts)) { struct ifmedia *ifm; sc->tl_bitrate = 1; ifmedia_init(&sc->ifmedia, 0, tl_ifmedia_upd, tl_ifmedia_sts); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL); ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_10_T); /* Reset again, this time setting bitrate mode. */ tl_softreset(sc, 1); ifm = &sc->ifmedia; ifm->ifm_media = ifm->ifm_cur->ifm_media; tl_ifmedia_upd(ifp); } /* * Call MI attach routine. */ ether_ifattach(ifp, eaddr); /* Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->tl_irq, INTR_TYPE_NET, tl_intr, sc, &sc->tl_intrhand); if (error) { device_printf(dev, "couldn't set up irq\n"); ether_ifdetach(ifp); if_free(ifp); goto fail; } fail: if (error) tl_detach(dev); return(error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. 
It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
 */
static int
tl_detach(dev)
	device_t		dev;
{
	struct tl_softc		*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	KASSERT(mtx_initialized(&sc->tl_mtx), ("tl mutex not initialized"));
	TL_LOCK(sc);
	ifp = sc->tl_ifp;

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		tl_stop(sc);
		ether_ifdetach(ifp);
		if_free(ifp);
	}
	/* The miibus child only exists if the MII probe found a PHY. */
	if (sc->tl_miibus)
		device_delete_child(dev, sc->tl_miibus);
	bus_generic_detach(dev);

	if (sc->tl_ldata)
		contigfree(sc->tl_ldata, sizeof(struct tl_list_data), M_DEVBUF);
	if (sc->tl_bitrate)
		ifmedia_removeall(&sc->ifmedia);

	if (sc->tl_intrhand)
		bus_teardown_intr(dev, sc->tl_irq, sc->tl_intrhand);
	if (sc->tl_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->tl_irq);
	if (sc->tl_res)
		bus_release_resource(dev, TL_RES, TL_RID, sc->tl_res);

	TL_UNLOCK(sc);
	mtx_destroy(&sc->tl_mtx);

	return(0);
}

/*
 * Initialize the transmit lists.
 *
 * Chains the TL_TX_LIST_CNT software descriptors into a singly linked
 * free list (tl_tx_free) and clears the active head/tail pointers.
 * Always returns 0.
 */
static int
tl_list_tx_init(sc)
	struct tl_softc		*sc;
{
	struct tl_chain_data	*cd;
	struct tl_list_data	*ld;
	int			i;

	cd = &sc->tl_cdata;
	ld = sc->tl_ldata;
	for (i = 0; i < TL_TX_LIST_CNT; i++) {
		cd->tl_tx_chain[i].tl_ptr = &ld->tl_tx_list[i];
		if (i == (TL_TX_LIST_CNT - 1))
			cd->tl_tx_chain[i].tl_next = NULL;
		else
			cd->tl_tx_chain[i].tl_next = &cd->tl_tx_chain[i + 1];
	}

	cd->tl_tx_free = &cd->tl_tx_chain[0];
	cd->tl_tx_tail = cd->tl_tx_head = NULL;
	/* Start out as if a TXEOC has already been acked; see tl_start(). */
	sc->tl_txeoc = 1;

	return(0);
}

/*
 * Initialize the RX lists and allocate mbufs for them.
 *
 * Each software descriptor gets a fresh mbuf cluster via tl_newbuf(),
 * and each hardware list entry's forward pointer is set to the physical
 * address of the next entry (0 terminates the chain).  Returns ENOBUFS
 * if any buffer allocation fails, 0 otherwise.
 */
static int
tl_list_rx_init(sc)
	struct tl_softc		*sc;
{
	struct tl_chain_data	*cd;
	struct tl_list_data	*ld;
	int			i;

	cd = &sc->tl_cdata;
	ld = sc->tl_ldata;

	for (i = 0; i < TL_RX_LIST_CNT; i++) {
		cd->tl_rx_chain[i].tl_ptr =
			(struct tl_list_onefrag *)&ld->tl_rx_list[i];
		if (tl_newbuf(sc, &cd->tl_rx_chain[i]) == ENOBUFS)
			return(ENOBUFS);
		if (i == (TL_RX_LIST_CNT - 1)) {
			cd->tl_rx_chain[i].tl_next = NULL;
			ld->tl_rx_list[i].tlist_fptr = 0;
		} else {
			cd->tl_rx_chain[i].tl_next = &cd->tl_rx_chain[i + 1];
			ld->tl_rx_list[i].tlist_fptr =
					vtophys(&ld->tl_rx_list[i + 1]);
		}
	}

	cd->tl_rx_head = &cd->tl_rx_chain[0];
	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];

	return(0);
}

/*
 * Attach a fresh mbuf cluster to RX descriptor 'c' and re-arm the
 * descriptor's hardware list entry so the chip can DMA into it.
 * Returns 0 on success, ENOBUFS if no mbuf or cluster was available.
 */
static int
tl_newbuf(sc, c)
	struct tl_softc		*sc;
	struct tl_chain_onefrag	*c;
{
	struct mbuf		*m_new = NULL;

	MGETHDR(m_new, M_DONTWAIT, MT_DATA);
	if (m_new == NULL)
		return(ENOBUFS);

	MCLGET(m_new, M_DONTWAIT);
	if (!(m_new->m_flags & M_EXT)) {
		m_freem(m_new);
		return(ENOBUFS);
	}

#ifdef __alpha__
	/* NOTE(review): presumably an alignment fixup on alpha -- confirm. */
	m_new->m_data += 2;
#endif

	c->tl_mbuf = m_new;
	c->tl_next = NULL;
	c->tl_ptr->tlist_frsize = MCLBYTES;
	c->tl_ptr->tlist_fptr = 0;
	c->tl_ptr->tl_frag.tlist_dadr = vtophys(mtod(m_new, caddr_t));
	c->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
	c->tl_ptr->tlist_cstat = TL_CSTAT_READY;

	return(0);
}

/*
 * Interrupt handler for RX 'end of frame' condition (EOF). This
 * tells us that a full ethernet frame has been captured and we need
 * to handle it.
 *
 * Reception is done using 'lists' which consist of a header and a
 * series of 10 data count/data address pairs that point to buffers.
 * Initially you're supposed to create a list, populate it with pointers
 * to buffers, then load the physical address of the list into the
 * ch_parm register. The adapter is then supposed to DMA the received
 * frame into the buffers for you.
 *
 * To make things as fast as possible, we have the chip DMA directly
 * into mbufs. This saves us from having to do a buffer copy: we can
 * just hand the mbufs directly to ether_input().
Once the frame has
 * been sent on its way, the 'list' structure is assigned a new buffer
 * and moved to the end of the RX chain.  As long as we stay ahead of
 * the chip, it will always think it has an endless receive channel.
 *
 * If we happen to fall behind and the chip manages to fill up all of
 * the buffers, it will generate an end of channel interrupt and wait
 * for us to empty the chain and restart the receiver.
 */
static int
tl_intvec_rxeof(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	int			r = 0, total_len = 0;
	struct ether_header	*eh;
	struct mbuf		*m;
	struct ifnet		*ifp;
	struct tl_chain_onefrag	*cur_rx;

	sc = xsc;
	ifp = sc->tl_ifp;

	TL_LOCK_ASSERT(sc);

	/* 'r' counts completed frames; the caller uses it as the ack value. */
	while(sc->tl_cdata.tl_rx_head != NULL) {
		cur_rx = sc->tl_cdata.tl_rx_head;
		if (!(cur_rx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
			break;
		r++;
		sc->tl_cdata.tl_rx_head = cur_rx->tl_next;
		m = cur_rx->tl_mbuf;
		total_len = cur_rx->tl_ptr->tlist_frsize;

		if (tl_newbuf(sc, cur_rx) == ENOBUFS) {
			/*
			 * No replacement mbuf available: recycle the old
			 * one into the descriptor and count the frame as
			 * an input error.
			 */
			ifp->if_ierrors++;
			cur_rx->tl_ptr->tlist_frsize = MCLBYTES;
			cur_rx->tl_ptr->tlist_cstat = TL_CSTAT_READY;
			cur_rx->tl_ptr->tl_frag.tlist_dcnt = MCLBYTES;
			continue;
		}

		/* Move the re-armed descriptor to the tail of the chain. */
		sc->tl_cdata.tl_rx_tail->tl_ptr->tlist_fptr =
						vtophys(cur_rx->tl_ptr);
		sc->tl_cdata.tl_rx_tail->tl_next = cur_rx;
		sc->tl_cdata.tl_rx_tail = cur_rx;

		/*
		 * Note: when the ThunderLAN chip is in 'capture all
		 * frames' mode, it will receive its own transmissions.
		 * We don't need to process our own transmissions, so
		 * we drop them here and continue.
		 */
		eh = mtod(m, struct ether_header *);
		/*if (ifp->if_flags & IFF_PROMISC && */
		if (!bcmp(eh->ether_shost, IFP2ENADDR(sc->tl_ifp),
		    ETHER_ADDR_LEN)) {
			m_freem(m);
			continue;
		}

		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = total_len;

		/* Drop the driver lock around the upcall into the stack. */
		TL_UNLOCK(sc);
		(*ifp->if_input)(ifp, m);
		TL_LOCK(sc);
	}

	return(r);
}

/*
 * The RX-EOC condition hits when the ch_parm address hasn't been
 * initialized or the adapter reached a list with a forward pointer
 * of 0 (which indicates the end of the chain).  In our case, this means
 * the card has hit the end of the receive buffer chain and we need to
 * empty out the buffers and shift the pointer back to the beginning again.
 */
static int
tl_intvec_rxeoc(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	int			r;
	struct tl_chain_data	*cd;

	sc = xsc;
	cd = &sc->tl_cdata;

	/* Flush out the receive queue and ack RXEOF interrupts. */
	r = tl_intvec_rxeof(xsc, type);
	CMD_PUT(sc, TL_CMD_ACK | r | (type & ~(0x00100000)));
	r = 1;
	/* Rewind to the start of the ring and reload the chip's list pointer. */
	cd->tl_rx_head = &cd->tl_rx_chain[0];
	cd->tl_rx_tail = &cd->tl_rx_chain[TL_RX_LIST_CNT - 1];
	CSR_WRITE_4(sc, TL_CH_PARM, vtophys(sc->tl_cdata.tl_rx_head->tl_ptr));
	/* Tell the caller (tl_intr) to restart the receive channel. */
	r |= (TL_CMD_GO|TL_CMD_RT);
	return(r);
}

/*
 * Transmit 'end of frame' handler: reclaim descriptors and free mbufs
 * for frames the chip has finished sending.  Returns the number of
 * frames reclaimed (used by tl_intr() as the interrupt ack count).
 */
static int
tl_intvec_txeof(xsc, type)
	void			*xsc;
	u_int32_t		type;
{
	struct tl_softc		*sc;
	int			r = 0;
	struct tl_chain		*cur_tx;

	sc = xsc;

	/*
	 * Go through our tx list and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->tl_cdata.tl_tx_head != NULL) {
		cur_tx = sc->tl_cdata.tl_tx_head;
		if (!(cur_tx->tl_ptr->tlist_cstat & TL_CSTAT_FRAMECMP))
			break;
		sc->tl_cdata.tl_tx_head = cur_tx->tl_next;

		r++;
		m_freem(cur_tx->tl_mbuf);
		cur_tx->tl_mbuf = NULL;

		/* Return the descriptor to the free list. */
		cur_tx->tl_next = sc->tl_cdata.tl_tx_free;
		sc->tl_cdata.tl_tx_free = cur_tx;
		/* A zero forward pointer marks the end of this chain. */
		if (!cur_tx->tl_ptr->tlist_fptr)
			break;
	}

	return(r);
}

/*
 * The transmit end of channel interrupt. The adapter triggers this
 * interrupt to tell us it hit the end of the current transmit list.
 *
 * A note about this: it's possible for a condition to arise where
 * tl_start() may try to send frames between TXEOF and TXEOC interrupts.
 * You have to avoid this since the chip expects things to go in a
 * particular order: transmit, acknowledge TXEOF, acknowledge TXEOC.
 * When the TXEOF handler is called, it will free all of the transmitted
 * frames and reset the tx_head pointer to NULL. However, a TXEOC
 * interrupt should be received and acknowledged before any more frames
 * are queued for transmission.
If tl_statrt() is called after TXEOF * resets the tx_head pointer but _before_ the TXEOC interrupt arrives, * it could attempt to issue a transmit command prematurely. * * To guard against this, tl_start() will only issue transmit commands * if the tl_txeoc flag is set, and only the TXEOC interrupt handler * can set this flag once tl_start() has cleared it. */ static int tl_intvec_txeoc(xsc, type) void *xsc; u_int32_t type; { struct tl_softc *sc; struct ifnet *ifp; u_int32_t cmd; sc = xsc; ifp = sc->tl_ifp; /* Clear the timeout timer. */ ifp->if_timer = 0; if (sc->tl_cdata.tl_tx_head == NULL) { ifp->if_flags &= ~IFF_OACTIVE; sc->tl_cdata.tl_tx_tail = NULL; sc->tl_txeoc = 1; } else { sc->tl_txeoc = 0; /* First we have to ack the EOC interrupt. */ CMD_PUT(sc, TL_CMD_ACK | 0x00000001 | type); /* Then load the address of the next TX list. */ CSR_WRITE_4(sc, TL_CH_PARM, vtophys(sc->tl_cdata.tl_tx_head->tl_ptr)); /* Restart TX channel. */ cmd = CSR_READ_4(sc, TL_HOSTCMD); cmd &= ~TL_CMD_RT; cmd |= TL_CMD_GO|TL_CMD_INTSON; CMD_PUT(sc, cmd); return(0); } return(1); } static int tl_intvec_adchk(xsc, type) void *xsc; u_int32_t type; { struct tl_softc *sc; sc = xsc; if (type) if_printf(sc->tl_ifp, "adapter check: %x\n", (unsigned int)CSR_READ_4(sc, TL_CH_PARM)); tl_softreset(sc, 1); tl_stop(sc); tl_init(sc); CMD_SET(sc, TL_CMD_INTSON); return(0); } static int tl_intvec_netsts(xsc, type) void *xsc; u_int32_t type; { struct tl_softc *sc; u_int16_t netsts; sc = xsc; netsts = tl_dio_read16(sc, TL_NETSTS); tl_dio_write16(sc, TL_NETSTS, netsts); if_printf(sc->tl_ifp, "network status: %x\n", netsts); return(1); } static void tl_intr(xsc) void *xsc; { struct tl_softc *sc; struct ifnet *ifp; int r = 0; u_int32_t type = 0; u_int16_t ints = 0; u_int8_t ivec = 0; sc = xsc; TL_LOCK(sc); /* Disable interrupts */ ints = CSR_READ_2(sc, TL_HOST_INT); CSR_WRITE_2(sc, TL_HOST_INT, ints); type = (ints << 16) & 0xFFFF0000; ivec = (ints & TL_VEC_MASK) >> 5; ints = (ints & TL_INT_MASK) >> 2; ifp = 
sc->tl_ifp; switch(ints) { case (TL_INTR_INVALID): #ifdef DIAGNOSTIC if_printf(ifp, "got an invalid interrupt!\n"); #endif /* Re-enable interrupts but don't ack this one. */ CMD_PUT(sc, type); r = 0; break; case (TL_INTR_TXEOF): r = tl_intvec_txeof((void *)sc, type); break; case (TL_INTR_TXEOC): r = tl_intvec_txeoc((void *)sc, type); break; case (TL_INTR_STATOFLOW): tl_stats_update(sc); r = 1; break; case (TL_INTR_RXEOF): r = tl_intvec_rxeof((void *)sc, type); break; case (TL_INTR_DUMMY): if_printf(ifp, "got a dummy interrupt\n"); r = 1; break; case (TL_INTR_ADCHK): if (ivec) r = tl_intvec_adchk((void *)sc, type); else r = tl_intvec_netsts((void *)sc, type); break; case (TL_INTR_RXEOC): r = tl_intvec_rxeoc((void *)sc, type); break; default: if_printf(ifp, "bogus interrupt type\n"); break; } /* Re-enable interrupts */ if (r) { CMD_PUT(sc, TL_CMD_ACK | r | type); } if (ifp->if_snd.ifq_head != NULL) tl_start(ifp); TL_UNLOCK(sc); return; } static void tl_stats_update(xsc) void *xsc; { struct tl_softc *sc; struct ifnet *ifp; struct tl_stats tl_stats; struct mii_data *mii; u_int32_t *p; bzero((char *)&tl_stats, sizeof(struct tl_stats)); sc = xsc; TL_LOCK(sc); ifp = sc->tl_ifp; p = (u_int32_t *)&tl_stats; CSR_WRITE_2(sc, TL_DIO_ADDR, TL_TXGOODFRAMES|TL_DIO_ADDR_INC); *p++ = CSR_READ_4(sc, TL_DIO_DATA); *p++ = CSR_READ_4(sc, TL_DIO_DATA); *p++ = CSR_READ_4(sc, TL_DIO_DATA); *p++ = CSR_READ_4(sc, TL_DIO_DATA); *p++ = CSR_READ_4(sc, TL_DIO_DATA); ifp->if_opackets += tl_tx_goodframes(tl_stats); ifp->if_collisions += tl_stats.tl_tx_single_collision + tl_stats.tl_tx_multi_collision; ifp->if_ipackets += tl_rx_goodframes(tl_stats); ifp->if_ierrors += tl_stats.tl_crc_errors + tl_stats.tl_code_errors + tl_rx_overrun(tl_stats); ifp->if_oerrors += tl_tx_underrun(tl_stats); if (tl_tx_underrun(tl_stats)) { u_int8_t tx_thresh; tx_thresh = tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_TXTHRESH; if (tx_thresh != TL_AC_TXTHRESH_WHOLEPKT) { tx_thresh >>= 4; tx_thresh++; if_printf(ifp, "tx underrun 
-- increasing " "tx threshold to %d bytes\n", (64 * (tx_thresh * 4))); tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH); tl_dio_setbit(sc, TL_ACOMMIT, tx_thresh << 4); } } sc->tl_stat_ch = timeout(tl_stats_update, sc, hz); if (!sc->tl_bitrate) { mii = device_get_softc(sc->tl_miibus); mii_tick(mii); } TL_UNLOCK(sc); return; } /* * Encapsulate an mbuf chain in a list by coupling the mbuf data * pointers to the fragment pointers. */ static int tl_encap(sc, c, m_head) struct tl_softc *sc; struct tl_chain *c; struct mbuf *m_head; { int frag = 0; struct tl_frag *f = NULL; int total_len; struct mbuf *m; struct ifnet *ifp = sc->tl_ifp; /* * Start packing the mbufs in this chain into * the fragment pointers. Stop when we run out * of fragments or hit the end of the mbuf chain. */ m = m_head; total_len = 0; for (m = m_head, frag = 0; m != NULL; m = m->m_next) { if (m->m_len != 0) { if (frag == TL_MAXFRAGS) break; total_len+= m->m_len; c->tl_ptr->tl_frag[frag].tlist_dadr = vtophys(mtod(m, vm_offset_t)); c->tl_ptr->tl_frag[frag].tlist_dcnt = m->m_len; frag++; } } /* * Handle special cases. * Special case #1: we used up all 10 fragments, but * we have more mbufs left in the chain. Copy the * data into an mbuf cluster. Note that we don't * bother clearing the values in the other fragment * pointers/counters; it wouldn't gain us anything, * and would waste cycles. 
*/ if (m != NULL) { struct mbuf *m_new = NULL; MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) { if_printf(ifp, "no memory for tx list\n"); return(1); } if (m_head->m_pkthdr.len > MHLEN) { MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { m_freem(m_new); if_printf(ifp, "no memory for tx list\n"); return(1); } } m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t)); m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len; m_freem(m_head); m_head = m_new; f = &c->tl_ptr->tl_frag[0]; f->tlist_dadr = vtophys(mtod(m_new, caddr_t)); f->tlist_dcnt = total_len = m_new->m_len; frag = 1; } /* * Special case #2: the frame is smaller than the minimum * frame size. We have to pad it to make the chip happy. */ if (total_len < TL_MIN_FRAMELEN) { if (frag == TL_MAXFRAGS) if_printf(ifp, "all frags filled but frame still to small!\n"); f = &c->tl_ptr->tl_frag[frag]; f->tlist_dcnt = TL_MIN_FRAMELEN - total_len; f->tlist_dadr = vtophys(&sc->tl_ldata->tl_pad); total_len += f->tlist_dcnt; frag++; } c->tl_mbuf = m_head; c->tl_ptr->tl_frag[frag - 1].tlist_dcnt |= TL_LAST_FRAG; c->tl_ptr->tlist_frsize = total_len; c->tl_ptr->tlist_cstat = TL_CSTAT_READY; c->tl_ptr->tlist_fptr = 0; return(0); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit lists. We also save a * copy of the pointers since the transmit list fragment pointers are * physical addresses. */ static void tl_start(ifp) struct ifnet *ifp; { struct tl_softc *sc; struct mbuf *m_head = NULL; u_int32_t cmd; struct tl_chain *prev = NULL, *cur_tx = NULL, *start_tx; sc = ifp->if_softc; TL_LOCK(sc); /* * Check for an available queue slot. If there are none, * punt. 
*/ if (sc->tl_cdata.tl_tx_free == NULL) { ifp->if_flags |= IFF_OACTIVE; TL_UNLOCK(sc); return; } start_tx = sc->tl_cdata.tl_tx_free; while(sc->tl_cdata.tl_tx_free != NULL) { IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* Pick a chain member off the free list. */ cur_tx = sc->tl_cdata.tl_tx_free; sc->tl_cdata.tl_tx_free = cur_tx->tl_next; cur_tx->tl_next = NULL; /* Pack the data into the list. */ tl_encap(sc, cur_tx, m_head); /* Chain it together */ if (prev != NULL) { prev->tl_next = cur_tx; prev->tl_ptr->tlist_fptr = vtophys(cur_tx->tl_ptr); } prev = cur_tx; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, cur_tx->tl_mbuf); } /* * If there are no packets queued, bail. */ if (cur_tx == NULL) { TL_UNLOCK(sc); return; } /* * That's all we can stands, we can't stands no more. * If there are no other transfers pending, then issue the * TX GO command to the adapter to start things moving. * Otherwise, just leave the data in the queue and let * the EOF/EOC interrupt handler send. */ if (sc->tl_cdata.tl_tx_head == NULL) { sc->tl_cdata.tl_tx_head = start_tx; sc->tl_cdata.tl_tx_tail = cur_tx; if (sc->tl_txeoc) { sc->tl_txeoc = 0; CSR_WRITE_4(sc, TL_CH_PARM, vtophys(start_tx->tl_ptr)); cmd = CSR_READ_4(sc, TL_HOSTCMD); cmd &= ~TL_CMD_RT; cmd |= TL_CMD_GO|TL_CMD_INTSON; CMD_PUT(sc, cmd); } } else { sc->tl_cdata.tl_tx_tail->tl_next = start_tx; sc->tl_cdata.tl_tx_tail = cur_tx; } /* * Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; TL_UNLOCK(sc); return; } static void tl_init(xsc) void *xsc; { struct tl_softc *sc = xsc; struct ifnet *ifp = sc->tl_ifp; struct mii_data *mii; TL_LOCK(sc); ifp = sc->tl_ifp; /* * Cancel pending I/O. 
*/ tl_stop(sc); /* Initialize TX FIFO threshold */ tl_dio_clrbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH); tl_dio_setbit(sc, TL_ACOMMIT, TL_AC_TXTHRESH_16LONG); /* Set PCI burst size */ tl_dio_write8(sc, TL_BSIZEREG, TL_RXBURST_16LONG|TL_TXBURST_16LONG); /* * Set 'capture all frames' bit for promiscuous mode. */ if (ifp->if_flags & IFF_PROMISC) tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF); else tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF); /* * Set capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_NOBRX); else tl_dio_setbit(sc, TL_NETCMD, TL_CMD_NOBRX); tl_dio_write16(sc, TL_MAXRX, MCLBYTES); /* Init our MAC address */ tl_setfilt(sc, (caddr_t)&IFP2ENADDR(sc->tl_ifp), 0); /* Init multicast filter, if needed. */ tl_setmulti(sc); /* Init circular RX list. */ if (tl_list_rx_init(sc) == ENOBUFS) { if_printf(ifp, "initialization failed: no memory for rx buffers\n"); tl_stop(sc); TL_UNLOCK(sc); return; } /* Init TX pointers. */ tl_list_tx_init(sc); /* Enable PCI interrupts. */ CMD_SET(sc, TL_CMD_INTSON); /* Load the address of the rx list */ CMD_SET(sc, TL_CMD_RT); CSR_WRITE_4(sc, TL_CH_PARM, vtophys(&sc->tl_ldata->tl_rx_list[0])); if (!sc->tl_bitrate) { if (sc->tl_miibus != NULL) { mii = device_get_softc(sc->tl_miibus); mii_mediachg(mii); } } else { tl_ifmedia_upd(ifp); } /* Send the RX go command */ CMD_SET(sc, TL_CMD_GO|TL_CMD_NES|TL_CMD_RT); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; /* Start the stats update counter */ sc->tl_stat_ch = timeout(tl_stats_update, sc, hz); TL_UNLOCK(sc); return; } /* * Set media options. */ static int tl_ifmedia_upd(ifp) struct ifnet *ifp; { struct tl_softc *sc; struct mii_data *mii = NULL; sc = ifp->if_softc; if (sc->tl_bitrate) tl_setmode(sc, sc->ifmedia.ifm_media); else { mii = device_get_softc(sc->tl_miibus); mii_mediachg(mii); } return(0); } /* * Report current media status. 
*/ static void tl_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct tl_softc *sc; struct mii_data *mii; sc = ifp->if_softc; ifmr->ifm_active = IFM_ETHER; if (sc->tl_bitrate) { if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD1) ifmr->ifm_active = IFM_ETHER|IFM_10_5; else ifmr->ifm_active = IFM_ETHER|IFM_10_T; if (tl_dio_read8(sc, TL_ACOMMIT) & TL_AC_MTXD3) ifmr->ifm_active |= IFM_HDX; else ifmr->ifm_active |= IFM_FDX; return; } else { mii = device_get_softc(sc->tl_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } return; } static int tl_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct tl_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; int s, error = 0; s = splimp(); switch(command) { case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (ifp->if_flags & IFF_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->tl_if_flags & IFF_PROMISC)) { tl_dio_setbit(sc, TL_NETCMD, TL_CMD_CAF); tl_setmulti(sc); } else if (ifp->if_flags & IFF_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->tl_if_flags & IFF_PROMISC) { tl_dio_clrbit(sc, TL_NETCMD, TL_CMD_CAF); tl_setmulti(sc); } else tl_init(sc); } else { if (ifp->if_flags & IFF_RUNNING) { tl_stop(sc); } } sc->tl_if_flags = ifp->if_flags; error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: tl_setmulti(sc); error = 0; break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: if (sc->tl_bitrate) error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); else { struct mii_data *mii; mii = device_get_softc(sc->tl_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); } break; default: error = ether_ioctl(ifp, command, data); break; } (void)splx(s); return(error); } static void tl_watchdog(ifp) struct ifnet *ifp; { struct tl_softc *sc; sc = ifp->if_softc; if_printf(ifp, "device timeout\n"); ifp->if_oerrors++; tl_softreset(sc, 1); tl_init(sc); return; } /* * Stop the adapter and free any mbufs 
allocated to the * RX and TX lists. */ static void tl_stop(sc) struct tl_softc *sc; { register int i; struct ifnet *ifp; TL_LOCK(sc); ifp = sc->tl_ifp; /* Stop the stats updater. */ untimeout(tl_stats_update, sc, sc->tl_stat_ch); /* Stop the transmitter */ CMD_CLR(sc, TL_CMD_RT); CMD_SET(sc, TL_CMD_STOP); CSR_WRITE_4(sc, TL_CH_PARM, 0); /* Stop the receiver */ CMD_SET(sc, TL_CMD_RT); CMD_SET(sc, TL_CMD_STOP); CSR_WRITE_4(sc, TL_CH_PARM, 0); /* * Disable host interrupts. */ CMD_SET(sc, TL_CMD_INTSOFF); /* * Clear list pointer. */ CSR_WRITE_4(sc, TL_CH_PARM, 0); /* * Free the RX lists. */ for (i = 0; i < TL_RX_LIST_CNT; i++) { if (sc->tl_cdata.tl_rx_chain[i].tl_mbuf != NULL) { m_freem(sc->tl_cdata.tl_rx_chain[i].tl_mbuf); sc->tl_cdata.tl_rx_chain[i].tl_mbuf = NULL; } } bzero((char *)&sc->tl_ldata->tl_rx_list, sizeof(sc->tl_ldata->tl_rx_list)); /* * Free the TX list buffers. */ for (i = 0; i < TL_TX_LIST_CNT; i++) { if (sc->tl_cdata.tl_tx_chain[i].tl_mbuf != NULL) { m_freem(sc->tl_cdata.tl_tx_chain[i].tl_mbuf); sc->tl_cdata.tl_tx_chain[i].tl_mbuf = NULL; } } bzero((char *)&sc->tl_ldata->tl_tx_list, sizeof(sc->tl_ldata->tl_tx_list)); ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); TL_UNLOCK(sc); return; } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static void tl_shutdown(dev) device_t dev; { struct tl_softc *sc; sc = device_get_softc(dev); tl_stop(sc); return; } Index: stable/6/sys/pci/if_vr.c =================================================================== --- stable/6/sys/pci/if_vr.c (revision 149421) +++ stable/6/sys/pci/if_vr.c (revision 149422) @@ -1,1707 +1,1709 @@ /*- * Copyright (c) 1997, 1998 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * VIA Rhine fast ethernet PCI NIC driver * * Supports various network adapters based on the VIA Rhine * and Rhine II PCI controllers, including the D-Link DFE530TX. * Datasheets are available at http://www.via.com.tw. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The VIA Rhine controllers are similar in some respects to the * the DEC tulip chips, except less complicated. 
The controller * uses an MII bus and an external physical layer interface. The * receiver has a one entry perfect filter and a 64-bit hash table * multicast filter. Transmit and receive descriptors are similar * to the tulip. * * The Rhine has a serious flaw in its transmit DMA mechanism: * transmit buffers must be longword aligned. Unfortunately, * FreeBSD doesn't guarantee that mbufs will be filled in starting * at longword boundaries, so we have to do a buffer copy before * transmission. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include #include #include #include #include #define VR_USEIOSPACE #include MODULE_DEPEND(vr, pci, 1, 1, 1); MODULE_DEPEND(vr, ether, 1, 1, 1); MODULE_DEPEND(vr, miibus, 1, 1, 1); /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" #undef VR_USESWSHIFT /* * Various supported device vendors/types and their names. 
*/ static struct vr_type vr_devs[] = { { VIA_VENDORID, VIA_DEVICEID_RHINE, "VIA VT3043 Rhine I 10/100BaseTX" }, { VIA_VENDORID, VIA_DEVICEID_RHINE_II, "VIA VT86C100A Rhine II 10/100BaseTX" }, { VIA_VENDORID, VIA_DEVICEID_RHINE_II_2, "VIA VT6102 Rhine II 10/100BaseTX" }, { VIA_VENDORID, VIA_DEVICEID_RHINE_III, "VIA VT6105 Rhine III 10/100BaseTX" }, { VIA_VENDORID, VIA_DEVICEID_RHINE_III_M, "VIA VT6105M Rhine III 10/100BaseTX" }, { DELTA_VENDORID, DELTA_DEVICEID_RHINE_II, "Delta Electronics Rhine II 10/100BaseTX" }, { ADDTRON_VENDORID, ADDTRON_DEVICEID_RHINE_II, "Addtron Technology Rhine II 10/100BaseTX" }, { 0, 0, NULL } }; static int vr_probe(device_t); static int vr_attach(device_t); static int vr_detach(device_t); static int vr_newbuf(struct vr_softc *, struct vr_chain_onefrag *, struct mbuf *); static int vr_encap(struct vr_softc *, struct vr_chain *, struct mbuf * ); static void vr_rxeof(struct vr_softc *); static void vr_rxeoc(struct vr_softc *); static void vr_txeof(struct vr_softc *); static void vr_tick(void *); static void vr_intr(void *); static void vr_start(struct ifnet *); static void vr_start_locked(struct ifnet *); static int vr_ioctl(struct ifnet *, u_long, caddr_t); static void vr_init(void *); static void vr_init_locked(struct vr_softc *); static void vr_stop(struct vr_softc *); static void vr_watchdog(struct ifnet *); static void vr_shutdown(device_t); static int vr_ifmedia_upd(struct ifnet *); static void vr_ifmedia_sts(struct ifnet *, struct ifmediareq *); #ifdef VR_USESWSHIFT static void vr_mii_sync(struct vr_softc *); static void vr_mii_send(struct vr_softc *, uint32_t, int); #endif static int vr_mii_readreg(struct vr_softc *, struct vr_mii_frame *); static int vr_mii_writereg(struct vr_softc *, struct vr_mii_frame *); static int vr_miibus_readreg(device_t, uint16_t, uint16_t); static int vr_miibus_writereg(device_t, uint16_t, uint16_t, uint16_t); static void vr_miibus_statchg(device_t); static void vr_setcfg(struct vr_softc *, int); static 
void vr_setmulti(struct vr_softc *); static void vr_reset(struct vr_softc *); static int vr_list_rx_init(struct vr_softc *); static int vr_list_tx_init(struct vr_softc *); #ifdef VR_USEIOSPACE #define VR_RES SYS_RES_IOPORT #define VR_RID VR_PCI_LOIO #else #define VR_RES SYS_RES_MEMORY #define VR_RID VR_PCI_LOMEM #endif static device_method_t vr_methods[] = { /* Device interface */ DEVMETHOD(device_probe, vr_probe), DEVMETHOD(device_attach, vr_attach), DEVMETHOD(device_detach, vr_detach), DEVMETHOD(device_shutdown, vr_shutdown), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, vr_miibus_readreg), DEVMETHOD(miibus_writereg, vr_miibus_writereg), DEVMETHOD(miibus_statchg, vr_miibus_statchg), { 0, 0 } }; static driver_t vr_driver = { "vr", vr_methods, sizeof(struct vr_softc) }; static devclass_t vr_devclass; DRIVER_MODULE(vr, pci, vr_driver, vr_devclass, 0, 0); DRIVER_MODULE(miibus, vr, miibus_driver, miibus_devclass, 0, 0); #define VR_SETBIT(sc, reg, x) \ CSR_WRITE_1(sc, reg, \ CSR_READ_1(sc, reg) | (x)) #define VR_CLRBIT(sc, reg, x) \ CSR_WRITE_1(sc, reg, \ CSR_READ_1(sc, reg) & ~(x)) #define VR_SETBIT16(sc, reg, x) \ CSR_WRITE_2(sc, reg, \ CSR_READ_2(sc, reg) | (x)) #define VR_CLRBIT16(sc, reg, x) \ CSR_WRITE_2(sc, reg, \ CSR_READ_2(sc, reg) & ~(x)) #define VR_SETBIT32(sc, reg, x) \ CSR_WRITE_4(sc, reg, \ CSR_READ_4(sc, reg) | (x)) #define VR_CLRBIT32(sc, reg, x) \ CSR_WRITE_4(sc, reg, \ CSR_READ_4(sc, reg) & ~(x)) #define SIO_SET(x) \ CSR_WRITE_1(sc, VR_MIICMD, \ CSR_READ_1(sc, VR_MIICMD) | (x)) #define SIO_CLR(x) \ CSR_WRITE_1(sc, VR_MIICMD, \ CSR_READ_1(sc, VR_MIICMD) & ~(x)) #ifdef VR_USESWSHIFT /* * Sync the PHYs by setting data bit and strobing the clock 32 times. 
*/
static void
vr_mii_sync(struct vr_softc *sc)
{
	register int	i;

	/* Drive the data line high while toggling the clock 32 times. */
	SIO_SET(VR_MIICMD_DIR|VR_MIICMD_DATAIN);

	for (i = 0; i < 32; i++) {
		SIO_SET(VR_MIICMD_CLK);
		DELAY(1);
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
	}
}

/*
 * Clock a series of bits through the MII.
 *
 * Shifts the top 'cnt' bits of 'bits' out MSB-first on the MDIO data
 * line, toggling the clock once per bit.
 */
static void
vr_mii_send(struct vr_softc *sc, uint32_t bits, int cnt)
{
	int	i;

	SIO_CLR(VR_MIICMD_CLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			SIO_SET(VR_MIICMD_DATAIN);
		} else {
			SIO_CLR(VR_MIICMD_DATAIN);
		}
		DELAY(1);
		SIO_CLR(VR_MIICMD_CLK);
		DELAY(1);
		SIO_SET(VR_MIICMD_CLK);
	}
}
#endif

/*
 * Read a PHY register through the MII.
 *
 * Two implementations: a software bit-bang version (VR_USESWSHIFT) and
 * a version that uses the chip's own MII access registers.  The result
 * is returned in frame->mii_data.
 */
static int
vr_mii_readreg(struct vr_softc *sc, struct vr_mii_frame *frame)
#ifdef VR_USESWSHIFT
{
	int	i, ack;

	/* Set up frame for RX. */
	frame->mii_stdelim = VR_MII_STARTDELIM;
	frame->mii_opcode = VR_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_1(sc, VR_MIICMD, 0);
	VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM);

	/* Turn on data xmit. */
	SIO_SET(VR_MIICMD_DIR);

	vr_mii_sync(sc);

	/* Send command/address info. */
	vr_mii_send(sc, frame->mii_stdelim, 2);
	vr_mii_send(sc, frame->mii_opcode, 2);
	vr_mii_send(sc, frame->mii_phyaddr, 5);
	vr_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit. */
	SIO_CLR((VR_MIICMD_CLK|VR_MIICMD_DATAIN));
	DELAY(1);
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	/* Turn off xmit. */
	SIO_CLR(VR_MIICMD_DIR);

	/* Check for ack */
	SIO_CLR(VR_MIICMD_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT;
	SIO_SET(VR_MIICMD_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
*/ if (ack) { for(i = 0; i < 16; i++) { SIO_CLR(VR_MIICMD_CLK); DELAY(1); SIO_SET(VR_MIICMD_CLK); DELAY(1); } goto fail; } for (i = 0x8000; i; i >>= 1) { SIO_CLR(VR_MIICMD_CLK); DELAY(1); if (!ack) { if (CSR_READ_4(sc, VR_MIICMD) & VR_MIICMD_DATAOUT) frame->mii_data |= i; DELAY(1); } SIO_SET(VR_MIICMD_CLK); DELAY(1); } fail: SIO_CLR(VR_MIICMD_CLK); DELAY(1); SIO_SET(VR_MIICMD_CLK); DELAY(1); if (ack) return (1); return (0); } #else { int i; /* Set the PHY address. */ CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR)& 0xe0)| frame->mii_phyaddr); /* Set the register address. */ CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr); VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_READ_ENB); for (i = 0; i < 10000; i++) { if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_READ_ENB) == 0) break; DELAY(1); } frame->mii_data = CSR_READ_2(sc, VR_MIIDATA); return (0); } #endif /* * Write to a PHY register through the MII. */ static int vr_mii_writereg(struct vr_softc *sc, struct vr_mii_frame *frame) #ifdef VR_USESWSHIFT { CSR_WRITE_1(sc, VR_MIICMD, 0); VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_DIRECTPGM); /* Set up frame for TX. */ frame->mii_stdelim = VR_MII_STARTDELIM; frame->mii_opcode = VR_MII_WRITEOP; frame->mii_turnaround = VR_MII_TURNAROUND; /* Turn on data output. */ SIO_SET(VR_MIICMD_DIR); vr_mii_sync(sc); vr_mii_send(sc, frame->mii_stdelim, 2); vr_mii_send(sc, frame->mii_opcode, 2); vr_mii_send(sc, frame->mii_phyaddr, 5); vr_mii_send(sc, frame->mii_regaddr, 5); vr_mii_send(sc, frame->mii_turnaround, 2); vr_mii_send(sc, frame->mii_data, 16); /* Idle bit. */ SIO_SET(VR_MIICMD_CLK); DELAY(1); SIO_CLR(VR_MIICMD_CLK); DELAY(1); /* Turn off xmit. */ SIO_CLR(VR_MIICMD_DIR); return (0); } #else { int i; /* Set the PHY address. */ CSR_WRITE_1(sc, VR_PHYADDR, (CSR_READ_1(sc, VR_PHYADDR)& 0xe0)| frame->mii_phyaddr); /* Set the register address and data to write. 
*/ CSR_WRITE_1(sc, VR_MIIADDR, frame->mii_regaddr); CSR_WRITE_2(sc, VR_MIIDATA, frame->mii_data); VR_SETBIT(sc, VR_MIICMD, VR_MIICMD_WRITE_ENB); for (i = 0; i < 10000; i++) { if ((CSR_READ_1(sc, VR_MIICMD) & VR_MIICMD_WRITE_ENB) == 0) break; DELAY(1); } return (0); } #endif static int vr_miibus_readreg(device_t dev, uint16_t phy, uint16_t reg) { struct vr_mii_frame frame; struct vr_softc *sc = device_get_softc(dev); switch (sc->vr_revid) { case REV_ID_VT6102_APOLLO: if (phy != 1) { frame.mii_data = 0; goto out; } default: break; } bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; vr_mii_readreg(sc, &frame); out: return (frame.mii_data); } static int vr_miibus_writereg(device_t dev, uint16_t phy, uint16_t reg, uint16_t data) { struct vr_mii_frame frame; struct vr_softc *sc = device_get_softc(dev); switch (sc->vr_revid) { case REV_ID_VT6102_APOLLO: if (phy != 1) return (0); default: break; } bzero((char *)&frame, sizeof(frame)); frame.mii_phyaddr = phy; frame.mii_regaddr = reg; frame.mii_data = data; vr_mii_writereg(sc, &frame); return (0); } static void vr_miibus_statchg(device_t dev) { struct mii_data *mii; struct vr_softc *sc = device_get_softc(dev); mii = device_get_softc(sc->vr_miibus); vr_setcfg(sc, mii->mii_media_active); } /* * Program the 64-bit multicast hash filter. */ static void vr_setmulti(struct vr_softc *sc) { struct ifnet *ifp = sc->vr_ifp; int h = 0; uint32_t hashes[2] = { 0, 0 }; struct ifmultiaddr *ifma; uint8_t rxfilt; int mcnt = 0; VR_LOCK_ASSERT(sc); rxfilt = CSR_READ_1(sc, VR_RXCFG); if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { rxfilt |= VR_RXCFG_RX_MULTI; CSR_WRITE_1(sc, VR_RXCFG, rxfilt); CSR_WRITE_4(sc, VR_MAR0, 0xFFFFFFFF); CSR_WRITE_4(sc, VR_MAR1, 0xFFFFFFFF); return; } /* First, zero out all the existing hash bits. */ CSR_WRITE_4(sc, VR_MAR0, 0); CSR_WRITE_4(sc, VR_MAR1, 0); /* Now program new ones. 
*/ + IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); mcnt++; } + IF_ADDR_UNLOCK(ifp); if (mcnt) rxfilt |= VR_RXCFG_RX_MULTI; else rxfilt &= ~VR_RXCFG_RX_MULTI; CSR_WRITE_4(sc, VR_MAR0, hashes[0]); CSR_WRITE_4(sc, VR_MAR1, hashes[1]); CSR_WRITE_1(sc, VR_RXCFG, rxfilt); } /* * In order to fiddle with the * 'full-duplex' and '100Mbps' bits in the netconfig register, we * first have to put the transmit and/or receive logic in the idle state. */ static void vr_setcfg(struct vr_softc *sc, int media) { int restart = 0; VR_LOCK_ASSERT(sc); if (CSR_READ_2(sc, VR_COMMAND) & (VR_CMD_TX_ON|VR_CMD_RX_ON)) { restart = 1; VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_TX_ON|VR_CMD_RX_ON)); } if ((media & IFM_GMASK) == IFM_FDX) VR_SETBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX); else VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_FULLDUPLEX); if (restart) VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON|VR_CMD_RX_ON); } static void vr_reset(struct vr_softc *sc) { register int i; /*VR_LOCK_ASSERT(sc);*/ /* XXX: Called during detach w/o lock. */ VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RESET); for (i = 0; i < VR_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RESET)) break; } if (i == VR_TIMEOUT) { if (sc->vr_revid < REV_ID_VT3065_A) printf("vr%d: reset never completed!\n", sc->vr_unit); else { /* Use newer force reset command */ printf("vr%d: Using force reset command.\n", sc->vr_unit); VR_SETBIT(sc, VR_MISC_CR1, VR_MISCCR1_FORSRST); } } /* Wait a little while for the chip to get its brains in order. */ DELAY(1000); } /* * Probe for a VIA Rhine chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. 
*/ static int vr_probe(device_t dev) { struct vr_type *t = vr_devs; while (t->vr_name != NULL) { if ((pci_get_vendor(dev) == t->vr_vid) && (pci_get_device(dev) == t->vr_did)) { device_set_desc(dev, t->vr_name); return (BUS_PROBE_DEFAULT); } t++; } return (ENXIO); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int vr_attach(dev) device_t dev; { int i; u_char eaddr[ETHER_ADDR_LEN]; struct vr_softc *sc; struct ifnet *ifp; int unit, error = 0, rid; sc = device_get_softc(dev); unit = device_get_unit(dev); mtx_init(&sc->vr_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); /* * Map control/status registers. */ pci_enable_busmaster(dev); sc->vr_revid = pci_read_config(dev, VR_PCI_REVID, 4) & 0x000000FF; rid = VR_RID; sc->vr_res = bus_alloc_resource_any(dev, VR_RES, &rid, RF_ACTIVE); if (sc->vr_res == NULL) { printf("vr%d: couldn't map ports/memory\n", unit); error = ENXIO; goto fail; } sc->vr_btag = rman_get_bustag(sc->vr_res); sc->vr_bhandle = rman_get_bushandle(sc->vr_res); /* Allocate interrupt */ rid = 0; sc->vr_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->vr_irq == NULL) { printf("vr%d: couldn't map interrupt\n", unit); error = ENXIO; goto fail; } /* * Windows may put the chip in suspend mode when it * shuts down. Be sure to kick it in the head to wake it * up again. */ VR_CLRBIT(sc, VR_STICKHW, (VR_STICKHW_DS0|VR_STICKHW_DS1)); /* Reset the adapter. */ vr_reset(sc); /* * Turn on bit2 (MIION) in PCI configuration register 0x53 during * initialization and disable AUTOPOLL. */ pci_write_config(dev, VR_PCI_MODE, pci_read_config(dev, VR_PCI_MODE, 4) | (VR_MODE3_MIION << 24), 4); VR_CLRBIT(sc, VR_MIICMD, VR_MIICMD_AUTOPOLL); /* * Get station address. The way the Rhine chips work, * you're not allowed to directly access the EEPROM once * they've been programmed a special way. Consequently, * we need to read the node address from the PAR0 and PAR1 * registers. 
*/ VR_SETBIT(sc, VR_EECSR, VR_EECSR_LOAD); DELAY(200); for (i = 0; i < ETHER_ADDR_LEN; i++) eaddr[i] = CSR_READ_1(sc, VR_PAR0 + i); sc->vr_unit = unit; sc->vr_ldata = contigmalloc(sizeof(struct vr_list_data), M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc->vr_ldata == NULL) { printf("vr%d: no memory for list buffers!\n", unit); error = ENXIO; goto fail; } bzero(sc->vr_ldata, sizeof(struct vr_list_data)); ifp = sc->vr_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { printf("vr%d: can not if_alloc()\n", unit); error = ENOSPC; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = vr_ioctl; ifp->if_start = vr_start; ifp->if_watchdog = vr_watchdog; ifp->if_init = vr_init; ifp->if_baudrate = 10000000; IFQ_SET_MAXLEN(&ifp->if_snd, VR_TX_LIST_CNT - 1); ifp->if_snd.ifq_maxlen = VR_TX_LIST_CNT - 1; IFQ_SET_READY(&ifp->if_snd); #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif ifp->if_capenable = ifp->if_capabilities; /* Do MII setup. */ if (mii_phy_probe(dev, &sc->vr_miibus, vr_ifmedia_upd, vr_ifmedia_sts)) { printf("vr%d: MII without any phy!\n", sc->vr_unit); error = ENXIO; goto fail; } callout_handle_init(&sc->vr_stat_ch); /* Call MI attach routine. */ ether_ifattach(ifp, eaddr); sc->suspended = 0; /* Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->vr_irq, INTR_TYPE_NET | INTR_MPSAFE, vr_intr, sc, &sc->vr_intrhand); if (error) { printf("vr%d: couldn't set up irq\n", unit); ether_ifdetach(ifp); if_free(ifp); goto fail; } fail: if (error) vr_detach(dev); return (error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. 
 */
static int
vr_detach(device_t dev)
{
	struct vr_softc		*sc = device_get_softc(dev);
	struct ifnet		*ifp = sc->vr_ifp;

	KASSERT(mtx_initialized(&sc->vr_mtx), ("vr mutex not initialized"));

	VR_LOCK(sc);

	/* Make the interrupt handler bail out early (see vr_intr()). */
	sc->suspended = 1;

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		vr_stop(sc);
		/*
		 * ether_ifdetach() can call back into the driver (e.g.
		 * the ioctl path), which takes the softc lock itself.
		 */
		VR_UNLOCK(sc);		/* XXX: Avoid recursive acquire. */
		ether_ifdetach(ifp);
		if_free(ifp);
		VR_LOCK(sc);
	}
	if (sc->vr_miibus)
		device_delete_child(dev, sc->vr_miibus);
	bus_generic_detach(dev);

	/* Each resource is released only if it was actually allocated. */
	if (sc->vr_intrhand)
		bus_teardown_intr(dev, sc->vr_irq, sc->vr_intrhand);
	if (sc->vr_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->vr_irq);
	if (sc->vr_res)
		bus_release_resource(dev, VR_RES, VR_RID, sc->vr_res);

	if (sc->vr_ldata)
		contigfree(sc->vr_ldata, sizeof(struct vr_list_data), M_DEVBUF);

	VR_UNLOCK(sc);
	mtx_destroy(&sc->vr_mtx);

	return (0);
}

/*
 * Initialize the transmit descriptors.
 */
static int
vr_list_tx_init(struct vr_softc *sc)
{
	struct vr_chain_data	*cd;
	struct vr_list_data	*ld;
	int			i;

	cd = &sc->vr_cdata;
	ld = sc->vr_ldata;
	/* Link the software TX chain into a closed ring. */
	for (i = 0; i < VR_TX_LIST_CNT; i++) {
		cd->vr_tx_chain[i].vr_ptr = &ld->vr_tx_list[i];
		if (i == (VR_TX_LIST_CNT - 1))
			cd->vr_tx_chain[i].vr_nextdesc =
			    &cd->vr_tx_chain[0];
		else
			cd->vr_tx_chain[i].vr_nextdesc =
			    &cd->vr_tx_chain[i + 1];
	}
	cd->vr_tx_cons = cd->vr_tx_prod = &cd->vr_tx_chain[0];

	return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
*/ static int vr_list_rx_init(struct vr_softc *sc) { struct vr_chain_data *cd; struct vr_list_data *ld; int i; VR_LOCK_ASSERT(sc); cd = &sc->vr_cdata; ld = sc->vr_ldata; for (i = 0; i < VR_RX_LIST_CNT; i++) { cd->vr_rx_chain[i].vr_ptr = (struct vr_desc *)&ld->vr_rx_list[i]; if (vr_newbuf(sc, &cd->vr_rx_chain[i], NULL) == ENOBUFS) return (ENOBUFS); if (i == (VR_RX_LIST_CNT - 1)) { cd->vr_rx_chain[i].vr_nextdesc = &cd->vr_rx_chain[0]; ld->vr_rx_list[i].vr_next = vtophys(&ld->vr_rx_list[0]); } else { cd->vr_rx_chain[i].vr_nextdesc = &cd->vr_rx_chain[i + 1]; ld->vr_rx_list[i].vr_next = vtophys(&ld->vr_rx_list[i + 1]); } } cd->vr_rx_head = &cd->vr_rx_chain[0]; return (0); } /* * Initialize an RX descriptor and attach an MBUF cluster. * Note: the length fields are only 11 bits wide, which means the * largest size we can specify is 2047. This is important because * MCLBYTES is 2048, so we have to subtract one otherwise we'll * overflow the field and make a mess. */ static int vr_newbuf(struct vr_softc *sc, struct vr_chain_onefrag *c, struct mbuf *m) { struct mbuf *m_new = NULL; if (m == NULL) { MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) return (ENOBUFS); MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { m_freem(m_new); return (ENOBUFS); } m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; } else { m_new = m; m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; m_new->m_data = m_new->m_ext.ext_buf; } m_adj(m_new, sizeof(uint64_t)); c->vr_mbuf = m_new; c->vr_ptr->vr_status = VR_RXSTAT; c->vr_ptr->vr_data = vtophys(mtod(m_new, caddr_t)); c->vr_ptr->vr_ctl = VR_RXCTL | VR_RXLEN; return (0); } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. 
*/ static void vr_rxeof(struct vr_softc *sc) { struct mbuf *m, *m0; struct ifnet *ifp; struct vr_chain_onefrag *cur_rx; int total_len = 0; uint32_t rxstat; VR_LOCK_ASSERT(sc); ifp = sc->vr_ifp; while (!((rxstat = sc->vr_cdata.vr_rx_head->vr_ptr->vr_status) & VR_RXSTAT_OWN)) { #ifdef DEVICE_POLLING if (ifp->if_flags & IFF_POLLING) { if (sc->rxcycles <= 0) break; sc->rxcycles--; } #endif /* DEVICE_POLLING */ m0 = NULL; cur_rx = sc->vr_cdata.vr_rx_head; sc->vr_cdata.vr_rx_head = cur_rx->vr_nextdesc; m = cur_rx->vr_mbuf; /* * If an error occurs, update stats, clear the * status word and leave the mbuf cluster in place: * it should simply get re-used next time this descriptor * comes up in the ring. */ if (rxstat & VR_RXSTAT_RXERR) { ifp->if_ierrors++; printf("vr%d: rx error (%02x):", sc->vr_unit, rxstat & 0x000000ff); if (rxstat & VR_RXSTAT_CRCERR) printf(" crc error"); if (rxstat & VR_RXSTAT_FRAMEALIGNERR) printf(" frame alignment error\n"); if (rxstat & VR_RXSTAT_FIFOOFLOW) printf(" FIFO overflow"); if (rxstat & VR_RXSTAT_GIANT) printf(" received giant packet"); if (rxstat & VR_RXSTAT_RUNT) printf(" received runt packet"); if (rxstat & VR_RXSTAT_BUSERR) printf(" system bus error"); if (rxstat & VR_RXSTAT_BUFFERR) printf("rx buffer error"); printf("\n"); vr_newbuf(sc, cur_rx, m); continue; } /* No errors; receive the packet. */ total_len = VR_RXBYTES(cur_rx->vr_ptr->vr_status); /* * XXX The VIA Rhine chip includes the CRC with every * received frame, and there's no way to turn this * behavior off (at least, I can't find anything in * the manual that explains how to do it) so we have * to trim off the CRC manually. 
*/ total_len -= ETHER_CRC_LEN; m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, ifp, NULL); vr_newbuf(sc, cur_rx, m); if (m0 == NULL) { ifp->if_ierrors++; continue; } m = m0; ifp->if_ipackets++; VR_UNLOCK(sc); (*ifp->if_input)(ifp, m); VR_LOCK(sc); } } static void vr_rxeoc(struct vr_softc *sc) { struct ifnet *ifp = sc->vr_ifp; int i; VR_LOCK_ASSERT(sc); ifp->if_ierrors++; VR_CLRBIT16(sc, VR_COMMAND, VR_CMD_RX_ON); DELAY(10000); /* Wait for receiver to stop */ for (i = 0x400; i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_RX_ON); i--) { ; } if (!i) { printf("vr%d: rx shutdown error!\n", sc->vr_unit); sc->vr_flags |= VR_F_RESTART; return; } vr_rxeof(sc); CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr)); VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_ON); VR_SETBIT16(sc, VR_COMMAND, VR_CMD_RX_GO); } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. */ static void vr_txeof(struct vr_softc *sc) { struct vr_chain *cur_tx; struct ifnet *ifp = sc->vr_ifp; VR_LOCK_ASSERT(sc); /* * Go through our tx list and free mbufs for those * frames that have been transmitted. 
*/ cur_tx = sc->vr_cdata.vr_tx_cons; while (cur_tx->vr_mbuf != NULL) { uint32_t txstat; int i; txstat = cur_tx->vr_ptr->vr_status; if ((txstat & VR_TXSTAT_ABRT) || (txstat & VR_TXSTAT_UDF)) { for (i = 0x400; i && (CSR_READ_2(sc, VR_COMMAND) & VR_CMD_TX_ON); i--) ; /* Wait for chip to shutdown */ if (!i) { printf("vr%d: tx shutdown timeout\n", sc->vr_unit); sc->vr_flags |= VR_F_RESTART; break; } VR_TXOWN(cur_tx) = VR_TXSTAT_OWN; CSR_WRITE_4(sc, VR_TXADDR, vtophys(cur_tx->vr_ptr)); break; } if (txstat & VR_TXSTAT_OWN) break; if (txstat & VR_TXSTAT_ERRSUM) { ifp->if_oerrors++; if (txstat & VR_TXSTAT_DEFER) ifp->if_collisions++; if (txstat & VR_TXSTAT_LATECOLL) ifp->if_collisions++; } ifp->if_collisions +=(txstat & VR_TXSTAT_COLLCNT) >> 3; ifp->if_opackets++; m_freem(cur_tx->vr_mbuf); cur_tx->vr_mbuf = NULL; ifp->if_flags &= ~IFF_OACTIVE; cur_tx = cur_tx->vr_nextdesc; } sc->vr_cdata.vr_tx_cons = cur_tx; if (cur_tx->vr_mbuf == NULL) ifp->if_timer = 0; } static void vr_tick(void *xsc) { struct vr_softc *sc = xsc; struct mii_data *mii; VR_LOCK(sc); if (sc->vr_flags & VR_F_RESTART) { printf("vr%d: restarting\n", sc->vr_unit); vr_stop(sc); vr_reset(sc); vr_init_locked(sc); sc->vr_flags &= ~VR_F_RESTART; } mii = device_get_softc(sc->vr_miibus); mii_tick(mii); sc->vr_stat_ch = timeout(vr_tick, sc, hz); VR_UNLOCK(sc); } #ifdef DEVICE_POLLING static poll_handler_t vr_poll; static poll_handler_t vr_poll_locked; static void vr_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct vr_softc *sc = ifp->if_softc; VR_LOCK(sc); vr_poll_locked(ifp, cmd, count); VR_UNLOCK(sc); } static void vr_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct vr_softc *sc = ifp->if_softc; VR_LOCK_ASSERT(sc); if (!(ifp->if_capenable & IFCAP_POLLING)) { ether_poll_deregister(ifp); cmd = POLL_DEREGISTER; } if (cmd == POLL_DEREGISTER) { /* Final call, enable interrupts. 
*/ CSR_WRITE_2(sc, VR_IMR, VR_INTRS); return; } sc->rxcycles = count; vr_rxeof(sc); vr_txeof(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) vr_start_locked(ifp); if (cmd == POLL_AND_CHECK_STATUS) { uint16_t status; /* Also check status register. */ status = CSR_READ_2(sc, VR_ISR); if (status) CSR_WRITE_2(sc, VR_ISR, status); if ((status & VR_INTRS) == 0) return; if (status & VR_ISR_RX_DROPPED) { printf("vr%d: rx packet lost\n", sc->vr_unit); ifp->if_ierrors++; } if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) || (status & VR_ISR_RX_NOBUF) || (status & VR_ISR_RX_OFLOW)) { printf("vr%d: receive error (%04x)", sc->vr_unit, status); if (status & VR_ISR_RX_NOBUF) printf(" no buffers"); if (status & VR_ISR_RX_OFLOW) printf(" overflow"); if (status & VR_ISR_RX_DROPPED) printf(" packet lost"); printf("\n"); vr_rxeoc(sc); } if ((status & VR_ISR_BUSERR) || (status & VR_ISR_TX_UNDERRUN)) { vr_reset(sc); vr_init_locked(sc); return; } if ((status & VR_ISR_UDFI) || (status & VR_ISR_TX_ABRT2) || (status & VR_ISR_TX_ABRT)) { ifp->if_oerrors++; if (sc->vr_cdata.vr_tx_cons->vr_mbuf != NULL) { VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON); VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO); } } } } #endif /* DEVICE_POLLING */ static void vr_intr(void *arg) { struct vr_softc *sc = arg; struct ifnet *ifp = sc->vr_ifp; uint16_t status; VR_LOCK(sc); if (sc->suspended) { /* * Forcibly disable interrupts. * XXX: Mobile VIA based platforms may need * interrupt re-enable on resume. */ CSR_WRITE_2(sc, VR_IMR, 0x0000); goto done_locked; } #ifdef DEVICE_POLLING if (ifp->if_flags & IFF_POLLING) goto done_locked; if ((ifp->if_capenable & IFCAP_POLLING) && ether_poll_register(vr_poll, ifp)) { /* OK, disable interrupts. */ CSR_WRITE_2(sc, VR_IMR, 0x0000); vr_poll_locked(ifp, 0, 1); goto done_locked; } #endif /* DEVICE_POLLING */ /* Suppress unwanted interrupts. */ if (!(ifp->if_flags & IFF_UP)) { vr_stop(sc); goto done_locked; } /* Disable interrupts. 
*/ CSR_WRITE_2(sc, VR_IMR, 0x0000); for (;;) { status = CSR_READ_2(sc, VR_ISR); if (status) CSR_WRITE_2(sc, VR_ISR, status); if ((status & VR_INTRS) == 0) break; if (status & VR_ISR_RX_OK) vr_rxeof(sc); if (status & VR_ISR_RX_DROPPED) { printf("vr%d: rx packet lost\n", sc->vr_unit); ifp->if_ierrors++; } if ((status & VR_ISR_RX_ERR) || (status & VR_ISR_RX_NOBUF) || (status & VR_ISR_RX_NOBUF) || (status & VR_ISR_RX_OFLOW)) { printf("vr%d: receive error (%04x)", sc->vr_unit, status); if (status & VR_ISR_RX_NOBUF) printf(" no buffers"); if (status & VR_ISR_RX_OFLOW) printf(" overflow"); if (status & VR_ISR_RX_DROPPED) printf(" packet lost"); printf("\n"); vr_rxeoc(sc); } if ((status & VR_ISR_BUSERR) || (status & VR_ISR_TX_UNDERRUN)) { vr_reset(sc); vr_init_locked(sc); break; } if ((status & VR_ISR_TX_OK) || (status & VR_ISR_TX_ABRT) || (status & VR_ISR_TX_ABRT2) || (status & VR_ISR_UDFI)) { vr_txeof(sc); if ((status & VR_ISR_UDFI) || (status & VR_ISR_TX_ABRT2) || (status & VR_ISR_TX_ABRT)) { ifp->if_oerrors++; if (sc->vr_cdata.vr_tx_cons->vr_mbuf != NULL) { VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_ON); VR_SETBIT16(sc, VR_COMMAND, VR_CMD_TX_GO); } } } } /* Re-enable interrupts. */ CSR_WRITE_2(sc, VR_IMR, VR_INTRS); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) vr_start_locked(ifp); done_locked: VR_UNLOCK(sc); } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. */ static int vr_encap(struct vr_softc *sc, struct vr_chain *c, struct mbuf *m_head) { struct vr_desc *f = NULL; struct mbuf *m; VR_LOCK_ASSERT(sc); /* * The VIA Rhine wants packet buffers to be longword * aligned, but very often our mbufs aren't. Rather than * waste time trying to decide when to copy and when not * to copy, just do it all the time. */ m = m_defrag(m_head, M_DONTWAIT); if (m == NULL) return (1); /* * The Rhine chip doesn't auto-pad, so we have to make * sure to pad short frames out to the minimum frame length * ourselves. 
*/ if (m->m_len < VR_MIN_FRAMELEN) { m->m_pkthdr.len += VR_MIN_FRAMELEN - m->m_len; m->m_len = m->m_pkthdr.len; } c->vr_mbuf = m; f = c->vr_ptr; f->vr_data = vtophys(mtod(m, caddr_t)); f->vr_ctl = m->m_len; f->vr_ctl |= VR_TXCTL_TLINK|VR_TXCTL_FIRSTFRAG; f->vr_status = 0; f->vr_ctl |= VR_TXCTL_LASTFRAG|VR_TXCTL_FINT; f->vr_next = vtophys(c->vr_nextdesc->vr_ptr); return (0); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit lists. We also save a * copy of the pointers since the transmit list fragment pointers are * physical addresses. */ static void vr_start(struct ifnet *ifp) { struct vr_softc *sc = ifp->if_softc; VR_LOCK(sc); vr_start_locked(ifp); VR_UNLOCK(sc); } static void vr_start_locked(struct ifnet *ifp) { struct vr_softc *sc = ifp->if_softc; struct mbuf *m_head; struct vr_chain *cur_tx; if (ifp->if_flags & IFF_OACTIVE) return; cur_tx = sc->vr_cdata.vr_tx_prod; while (cur_tx->vr_mbuf == NULL) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* Pack the data into the descriptor. */ if (vr_encap(sc, cur_tx, m_head)) { /* Rollback, send what we were able to encap. */ IFQ_DRV_PREPEND(&ifp->if_snd, m_head); break; } VR_TXOWN(cur_tx) = VR_TXSTAT_OWN; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, cur_tx->vr_mbuf); cur_tx = cur_tx->vr_nextdesc; } if (cur_tx != sc->vr_cdata.vr_tx_prod || cur_tx->vr_mbuf != NULL) { sc->vr_cdata.vr_tx_prod = cur_tx; /* Tell the chip to start transmitting. */ VR_SETBIT16(sc, VR_COMMAND, /*VR_CMD_TX_ON|*/ VR_CMD_TX_GO); /* Set a timeout in case the chip goes out to lunch. 
*/ ifp->if_timer = 5; if (cur_tx->vr_mbuf != NULL) ifp->if_flags |= IFF_OACTIVE; } } static void vr_init(void *xsc) { struct vr_softc *sc = xsc; VR_LOCK(sc); vr_init_locked(sc); VR_UNLOCK(sc); } static void vr_init_locked(struct vr_softc *sc) { struct ifnet *ifp = sc->vr_ifp; struct mii_data *mii; int i; VR_LOCK_ASSERT(sc); mii = device_get_softc(sc->vr_miibus); /* Cancel pending I/O and free all RX/TX buffers. */ vr_stop(sc); vr_reset(sc); /* Set our station address. */ for (i = 0; i < ETHER_ADDR_LEN; i++) CSR_WRITE_1(sc, VR_PAR0 + i, IFP2ENADDR(sc->vr_ifp)[i]); /* Set DMA size. */ VR_CLRBIT(sc, VR_BCR0, VR_BCR0_DMA_LENGTH); VR_SETBIT(sc, VR_BCR0, VR_BCR0_DMA_STORENFWD); /* * BCR0 and BCR1 can override the RXCFG and TXCFG registers, * so we must set both. */ VR_CLRBIT(sc, VR_BCR0, VR_BCR0_RX_THRESH); VR_SETBIT(sc, VR_BCR0, VR_BCR0_RXTHRESH128BYTES); VR_CLRBIT(sc, VR_BCR1, VR_BCR1_TX_THRESH); VR_SETBIT(sc, VR_BCR1, VR_BCR1_TXTHRESHSTORENFWD); VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_THRESH); VR_SETBIT(sc, VR_RXCFG, VR_RXTHRESH_128BYTES); VR_CLRBIT(sc, VR_TXCFG, VR_TXCFG_TX_THRESH); VR_SETBIT(sc, VR_TXCFG, VR_TXTHRESH_STORENFWD); /* Init circular RX list. */ if (vr_list_rx_init(sc) == ENOBUFS) { printf( "vr%d: initialization failed: no memory for rx buffers\n", sc->vr_unit); vr_stop(sc); return; } /* Init tx descriptors. */ vr_list_tx_init(sc); /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC); else VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_PROMISC); /* Set capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) VR_SETBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD); else VR_CLRBIT(sc, VR_RXCFG, VR_RXCFG_RX_BROAD); /* * Program the multicast filter, if necessary. */ vr_setmulti(sc); /* * Load the address of the RX list. */ CSR_WRITE_4(sc, VR_RXADDR, vtophys(sc->vr_cdata.vr_rx_head->vr_ptr)); /* Enable receiver and transmitter. 
*/ CSR_WRITE_2(sc, VR_COMMAND, VR_CMD_TX_NOPOLL|VR_CMD_START| VR_CMD_TX_ON|VR_CMD_RX_ON| VR_CMD_RX_GO); CSR_WRITE_4(sc, VR_TXADDR, vtophys(&sc->vr_ldata->vr_tx_list[0])); CSR_WRITE_2(sc, VR_ISR, 0xFFFF); #ifdef DEVICE_POLLING /* * Disable interrupts if we are polling. */ if (ifp->if_flags & IFF_POLLING) CSR_WRITE_2(sc, VR_IMR, 0); else #endif /* DEVICE_POLLING */ /* * Enable interrupts. */ CSR_WRITE_2(sc, VR_IMR, VR_INTRS); mii_mediachg(mii); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; sc->vr_stat_ch = timeout(vr_tick, sc, hz); } /* * Set media options. */ static int vr_ifmedia_upd(struct ifnet *ifp) { struct vr_softc *sc = ifp->if_softc; if (ifp->if_flags & IFF_UP) vr_init(sc); return (0); } /* * Report current media status. */ static void vr_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct vr_softc *sc = ifp->if_softc; struct mii_data *mii; mii = device_get_softc(sc->vr_miibus); VR_LOCK(sc); mii_pollstat(mii); VR_UNLOCK(sc); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; } static int vr_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct vr_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int error = 0; switch (command) { case SIOCSIFFLAGS: VR_LOCK(sc); if (ifp->if_flags & IFF_UP) { vr_init_locked(sc); } else { if (ifp->if_flags & IFF_RUNNING) vr_stop(sc); } VR_UNLOCK(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: VR_LOCK(sc); vr_setmulti(sc); VR_UNLOCK(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->vr_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: ifp->if_capenable = ifr->ifr_reqcap; break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void vr_watchdog(struct ifnet *ifp) { struct vr_softc *sc = ifp->if_softc; VR_LOCK(sc); ifp->if_oerrors++; printf("vr%d: watchdog timeout\n", sc->vr_unit); vr_stop(sc); 
	vr_reset(sc);
	vr_init_locked(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		vr_start_locked(ifp);

	VR_UNLOCK(sc);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
 */
static void
vr_stop(struct vr_softc *sc)
{
	register int		i;
	struct ifnet		*ifp;

	VR_LOCK_ASSERT(sc);

	ifp = sc->vr_ifp;
	ifp->if_timer = 0;

	untimeout(vr_tick, sc, sc->vr_stat_ch);
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
#ifdef DEVICE_POLLING
	ether_poll_deregister(ifp);
#endif /* DEVICE_POLLING */

	/* Halt the chip and clear the descriptor base registers. */
	VR_SETBIT16(sc, VR_COMMAND, VR_CMD_STOP);
	VR_CLRBIT16(sc, VR_COMMAND, (VR_CMD_RX_ON|VR_CMD_TX_ON));
	CSR_WRITE_2(sc, VR_IMR, 0x0000);
	CSR_WRITE_4(sc, VR_TXADDR, 0x00000000);
	CSR_WRITE_4(sc, VR_RXADDR, 0x00000000);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < VR_RX_LIST_CNT; i++) {
		if (sc->vr_cdata.vr_rx_chain[i].vr_mbuf != NULL) {
			m_freem(sc->vr_cdata.vr_rx_chain[i].vr_mbuf);
			sc->vr_cdata.vr_rx_chain[i].vr_mbuf = NULL;
		}
	}
	bzero((char *)&sc->vr_ldata->vr_rx_list,
	    sizeof(sc->vr_ldata->vr_rx_list));

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < VR_TX_LIST_CNT; i++) {
		if (sc->vr_cdata.vr_tx_chain[i].vr_mbuf != NULL) {
			m_freem(sc->vr_cdata.vr_tx_chain[i].vr_mbuf);
			sc->vr_cdata.vr_tx_chain[i].vr_mbuf = NULL;
		}
	}
	bzero((char *)&sc->vr_ldata->vr_tx_list,
	    sizeof(sc->vr_ldata->vr_tx_list));
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
vr_shutdown(device_t dev)
{

	vr_detach(dev);
}
Index: stable/6/sys/pci/if_wb.c
===================================================================
--- stable/6/sys/pci/if_wb.c	(revision 149421)
+++ stable/6/sys/pci/if_wb.c	(revision 149422)
@@ -1,1835 +1,1837 @@
/*-
 * Copyright (c) 1997, 1998
 *	Bill Paul .  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1.
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * Winbond fast ethernet PCI NIC driver * * Supports various cheap network adapters based on the Winbond W89C840F * fast ethernet controller chip. This includes adapters manufactured by * Winbond itself and some made by Linksys. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The Winbond W89C840F chip is a bus master; in some ways it resembles * a DEC 'tulip' chip, only not as complicated. 
Unfortunately, it has * one major difference which is that while the registers do many of * the same things as a tulip adapter, the offsets are different: where * tulip registers are typically spaced 8 bytes apart, the Winbond * registers are spaced 4 bytes apart. The receiver filter is also * programmed differently. * * Like the tulip, the Winbond chip uses small descriptors containing * a status word, a control word and 32-bit areas that can either be used * to point to two external data blocks, or to point to a single block * and another descriptor in a linked list. Descriptors can be grouped * together in blocks to form fixed length rings or can be chained * together in linked lists. A single packet may be spread out over * several descriptors if necessary. * * For the receive ring, this driver uses a linked list of descriptors, * each pointing to a single mbuf cluster buffer, which us large enough * to hold an entire packet. The link list is looped back to created a * closed ring. * * For transmission, the driver creates a linked list of 'super descriptors' * which each contain several individual descriptors linked toghether. * Each 'super descriptor' contains WB_MAXFRAGS descriptors, which we * abuse as fragment pointers. This allows us to use a buffer managment * scheme very similar to that used in the ThunderLAN and Etherlink XL * drivers. * * Autonegotiation is performed using the external PHY via the MII bus. * The sample boards I have all use a Davicom PHY. * * Note: the author of the Linux driver for the Winbond chip alludes * to some sort of flaw in the chip's design that seems to mandate some * drastic workaround which signigicantly impairs transmit performance. * I have no idea what he's on about: transmit performance with all * three of my test boards seems fine. 
*/ #include "opt_bdg.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include #include #include #include #include /* "controller miibus0" required. See GENERIC if you get errors here. */ #include "miibus_if.h" #define WB_USEIOSPACE #include MODULE_DEPEND(wb, pci, 1, 1, 1); MODULE_DEPEND(wb, ether, 1, 1, 1); MODULE_DEPEND(wb, miibus, 1, 1, 1); /* * Various supported device vendors/types and their names. */ static struct wb_type wb_devs[] = { { WB_VENDORID, WB_DEVICEID_840F, "Winbond W89C840F 10/100BaseTX" }, { CP_VENDORID, CP_DEVICEID_RL100, "Compex RL100-ATX 10/100baseTX" }, { 0, 0, NULL } }; static int wb_probe(device_t); static int wb_attach(device_t); static int wb_detach(device_t); static void wb_bfree(void *addr, void *args); static int wb_newbuf(struct wb_softc *, struct wb_chain_onefrag *, struct mbuf *); static int wb_encap(struct wb_softc *, struct wb_chain *, struct mbuf *); static void wb_rxeof(struct wb_softc *); static void wb_rxeoc(struct wb_softc *); static void wb_txeof(struct wb_softc *); static void wb_txeoc(struct wb_softc *); static void wb_intr(void *); static void wb_tick(void *); static void wb_start(struct ifnet *); static int wb_ioctl(struct ifnet *, u_long, caddr_t); static void wb_init(void *); static void wb_stop(struct wb_softc *); static void wb_watchdog(struct ifnet *); static void wb_shutdown(device_t); static int wb_ifmedia_upd(struct ifnet *); static void wb_ifmedia_sts(struct ifnet *, struct ifmediareq *); static void wb_eeprom_putbyte(struct wb_softc *, int); static void wb_eeprom_getword(struct wb_softc *, int, u_int16_t *); static void wb_read_eeprom(struct wb_softc *, caddr_t, int, int, int); static void wb_mii_sync(struct wb_softc *); static void wb_mii_send(struct wb_softc *, u_int32_t, int); static int wb_mii_readreg(struct wb_softc *, struct 
wb_mii_frame *); static int wb_mii_writereg(struct wb_softc *, struct wb_mii_frame *); static void wb_setcfg(struct wb_softc *, u_int32_t); static void wb_setmulti(struct wb_softc *); static void wb_reset(struct wb_softc *); static void wb_fixmedia(struct wb_softc *); static int wb_list_rx_init(struct wb_softc *); static int wb_list_tx_init(struct wb_softc *); static int wb_miibus_readreg(device_t, int, int); static int wb_miibus_writereg(device_t, int, int, int); static void wb_miibus_statchg(device_t); #ifdef WB_USEIOSPACE #define WB_RES SYS_RES_IOPORT #define WB_RID WB_PCI_LOIO #else #define WB_RES SYS_RES_MEMORY #define WB_RID WB_PCI_LOMEM #endif static device_method_t wb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, wb_probe), DEVMETHOD(device_attach, wb_attach), DEVMETHOD(device_detach, wb_detach), DEVMETHOD(device_shutdown, wb_shutdown), /* bus interface, for miibus */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, wb_miibus_readreg), DEVMETHOD(miibus_writereg, wb_miibus_writereg), DEVMETHOD(miibus_statchg, wb_miibus_statchg), { 0, 0 } }; static driver_t wb_driver = { "wb", wb_methods, sizeof(struct wb_softc) }; static devclass_t wb_devclass; DRIVER_MODULE(wb, pci, wb_driver, wb_devclass, 0, 0); DRIVER_MODULE(miibus, wb, miibus_driver, miibus_devclass, 0, 0); #define WB_SETBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, \ CSR_READ_4(sc, reg) | (x)) #define WB_CLRBIT(sc, reg, x) \ CSR_WRITE_4(sc, reg, \ CSR_READ_4(sc, reg) & ~(x)) #define SIO_SET(x) \ CSR_WRITE_4(sc, WB_SIO, \ CSR_READ_4(sc, WB_SIO) | (x)) #define SIO_CLR(x) \ CSR_WRITE_4(sc, WB_SIO, \ CSR_READ_4(sc, WB_SIO) & ~(x)) /* * Send a read command and address to the EEPROM, check for ACK. */ static void wb_eeprom_putbyte(sc, addr) struct wb_softc *sc; int addr; { register int d, i; d = addr | WB_EECMD_READ; /* * Feed in each bit and stobe the clock. 
*/
	/*
	 * Shift out the 11-bit command/address word MSB-first
	 * (0x400 = bit 10), strobing the EEPROM clock per bit.
	 */
	for (i = 0x400; i; i >>= 1) {
		if (d & i) {
			SIO_SET(WB_SIO_EE_DATAIN);
		} else {
			SIO_CLR(WB_SIO_EE_DATAIN);
		}
		DELAY(100);
		SIO_SET(WB_SIO_EE_CLK);
		DELAY(150);
		SIO_CLR(WB_SIO_EE_CLK);
		DELAY(100);
	}

	return;
}

/*
 * Read a word of data stored in the EEPROM at address 'addr.'
 * Bit-bangs the serial EEPROM interface through the WB_SIO register.
 */
static void
wb_eeprom_getword(sc, addr, dest)
	struct wb_softc		*sc;
	int			addr;
	u_int16_t		*dest;
{
	register int		i;
	u_int16_t		word = 0;

	/* Enter EEPROM access mode. */
	CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS);

	/*
	 * Send address of word we want to read.
	 */
	wb_eeprom_putbyte(sc, addr);

	CSR_WRITE_4(sc, WB_SIO, WB_SIO_EESEL|WB_SIO_EE_CS);

	/*
	 * Start reading bits from EEPROM, MSB first (0x8000 = bit 15).
	 */
	for (i = 0x8000; i; i >>= 1) {
		SIO_SET(WB_SIO_EE_CLK);
		DELAY(100);
		if (CSR_READ_4(sc, WB_SIO) & WB_SIO_EE_DATAOUT)
			word |= i;
		SIO_CLR(WB_SIO_EE_CLK);
		DELAY(100);
	}

	/* Turn off EEPROM access mode. */
	CSR_WRITE_4(sc, WB_SIO, 0);

	*dest = word;

	return;
}

/*
 * Read a sequence of words from the EEPROM.
 * 'dest' receives 'cnt' 16-bit words starting at EEPROM word offset
 * 'off'; when 'swap' is set each word is byte-swapped with ntohs().
 */
static void
wb_read_eeprom(sc, dest, off, cnt, swap)
	struct wb_softc		*sc;
	caddr_t			dest;
	int			off;
	int			cnt;
	int			swap;
{
	int			i;
	u_int16_t		word = 0, *ptr;

	for (i = 0; i < cnt; i++) {
		wb_eeprom_getword(sc, off + i, &word);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return;
}

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 * (Standard MII management-frame preamble.)
 */
static void
wb_mii_sync(sc)
	struct wb_softc		*sc;
{
	register int		i;

	SIO_SET(WB_SIO_MII_DIR|WB_SIO_MII_DATAIN);

	for (i = 0; i < 32; i++) {
		SIO_SET(WB_SIO_MII_CLK);
		DELAY(1);
		SIO_CLR(WB_SIO_MII_CLK);
		DELAY(1);
	}

	return;
}

/*
 * Clock a series of bits through the MII, MSB first.
 * 'cnt' is the number of significant bits in 'bits'.
 */
static void
wb_mii_send(sc, bits, cnt)
	struct wb_softc		*sc;
	u_int32_t		bits;
	int			cnt;
{
	int			i;

	SIO_CLR(WB_SIO_MII_CLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			SIO_SET(WB_SIO_MII_DATAIN);
		} else {
			SIO_CLR(WB_SIO_MII_DATAIN);
		}
		DELAY(1);
		SIO_CLR(WB_SIO_MII_CLK);
		DELAY(1);
		SIO_SET(WB_SIO_MII_CLK);
	}
}

/*
 * Read a PHY register through the MII.
*/
static int
wb_mii_readreg(sc, frame)
	struct wb_softc		*sc;
	struct wb_mii_frame	*frame;
{
	int			i, ack;

	WB_LOCK(sc);

	/*
	 * Set up frame for RX.
	 */
	frame->mii_stdelim = WB_MII_STARTDELIM;
	frame->mii_opcode = WB_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	CSR_WRITE_4(sc, WB_SIO, 0);

	/*
	 * Turn on data xmit.
	 */
	SIO_SET(WB_SIO_MII_DIR);

	wb_mii_sync(sc);

	/*
	 * Send command/address info.
	 */
	wb_mii_send(sc, frame->mii_stdelim, 2);
	wb_mii_send(sc, frame->mii_opcode, 2);
	wb_mii_send(sc, frame->mii_phyaddr, 5);
	wb_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	SIO_CLR((WB_SIO_MII_CLK|WB_SIO_MII_DATAIN));
	DELAY(1);
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);

	/* Turn off xmit so the PHY can drive the data line. */
	SIO_CLR(WB_SIO_MII_DIR);

	/* Check for ack: PHY pulls the line low on acknowledge. */
	SIO_CLR(WB_SIO_MII_CLK);
	DELAY(1);
	ack = CSR_READ_4(sc, WB_SIO) & WB_SIO_MII_DATAOUT;
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);
	SIO_CLR(WB_SIO_MII_CLK);
	DELAY(1);
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);

	/*
	 * Now try reading data bits. If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for(i = 0; i < 16; i++) {
			SIO_CLR(WB_SIO_MII_CLK);
			DELAY(1);
			SIO_SET(WB_SIO_MII_CLK);
			DELAY(1);
		}
		goto fail;
	}

	/* Shift in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		SIO_CLR(WB_SIO_MII_CLK);
		DELAY(1);
		if (!ack) {
			if (CSR_READ_4(sc, WB_SIO) & WB_SIO_MII_DATAOUT)
				frame->mii_data |= i;
			DELAY(1);
		}
		SIO_SET(WB_SIO_MII_CLK);
		DELAY(1);
	}

fail:

	SIO_CLR(WB_SIO_MII_CLK);
	DELAY(1);
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);

	WB_UNLOCK(sc);

	/* Returns 1 when the PHY failed to ack, 0 on success. */
	if (ack)
		return(1);
	return(0);
}

/*
 * Write to a PHY register through the MII.
 * Always returns 0; there is no ack check on the write path.
 */
static int
wb_mii_writereg(sc, frame)
	struct wb_softc		*sc;
	struct wb_mii_frame	*frame;
{
	WB_LOCK(sc);

	/*
	 * Set up frame for TX.
	 */
	frame->mii_stdelim = WB_MII_STARTDELIM;
	frame->mii_opcode = WB_MII_WRITEOP;
	frame->mii_turnaround = WB_MII_TURNAROUND;

	/*
	 * Turn on data output.
	 */
	SIO_SET(WB_SIO_MII_DIR);

	wb_mii_sync(sc);

	wb_mii_send(sc, frame->mii_stdelim, 2);
	wb_mii_send(sc, frame->mii_opcode, 2);
	wb_mii_send(sc, frame->mii_phyaddr, 5);
	wb_mii_send(sc, frame->mii_regaddr, 5);
	wb_mii_send(sc, frame->mii_turnaround, 2);
	wb_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	SIO_SET(WB_SIO_MII_CLK);
	DELAY(1);
	SIO_CLR(WB_SIO_MII_CLK);
	DELAY(1);

	/*
	 * Turn off xmit.
	 */
	SIO_CLR(WB_SIO_MII_DIR);

	WB_UNLOCK(sc);

	return(0);
}

/* miibus glue: read PHY register 'reg' of PHY 'phy'. */
static int
wb_miibus_readreg(dev, phy, reg)
	device_t		dev;
	int			phy, reg;
{
	struct wb_softc		*sc;
	struct wb_mii_frame	frame;

	sc = device_get_softc(dev);

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	wb_mii_readreg(sc, &frame);

	return(frame.mii_data);
}

/* miibus glue: write 'data' to PHY register 'reg' of PHY 'phy'. */
static int
wb_miibus_writereg(dev, phy, reg, data)
	device_t		dev;
	int			phy, reg, data;
{
	struct wb_softc		*sc;
	struct wb_mii_frame	frame;

	sc = device_get_softc(dev);

	bzero((char *)&frame, sizeof(frame));

	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	wb_mii_writereg(sc, &frame);

	return(0);
}

/*
 * miibus glue: media changed; push the new media settings into the
 * chip's netconfig register via wb_setcfg().
 */
static void
wb_miibus_statchg(dev)
	device_t		dev;
{
	struct wb_softc		*sc;
	struct mii_data		*mii;

	sc = device_get_softc(dev);
	WB_LOCK(sc);
	mii = device_get_softc(sc->wb_miibus);
	wb_setcfg(sc, mii->mii_media_active);
	WB_UNLOCK(sc);

	return;
}

/*
 * Program the 64-bit multicast hash filter.
*/
static void
wb_setmulti(sc)
	struct wb_softc		*sc;
{
	struct ifnet		*ifp;
	int			h = 0;
	u_int32_t		hashes[2] = { 0, 0 };
	struct ifmultiaddr	*ifma;
	u_int32_t		rxfilt;
	int			mcnt = 0;

	ifp = sc->wb_ifp;

	rxfilt = CSR_READ_4(sc, WB_NETCFG);

	/* Promiscuous/allmulti: accept everything, no hashing needed. */
	if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) {
		rxfilt |= WB_NETCFG_RX_MULTI;
		CSR_WRITE_4(sc, WB_NETCFG, rxfilt);
		CSR_WRITE_4(sc, WB_MAR0, 0xFFFFFFFF);
		CSR_WRITE_4(sc, WB_MAR1, 0xFFFFFFFF);
		return;
	}

	/* first, zot all the existing hash bits */
	CSR_WRITE_4(sc, WB_MAR0, 0);
	CSR_WRITE_4(sc, WB_MAR1, 0);

	/*
	 * now program new ones; take the top 6 bits of the inverted
	 * big-endian CRC32 of each link-layer multicast address as
	 * the bit index into the 64-bit MAR0/MAR1 filter.
	 */
+	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		h = ~ether_crc32_be(LLADDR((struct sockaddr_dl *)
		    ifma->ifma_addr), ETHER_ADDR_LEN) >> 26;
		if (h < 32)
			hashes[0] |= (1 << h);
		else
			hashes[1] |= (1 << (h - 32));
		mcnt++;
	}
+	IF_ADDR_UNLOCK(ifp);

	/* Only enable multicast reception if at least one group matched. */
	if (mcnt)
		rxfilt |= WB_NETCFG_RX_MULTI;
	else
		rxfilt &= ~WB_NETCFG_RX_MULTI;

	CSR_WRITE_4(sc, WB_MAR0, hashes[0]);
	CSR_WRITE_4(sc, WB_MAR1, hashes[1]);
	CSR_WRITE_4(sc, WB_NETCFG, rxfilt);

	return;
}

/*
 * The Winbond manual states that in order to fiddle with the
 * 'full-duplex' and '100Mbps' bits in the netconfig register, we
 * first have to put the transmit and/or receive logic in the idle state.
*/
static void
wb_setcfg(sc, media)
	struct wb_softc		*sc;
	u_int32_t		media;
{
	int			i, restart = 0;

	/*
	 * If TX or RX is running, idle both engines first and remember
	 * to restart them after the speed/duplex bits are updated.
	 */
	if (CSR_READ_4(sc, WB_NETCFG) & (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON)) {
		restart = 1;
		WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_TX_ON|WB_NETCFG_RX_ON));

		for (i = 0; i < WB_TIMEOUT; i++) {
			DELAY(10);
			if ((CSR_READ_4(sc, WB_ISR) & WB_ISR_TX_IDLE) &&
				(CSR_READ_4(sc, WB_ISR) & WB_ISR_RX_IDLE))
				break;
		}

		if (i == WB_TIMEOUT)
			printf("wb%d: failed to force tx and "
				"rx to idle state\n", sc->wb_unit);
	}

	if (IFM_SUBTYPE(media) == IFM_10_T)
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS);
	else
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_100MBPS);

	if ((media & IFM_GMASK) == IFM_FDX)
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX);
	else
		WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_FULLDUPLEX);

	if (restart)
		WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON|WB_NETCFG_RX_ON);

	return;
}

/*
 * Reset the chip: clear the config/DMA registers, pulse the software
 * reset bit, wait for it to self-clear, then reset any attached PHYs.
 */
static void
wb_reset(sc)
	struct wb_softc		*sc;
{
	register int		i;
	struct mii_data		*mii;

	CSR_WRITE_4(sc, WB_NETCFG, 0);
	CSR_WRITE_4(sc, WB_BUSCTL, 0);
	CSR_WRITE_4(sc, WB_TXADDR, 0);
	CSR_WRITE_4(sc, WB_RXADDR, 0);

	/*
	 * NOTE(review): the reset bit is set twice in the original
	 * source — possibly a deliberate double strobe for flaky
	 * hardware, possibly a copy/paste duplicate; verify before
	 * removing.
	 */
	WB_SETBIT(sc, WB_BUSCTL, WB_BUSCTL_RESET);
	WB_SETBIT(sc, WB_BUSCTL, WB_BUSCTL_RESET);

	for (i = 0; i < WB_TIMEOUT; i++) {
		DELAY(10);
		if (!(CSR_READ_4(sc, WB_BUSCTL) & WB_BUSCTL_RESET))
			break;
	}
	if (i == WB_TIMEOUT)
		printf("wb%d: reset never completed!\n", sc->wb_unit);

	/* Wait a little while for the chip to get its brains in order.
*/ DELAY(1000); if (sc->wb_miibus == NULL) return; mii = device_get_softc(sc->wb_miibus); if (mii == NULL) return; if (mii->mii_instance) { struct mii_softc *miisc; LIST_FOREACH(miisc, &mii->mii_phys, mii_list) mii_phy_reset(miisc); } return; } static void wb_fixmedia(sc) struct wb_softc *sc; { struct mii_data *mii = NULL; struct ifnet *ifp; u_int32_t media; if (sc->wb_miibus == NULL) return; mii = device_get_softc(sc->wb_miibus); ifp = sc->wb_ifp; mii_pollstat(mii); if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T) { media = mii->mii_media_active & ~IFM_10_T; media |= IFM_100_TX; } else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) { media = mii->mii_media_active & ~IFM_100_TX; media |= IFM_10_T; } else return; ifmedia_set(&mii->mii_media, media); return; } /* * Probe for a Winbond chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int wb_probe(dev) device_t dev; { struct wb_type *t; t = wb_devs; while(t->wb_name != NULL) { if ((pci_get_vendor(dev) == t->wb_vid) && (pci_get_device(dev) == t->wb_did)) { device_set_desc(dev, t->wb_name); return (BUS_PROBE_DEFAULT); } t++; } return(ENXIO); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int wb_attach(dev) device_t dev; { u_char eaddr[ETHER_ADDR_LEN]; struct wb_softc *sc; struct ifnet *ifp; int unit, error = 0, rid; sc = device_get_softc(dev); unit = device_get_unit(dev); mtx_init(&sc->wb_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); /* * Map control/status registers. 
*/ pci_enable_busmaster(dev); rid = WB_RID; sc->wb_res = bus_alloc_resource_any(dev, WB_RES, &rid, RF_ACTIVE); if (sc->wb_res == NULL) { printf("wb%d: couldn't map ports/memory\n", unit); error = ENXIO; goto fail; } sc->wb_btag = rman_get_bustag(sc->wb_res); sc->wb_bhandle = rman_get_bushandle(sc->wb_res); /* Allocate interrupt */ rid = 0; sc->wb_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->wb_irq == NULL) { printf("wb%d: couldn't map interrupt\n", unit); error = ENXIO; goto fail; } /* Save the cache line size. */ sc->wb_cachesize = pci_read_config(dev, WB_PCI_CACHELEN, 4) & 0xFF; /* Reset the adapter. */ wb_reset(sc); /* * Get station address from the EEPROM. */ wb_read_eeprom(sc, (caddr_t)&eaddr, 0, 3, 0); sc->wb_unit = unit; sc->wb_ldata = contigmalloc(sizeof(struct wb_list_data) + 8, M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc->wb_ldata == NULL) { printf("wb%d: no memory for list buffers!\n", unit); error = ENXIO; goto fail; } bzero(sc->wb_ldata, sizeof(struct wb_list_data)); ifp = sc->wb_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { printf("wb%d: can not if_alloc()\n", unit); error = ENOSPC; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_NEEDSGIANT; ifp->if_ioctl = wb_ioctl; ifp->if_start = wb_start; ifp->if_watchdog = wb_watchdog; ifp->if_init = wb_init; ifp->if_baudrate = 10000000; ifp->if_snd.ifq_maxlen = WB_TX_LIST_CNT - 1; /* * Do MII setup. */ if (mii_phy_probe(dev, &sc->wb_miibus, wb_ifmedia_upd, wb_ifmedia_sts)) { error = ENXIO; goto fail; } /* * Call MI attach routine. 
*/ ether_ifattach(ifp, eaddr); /* Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->wb_irq, INTR_TYPE_NET, wb_intr, sc, &sc->wb_intrhand); if (error) { printf("wb%d: couldn't set up irq\n", unit); ether_ifdetach(ifp); if_free(ifp); goto fail; } fail: if (error) wb_detach(dev); return(error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. */ static int wb_detach(dev) device_t dev; { struct wb_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); KASSERT(mtx_initialized(&sc->wb_mtx), ("wb mutex not initialized")); WB_LOCK(sc); ifp = sc->wb_ifp; /* * Delete any miibus and phy devices attached to this interface. * This should only be done if attach succeeded. */ if (device_is_attached(dev)) { wb_stop(sc); ether_ifdetach(ifp); if_free(ifp); } if (sc->wb_miibus) device_delete_child(dev, sc->wb_miibus); bus_generic_detach(dev); if (sc->wb_intrhand) bus_teardown_intr(dev, sc->wb_irq, sc->wb_intrhand); if (sc->wb_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->wb_irq); if (sc->wb_res) bus_release_resource(dev, WB_RES, WB_RID, sc->wb_res); if (sc->wb_ldata) { contigfree(sc->wb_ldata, sizeof(struct wb_list_data) + 8, M_DEVBUF); } WB_UNLOCK(sc); mtx_destroy(&sc->wb_mtx); return(0); } /* * Initialize the transmit descriptors. 
*/
static int
wb_list_tx_init(sc)
	struct wb_softc		*sc;
{
	struct wb_chain_data	*cd;
	struct wb_list_data	*ld;
	int			i;

	cd = &sc->wb_cdata;
	ld = sc->wb_ldata;

	/*
	 * Link each software TX descriptor to its hardware descriptor
	 * and chain them into a closed ring (last points back to first).
	 */
	for (i = 0; i < WB_TX_LIST_CNT; i++) {
		cd->wb_tx_chain[i].wb_ptr = &ld->wb_tx_list[i];
		if (i == (WB_TX_LIST_CNT - 1)) {
			cd->wb_tx_chain[i].wb_nextdesc =
				&cd->wb_tx_chain[0];
		} else {
			cd->wb_tx_chain[i].wb_nextdesc =
				&cd->wb_tx_chain[i + 1];
		}
	}

	cd->wb_tx_free = &cd->wb_tx_chain[0];
	cd->wb_tx_tail = cd->wb_tx_head = NULL;

	return(0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
 */
static int
wb_list_rx_init(sc)
	struct wb_softc		*sc;
{
	struct wb_chain_data	*cd;
	struct wb_list_data	*ld;
	int			i;

	cd = &sc->wb_cdata;
	ld = sc->wb_ldata;

	for (i = 0; i < WB_RX_LIST_CNT; i++) {
		cd->wb_rx_chain[i].wb_ptr =
			(struct wb_desc *)&ld->wb_rx_list[i];
		cd->wb_rx_chain[i].wb_buf = (void *)&ld->wb_rxbufs[i];
		/* Attach an mbuf to each descriptor up front. */
		if (wb_newbuf(sc, &cd->wb_rx_chain[i], NULL) == ENOBUFS)
			return(ENOBUFS);
		/*
		 * Hardware 'next' pointers are physical addresses
		 * (vtophys); software pointers close the same ring.
		 */
		if (i == (WB_RX_LIST_CNT - 1)) {
			cd->wb_rx_chain[i].wb_nextdesc = &cd->wb_rx_chain[0];
			ld->wb_rx_list[i].wb_next = vtophys(&ld->wb_rx_list[0]);
		} else {
			cd->wb_rx_chain[i].wb_nextdesc =
				&cd->wb_rx_chain[i + 1];
			ld->wb_rx_list[i].wb_next =
				vtophys(&ld->wb_rx_list[i + 1]);
		}
	}

	cd->wb_rx_head = &cd->wb_rx_chain[0];

	return(0);
}

/*
 * External-buffer free routine for MEXTADD: the RX buffers live in the
 * contigmalloc'ed list data, so there is nothing to free per-mbuf.
 */
static void
wb_bfree(buf, args)
	void			*buf;
	void			*args;
{
	return;
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
*/ static int wb_newbuf(sc, c, m) struct wb_softc *sc; struct wb_chain_onefrag *c; struct mbuf *m; { struct mbuf *m_new = NULL; if (m == NULL) { MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) return(ENOBUFS); m_new->m_data = c->wb_buf; m_new->m_pkthdr.len = m_new->m_len = WB_BUFBYTES; MEXTADD(m_new, c->wb_buf, WB_BUFBYTES, wb_bfree, NULL, 0, EXT_NET_DRV); } else { m_new = m; m_new->m_len = m_new->m_pkthdr.len = WB_BUFBYTES; m_new->m_data = m_new->m_ext.ext_buf; } m_adj(m_new, sizeof(u_int64_t)); c->wb_mbuf = m_new; c->wb_ptr->wb_data = vtophys(mtod(m_new, caddr_t)); c->wb_ptr->wb_ctl = WB_RXCTL_RLINK | 1536; c->wb_ptr->wb_status = WB_RXSTAT; return(0); } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. */ static void wb_rxeof(sc) struct wb_softc *sc; { struct mbuf *m = NULL; struct ifnet *ifp; struct wb_chain_onefrag *cur_rx; int total_len = 0; u_int32_t rxstat; WB_LOCK_ASSERT(sc); ifp = sc->wb_ifp; while(!((rxstat = sc->wb_cdata.wb_rx_head->wb_ptr->wb_status) & WB_RXSTAT_OWN)) { struct mbuf *m0 = NULL; cur_rx = sc->wb_cdata.wb_rx_head; sc->wb_cdata.wb_rx_head = cur_rx->wb_nextdesc; m = cur_rx->wb_mbuf; if ((rxstat & WB_RXSTAT_MIIERR) || (WB_RXBYTES(cur_rx->wb_ptr->wb_status) < WB_MIN_FRAMELEN) || (WB_RXBYTES(cur_rx->wb_ptr->wb_status) > 1536) || !(rxstat & WB_RXSTAT_LASTFRAG) || !(rxstat & WB_RXSTAT_RXCMP)) { ifp->if_ierrors++; wb_newbuf(sc, cur_rx, m); printf("wb%x: receiver babbling: possible chip " "bug, forcing reset\n", sc->wb_unit); wb_fixmedia(sc); wb_reset(sc); wb_init(sc); return; } if (rxstat & WB_RXSTAT_RXERR) { ifp->if_ierrors++; wb_newbuf(sc, cur_rx, m); break; } /* No errors; receive the packet. */ total_len = WB_RXBYTES(cur_rx->wb_ptr->wb_status); /* * XXX The Winbond chip includes the CRC with every * received frame, and there's no way to turn this * behavior off (at least, I can't find anything in * the manual that explains how to do it) so we have * to trim off the CRC manually. 
*/ total_len -= ETHER_CRC_LEN; m0 = m_devget(mtod(m, char *), total_len, ETHER_ALIGN, ifp, NULL); wb_newbuf(sc, cur_rx, m); if (m0 == NULL) { ifp->if_ierrors++; break; } m = m0; ifp->if_ipackets++; WB_UNLOCK(sc); (*ifp->if_input)(ifp, m); WB_LOCK(sc); } } static void wb_rxeoc(sc) struct wb_softc *sc; { wb_rxeof(sc); WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON); CSR_WRITE_4(sc, WB_RXADDR, vtophys(&sc->wb_ldata->wb_rx_list[0])); WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON); if (CSR_READ_4(sc, WB_ISR) & WB_RXSTATE_SUSPEND) CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF); return; } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. */ static void wb_txeof(sc) struct wb_softc *sc; { struct wb_chain *cur_tx; struct ifnet *ifp; ifp = sc->wb_ifp; /* Clear the timeout timer. */ ifp->if_timer = 0; if (sc->wb_cdata.wb_tx_head == NULL) return; /* * Go through our tx list and free mbufs for those * frames that have been transmitted. */ while(sc->wb_cdata.wb_tx_head->wb_mbuf != NULL) { u_int32_t txstat; cur_tx = sc->wb_cdata.wb_tx_head; txstat = WB_TXSTATUS(cur_tx); if ((txstat & WB_TXSTAT_OWN) || txstat == WB_UNSENT) break; if (txstat & WB_TXSTAT_TXERR) { ifp->if_oerrors++; if (txstat & WB_TXSTAT_ABORT) ifp->if_collisions++; if (txstat & WB_TXSTAT_LATECOLL) ifp->if_collisions++; } ifp->if_collisions += (txstat & WB_TXSTAT_COLLCNT) >> 3; ifp->if_opackets++; m_freem(cur_tx->wb_mbuf); cur_tx->wb_mbuf = NULL; if (sc->wb_cdata.wb_tx_head == sc->wb_cdata.wb_tx_tail) { sc->wb_cdata.wb_tx_head = NULL; sc->wb_cdata.wb_tx_tail = NULL; break; } sc->wb_cdata.wb_tx_head = cur_tx->wb_nextdesc; } return; } /* * TX 'end of channel' interrupt handler. 
*/
static void
wb_txeoc(sc)
	struct wb_softc		*sc;
{
	struct ifnet		*ifp;

	ifp = sc->wb_ifp;

	ifp->if_timer = 0;

	if (sc->wb_cdata.wb_tx_head == NULL) {
		ifp->if_flags &= ~IFF_OACTIVE;
		sc->wb_cdata.wb_tx_tail = NULL;
	} else {
		/*
		 * If the head frame is still marked WB_UNSENT, hand its
		 * ownership to the chip and restart transmission.
		 */
		if (WB_TXOWN(sc->wb_cdata.wb_tx_head) == WB_UNSENT) {
			WB_TXOWN(sc->wb_cdata.wb_tx_head) = WB_TXSTAT_OWN;
			ifp->if_timer = 5;
			CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
		}
	}

	return;
}

/*
 * Interrupt handler: acknowledge and dispatch every pending interrupt
 * cause, resetting the chip on RX overruns and bus errors.
 */
static void
wb_intr(arg)
	void			*arg;
{
	struct wb_softc		*sc;
	struct ifnet		*ifp;
	u_int32_t		status;

	sc = arg;
	WB_LOCK(sc);
	ifp = sc->wb_ifp;

	if (!(ifp->if_flags & IFF_UP)) {
		WB_UNLOCK(sc);
		return;
	}

	/* Disable interrupts. */
	CSR_WRITE_4(sc, WB_IMR, 0x00000000);

	for (;;) {
		status = CSR_READ_4(sc, WB_ISR);
		/* Ack whatever we saw (write-1-to-clear). */
		if (status)
			CSR_WRITE_4(sc, WB_ISR, status);

		if ((status & WB_INTRS) == 0)
			break;

		/* RX buffer exhaustion or RX error: full reset. */
		if ((status & WB_ISR_RX_NOBUF) || (status & WB_ISR_RX_ERR)) {
			ifp->if_ierrors++;
			wb_reset(sc);
			if (status & WB_ISR_RX_ERR)
				wb_fixmedia(sc);
			wb_init(sc);
			continue;
		}

		if (status & WB_ISR_RX_OK)
			wb_rxeof(sc);

		if (status & WB_ISR_RX_IDLE)
			wb_rxeoc(sc);

		if (status & WB_ISR_TX_OK)
			wb_txeof(sc);

		if (status & WB_ISR_TX_NOBUF)
			wb_txeoc(sc);

		if (status & WB_ISR_TX_IDLE) {
			wb_txeof(sc);
			/* More frames queued: restart the transmitter. */
			if (sc->wb_cdata.wb_tx_head != NULL) {
				WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
				CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF);
			}
		}

		if (status & WB_ISR_TX_UNDERRUN) {
			ifp->if_oerrors++;
			wb_txeof(sc);
			WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
			/* Jack up TX threshold */
			sc->wb_txthresh += WB_TXTHRESH_CHUNK;
			WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH);
			WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh));
			WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON);
		}

		if (status & WB_ISR_BUS_ERR) {
			wb_reset(sc);
			wb_init(sc);
		}
	}

	/* Re-enable interrupts. */
	CSR_WRITE_4(sc, WB_IMR, WB_INTRS);

	if (ifp->if_snd.ifq_head != NULL) {
		wb_start(ifp);
	}

	WB_UNLOCK(sc);

	return;
}

/*
 * Periodic timer: tick the MII state machine once a second.
 */
static void
wb_tick(xsc)
	void			*xsc;
{
	struct wb_softc		*sc;
	struct mii_data		*mii;

	sc = xsc;
	WB_LOCK(sc);
	mii = device_get_softc(sc->wb_miibus);
	mii_tick(mii);
	sc->wb_stat_ch = timeout(wb_tick, sc, hz);
	WB_UNLOCK(sc);

	return;
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 */
static int
wb_encap(sc, c, m_head)
	struct wb_softc		*sc;
	struct wb_chain		*c;
	struct mbuf		*m_head;
{
	int			frag = 0;
	struct wb_desc		*f = NULL;
	int			total_len;
	struct mbuf		*m;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	m = m_head;
	total_len = 0;

	for (m = m_head, frag = 0; m != NULL; m = m->m_next) {
		if (m->m_len != 0) {
			if (frag == WB_MAXFRAGS)
				break;
			total_len += m->m_len;
			f = &c->wb_ptr->wb_frag[frag];
			f->wb_ctl = WB_TXCTL_TLINK | m->m_len;
			/* First fragment: chip must not own it yet. */
			if (frag == 0) {
				f->wb_ctl |= WB_TXCTL_FIRSTFRAG;
				f->wb_status = 0;
			} else
				f->wb_status = WB_TXSTAT_OWN;
			f->wb_next = vtophys(&c->wb_ptr->wb_frag[frag + 1]);
			f->wb_data = vtophys(mtod(m, vm_offset_t));
			frag++;
		}
	}

	/*
	 * Handle special case: we used up all 16 fragments,
	 * but we have more mbufs left in the chain. Copy the
	 * data into an mbuf cluster. Note that we don't
	 * bother clearing the values in the other fragment
	 * pointers/counters; it wouldn't gain us anything,
	 * and would waste cycles.
*/ if (m != NULL) { struct mbuf *m_new = NULL; MGETHDR(m_new, M_DONTWAIT, MT_DATA); if (m_new == NULL) return(1); if (m_head->m_pkthdr.len > MHLEN) { MCLGET(m_new, M_DONTWAIT); if (!(m_new->m_flags & M_EXT)) { m_freem(m_new); return(1); } } m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t)); m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len; m_freem(m_head); m_head = m_new; f = &c->wb_ptr->wb_frag[0]; f->wb_status = 0; f->wb_data = vtophys(mtod(m_new, caddr_t)); f->wb_ctl = total_len = m_new->m_len; f->wb_ctl |= WB_TXCTL_TLINK|WB_TXCTL_FIRSTFRAG; frag = 1; } if (total_len < WB_MIN_FRAMELEN) { f = &c->wb_ptr->wb_frag[frag]; f->wb_ctl = WB_MIN_FRAMELEN - total_len; f->wb_data = vtophys(&sc->wb_cdata.wb_pad); f->wb_ctl |= WB_TXCTL_TLINK; f->wb_status = WB_TXSTAT_OWN; frag++; } c->wb_mbuf = m_head; c->wb_lastdesc = frag - 1; WB_TXCTL(c) |= WB_TXCTL_LASTFRAG; WB_TXNEXT(c) = vtophys(&c->wb_nextdesc->wb_ptr->wb_frag[0]); return(0); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit lists. We also save a * copy of the pointers since the transmit list fragment pointers are * physical addresses. */ static void wb_start(ifp) struct ifnet *ifp; { struct wb_softc *sc; struct mbuf *m_head = NULL; struct wb_chain *cur_tx = NULL, *start_tx; sc = ifp->if_softc; WB_LOCK(sc); /* * Check for an available queue slot. If there are none, * punt. */ if (sc->wb_cdata.wb_tx_free->wb_mbuf != NULL) { ifp->if_flags |= IFF_OACTIVE; WB_UNLOCK(sc); return; } start_tx = sc->wb_cdata.wb_tx_free; while(sc->wb_cdata.wb_tx_free->wb_mbuf == NULL) { IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* Pick a descriptor off the free list. */ cur_tx = sc->wb_cdata.wb_tx_free; sc->wb_cdata.wb_tx_free = cur_tx->wb_nextdesc; /* Pack the data into the descriptor. 
*/ wb_encap(sc, cur_tx, m_head); if (cur_tx != start_tx) WB_TXOWN(cur_tx) = WB_TXSTAT_OWN; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ BPF_MTAP(ifp, cur_tx->wb_mbuf); } /* * If there are no packets queued, bail. */ if (cur_tx == NULL) { WB_UNLOCK(sc); return; } /* * Place the request for the upload interrupt * in the last descriptor in the chain. This way, if * we're chaining several packets at once, we'll only * get an interupt once for the whole chain rather than * once for each packet. */ WB_TXCTL(cur_tx) |= WB_TXCTL_FINT; cur_tx->wb_ptr->wb_frag[0].wb_ctl |= WB_TXCTL_FINT; sc->wb_cdata.wb_tx_tail = cur_tx; if (sc->wb_cdata.wb_tx_head == NULL) { sc->wb_cdata.wb_tx_head = start_tx; WB_TXOWN(start_tx) = WB_TXSTAT_OWN; CSR_WRITE_4(sc, WB_TXSTART, 0xFFFFFFFF); } else { /* * We need to distinguish between the case where * the own bit is clear because the chip cleared it * and where the own bit is clear because we haven't * set it yet. The magic value WB_UNSET is just some * ramdomly chosen number which doesn't have the own * bit set. When we actually transmit the frame, the * status word will have _only_ the own bit set, so * the txeoc handler will be able to tell if it needs * to initiate another transmission to flush out pending * frames. */ WB_TXOWN(start_tx) = WB_UNSENT; } /* * Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; WB_UNLOCK(sc); return; } static void wb_init(xsc) void *xsc; { struct wb_softc *sc = xsc; struct ifnet *ifp = sc->wb_ifp; int i; struct mii_data *mii; WB_LOCK(sc); mii = device_get_softc(sc->wb_miibus); /* * Cancel pending I/O and free all RX/TX buffers. */ wb_stop(sc); wb_reset(sc); sc->wb_txthresh = WB_TXTHRESH_INIT; /* * Set cache alignment and burst length. 
*/ #ifdef foo CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_CONFIG); WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_THRESH); WB_SETBIT(sc, WB_NETCFG, WB_TXTHRESH(sc->wb_txthresh)); #endif CSR_WRITE_4(sc, WB_BUSCTL, WB_BUSCTL_MUSTBEONE|WB_BUSCTL_ARBITRATION); WB_SETBIT(sc, WB_BUSCTL, WB_BURSTLEN_16LONG); switch(sc->wb_cachesize) { case 32: WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_32LONG); break; case 16: WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_16LONG); break; case 8: WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_8LONG); break; case 0: default: WB_SETBIT(sc, WB_BUSCTL, WB_CACHEALIGN_NONE); break; } /* This doesn't tend to work too well at 100Mbps. */ WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_EARLY_ON); /* Init our MAC address */ for (i = 0; i < ETHER_ADDR_LEN; i++) { CSR_WRITE_1(sc, WB_NODE0 + i, IFP2ENADDR(sc->wb_ifp)[i]); } /* Init circular RX list. */ if (wb_list_rx_init(sc) == ENOBUFS) { printf("wb%d: initialization failed: no " "memory for rx buffers\n", sc->wb_unit); wb_stop(sc); WB_UNLOCK(sc); return; } /* Init TX descriptors. */ wb_list_tx_init(sc); /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) { WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS); } else { WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ALLPHYS); } /* * Set capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) { WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD); } else { WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_BROAD); } /* * Program the multicast filter, if necessary. */ wb_setmulti(sc); /* * Load the address of the RX list. */ WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON); CSR_WRITE_4(sc, WB_RXADDR, vtophys(&sc->wb_ldata->wb_rx_list[0])); /* * Enable interrupts. */ CSR_WRITE_4(sc, WB_IMR, WB_INTRS); CSR_WRITE_4(sc, WB_ISR, 0xFFFFFFFF); /* Enable receiver and transmitter. 
*/ WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_RX_ON); CSR_WRITE_4(sc, WB_RXSTART, 0xFFFFFFFF); WB_CLRBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON); CSR_WRITE_4(sc, WB_TXADDR, vtophys(&sc->wb_ldata->wb_tx_list[0])); WB_SETBIT(sc, WB_NETCFG, WB_NETCFG_TX_ON); mii_mediachg(mii); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; sc->wb_stat_ch = timeout(wb_tick, sc, hz); WB_UNLOCK(sc); return; } /* * Set media options. */ static int wb_ifmedia_upd(ifp) struct ifnet *ifp; { struct wb_softc *sc; sc = ifp->if_softc; if (ifp->if_flags & IFF_UP) wb_init(sc); return(0); } /* * Report current media status. */ static void wb_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct wb_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->wb_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; return; } static int wb_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct wb_softc *sc = ifp->if_softc; struct mii_data *mii; struct ifreq *ifr = (struct ifreq *) data; int error = 0; WB_LOCK(sc); switch(command) { case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { wb_init(sc); } else { if (ifp->if_flags & IFF_RUNNING) wb_stop(sc); } error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: wb_setmulti(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->wb_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; default: error = ether_ioctl(ifp, command, data); break; } WB_UNLOCK(sc); return(error); } static void wb_watchdog(ifp) struct ifnet *ifp; { struct wb_softc *sc; sc = ifp->if_softc; WB_LOCK(sc); ifp->if_oerrors++; printf("wb%d: watchdog timeout\n", sc->wb_unit); #ifdef foo if (!(wb_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT)) printf("wb%d: no carrier - transceiver cable problem?\n", sc->wb_unit); #endif wb_stop(sc); wb_reset(sc); wb_init(sc); if (ifp->if_snd.ifq_head != NULL) wb_start(ifp); 
WB_UNLOCK(sc); return; } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void wb_stop(sc) struct wb_softc *sc; { register int i; struct ifnet *ifp; WB_LOCK(sc); ifp = sc->wb_ifp; ifp->if_timer = 0; untimeout(wb_tick, sc, sc->wb_stat_ch); WB_CLRBIT(sc, WB_NETCFG, (WB_NETCFG_RX_ON|WB_NETCFG_TX_ON)); CSR_WRITE_4(sc, WB_IMR, 0x00000000); CSR_WRITE_4(sc, WB_TXADDR, 0x00000000); CSR_WRITE_4(sc, WB_RXADDR, 0x00000000); /* * Free data in the RX lists. */ for (i = 0; i < WB_RX_LIST_CNT; i++) { if (sc->wb_cdata.wb_rx_chain[i].wb_mbuf != NULL) { m_freem(sc->wb_cdata.wb_rx_chain[i].wb_mbuf); sc->wb_cdata.wb_rx_chain[i].wb_mbuf = NULL; } } bzero((char *)&sc->wb_ldata->wb_rx_list, sizeof(sc->wb_ldata->wb_rx_list)); /* * Free the TX list buffers. */ for (i = 0; i < WB_TX_LIST_CNT; i++) { if (sc->wb_cdata.wb_tx_chain[i].wb_mbuf != NULL) { m_freem(sc->wb_cdata.wb_tx_chain[i].wb_mbuf); sc->wb_cdata.wb_tx_chain[i].wb_mbuf = NULL; } } bzero((char *)&sc->wb_ldata->wb_tx_list, sizeof(sc->wb_ldata->wb_tx_list)); ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); WB_UNLOCK(sc); return; } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static void wb_shutdown(dev) device_t dev; { struct wb_softc *sc; sc = device_get_softc(dev); wb_stop(sc); return; } Index: stable/6/sys/pci/if_xl.c =================================================================== --- stable/6/sys/pci/if_xl.c (revision 149421) +++ stable/6/sys/pci/if_xl.c (revision 149422) @@ -1,3388 +1,3392 @@ /*- * Copyright (c) 1997, 1998, 1999 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /* * 3Com 3c90x Etherlink XL PCI NIC driver * * Supports the 3Com "boomerang", "cyclone" and "hurricane" PCI * bus-master chips (3c90x cards and embedded controllers) including * the following: * * 3Com 3c900-TPO 10Mbps/RJ-45 * 3Com 3c900-COMBO 10Mbps/RJ-45,AUI,BNC * 3Com 3c905-TX 10/100Mbps/RJ-45 * 3Com 3c905-T4 10/100Mbps/RJ-45 * 3Com 3c900B-TPO 10Mbps/RJ-45 * 3Com 3c900B-COMBO 10Mbps/RJ-45,AUI,BNC * 3Com 3c900B-TPC 10Mbps/RJ-45,BNC * 3Com 3c900B-FL 10Mbps/Fiber-optic * 3Com 3c905B-COMBO 10/100Mbps/RJ-45,AUI,BNC * 3Com 3c905B-TX 10/100Mbps/RJ-45 * 3Com 3c905B-FL/FX 10/100Mbps/Fiber-optic * 3Com 3c905C-TX 10/100Mbps/RJ-45 (Tornado ASIC) * 3Com 3c980-TX 10/100Mbps server adapter (Hurricane ASIC) * 3Com 3c980C-TX 10/100Mbps server adapter (Tornado ASIC) * 3Com 3cSOHO100-TX 10/100Mbps/RJ-45 (Hurricane ASIC) * 3Com 3c450-TX 10/100Mbps/RJ-45 (Tornado ASIC) * 3Com 3c555 10/100Mbps/RJ-45 (MiniPCI, Laptop Hurricane) * 3Com 3c556 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC) * 3Com 3c556B 10/100Mbps/RJ-45 (MiniPCI, Hurricane ASIC) * 3Com 3c575TX 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) * 3Com 3c575B 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) * 3Com 3c575C 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) * 3Com 3cxfem656 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) * 3Com 3cxfem656b 10/100Mbps/RJ-45 (Cardbus, Hurricane ASIC) * 3Com 3cxfem656c 10/100Mbps/RJ-45 (Cardbus, Tornado ASIC) * Dell Optiplex GX1 on-board 3c918 10/100Mbps/RJ-45 * Dell on-board 3c920 10/100Mbps/RJ-45 * Dell Precision on-board 3c905B 10/100Mbps/RJ-45 * Dell Latitude laptop docking station embedded 3c905-TX * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The 3c90x series chips use a bus-master DMA interface for transfering * packets to and from the controller chip. Some of the "vortex" cards * (3c59x) also supported a bus master mode, however for those chips * you could only DMA packets to/from a contiguous memory buffer. 
For * transmission this would mean copying the contents of the queued mbuf * chain into an mbuf cluster and then DMAing the cluster. This extra * copy would sort of defeat the purpose of the bus master support for * any packet that doesn't fit into a single mbuf. * * By contrast, the 3c90x cards support a fragment-based bus master * mode where mbuf chains can be encapsulated using TX descriptors. * This is similar to other PCI chips such as the Texas Instruments * ThunderLAN and the Intel 82557/82558. * * The "vortex" driver (if_vx.c) happens to work for the "boomerang" * bus master chips because they maintain the old PIO interface for * backwards compatibility, but starting with the 3c905B and the * "cyclone" chips, the compatibility interface has been dropped. * Since using bus master DMA is a big win, we use this driver to * support the PCI "boomerang" chips even though they work with the * "vortex" driver in order to obtain better performance. * * This driver is in the /sys/pci directory because it only supports * PCI-based NICs. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(xl, pci, 1, 1, 1); MODULE_DEPEND(xl, ether, 1, 1, 1); MODULE_DEPEND(xl, miibus, 1, 1, 1); /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" #include /* * TX Checksumming is disabled by default for two reasons: * - TX Checksumming will occasionally produce corrupt packets * - TX Checksumming seems to reduce performance * * Only 905B/C cards were reported to have this problem, it is possible * that later chips _may_ be immune. */ #define XL905B_TXCSUM_BROKEN 1 #ifdef XL905B_TXCSUM_BROKEN #define XL905B_CSUM_FEATURES 0 #else #define XL905B_CSUM_FEATURES (CSUM_IP | CSUM_TCP | CSUM_UDP) #endif /* * Various supported device vendors/types and their names. 
*/ static struct xl_type xl_devs[] = { { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT, "3Com 3c900-TPO Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_BOOMERANG_10BT_COMBO, "3Com 3c900-COMBO Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_BOOMERANG_10_100BT, "3Com 3c905-TX Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_BOOMERANG_100BT4, "3Com 3c905-T4 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT, "3Com 3c900B-TPO Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_COMBO, "3Com 3c900B-COMBO Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_KRAKATOA_10BT_TPC, "3Com 3c900B-TPC Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_CYCLONE_10FL, "3Com 3c900B-FL Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT, "3Com 3c905B-TX Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100BT4, "3Com 3c905B-T4 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100FX, "3Com 3c905B-FX/SC Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_CYCLONE_10_100_COMBO, "3Com 3c905B-COMBO Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT, "3Com 3c905C-TX Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B, "3Com 3c920B-EMB Integrated Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_920B_WNM, "3Com 3c920B-EMB-WNM Integrated Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_10_100BT_SERV, "3Com 3c980 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_TORNADO_10_100BT_SERV, "3Com 3c980C Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_SOHO100TX, "3Com 3cSOHO100-TX OfficeConnect" }, { TC_VENDORID, TC_DEVICEID_TORNADO_HOMECONNECT, "3Com 3c450-TX HomeConnect" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_555, "3Com 3c555 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_556, "3Com 3c556 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_556B, "3Com 3c556B Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_575A, "3Com 3c575TX Fast Etherlink XL" }, { TC_VENDORID, 
TC_DEVICEID_HURRICANE_575B, "3Com 3c575B Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_575C, "3Com 3c575C Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_656, "3Com 3c656 Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_HURRICANE_656B, "3Com 3c656B Fast Etherlink XL" }, { TC_VENDORID, TC_DEVICEID_TORNADO_656C, "3Com 3c656C Fast Etherlink XL" }, { 0, 0, NULL } }; static int xl_probe(device_t); static int xl_attach(device_t); static int xl_detach(device_t); static int xl_newbuf(struct xl_softc *, struct xl_chain_onefrag *); static void xl_stats_update(void *); static void xl_stats_update_locked(struct xl_softc *); static int xl_encap(struct xl_softc *, struct xl_chain *, struct mbuf *); static void xl_rxeof(struct xl_softc *); static void xl_rxeof_task(void *, int); static int xl_rx_resync(struct xl_softc *); static void xl_txeof(struct xl_softc *); static void xl_txeof_90xB(struct xl_softc *); static void xl_txeoc(struct xl_softc *); static void xl_intr(void *); static void xl_start(struct ifnet *); static void xl_start_locked(struct ifnet *); static void xl_start_90xB_locked(struct ifnet *); static int xl_ioctl(struct ifnet *, u_long, caddr_t); static void xl_init(void *); static void xl_init_locked(struct xl_softc *); static void xl_stop(struct xl_softc *); static void xl_watchdog(struct ifnet *); static void xl_shutdown(device_t); static int xl_suspend(device_t); static int xl_resume(device_t); #ifdef DEVICE_POLLING static void xl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); static void xl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count); #endif /* DEVICE_POLLING */ static int xl_ifmedia_upd(struct ifnet *); static void xl_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int xl_eeprom_wait(struct xl_softc *); static int xl_read_eeprom(struct xl_softc *, caddr_t, int, int, int); static void xl_mii_sync(struct xl_softc *); static void xl_mii_send(struct xl_softc *, u_int32_t, int); static int 
xl_mii_readreg(struct xl_softc *, struct xl_mii_frame *); static int xl_mii_writereg(struct xl_softc *, struct xl_mii_frame *); static void xl_setcfg(struct xl_softc *); static void xl_setmode(struct xl_softc *, int); static void xl_setmulti(struct xl_softc *); static void xl_setmulti_hash(struct xl_softc *); static void xl_reset(struct xl_softc *); static int xl_list_rx_init(struct xl_softc *); static int xl_list_tx_init(struct xl_softc *); static int xl_list_tx_init_90xB(struct xl_softc *); static void xl_wait(struct xl_softc *); static void xl_mediacheck(struct xl_softc *); static void xl_choose_media(struct xl_softc *sc, int *media); static void xl_choose_xcvr(struct xl_softc *, int); static void xl_dma_map_addr(void *, bus_dma_segment_t *, int, int); static void xl_dma_map_rxbuf(void *, bus_dma_segment_t *, int, bus_size_t, int); static void xl_dma_map_txbuf(void *, bus_dma_segment_t *, int, bus_size_t, int); #ifdef notdef static void xl_testpacket(struct xl_softc *); #endif static int xl_miibus_readreg(device_t, int, int); static int xl_miibus_writereg(device_t, int, int, int); static void xl_miibus_statchg(device_t); static void xl_miibus_mediainit(device_t); static device_method_t xl_methods[] = { /* Device interface */ DEVMETHOD(device_probe, xl_probe), DEVMETHOD(device_attach, xl_attach), DEVMETHOD(device_detach, xl_detach), DEVMETHOD(device_shutdown, xl_shutdown), DEVMETHOD(device_suspend, xl_suspend), DEVMETHOD(device_resume, xl_resume), /* bus interface */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), /* MII interface */ DEVMETHOD(miibus_readreg, xl_miibus_readreg), DEVMETHOD(miibus_writereg, xl_miibus_writereg), DEVMETHOD(miibus_statchg, xl_miibus_statchg), DEVMETHOD(miibus_mediainit, xl_miibus_mediainit), { 0, 0 } }; static driver_t xl_driver = { "xl", xl_methods, sizeof(struct xl_softc) }; static devclass_t xl_devclass; DRIVER_MODULE(xl, cardbus, xl_driver, xl_devclass, 0, 0); 
DRIVER_MODULE(xl, pci, xl_driver, xl_devclass, 0, 0); DRIVER_MODULE(miibus, xl, miibus_driver, miibus_devclass, 0, 0); static void xl_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { u_int32_t *paddr; paddr = arg; *paddr = segs->ds_addr; } static void xl_dma_map_rxbuf(void *arg, bus_dma_segment_t *segs, int nseg, bus_size_t mapsize, int error) { u_int32_t *paddr; if (error) return; KASSERT(nseg == 1, ("xl_dma_map_rxbuf: too many DMA segments")); paddr = arg; *paddr = segs->ds_addr; } static void xl_dma_map_txbuf(void *arg, bus_dma_segment_t *segs, int nseg, bus_size_t mapsize, int error) { struct xl_list *l; int i, total_len; if (error) return; KASSERT(nseg <= XL_MAXFRAGS, ("too many DMA segments")); total_len = 0; l = arg; for (i = 0; i < nseg; i++) { KASSERT(segs[i].ds_len <= MCLBYTES, ("segment size too large")); l->xl_frag[i].xl_addr = htole32(segs[i].ds_addr); l->xl_frag[i].xl_len = htole32(segs[i].ds_len); total_len += segs[i].ds_len; } l->xl_frag[nseg - 1].xl_len = htole32(segs[nseg - 1].ds_len | XL_LAST_FRAG); l->xl_status = htole32(total_len); l->xl_next = 0; } /* * Murphy's law says that it's possible the chip can wedge and * the 'command in progress' bit may never clear. Hence, we wait * only a finite amount of time to avoid getting caught in an * infinite loop. Normally this delay routine would be a macro, * but it isn't called during normal operation so we can afford * to make it a function. */ static void xl_wait(struct xl_softc *sc) { register int i; for (i = 0; i < XL_TIMEOUT; i++) { if ((CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY) == 0) break; } if (i == XL_TIMEOUT) if_printf(sc->xl_ifp, "command never completed!\n"); } /* * MII access routines are provided for adapters with external * PHYs (3c905-TX, 3c905-T4, 3c905B-T4) and those with built-in * autoneg logic that's faked up to look like a PHY (3c905B-TX). 
 * Note: if you don't perform the MDIO operations just right,
 * it's possible to end up with code that works correctly with
 * some chips/CPUs/processor speeds/bus speeds/etc but not
 * with others.
 */

/* Bit-bang helpers: set/clear bits in the window-4 PHY management reg. */
#define MII_SET(x)					\
	CSR_WRITE_2(sc, XL_W4_PHY_MGMT,			\
		CSR_READ_2(sc, XL_W4_PHY_MGMT) | (x))

#define MII_CLR(x)					\
	CSR_WRITE_2(sc, XL_W4_PHY_MGMT,			\
		CSR_READ_2(sc, XL_W4_PHY_MGMT) & ~(x))

/*
 * Sync the PHYs by setting data bit and strobing the clock 32 times.
 */
static void
xl_mii_sync(struct xl_softc *sc)
{
	register int i;

	XL_SEL_WIN(4);
	MII_SET(XL_MII_DIR|XL_MII_DATA);

	for (i = 0; i < 32; i++) {
		MII_SET(XL_MII_CLK);
		/*
		 * NB: the data line is deliberately written twice per
		 * clock half-cycle here — presumably for bus timing;
		 * preserved as-is (TODO confirm against hardware docs).
		 */
		MII_SET(XL_MII_DATA);
		MII_SET(XL_MII_DATA);
		MII_CLR(XL_MII_CLK);
		MII_SET(XL_MII_DATA);
		MII_SET(XL_MII_DATA);
	}
}

/*
 * Clock a series of bits through the MII, MSB first.
 */
static void
xl_mii_send(struct xl_softc *sc, u_int32_t bits, int cnt)
{
	int i;

	XL_SEL_WIN(4);
	MII_CLR(XL_MII_CLK);

	for (i = (0x1 << (cnt - 1)); i; i >>= 1) {
		if (bits & i) {
			MII_SET(XL_MII_DATA);
		} else {
			MII_CLR(XL_MII_DATA);
		}
		MII_CLR(XL_MII_CLK);
		MII_SET(XL_MII_CLK);
	}
}

/*
 * Read an PHY register through the MII.
 * Returns 1 if the PHY failed to ack, 0 on success; the register
 * value is left in frame->mii_data.
 */
static int
xl_mii_readreg(struct xl_softc *sc, struct xl_mii_frame *frame)
{
	int i, ack;

	/*XL_LOCK_ASSERT(sc);*/

	/* Set up frame for RX. */
	frame->mii_stdelim = XL_MII_STARTDELIM;
	frame->mii_opcode = XL_MII_READOP;
	frame->mii_turnaround = 0;
	frame->mii_data = 0;

	/* Select register window 4. */
	XL_SEL_WIN(4);

	CSR_WRITE_2(sc, XL_W4_PHY_MGMT, 0);

	/* Turn on data xmit. */
	MII_SET(XL_MII_DIR);

	xl_mii_sync(sc);

	/* Send command/address info. */
	xl_mii_send(sc, frame->mii_stdelim, 2);
	xl_mii_send(sc, frame->mii_opcode, 2);
	xl_mii_send(sc, frame->mii_phyaddr, 5);
	xl_mii_send(sc, frame->mii_regaddr, 5);

	/* Idle bit */
	MII_CLR((XL_MII_CLK|XL_MII_DATA));
	MII_SET(XL_MII_CLK);

	/* Turn off xmit. */
	MII_CLR(XL_MII_DIR);

	/* Check for ack */
	MII_CLR(XL_MII_CLK);
	ack = CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA;
	MII_SET(XL_MII_CLK);

	/*
	 * Now try reading data bits.  If the ack failed, we still
	 * need to clock through 16 cycles to keep the PHY(s) in sync.
	 */
	if (ack) {
		for (i = 0; i < 16; i++) {
			MII_CLR(XL_MII_CLK);
			MII_SET(XL_MII_CLK);
		}
		goto fail;
	}

	/* Shift in the 16 data bits, MSB first. */
	for (i = 0x8000; i; i >>= 1) {
		MII_CLR(XL_MII_CLK);
		if (!ack) {
			if (CSR_READ_2(sc, XL_W4_PHY_MGMT) & XL_MII_DATA)
				frame->mii_data |= i;
		}
		MII_SET(XL_MII_CLK);
	}

fail:
	MII_CLR(XL_MII_CLK);
	MII_SET(XL_MII_CLK);

	return (ack ? 1 : 0);
}

/*
 * Write to a PHY register through the MII.  Always returns 0.
 */
static int
xl_mii_writereg(struct xl_softc *sc, struct xl_mii_frame *frame)
{

	/*XL_LOCK_ASSERT(sc);*/

	/* Set up frame for TX. */
	frame->mii_stdelim = XL_MII_STARTDELIM;
	frame->mii_opcode = XL_MII_WRITEOP;
	frame->mii_turnaround = XL_MII_TURNAROUND;

	/* Select the window 4. */
	XL_SEL_WIN(4);

	/* Turn on data output. */
	MII_SET(XL_MII_DIR);

	xl_mii_sync(sc);

	xl_mii_send(sc, frame->mii_stdelim, 2);
	xl_mii_send(sc, frame->mii_opcode, 2);
	xl_mii_send(sc, frame->mii_phyaddr, 5);
	xl_mii_send(sc, frame->mii_regaddr, 5);
	xl_mii_send(sc, frame->mii_turnaround, 2);
	xl_mii_send(sc, frame->mii_data, 16);

	/* Idle bit. */
	MII_SET(XL_MII_CLK);
	MII_CLR(XL_MII_CLK);

	/* Turn off xmit. */
	MII_CLR(XL_MII_DIR);

	return (0);
}

/* miibus read-register method. */
static int
xl_miibus_readreg(device_t dev, int phy, int reg)
{
	struct xl_softc *sc;
	struct xl_mii_frame frame;

	sc = device_get_softc(dev);

	/*
	 * Pretend that PHYs are only available at MII address 24.
	 * This is to guard against problems with certain 3Com ASIC
	 * revisions that incorrectly map the internal transceiver
	 * control registers at all MII addresses.  This can cause
	 * the miibus code to attach the same PHY several times over.
	 */
	if ((sc->xl_flags & XL_FLAG_PHYOK) == 0 && phy != 24)
		return (0);

	bzero((char *)&frame, sizeof(frame));
	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;

	xl_mii_readreg(sc, &frame);

	return (frame.mii_data);
}

/* miibus write-register method; same PHY-address-24 guard as above. */
static int
xl_miibus_writereg(device_t dev, int phy, int reg, int data)
{
	struct xl_softc *sc;
	struct xl_mii_frame frame;

	sc = device_get_softc(dev);

	if ((sc->xl_flags & XL_FLAG_PHYOK) == 0 && phy != 24)
		return (0);

	bzero((char *)&frame, sizeof(frame));
	frame.mii_phyaddr = phy;
	frame.mii_regaddr = reg;
	frame.mii_data = data;

	xl_mii_writereg(sc, &frame);

	return (0);
}

/* miibus link-status-change method: resync the ASIC's duplex setting. */
static void
xl_miibus_statchg(device_t dev)
{
	struct xl_softc *sc;
	struct mii_data *mii;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->xl_miibus);

	/*XL_LOCK_ASSERT(sc);*/

	xl_setcfg(sc);

	/* Set ASIC's duplex mode to match the PHY. */
	XL_SEL_WIN(3);
	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX);
	else
		CSR_WRITE_1(sc, XL_W3_MAC_CTRL,
		    (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX));
}

/*
 * Special support for the 3c905B-COMBO.  This card has 10/100 support
 * plus BNC and AUI ports.  This means we will have both an miibus attached
 * plus some non-MII media settings.  In order to allow this, we have to
 * add the extra media to the miibus's ifmedia struct, but we can't do
 * that during xl_attach() because the miibus hasn't been attached yet.
 * So instead, we wait until the miibus probe/attach is done, at which
 * point we will get a callback telling is that it's safe to add our
 * extra media.
 */
static void
xl_miibus_mediainit(device_t dev)
{
	struct xl_softc *sc;
	struct mii_data *mii;
	struct ifmedia *ifm;

	sc = device_get_softc(dev);
	mii = device_get_softc(sc->xl_miibus);
	ifm = &mii->mii_media;

	/*XL_LOCK_ASSERT(sc);*/

	if (sc->xl_media & (XL_MEDIAOPT_AUI | XL_MEDIAOPT_10FL)) {
		/*
		 * Check for a 10baseFL board in disguise.
		 */
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			if (bootverbose)
				if_printf(sc->xl_ifp, "found 10baseFL\n");
			ifmedia_add(ifm, IFM_ETHER | IFM_10_FL, 0, NULL);
			ifmedia_add(ifm, IFM_ETHER | IFM_10_FL|IFM_HDX, 0,
			    NULL);
			if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
				ifmedia_add(ifm,
				    IFM_ETHER | IFM_10_FL | IFM_FDX, 0, NULL);
		} else {
			if (bootverbose)
				if_printf(sc->xl_ifp, "found AUI\n");
			ifmedia_add(ifm, IFM_ETHER | IFM_10_5, 0, NULL);
		}
	}

	if (sc->xl_media & XL_MEDIAOPT_BNC) {
		if (bootverbose)
			if_printf(sc->xl_ifp, "found BNC\n");
		ifmedia_add(ifm, IFM_ETHER | IFM_10_2, 0, NULL);
	}
}

/*
 * The EEPROM is slow: give it time to come ready after issuing
 * it a command.  Returns 1 on timeout, 0 when ready.
 */
static int
xl_eeprom_wait(struct xl_softc *sc)
{
	int i;

	for (i = 0; i < 100; i++) {
		if (CSR_READ_2(sc, XL_W0_EE_CMD) & XL_EE_BUSY)
			DELAY(162);
		else
			break;
	}

	if (i == 100) {
		if_printf(sc->xl_ifp, "eeprom failed to come ready\n");
		return (1);
	}

	return (0);
}

/*
 * Read a sequence of words from the EEPROM.  Note that ethernet address
 * data is stored in the EEPROM in network byte order.
 * Returns 1 on EEPROM timeout, 0 on success.
 */
static int
xl_read_eeprom(struct xl_softc *sc, caddr_t dest, int off, int cnt, int swap)
{
	int err = 0, i;
	u_int16_t word = 0, *ptr;

	XL_LOCK_ASSERT(sc);

#define EEPROM_5BIT_OFFSET(A) ((((A) << 2) & 0x7F00) | ((A) & 0x003F))
#define EEPROM_8BIT_OFFSET(A) ((A) & 0x003F)

	/*
	 * XXX: WARNING! DANGER!
	 * It's easy to accidentally overwrite the rom content!
	 * Note: the 3c575 uses 8bit EEPROM offsets.
	 */
	XL_SEL_WIN(0);

	if (xl_eeprom_wait(sc))
		return (1);

	if (sc->xl_flags & XL_FLAG_EEPROM_OFFSET_30)
		off += 0x30;

	for (i = 0; i < cnt; i++) {
		if (sc->xl_flags & XL_FLAG_8BITROM)
			CSR_WRITE_2(sc, XL_W0_EE_CMD,
			    XL_EE_8BIT_READ | EEPROM_8BIT_OFFSET(off + i));
		else
			CSR_WRITE_2(sc, XL_W0_EE_CMD,
			    XL_EE_READ | EEPROM_5BIT_OFFSET(off + i));
		err = xl_eeprom_wait(sc);
		if (err)
			break;
		word = CSR_READ_2(sc, XL_W0_EE_DATA);
		ptr = (u_int16_t *)(dest + (i * 2));
		if (swap)
			*ptr = ntohs(word);
		else
			*ptr = word;
	}

	return (err ?
1 : 0); } /* * NICs older than the 3c905B have only one multicast option, which * is to enable reception of all multicast frames. */ static void xl_setmulti(struct xl_softc *sc) { struct ifnet *ifp = sc->xl_ifp; struct ifmultiaddr *ifma; u_int8_t rxfilt; int mcnt = 0; XL_LOCK_ASSERT(sc); XL_SEL_WIN(5); rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER); if (ifp->if_flags & IFF_ALLMULTI) { rxfilt |= XL_RXFILTER_ALLMULTI; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); return; } + IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) mcnt++; + IF_ADDR_UNLOCK(ifp); if (mcnt) rxfilt |= XL_RXFILTER_ALLMULTI; else rxfilt &= ~XL_RXFILTER_ALLMULTI; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); } /* * 3c905B adapters have a hash filter that we can program. */ static void xl_setmulti_hash(struct xl_softc *sc) { struct ifnet *ifp = sc->xl_ifp; int h = 0, i; struct ifmultiaddr *ifma; u_int8_t rxfilt; int mcnt = 0; XL_LOCK_ASSERT(sc); XL_SEL_WIN(5); rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER); if (ifp->if_flags & IFF_ALLMULTI) { rxfilt |= XL_RXFILTER_ALLMULTI; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); return; } else rxfilt &= ~XL_RXFILTER_ALLMULTI; /* first, zot all the existing hash bits */ for (i = 0; i < XL_HASHFILT_SIZE; i++) CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_HASH|i); /* now program new ones */ + IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; /* * Note: the 3c905B currently only supports a 64-bit hash * table, which means we really only need 6 bits, but the * manual indicates that future chip revisions will have a * 256-bit hash table, hence the routine is set up to * calculate 8 bits of position info in case we need it some * day. * Note II, The Sequel: _CURRENT_ versions of the 3c905B have * a 256 bit hash table. This means we have to use all 8 bits * regardless. On older cards, the upper 2 bits will be * ignored. Grrrr.... 
*/ h = ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) & 0xFF; CSR_WRITE_2(sc, XL_COMMAND, h | XL_CMD_RX_SET_HASH | XL_HASH_SET); mcnt++; } + IF_ADDR_UNLOCK(ifp); if (mcnt) rxfilt |= XL_RXFILTER_MULTIHASH; else rxfilt &= ~XL_RXFILTER_MULTIHASH; CSR_WRITE_2(sc, XL_COMMAND, rxfilt | XL_CMD_RX_SET_FILT); } #ifdef notdef static void xl_testpacket(struct xl_softc *sc) { struct mbuf *m; struct ifnet *ifp = sc->xl_ifp; MGETHDR(m, M_DONTWAIT, MT_DATA); if (m == NULL) return; bcopy(&IFP2ENADDR(sc->xl_ifp), mtod(m, struct ether_header *)->ether_dhost, ETHER_ADDR_LEN); bcopy(&IFP2ENADDR(sc->xl_ifp), mtod(m, struct ether_header *)->ether_shost, ETHER_ADDR_LEN); mtod(m, struct ether_header *)->ether_type = htons(3); mtod(m, unsigned char *)[14] = 0; mtod(m, unsigned char *)[15] = 0; mtod(m, unsigned char *)[16] = 0xE3; m->m_len = m->m_pkthdr.len = sizeof(struct ether_header) + 3; IFQ_ENQUEUE(&ifp->if_snd, m); xl_start(ifp); } #endif static void xl_setcfg(struct xl_softc *sc) { u_int32_t icfg; /*XL_LOCK_ASSERT(sc);*/ XL_SEL_WIN(3); icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG); icfg &= ~XL_ICFG_CONNECTOR_MASK; if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BT4) icfg |= (XL_XCVR_MII << XL_ICFG_CONNECTOR_BITS); if (sc->xl_media & XL_MEDIAOPT_BTX) icfg |= (XL_XCVR_AUTO << XL_ICFG_CONNECTOR_BITS); CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP); } static void xl_setmode(struct xl_softc *sc, int media) { u_int32_t icfg; u_int16_t mediastat; char *pmsg = "", *dmsg = ""; /*XL_LOCK_ASSERT(sc);*/ XL_SEL_WIN(4); mediastat = CSR_READ_2(sc, XL_W4_MEDIA_STATUS); XL_SEL_WIN(3); icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG); if (sc->xl_media & XL_MEDIAOPT_BT) { if (IFM_SUBTYPE(media) == IFM_10_T) { pmsg = "10baseT transceiver"; sc->xl_xcvr = XL_XCVR_10BT; icfg &= ~XL_ICFG_CONNECTOR_MASK; icfg |= (XL_XCVR_10BT << XL_ICFG_CONNECTOR_BITS); mediastat |= XL_MEDIASTAT_LINKBEAT | XL_MEDIASTAT_JABGUARD; mediastat &= 
~XL_MEDIASTAT_SQEENB; } } if (sc->xl_media & XL_MEDIAOPT_BFX) { if (IFM_SUBTYPE(media) == IFM_100_FX) { pmsg = "100baseFX port"; sc->xl_xcvr = XL_XCVR_100BFX; icfg &= ~XL_ICFG_CONNECTOR_MASK; icfg |= (XL_XCVR_100BFX << XL_ICFG_CONNECTOR_BITS); mediastat |= XL_MEDIASTAT_LINKBEAT; mediastat &= ~XL_MEDIASTAT_SQEENB; } } if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) { if (IFM_SUBTYPE(media) == IFM_10_5) { pmsg = "AUI port"; sc->xl_xcvr = XL_XCVR_AUI; icfg &= ~XL_ICFG_CONNECTOR_MASK; icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS); mediastat &= ~(XL_MEDIASTAT_LINKBEAT | XL_MEDIASTAT_JABGUARD); mediastat |= ~XL_MEDIASTAT_SQEENB; } if (IFM_SUBTYPE(media) == IFM_10_FL) { pmsg = "10baseFL transceiver"; sc->xl_xcvr = XL_XCVR_AUI; icfg &= ~XL_ICFG_CONNECTOR_MASK; icfg |= (XL_XCVR_AUI << XL_ICFG_CONNECTOR_BITS); mediastat &= ~(XL_MEDIASTAT_LINKBEAT | XL_MEDIASTAT_JABGUARD); mediastat |= ~XL_MEDIASTAT_SQEENB; } } if (sc->xl_media & XL_MEDIAOPT_BNC) { if (IFM_SUBTYPE(media) == IFM_10_2) { pmsg = "AUI port"; sc->xl_xcvr = XL_XCVR_COAX; icfg &= ~XL_ICFG_CONNECTOR_MASK; icfg |= (XL_XCVR_COAX << XL_ICFG_CONNECTOR_BITS); mediastat &= ~(XL_MEDIASTAT_LINKBEAT | XL_MEDIASTAT_JABGUARD | XL_MEDIASTAT_SQEENB); } } if ((media & IFM_GMASK) == IFM_FDX || IFM_SUBTYPE(media) == IFM_100_FX) { dmsg = "full"; XL_SEL_WIN(3); CSR_WRITE_1(sc, XL_W3_MAC_CTRL, XL_MACCTRL_DUPLEX); } else { dmsg = "half"; XL_SEL_WIN(3); CSR_WRITE_1(sc, XL_W3_MAC_CTRL, (CSR_READ_1(sc, XL_W3_MAC_CTRL) & ~XL_MACCTRL_DUPLEX)); } if (IFM_SUBTYPE(media) == IFM_10_2) CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START); else CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP); CSR_WRITE_4(sc, XL_W3_INTERNAL_CFG, icfg); XL_SEL_WIN(4); CSR_WRITE_2(sc, XL_W4_MEDIA_STATUS, mediastat); DELAY(800); XL_SEL_WIN(7); if_printf(sc->xl_ifp, "selecting %s, %s duplex\n", pmsg, dmsg); } static void xl_reset(struct xl_softc *sc) { register int i; XL_LOCK_ASSERT(sc); XL_SEL_WIN(0); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RESET | ((sc->xl_flags & 
XL_FLAG_WEIRDRESET) ? XL_RESETOPT_DISADVFD:0)); /* * If we're using memory mapped register mode, pause briefly * after issuing the reset command before trying to access any * other registers. With my 3c575C cardbus card, failing to do * this results in the system locking up while trying to poll * the command busy bit in the status register. */ if (sc->xl_flags & XL_FLAG_USE_MMIO) DELAY(100000); for (i = 0; i < XL_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_2(sc, XL_STATUS) & XL_STAT_CMDBUSY)) break; } if (i == XL_TIMEOUT) if_printf(sc->xl_ifp, "reset didn't complete\n"); /* Reset TX and RX. */ /* Note: the RX reset takes an absurd amount of time * on newer versions of the Tornado chips such as those * on the 3c905CX and newer 3c908C cards. We wait an * extra amount of time so that xl_wait() doesn't complain * and annoy the users. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET); DELAY(100000); xl_wait(sc); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); xl_wait(sc); if (sc->xl_flags & XL_FLAG_INVERT_LED_PWR || sc->xl_flags & XL_FLAG_INVERT_MII_PWR) { XL_SEL_WIN(2); CSR_WRITE_2(sc, XL_W2_RESET_OPTIONS, CSR_READ_2(sc, XL_W2_RESET_OPTIONS) | ((sc->xl_flags & XL_FLAG_INVERT_LED_PWR) ? XL_RESETOPT_INVERT_LED : 0) | ((sc->xl_flags & XL_FLAG_INVERT_MII_PWR) ? XL_RESETOPT_INVERT_MII : 0)); } /* Wait a little while for the chip to get its brains in order. */ DELAY(100000); } /* * Probe for a 3Com Etherlink XL chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. 
*/ static int xl_probe(device_t dev) { struct xl_type *t; t = xl_devs; while (t->xl_name != NULL) { if ((pci_get_vendor(dev) == t->xl_vid) && (pci_get_device(dev) == t->xl_did)) { device_set_desc(dev, t->xl_name); return (BUS_PROBE_DEFAULT); } t++; } return (ENXIO); } /* * This routine is a kludge to work around possible hardware faults * or manufacturing defects that can cause the media options register * (or reset options register, as it's called for the first generation * 3c90x adapters) to return an incorrect result. I have encountered * one Dell Latitude laptop docking station with an integrated 3c905-TX * which doesn't have any of the 'mediaopt' bits set. This screws up * the attach routine pretty badly because it doesn't know what media * to look for. If we find ourselves in this predicament, this routine * will try to guess the media options values and warn the user of a * possible manufacturing defect with his adapter/system/whatever. */ static void xl_mediacheck(struct xl_softc *sc) { XL_LOCK_ASSERT(sc); /* * If some of the media options bits are set, assume they are * correct. If not, try to figure it out down below. * XXX I should check for 10baseFL, but I don't have an adapter * to test with. */ if (sc->xl_media & (XL_MEDIAOPT_MASK & ~XL_MEDIAOPT_VCO)) { /* * Check the XCVR value. If it's not in the normal range * of values, we need to fake it up here. 
*/ if (sc->xl_xcvr <= XL_XCVR_AUTO) return; else { if_printf(sc->xl_ifp, "bogus xcvr value in EEPROM (%x)\n", sc->xl_xcvr); if_printf(sc->xl_ifp, "choosing new default based on card type\n"); } } else { if (sc->xl_type == XL_TYPE_905B && sc->xl_media & XL_MEDIAOPT_10FL) return; if_printf(sc->xl_ifp, "WARNING: no media options bits set in the media options register!!\n"); if_printf(sc->xl_ifp, "this could be a manufacturing defect in your adapter or system\n"); if_printf(sc->xl_ifp, "attempting to guess media type; you should probably consult your vendor\n"); } xl_choose_xcvr(sc, 1); } static void xl_choose_xcvr(struct xl_softc *sc, int verbose) { u_int16_t devid; /* * Read the device ID from the EEPROM. * This is what's loaded into the PCI device ID register, so it has * to be correct otherwise we wouldn't have gotten this far. */ xl_read_eeprom(sc, (caddr_t)&devid, XL_EE_PRODID, 1, 0); switch (devid) { case TC_DEVICEID_BOOMERANG_10BT: /* 3c900-TPO */ case TC_DEVICEID_KRAKATOA_10BT: /* 3c900B-TPO */ sc->xl_media = XL_MEDIAOPT_BT; sc->xl_xcvr = XL_XCVR_10BT; if (verbose) if_printf(sc->xl_ifp, "guessing 10BaseT transceiver\n"); break; case TC_DEVICEID_BOOMERANG_10BT_COMBO: /* 3c900-COMBO */ case TC_DEVICEID_KRAKATOA_10BT_COMBO: /* 3c900B-COMBO */ sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI; sc->xl_xcvr = XL_XCVR_10BT; if (verbose) if_printf(sc->xl_ifp, "guessing COMBO (AUI/BNC/TP)\n"); break; case TC_DEVICEID_KRAKATOA_10BT_TPC: /* 3c900B-TPC */ sc->xl_media = XL_MEDIAOPT_BT|XL_MEDIAOPT_BNC; sc->xl_xcvr = XL_XCVR_10BT; if (verbose) if_printf(sc->xl_ifp, "guessing TPC (BNC/TP)\n"); break; case TC_DEVICEID_CYCLONE_10FL: /* 3c900B-FL */ sc->xl_media = XL_MEDIAOPT_10FL; sc->xl_xcvr = XL_XCVR_AUI; if (verbose) if_printf(sc->xl_ifp, "guessing 10baseFL\n"); break; case TC_DEVICEID_BOOMERANG_10_100BT: /* 3c905-TX */ case TC_DEVICEID_HURRICANE_555: /* 3c555 */ case TC_DEVICEID_HURRICANE_556: /* 3c556 */ case TC_DEVICEID_HURRICANE_556B: /* 3c556B */ case 
TC_DEVICEID_HURRICANE_575A: /* 3c575TX */ case TC_DEVICEID_HURRICANE_575B: /* 3c575B */ case TC_DEVICEID_HURRICANE_575C: /* 3c575C */ case TC_DEVICEID_HURRICANE_656: /* 3c656 */ case TC_DEVICEID_HURRICANE_656B: /* 3c656B */ case TC_DEVICEID_TORNADO_656C: /* 3c656C */ case TC_DEVICEID_TORNADO_10_100BT_920B: /* 3c920B-EMB */ case TC_DEVICEID_TORNADO_10_100BT_920B_WNM: /* 3c920B-EMB-WNM */ sc->xl_media = XL_MEDIAOPT_MII; sc->xl_xcvr = XL_XCVR_MII; if (verbose) if_printf(sc->xl_ifp, "guessing MII\n"); break; case TC_DEVICEID_BOOMERANG_100BT4: /* 3c905-T4 */ case TC_DEVICEID_CYCLONE_10_100BT4: /* 3c905B-T4 */ sc->xl_media = XL_MEDIAOPT_BT4; sc->xl_xcvr = XL_XCVR_MII; if (verbose) if_printf(sc->xl_ifp, "guessing 100baseT4/MII\n"); break; case TC_DEVICEID_HURRICANE_10_100BT: /* 3c905B-TX */ case TC_DEVICEID_HURRICANE_10_100BT_SERV:/*3c980-TX */ case TC_DEVICEID_TORNADO_10_100BT_SERV: /* 3c980C-TX */ case TC_DEVICEID_HURRICANE_SOHO100TX: /* 3cSOHO100-TX */ case TC_DEVICEID_TORNADO_10_100BT: /* 3c905C-TX */ case TC_DEVICEID_TORNADO_HOMECONNECT: /* 3c450-TX */ sc->xl_media = XL_MEDIAOPT_BTX; sc->xl_xcvr = XL_XCVR_AUTO; if (verbose) if_printf(sc->xl_ifp, "guessing 10/100 internal\n"); break; case TC_DEVICEID_CYCLONE_10_100_COMBO: /* 3c905B-COMBO */ sc->xl_media = XL_MEDIAOPT_BTX|XL_MEDIAOPT_BNC|XL_MEDIAOPT_AUI; sc->xl_xcvr = XL_XCVR_AUTO; if (verbose) if_printf(sc->xl_ifp, "guessing 10/100 plus BNC/AUI\n"); break; default: if_printf(sc->xl_ifp, "unknown device ID: %x -- defaulting to 10baseT\n", devid); sc->xl_media = XL_MEDIAOPT_BT; break; } } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. 
*/
/*
 * Attach: identify the chip variant from the PCI device ID, map the
 * register window (MMIO preferred, port I/O fallback), allocate the
 * IRQ and the descriptor-ring DMA memory, probe media/PHY and hook
 * into the network stack.  On any failure it jumps to "fail:", which
 * hands partially-allocated state to xl_detach() for cleanup.
 */
static int
xl_attach(device_t dev)
{
        u_char                  eaddr[ETHER_ADDR_LEN];
        u_int16_t               xcvr[2];
        struct xl_softc         *sc;
        struct ifnet            *ifp;
        int                     media;
        int                     unit, error = 0, rid, res;
        uint16_t                did;

        sc = device_get_softc(dev);
        unit = device_get_unit(dev);

        mtx_init(&sc->xl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
            MTX_DEF);
        ifmedia_init(&sc->ifmedia, 0, xl_ifmedia_upd, xl_ifmedia_sts);

        did = pci_get_device(dev);

        /* Per-device quirk flags keyed off the PCI device ID. */
        sc->xl_flags = 0;
        if (did == TC_DEVICEID_HURRICANE_555)
                sc->xl_flags |= XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_PHYOK;
        if (did == TC_DEVICEID_HURRICANE_556 ||
            did == TC_DEVICEID_HURRICANE_556B)
                sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK |
                    XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_WEIRDRESET |
                    XL_FLAG_INVERT_LED_PWR | XL_FLAG_INVERT_MII_PWR;
        if (did == TC_DEVICEID_HURRICANE_555 ||
            did == TC_DEVICEID_HURRICANE_556)
                sc->xl_flags |= XL_FLAG_8BITROM;
        if (did == TC_DEVICEID_HURRICANE_556B)
                sc->xl_flags |= XL_FLAG_NO_XCVR_PWR;

        if (did == TC_DEVICEID_HURRICANE_575A ||
            did == TC_DEVICEID_HURRICANE_575B ||
            did == TC_DEVICEID_HURRICANE_575C ||
            did == TC_DEVICEID_HURRICANE_656B ||
            did == TC_DEVICEID_TORNADO_656C)
                sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK |
                    XL_FLAG_EEPROM_OFFSET_30 | XL_FLAG_8BITROM;
        if (did == TC_DEVICEID_HURRICANE_656)
                sc->xl_flags |= XL_FLAG_FUNCREG | XL_FLAG_PHYOK;
        if (did == TC_DEVICEID_HURRICANE_575B)
                sc->xl_flags |= XL_FLAG_INVERT_LED_PWR;
        if (did == TC_DEVICEID_HURRICANE_575C)
                sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;
        if (did == TC_DEVICEID_TORNADO_656C)
                sc->xl_flags |= XL_FLAG_INVERT_MII_PWR;
        if (did == TC_DEVICEID_HURRICANE_656 ||
            did == TC_DEVICEID_HURRICANE_656B)
                sc->xl_flags |= XL_FLAG_INVERT_MII_PWR |
                    XL_FLAG_INVERT_LED_PWR;
        if (did == TC_DEVICEID_TORNADO_10_100BT_920B ||
            did == TC_DEVICEID_TORNADO_10_100BT_920B_WNM)
                sc->xl_flags |= XL_FLAG_PHYOK;

        /* Devices on which memory-mapped I/O is known not to work. */
        switch (did) {
        case TC_DEVICEID_BOOMERANG_10_100BT:    /* 3c905-TX */
        case TC_DEVICEID_HURRICANE_575A:
        case TC_DEVICEID_HURRICANE_575B:
        case TC_DEVICEID_HURRICANE_575C:
                sc->xl_flags |= XL_FLAG_NO_MMIO;
                break;
        default:
                break;
        }

        /*
         * Map control/status registers.
         */
        pci_enable_busmaster(dev);

        if ((sc->xl_flags & XL_FLAG_NO_MMIO) == 0) {
                rid = XL_PCI_LOMEM;
                res = SYS_RES_MEMORY;

                sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE);
        }

        if (sc->xl_res != NULL) {
                sc->xl_flags |= XL_FLAG_USE_MMIO;
                if (bootverbose)
                        device_printf(dev, "using memory mapped I/O\n");
        } else {
                /* Fall back to port I/O when MMIO is disabled or failed. */
                rid = XL_PCI_LOIO;
                res = SYS_RES_IOPORT;
                sc->xl_res = bus_alloc_resource_any(dev, res, &rid, RF_ACTIVE);
                if (sc->xl_res == NULL) {
                        device_printf(dev, "couldn't map ports/memory\n");
                        error = ENXIO;
                        goto fail;
                }
                if (bootverbose)
                        device_printf(dev, "using port I/O\n");
        }

        sc->xl_btag = rman_get_bustag(sc->xl_res);
        sc->xl_bhandle = rman_get_bushandle(sc->xl_res);

        /* Some variants have a separate "function register" BAR. */
        if (sc->xl_flags & XL_FLAG_FUNCREG) {
                rid = XL_PCI_FUNCMEM;
                sc->xl_fres = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
                    &rid, RF_ACTIVE);

                if (sc->xl_fres == NULL) {
                        device_printf(dev, "couldn't map ports/memory\n");
                        error = ENXIO;
                        goto fail;
                }

                sc->xl_ftag = rman_get_bustag(sc->xl_fres);
                sc->xl_fhandle = rman_get_bushandle(sc->xl_fres);
        }

        /* Allocate interrupt */
        rid = 0;
        sc->xl_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
            RF_SHAREABLE | RF_ACTIVE);
        if (sc->xl_irq == NULL) {
                device_printf(dev, "couldn't map interrupt\n");
                error = ENXIO;
                goto fail;
        }

        /* Initialize interface name. */
        ifp = sc->xl_ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "can not if_alloc()\n");
                error = ENOSPC;
                goto fail;
        }
        ifp->if_softc = sc;
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));

        XL_LOCK(sc);

        /* Reset the adapter. */
        xl_reset(sc);

        /*
         * Get station address from the EEPROM.
         */
        if (xl_read_eeprom(sc, (caddr_t)&eaddr, XL_EE_OEM_ADR0, 3, 1)) {
                device_printf(dev, "failed to read station address\n");
                error = ENXIO;
                XL_UNLOCK(sc);
                goto fail;
        }

        XL_UNLOCK(sc);

        sc->xl_unit = unit;
        callout_handle_init(&sc->xl_stat_ch);
        TASK_INIT(&sc->xl_task, 0, xl_rxeof_task, sc);

        /*
         * Now allocate a tag for the DMA descriptor lists and a chunk
         * of DMA-able memory based on the tag.  Also obtain the DMA
         * addresses of the RX and TX ring, which we'll need later.
         * All of our lists are allocated as a contiguous block
         * of memory.
         */
        error = bus_dma_tag_create(NULL, 8, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            XL_RX_LIST_SZ, 1, XL_RX_LIST_SZ, 0, NULL, NULL,
            &sc->xl_ldata.xl_rx_tag);
        if (error) {
                device_printf(dev, "failed to allocate rx dma tag\n");
                goto fail;
        }

        error = bus_dmamem_alloc(sc->xl_ldata.xl_rx_tag,
            (void **)&sc->xl_ldata.xl_rx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
            &sc->xl_ldata.xl_rx_dmamap);
        if (error) {
                device_printf(dev, "no memory for rx list buffers!\n");
                /* Undo the tag allocation so xl_detach() won't free twice. */
                bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
                sc->xl_ldata.xl_rx_tag = NULL;
                goto fail;
        }

        error = bus_dmamap_load(sc->xl_ldata.xl_rx_tag,
            sc->xl_ldata.xl_rx_dmamap, sc->xl_ldata.xl_rx_list,
            XL_RX_LIST_SZ, xl_dma_map_addr,
            &sc->xl_ldata.xl_rx_dmaaddr, BUS_DMA_NOWAIT);
        if (error) {
                device_printf(dev, "cannot get dma address of the rx ring!\n");
                bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list,
                    sc->xl_ldata.xl_rx_dmamap);
                bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
                sc->xl_ldata.xl_rx_tag = NULL;
                goto fail;
        }

        error = bus_dma_tag_create(NULL, 8, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            XL_TX_LIST_SZ, 1, XL_TX_LIST_SZ, 0, NULL, NULL,
            &sc->xl_ldata.xl_tx_tag);
        if (error) {
                device_printf(dev, "failed to allocate tx dma tag\n");
                goto fail;
        }

        error = bus_dmamem_alloc(sc->xl_ldata.xl_tx_tag,
            (void **)&sc->xl_ldata.xl_tx_list, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
            &sc->xl_ldata.xl_tx_dmamap);
        if (error) {
                device_printf(dev, "no memory for list buffers!\n");
                bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
                sc->xl_ldata.xl_tx_tag = NULL;
                goto fail;
        }

        error = bus_dmamap_load(sc->xl_ldata.xl_tx_tag,
            sc->xl_ldata.xl_tx_dmamap, sc->xl_ldata.xl_tx_list,
            XL_TX_LIST_SZ, xl_dma_map_addr,
            &sc->xl_ldata.xl_tx_dmaaddr, BUS_DMA_NOWAIT);
        if (error) {
                device_printf(dev, "cannot get dma address of the tx ring!\n");
                bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list,
                    sc->xl_ldata.xl_tx_dmamap);
                bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
                sc->xl_ldata.xl_tx_tag = NULL;
                goto fail;
        }

        /*
         * Allocate a DMA tag for the mapping of mbufs.
         */
        error = bus_dma_tag_create(NULL, 1, 0,
            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
            MCLBYTES * XL_MAXFRAGS, XL_MAXFRAGS, MCLBYTES, 0, NULL, NULL,
            &sc->xl_mtag);
        if (error) {
                device_printf(dev, "failed to allocate mbuf dma tag\n");
                goto fail;
        }

        /* We need a spare DMA map for the RX ring. */
        error = bus_dmamap_create(sc->xl_mtag, 0, &sc->xl_tmpmap);
        if (error)
                goto fail;

        XL_LOCK(sc);

        /*
         * Figure out the card type. 3c905B adapters have the
         * 'supportsNoTxLength' bit set in the capabilities
         * word in the EEPROM.
         * Note: my 3c575C cardbus card lies. It returns a value
         * of 0x1578 for its capabilities word, which is somewhat
         * nonsensical. Another way to distinguish a 3c90x chip
         * from a 3c90xB/C chip is to check for the 'supportsLargePackets'
         * bit. This will only be set for 3c90x boomerang chips.
         */
        xl_read_eeprom(sc, (caddr_t)&sc->xl_caps, XL_EE_CAPS, 1, 0);
        if (sc->xl_caps & XL_CAPS_NO_TXLENGTH ||
            !(sc->xl_caps & XL_CAPS_LARGE_PKTS))
                sc->xl_type = XL_TYPE_905B;
        else
                sc->xl_type = XL_TYPE_90X;

        ifp->if_mtu = ETHERMTU;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
        ifp->if_ioctl = xl_ioctl;
        ifp->if_capabilities = IFCAP_VLAN_MTU;
        if (sc->xl_type == XL_TYPE_905B) {
                ifp->if_hwassist = XL905B_CSUM_FEATURES;
#ifdef XL905B_TXCSUM_BROKEN
                ifp->if_capabilities |= IFCAP_RXCSUM;
#else
                ifp->if_capabilities |= IFCAP_HWCSUM;
#endif
        }
#ifdef DEVICE_POLLING
        ifp->if_capabilities |= IFCAP_POLLING;
#endif /* DEVICE_POLLING */
        ifp->if_start = xl_start;
        ifp->if_watchdog = xl_watchdog;
        ifp->if_init = xl_init;
        ifp->if_baudrate = 10000000;
        IFQ_SET_MAXLEN(&ifp->if_snd, XL_TX_LIST_CNT - 1);
        ifp->if_snd.ifq_drv_maxlen = XL_TX_LIST_CNT - 1;
        IFQ_SET_READY(&ifp->if_snd);
        ifp->if_capenable = ifp->if_capabilities;

        /*
         * Now we have to see what sort of media we have.
         * This includes probing for an MII interface and a
         * possible PHY.
         */
        XL_SEL_WIN(3);
        sc->xl_media = CSR_READ_2(sc, XL_W3_MEDIA_OPT);
        if (bootverbose)
                device_printf(dev, "media options word: %x\n", sc->xl_media);

        xl_read_eeprom(sc, (char *)&xcvr, XL_EE_ICFG_0, 2, 0);
        sc->xl_xcvr = xcvr[0] | xcvr[1] << 16;
        sc->xl_xcvr &= XL_ICFG_CONNECTOR_MASK;
        sc->xl_xcvr >>= XL_ICFG_CONNECTOR_BITS;

        xl_mediacheck(sc);

        /* XXX Downcalls to ifmedia, miibus about to happen. */
        XL_UNLOCK(sc);

        if (sc->xl_media & XL_MEDIAOPT_MII ||
            sc->xl_media & XL_MEDIAOPT_BTX ||
            sc->xl_media & XL_MEDIAOPT_BT4) {
                if (bootverbose)
                        device_printf(dev, "found MII/AUTO\n");
                xl_setcfg(sc);
                if (mii_phy_probe(dev, &sc->xl_miibus,
                    xl_ifmedia_upd, xl_ifmedia_sts)) {
                        device_printf(dev, "no PHY found!\n");
                        error = ENXIO;
                        goto fail;
                }
                goto done;
        }

        /*
         * Sanity check. If the user has selected "auto" and this isn't
         * a 10/100 card of some kind, we need to force the transceiver
         * type to something sane.
         */
        if (sc->xl_xcvr == XL_XCVR_AUTO) {
                /* XXX Direct hardware access needs lock coverage. */
                XL_LOCK(sc);
                xl_choose_xcvr(sc, bootverbose);
                XL_UNLOCK(sc);
        }

        /*
         * Do ifmedia setup.
         */
        if (sc->xl_media & XL_MEDIAOPT_BT) {
                if (bootverbose)
                        device_printf(dev, "found 10baseT\n");
                ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
                ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
                if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
                        ifmedia_add(&sc->ifmedia,
                            IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
        }

        if (sc->xl_media & (XL_MEDIAOPT_AUI|XL_MEDIAOPT_10FL)) {
                /*
                 * Check for a 10baseFL board in disguise.
                 */
                if (sc->xl_type == XL_TYPE_905B &&
                    sc->xl_media == XL_MEDIAOPT_10FL) {
                        if (bootverbose)
                                device_printf(dev, "found 10baseFL\n");
                        ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL,
                            0, NULL);
                        ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_FL|IFM_HDX,
                            0, NULL);
                        if (sc->xl_caps & XL_CAPS_FULL_DUPLEX)
                                ifmedia_add(&sc->ifmedia,
                                    IFM_ETHER|IFM_10_FL|IFM_FDX, 0, NULL);
                } else {
                        if (bootverbose)
                                device_printf(dev, "found AUI\n");
                        ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_5, 0, NULL);
                }
        }

        if (sc->xl_media & XL_MEDIAOPT_BNC) {
                if (bootverbose)
                        device_printf(dev, "found BNC\n");
                ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_2, 0, NULL);
        }

        if (sc->xl_media & XL_MEDIAOPT_BFX) {
                if (bootverbose)
                        device_printf(dev, "found 100baseFX\n");
                ifp->if_baudrate = 100000000;
                ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_FX, 0, NULL);
        }

        /* XXX: Unlocked, leaf will take lock. */
        media = IFM_ETHER|IFM_100_TX|IFM_FDX;
        xl_choose_media(sc, &media);

        if (sc->xl_miibus == NULL)
                ifmedia_set(&sc->ifmedia, media);

done:
        /* XXX: Unlocked hardware access, narrow race. */
        if (sc->xl_flags & XL_FLAG_NO_XCVR_PWR) {
                XL_SEL_WIN(0);
                CSR_WRITE_2(sc, XL_W0_MFG_ID, XL_NO_XCVR_PWR_MAGICBITS);
        }

        /*
         * Call MI attach routine.
         */
        ether_ifattach(ifp, eaddr);

        error = bus_setup_intr(dev, sc->xl_irq, INTR_TYPE_NET | INTR_MPSAFE,
            xl_intr, sc, &sc->xl_intrhand);
        if (error) {
                device_printf(dev, "couldn't set up irq\n");
                ether_ifdetach(ifp);
                if_free(ifp);
                goto fail;
        }

fail:
        /* On error, xl_detach() releases whatever was allocated above. */
        if (error)
                xl_detach(dev);

        return (error);
}

/*
 * Choose a default media.
 * XXX This is a leaf function only called by xl_attach() and
 * acquires/releases the non-recursible driver mutex.
 */
static void
xl_choose_media(struct xl_softc *sc, int *media)
{
        XL_LOCK(sc);

        switch (sc->xl_xcvr) {
        case XL_XCVR_10BT:
                *media = IFM_ETHER|IFM_10_T;
                xl_setmode(sc, *media);
                break;
        case XL_XCVR_AUI:
                if (sc->xl_type == XL_TYPE_905B &&
                    sc->xl_media == XL_MEDIAOPT_10FL) {
                        *media = IFM_ETHER|IFM_10_FL;
                        xl_setmode(sc, *media);
                } else {
                        *media = IFM_ETHER|IFM_10_5;
                        xl_setmode(sc, *media);
                }
                break;
        case XL_XCVR_COAX:
                *media = IFM_ETHER|IFM_10_2;
                xl_setmode(sc, *media);
                break;
        case XL_XCVR_AUTO:
        case XL_XCVR_100BTX:
        case XL_XCVR_MII:
                /* Chosen by miibus */
                break;
        case XL_XCVR_100BFX:
                *media = IFM_ETHER|IFM_100_FX;
                break;
        default:
                if_printf(sc->xl_ifp, "unknown XCVR type: %d\n", sc->xl_xcvr);
                /*
                 * This will probably be wrong, but it prevents
                 * the ifmedia code from panicking.
                 */
                *media = IFM_ETHER|IFM_10_T;
                break;
        }

        XL_UNLOCK(sc);
}

/*
 * Shutdown hardware and free up resources. This can be called any
 * time after the mutex has been initialized. It is called in both
 * the error case in attach and the normal detach case so it needs
 * to be careful about only freeing resources that have actually been
 * allocated.
*/
static int
xl_detach(device_t dev)
{
        struct xl_softc         *sc;
        struct ifnet            *ifp;
        int                     rid, res;

        sc = device_get_softc(dev);
        ifp = sc->xl_ifp;

        KASSERT(mtx_initialized(&sc->xl_mtx), ("xl mutex not initialized"));

        XL_LOCK(sc);

        /* Pick the resource type/rid that matches how attach mapped it. */
        if (sc->xl_flags & XL_FLAG_USE_MMIO) {
                rid = XL_PCI_LOMEM;
                res = SYS_RES_MEMORY;
        } else {
                rid = XL_PCI_LOIO;
                res = SYS_RES_IOPORT;
        }

        /* These should only be active if attach succeeded */
        if (device_is_attached(dev)) {
                xl_reset(sc);
                xl_stop(sc);
                ether_ifdetach(ifp);
                if_free(ifp);
        }
        if (sc->xl_miibus)
                device_delete_child(dev, sc->xl_miibus);
        bus_generic_detach(dev);
        ifmedia_removeall(&sc->ifmedia);

        /* Each release is guarded so a partial attach tears down cleanly. */
        if (sc->xl_intrhand)
                bus_teardown_intr(dev, sc->xl_irq, sc->xl_intrhand);
        if (sc->xl_irq)
                bus_release_resource(dev, SYS_RES_IRQ, 0, sc->xl_irq);
        if (sc->xl_fres != NULL)
                bus_release_resource(dev, SYS_RES_MEMORY,
                    XL_PCI_FUNCMEM, sc->xl_fres);
        if (sc->xl_res)
                bus_release_resource(dev, res, rid, sc->xl_res);

        if (sc->xl_mtag) {
                bus_dmamap_destroy(sc->xl_mtag, sc->xl_tmpmap);
                bus_dma_tag_destroy(sc->xl_mtag);
        }
        if (sc->xl_ldata.xl_rx_tag) {
                bus_dmamap_unload(sc->xl_ldata.xl_rx_tag,
                    sc->xl_ldata.xl_rx_dmamap);
                bus_dmamem_free(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_list,
                    sc->xl_ldata.xl_rx_dmamap);
                bus_dma_tag_destroy(sc->xl_ldata.xl_rx_tag);
        }
        if (sc->xl_ldata.xl_tx_tag) {
                bus_dmamap_unload(sc->xl_ldata.xl_tx_tag,
                    sc->xl_ldata.xl_tx_dmamap);
                bus_dmamem_free(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_list,
                    sc->xl_ldata.xl_tx_dmamap);
                bus_dma_tag_destroy(sc->xl_ldata.xl_tx_tag);
        }

        XL_UNLOCK(sc);
        mtx_destroy(&sc->xl_mtx);

        return (0);
}

/*
 * Initialize the transmit descriptors.
*/
/*
 * 3c90x (non-B) variant: builds a linear free list terminated by NULL;
 * head/tail of the active chain start out empty.
 */
static int
xl_list_tx_init(struct xl_softc *sc)
{
        struct xl_chain_data    *cd;
        struct xl_list_data     *ld;
        int                     error, i;

        XL_LOCK_ASSERT(sc);

        cd = &sc->xl_cdata;
        ld = &sc->xl_ldata;
        for (i = 0; i < XL_TX_LIST_CNT; i++) {
                cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
                error = bus_dmamap_create(sc->xl_mtag, 0,
                    &cd->xl_tx_chain[i].xl_map);
                if (error)
                        return (error);
                cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
                    i * sizeof(struct xl_list);
                if (i == (XL_TX_LIST_CNT - 1))
                        cd->xl_tx_chain[i].xl_next = NULL;
                else
                        cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
        }

        cd->xl_tx_free = &cd->xl_tx_chain[0];
        cd->xl_tx_tail = cd->xl_tx_head = NULL;

        bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE);
        return (0);
}

/*
 * Initialize the transmit descriptors.
 * 3c905B variant: the descriptors form a closed ring (doubly linked in
 * software) managed with producer/consumer indices instead of a free list.
 */
static int
xl_list_tx_init_90xB(struct xl_softc *sc)
{
        struct xl_chain_data    *cd;
        struct xl_list_data     *ld;
        int                     error, i;

        XL_LOCK_ASSERT(sc);

        cd = &sc->xl_cdata;
        ld = &sc->xl_ldata;
        for (i = 0; i < XL_TX_LIST_CNT; i++) {
                cd->xl_tx_chain[i].xl_ptr = &ld->xl_tx_list[i];
                error = bus_dmamap_create(sc->xl_mtag, 0,
                    &cd->xl_tx_chain[i].xl_map);
                if (error)
                        return (error);
                cd->xl_tx_chain[i].xl_phys = ld->xl_tx_dmaaddr +
                    i * sizeof(struct xl_list);
                if (i == (XL_TX_LIST_CNT - 1))
                        cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[0];
                else
                        cd->xl_tx_chain[i].xl_next = &cd->xl_tx_chain[i + 1];
                if (i == 0)
                        cd->xl_tx_chain[i].xl_prev =
                            &cd->xl_tx_chain[XL_TX_LIST_CNT - 1];
                else
                        cd->xl_tx_chain[i].xl_prev =
                            &cd->xl_tx_chain[i - 1];
        }

        bzero(ld->xl_tx_list, XL_TX_LIST_SZ);
        /* Mark slot 0 empty; production starts at index 1. */
        ld->xl_tx_list[0].xl_status = htole32(XL_TXSTAT_EMPTY);

        cd->xl_tx_prod = 1;
        cd->xl_tx_cons = 1;
        cd->xl_tx_cnt = 0;

        bus_dmamap_sync(ld->xl_tx_tag, ld->xl_tx_dmamap, BUS_DMASYNC_PREWRITE);
        return (0);
}

/*
 * Initialize the RX descriptors and allocate mbufs for them. Note that
 * we arrange the descriptors in a closed ring, so that the last descriptor
 * points back to the first.
*/
static int
xl_list_rx_init(struct xl_softc *sc)
{
        struct xl_chain_data    *cd;
        struct xl_list_data     *ld;
        int                     error, i, next;
        u_int32_t               nextptr;

        XL_LOCK_ASSERT(sc);

        cd = &sc->xl_cdata;
        ld = &sc->xl_ldata;

        for (i = 0; i < XL_RX_LIST_CNT; i++) {
                cd->xl_rx_chain[i].xl_ptr = &ld->xl_rx_list[i];
                error = bus_dmamap_create(sc->xl_mtag, 0,
                    &cd->xl_rx_chain[i].xl_map);
                if (error)
                        return (error);
                /* Attach a fresh mbuf cluster to this descriptor. */
                error = xl_newbuf(sc, &cd->xl_rx_chain[i]);
                if (error)
                        return (error);
                /* Link descriptor i to i+1; the last wraps back to 0. */
                if (i == (XL_RX_LIST_CNT - 1))
                        next = 0;
                else
                        next = i + 1;
                nextptr = ld->xl_rx_dmaaddr +
                    next * sizeof(struct xl_list_onefrag);
                cd->xl_rx_chain[i].xl_next = &cd->xl_rx_chain[next];
                ld->xl_rx_list[i].xl_next = htole32(nextptr);
        }

        bus_dmamap_sync(ld->xl_rx_tag, ld->xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
        cd->xl_rx_head = &cd->xl_rx_chain[0];

        return (0);
}

/*
 * Initialize an RX descriptor and attach an MBUF cluster.
 * If we fail to do so, we need to leave the old mbuf and
 * the old DMA map untouched so that it can be reused.
 */
static int
xl_newbuf(struct xl_softc *sc, struct xl_chain_onefrag *c)
{
        struct mbuf             *m_new = NULL;
        bus_dmamap_t            map;
        int                     error;
        u_int32_t               baddr;

        XL_LOCK_ASSERT(sc);

        m_new = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
        if (m_new == NULL)
                return (ENOBUFS);

        m_new->m_len = m_new->m_pkthdr.len = MCLBYTES;

        /* Force longword alignment for packet payload.
*/
        m_adj(m_new, ETHER_ALIGN);

        /*
         * Load the new mbuf into the spare map first, so that on
         * failure the descriptor's existing mbuf/map stay intact.
         */
        error = bus_dmamap_load_mbuf(sc->xl_mtag, sc->xl_tmpmap, m_new,
            xl_dma_map_rxbuf, &baddr, BUS_DMA_NOWAIT);
        if (error) {
                m_freem(m_new);
                if_printf(sc->xl_ifp, "can't map mbuf (error %d)\n", error);
                return (error);
        }

        /* Swap the descriptor's map with the spare; reuse the old as spare. */
        bus_dmamap_unload(sc->xl_mtag, c->xl_map);
        map = c->xl_map;
        c->xl_map = sc->xl_tmpmap;
        sc->xl_tmpmap = map;
        c->xl_mbuf = m_new;
        c->xl_ptr->xl_frag.xl_len = htole32(m_new->m_len | XL_LAST_FRAG);
        c->xl_ptr->xl_status = 0;
        c->xl_ptr->xl_frag.xl_addr = htole32(baddr);
        bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREREAD);
        return (0);
}

/*
 * Walk the RX ring looking for a descriptor with a non-zero status
 * (i.e. a completed upload the head pointer skipped past).  Returns
 * EAGAIN and resets the head if one is found, 0 otherwise.
 */
static int
xl_rx_resync(struct xl_softc *sc)
{
        struct xl_chain_onefrag *pos;
        int                     i;

        XL_LOCK_ASSERT(sc);

        pos = sc->xl_cdata.xl_rx_head;

        for (i = 0; i < XL_RX_LIST_CNT; i++) {
                if (pos->xl_ptr->xl_status)
                        break;
                pos = pos->xl_next;
        }

        if (i == XL_RX_LIST_CNT)
                return (0);

        sc->xl_cdata.xl_rx_head = pos;

        return (EAGAIN);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 */
static void
xl_rxeof(struct xl_softc *sc)
{
        struct mbuf             *m;
        struct ifnet            *ifp = sc->xl_ifp;
        struct xl_chain_onefrag *cur_rx;
        int                     total_len = 0;
        u_int32_t               rxstat;

        XL_LOCK_ASSERT(sc);
again:
        bus_dmamap_sync(sc->xl_ldata.xl_rx_tag, sc->xl_ldata.xl_rx_dmamap,
            BUS_DMASYNC_POSTREAD);
        while ((rxstat = le32toh(sc->xl_cdata.xl_rx_head->xl_ptr->xl_status))) {
#ifdef DEVICE_POLLING
                if (ifp->if_flags & IFF_POLLING) {
                        /* In polling mode, honor the per-call rx budget. */
                        if (sc->rxcycles <= 0)
                                break;
                        sc->rxcycles--;
                }
#endif /* DEVICE_POLLING */
                cur_rx = sc->xl_cdata.xl_rx_head;
                sc->xl_cdata.xl_rx_head = cur_rx->xl_next;
                total_len = rxstat & XL_RXSTAT_LENMASK;

                /*
                 * Since we have told the chip to allow large frames,
                 * we need to trap giant frame errors in software. We allow
                 * a little more than the normal frame size to account for
                 * frames with VLAN tags.
*/
                if (total_len > XL_MAX_FRAMELEN)
                        rxstat |= (XL_RXSTAT_UP_ERROR|XL_RXSTAT_OVERSIZE);

                /*
                 * If an error occurs, update stats, clear the
                 * status word and leave the mbuf cluster in place:
                 * it should simply get re-used next time this descriptor
                 * comes up in the ring.
                 */
                if (rxstat & XL_RXSTAT_UP_ERROR) {
                        ifp->if_ierrors++;
                        cur_rx->xl_ptr->xl_status = 0;
                        bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
                            sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
                        continue;
                }

                /*
                 * If the error bit was not set, the upload complete
                 * bit should be set which means we have a valid packet.
                 * If not, something truly strange has happened.
                 */
                if (!(rxstat & XL_RXSTAT_UP_CMPLT)) {
                        if_printf(ifp,
                            "bad receive status -- packet dropped\n");
                        ifp->if_ierrors++;
                        cur_rx->xl_ptr->xl_status = 0;
                        bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
                            sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
                        continue;
                }

                /* No errors; receive the packet. */
                bus_dmamap_sync(sc->xl_mtag, cur_rx->xl_map,
                    BUS_DMASYNC_POSTREAD);
                m = cur_rx->xl_mbuf;

                /*
                 * Try to conjure up a new mbuf cluster. If that
                 * fails, it means we have an out of memory condition and
                 * should leave the buffer in place and continue. This will
                 * result in a lost packet, but there's little else we
                 * can do in this situation.
                 */
                if (xl_newbuf(sc, cur_rx)) {
                        ifp->if_ierrors++;
                        cur_rx->xl_ptr->xl_status = 0;
                        bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
                            sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);
                        continue;
                }
                bus_dmamap_sync(sc->xl_ldata.xl_rx_tag,
                    sc->xl_ldata.xl_rx_dmamap, BUS_DMASYNC_PREWRITE);

                ifp->if_ipackets++;
                m->m_pkthdr.rcvif = ifp;
                m->m_pkthdr.len = m->m_len = total_len;

                if (ifp->if_capenable & IFCAP_RXCSUM) {
                        /* Do IP checksum checking.
*/
                        if (rxstat & XL_RXSTAT_IPCKOK)
                                m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
                        if (!(rxstat & XL_RXSTAT_IPCKERR))
                                m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
                        if ((rxstat & XL_RXSTAT_TCPCOK &&
                             !(rxstat & XL_RXSTAT_TCPCKERR)) ||
                            (rxstat & XL_RXSTAT_UDPCKOK &&
                             !(rxstat & XL_RXSTAT_UDPCKERR))) {
                                m->m_pkthdr.csum_flags |=
                                    CSUM_DATA_VALID|CSUM_PSEUDO_HDR;
                                m->m_pkthdr.csum_data = 0xffff;
                        }
                }

                /* Drop the driver lock around the upcall into the stack. */
                XL_UNLOCK(sc);
                (*ifp->if_input)(ifp, m);
                XL_LOCK(sc);
        }

        /*
         * Handle the 'end of channel' condition. When the upload
         * engine hits the end of the RX ring, it will stall. This
         * is our cue to flush the RX ring, reload the uplist pointer
         * register and unstall the engine.
         * XXX This is actually a little goofy. With the ThunderLAN
         * chip, you get an interrupt when the receiver hits the end
         * of the receive ring, which tells you exactly when you
         * you need to reload the ring pointer. Here we have to
         * fake it. I'm mad at myself for not being clever enough
         * to avoid the use of a goto here.
         */
        if (CSR_READ_4(sc, XL_UPLIST_PTR) == 0 ||
            CSR_READ_4(sc, XL_UPLIST_STATUS) & XL_PKTSTAT_UP_STALLED) {
                CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL);
                xl_wait(sc);
                CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr);
                sc->xl_cdata.xl_rx_head = &sc->xl_cdata.xl_rx_chain[0];
                CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL);
                goto again;
        }
}

/*
 * Taskqueue wrapper for xl_rxeof().
 */
static void
xl_rxeof_task(void *arg, int pending)
{
        struct xl_softc *sc = (struct xl_softc *)arg;

        XL_LOCK(sc);
        xl_rxeof(sc);
        XL_UNLOCK(sc);
}

/*
 * A frame was downloaded to the chip. It's safe for us to clean up
 * the list buffers.
 */
static void
xl_txeof(struct xl_softc *sc)
{
        struct xl_chain         *cur_tx;
        struct ifnet            *ifp = sc->xl_ifp;

        XL_LOCK_ASSERT(sc);

        /* Clear the timeout timer. */
        ifp->if_timer = 0;

        /*
         * Go through our tx list and free mbufs for those
         * frames that have been downloaded.
Note: the 3c905B
         * sets a special bit in the status word to let us
         * know that a frame has been downloaded, but the
         * original 3c900/3c905 adapters don't do that.
         * Consequently, we have to use a different test if
         * xl_type != XL_TYPE_905B.
         */
        while (sc->xl_cdata.xl_tx_head != NULL) {
                cur_tx = sc->xl_cdata.xl_tx_head;
                /* Non-zero downlist pointer means the chip is still busy. */
                if (CSR_READ_4(sc, XL_DOWNLIST_PTR))
                        break;

                sc->xl_cdata.xl_tx_head = cur_tx->xl_next;
                bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map,
                    BUS_DMASYNC_POSTWRITE);
                bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map);
                m_freem(cur_tx->xl_mbuf);
                cur_tx->xl_mbuf = NULL;
                ifp->if_opackets++;

                /* Return the descriptor to the free list. */
                cur_tx->xl_next = sc->xl_cdata.xl_tx_free;
                sc->xl_cdata.xl_tx_free = cur_tx;
        }

        if (sc->xl_cdata.xl_tx_head == NULL) {
                ifp->if_flags &= ~IFF_OACTIVE;
                sc->xl_cdata.xl_tx_tail = NULL;
        } else {
                /* Still work pending: kick the download engine if stalled. */
                if (CSR_READ_4(sc, XL_DMACTL) & XL_DMACTL_DOWN_STALLED ||
                    !CSR_READ_4(sc, XL_DOWNLIST_PTR)) {
                        CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
                            sc->xl_cdata.xl_tx_head->xl_phys);
                        CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
                }
        }
}

/*
 * 3c905B TX completion: consume descriptors from xl_tx_cons up to
 * xl_tx_prod whose DL_COMPLETE status bit is set.
 */
static void
xl_txeof_90xB(struct xl_softc *sc)
{
        struct xl_chain         *cur_tx = NULL;
        struct ifnet            *ifp = sc->xl_ifp;
        int                     idx;

        XL_LOCK_ASSERT(sc);

        bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
            BUS_DMASYNC_POSTREAD);
        idx = sc->xl_cdata.xl_tx_cons;
        while (idx != sc->xl_cdata.xl_tx_prod) {
                cur_tx = &sc->xl_cdata.xl_tx_chain[idx];

                if (!(le32toh(cur_tx->xl_ptr->xl_status) &
                    XL_TXSTAT_DL_COMPLETE))
                        break;

                if (cur_tx->xl_mbuf != NULL) {
                        bus_dmamap_sync(sc->xl_mtag, cur_tx->xl_map,
                            BUS_DMASYNC_POSTWRITE);
                        bus_dmamap_unload(sc->xl_mtag, cur_tx->xl_map);
                        m_freem(cur_tx->xl_mbuf);
                        cur_tx->xl_mbuf = NULL;
                }

                ifp->if_opackets++;

                sc->xl_cdata.xl_tx_cnt--;
                XL_INC(idx, XL_TX_LIST_CNT);
                ifp->if_timer = 0;
        }

        sc->xl_cdata.xl_tx_cons = idx;

        /* Progress was made, so the queue can accept more work. */
        if (cur_tx != NULL)
                ifp->if_flags &= ~IFF_OACTIVE;
}

/*
 * TX 'end of channel' interrupt handler. Actually, we should
 * only get a 'TX complete' interrupt if there's a transmit error,
 * so this is really TX error handler.
*/
static void
xl_txeoc(struct xl_softc *sc)
{
        u_int8_t                txstat;

        XL_LOCK_ASSERT(sc);

        /* Drain all queued TX status bytes from the chip. */
        while ((txstat = CSR_READ_1(sc, XL_TX_STATUS))) {
                if (txstat & XL_TXSTATUS_UNDERRUN ||
                    txstat & XL_TXSTATUS_JABBER ||
                    txstat & XL_TXSTATUS_RECLAIM) {
                        if_printf(sc->xl_ifp, "transmission error: %x\n",
                            txstat);
                        CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
                        xl_wait(sc);
                        /* Re-seed the downlist pointer after the reset. */
                        if (sc->xl_type == XL_TYPE_905B) {
                                if (sc->xl_cdata.xl_tx_cnt) {
                                        int             i;
                                        struct xl_chain *c;

                                        i = sc->xl_cdata.xl_tx_cons;
                                        c = &sc->xl_cdata.xl_tx_chain[i];
                                        CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
                                            c->xl_phys);
                                        CSR_WRITE_1(sc, XL_DOWN_POLL, 64);
                                }
                        } else {
                                if (sc->xl_cdata.xl_tx_head != NULL)
                                        CSR_WRITE_4(sc, XL_DOWNLIST_PTR,
                                            sc->xl_cdata.xl_tx_head->xl_phys);
                        }
                        /*
                         * Remember to set this for the
                         * first generation 3c90X chips.
                         */
                        CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8);
                        if (txstat & XL_TXSTATUS_UNDERRUN &&
                            sc->xl_tx_thresh < XL_PACKET_SIZE) {
                                sc->xl_tx_thresh += XL_MIN_FRAMELEN;
                                if_printf(sc->xl_ifp,
                "tx underrun, increasing tx start threshold to %d bytes\n",
                                    sc->xl_tx_thresh);
                        }
                        CSR_WRITE_2(sc, XL_COMMAND,
                            XL_CMD_TX_SET_START|sc->xl_tx_thresh);
                        if (sc->xl_type == XL_TYPE_905B) {
                                CSR_WRITE_2(sc, XL_COMMAND,
                                    XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4));
                        }
                        CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
                        CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
                } else {
                        CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE);
                        CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);
                }
                /*
                 * Write an arbitrary byte to the TX_STATUS register
                 * to clear this interrupt/error and advance to the next.
                 */
                CSR_WRITE_1(sc, XL_TX_STATUS, 0x01);
        }
}

/* Interrupt handler: dispatch RX/TX completion, errors and stats. */
static void
xl_intr(void *arg)
{
        struct xl_softc         *sc = arg;
        struct ifnet            *ifp = sc->xl_ifp;
        u_int16_t               status;

        XL_LOCK(sc);

#ifdef DEVICE_POLLING
        /* Already polling: the poll routine does the work, not the ISR. */
        if (ifp->if_flags & IFF_POLLING) {
                XL_UNLOCK(sc);
                return;
        }

        if ((ifp->if_capenable & IFCAP_POLLING) &&
            ether_poll_register(xl_poll, ifp)) {
                /* Disable interrupts.
*/
                CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
                CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
                if (sc->xl_flags & XL_FLAG_FUNCREG)
                        bus_space_write_4(sc->xl_ftag, sc->xl_fhandle,
                            4, 0x8000);
                xl_poll_locked(ifp, 0, 1);
                XL_UNLOCK(sc);
                return;
        }
#endif /* DEVICE_POLLING */

        /* 0xFFFF reads back when the card has been removed/powered off. */
        while ((status = CSR_READ_2(sc, XL_STATUS)) & XL_INTRS &&
            status != 0xFFFF) {
                CSR_WRITE_2(sc, XL_COMMAND,
                    XL_CMD_INTR_ACK|(status & XL_INTRS));

                if (status & XL_STAT_UP_COMPLETE) {
                        int     curpkts;

                        /* If rxeof made no progress, resync the ring head. */
                        curpkts = ifp->if_ipackets;
                        xl_rxeof(sc);
                        if (curpkts == ifp->if_ipackets) {
                                while (xl_rx_resync(sc))
                                        xl_rxeof(sc);
                        }
                }

                if (status & XL_STAT_DOWN_COMPLETE) {
                        if (sc->xl_type == XL_TYPE_905B)
                                xl_txeof_90xB(sc);
                        else
                                xl_txeof(sc);
                }

                if (status & XL_STAT_TX_COMPLETE) {
                        ifp->if_oerrors++;
                        xl_txeoc(sc);
                }

                if (status & XL_STAT_ADFAIL) {
                        /* Host adapter failure: full reset and re-init. */
                        xl_reset(sc);
                        xl_init_locked(sc);
                }

                if (status & XL_STAT_STATSOFLOW) {
                        sc->xl_stats_no_timeout = 1;
                        xl_stats_update_locked(sc);
                        sc->xl_stats_no_timeout = 0;
                }
        }

        if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
                if (sc->xl_type == XL_TYPE_905B)
                        xl_start_90xB_locked(ifp);
                else
                        xl_start_locked(ifp);
        }

        XL_UNLOCK(sc);
}

#ifdef DEVICE_POLLING
/* Locked wrapper so the polling core can call us without the driver lock. */
static void
xl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
        struct xl_softc *sc = ifp->if_softc;

        XL_LOCK(sc);
        xl_poll_locked(ifp, cmd, count);
        XL_UNLOCK(sc);
}

static void
xl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
        struct xl_softc *sc = ifp->if_softc;

        XL_LOCK_ASSERT(sc);

        if (!(ifp->if_capenable & IFCAP_POLLING)) {
                ether_poll_deregister(ifp);
                cmd = POLL_DEREGISTER;
        }

        if (cmd == POLL_DEREGISTER) {
                /* Final call; enable interrupts.
*/
                CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF);
                CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS);
                if (sc->xl_flags & XL_FLAG_FUNCREG)
                        bus_space_write_4(sc->xl_ftag, sc->xl_fhandle,
                            4, 0x8000);
                return;
        }

        /* Budget the RX work for this poll tick. */
        sc->rxcycles = count;
        xl_rxeof(sc);
        if (sc->xl_type == XL_TYPE_905B)
                xl_txeof_90xB(sc);
        else
                xl_txeof(sc);

        if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
                if (sc->xl_type == XL_TYPE_905B)
                        xl_start_90xB_locked(ifp);
                else
                        xl_start_locked(ifp);
        }

        if (cmd == POLL_AND_CHECK_STATUS) {
                u_int16_t status;

                status = CSR_READ_2(sc, XL_STATUS);
                if (status & XL_INTRS && status != 0xFFFF) {
                        CSR_WRITE_2(sc, XL_COMMAND,
                            XL_CMD_INTR_ACK|(status & XL_INTRS));

                        if (status & XL_STAT_TX_COMPLETE) {
                                ifp->if_oerrors++;
                                xl_txeoc(sc);
                        }

                        if (status & XL_STAT_ADFAIL) {
                                xl_reset(sc);
                                xl_init_locked(sc);
                        }

                        if (status & XL_STAT_STATSOFLOW) {
                                sc->xl_stats_no_timeout = 1;
                                xl_stats_update_locked(sc);
                                sc->xl_stats_no_timeout = 0;
                        }
                }
        }
}
#endif /* DEVICE_POLLING */

/*
 * XXX: This is an entry point for callout which needs to take the lock.
 */
static void
xl_stats_update(void *xsc)
{
        struct xl_softc *sc = xsc;

        XL_LOCK(sc);
        xl_stats_update_locked(sc);
        XL_UNLOCK(sc);
}

/*
 * Read and clear the hardware statistics registers (window 6),
 * fold them into the interface counters, and re-arm the callout.
 */
static void
xl_stats_update_locked(struct xl_softc *sc)
{
        struct ifnet            *ifp = sc->xl_ifp;
        struct xl_stats         xl_stats;
        u_int8_t                *p;
        int                     i;
        struct mii_data         *mii = NULL;

        XL_LOCK_ASSERT(sc);

        bzero((char *)&xl_stats, sizeof(struct xl_stats));

        if (sc->xl_miibus != NULL)
                mii = device_get_softc(sc->xl_miibus);

        p = (u_int8_t *)&xl_stats;

        /* Read all the stats registers. */
        XL_SEL_WIN(6);

        for (i = 0; i < 16; i++)
                *p++ = CSR_READ_1(sc, XL_W6_CARRIER_LOST + i);

        ifp->if_ierrors += xl_stats.xl_rx_overrun;

        ifp->if_collisions += xl_stats.xl_tx_multi_collision +
            xl_stats.xl_tx_single_collision + xl_stats.xl_tx_late_collision;

        /*
         * Boomerang and cyclone chips have an extra stats counter
         * in window 4 (BadSSD). We have to read this too in order
         * to clear out all the stats registers and avoid a statsoflow
         * interrupt.
*/
        XL_SEL_WIN(4);
        CSR_READ_1(sc, XL_W4_BADSSD);

        if ((mii != NULL) && (!sc->xl_stats_no_timeout))
                mii_tick(mii);

        XL_SEL_WIN(7);

        /* Re-arm only when called from the callout, not from the ISR. */
        if (!sc->xl_stats_no_timeout)
                sc->xl_stat_ch = timeout(xl_stats_update, sc, hz);
}

/*
 * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data
 * pointers to the fragment pointers.
 * Returns 0 on success; on failure the mbuf has been freed and 1 is
 * returned.
 */
static int
xl_encap(struct xl_softc *sc, struct xl_chain *c, struct mbuf *m_head)
{
        int                     error;
        u_int32_t               status;
        struct ifnet            *ifp = sc->xl_ifp;

        XL_LOCK_ASSERT(sc);

        /*
         * Start packing the mbufs in this chain into
         * the fragment pointers. Stop when we run out
         * of fragments or hit the end of the mbuf chain.
         */
        error = bus_dmamap_load_mbuf(sc->xl_mtag, c->xl_map, m_head,
            xl_dma_map_txbuf, c->xl_ptr, BUS_DMA_NOWAIT);

        if (error && error != EFBIG) {
                m_freem(m_head);
                if_printf(ifp, "can't map mbuf (error %d)\n", error);
                return (1);
        }

        /*
         * Handle special case: we used up all 63 fragments,
         * but we have more mbufs left in the chain. Copy the
         * data into an mbuf cluster. Note that we don't
         * bother clearing the values in the other fragment
         * pointers/counters; it wouldn't gain us anything,
         * and would waste cycles.
*/
        if (error) {
                struct mbuf             *m_new;

                /* EFBIG: too many fragments — defrag and retry once. */
                m_new = m_defrag(m_head, M_DONTWAIT);
                if (m_new == NULL) {
                        m_freem(m_head);
                        return (1);
                } else {
                        m_head = m_new;
                }

                error = bus_dmamap_load_mbuf(sc->xl_mtag, c->xl_map,
                        m_head, xl_dma_map_txbuf, c->xl_ptr, BUS_DMA_NOWAIT);
                if (error) {
                        m_freem(m_head);
                        if_printf(ifp, "can't map mbuf (error %d)\n", error);
                        return (1);
                }
        }

        if (sc->xl_type == XL_TYPE_905B) {
                status = XL_TXSTAT_RND_DEFEAT;

#ifndef XL905B_TXCSUM_BROKEN
                /* Request hardware checksum offload as appropriate. */
                if (m_head->m_pkthdr.csum_flags) {
                        if (m_head->m_pkthdr.csum_flags & CSUM_IP)
                                status |= XL_TXSTAT_IPCKSUM;
                        if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
                                status |= XL_TXSTAT_TCPCKSUM;
                        if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
                                status |= XL_TXSTAT_UDPCKSUM;
                }
#endif
                c->xl_ptr->xl_status = htole32(status);
        }

        c->xl_mbuf = m_head;
        bus_dmamap_sync(sc->xl_mtag, c->xl_map, BUS_DMASYNC_PREWRITE);
        return (0);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit lists. We also save a
 * copy of the pointers since the transmit list fragment pointers are
 * physical addresses.
 */
static void
xl_start(struct ifnet *ifp)
{
        struct xl_softc         *sc = ifp->if_softc;

        XL_LOCK(sc);

        if (sc->xl_type == XL_TYPE_905B)
                xl_start_90xB_locked(ifp);
        else
                xl_start_locked(ifp);

        XL_UNLOCK(sc);
}

static void
xl_start_locked(struct ifnet *ifp)
{
        struct xl_softc         *sc = ifp->if_softc;
        struct mbuf             *m_head = NULL;
        struct xl_chain         *prev = NULL, *cur_tx = NULL, *start_tx;
        struct xl_chain         *prev_tx;
        u_int32_t               status;
        int                     error;

        XL_LOCK_ASSERT(sc);

        /*
         * Check for an available queue slot. If there are none,
         * punt.
         */
        if (sc->xl_cdata.xl_tx_free == NULL) {
                xl_txeoc(sc);
                xl_txeof(sc);
                if (sc->xl_cdata.xl_tx_free == NULL) {
                        ifp->if_flags |= IFF_OACTIVE;
                        return;
                }
        }

        start_tx = sc->xl_cdata.xl_tx_free;

        while (sc->xl_cdata.xl_tx_free != NULL) {
                IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
                if (m_head == NULL)
                        break;

                /* Pick a descriptor off the free list.
*/
                prev_tx = cur_tx;
                cur_tx = sc->xl_cdata.xl_tx_free;

                /* Pack the data into the descriptor. */
                error = xl_encap(sc, cur_tx, m_head);
                if (error) {
                        /* Encap freed the mbuf; keep the descriptor free. */
                        cur_tx = prev_tx;
                        continue;
                }

                sc->xl_cdata.xl_tx_free = cur_tx->xl_next;
                cur_tx->xl_next = NULL;

                /* Chain it together. */
                if (prev != NULL) {
                        prev->xl_next = cur_tx;
                        prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
                }
                prev = cur_tx;

                /*
                 * If there's a BPF listener, bounce a copy of this frame
                 * to him.
                 */
                BPF_MTAP(ifp, cur_tx->xl_mbuf);
        }

        /*
         * If there are no packets queued, bail.
         */
        if (cur_tx == NULL)
                return;

        /*
         * Place the request for the upload interrupt
         * in the last descriptor in the chain. This way, if
         * we're chaining several packets at once, we'll only
         * get an interrupt once for the whole chain rather than
         * once for each packet.
         */
        cur_tx->xl_ptr->xl_status = htole32(le32toh(cur_tx->xl_ptr->xl_status) |
            XL_TXSTAT_DL_INTR);
        bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap,
            BUS_DMASYNC_PREWRITE);

        /*
         * Queue the packets. If the TX channel is clear, update
         * the downlist pointer register.
         */
        CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL);
        xl_wait(sc);

        if (sc->xl_cdata.xl_tx_head != NULL) {
                /* Append to the existing chain and drop the old chain's
                 * interrupt-request bit so only the new tail interrupts. */
                sc->xl_cdata.xl_tx_tail->xl_next = start_tx;
                sc->xl_cdata.xl_tx_tail->xl_ptr->xl_next =
                    htole32(start_tx->xl_phys);
                status = sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status;
                sc->xl_cdata.xl_tx_tail->xl_ptr->xl_status =
                    htole32(le32toh(status) & ~XL_TXSTAT_DL_INTR);
                sc->xl_cdata.xl_tx_tail = cur_tx;
        } else {
                sc->xl_cdata.xl_tx_head = start_tx;
                sc->xl_cdata.xl_tx_tail = cur_tx;
        }

        if (!CSR_READ_4(sc, XL_DOWNLIST_PTR))
                CSR_WRITE_4(sc, XL_DOWNLIST_PTR, start_tx->xl_phys);

        CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL);

        XL_SEL_WIN(7);

        /*
         * Set a timeout in case the chip goes out to lunch.
*/
        ifp->if_timer = 5;

        /*
         * XXX Under certain conditions, usually on slower machines
         * where interrupts may be dropped, it's possible for the
         * adapter to chew up all the buffers in the receive ring
         * and stall, without us being able to do anything about it.
         * To guard against this, we need to make a pass over the
         * RX queue to make sure there aren't any packets pending.
         * Doing it here means we can flush the receive ring at the
         * same time the chip is DMAing the transmit descriptors we
         * just gave it.
         *
         * 3Com goes to some lengths to emphasize the Parallel Tasking (tm)
         * nature of their chips in all their marketing literature;
         * we may as well take advantage of it. :)
         */
        taskqueue_enqueue(taskqueue_swi, &sc->xl_task);
}

/* 3c905B transmit path: fill the ring at xl_tx_prod. */
static void
xl_start_90xB_locked(struct ifnet *ifp)
{
        struct xl_softc         *sc = ifp->if_softc;
        struct mbuf             *m_head = NULL;
        struct xl_chain         *prev = NULL, *cur_tx = NULL, *start_tx;
        struct xl_chain         *prev_tx;
        int                     error, idx;

        XL_LOCK_ASSERT(sc);

        if (ifp->if_flags & IFF_OACTIVE)
                return;

        idx = sc->xl_cdata.xl_tx_prod;
        start_tx = &sc->xl_cdata.xl_tx_chain[idx];

        while (sc->xl_cdata.xl_tx_chain[idx].xl_mbuf == NULL) {
                /* Keep a few slots in reserve before declaring full. */
                if ((XL_TX_LIST_CNT - sc->xl_cdata.xl_tx_cnt) < 3) {
                        ifp->if_flags |= IFF_OACTIVE;
                        break;
                }

                IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
                if (m_head == NULL)
                        break;

                prev_tx = cur_tx;
                cur_tx = &sc->xl_cdata.xl_tx_chain[idx];

                /* Pack the data into the descriptor. */
                error = xl_encap(sc, cur_tx, m_head);
                if (error) {
                        cur_tx = prev_tx;
                        continue;
                }

                /* Chain it together. */
                if (prev != NULL)
                        prev->xl_ptr->xl_next = htole32(cur_tx->xl_phys);
                prev = cur_tx;

                /*
                 * If there's a BPF listener, bounce a copy of this frame
                 * to him.
                 */
                BPF_MTAP(ifp, cur_tx->xl_mbuf);

                XL_INC(idx, XL_TX_LIST_CNT);
                sc->xl_cdata.xl_tx_cnt++;
        }

        /*
         * If there are no packets queued, bail.
         */
        if (cur_tx == NULL)
                return;

        /*
         * Place the request for the upload interrupt
         * in the last descriptor in the chain.
This way, if * we're chaining several packets at once, we'll only * get an interupt once for the whole chain rather than * once for each packet. */ cur_tx->xl_ptr->xl_status = htole32(le32toh(cur_tx->xl_ptr->xl_status) | XL_TXSTAT_DL_INTR); bus_dmamap_sync(sc->xl_ldata.xl_tx_tag, sc->xl_ldata.xl_tx_dmamap, BUS_DMASYNC_PREWRITE); /* Start transmission */ sc->xl_cdata.xl_tx_prod = idx; start_tx->xl_prev->xl_ptr->xl_next = htole32(start_tx->xl_phys); /* * Set a timeout in case the chip goes out to lunch. */ ifp->if_timer = 5; } static void xl_init(void *xsc) { struct xl_softc *sc = xsc; XL_LOCK(sc); xl_init_locked(sc); XL_UNLOCK(sc); } static void xl_init_locked(struct xl_softc *sc) { struct ifnet *ifp = sc->xl_ifp; int error, i; u_int16_t rxfilt = 0; struct mii_data *mii = NULL; XL_LOCK_ASSERT(sc); /* * Cancel pending I/O and free all RX/TX buffers. */ xl_stop(sc); if (sc->xl_miibus == NULL) { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET); xl_wait(sc); } CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); xl_wait(sc); DELAY(10000); if (sc->xl_miibus != NULL) mii = device_get_softc(sc->xl_miibus); /* Init our MAC address */ XL_SEL_WIN(2); for (i = 0; i < ETHER_ADDR_LEN; i++) { CSR_WRITE_1(sc, XL_W2_STATION_ADDR_LO + i, IFP2ENADDR(sc->xl_ifp)[i]); } /* Clear the station mask. */ for (i = 0; i < 3; i++) CSR_WRITE_2(sc, XL_W2_STATION_MASK_LO + (i * 2), 0); #ifdef notdef /* Reset TX and RX. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET); xl_wait(sc); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET); xl_wait(sc); #endif /* Init circular RX list. */ error = xl_list_rx_init(sc); if (error) { if_printf(ifp, "initialization of the rx ring failed (%d)\n", error); xl_stop(sc); return; } /* Init TX descriptors. */ if (sc->xl_type == XL_TYPE_905B) error = xl_list_tx_init_90xB(sc); else error = xl_list_tx_init(sc); if (error) { if_printf(ifp, "initialization of the tx ring failed (%d)\n", error); xl_stop(sc); return; } /* * Set the TX freethresh value. 
* Note that this has no effect on 3c905B "cyclone" * cards but is required for 3c900/3c905 "boomerang" * cards in order to enable the download engine. */ CSR_WRITE_1(sc, XL_TX_FREETHRESH, XL_PACKET_SIZE >> 8); /* Set the TX start threshold for best performance. */ sc->xl_tx_thresh = XL_MIN_FRAMELEN; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_SET_START|sc->xl_tx_thresh); /* * If this is a 3c905B, also set the tx reclaim threshold. * This helps cut down on the number of tx reclaim errors * that could happen on a busy network. The chip multiplies * the register value by 16 to obtain the actual threshold * in bytes, so we divide by 16 when setting the value here. * The existing threshold value can be examined by reading * the register at offset 9 in window 5. */ if (sc->xl_type == XL_TYPE_905B) { CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_SET_TX_RECLAIM|(XL_PACKET_SIZE >> 4)); } /* Set RX filter bits. */ XL_SEL_WIN(5); rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER); /* Set the individual bit to receive frames for this host only. */ rxfilt |= XL_RXFILTER_INDIVIDUAL; /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) { rxfilt |= XL_RXFILTER_ALLFRAMES; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); } else { rxfilt &= ~XL_RXFILTER_ALLFRAMES; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); } /* * Set capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) { rxfilt |= XL_RXFILTER_BROADCAST; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); } else { rxfilt &= ~XL_RXFILTER_BROADCAST; CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_FILT|rxfilt); } /* * Program the multicast filter, if necessary. */ if (sc->xl_type == XL_TYPE_905B) xl_setmulti_hash(sc); else xl_setmulti(sc); /* * Load the address of the RX list. We have to * stall the upload engine before we can manipulate * the uplist pointer register, then unstall it when * we're finished. 
We also have to wait for the * stall command to complete before proceeding. * Note that we have to do this after any RX resets * have completed since the uplist register is cleared * by a reset. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_STALL); xl_wait(sc); CSR_WRITE_4(sc, XL_UPLIST_PTR, sc->xl_ldata.xl_rx_dmaaddr); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_UP_UNSTALL); xl_wait(sc); if (sc->xl_type == XL_TYPE_905B) { /* Set polling interval */ CSR_WRITE_1(sc, XL_DOWN_POLL, 64); /* Load the address of the TX list */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_STALL); xl_wait(sc); CSR_WRITE_4(sc, XL_DOWNLIST_PTR, sc->xl_cdata.xl_tx_chain[0].xl_phys); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_DOWN_UNSTALL); xl_wait(sc); } /* * If the coax transceiver is on, make sure to enable * the DC-DC converter. */ XL_SEL_WIN(3); if (sc->xl_xcvr == XL_XCVR_COAX) CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_START); else CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP); /* * increase packet size to allow reception of 802.1q or ISL packets. * For the 3c90x chip, set the 'allow large packets' bit in the MAC * control register. For 3c90xB/C chips, use the RX packet size * register. */ if (sc->xl_type == XL_TYPE_905B) CSR_WRITE_2(sc, XL_W3_MAXPKTSIZE, XL_PACKET_SIZE); else { u_int8_t macctl; macctl = CSR_READ_1(sc, XL_W3_MAC_CTRL); macctl |= XL_MACCTRL_ALLOW_LARGE_PACK; CSR_WRITE_1(sc, XL_W3_MAC_CTRL, macctl); } /* Clear out the stats counters. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE); sc->xl_stats_no_timeout = 1; xl_stats_update_locked(sc); sc->xl_stats_no_timeout = 0; XL_SEL_WIN(4); CSR_WRITE_2(sc, XL_W4_NET_DIAG, XL_NETDIAG_UPPER_BYTES_ENABLE); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_ENABLE); /* * Enable interrupts. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|0xFF); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|XL_INTRS); #ifdef DEVICE_POLLING /* Disable interrupts if we are polling. 
*/ if (ifp->if_flags & IFF_POLLING) CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0); else #endif /* DEVICE_POLLING */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|XL_INTRS); if (sc->xl_flags & XL_FLAG_FUNCREG) bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000); /* Set the RX early threshold */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_SET_THRESH|(XL_PACKET_SIZE >>2)); CSR_WRITE_2(sc, XL_DMACTL, XL_DMACTL_UP_RX_EARLY); /* Enable receiver and transmitter. */ CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_ENABLE); xl_wait(sc); CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_ENABLE); xl_wait(sc); /* XXX Downcall to miibus. */ if (mii != NULL) mii_mediachg(mii); /* Select window 7 for normal operations. */ XL_SEL_WIN(7); ifp->if_flags |= IFF_RUNNING; ifp->if_flags &= ~IFF_OACTIVE; sc->xl_stat_ch = timeout(xl_stats_update, sc, hz); } /* * Set media options. */ static int xl_ifmedia_upd(struct ifnet *ifp) { struct xl_softc *sc = ifp->if_softc; struct ifmedia *ifm = NULL; struct mii_data *mii = NULL; /*XL_LOCK_ASSERT(sc);*/ if (sc->xl_miibus != NULL) mii = device_get_softc(sc->xl_miibus); if (mii == NULL) ifm = &sc->ifmedia; else ifm = &mii->mii_media; switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_100_FX: case IFM_10_FL: case IFM_10_2: case IFM_10_5: xl_setmode(sc, ifm->ifm_media); return (0); break; default: break; } if (sc->xl_media & XL_MEDIAOPT_MII || sc->xl_media & XL_MEDIAOPT_BTX || sc->xl_media & XL_MEDIAOPT_BT4) { xl_init(sc); /* XXX */ } else { xl_setmode(sc, ifm->ifm_media); } return (0); } /* * Report current media status. 
*/
static void
xl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct xl_softc		*sc = ifp->if_softc;
	u_int32_t		icfg;
	u_int16_t		status = 0;
	struct mii_data		*mii = NULL;

	/*XL_LOCK_ASSERT(sc);*/

	if (sc->xl_miibus != NULL)
		mii = device_get_softc(sc->xl_miibus);

	XL_SEL_WIN(4);
	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);

	/* The active connector lives in the internal config register. */
	XL_SEL_WIN(3);
	icfg = CSR_READ_4(sc, XL_W3_INTERNAL_CFG) & XL_ICFG_CONNECTOR_MASK;
	icfg >>= XL_ICFG_CONNECTOR_BITS;

	ifmr->ifm_active = IFM_ETHER;
	ifmr->ifm_status = IFM_AVALID;

	/*
	 * NB: XL_MEDIASTAT_CARRIER set means carrier *lost* (see the same
	 * test in xl_watchdog()), so link is active when the bit is clear.
	 */
	if ((status & XL_MEDIASTAT_CARRIER) == 0)
		ifmr->ifm_status |= IFM_ACTIVE;

	switch (icfg) {
	case XL_XCVR_10BT:
		ifmr->ifm_active = IFM_ETHER|IFM_10_T;
		if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
			ifmr->ifm_active |= IFM_FDX;
		else
			ifmr->ifm_active |= IFM_HDX;
		break;
	case XL_XCVR_AUI:
		if (sc->xl_type == XL_TYPE_905B &&
		    sc->xl_media == XL_MEDIAOPT_10FL) {
			ifmr->ifm_active = IFM_ETHER|IFM_10_FL;
			if (CSR_READ_1(sc, XL_W3_MAC_CTRL) & XL_MACCTRL_DUPLEX)
				ifmr->ifm_active |= IFM_FDX;
			else
				ifmr->ifm_active |= IFM_HDX;
		} else
			ifmr->ifm_active = IFM_ETHER|IFM_10_5;
		break;
	case XL_XCVR_COAX:
		ifmr->ifm_active = IFM_ETHER|IFM_10_2;
		break;
	/*
	 * XXX MII and BTX/AUTO should be separate cases.
	 */
	case XL_XCVR_100BTX:
	case XL_XCVR_AUTO:
	case XL_XCVR_MII:
		if (mii != NULL) {
			mii_pollstat(mii);
			ifmr->ifm_active = mii->mii_media_active;
			ifmr->ifm_status = mii->mii_media_status;
		}
		break;
	case XL_XCVR_100BFX:
		ifmr->ifm_active = IFM_ETHER|IFM_100_FX;
		break;
	default:
		if_printf(ifp, "unknown XCVR type: %d\n", icfg);
		break;
	}
}

/*
 * Handle socket ioctls: interface flags (promiscuous toggling without a
 * full reinit), multicast list changes, media changes/queries, and
 * capability changes.
 */
static int
xl_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct xl_softc		*sc = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *) data;
	int			error = 0;
	struct mii_data		*mii = NULL;
	u_int8_t		rxfilt;

	switch (command) {
	case SIOCSIFFLAGS:
		XL_LOCK(sc);

		XL_SEL_WIN(5);
		rxfilt = CSR_READ_1(sc, XL_W5_RX_FILTER);
		if (ifp->if_flags & IFF_UP) {
			/*
			 * Toggle only the ALLFRAMES filter bit when just
			 * IFF_PROMISC changed while running, avoiding a
			 * full reinitialization.
			 */
			if (ifp->if_flags & IFF_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->xl_if_flags & IFF_PROMISC)) {
				rxfilt |= XL_RXFILTER_ALLFRAMES;
				CSR_WRITE_2(sc, XL_COMMAND,
				    XL_CMD_RX_SET_FILT|rxfilt);
				XL_SEL_WIN(7);
			} else if (ifp->if_flags & IFF_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->xl_if_flags & IFF_PROMISC) {
				rxfilt &= ~XL_RXFILTER_ALLFRAMES;
				CSR_WRITE_2(sc, XL_COMMAND,
				    XL_CMD_RX_SET_FILT|rxfilt);
				XL_SEL_WIN(7);
			} else {
				if ((ifp->if_flags & IFF_RUNNING) == 0)
					xl_init_locked(sc);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				xl_stop(sc);
		}
		/* Remember flags so the next call can detect transitions. */
		sc->xl_if_flags = ifp->if_flags;
		XL_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* XXX Downcall from if_addmulti() possibly with locks held. */
		XL_LOCK(sc);
		if (sc->xl_type == XL_TYPE_905B)
			xl_setmulti_hash(sc);
		else
			xl_setmulti(sc);
		XL_UNLOCK(sc);
		error = 0;
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		/* XXX Downcall from ifmedia possibly with locks held. */
		/*XL_LOCK(sc);*/
		if (sc->xl_miibus != NULL)
			mii = device_get_softc(sc->xl_miibus);
		if (mii == NULL)
			error = ifmedia_ioctl(ifp, ifr,
			    &sc->ifmedia, command);
		else
			error = ifmedia_ioctl(ifp, ifr,
			    &mii->mii_media, command);
		/*XL_UNLOCK(sc);*/
		break;
	case SIOCSIFCAP:
		XL_LOCK(sc);
		/*
		 * NOTE(review): ifr_reqcap is taken verbatim; it is not
		 * masked against ifp->if_capabilities here — verify callers
		 * can't enable capabilities the hardware lacks.
		 */
		ifp->if_capenable = ifr->ifr_reqcap;
		if (ifp->if_capenable & IFCAP_TXCSUM)
			ifp->if_hwassist = XL905B_CSUM_FEATURES;
		else
			ifp->if_hwassist = 0;
		XL_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}

/*
 * XXX: Invoked from ifnet slow timer. Lock coverage needed.
 *
 * TX watchdog: the chain set if_timer = 5 when queueing; if it expires
 * the chip wedged, so reap completed work, reset, reinit, and restart
 * any still-queued transmissions.
 */
static void
xl_watchdog(struct ifnet *ifp)
{
	struct xl_softc		*sc = ifp->if_softc;
	u_int16_t		status = 0;

	XL_LOCK(sc);

	ifp->if_oerrors++;
	XL_SEL_WIN(4);
	status = CSR_READ_2(sc, XL_W4_MEDIA_STATUS);
	if_printf(ifp, "watchdog timeout\n");

	/* Bit set == no carrier; matches the test in xl_ifmedia_sts(). */
	if (status & XL_MEDIASTAT_CARRIER)
		if_printf(ifp, "no carrier - transceiver cable problem?\n");

	xl_txeoc(sc);
	xl_txeof(sc);
	xl_rxeof(sc);
	xl_reset(sc);
	xl_init_locked(sc);

	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		if (sc->xl_type == XL_TYPE_905B)
			xl_start_90xB_locked(ifp);
		else
			xl_start_locked(ifp);
	}

	XL_UNLOCK(sc);
}

/*
 * Stop the adapter and free any mbufs allocated to the
 * RX and TX lists.
*/
static void
xl_stop(struct xl_softc *sc)
{
	register int		i;
	struct ifnet		*ifp = sc->xl_ifp;

	XL_LOCK_ASSERT(sc);

	/* Disarm the TX watchdog. */
	ifp->if_timer = 0;

#ifdef DEVICE_POLLING
	/* NB: deregistered unconditionally, even if never registered. */
	ether_poll_deregister(ifp);
#endif /* DEVICE_POLLING */

	/* Drain any pending rxeof task before tearing down the rings. */
	taskqueue_drain(taskqueue_swi, &sc->xl_task);

	/* Quiesce the chip: disable RX/TX/stats and flush the RX FIFO. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STATS_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_DISCARD);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_DISABLE);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_COAX_STOP);
	DELAY(800);

#ifdef foo
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_RX_RESET);
	xl_wait(sc);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_TX_RESET);
	xl_wait(sc);
#endif

	/* Ack latched interrupts and mask everything off. */
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ACK|XL_STAT_INTLATCH);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_STAT_ENB|0);
	CSR_WRITE_2(sc, XL_COMMAND, XL_CMD_INTR_ENB|0);
	if (sc->xl_flags & XL_FLAG_FUNCREG)
		bus_space_write_4(sc->xl_ftag, sc->xl_fhandle, 4, 0x8000);

	/* Stop the stats updater. */
	untimeout(xl_stats_update, sc, sc->xl_stat_ch);

	/*
	 * Free data in the RX lists.
	 */
	for (i = 0; i < XL_RX_LIST_CNT; i++) {
		if (sc->xl_cdata.xl_rx_chain[i].xl_mbuf != NULL) {
			bus_dmamap_unload(sc->xl_mtag,
			    sc->xl_cdata.xl_rx_chain[i].xl_map);
			bus_dmamap_destroy(sc->xl_mtag,
			    sc->xl_cdata.xl_rx_chain[i].xl_map);
			m_freem(sc->xl_cdata.xl_rx_chain[i].xl_mbuf);
			sc->xl_cdata.xl_rx_chain[i].xl_mbuf = NULL;
		}
	}
	if (sc->xl_ldata.xl_rx_list != NULL)
		bzero(sc->xl_ldata.xl_rx_list, XL_RX_LIST_SZ);

	/*
	 * Free the TX list buffers.
	 */
	for (i = 0; i < XL_TX_LIST_CNT; i++) {
		if (sc->xl_cdata.xl_tx_chain[i].xl_mbuf != NULL) {
			bus_dmamap_unload(sc->xl_mtag,
			    sc->xl_cdata.xl_tx_chain[i].xl_map);
			bus_dmamap_destroy(sc->xl_mtag,
			    sc->xl_cdata.xl_tx_chain[i].xl_map);
			m_freem(sc->xl_cdata.xl_tx_chain[i].xl_mbuf);
			sc->xl_cdata.xl_tx_chain[i].xl_mbuf = NULL;
		}
	}
	if (sc->xl_ldata.xl_tx_list != NULL)
		bzero(sc->xl_ldata.xl_tx_list, XL_TX_LIST_SZ);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
}

/*
 * Stop all chip I/O so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static void
xl_shutdown(device_t dev)
{
	struct xl_softc		*sc;

	sc = device_get_softc(dev);

	XL_LOCK(sc);
	xl_reset(sc);
	xl_stop(sc);
	XL_UNLOCK(sc);
}

/*
 * Device suspend: quiesce the chip.  State is rebuilt in xl_resume().
 */
static int
xl_suspend(device_t dev)
{
	struct xl_softc		*sc;

	sc = device_get_softc(dev);

	XL_LOCK(sc);
	xl_stop(sc);
	XL_UNLOCK(sc);

	return (0);
}

/*
 * Device resume: reset the chip and, if the interface was up,
 * reinitialize it.
 */
static int
xl_resume(device_t dev)
{
	struct xl_softc		*sc;
	struct ifnet		*ifp;

	sc = device_get_softc(dev);
	ifp = sc->xl_ifp;

	XL_LOCK(sc);
	xl_reset(sc);

	if (ifp->if_flags & IFF_UP)
		xl_init_locked(sc);

	XL_UNLOCK(sc);

	return (0);
}