diff --git a/sys/dev/ath/if_ath.c b/sys/dev/ath/if_ath.c
index 7318284f8a62..87c9d6151d3b 100644
--- a/sys/dev/ath/if_ath.c
+++ b/sys/dev/ath/if_ath.c
@@ -1,7021 +1,7022 @@
/*-
 * Copyright (c) 2002-2008 Sam Leffler, Errno Consulting
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include
__FBSDID("$FreeBSD$");

/*
 * Driver for the Atheros Wireless LAN controller.
 *
 * This software is derived from work of Atsushi Onoe; his contribution
 * is greatly appreciated.
 */

#include "opt_inet.h"
#include "opt_ath.h"

#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include

#ifdef INET
#include #include
#endif

#include #include #include /* XXX for softled */

#ifdef ATH_TX99_DIAG
#include
#endif

/*
 * We require a HAL w/ the changes for split tx/rx MIC.
 */
CTASSERT(HAL_ABI_VERSION > 0x06052200);

/*
 * ATH_BCBUF determines the number of vap's that can transmit
 * beacons and also (currently) the number of vap's that can
 * have unique mac addresses/bssid. When staggering beacons
 * 4 is probably a good max as otherwise the beacons become
 * very closely spaced and there is limited time for cab q traffic
 * to go out. You can burst beacons instead but that is not good
 * for stations in power save and at some point you really want
 * another radio (and channel).
 *
 * The limit on the number of mac addresses is tied to our use of
 * the U/L bit and tracking addresses in a byte; it would be
 * worthwhile to allow more for applications like proxy sta.
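 * (ATH_BCBUF itself is capped at 8 by the CTASSERT that follows.)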
*/ CTASSERT(ATH_BCBUF <= 8); /* unaligned little endian access */ #define LE_READ_2(p) \ ((u_int16_t) \ ((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8))) #define LE_READ_4(p) \ ((u_int32_t) \ ((((u_int8_t *)(p))[0] ) | (((u_int8_t *)(p))[1] << 8) | \ (((u_int8_t *)(p))[2] << 16) | (((u_int8_t *)(p))[3] << 24))) enum { ATH_LED_TX, ATH_LED_RX, ATH_LED_POLL, }; static struct ieee80211vap *ath_vap_create(struct ieee80211com *, const char name[IFNAMSIZ], int unit, int opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]); static void ath_vap_delete(struct ieee80211vap *); static void ath_init(void *); static void ath_stop_locked(struct ifnet *); static void ath_stop(struct ifnet *); static void ath_start(struct ifnet *); static int ath_reset(struct ifnet *); static int ath_reset_vap(struct ieee80211vap *, u_long); static int ath_media_change(struct ifnet *); static void ath_watchdog(struct ifnet *); static int ath_ioctl(struct ifnet *, u_long, caddr_t); static void ath_fatal_proc(void *, int); static void ath_rxorn_proc(void *, int); static void ath_bmiss_vap(struct ieee80211vap *); static void ath_bmiss_proc(void *, int); static int ath_keyset(struct ath_softc *, const struct ieee80211_key *, struct ieee80211_node *); static int ath_key_alloc(struct ieee80211vap *, const struct ieee80211_key *, ieee80211_keyix *, ieee80211_keyix *); static int ath_key_delete(struct ieee80211vap *, const struct ieee80211_key *); static int ath_key_set(struct ieee80211vap *, const struct ieee80211_key *, const u_int8_t mac[IEEE80211_ADDR_LEN]); static void ath_key_update_begin(struct ieee80211vap *); static void ath_key_update_end(struct ieee80211vap *); static void ath_update_mcast(struct ifnet *); static void ath_update_promisc(struct ifnet *); static void ath_mode_init(struct ath_softc *); static void ath_setslottime(struct ath_softc *); static void ath_updateslot(struct ifnet *); static int ath_beaconq_setup(struct ath_hal *); static int ath_beacon_alloc(struct ath_softc *, struct ieee80211_node *); static void ath_beacon_update(struct ieee80211vap *, int item); static void ath_beacon_setup(struct ath_softc *, struct ath_buf *); static void ath_beacon_proc(void *, int); static struct ath_buf *ath_beacon_generate(struct ath_softc *, struct ieee80211vap *); static void ath_bstuck_proc(void *, int); static void ath_beacon_return(struct ath_softc *, struct ath_buf *); static void ath_beacon_free(struct ath_softc *); static void ath_beacon_config(struct ath_softc *, struct ieee80211vap *); static void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *, ath_bufhead *); static int ath_desc_alloc(struct ath_softc *); static void ath_desc_free(struct ath_softc *); -static struct ieee80211_node *ath_node_alloc(struct ieee80211_node_table *); +static struct ieee80211_node *ath_node_alloc(struct ieee80211vap *, + const uint8_t [IEEE80211_ADDR_LEN]); static void ath_node_free(struct ieee80211_node *); static void ath_node_getsignal(const struct ieee80211_node *, int8_t *, int8_t *); static int ath_rxbuf_init(struct ath_softc *, struct ath_buf *); static void ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype, int rssi, int noise, u_int32_t rstamp); static void ath_setdefantenna(struct ath_softc *, u_int); static void ath_rx_proc(void *, int); static void ath_txq_init(struct ath_softc *sc, struct ath_txq *, int); static struct ath_txq *ath_txq_setup(struct ath_softc*, int qtype, int subtype); static int ath_tx_setup(struct ath_softc *, int, 
int); static int ath_wme_update(struct ieee80211com *); static void ath_tx_cleanupq(struct ath_softc *, struct ath_txq *); static void ath_tx_cleanup(struct ath_softc *); static void ath_freetx(struct mbuf *); static int ath_tx_start(struct ath_softc *, struct ieee80211_node *, struct ath_buf *, struct mbuf *); static void ath_tx_proc_q0(void *, int); static void ath_tx_proc_q0123(void *, int); static void ath_tx_proc(void *, int); static void ath_tx_draintxq(struct ath_softc *, struct ath_txq *); static int ath_chan_set(struct ath_softc *, struct ieee80211_channel *); static void ath_draintxq(struct ath_softc *); static void ath_stoprecv(struct ath_softc *); static int ath_startrecv(struct ath_softc *); static void ath_chan_change(struct ath_softc *, struct ieee80211_channel *); static void ath_scan_start(struct ieee80211com *); static void ath_scan_end(struct ieee80211com *); static void ath_set_channel(struct ieee80211com *); static void ath_calibrate(void *); static int ath_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void ath_setup_stationkey(struct ieee80211_node *); static void ath_newassoc(struct ieee80211_node *, int); static int ath_setregdomain(struct ieee80211com *, struct ieee80211_regdomain *, int, struct ieee80211_channel []); static void ath_getradiocaps(struct ieee80211com *, int *, struct ieee80211_channel []); static int ath_getchannels(struct ath_softc *); static void ath_led_event(struct ath_softc *, int); static int ath_rate_setup(struct ath_softc *, u_int mode); static void ath_setcurmode(struct ath_softc *, enum ieee80211_phymode); static void ath_sysctlattach(struct ath_softc *); static int ath_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void ath_bpfattach(struct ath_softc *); static void ath_announce(struct ath_softc *); SYSCTL_DECL(_hw_ath); /* XXX validate sysctl values */ static int ath_calinterval = 30; /* calibrate every 30 secs */ SYSCTL_INT(_hw_ath, OID_AUTO, calibrate, CTLFLAG_RW, &ath_calinterval, 0, "chip calibration interval (secs)"); static int ath_rxbuf = ATH_RXBUF; /* # rx buffers to allocate */ SYSCTL_INT(_hw_ath, OID_AUTO, rxbuf, CTLFLAG_RW, &ath_rxbuf, 0, "rx buffers allocated"); TUNABLE_INT("hw.ath.rxbuf", &ath_rxbuf); static int ath_txbuf = ATH_TXBUF; /* # tx buffers to allocate */ SYSCTL_INT(_hw_ath, OID_AUTO, txbuf, CTLFLAG_RW, &ath_txbuf, 0, "tx buffers allocated"); TUNABLE_INT("hw.ath.txbuf", &ath_txbuf); #ifdef ATH_DEBUG static int ath_debug = 0; SYSCTL_INT(_hw_ath, OID_AUTO, debug, CTLFLAG_RW, &ath_debug, 0, "control debugging printfs"); TUNABLE_INT("hw.ath.debug", &ath_debug); enum { ATH_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ ATH_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */ ATH_DEBUG_RECV = 0x00000004, /* basic recv operation */ ATH_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */ ATH_DEBUG_RATE = 0x00000010, /* rate control */ ATH_DEBUG_RESET = 0x00000020, /* reset processing */ ATH_DEBUG_MODE = 0x00000040, /* mode init/setup */ ATH_DEBUG_BEACON = 0x00000080, /* beacon handling */ ATH_DEBUG_WATCHDOG = 0x00000100, /* watchdog timeout */ ATH_DEBUG_INTR = 0x00001000, /* ISR */ ATH_DEBUG_TX_PROC = 0x00002000, /* tx ISR proc */ ATH_DEBUG_RX_PROC = 0x00004000, /* rx ISR proc */ ATH_DEBUG_BEACON_PROC = 0x00008000, /* beacon ISR proc */ ATH_DEBUG_CALIBRATE = 0x00010000, /* periodic calibration */ ATH_DEBUG_KEYCACHE = 0x00020000, /* key cache management */ ATH_DEBUG_STATE = 0x00040000, /* 802.11 state transitions */ ATH_DEBUG_NODE = 0x00080000, /* 
node management */ ATH_DEBUG_LED = 0x00100000, /* led management */ ATH_DEBUG_FF = 0x00200000, /* fast frames */ ATH_DEBUG_DFS = 0x00400000, /* DFS processing */ ATH_DEBUG_FATAL = 0x80000000, /* fatal errors */ ATH_DEBUG_ANY = 0xffffffff }; #define IFF_DUMPPKTS(sc, m) \ ((sc->sc_debug & (m)) || \ (sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2)) #define DPRINTF(sc, m, fmt, ...) do { \ if (sc->sc_debug & (m)) \ printf(fmt, __VA_ARGS__); \ } while (0) #define KEYPRINTF(sc, ix, hk, mac) do { \ if (sc->sc_debug & ATH_DEBUG_KEYCACHE) \ ath_keyprint(sc, __func__, ix, hk, mac); \ } while (0) static void ath_printrxbuf(const struct ath_buf *bf, u_int ix, int); static void ath_printtxbuf(const struct ath_buf *bf, u_int qnum, u_int ix, int done); #else #define IFF_DUMPPKTS(sc, m) \ ((sc->sc_ifp->if_flags & (IFF_DEBUG|IFF_LINK2)) == (IFF_DEBUG|IFF_LINK2)) #define DPRINTF(sc, m, fmt, ...) do { \ (void) sc; \ } while (0) #define KEYPRINTF(sc, k, ix, mac) do { \ (void) sc; \ } while (0) #endif MALLOC_DEFINE(M_ATHDEV, "athdev", "ath driver dma buffers"); int ath_attach(u_int16_t devid, struct ath_softc *sc) { struct ifnet *ifp; struct ieee80211com *ic; struct ath_hal *ah = NULL; HAL_STATUS status; int error = 0, i; DPRINTF(sc, ATH_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid); ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { device_printf(sc->sc_dev, "can not if_alloc()\n"); error = ENOSPC; goto bad; } ic = ifp->if_l2com; /* set these up early for if_printf use */ if_initname(ifp, device_get_name(sc->sc_dev), device_get_unit(sc->sc_dev)); ah = ath_hal_attach(devid, sc, sc->sc_st, sc->sc_sh, &status); if (ah == NULL) { if_printf(ifp, "unable to attach hardware; HAL status %u\n", status); error = ENXIO; goto bad; } if (ah->ah_abi != HAL_ABI_VERSION) { if_printf(ifp, "HAL ABI mismatch detected " "(HAL:0x%x != driver:0x%x)\n", ah->ah_abi, HAL_ABI_VERSION); error = ENXIO; goto bad; } sc->sc_ah = ah; sc->sc_invalid = 0; /* ready to go, enable interrupt handling */ /* * Check if the MAC has multi-rate retry support. * We do this by trying to setup a fake extended * descriptor. MAC's that don't have support will * return false w/o doing anything. MAC's that do * support it will return true w/o doing anything. */ sc->sc_mrretry = ath_hal_setupxtxdesc(ah, NULL, 0,0, 0,0, 0,0); /* * Check if the device has hardware counters for PHY * errors. If so we need to enable the MIB interrupt * so we can act on stat triggers. */ if (ath_hal_hwphycounters(ah)) sc->sc_needmib = 1; /* * Get the hardware key cache size. */ sc->sc_keymax = ath_hal_keycachesize(ah); if (sc->sc_keymax > ATH_KEYMAX) { if_printf(ifp, "Warning, using only %u of %u key cache slots\n", ATH_KEYMAX, sc->sc_keymax); sc->sc_keymax = ATH_KEYMAX; } /* * Reset the key cache since some parts do not * reset the contents on initial power up. */ for (i = 0; i < sc->sc_keymax; i++) ath_hal_keyreset(ah, i); /* * Collect the default channel list. */ error = ath_getchannels(sc); if (error != 0) goto bad; /* * Setup rate tables for all potential media types. 
*/ ath_rate_setup(sc, IEEE80211_MODE_11A); ath_rate_setup(sc, IEEE80211_MODE_11B); ath_rate_setup(sc, IEEE80211_MODE_11G); ath_rate_setup(sc, IEEE80211_MODE_TURBO_A); ath_rate_setup(sc, IEEE80211_MODE_TURBO_G); ath_rate_setup(sc, IEEE80211_MODE_STURBO_A); ath_rate_setup(sc, IEEE80211_MODE_11NA); ath_rate_setup(sc, IEEE80211_MODE_11NG); ath_rate_setup(sc, IEEE80211_MODE_HALF); ath_rate_setup(sc, IEEE80211_MODE_QUARTER); /* NB: setup here so ath_rate_update is happy */ ath_setcurmode(sc, IEEE80211_MODE_11A); /* * Allocate tx+rx descriptors and populate the lists. */ error = ath_desc_alloc(sc); if (error != 0) { if_printf(ifp, "failed to allocate descriptors: %d\n", error); goto bad; } callout_init(&sc->sc_cal_ch, CALLOUT_MPSAFE); ATH_TXBUF_LOCK_INIT(sc); sc->sc_tq = taskqueue_create("ath_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->sc_tq); taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq", ifp->if_xname); TASK_INIT(&sc->sc_rxtask, 0, ath_rx_proc, sc); TASK_INIT(&sc->sc_rxorntask, 0, ath_rxorn_proc, sc); TASK_INIT(&sc->sc_bmisstask, 0, ath_bmiss_proc, sc); TASK_INIT(&sc->sc_bstucktask,0, ath_bstuck_proc, sc); /* * Allocate hardware transmit queues: one queue for * beacon frames and one data queue for each QoS * priority. Note that the hal handles reseting * these queues at the needed time. * * XXX PS-Poll */ sc->sc_bhalq = ath_beaconq_setup(ah); if (sc->sc_bhalq == (u_int) -1) { if_printf(ifp, "unable to setup a beacon xmit queue!\n"); error = EIO; goto bad2; } sc->sc_cabq = ath_txq_setup(sc, HAL_TX_QUEUE_CAB, 0); if (sc->sc_cabq == NULL) { if_printf(ifp, "unable to setup CAB xmit queue!\n"); error = EIO; goto bad2; } /* NB: insure BK queue is the lowest priority h/w queue */ if (!ath_tx_setup(sc, WME_AC_BK, HAL_WME_AC_BK)) { if_printf(ifp, "unable to setup xmit queue for %s traffic!\n", ieee80211_wme_acnames[WME_AC_BK]); error = EIO; goto bad2; } if (!ath_tx_setup(sc, WME_AC_BE, HAL_WME_AC_BE) || !ath_tx_setup(sc, WME_AC_VI, HAL_WME_AC_VI) || !ath_tx_setup(sc, WME_AC_VO, HAL_WME_AC_VO)) { /* * Not enough hardware tx queues to properly do WME; * just punt and assign them all to the same h/w queue. * We could do a better job of this if, for example, * we allocate queues when we switch from station to * AP mode. */ if (sc->sc_ac2q[WME_AC_VI] != NULL) ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]); if (sc->sc_ac2q[WME_AC_BE] != NULL) ath_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]); sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK]; sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK]; sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK]; } /* * Special case certain configurations. Note the * CAB queue is handled by these specially so don't * include them when checking the txq setup mask. */ switch (sc->sc_txqsetup &~ (1<sc_cabq->axq_qnum)) { case 0x01: TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0, sc); break; case 0x0f: TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc_q0123, sc); break; default: TASK_INIT(&sc->sc_txtask, 0, ath_tx_proc, sc); break; } /* * Setup rate control. Some rate control modules * call back to change the anntena state so expose * the necessary entry points. * XXX maybe belongs in struct ath_ratectrl? */ sc->sc_setdefantenna = ath_setdefantenna; sc->sc_rc = ath_rate_attach(sc); if (sc->sc_rc == NULL) { error = EIO; goto bad2; } sc->sc_blinking = 0; sc->sc_ledstate = 1; sc->sc_ledon = 0; /* low true */ sc->sc_ledidle = (2700*hz)/1000; /* 2.7sec */ callout_init(&sc->sc_ledtimer, CALLOUT_MPSAFE); /* * Auto-enable soft led processing for IBM cards and for * 5211 minipci cards. 
Users can also manually enable/disable * support with a sysctl. */ sc->sc_softled = (devid == AR5212_DEVID_IBM || devid == AR5211_DEVID); if (sc->sc_softled) { ath_hal_gpioCfgOutput(ah, sc->sc_ledpin); ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon); } ifp->if_softc = sc; ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST; ifp->if_start = ath_start; ifp->if_watchdog = ath_watchdog; ifp->if_ioctl = ath_ioctl; ifp->if_init = ath_init; IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN; IFQ_SET_READY(&ifp->if_snd); ic->ic_ifp = ifp; /* XXX not right but it's not used anywhere important */ ic->ic_phytype = IEEE80211_T_OFDM; ic->ic_opmode = IEEE80211_M_STA; ic->ic_caps = IEEE80211_C_STA /* station mode */ | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ | IEEE80211_C_HOSTAP /* hostap mode */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_AHDEMO /* adhoc demo mode */ | IEEE80211_C_WDS /* 4-address traffic works */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ | IEEE80211_C_TXFRAG /* handle tx frags */ ; /* * Query the hal to figure out h/w crypto support. */ if (ath_hal_ciphersupported(ah, HAL_CIPHER_WEP)) ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP; if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_OCB)) ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_OCB; if (ath_hal_ciphersupported(ah, HAL_CIPHER_AES_CCM)) ic->ic_cryptocaps |= IEEE80211_CRYPTO_AES_CCM; if (ath_hal_ciphersupported(ah, HAL_CIPHER_CKIP)) ic->ic_cryptocaps |= IEEE80211_CRYPTO_CKIP; if (ath_hal_ciphersupported(ah, HAL_CIPHER_TKIP)) { ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIP; /* * Check if h/w does the MIC and/or whether the * separate key cache entries are required to * handle both tx+rx MIC keys. */ if (ath_hal_ciphersupported(ah, HAL_CIPHER_MIC)) ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; /* * If the h/w supports storing tx+rx MIC keys * in one cache slot automatically enable use. */ if (ath_hal_hastkipsplit(ah) || !ath_hal_settkipsplit(ah, AH_FALSE)) sc->sc_splitmic = 1; /* * If the h/w can do TKIP MIC together with WME then * we use it; otherwise we force the MIC to be done * in software by the net80211 layer. */ if (ath_hal_haswmetkipmic(ah)) sc->sc_wmetkipmic = 1; } sc->sc_hasclrkey = ath_hal_ciphersupported(ah, HAL_CIPHER_CLR); sc->sc_mcastkey = ath_hal_getmcastkeysearch(ah); /* * Mark key cache slots associated with global keys * as in use. If we knew TKIP was not to be used we * could leave the +32, +64, and +32+64 slots free. */ for (i = 0; i < IEEE80211_WEP_NKID; i++) { setbit(sc->sc_keymap, i); setbit(sc->sc_keymap, i+64); if (sc->sc_splitmic) { setbit(sc->sc_keymap, i+32); setbit(sc->sc_keymap, i+32+64); } } /* * TPC support can be done either with a global cap or * per-packet support. The latter is not available on * all parts. We're a bit pedantic here as all parts * support a global cap. */ if (ath_hal_hastpc(ah) || ath_hal_hastxpowlimit(ah)) ic->ic_caps |= IEEE80211_C_TXPMGT; /* * Mark WME capability only if we have sufficient * hardware queues to do proper priority scheduling. */ if (sc->sc_ac2q[WME_AC_BE] != sc->sc_ac2q[WME_AC_BK]) ic->ic_caps |= IEEE80211_C_WME; /* * Check for misc other capabilities. 
*/ if (ath_hal_hasbursting(ah)) ic->ic_caps |= IEEE80211_C_BURST; sc->sc_hasbmask = ath_hal_hasbssidmask(ah); sc->sc_hastsfadd = ath_hal_hastsfadjust(ah); if (ath_hal_hasfastframes(ah)) ic->ic_caps |= IEEE80211_C_FF; if (ath_hal_getwirelessmodes(ah, ic->ic_regdomain.country) & (HAL_MODE_108G|HAL_MODE_TURBO)) ic->ic_caps |= IEEE80211_C_TURBOP; /* * Indicate we need the 802.11 header padded to a * 32-bit boundary for 4-address and QoS frames. */ ic->ic_flags |= IEEE80211_F_DATAPAD; /* * Query the hal about antenna support. */ sc->sc_defant = ath_hal_getdefantenna(ah); /* * Not all chips have the VEOL support we want to * use with IBSS beacons; check here for it. */ sc->sc_hasveol = ath_hal_hasveol(ah); /* get mac address from hardware */ ath_hal_getmac(ah, ic->ic_myaddr); if (sc->sc_hasbmask) ath_hal_getbssidmask(ah, sc->sc_hwbssidmask); /* NB: used to size node table key mapping array */ ic->ic_max_keyix = sc->sc_keymax; /* call MI attach routine. */ ieee80211_ifattach(ic); ic->ic_setregdomain = ath_setregdomain; ic->ic_getradiocaps = ath_getradiocaps; sc->sc_opmode = HAL_M_STA; /* override default methods */ ic->ic_newassoc = ath_newassoc; ic->ic_updateslot = ath_updateslot; ic->ic_wme.wme_update = ath_wme_update; ic->ic_vap_create = ath_vap_create; ic->ic_vap_delete = ath_vap_delete; ic->ic_raw_xmit = ath_raw_xmit; ic->ic_update_mcast = ath_update_mcast; ic->ic_update_promisc = ath_update_promisc; ic->ic_node_alloc = ath_node_alloc; sc->sc_node_free = ic->ic_node_free; ic->ic_node_free = ath_node_free; ic->ic_node_getsignal = ath_node_getsignal; ic->ic_scan_start = ath_scan_start; ic->ic_scan_end = ath_scan_end; ic->ic_set_channel = ath_set_channel; ath_bpfattach(sc); /* * Setup dynamic sysctl's now that country code and * regdomain are available from the hal. */ ath_sysctlattach(sc); if (bootverbose) ieee80211_announce(ic); ath_announce(sc); return 0; bad2: ath_tx_cleanup(sc); ath_desc_free(sc); bad: if (ah) ath_hal_detach(ah); if (ifp != NULL) if_free(ifp); sc->sc_invalid = 1; return error; } int ath_detach(struct ath_softc *sc) { struct ifnet *ifp = sc->sc_ifp; DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", __func__, ifp->if_flags); /* * NB: the order of these is important: * o stop the chip so no more interrupts will fire * o call the 802.11 layer before detaching the hal to * insure callbacks into the driver to delete global * key cache entries can be handled * o free the taskqueue which drains any pending tasks * o reclaim the bpf tap now that we know nothing will use * it (e.g. rx processing from the task q thread) * o reclaim the tx queue data structures after calling * the 802.11 layer as we'll get called back to reclaim * node state and potentially want to use them * o to cleanup the tx queues the hal is called, so detach * it last * Other than that, it's straightforward... */ ath_stop(ifp); ieee80211_ifdetach(ifp->if_l2com); taskqueue_free(sc->sc_tq); bpfdetach(ifp); #ifdef ATH_TX99_DIAG if (sc->sc_tx99 != NULL) sc->sc_tx99->detach(sc->sc_tx99); #endif ath_rate_detach(sc->sc_rc); ath_desc_free(sc); ath_tx_cleanup(sc); ath_hal_detach(sc->sc_ah); /* NB: sets chip in full sleep */ if_free(ifp); return 0; } /* * MAC address handling for multiple BSS on the same radio. * The first vap uses the MAC address from the EEPROM. For * subsequent vap's we set the U/L bit (bit 1) in the MAC * address and use the next six bits as an index. 
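 *
 * For example (addresses purely illustrative): with an EEPROM
 * address of 00:03:7f:aa:bb:cc, the next vap cloned with a unique
 * bssid would typically come out as 06:03:7f:aa:bb:cc, i.e. the U/L
 * bit set and the slot index (1 here) carried in the upper six bits
 * of mac[0]; this is why reclaim_address() recovers the index with
 * mac[0] >> 2.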
*/ static void assign_address(struct ath_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone) { int i; if (clone && sc->sc_hasbmask) { /* NB: we only do this if h/w supports multiple bssid */ for (i = 0; i < 8; i++) if ((sc->sc_bssidmask & (1<sc_bssidmask |= 1<sc_hwbssidmask[0] &= ~mac[0]; if (i == 0) sc->sc_nbssid0++; } static void reclaim_address(struct ath_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN]) { int i = mac[0] >> 2; uint8_t mask; if (i != 0 || --sc->sc_nbssid0 == 0) { sc->sc_bssidmask &= ~(1<sc_bssidmask & (1<sc_hwbssidmask[0] |= mask; } } /* * Assign a beacon xmit slot. We try to space out * assignments so when beacons are staggered the * traffic coming out of the cab q has maximal time * to go out before the next beacon is scheduled. */ static int assign_bslot(struct ath_softc *sc) { u_int slot, free; free = 0; for (slot = 0; slot < ATH_BCBUF; slot++) if (sc->sc_bslot[slot] == NULL) { if (sc->sc_bslot[(slot+1)%ATH_BCBUF] == NULL && sc->sc_bslot[(slot-1)%ATH_BCBUF] == NULL) return slot; free = slot; /* NB: keep looking for a double slot */ } return free; } static struct ieee80211vap * ath_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, int opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac0[IEEE80211_ADDR_LEN]) { struct ath_softc *sc = ic->ic_ifp->if_softc; struct ath_vap *avp; struct ieee80211vap *vap; uint8_t mac[IEEE80211_ADDR_LEN]; int ic_opmode, needbeacon, error; avp = (struct ath_vap *) malloc(sizeof(struct ath_vap), M_80211_VAP, M_WAITOK | M_ZERO); needbeacon = 0; IEEE80211_ADDR_COPY(mac, mac0); ATH_LOCK(sc); switch (opmode) { case IEEE80211_M_STA: if (sc->sc_nstavaps != 0) { /* XXX only 1 sta for now */ device_printf(sc->sc_dev, "only 1 sta vap supported\n"); goto bad; } if (sc->sc_nvaps) { /* * When there are multiple vaps we must fall * back to s/w beacon miss handling. */ flags |= IEEE80211_CLONE_NOBEACONS; } if (flags & IEEE80211_CLONE_NOBEACONS) { sc->sc_swbmiss = 1; ic_opmode = IEEE80211_M_HOSTAP; } else ic_opmode = opmode; break; case IEEE80211_M_IBSS: if (sc->sc_nvaps != 0) { /* XXX only 1 for now */ device_printf(sc->sc_dev, "only 1 ibss vap supported\n"); goto bad; } ic_opmode = opmode; needbeacon = 1; break; case IEEE80211_M_AHDEMO: /* fall thru... */ case IEEE80211_M_MONITOR: if (sc->sc_nvaps != 0 && ic->ic_opmode != opmode) { /* XXX not right for monitor mode */ ic_opmode = ic->ic_opmode; } else ic_opmode = opmode; break; case IEEE80211_M_HOSTAP: needbeacon = 1; /* fall thru... */ case IEEE80211_M_WDS: if (sc->sc_nvaps && ic->ic_opmode == IEEE80211_M_STA) { device_printf(sc->sc_dev, "wds not supported in sta mode\n"); goto bad; } if (opmode == IEEE80211_M_WDS) { /* * Silently remove any request for a unique * bssid; WDS vap's always share the local * mac address. */ flags &= ~IEEE80211_CLONE_BSSID; } ic_opmode = IEEE80211_M_HOSTAP; break; default: device_printf(sc->sc_dev, "unknown opmode %d\n", opmode); goto bad; } /* * Check that a beacon buffer is available; the code below assumes it. */ if (needbeacon & STAILQ_EMPTY(&sc->sc_bbuf)) { device_printf(sc->sc_dev, "no beacon buffer available\n"); goto bad; } /* STA, AHDEMO? 
*/ if (opmode == IEEE80211_M_HOSTAP) { assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID); ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask); } vap = &avp->av_vap; /* XXX can't hold mutex across if_alloc */ ATH_UNLOCK(sc); error = ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac); ATH_LOCK(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: error %d creating vap\n", __func__, error); goto bad2; } /* h/w crypto support */ vap->iv_key_alloc = ath_key_alloc; vap->iv_key_delete = ath_key_delete; vap->iv_key_set = ath_key_set; vap->iv_key_update_begin = ath_key_update_begin; vap->iv_key_update_end = ath_key_update_end; /* override various methods */ avp->av_recv_mgmt = vap->iv_recv_mgmt; vap->iv_recv_mgmt = ath_recv_mgmt; vap->iv_reset = ath_reset_vap; vap->iv_update_beacon = ath_beacon_update; avp->av_newstate = vap->iv_newstate; vap->iv_newstate = ath_newstate; avp->av_bmiss = vap->iv_bmiss; vap->iv_bmiss = ath_bmiss_vap; avp->av_bslot = -1; if (needbeacon) { /* * Allocate beacon state and setup the q for buffered * multicast frames. We know a beacon buffer is * available because we checked above. */ avp->av_bcbuf = STAILQ_FIRST(&sc->sc_bbuf); STAILQ_REMOVE_HEAD(&sc->sc_bbuf, bf_list); if (opmode != IEEE80211_M_IBSS || !sc->sc_hasveol) { /* * Assign the vap to a beacon xmit slot. As above * this cannot fail to find a free one. */ avp->av_bslot = assign_bslot(sc); KASSERT(sc->sc_bslot[avp->av_bslot] == NULL, ("beacon slot %u not empty", avp->av_bslot)); sc->sc_bslot[avp->av_bslot] = vap; sc->sc_nbcnvaps++; } if (sc->sc_hastsfadd && sc->sc_nbcnvaps > 0) { /* * Multple vaps are to transmit beacons and we * have h/w support for TSF adjusting; enable * use of staggered beacons. */ sc->sc_stagbeacons = 1; } ath_txq_init(sc, &avp->av_mcastq, ATH_TXQ_SWQ); } ic->ic_opmode = ic_opmode; if (opmode != IEEE80211_M_WDS) { sc->sc_nvaps++; if (opmode == IEEE80211_M_STA) sc->sc_nstavaps++; } switch (ic_opmode) { case IEEE80211_M_IBSS: sc->sc_opmode = HAL_M_IBSS; break; case IEEE80211_M_STA: sc->sc_opmode = HAL_M_STA; break; case IEEE80211_M_AHDEMO: case IEEE80211_M_HOSTAP: sc->sc_opmode = HAL_M_HOSTAP; break; case IEEE80211_M_MONITOR: sc->sc_opmode = HAL_M_MONITOR; break; default: /* XXX should not happen */ break; } if (sc->sc_hastsfadd) { /* * Configure whether or not TSF adjust should be done. */ ath_hal_settsfadjust(sc->sc_ah, sc->sc_stagbeacons); } ATH_UNLOCK(sc); /* complete setup */ ieee80211_vap_attach(vap, ath_media_change, ieee80211_media_status); return vap; bad2: reclaim_address(sc, mac); ath_hal_setbssidmask(sc->sc_ah, sc->sc_hwbssidmask); bad: free(avp, M_80211_VAP); ATH_UNLOCK(sc); return NULL; } static void ath_vap_delete(struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct ifnet *ifp = ic->ic_ifp; struct ath_softc *sc = ifp->if_softc; struct ath_hal *ah = sc->sc_ah; struct ath_vap *avp = ATH_VAP(vap); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { /* * Quiesce the hardware while we remove the vap. In * particular we need to reclaim all references to * the vap state by any frames pending on the tx queues. */ ath_hal_intrset(ah, 0); /* disable interrupts */ ath_draintxq(sc); /* stop xmit side */ ath_stoprecv(sc); /* stop recv side */ } ieee80211_vap_detach(vap); ATH_LOCK(sc); /* * Reclaim beacon state. Note this must be done before * the vap instance is reclaimed as we may have a reference * to it in the buffer for the beacon frame. 
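 * (ath_beacon_return() frees any staged beacon mbuf and releases the
 * node reference held by the buffer before it goes back on the free
 * list.)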
*/ if (avp->av_bcbuf != NULL) { if (avp->av_bslot != -1) { sc->sc_bslot[avp->av_bslot] = NULL; sc->sc_nbcnvaps--; } ath_beacon_return(sc, avp->av_bcbuf); avp->av_bcbuf = NULL; if (sc->sc_nbcnvaps == 0) { sc->sc_stagbeacons = 0; if (sc->sc_hastsfadd) ath_hal_settsfadjust(sc->sc_ah, 0); } /* * Reclaim any pending mcast frames for the vap. */ ath_tx_draintxq(sc, &avp->av_mcastq); ATH_TXQ_LOCK_DESTROY(&avp->av_mcastq); } /* * Update bookkeeping. */ if (vap->iv_opmode == IEEE80211_M_STA) { sc->sc_nstavaps--; if (sc->sc_nstavaps == 0 && sc->sc_swbmiss) sc->sc_swbmiss = 0; } else if (vap->iv_opmode == IEEE80211_M_HOSTAP) { reclaim_address(sc, vap->iv_myaddr); ath_hal_setbssidmask(ah, sc->sc_hwbssidmask); } if (vap->iv_opmode != IEEE80211_M_WDS) sc->sc_nvaps--; ATH_UNLOCK(sc); free(avp, M_80211_VAP); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { /* * Restart rx+tx machines if still running (RUNNING will * be reset if we just destroyed the last vap). */ if (ath_startrecv(sc) != 0) if_printf(ifp, "%s: unable to restart recv logic\n", __func__); if (sc->sc_beacons) ath_beacon_config(sc, NULL); ath_hal_intrset(ah, sc->sc_imask); } } void ath_suspend(struct ath_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", __func__, ifp->if_flags); sc->sc_resume_up = (ifp->if_flags & IFF_UP) != 0; if (ic->ic_opmode == IEEE80211_M_STA) ath_stop(ifp); else ieee80211_suspend_all(ic); /* * NB: don't worry about putting the chip in low power * mode; pci will power off our socket on suspend and * cardbus detaches the device. */ } /* * Reset the key cache since some parts do not reset the * contents on resume. First we clear all entries, then * re-load keys that the 802.11 layer assumes are setup * in h/w. */ static void ath_reset_keycache(struct ath_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ath_hal *ah = sc->sc_ah; int i; for (i = 0; i < sc->sc_keymax; i++) ath_hal_keyreset(ah, i); ieee80211_crypto_reload_keys(ic); } void ath_resume(struct ath_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ath_hal *ah = sc->sc_ah; HAL_STATUS status; DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", __func__, ifp->if_flags); /* * Must reset the chip before we reload the * keycache as we were powered down on suspend. */ ath_hal_reset(ah, sc->sc_opmode, &sc->sc_curchan, AH_FALSE, &status); ath_reset_keycache(sc); if (sc->sc_resume_up) { if (ic->ic_opmode == IEEE80211_M_STA) { ath_init(sc); ieee80211_beacon_miss(ic); } else ieee80211_resume_all(ic); } if (sc->sc_softled) { ath_hal_gpioCfgOutput(ah, sc->sc_ledpin); ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon); } } void ath_shutdown(struct ath_softc *sc) { struct ifnet *ifp = sc->sc_ifp; DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags %x\n", __func__, ifp->if_flags); ath_stop(ifp); /* NB: no point powering down chip as we're about to reboot */ } /* * Interrupt handler. Most of the actual processing is deferred. */ void ath_intr(void *arg) { struct ath_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ath_hal *ah = sc->sc_ah; HAL_INT status; if (sc->sc_invalid) { /* * The hardware is not ready/present, don't touch anything. * Note this can happen early on if the IRQ is shared. 
*/ DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid; ignored\n", __func__); return; } if (!ath_hal_intrpend(ah)) /* shared irq, not for us */ return; if ((ifp->if_flags & IFF_UP) == 0 || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { HAL_INT status; DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", __func__, ifp->if_flags); ath_hal_getisr(ah, &status); /* clear ISR */ ath_hal_intrset(ah, 0); /* disable further intr's */ return; } /* * Figure out the reason(s) for the interrupt. Note * that the hal returns a pseudo-ISR that may include * bits we haven't explicitly enabled so we mask the * value to insure we only process bits we requested. */ ath_hal_getisr(ah, &status); /* NB: clears ISR too */ DPRINTF(sc, ATH_DEBUG_INTR, "%s: status 0x%x\n", __func__, status); status &= sc->sc_imask; /* discard unasked for bits */ if (status & HAL_INT_FATAL) { sc->sc_stats.ast_hardware++; ath_hal_intrset(ah, 0); /* disable intr's until reset */ ath_fatal_proc(sc, 0); } else if (status & HAL_INT_RXORN) { sc->sc_stats.ast_rxorn++; ath_hal_intrset(ah, 0); /* disable intr's until reset */ taskqueue_enqueue(sc->sc_tq, &sc->sc_rxorntask); } else { if (status & HAL_INT_SWBA) { /* * Software beacon alert--time to send a beacon. * Handle beacon transmission directly; deferring * this is too slow to meet timing constraints * under load. */ ath_beacon_proc(sc, 0); } if (status & HAL_INT_RXEOL) { /* * NB: the hardware should re-read the link when * RXE bit is written, but it doesn't work at * least on older hardware revs. */ sc->sc_stats.ast_rxeol++; sc->sc_rxlink = NULL; } if (status & HAL_INT_TXURN) { sc->sc_stats.ast_txurn++; /* bump tx trigger level */ ath_hal_updatetxtriglevel(ah, AH_TRUE); } if (status & HAL_INT_RX) taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); if (status & HAL_INT_TX) taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask); if (status & HAL_INT_BMISS) { sc->sc_stats.ast_bmiss++; taskqueue_enqueue(sc->sc_tq, &sc->sc_bmisstask); } if (status & HAL_INT_MIB) { sc->sc_stats.ast_mib++; /* * Disable interrupts until we service the MIB * interrupt; otherwise it will continue to fire. */ ath_hal_intrset(ah, 0); /* * Let the hal handle the event. We assume it will * clear whatever condition caused the interrupt. */ ath_hal_mibevent(ah, &sc->sc_halstats); ath_hal_intrset(ah, sc->sc_imask); } } } static void ath_fatal_proc(void *arg, int pending) { struct ath_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; u_int32_t *state; u_int32_t len; void *sp; if_printf(ifp, "hardware error; resetting\n"); /* * Fatal errors are unrecoverable. Typically these * are caused by DMA errors. Collect h/w state from * the hal so we can diagnose what's going on. 
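 * (The hal hands back an opaque blob of register state; we only
 * assume it is at least six 32-bit words long and dump those words
 * to the console before resetting.)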
*/ if (ath_hal_getfatalstate(sc->sc_ah, &sp, &len)) { KASSERT(len >= 6*sizeof(u_int32_t), ("len %u bytes", len)); state = sp; if_printf(ifp, "0x%08x 0x%08x 0x%08x, 0x%08x 0x%08x 0x%08x\n", state[0], state[1] , state[2], state[3], state[4], state[5]); } ath_reset(ifp); } static void ath_rxorn_proc(void *arg, int pending) { struct ath_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; if_printf(ifp, "rx FIFO overrun; resetting\n"); ath_reset(ifp); } static void ath_bmiss_vap(struct ieee80211vap *vap) { struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; u_int64_t lastrx = sc->sc_lastrx; u_int64_t tsf = ath_hal_gettsf64(sc->sc_ah); u_int bmisstimeout = vap->iv_bmissthreshold * vap->iv_bss->ni_intval * 1024; DPRINTF(sc, ATH_DEBUG_BEACON, "%s: tsf %llu lastrx %lld (%llu) bmiss %u\n", __func__, (unsigned long long) tsf, (unsigned long long)(tsf - lastrx), (unsigned long long) lastrx, bmisstimeout); /* * Workaround phantom bmiss interrupts by sanity-checking * the time of our last rx'd frame. If it is within the * beacon miss interval then ignore the interrupt. If it's * truly a bmiss we'll get another interrupt soon and that'll * be dispatched up for processing. */ if (tsf - lastrx > bmisstimeout) ATH_VAP(vap)->av_bmiss(vap); else sc->sc_stats.ast_bmiss_phantom++; } static void ath_bmiss_proc(void *arg, int pending) { struct ath_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; DPRINTF(sc, ATH_DEBUG_ANY, "%s: pending %u\n", __func__, pending); ieee80211_beacon_miss(ifp->if_l2com); } /* * Convert net80211 channel to a HAL channel with the flags * constrained to reflect the current operating mode and * the frequency possibly mapped for GSM channels. */ static void ath_mapchan(HAL_CHANNEL *hc, const struct ieee80211_channel *chan) { #define N(a) (sizeof(a) / sizeof(a[0])) static const u_int modeflags[IEEE80211_MODE_MAX] = { 0, /* IEEE80211_MODE_AUTO */ CHANNEL_A, /* IEEE80211_MODE_11A */ CHANNEL_B, /* IEEE80211_MODE_11B */ CHANNEL_PUREG, /* IEEE80211_MODE_11G */ 0, /* IEEE80211_MODE_FH */ CHANNEL_108A, /* IEEE80211_MODE_TURBO_A */ CHANNEL_108G, /* IEEE80211_MODE_TURBO_G */ CHANNEL_ST, /* IEEE80211_MODE_STURBO_A */ CHANNEL_A, /* IEEE80211_MODE_11NA */ CHANNEL_PUREG, /* IEEE80211_MODE_11NG */ }; enum ieee80211_phymode mode = ieee80211_chan2mode(chan); KASSERT(mode < N(modeflags), ("unexpected phy mode %u", mode)); KASSERT(modeflags[mode] != 0, ("mode %u undefined", mode)); hc->channelFlags = modeflags[mode]; if (IEEE80211_IS_CHAN_HALF(chan)) hc->channelFlags |= CHANNEL_HALF; if (IEEE80211_IS_CHAN_QUARTER(chan)) hc->channelFlags |= CHANNEL_QUARTER; if (IEEE80211_IS_CHAN_HT20(chan)) hc->channelFlags |= CHANNEL_HT20; if (IEEE80211_IS_CHAN_HT40D(chan)) hc->channelFlags |= CHANNEL_HT40MINUS; if (IEEE80211_IS_CHAN_HT40U(chan)) hc->channelFlags |= CHANNEL_HT40PLUS; hc->channel = IEEE80211_IS_CHAN_GSM(chan) ? 2422 + (922 - chan->ic_freq) : chan->ic_freq; #undef N } /* * Handle TKIP MIC setup to deal hardware that doesn't do MIC * calcs together with WME. If necessary disable the crypto * hardware and mark the 802.11 state so keys will be setup * with the MIC work done in software. 
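 * Concretely: when WME is enabled on such parts the h/w MIC is
 * turned off and IEEE80211_CRYPTO_TKIPMIC is cleared so net80211
 * does the Michael MIC in software; when WME is disabled the h/w
 * MIC is re-enabled and the capability restored.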
*/ static void ath_settkipmic(struct ath_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; if ((ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP) && !sc->sc_wmetkipmic) { if (ic->ic_flags & IEEE80211_F_WME) { ath_hal_settkipmic(sc->sc_ah, AH_FALSE); ic->ic_cryptocaps &= ~IEEE80211_CRYPTO_TKIPMIC; } else { ath_hal_settkipmic(sc->sc_ah, AH_TRUE); ic->ic_cryptocaps |= IEEE80211_CRYPTO_TKIPMIC; } } } static void ath_init(void *arg) { struct ath_softc *sc = (struct ath_softc *) arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ath_hal *ah = sc->sc_ah; HAL_STATUS status; DPRINTF(sc, ATH_DEBUG_ANY, "%s: if_flags 0x%x\n", __func__, ifp->if_flags); ATH_LOCK(sc); /* * Stop anything previously setup. This is safe * whether this is the first time through or not. */ ath_stop_locked(ifp); /* * The basic interface to setting the hardware in a good * state is ``reset''. On return the hardware is known to * be powered up and with interrupts disabled. This must * be followed by initialization of the appropriate bits * and then setup of the interrupt mask. */ ath_mapchan(&sc->sc_curchan, ic->ic_curchan); ath_settkipmic(sc); if (!ath_hal_reset(ah, sc->sc_opmode, &sc->sc_curchan, AH_FALSE, &status)) { if_printf(ifp, "unable to reset hardware; hal status %u\n", status); ATH_UNLOCK(sc); return; } ath_chan_change(sc, ic->ic_curchan); /* * Likewise this is set during reset so update * state cached in the driver. */ sc->sc_diversity = ath_hal_getdiversity(ah); sc->sc_calinterval = 1; sc->sc_caltries = 0; /* * Setup the hardware after reset: the key cache * is filled as needed and the receive engine is * set going. Frame transmit is handled entirely * in the frame output path; there's nothing to do * here except setup the interrupt mask. */ if (ath_startrecv(sc) != 0) { if_printf(ifp, "unable to start recv logic\n"); ATH_UNLOCK(sc); return; } /* * Enable interrupts. */ sc->sc_imask = HAL_INT_RX | HAL_INT_TX | HAL_INT_RXEOL | HAL_INT_RXORN | HAL_INT_FATAL | HAL_INT_GLOBAL; /* * Enable MIB interrupts when there are hardware phy counters. * Note we only do this (at the moment) for station mode. */ if (sc->sc_needmib && ic->ic_opmode == IEEE80211_M_STA) sc->sc_imask |= HAL_INT_MIB; ifp->if_drv_flags |= IFF_DRV_RUNNING; ath_hal_intrset(ah, sc->sc_imask); ATH_UNLOCK(sc); #ifdef ATH_TX99_DIAG if (sc->sc_tx99 != NULL) sc->sc_tx99->start(sc->sc_tx99); else #endif ieee80211_start_all(ic); /* start all vap's */ } static void ath_stop_locked(struct ifnet *ifp) { struct ath_softc *sc = ifp->if_softc; struct ath_hal *ah = sc->sc_ah; DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid %u if_flags 0x%x\n", __func__, sc->sc_invalid, ifp->if_flags); ATH_LOCK_ASSERT(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { /* * Shutdown the hardware and driver: * reset 802.11 state machine * turn off timers * disable interrupts * turn off the radio * clear transmit machinery * clear receive machinery * drain and release tx queues * reclaim beacon resources * power down hardware * * Note that some of this work is not possible if the * hardware is gone (invalid). 
*/ #ifdef ATH_TX99_DIAG if (sc->sc_tx99 != NULL) sc->sc_tx99->stop(sc->sc_tx99); #endif ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ifp->if_timer = 0; if (!sc->sc_invalid) { if (sc->sc_softled) { callout_stop(&sc->sc_ledtimer); ath_hal_gpioset(ah, sc->sc_ledpin, !sc->sc_ledon); sc->sc_blinking = 0; } ath_hal_intrset(ah, 0); } ath_draintxq(sc); if (!sc->sc_invalid) { ath_stoprecv(sc); ath_hal_phydisable(ah); } else sc->sc_rxlink = NULL; ath_beacon_free(sc); /* XXX not needed */ } } static void ath_stop(struct ifnet *ifp) { struct ath_softc *sc = ifp->if_softc; ATH_LOCK(sc); ath_stop_locked(ifp); ATH_UNLOCK(sc); } /* * Reset the hardware w/o losing operational state. This is * basically a more efficient way of doing ath_stop, ath_init, * followed by state transitions to the current 802.11 * operational state. Used to recover from various errors and * to reset or reload hardware state. */ static int ath_reset(struct ifnet *ifp) { struct ath_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct ath_hal *ah = sc->sc_ah; HAL_STATUS status; /* * Convert to a HAL channel description with the flags * constrained to reflect the current operating mode. */ ath_mapchan(&sc->sc_curchan, ic->ic_curchan); ath_hal_intrset(ah, 0); /* disable interrupts */ ath_draintxq(sc); /* stop xmit side */ ath_stoprecv(sc); /* stop recv side */ ath_settkipmic(sc); /* configure TKIP MIC handling */ /* NB: indicate channel change so we do a full reset */ if (!ath_hal_reset(ah, sc->sc_opmode, &sc->sc_curchan, AH_TRUE, &status)) if_printf(ifp, "%s: unable to reset hardware; hal status %u\n", __func__, status); sc->sc_diversity = ath_hal_getdiversity(ah); sc->sc_calinterval = 1; sc->sc_caltries = 0; if (ath_startrecv(sc) != 0) /* restart recv */ if_printf(ifp, "%s: unable to start recv logic\n", __func__); /* * We may be doing a reset in response to an ioctl * that changes the channel so update any state that * might change as a result. */ ath_chan_change(sc, ic->ic_curchan); if (sc->sc_beacons) ath_beacon_config(sc, NULL); /* restart beacons */ ath_hal_intrset(ah, sc->sc_imask); ath_start(ifp); /* restart xmit */ return 0; } static int ath_reset_vap(struct ieee80211vap *vap, u_long cmd) { return ath_reset(vap->iv_ic->ic_ifp); } static int ath_ff_always(struct ath_txq *txq, struct ath_buf *bf) { return 0; } #if 0 static int ath_ff_ageflushtestdone(struct ath_txq *txq, struct ath_buf *bf) { return (txq->axq_curage - bf->bf_age) < ATH_FF_STAGEMAX; } #endif /* * Flush FF staging queue. */ static void ath_ff_stageq_flush(struct ath_softc *sc, struct ath_txq *txq, int (*ath_ff_flushdonetest)(struct ath_txq *txq, struct ath_buf *bf)) { struct ath_buf *bf; struct ieee80211_node *ni; int pktlen, pri; for (;;) { ATH_TXQ_LOCK(txq); /* * Go from the back (oldest) to front so we can * stop early based on the age of the entry. 
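 * (A staged entry is considered flushable once txq->axq_curage has
 * advanced at least ATH_FF_STAGEMAX past the bf_age recorded when
 * it was queued; cf. ath_ff_ageflushtestdone above.)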
*/ bf = TAILQ_LAST(&txq->axq_stageq, axq_headtype); if (bf == NULL || ath_ff_flushdonetest(txq, bf)) { ATH_TXQ_UNLOCK(txq); break; } ni = bf->bf_node; pri = M_WME_GETAC(bf->bf_m); KASSERT(ATH_NODE(ni)->an_ff_buf[pri], ("no bf on staging queue %p", bf)); ATH_NODE(ni)->an_ff_buf[pri] = NULL; TAILQ_REMOVE(&txq->axq_stageq, bf, bf_stagelist); ATH_TXQ_UNLOCK(txq); DPRINTF(sc, ATH_DEBUG_FF, "%s: flush frame, age %u\n", __func__, bf->bf_age); sc->sc_stats.ast_ff_flush++; /* encap and xmit */ bf->bf_m = ieee80211_encap(ni, bf->bf_m); if (bf->bf_m == NULL) { DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF, "%s: discard, encapsulation failure\n", __func__); sc->sc_stats.ast_tx_encap++; goto bad; } pktlen = bf->bf_m->m_pkthdr.len; /* NB: don't reference below */ if (ath_tx_start(sc, ni, bf, bf->bf_m) == 0) { #if 0 /*XXX*/ ifp->if_opackets++; #endif continue; } bad: if (ni != NULL) ieee80211_free_node(ni); bf->bf_node = NULL; if (bf->bf_m != NULL) { m_freem(bf->bf_m); bf->bf_m = NULL; } ATH_TXBUF_LOCK(sc); STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); ATH_TXBUF_UNLOCK(sc); } } static __inline u_int32_t ath_ff_approx_txtime(struct ath_softc *sc, struct ath_node *an, struct mbuf *m) { struct ieee80211com *ic = sc->sc_ifp->if_l2com; u_int32_t framelen; struct ath_buf *bf; /* * Approximate the frame length to be transmitted. A swag to add * the following maximal values to the skb payload: * - 32: 802.11 encap + CRC * - 24: encryption overhead (if wep bit) * - 4 + 6: fast-frame header and padding * - 16: 2 LLC FF tunnel headers * - 14: 1 802.3 FF tunnel header (skb already accounts for 2nd) */ framelen = m->m_pkthdr.len + 32 + 4 + 6 + 16 + 14; if (ic->ic_flags & IEEE80211_F_PRIVACY) framelen += 24; bf = an->an_ff_buf[M_WME_GETAC(m)]; if (bf != NULL) framelen += bf->bf_m->m_pkthdr.len; return ath_hal_computetxtime(sc->sc_ah, sc->sc_currates, framelen, sc->sc_lastdatarix, AH_FALSE); } /* * Determine if a data frame may be aggregated via ff tunnelling. * Note the caller is responsible for checking if the destination * supports fast frames. * * NB: allowing EAPOL frames to be aggregated with other unicast traffic. * Do 802.1x EAPOL frames proceed in the clear? Then they couldn't * be aggregated with other types of frames when encryption is on? * * NB: assumes lock on an_ff_buf effectively held by txq lock mechanism. */ static __inline int ath_ff_can_aggregate(struct ath_softc *sc, struct ath_node *an, struct mbuf *m, int *flushq) { struct ieee80211com *ic = sc->sc_ifp->if_l2com; struct ath_txq *txq; u_int32_t txoplimit; u_int pri; *flushq = 0; /* * If there is no frame to combine with and the txq has * fewer frames than the minimum required; then do not * attempt to aggregate this frame. */ pri = M_WME_GETAC(m); txq = sc->sc_ac2q[pri]; if (an->an_ff_buf[pri] == NULL && txq->axq_depth < sc->sc_fftxqmin) return 0; /* * When not in station mode never aggregate a multicast * frame; this insures, for example, that a combined frame * does not require multiple encryption keys when using * 802.1x/WPA. */ if (ic->ic_opmode != IEEE80211_M_STA && ETHER_IS_MULTICAST(mtod(m, struct ether_header *)->ether_dhost)) return 0; /* * Consult the max bursting interval to insure a combined * frame fits within the TxOp window. 
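 * The WME TXOP limit is kept in 32us units; IEEE80211_TXOP_TO_US()
 * converts it to microseconds for comparison against the airtime
 * estimate from ath_ff_approx_txtime().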
*/ txoplimit = IEEE80211_TXOP_TO_US( ic->ic_wme.wme_chanParams.cap_wmeParams[pri].wmep_txopLimit); if (txoplimit != 0 && ath_ff_approx_txtime(sc, an, m) > txoplimit) { DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF, "%s: FF TxOp violation\n", __func__); if (an->an_ff_buf[pri] != NULL) *flushq = 1; return 0; } return 1; /* try to aggregate */ } /* * Check if the supplied frame can be partnered with an existing * or pending frame. Return a reference to any frame that should be * sent on return; otherwise return NULL. */ static struct mbuf * ath_ff_check(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf, struct mbuf *m, struct ieee80211_node *ni) { struct ath_node *an = ATH_NODE(ni); struct ath_buf *bfstaged; int ff_flush, pri; /* * Check if the supplied frame can be aggregated. * * NB: we use the txq lock to protect references to * an->an_ff_txbuf in ath_ff_can_aggregate(). */ ATH_TXQ_LOCK(txq); pri = M_WME_GETAC(m); if (ath_ff_can_aggregate(sc, an, m, &ff_flush)) { struct ath_buf *bfstaged = an->an_ff_buf[pri]; if (bfstaged != NULL) { /* * A frame is available for partnering; remove * it, chain it to this one, and encapsulate. */ an->an_ff_buf[pri] = NULL; TAILQ_REMOVE(&txq->axq_stageq, bfstaged, bf_stagelist); ATH_TXQ_UNLOCK(txq); /* * Chain mbufs and add FF magic. */ DPRINTF(sc, ATH_DEBUG_FF, "[%s] aggregate fast-frame, age %u\n", ether_sprintf(ni->ni_macaddr), txq->axq_curage); m->m_nextpkt = NULL; bfstaged->bf_m->m_nextpkt = m; m = bfstaged->bf_m; bfstaged->bf_m = NULL; m->m_flags |= M_FF; /* * Release the node reference held while * the packet sat on an_ff_buf[] */ bfstaged->bf_node = NULL; ieee80211_free_node(ni); /* * Return bfstaged to the free list. */ ATH_TXBUF_LOCK(sc); STAILQ_INSERT_TAIL(&sc->sc_txbuf, bfstaged, bf_list); ATH_TXBUF_UNLOCK(sc); return m; /* ready to go */ } else { /* * No frame available, queue this frame to wait * for a partner. Note that we hold the buffer * and a reference to the node; we need the * buffer in particular so we're certain we * can flush the frame at a later time. */ DPRINTF(sc, ATH_DEBUG_FF, "[%s] stage fast-frame, age %u\n", ether_sprintf(ni->ni_macaddr), txq->axq_curage); bf->bf_m = m; bf->bf_node = ni; /* NB: held reference */ bf->bf_age = txq->axq_curage; an->an_ff_buf[pri] = bf; TAILQ_INSERT_HEAD(&txq->axq_stageq, bf, bf_stagelist); ATH_TXQ_UNLOCK(txq); return NULL; /* consumed */ } } /* * Frame could not be aggregated, it needs to be returned * to the caller for immediate transmission. In addition * we check if we should first flush a frame from the * staging queue before sending this one. * * NB: ath_ff_can_aggregate only marks ff_flush if a frame * is present to flush. */ if (ff_flush) { int pktlen; bfstaged = an->an_ff_buf[pri]; an->an_ff_buf[pri] = NULL; TAILQ_REMOVE(&txq->axq_stageq, bfstaged, bf_stagelist); ATH_TXQ_UNLOCK(txq); DPRINTF(sc, ATH_DEBUG_FF, "[%s] flush staged frame\n", ether_sprintf(an->an_node.ni_macaddr)); /* encap and xmit */ bfstaged->bf_m = ieee80211_encap(ni, bfstaged->bf_m); if (bfstaged->bf_m == NULL) { DPRINTF(sc, ATH_DEBUG_XMIT | ATH_DEBUG_FF, "%s: discard, encap failure\n", __func__); sc->sc_stats.ast_tx_encap++; goto ff_flushbad; } pktlen = bfstaged->bf_m->m_pkthdr.len; if (ath_tx_start(sc, ni, bfstaged, bfstaged->bf_m)) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: discard, xmit failure\n", __func__); ff_flushbad: /* * Unable to transmit frame that was on the staging * queue. Reclaim the node reference and other * resources. 
*/ if (ni != NULL) ieee80211_free_node(ni); bfstaged->bf_node = NULL; if (bfstaged->bf_m != NULL) { m_freem(bfstaged->bf_m); bfstaged->bf_m = NULL; } ATH_TXBUF_LOCK(sc); STAILQ_INSERT_TAIL(&sc->sc_txbuf, bfstaged, bf_list); ATH_TXBUF_UNLOCK(sc); } else { #if 0 ifp->if_opackets++; #endif } } else { if (an->an_ff_buf[pri] != NULL) { /* * XXX: out-of-order condition only occurs for AP * mode and multicast. There may be no valid way * to get this condition. */ DPRINTF(sc, ATH_DEBUG_FF, "[%s] out-of-order frame\n", ether_sprintf(an->an_node.ni_macaddr)); /* XXX stat */ } ATH_TXQ_UNLOCK(txq); } return m; } /* * Cleanup driver resources when we run out of buffers * while processing fragments; return the tx buffers * allocated and drop node references. */ static void ath_txfrag_cleanup(struct ath_softc *sc, ath_bufhead *frags, struct ieee80211_node *ni) { struct ath_buf *bf, *next; ATH_TXBUF_LOCK_ASSERT(sc); STAILQ_FOREACH_SAFE(bf, frags, bf_list, next) { /* NB: bf assumed clean */ STAILQ_REMOVE_HEAD(frags, bf_list); STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); ieee80211_node_decref(ni); } } /* * Setup xmit of a fragmented frame. Allocate a buffer * for each frag and bump the node reference count to * reflect the held reference to be setup by ath_tx_start. */ static int ath_txfrag_setup(struct ath_softc *sc, ath_bufhead *frags, struct mbuf *m0, struct ieee80211_node *ni) { struct mbuf *m; struct ath_buf *bf; ATH_TXBUF_LOCK(sc); for (m = m0->m_nextpkt; m != NULL; m = m->m_nextpkt) { bf = STAILQ_FIRST(&sc->sc_txbuf); if (bf == NULL) { /* out of buffers, cleanup */ ath_txfrag_cleanup(sc, frags, ni); break; } STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list); ieee80211_node_incref(ni); STAILQ_INSERT_TAIL(frags, bf, bf_list); } ATH_TXBUF_UNLOCK(sc); return !STAILQ_EMPTY(frags); } static void ath_start(struct ifnet *ifp) { struct ath_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211_node *ni; struct ath_buf *bf; struct mbuf *m, *next; struct ath_txq *txq; ath_bufhead frags; int pri; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) return; for (;;) { /* * Grab a TX buffer and associated resources. */ ATH_TXBUF_LOCK(sc); bf = STAILQ_FIRST(&sc->sc_txbuf); if (bf != NULL) STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list); ATH_TXBUF_UNLOCK(sc); if (bf == NULL) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: out of xmit buffers\n", __func__); sc->sc_stats.ast_tx_qstop++; ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } IFQ_DEQUEUE(&ifp->if_snd, m); if (m == NULL) { ATH_TXBUF_LOCK(sc); STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); ATH_TXBUF_UNLOCK(sc); break; } STAILQ_INIT(&frags); ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; pri = M_WME_GETAC(m); txq = sc->sc_ac2q[pri]; if (ni->ni_ath_flags & IEEE80211_NODE_FF) { /* * Check queue length; if too deep drop this * frame (tail drop considered good). */ if (txq->axq_depth >= sc->sc_fftxqmax) { DPRINTF(sc, ATH_DEBUG_FF, "[%s] tail drop on q %u depth %u\n", ether_sprintf(ni->ni_macaddr), txq->axq_qnum, txq->axq_depth); sc->sc_stats.ast_tx_qfull++; m_freem(m); goto reclaim; } m = ath_ff_check(sc, txq, bf, m, ni); if (m == NULL) { /* NB: ni ref & bf held on stageq */ continue; } } ifp->if_opackets++; /* * Encapsulate the packet in prep for transmission. */ m = ieee80211_encap(ni, m); if (m == NULL) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: encapsulation failure\n", __func__); sc->sc_stats.ast_tx_encap++; goto bad; } /* * Check for fragmentation. 
If this frame * has been broken up verify we have enough * buffers to send all the fragments so all * go out or none... */ if ((m->m_flags & M_FRAG) && !ath_txfrag_setup(sc, &frags, m, ni)) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: out of txfrag buffers\n", __func__); ic->ic_stats.is_tx_nobuf++; /* XXX */ ath_freetx(m); goto bad; } nextfrag: /* * Pass the frame to the h/w for transmission. * Fragmented frames have each frag chained together * with m_nextpkt. We know there are sufficient ath_buf's * to send all the frags because of work done by * ath_txfrag_setup. We leave m_nextpkt set while * calling ath_tx_start so it can use it to extend the * the tx duration to cover the subsequent frag and * so it can reclaim all the mbufs in case of an error; * ath_tx_start clears m_nextpkt once it commits to * handing the frame to the hardware. */ next = m->m_nextpkt; if (ath_tx_start(sc, ni, bf, m)) { bad: ifp->if_oerrors++; reclaim: bf->bf_m = NULL; bf->bf_node = NULL; ATH_TXBUF_LOCK(sc); STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); ath_txfrag_cleanup(sc, &frags, ni); ATH_TXBUF_UNLOCK(sc); if (ni != NULL) ieee80211_free_node(ni); continue; } if (next != NULL) { /* * Beware of state changing between frags. * XXX check sta power-save state? */ if (ni->ni_vap->iv_state != IEEE80211_S_RUN) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: flush fragmented packet, state %s\n", __func__, ieee80211_state_name[ni->ni_vap->iv_state]); ath_freetx(next); goto reclaim; } m = next; bf = STAILQ_FIRST(&frags); KASSERT(bf != NULL, ("no buf for txfrag")); STAILQ_REMOVE_HEAD(&frags, bf_list); goto nextfrag; } ifp->if_timer = 5; #if 0 /* * Flush stale frames from the fast-frame staging queue. */ if (ic->ic_opmode != IEEE80211_M_STA) ath_ff_stageq_flush(sc, txq, ath_ff_ageflushtestdone); #endif } } static int ath_media_change(struct ifnet *ifp) { int error = ieee80211_media_change(ifp); /* NB: only the fixed rate can change and that doesn't need a reset */ return (error == ENETRESET ? 0 : error); } #ifdef ATH_DEBUG static void ath_keyprint(struct ath_softc *sc, const char *tag, u_int ix, const HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN]) { static const char *ciphers[] = { "WEP", "AES-OCB", "AES-CCM", "CKIP", "TKIP", "CLR", }; int i, n; printf("%s: [%02u] %-7s ", tag, ix, ciphers[hk->kv_type]); for (i = 0, n = hk->kv_len; i < n; i++) printf("%02x", hk->kv_val[i]); printf(" mac %s", ether_sprintf(mac)); if (hk->kv_type == HAL_CIPHER_TKIP) { printf(" %s ", sc->sc_splitmic ? "mic" : "rxmic"); for (i = 0; i < sizeof(hk->kv_mic); i++) printf("%02x", hk->kv_mic[i]); if (!sc->sc_splitmic) { printf(" txmic "); for (i = 0; i < sizeof(hk->kv_txmic); i++) printf("%02x", hk->kv_txmic[i]); } } printf("\n"); } #endif /* * Set a TKIP key into the hardware. This handles the * potential distribution of key state to multiple key * cache slots for TKIP. */ static int ath_keyset_tkip(struct ath_softc *sc, const struct ieee80211_key *k, HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN]) { #define IEEE80211_KEY_XR (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV) static const u_int8_t zerobssid[IEEE80211_ADDR_LEN]; struct ath_hal *ah = sc->sc_ah; KASSERT(k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP, ("got a non-TKIP key, cipher %u", k->wk_cipher->ic_cipher)); if ((k->wk_flags & IEEE80211_KEY_XR) == IEEE80211_KEY_XR) { if (sc->sc_splitmic) { /* * TX key goes at first index, RX key at the rx index. * The hal handles the MIC keys at index+64. 
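 * So for a TKIP key at slot i with split MIC: tx key at i (tx MIC
 * at i+64) and rx key at i+32 (rx MIC at i+32+64), matching the
 * slots reserved by key_alloc_2pair().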
*/ memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_mic)); KEYPRINTF(sc, k->wk_keyix, hk, zerobssid); if (!ath_hal_keyset(ah, k->wk_keyix, hk, zerobssid)) return 0; memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic)); KEYPRINTF(sc, k->wk_keyix+32, hk, mac); /* XXX delete tx key on failure? */ return ath_hal_keyset(ah, k->wk_keyix+32, hk, mac); } else { /* * Room for both TX+RX MIC keys in one key cache * slot, just set key at the first index; the hal * will handle the rest. */ memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic)); memcpy(hk->kv_txmic, k->wk_txmic, sizeof(hk->kv_txmic)); KEYPRINTF(sc, k->wk_keyix, hk, mac); return ath_hal_keyset(ah, k->wk_keyix, hk, mac); } } else if (k->wk_flags & IEEE80211_KEY_XMIT) { if (sc->sc_splitmic) { /* * NB: must pass MIC key in expected location when * the keycache only holds one MIC key per entry. */ memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_txmic)); } else memcpy(hk->kv_txmic, k->wk_txmic, sizeof(hk->kv_txmic)); KEYPRINTF(sc, k->wk_keyix, hk, mac); return ath_hal_keyset(ah, k->wk_keyix, hk, mac); } else if (k->wk_flags & IEEE80211_KEY_RECV) { memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic)); KEYPRINTF(sc, k->wk_keyix, hk, mac); return ath_hal_keyset(ah, k->wk_keyix, hk, mac); } return 0; #undef IEEE80211_KEY_XR } /* * Set a net80211 key into the hardware. This handles the * potential distribution of key state to multiple key * cache slots for TKIP with hardware MIC support. */ static int ath_keyset(struct ath_softc *sc, const struct ieee80211_key *k, struct ieee80211_node *bss) { #define N(a) (sizeof(a)/sizeof(a[0])) static const u_int8_t ciphermap[] = { HAL_CIPHER_WEP, /* IEEE80211_CIPHER_WEP */ HAL_CIPHER_TKIP, /* IEEE80211_CIPHER_TKIP */ HAL_CIPHER_AES_OCB, /* IEEE80211_CIPHER_AES_OCB */ HAL_CIPHER_AES_CCM, /* IEEE80211_CIPHER_AES_CCM */ (u_int8_t) -1, /* 4 is not allocated */ HAL_CIPHER_CKIP, /* IEEE80211_CIPHER_CKIP */ HAL_CIPHER_CLR, /* IEEE80211_CIPHER_NONE */ }; struct ath_hal *ah = sc->sc_ah; const struct ieee80211_cipher *cip = k->wk_cipher; u_int8_t gmac[IEEE80211_ADDR_LEN]; const u_int8_t *mac; HAL_KEYVAL hk; memset(&hk, 0, sizeof(hk)); /* * Software crypto uses a "clear key" so non-crypto * state kept in the key cache are maintained and * so that rx frames have an entry to match. */ if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) { KASSERT(cip->ic_cipher < N(ciphermap), ("invalid cipher type %u", cip->ic_cipher)); hk.kv_type = ciphermap[cip->ic_cipher]; hk.kv_len = k->wk_keylen; memcpy(hk.kv_val, k->wk_key, k->wk_keylen); } else hk.kv_type = HAL_CIPHER_CLR; if ((k->wk_flags & IEEE80211_KEY_GROUP) && sc->sc_mcastkey) { /* * Group keys on hardware that supports multicast frame * key search use a mac that is the sender's address with * the high bit set instead of the app-specified address. */ IEEE80211_ADDR_COPY(gmac, bss->ni_macaddr); gmac[0] |= 0x80; mac = gmac; } else mac = k->wk_macaddr; if (hk.kv_type == HAL_CIPHER_TKIP && (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) { return ath_keyset_tkip(sc, k, &hk, mac); } else { KEYPRINTF(sc, k->wk_keyix, &hk, mac); return ath_hal_keyset(ah, k->wk_keyix, &hk, mac); } #undef N } /* * Allocate tx/rx key slots for TKIP. We allocate two slots for * each key, one for decrypt/encrypt and the other for the MIC. 
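 * For illustration only (derived from the allocator below, not new
 * behavior): a pair handed out at index i on a split-mic part occupies
 *   i        tx cipher key        i+64       tx MIC key
 *   i+32     rx cipher key        i+32+64    rx MIC key
 * so the caller receives txkeyix = i and rxkeyix = i+32.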
*/ static u_int16_t key_alloc_2pair(struct ath_softc *sc, ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix) { #define N(a) (sizeof(a)/sizeof(a[0])) u_int i, keyix; KASSERT(sc->sc_splitmic, ("key cache !split")); /* XXX could optimize */ for (i = 0; i < N(sc->sc_keymap)/4; i++) { u_int8_t b = sc->sc_keymap[i]; if (b != 0xff) { /* * One or more slots in this byte are free. */ keyix = i*NBBY; while (b & 1) { again: keyix++; b >>= 1; } /* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */ if (isset(sc->sc_keymap, keyix+32) || isset(sc->sc_keymap, keyix+64) || isset(sc->sc_keymap, keyix+32+64)) { /* full pair unavailable */ /* XXX statistic */ if (keyix == (i+1)*NBBY) { /* no slots were appropriate, advance */ continue; } goto again; } setbit(sc->sc_keymap, keyix); setbit(sc->sc_keymap, keyix+64); setbit(sc->sc_keymap, keyix+32); setbit(sc->sc_keymap, keyix+32+64); DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: key pair %u,%u %u,%u\n", __func__, keyix, keyix+64, keyix+32, keyix+32+64); *txkeyix = keyix; *rxkeyix = keyix+32; return 1; } } DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__); return 0; #undef N } /* * Allocate tx/rx key slots for TKIP. We allocate two slots for * each key, one for decrypt/encrypt and the other for the MIC. */ static u_int16_t key_alloc_pair(struct ath_softc *sc, ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix) { #define N(a) (sizeof(a)/sizeof(a[0])) u_int i, keyix; KASSERT(!sc->sc_splitmic, ("key cache split")); /* XXX could optimize */ for (i = 0; i < N(sc->sc_keymap)/4; i++) { u_int8_t b = sc->sc_keymap[i]; if (b != 0xff) { /* * One or more slots in this byte are free. */ keyix = i*NBBY; while (b & 1) { again: keyix++; b >>= 1; } if (isset(sc->sc_keymap, keyix+64)) { /* full pair unavailable */ /* XXX statistic */ if (keyix == (i+1)*NBBY) { /* no slots were appropriate, advance */ continue; } goto again; } setbit(sc->sc_keymap, keyix); setbit(sc->sc_keymap, keyix+64); DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: key pair %u,%u\n", __func__, keyix, keyix+64); *txkeyix = *rxkeyix = keyix; return 1; } } DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__); return 0; #undef N } /* * Allocate a single key cache slot. */ static int key_alloc_single(struct ath_softc *sc, ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix) { #define N(a) (sizeof(a)/sizeof(a[0])) u_int i, keyix; /* XXX try i,i+32,i+64,i+32+64 to minimize key pair conflicts */ for (i = 0; i < N(sc->sc_keymap); i++) { u_int8_t b = sc->sc_keymap[i]; if (b != 0xff) { /* * One or more slots are free. */ keyix = i*NBBY; while (b & 1) keyix++, b >>= 1; setbit(sc->sc_keymap, keyix); DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: key %u\n", __func__, keyix); *txkeyix = *rxkeyix = keyix; return 1; } } DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of space\n", __func__); return 0; #undef N } /* * Allocate one or more key cache slots for a uniacst key. The * key itself is needed only to identify the cipher. For hardware * TKIP with split cipher+MIC keys we allocate two key cache slot * pairs so that we can setup separate TX and RX MIC keys. Note * that the MIC key for a TKIP key at slot i is assumed by the * hardware to be at slot i+64. This limits TKIP keys to the first * 64 entries. */ static int ath_key_alloc(struct ieee80211vap *vap, const struct ieee80211_key *k, ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix) { struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; /* * Group key allocation must be handled specially for * parts that do not support multicast key cache search * functionality. 
For those parts the key id must match * the h/w key index so lookups find the right key. On * parts w/ the key search facility we install the sender's * mac address (with the high bit set) and let the hardware * find the key w/o using the key id. This is preferred as * it permits us to support multiple users for adhoc and/or * multi-station operation. */ if ((k->wk_flags & IEEE80211_KEY_GROUP) && !sc->sc_mcastkey) { if (!(&vap->iv_nw_keys[0] <= k && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) { /* should not happen */ DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: bogus group key\n", __func__); return 0; } /* * XXX we pre-allocate the global keys so * have no way to check if they've already been allocated. */ *keyix = *rxkeyix = k - vap->iv_nw_keys; return 1; } /* * We allocate two pair for TKIP when using the h/w to do * the MIC. For everything else, including software crypto, * we allocate a single entry. Note that s/w crypto requires * a pass-through slot on the 5211 and 5212. The 5210 does * not support pass-through cache entries and we map all * those requests to slot 0. */ if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { return key_alloc_single(sc, keyix, rxkeyix); } else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP && (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) { if (sc->sc_splitmic) return key_alloc_2pair(sc, keyix, rxkeyix); else return key_alloc_pair(sc, keyix, rxkeyix); } else { return key_alloc_single(sc, keyix, rxkeyix); } } /* * Delete an entry in the key cache allocated by ath_key_alloc. */ static int ath_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) { struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; struct ath_hal *ah = sc->sc_ah; const struct ieee80211_cipher *cip = k->wk_cipher; u_int keyix = k->wk_keyix; DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: delete key %u\n", __func__, keyix); ath_hal_keyreset(ah, keyix); /* * Handle split tx/rx keying required for TKIP with h/w MIC. */ if (cip->ic_cipher == IEEE80211_CIPHER_TKIP && (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic) ath_hal_keyreset(ah, keyix+32); /* RX key */ if (keyix >= IEEE80211_WEP_NKID) { /* * Don't touch keymap entries for global keys so * they are never considered for dynamic allocation. */ clrbit(sc->sc_keymap, keyix); if (cip->ic_cipher == IEEE80211_CIPHER_TKIP && (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) { clrbit(sc->sc_keymap, keyix+64); /* TX key MIC */ if (sc->sc_splitmic) { /* +32 for RX key, +32+64 for RX key MIC */ clrbit(sc->sc_keymap, keyix+32); clrbit(sc->sc_keymap, keyix+32+64); } } } return 1; } /* * Set the key cache contents for the specified key. Key cache * slot(s) must already have been allocated by ath_key_alloc. */ static int ath_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k, const u_int8_t mac[IEEE80211_ADDR_LEN]) { struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; return ath_keyset(sc, k, vap->iv_bss); } /* * Block/unblock tx+rx processing while a key change is done. * We assume the caller serializes key management operations * so we only need to worry about synchronization with other * uses that originate in the driver. 
*/ static void ath_key_update_begin(struct ieee80211vap *vap) { struct ifnet *ifp = vap->iv_ic->ic_ifp; struct ath_softc *sc = ifp->if_softc; DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); taskqueue_block(sc->sc_tq); IF_LOCK(&ifp->if_snd); /* NB: doesn't block mgmt frames */ } static void ath_key_update_end(struct ieee80211vap *vap) { struct ifnet *ifp = vap->iv_ic->ic_ifp; struct ath_softc *sc = ifp->if_softc; DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s:\n", __func__); IF_UNLOCK(&ifp->if_snd); taskqueue_unblock(sc->sc_tq); } /* * Calculate the receive filter according to the * operating mode and state: * * o always accept unicast, broadcast, and multicast traffic * o accept PHY error frames when hardware doesn't have MIB support * to count and we need them for ANI (sta mode only at the moment) * and we are not scanning (ANI is disabled) * NB: only with recent hal's; older hal's add rx filter bits out * of sight and we need to blindly preserve them * o probe request frames are accepted only when operating in * hostap, adhoc, or monitor modes * o enable promiscuous mode * - when in monitor mode * - if interface marked PROMISC (assumes bridge setting is filtered) * o accept beacons: * - when operating in station mode for collecting rssi data when * the station is otherwise quiet, or * - when operating in adhoc mode so the 802.11 layer creates * node table entries for peers, * - when scanning * - when doing s/w beacon miss (e.g. for ap+sta) * - when operating in ap mode in 11g to detect overlapping bss that * require protection * o accept control frames: * - when in monitor mode * XXX BAR frames for 11n * XXX HT protection for 11n */ static u_int32_t ath_calcrxfilter(struct ath_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; u_int32_t rfilt; #if HAL_ABI_VERSION < 0x08011600 rfilt = (ath_hal_getrxfilter(sc->sc_ah) & (HAL_RX_FILTER_PHYRADAR | HAL_RX_FILTER_PHYERR)) | HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST; #else rfilt = HAL_RX_FILTER_UCAST | HAL_RX_FILTER_BCAST | HAL_RX_FILTER_MCAST; if (ic->ic_opmode == IEEE80211_M_STA && !sc->sc_needmib && !sc->sc_scanning) rfilt |= HAL_RX_FILTER_PHYERR; #endif if (ic->ic_opmode != IEEE80211_M_STA) rfilt |= HAL_RX_FILTER_PROBEREQ; if (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & IFF_PROMISC)) rfilt |= HAL_RX_FILTER_PROM; if (ic->ic_opmode == IEEE80211_M_STA || sc->sc_opmode == HAL_M_IBSS || sc->sc_swbmiss || sc->sc_scanning) rfilt |= HAL_RX_FILTER_BEACON; /* * NB: We don't recalculate the rx filter when * ic_protmode changes; otherwise we could do * this only when ic_protmode != NONE. */ if (ic->ic_opmode == IEEE80211_M_HOSTAP && IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) rfilt |= HAL_RX_FILTER_BEACON; if (ic->ic_opmode == IEEE80211_M_MONITOR) rfilt |= HAL_RX_FILTER_CONTROL; DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x, %s if_flags 0x%x\n", __func__, rfilt, ieee80211_opmode_name[ic->ic_opmode], ifp->if_flags); return rfilt; } static void ath_update_promisc(struct ifnet *ifp) { struct ath_softc *sc = ifp->if_softc; u_int32_t rfilt; /* configure rx filter */ rfilt = ath_calcrxfilter(sc); ath_hal_setrxfilter(sc->sc_ah, rfilt); DPRINTF(sc, ATH_DEBUG_MODE, "%s: RX filter 0x%x\n", __func__, rfilt); } static void ath_update_mcast(struct ifnet *ifp) { struct ath_softc *sc = ifp->if_softc; u_int32_t mfilt[2]; /* calculate and install multicast filter */ if ((ifp->if_flags & IFF_ALLMULTI) == 0) { struct ifmultiaddr *ifma; /* * Merge multicast addresses to form the hardware filter. 
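 * Each address is folded to a 6-bit hash by XOR-ing 6-bit slices of its
 * little-endian words (see the pos calculation below); bit (pos % 32)
 * of mfilt[pos / 32] is then set.  E.g. a hypothetical address hashing
 * to pos = 0x21 sets bit 1 of mfilt[1].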
*/ mfilt[0] = mfilt[1] = 0; IF_ADDR_LOCK(ifp); /* XXX need some fiddling to remove? */ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { caddr_t dl; u_int32_t val; u_int8_t pos; /* calculate XOR of eight 6bit values */ dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); val = LE_READ_4(dl + 0); pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; val = LE_READ_4(dl + 3); pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; pos &= 0x3f; mfilt[pos / 32] |= (1 << (pos % 32)); } IF_ADDR_UNLOCK(ifp); } else mfilt[0] = mfilt[1] = ~0; ath_hal_setmcastfilter(sc->sc_ah, mfilt[0], mfilt[1]); DPRINTF(sc, ATH_DEBUG_MODE, "%s: MC filter %08x:%08x\n", __func__, mfilt[0], mfilt[1]); } static void ath_mode_init(struct ath_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ath_hal *ah = sc->sc_ah; u_int32_t rfilt; /* configure rx filter */ rfilt = ath_calcrxfilter(sc); ath_hal_setrxfilter(ah, rfilt); /* configure operational mode */ ath_hal_setopmode(ah); /* * Handle any link-level address change. Note that we only * need to force ic_myaddr; any other addresses are handled * as a byproduct of the ifnet code marking the interface * down then up. * * XXX should get from lladdr instead of arpcom but that's more work */ IEEE80211_ADDR_COPY(ic->ic_myaddr, IF_LLADDR(ifp)); ath_hal_setmac(ah, ic->ic_myaddr); /* calculate and install multicast filter */ ath_update_mcast(ifp); } /* * Set the slot time based on the current setting. */ static void ath_setslottime(struct ath_softc *sc) { struct ieee80211com *ic = sc->sc_ifp->if_l2com; struct ath_hal *ah = sc->sc_ah; u_int usec; if (IEEE80211_IS_CHAN_HALF(ic->ic_curchan)) usec = 13; else if (IEEE80211_IS_CHAN_QUARTER(ic->ic_curchan)) usec = 21; else if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) { /* honor short/long slot time only in 11g */ /* XXX shouldn't honor on pure g or turbo g channel */ if (ic->ic_flags & IEEE80211_F_SHSLOT) usec = HAL_SLOT_TIME_9; else usec = HAL_SLOT_TIME_20; } else usec = HAL_SLOT_TIME_9; DPRINTF(sc, ATH_DEBUG_RESET, "%s: chan %u MHz flags 0x%x %s slot, %u usec\n", __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags, ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", usec); ath_hal_setslottime(ah, usec); sc->sc_updateslot = OK; } /* * Callback from the 802.11 layer to update the * slot time based on the current setting. */ static void ath_updateslot(struct ifnet *ifp) { struct ath_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; /* * When not coordinating the BSS, change the hardware * immediately. For other operation we defer the change * until beacon updates have propagated to the stations. */ if (ic->ic_opmode == IEEE80211_M_HOSTAP) sc->sc_updateslot = UPDATE; else ath_setslottime(sc); } /* * Setup a h/w transmit queue for beacons. */ static int ath_beaconq_setup(struct ath_hal *ah) { HAL_TXQ_INFO qi; memset(&qi, 0, sizeof(qi)); qi.tqi_aifs = HAL_TXQ_USEDEFAULT; qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; /* NB: for dynamic turbo, don't enable any other interrupts */ qi.tqi_qflags = HAL_TXQ_TXDESCINT_ENABLE; return ath_hal_setuptxqueue(ah, HAL_TX_QUEUE_BEACON, &qi); } /* * Setup the transmit queue parameters for the beacon queue. 
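 * (Illustrative: with a hypothetical adhoc logcwmin of 4,
 *  ATH_EXPONENT_TO_VALUE below yields (1<<4)-1 = 15, so tqi_cwmin
 *  becomes 2*15 = 30 slots.)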
*/ static int ath_beaconq_config(struct ath_softc *sc) { #define ATH_EXPONENT_TO_VALUE(v) ((1<<(v))-1) struct ieee80211com *ic = sc->sc_ifp->if_l2com; struct ath_hal *ah = sc->sc_ah; HAL_TXQ_INFO qi; ath_hal_gettxqueueprops(ah, sc->sc_bhalq, &qi); if (ic->ic_opmode == IEEE80211_M_HOSTAP) { /* * Always burst out beacon and CAB traffic. */ qi.tqi_aifs = ATH_BEACON_AIFS_DEFAULT; qi.tqi_cwmin = ATH_BEACON_CWMIN_DEFAULT; qi.tqi_cwmax = ATH_BEACON_CWMAX_DEFAULT; } else { struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[WME_AC_BE]; /* * Adhoc mode; important thing is to use 2x cwmin. */ qi.tqi_aifs = wmep->wmep_aifsn; qi.tqi_cwmin = 2*ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); } if (!ath_hal_settxqueueprops(ah, sc->sc_bhalq, &qi)) { device_printf(sc->sc_dev, "unable to update parameters for " "beacon hardware queue!\n"); return 0; } else { ath_hal_resettxqueue(ah, sc->sc_bhalq); /* push to h/w */ return 1; } #undef ATH_EXPONENT_TO_VALUE } /* * Allocate and setup an initial beacon frame. */ static int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ath_vap *avp = ATH_VAP(vap); struct ath_buf *bf; struct mbuf *m; int error; bf = avp->av_bcbuf; if (bf->bf_m != NULL) { bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); m_freem(bf->bf_m); bf->bf_m = NULL; } if (bf->bf_node != NULL) { ieee80211_free_node(bf->bf_node); bf->bf_node = NULL; } /* * NB: the beacon data buffer must be 32-bit aligned; * we assume the mbuf routines will return us something * with this alignment (perhaps should assert). */ m = ieee80211_beacon_alloc(ni, &avp->av_boff); if (m == NULL) { device_printf(sc->sc_dev, "%s: cannot get mbuf\n", __func__); sc->sc_stats.ast_be_nombuf++; return ENOMEM; } error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "%s: cannot map mbuf, bus_dmamap_load_mbuf_sg returns %d\n", __func__, error); m_freem(m); return error; } /* * Calculate a TSF adjustment factor required for staggered * beacons. Note that we assume the format of the beacon * frame leaves the tstamp field immediately following the * header. */ if (sc->sc_stagbeacons && avp->av_bslot > 0) { uint64_t tsfadjust; struct ieee80211_frame *wh; /* * The beacon interval is in TU's; the TSF is in usecs. * We figure out how many TU's to add to align the timestamp * then convert to TSF units and handle byte swapping before * inserting it in the frame. The hardware will then add this * each time a beacon frame is sent. Note that we align vap's * 1..N and leave vap 0 untouched. This means vap 0 has a * timestamp in one beacon interval while the others get a * timstamp aligned to the next interval. */ tsfadjust = ni->ni_intval * (ATH_BCBUF - avp->av_bslot) / ATH_BCBUF; tsfadjust = htole64(tsfadjust << 10); /* TU -> TSF */ DPRINTF(sc, ATH_DEBUG_BEACON, "%s: %s beacons bslot %d intval %u tsfadjust %llu\n", __func__, sc->sc_stagbeacons ? "stagger" : "burst", avp->av_bslot, ni->ni_intval, le64toh(tsfadjust)); wh = mtod(m, struct ieee80211_frame *); memcpy(&wh[1], &tsfadjust, sizeof(tsfadjust)); } bf->bf_m = m; bf->bf_node = ieee80211_ref_node(ni); return 0; } /* * Setup the beacon frame for transmit. 
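 * (Note, per the rate setup below: beacons always go out at the lowest
 *  rate table entry, rix 0, with the short-preamble bit OR'd in when
 *  USE_SHPREAMBLE() permits.)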
*/ static void ath_beacon_setup(struct ath_softc *sc, struct ath_buf *bf) { #define USE_SHPREAMBLE(_ic) \ (((_ic)->ic_flags & (IEEE80211_F_SHPREAMBLE | IEEE80211_F_USEBARKER))\ == IEEE80211_F_SHPREAMBLE) struct ieee80211_node *ni = bf->bf_node; struct ieee80211com *ic = ni->ni_ic; struct mbuf *m = bf->bf_m; struct ath_hal *ah = sc->sc_ah; struct ath_desc *ds; int flags, antenna; const HAL_RATE_TABLE *rt; u_int8_t rix, rate; DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: m %p len %u\n", __func__, m, m->m_len); /* setup descriptors */ ds = bf->bf_desc; flags = HAL_TXDESC_NOACK; if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) { ds->ds_link = bf->bf_daddr; /* self-linked */ flags |= HAL_TXDESC_VEOL; /* * Let hardware handle antenna switching. */ antenna = sc->sc_txantenna; } else { ds->ds_link = 0; /* * Switch antenna every 4 beacons. * XXX assumes two antenna */ if (sc->sc_txantenna != 0) antenna = sc->sc_txantenna; else if (sc->sc_stagbeacons && sc->sc_nbcnvaps != 0) antenna = ((sc->sc_stats.ast_be_xmit / sc->sc_nbcnvaps) & 4 ? 2 : 1); else antenna = (sc->sc_stats.ast_be_xmit & 4 ? 2 : 1); } KASSERT(bf->bf_nseg == 1, ("multi-segment beacon frame; nseg %u", bf->bf_nseg)); ds->ds_data = bf->bf_segs[0].ds_addr; /* * Calculate rate code. * XXX everything at min xmit rate */ rix = 0; rt = sc->sc_currates; rate = rt->info[rix].rateCode; if (USE_SHPREAMBLE(ic)) rate |= rt->info[rix].shortPreamble; ath_hal_setuptxdesc(ah, ds , m->m_len + IEEE80211_CRC_LEN /* frame length */ , sizeof(struct ieee80211_frame)/* header length */ , HAL_PKT_TYPE_BEACON /* Atheros packet type */ , ni->ni_txpower /* txpower XXX */ , rate, 1 /* series 0 rate/tries */ , HAL_TXKEYIX_INVALID /* no encryption */ , antenna /* antenna mode */ , flags /* no ack, veol for beacons */ , 0 /* rts/cts rate */ , 0 /* rts/cts duration */ ); /* NB: beacon's BufLen must be a multiple of 4 bytes */ ath_hal_filltxdesc(ah, ds , roundup(m->m_len, 4) /* buffer length */ , AH_TRUE /* first segment */ , AH_TRUE /* last segment */ , ds /* first descriptor */ ); #if 0 ath_desc_swap(ds); #endif #undef USE_SHPREAMBLE } static void ath_beacon_update(struct ieee80211vap *vap, int item) { struct ieee80211_beacon_offsets *bo = &ATH_VAP(vap)->av_boff; setbit(bo->bo_flags, item); } /* * Append the contents of src to dst; both queues * are assumed to be locked. */ static void ath_txqmove(struct ath_txq *dst, struct ath_txq *src) { STAILQ_CONCAT(&dst->axq_q, &src->axq_q); dst->axq_link = src->axq_link; src->axq_link = NULL; dst->axq_depth += src->axq_depth; src->axq_depth = 0; } /* * Transmit a beacon frame at SWBA. Dynamic updates to the * frame contents are done as needed and the slot time is * also adjusted based on current state. */ static void ath_beacon_proc(void *arg, int pending) { struct ath_softc *sc = arg; struct ath_hal *ah = sc->sc_ah; struct ieee80211vap *vap; struct ath_buf *bf; int slot, otherant; uint32_t bfaddr; DPRINTF(sc, ATH_DEBUG_BEACON_PROC, "%s: pending %u\n", __func__, pending); /* * Check if the previous beacon has gone out. If * not don't try to post another, skip this period * and wait for the next. Missed beacons indicate * a problem and should not occur. If we miss too * many consecutive beacons reset the device. 
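 * (In the code below "too many" is more than 3 consecutive misses,
 *  i.e. sc_bmisscount > 3, at which point the bstuck task is scheduled
 *  and the chip is reset.)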
*/ if (ath_hal_numtxpending(ah, sc->sc_bhalq) != 0) { sc->sc_bmisscount++; DPRINTF(sc, ATH_DEBUG_BEACON, "%s: missed %u consecutive beacons\n", __func__, sc->sc_bmisscount); if (sc->sc_bmisscount > 3) /* NB: 3 is a guess */ taskqueue_enqueue(sc->sc_tq, &sc->sc_bstucktask); return; } if (sc->sc_bmisscount != 0) { DPRINTF(sc, ATH_DEBUG_BEACON, "%s: resume beacon xmit after %u misses\n", __func__, sc->sc_bmisscount); sc->sc_bmisscount = 0; } if (sc->sc_stagbeacons) { /* staggered beacons */ struct ieee80211com *ic = sc->sc_ifp->if_l2com; uint32_t tsftu; tsftu = ath_hal_gettsf32(ah) >> 10; /* XXX lintval */ slot = ((tsftu % ic->ic_lintval) * ATH_BCBUF) / ic->ic_lintval; vap = sc->sc_bslot[(slot+1) % ATH_BCBUF]; bfaddr = 0; if (vap != NULL && vap->iv_state == IEEE80211_S_RUN) { bf = ath_beacon_generate(sc, vap); if (bf != NULL) bfaddr = bf->bf_daddr; } } else { /* burst'd beacons */ uint32_t *bflink = &bfaddr; for (slot = 0; slot < ATH_BCBUF; slot++) { vap = sc->sc_bslot[slot]; if (vap != NULL && vap->iv_state == IEEE80211_S_RUN) { bf = ath_beacon_generate(sc, vap); if (bf != NULL) { *bflink = bf->bf_daddr; bflink = &bf->bf_desc->ds_link; } } } *bflink = 0; /* terminate list */ } /* * Handle slot time change when a non-ERP station joins/leaves * an 11g network. The 802.11 layer notifies us via callback, * we mark updateslot, then wait one beacon before effecting * the change. This gives associated stations at least one * beacon interval to note the state change. */ /* XXX locking */ if (sc->sc_updateslot == UPDATE) { sc->sc_updateslot = COMMIT; /* commit next beacon */ sc->sc_slotupdate = slot; } else if (sc->sc_updateslot == COMMIT && sc->sc_slotupdate == slot) ath_setslottime(sc); /* commit change to h/w */ /* * Check recent per-antenna transmit statistics and flip * the default antenna if noticeably more frames went out * on the non-default antenna. * XXX assumes 2 anntenae */ if (!sc->sc_diversity && (!sc->sc_stagbeacons || slot == 0)) { otherant = sc->sc_defant & 1 ? 2 : 1; if (sc->sc_ant_tx[otherant] > sc->sc_ant_tx[sc->sc_defant] + 2) ath_setdefantenna(sc, otherant); sc->sc_ant_tx[1] = sc->sc_ant_tx[2] = 0; } if (bfaddr != 0) { /* * Stop any current dma and put the new frame on the queue. * This should never fail since we check above that no frames * are still pending on the queue. */ if (!ath_hal_stoptxdma(ah, sc->sc_bhalq)) { DPRINTF(sc, ATH_DEBUG_ANY, "%s: beacon queue %u did not stop?\n", __func__, sc->sc_bhalq); } /* NB: cabq traffic should already be queued and primed */ ath_hal_puttxbuf(ah, sc->sc_bhalq, bfaddr); ath_hal_txstart(ah, sc->sc_bhalq); sc->sc_stats.ast_be_xmit++; } } static struct ath_buf * ath_beacon_generate(struct ath_softc *sc, struct ieee80211vap *vap) { struct ath_vap *avp = ATH_VAP(vap); struct ath_txq *cabq = sc->sc_cabq; struct ath_buf *bf; struct mbuf *m; int nmcastq, error; KASSERT(vap->iv_state == IEEE80211_S_RUN, ("not running, state %d", vap->iv_state)); KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer")); /* * Update dynamic beacon contents. If this returns * non-zero then we need to remap the memory because * the beacon frame changed size (probably because * of the TIM bitmap). */ bf = avp->av_bcbuf; m = bf->bf_m; nmcastq = avp->av_mcastq.axq_depth; if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, nmcastq)) { /* XXX too conservative? 
*/ bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error != 0) { if_printf(vap->iv_ifp, "%s: bus_dmamap_load_mbuf_sg failed, error %u\n", __func__, error); return NULL; } } if ((avp->av_boff.bo_tim[4] & 1) && cabq->axq_depth) { DPRINTF(sc, ATH_DEBUG_BEACON, "%s: cabq did not drain, mcastq %u cabq %u\n", __func__, nmcastq, cabq->axq_depth); sc->sc_stats.ast_cabq_busy++; if (sc->sc_nvaps > 1 && sc->sc_stagbeacons) { /* * CABQ traffic from a previous vap is still pending. * We must drain the q before this beacon frame goes * out as otherwise this vap's stations will get cab * frames from a different vap. * XXX could be slow causing us to miss DBA */ ath_tx_draintxq(sc, cabq); } } ath_beacon_setup(sc, bf); bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); /* * Enable the CAB queue before the beacon queue to * insure cab frames are triggered by this beacon. */ if (avp->av_boff.bo_tim[4] & 1) { struct ath_hal *ah = sc->sc_ah; /* NB: only at DTIM */ ATH_TXQ_LOCK(cabq); ATH_TXQ_LOCK(&avp->av_mcastq); if (nmcastq) { struct ath_buf *bfm; /* * Move frames from the s/w mcast q to the h/w cab q. * XXX MORE_DATA bit */ bfm = STAILQ_FIRST(&avp->av_mcastq.axq_q); if (cabq->axq_link != NULL) { *cabq->axq_link = bfm->bf_daddr; } else ath_hal_puttxbuf(ah, cabq->axq_qnum, bfm->bf_daddr); ath_txqmove(cabq, &avp->av_mcastq); sc->sc_stats.ast_cabq_xmit += nmcastq; } /* NB: gated by beacon so safe to start here */ ath_hal_txstart(ah, cabq->axq_qnum); ATH_TXQ_UNLOCK(cabq); ATH_TXQ_UNLOCK(&avp->av_mcastq); } return bf; } static void ath_beacon_start_adhoc(struct ath_softc *sc, struct ieee80211vap *vap) { struct ath_vap *avp = ATH_VAP(vap); struct ath_hal *ah = sc->sc_ah; struct ath_buf *bf; struct mbuf *m; int error; KASSERT(avp->av_bcbuf != NULL, ("no beacon buffer")); /* * Update dynamic beacon contents. If this returns * non-zero then we need to remap the memory because * the beacon frame changed size (probably because * of the TIM bitmap). */ bf = avp->av_bcbuf; m = bf->bf_m; if (ieee80211_beacon_update(bf->bf_node, &avp->av_boff, m, 0)) { /* XXX too conservative? */ bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error != 0) { if_printf(vap->iv_ifp, "%s: bus_dmamap_load_mbuf_sg failed, error %u\n", __func__, error); return; } } ath_beacon_setup(sc, bf); bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); /* NB: caller is known to have already stopped tx dma */ ath_hal_puttxbuf(ah, sc->sc_bhalq, bf->bf_daddr); ath_hal_txstart(ah, sc->sc_bhalq); } /* * Reset the hardware after detecting beacons have stopped. */ static void ath_bstuck_proc(void *arg, int pending) { struct ath_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; if_printf(ifp, "stuck beacon; resetting (bmiss count %u)\n", sc->sc_bmisscount); ath_reset(ifp); } /* * Reclaim beacon resources and return buffer to the pool. */ static void ath_beacon_return(struct ath_softc *sc, struct ath_buf *bf) { if (bf->bf_m != NULL) { bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); m_freem(bf->bf_m); bf->bf_m = NULL; } if (bf->bf_node != NULL) { ieee80211_free_node(bf->bf_node); bf->bf_node = NULL; } STAILQ_INSERT_TAIL(&sc->sc_bbuf, bf, bf_list); } /* * Reclaim beacon resources. 
*/ static void ath_beacon_free(struct ath_softc *sc) { struct ath_buf *bf; STAILQ_FOREACH(bf, &sc->sc_bbuf, bf_list) { if (bf->bf_m != NULL) { bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); m_freem(bf->bf_m); bf->bf_m = NULL; } if (bf->bf_node != NULL) { ieee80211_free_node(bf->bf_node); bf->bf_node = NULL; } } } /* * Configure the beacon and sleep timers. * * When operating as an AP this resets the TSF and sets * up the hardware to notify us when we need to issue beacons. * * When operating in station mode this sets up the beacon * timers according to the timestamp of the last received * beacon and the current TSF, configures PCF and DTIM * handling, programs the sleep registers so the hardware * will wakeup in time to receive beacons, and configures * the beacon miss handling so we'll receive a BMISS * interrupt when we stop seeing beacons from the AP * we've associated with. */ static void ath_beacon_config(struct ath_softc *sc, struct ieee80211vap *vap) { #define TSF_TO_TU(_h,_l) \ ((((u_int32_t)(_h)) << 22) | (((u_int32_t)(_l)) >> 10)) #define FUDGE 2 struct ath_hal *ah = sc->sc_ah; struct ieee80211com *ic = sc->sc_ifp->if_l2com; struct ieee80211_node *ni; u_int32_t nexttbtt, intval, tsftu; u_int64_t tsf; if (vap == NULL) vap = TAILQ_FIRST(&ic->ic_vaps); /* XXX */ ni = vap->iv_bss; /* extract tstamp from last beacon and convert to TU */ nexttbtt = TSF_TO_TU(LE_READ_4(ni->ni_tstamp.data + 4), LE_READ_4(ni->ni_tstamp.data)); if (ic->ic_opmode == IEEE80211_M_HOSTAP) { /* * For multi-bss ap support beacons are either staggered * evenly over N slots or burst together. For the former * arrange for the SWBA to be delivered for each slot. * Slots that are not occupied will generate nothing. */ /* NB: the beacon interval is kept internally in TU's */ intval = ni->ni_intval & HAL_BEACON_PERIOD; if (sc->sc_stagbeacons) intval /= ATH_BCBUF; } else { /* NB: the beacon interval is kept internally in TU's */ intval = ni->ni_intval & HAL_BEACON_PERIOD; } if (nexttbtt == 0) /* e.g. for ap mode */ nexttbtt = intval; else if (intval) /* NB: can be 0 for monitor mode */ nexttbtt = roundup(nexttbtt, intval); DPRINTF(sc, ATH_DEBUG_BEACON, "%s: nexttbtt %u intval %u (%u)\n", __func__, nexttbtt, intval, ni->ni_intval); if (ic->ic_opmode == IEEE80211_M_STA && !sc->sc_swbmiss) { HAL_BEACON_STATE bs; int dtimperiod, dtimcount; int cfpperiod, cfpcount; /* * Setup dtim and cfp parameters according to * last beacon we received (which may be none). */ dtimperiod = ni->ni_dtim_period; if (dtimperiod <= 0) /* NB: 0 if not known */ dtimperiod = 1; dtimcount = ni->ni_dtim_count; if (dtimcount >= dtimperiod) /* NB: sanity check */ dtimcount = 0; /* XXX? */ cfpperiod = 1; /* NB: no PCF support yet */ cfpcount = 0; /* * Pull nexttbtt forward to reflect the current * TSF and calculate dtim+cfp state for the result. 
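 * (The TSF is in usecs and a TU is 1024 usecs, hence the >>10 in
 *  TSF_TO_TU; e.g. a TSF of 0x200000 usecs corresponds to TU 0x800.)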
*/ tsf = ath_hal_gettsf64(ah); tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE; do { nexttbtt += intval; if (--dtimcount < 0) { dtimcount = dtimperiod - 1; if (--cfpcount < 0) cfpcount = cfpperiod - 1; } } while (nexttbtt < tsftu); memset(&bs, 0, sizeof(bs)); bs.bs_intval = intval; bs.bs_nexttbtt = nexttbtt; bs.bs_dtimperiod = dtimperiod*intval; bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval; bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod; bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod; bs.bs_cfpmaxduration = 0; #if 0 /* * The 802.11 layer records the offset to the DTIM * bitmap while receiving beacons; use it here to * enable h/w detection of our AID being marked in * the bitmap vector (to indicate frames for us are * pending at the AP). * XXX do DTIM handling in s/w to WAR old h/w bugs * XXX enable based on h/w rev for newer chips */ bs.bs_timoffset = ni->ni_timoff; #endif /* * Calculate the number of consecutive beacons to miss * before taking a BMISS interrupt. * Note that we clamp the result to at most 10 beacons. */ bs.bs_bmissthreshold = vap->iv_bmissthreshold; if (bs.bs_bmissthreshold > 10) bs.bs_bmissthreshold = 10; else if (bs.bs_bmissthreshold <= 0) bs.bs_bmissthreshold = 1; /* * Calculate sleep duration. The configuration is * given in ms. We insure a multiple of the beacon * period is used. Also, if the sleep duration is * greater than the DTIM period then it makes senses * to make it a multiple of that. * * XXX fixed at 100ms */ bs.bs_sleepduration = roundup(IEEE80211_MS_TO_TU(100), bs.bs_intval); if (bs.bs_sleepduration > bs.bs_dtimperiod) bs.bs_sleepduration = roundup(bs.bs_sleepduration, bs.bs_dtimperiod); DPRINTF(sc, ATH_DEBUG_BEACON, "%s: tsf %ju tsf:tu %u intval %u nexttbtt %u dtim %u nextdtim %u bmiss %u sleep %u cfp:period %u maxdur %u next %u timoffset %u\n" , __func__ , tsf, tsftu , bs.bs_intval , bs.bs_nexttbtt , bs.bs_dtimperiod , bs.bs_nextdtim , bs.bs_bmissthreshold , bs.bs_sleepduration , bs.bs_cfpperiod , bs.bs_cfpmaxduration , bs.bs_cfpnext , bs.bs_timoffset ); ath_hal_intrset(ah, 0); ath_hal_beacontimers(ah, &bs); sc->sc_imask |= HAL_INT_BMISS; ath_hal_intrset(ah, sc->sc_imask); } else { ath_hal_intrset(ah, 0); if (nexttbtt == intval) intval |= HAL_BEACON_RESET_TSF; if (ic->ic_opmode == IEEE80211_M_IBSS) { /* * In IBSS mode enable the beacon timers but only * enable SWBA interrupts if we need to manually * prepare beacon frames. Otherwise we use a * self-linked tx descriptor and let the hardware * deal with things. */ intval |= HAL_BEACON_ENA; if (!sc->sc_hasveol) sc->sc_imask |= HAL_INT_SWBA; if ((intval & HAL_BEACON_RESET_TSF) == 0) { /* * Pull nexttbtt forward to reflect * the current TSF. */ tsf = ath_hal_gettsf64(ah); tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE; do { nexttbtt += intval; } while (nexttbtt < tsftu); } ath_beaconq_config(sc); } else if (ic->ic_opmode == IEEE80211_M_HOSTAP) { /* * In AP mode we enable the beacon timers and * SWBA interrupts to prepare beacon frames. */ intval |= HAL_BEACON_ENA; sc->sc_imask |= HAL_INT_SWBA; /* beacon prepare */ ath_beaconq_config(sc); } ath_hal_beaconinit(ah, nexttbtt, intval); sc->sc_bmisscount = 0; ath_hal_intrset(ah, sc->sc_imask); /* * When using a self-linked beacon descriptor in * ibss mode load it once here. 
*/ if (ic->ic_opmode == IEEE80211_M_IBSS && sc->sc_hasveol) ath_beacon_start_adhoc(sc, vap); } sc->sc_syncbeacon = 0; #undef FUDGE #undef TSF_TO_TU } static void ath_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { bus_addr_t *paddr = (bus_addr_t*) arg; KASSERT(error == 0, ("error %u on bus_dma callback", error)); *paddr = segs->ds_addr; } static int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd, ath_bufhead *head, const char *name, int nbuf, int ndesc) { #define DS2PHYS(_dd, _ds) \ ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) struct ifnet *ifp = sc->sc_ifp; struct ath_desc *ds; struct ath_buf *bf; int i, bsize, error; DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA: %u buffers %u desc/buf\n", __func__, name, nbuf, ndesc); dd->dd_name = name; dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc; /* * Setup DMA descriptor area. */ error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ PAGE_SIZE, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ dd->dd_desc_len, /* maxsize */ 1, /* nsegments */ dd->dd_desc_len, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &dd->dd_dmat); if (error != 0) { if_printf(ifp, "cannot allocate %s DMA tag\n", dd->dd_name); return error; } /* allocate descriptors */ error = bus_dmamap_create(dd->dd_dmat, BUS_DMA_NOWAIT, &dd->dd_dmamap); if (error != 0) { if_printf(ifp, "unable to create dmamap for %s descriptors, " "error %u\n", dd->dd_name, error); goto fail0; } error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dd->dd_dmamap); if (error != 0) { if_printf(ifp, "unable to alloc memory for %u %s descriptors, " "error %u\n", nbuf * ndesc, dd->dd_name, error); goto fail1; } error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, dd->dd_desc, dd->dd_desc_len, ath_load_cb, &dd->dd_desc_paddr, BUS_DMA_NOWAIT); if (error != 0) { if_printf(ifp, "unable to map %s descriptors, error %u\n", dd->dd_name, error); goto fail2; } ds = dd->dd_desc; DPRINTF(sc, ATH_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> %p (%lu)\n", __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len, (caddr_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len); /* allocate rx buffers */ bsize = sizeof(struct ath_buf) * nbuf; bf = malloc(bsize, M_ATHDEV, M_NOWAIT | M_ZERO); if (bf == NULL) { if_printf(ifp, "malloc of %s buffers failed, size %u\n", dd->dd_name, bsize); goto fail3; } dd->dd_bufptr = bf; STAILQ_INIT(head); for (i = 0; i < nbuf; i++, bf++, ds += ndesc) { bf->bf_desc = ds; bf->bf_daddr = DS2PHYS(dd, ds); error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &bf->bf_dmamap); if (error != 0) { if_printf(ifp, "unable to create dmamap for %s " "buffer %u, error %u\n", dd->dd_name, i, error); ath_descdma_cleanup(sc, dd, head); return error; } STAILQ_INSERT_TAIL(head, bf, bf_list); } return 0; fail3: bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); fail2: bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); fail1: bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); fail0: bus_dma_tag_destroy(dd->dd_dmat); memset(dd, 0, sizeof(*dd)); return error; #undef DS2PHYS } static void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd, ath_bufhead *head) { struct ath_buf *bf; struct ieee80211_node *ni; bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); bus_dmamap_destroy(dd->dd_dmat, dd->dd_dmamap); 
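/*
 * NB: it is safe to destroy the descriptor DMA tag below before
 * walking the buffer list; the per-buffer dmamaps released there were
 * created against sc->sc_dmat, not dd->dd_dmat.
 */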
bus_dma_tag_destroy(dd->dd_dmat); STAILQ_FOREACH(bf, head, bf_list) { if (bf->bf_m) { m_freem(bf->bf_m); bf->bf_m = NULL; } if (bf->bf_dmamap != NULL) { bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); bf->bf_dmamap = NULL; } ni = bf->bf_node; bf->bf_node = NULL; if (ni != NULL) { /* * Reclaim node reference. */ ieee80211_free_node(ni); } } STAILQ_INIT(head); free(dd->dd_bufptr, M_ATHDEV); memset(dd, 0, sizeof(*dd)); } static int ath_desc_alloc(struct ath_softc *sc) { int error; error = ath_descdma_setup(sc, &sc->sc_rxdma, &sc->sc_rxbuf, "rx", ath_rxbuf, 1); if (error != 0) return error; error = ath_descdma_setup(sc, &sc->sc_txdma, &sc->sc_txbuf, "tx", ath_txbuf, ATH_TXDESC); if (error != 0) { ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); return error; } error = ath_descdma_setup(sc, &sc->sc_bdma, &sc->sc_bbuf, "beacon", ATH_BCBUF, 1); if (error != 0) { ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); return error; } return 0; } static void ath_desc_free(struct ath_softc *sc) { if (sc->sc_bdma.dd_desc_len != 0) ath_descdma_cleanup(sc, &sc->sc_bdma, &sc->sc_bbuf); if (sc->sc_txdma.dd_desc_len != 0) ath_descdma_cleanup(sc, &sc->sc_txdma, &sc->sc_txbuf); if (sc->sc_rxdma.dd_desc_len != 0) ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); } static struct ieee80211_node * -ath_node_alloc(struct ieee80211_node_table *nt) +ath_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) { - struct ieee80211com *ic = nt->nt_ic; + struct ieee80211com *ic = vap->iv_ic; struct ath_softc *sc = ic->ic_ifp->if_softc; const size_t space = sizeof(struct ath_node) + sc->sc_rc->arc_space; struct ath_node *an; an = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO); if (an == NULL) { /* XXX stat+msg */ return NULL; } ath_rate_node_init(sc, an); DPRINTF(sc, ATH_DEBUG_NODE, "%s: an %p\n", __func__, an); return &an->an_node; } static void ath_node_free(struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; struct ath_softc *sc = ic->ic_ifp->if_softc; DPRINTF(sc, ATH_DEBUG_NODE, "%s: ni %p\n", __func__, ni); ath_rate_node_cleanup(sc, ATH_NODE(ni)); sc->sc_node_free(ni); } static void ath_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise) { struct ieee80211com *ic = ni->ni_ic; struct ath_softc *sc = ic->ic_ifp->if_softc; struct ath_hal *ah = sc->sc_ah; HAL_CHANNEL hchan; *rssi = ic->ic_node_getrssi(ni); if (ni->ni_chan != IEEE80211_CHAN_ANYC) { ath_mapchan(&hchan, ni->ni_chan); *noise = ath_hal_getchannoise(ah, &hchan); } else *noise = -95; /* nominally correct */ } static int ath_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf) { struct ath_hal *ah = sc->sc_ah; int error; struct mbuf *m; struct ath_desc *ds; m = bf->bf_m; if (m == NULL) { /* * NB: by assigning a page to the rx dma buffer we * implicitly satisfy the Atheros requirement that * this buffer be cache-line-aligned and sized to be * multiple of the cache line size. Not doing this * causes weird stuff to happen (for the 5210 at least). 
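 * (The m_getcl call below attaches a standard mbuf cluster and sizes
 *  the mbuf to m_ext.ext_size, which satisfies this requirement.)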
*/ m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { DPRINTF(sc, ATH_DEBUG_ANY, "%s: no mbuf/cluster\n", __func__); sc->sc_stats.ast_rx_nombuf++; return ENOMEM; } m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error != 0) { DPRINTF(sc, ATH_DEBUG_ANY, "%s: bus_dmamap_load_mbuf_sg failed; error %d\n", __func__, error); sc->sc_stats.ast_rx_busdma++; m_freem(m); return error; } KASSERT(bf->bf_nseg == 1, ("multi-segment packet; nseg %u", bf->bf_nseg)); bf->bf_m = m; } bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD); /* * Setup descriptors. For receive we always terminate * the descriptor list with a self-linked entry so we'll * not get overrun under high load (as can happen with a * 5212 when ANI processing enables PHY error frames). * * To insure the last descriptor is self-linked we create * each descriptor as self-linked and add it to the end. As * each additional descriptor is added the previous self-linked * entry is ``fixed'' naturally. This should be safe even * if DMA is happening. When processing RX interrupts we * never remove/process the last, self-linked, entry on the * descriptor list. This insures the hardware always has * someplace to write a new frame. */ ds = bf->bf_desc; ds->ds_link = bf->bf_daddr; /* link to self */ ds->ds_data = bf->bf_segs[0].ds_addr; ath_hal_setuprxdesc(ah, ds , m->m_len /* buffer size */ , 0 ); if (sc->sc_rxlink != NULL) *sc->sc_rxlink = bf->bf_daddr; sc->sc_rxlink = &ds->ds_link; return 0; } /* * Extend 15-bit time stamp from rx descriptor to * a full 64-bit TSF using the specified TSF. */ static __inline u_int64_t ath_extend_tsf(u_int32_t rstamp, u_int64_t tsf) { if ((tsf & 0x7fff) < rstamp) tsf -= 0x8000; return ((tsf &~ 0x7fff) | rstamp); } /* * Intercept management frames to collect beacon rssi data * and to do ibss merges. */ static void ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype, int rssi, int noise, u_int32_t rstamp) { struct ieee80211vap *vap = ni->ni_vap; struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; /* * Call up first so subsequent work can use information * potentially stored in the node (e.g. for ibss merge). */ ATH_VAP(vap)->av_recv_mgmt(ni, m, subtype, rssi, noise, rstamp); switch (subtype) { case IEEE80211_FC0_SUBTYPE_BEACON: /* update rssi statistics for use by the hal */ ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi); if (sc->sc_syncbeacon && ni == vap->iv_bss && vap->iv_state == IEEE80211_S_RUN) { /* * Resync beacon timers using the tsf of the beacon * frame we just received. */ ath_beacon_config(sc, vap); } /* fall thru... */ case IEEE80211_FC0_SUBTYPE_PROBE_RESP: if (vap->iv_opmode == IEEE80211_M_IBSS && vap->iv_state == IEEE80211_S_RUN) { u_int64_t tsf = ath_extend_tsf(rstamp, ath_hal_gettsf64(sc->sc_ah)); /* * Handle ibss merge as needed; check the tsf on the * frame before attempting the merge. The 802.11 spec * says the station should change it's bssid to match * the oldest station with the same ssid, where oldest * is determined by the tsf. Note that hardware * reconfiguration happens through callback to * ath_newstate as the state machine will go from * RUN -> RUN when this happens. */ if (le64toh(ni->ni_tstamp.tsf) >= tsf) { DPRINTF(sc, ATH_DEBUG_STATE, "ibss merge, rstamp %u tsf %ju " "tstamp %ju\n", rstamp, (uintmax_t)tsf, (uintmax_t)ni->ni_tstamp.tsf); (void) ieee80211_ibss_merge(ni); } } break; } } /* * Set the default antenna. 
*/ static void ath_setdefantenna(struct ath_softc *sc, u_int antenna) { struct ath_hal *ah = sc->sc_ah; /* XXX block beacon interrupts */ ath_hal_setdefantenna(ah, antenna); if (sc->sc_defant != antenna) sc->sc_stats.ast_ant_defswitch++; sc->sc_defant = antenna; sc->sc_rxotherant = 0; } static int ath_rx_tap(struct ifnet *ifp, struct mbuf *m, const struct ath_rx_status *rs, u_int64_t tsf, int16_t nf) { #define CHAN_HT htole32(CHANNEL_HT20|CHANNEL_HT40PLUS|CHANNEL_HT40MINUS) #define CHAN_HT20 htole32(IEEE80211_CHAN_HT20) #define CHAN_HT40U htole32(IEEE80211_CHAN_HT40U) #define CHAN_HT40D htole32(IEEE80211_CHAN_HT40D) struct ath_softc *sc = ifp->if_softc; u_int8_t rix; /* * Discard anything shorter than an ack or cts. */ if (m->m_pkthdr.len < IEEE80211_ACK_LEN) { DPRINTF(sc, ATH_DEBUG_RECV, "%s: runt packet %d\n", __func__, m->m_pkthdr.len); sc->sc_stats.ast_rx_tooshort++; return 0; } rix = rs->rs_rate; sc->sc_rx_th.wr_rate = sc->sc_hwmap[rix].ieeerate; sc->sc_rx_th.wr_flags = sc->sc_hwmap[rix].rxflags; #if HAL_ABI_VERSION >= 0x07050400 sc->sc_rx_th.wr_chan_flags &= ~CHAN_HT; if (sc->sc_rx_th.wr_rate & 0x80) { /* HT rate */ if ((rs->rs_flags & HAL_RX_2040) == 0) sc->sc_rx_th.wr_chan_flags |= CHAN_HT20; else if (sc->sc_curchan.channelFlags & CHANNEL_HT40PLUS) sc->sc_rx_th.wr_chan_flags |= CHAN_HT40U; else sc->sc_rx_th.wr_chan_flags |= CHAN_HT40D; if ((rs->rs_flags & HAL_RX_GI) == 0) sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTGI; } #endif sc->sc_rx_th.wr_tsf = htole64(ath_extend_tsf(rs->rs_tstamp, tsf)); if (rs->rs_status & HAL_RXERR_CRC) sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_BADFCS; /* XXX propagate other error flags from descriptor */ sc->sc_rx_th.wr_antsignal = rs->rs_rssi + nf; sc->sc_rx_th.wr_antnoise = nf; sc->sc_rx_th.wr_antenna = rs->rs_antenna; bpf_mtap2(ifp->if_bpf, &sc->sc_rx_th, sc->sc_rx_th_len, m); return 1; #undef CHAN_HT20 #undef CHAN_HT40U #undef CHAN_HT40D #undef CHAN_HT } static void ath_handle_micerror(struct ieee80211com *ic, struct ieee80211_frame *wh, int keyix) { struct ieee80211_node *ni; /* XXX recheck MIC to deal w/ chips that lie */ /* XXX discard MIC errors on !data frames */ ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh); if (ni != NULL) { ieee80211_notify_michael_failure(ni->ni_vap, wh, keyix); ieee80211_free_node(ni); } } static void ath_rx_proc(void *arg, int npending) { #define PA2DESC(_sc, _pa) \ ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) struct ath_softc *sc = arg; struct ath_buf *bf; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ath_hal *ah = sc->sc_ah; struct ath_desc *ds; struct ath_rx_status *rs; struct mbuf *m; struct ieee80211_node *ni; int len, type, ngood; u_int phyerr; HAL_STATUS status; int16_t nf; u_int64_t tsf; DPRINTF(sc, ATH_DEBUG_RX_PROC, "%s: pending %u\n", __func__, npending); ngood = 0; nf = ath_hal_getchannoise(ah, &sc->sc_curchan); tsf = ath_hal_gettsf64(ah); do { bf = STAILQ_FIRST(&sc->sc_rxbuf); if (bf == NULL) { /* NB: shouldn't happen */ if_printf(ifp, "%s: no buffer!\n", __func__); break; } m = bf->bf_m; if (m == NULL) { /* NB: shouldn't happen */ /* * If mbuf allocation failed previously there * will be no mbuf; try again to re-populate it. 
*/ /* XXX make debug msg */ if_printf(ifp, "%s: no mbuf!\n", __func__); STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list); goto rx_next; } ds = bf->bf_desc; if (ds->ds_link == bf->bf_daddr) { /* NB: never process the self-linked entry at the end */ break; } /* XXX sync descriptor memory */ /* * Must provide the virtual address of the current * descriptor, the physical address, and the virtual * address of the next descriptor in the h/w chain. * This allows the HAL to look ahead to see if the * hardware is done with a descriptor by checking the * done bit in the following descriptor and the address * of the current descriptor the DMA engine is working * on. All this is necessary because of our use of * a self-linked list to avoid rx overruns. */ rs = &bf->bf_status.ds_rxstat; status = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs); #ifdef ATH_DEBUG if (sc->sc_debug & ATH_DEBUG_RECV_DESC) ath_printrxbuf(bf, 0, status == HAL_OK); #endif if (status == HAL_EINPROGRESS) break; STAILQ_REMOVE_HEAD(&sc->sc_rxbuf, bf_list); if (rs->rs_status != 0) { if (rs->rs_status & HAL_RXERR_CRC) sc->sc_stats.ast_rx_crcerr++; if (rs->rs_status & HAL_RXERR_FIFO) sc->sc_stats.ast_rx_fifoerr++; if (rs->rs_status & HAL_RXERR_PHY) { sc->sc_stats.ast_rx_phyerr++; phyerr = rs->rs_phyerr & 0x1f; sc->sc_stats.ast_rx_phy[phyerr]++; goto rx_error; /* NB: don't count in ierrors */ } if (rs->rs_status & HAL_RXERR_DECRYPT) { /* * Decrypt error. If the error occurred * because there was no hardware key, then * let the frame through so the upper layers * can process it. This is necessary for 5210 * parts which have no way to setup a ``clear'' * key cache entry. * * XXX do key cache faulting */ if (rs->rs_keyix == HAL_RXKEYIX_INVALID) goto rx_accept; sc->sc_stats.ast_rx_badcrypt++; } if (rs->rs_status & HAL_RXERR_MIC) { sc->sc_stats.ast_rx_badmic++; /* * Do minimal work required to hand off * the 802.11 header for notifcation. */ /* XXX frag's and qos frames */ len = rs->rs_datalen; if (len >= sizeof (struct ieee80211_frame)) { bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTREAD); ath_handle_micerror(ic, mtod(m, struct ieee80211_frame *), sc->sc_splitmic ? rs->rs_keyix-32 : rs->rs_keyix); } } ifp->if_ierrors++; rx_error: /* * Cleanup any pending partial frame. */ if (sc->sc_rxpending != NULL) { m_freem(sc->sc_rxpending); sc->sc_rxpending = NULL; } /* * When a tap is present pass error frames * that have been requested. By default we * pass decrypt+mic errors but others may be * interesting (e.g. crc). */ if (bpf_peers_present(ifp->if_bpf) && (rs->rs_status & sc->sc_monpass)) { bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTREAD); /* NB: bpf needs the mbuf length setup */ len = rs->rs_datalen; m->m_pkthdr.len = m->m_len = len; (void) ath_rx_tap(ifp, m, rs, tsf, nf); } /* XXX pass MIC errors up for s/w reclaculation */ goto rx_next; } rx_accept: /* * Sync and unmap the frame. At this point we're * committed to passing the mbuf somewhere so clear * bf_m; this means a new mbuf must be allocated * when the rx descriptor is setup again to receive * another frame. */ bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); bf->bf_m = NULL; len = rs->rs_datalen; m->m_len = len; if (rs->rs_more) { /* * Frame spans multiple descriptors; save * it for the next completed descriptor, it * will be used to construct a jumbogram. 
*/ if (sc->sc_rxpending != NULL) { /* NB: max frame size is currently 2 clusters */ sc->sc_stats.ast_rx_toobig++; m_freem(sc->sc_rxpending); } m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = len; sc->sc_rxpending = m; goto rx_next; } else if (sc->sc_rxpending != NULL) { /* * This is the second part of a jumbogram, * chain it to the first mbuf, adjust the * frame length, and clear the rxpending state. */ sc->sc_rxpending->m_next = m; sc->sc_rxpending->m_pkthdr.len += len; m = sc->sc_rxpending; sc->sc_rxpending = NULL; } else { /* * Normal single-descriptor receive; setup * the rcvif and packet length. */ m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = len; } ifp->if_ipackets++; sc->sc_stats.ast_ant_rx[rs->rs_antenna]++; if (bpf_peers_present(ifp->if_bpf) && !ath_rx_tap(ifp, m, rs, tsf, nf)) { m_freem(m); /* XXX reclaim */ goto rx_next; } /* * From this point on we assume the frame is at least * as large as ieee80211_frame_min; verify that. */ if (len < IEEE80211_MIN_LEN) { DPRINTF(sc, ATH_DEBUG_RECV, "%s: short packet %d\n", __func__, len); sc->sc_stats.ast_rx_tooshort++; m_freem(m); goto rx_next; } if (IFF_DUMPPKTS(sc, ATH_DEBUG_RECV)) { ieee80211_dump_pkt(ic, mtod(m, caddr_t), len, sc->sc_hwmap[rs->rs_rate].ieeerate, rs->rs_rssi); } m_adj(m, -IEEE80211_CRC_LEN); /* * Locate the node for sender, track state, and then * pass the (referenced) node up to the 802.11 layer * for its use. */ ni = ieee80211_find_rxnode_withkey(ic, mtod(m, const struct ieee80211_frame_min *), rs->rs_keyix == HAL_RXKEYIX_INVALID ? IEEE80211_KEYIX_NONE : rs->rs_keyix); if (ni != NULL) { /* * Sending station is known, dispatch directly. */ type = ieee80211_input(ni, m, rs->rs_rssi, nf, rs->rs_tstamp); ieee80211_free_node(ni); /* * Arrange to update the last rx timestamp only for * frames from our ap when operating in station mode. * This assumes the rx key is always setup when * associated. */ if (ic->ic_opmode == IEEE80211_M_STA && rs->rs_keyix != HAL_RXKEYIX_INVALID) ngood++; } else { type = ieee80211_input_all(ic, m, rs->rs_rssi, nf, rs->rs_tstamp); } /* * Track rx rssi and do any rx antenna management. */ ATH_RSSI_LPF(sc->sc_halstats.ns_avgrssi, rs->rs_rssi); if (sc->sc_diversity) { /* * When using fast diversity, change the default rx * antenna if diversity chooses the other antenna 3 * times in a row. */ if (sc->sc_defant != rs->rs_antenna) { if (++sc->sc_rxotherant >= 3) ath_setdefantenna(sc, rs->rs_antenna); } else sc->sc_rxotherant = 0; } if (sc->sc_softled) { /* * Blink for any data frame. Otherwise do a * heartbeat-style blink when idle. The latter * is mainly for station mode where we depend on * periodic beacon frames to trigger the poll event. */ if (type == IEEE80211_FC0_TYPE_DATA) { sc->sc_rxrate = rs->rs_rate; ath_led_event(sc, ATH_LED_RX); } else if (ticks - sc->sc_ledevent >= sc->sc_ledidle) ath_led_event(sc, ATH_LED_POLL); } rx_next: STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); } while (ath_rxbuf_init(sc, bf) == 0); /* rx signal state monitoring */ ath_hal_rxmonitor(ah, &sc->sc_halstats, &sc->sc_curchan); if (ngood) sc->sc_lastrx = tsf; if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0 && !IFQ_IS_EMPTY(&ifp->if_snd)) ath_start(ifp); #undef PA2DESC } static void ath_txq_init(struct ath_softc *sc, struct ath_txq *txq, int qnum) { txq->axq_qnum = qnum; txq->axq_depth = 0; txq->axq_intrcnt = 0; txq->axq_link = NULL; STAILQ_INIT(&txq->axq_q); ATH_TXQ_LOCK_INIT(sc, txq); TAILQ_INIT(&txq->axq_stageq); txq->axq_curage = 0; } /* * Setup a h/w transmit queue. 
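 * (Illustrative: ath_tx_setup below requests HAL_TX_QUEUE_DATA queues
 *  from this routine, passing its haltype argument through as the
 *  subtype.)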
*/ static struct ath_txq * ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) { #define N(a) (sizeof(a)/sizeof(a[0])) struct ath_hal *ah = sc->sc_ah; HAL_TXQ_INFO qi; int qnum; memset(&qi, 0, sizeof(qi)); qi.tqi_subtype = subtype; qi.tqi_aifs = HAL_TXQ_USEDEFAULT; qi.tqi_cwmin = HAL_TXQ_USEDEFAULT; qi.tqi_cwmax = HAL_TXQ_USEDEFAULT; /* * Enable interrupts only for EOL and DESC conditions. * We mark tx descriptors to receive a DESC interrupt * when a tx queue gets deep; otherwise waiting for the * EOL to reap descriptors. Note that this is done to * reduce interrupt load and this only defers reaping * descriptors, never transmitting frames. Aside from * reducing interrupts this also permits more concurrency. * The only potential downside is if the tx queue backs * up in which case the top half of the kernel may backup * due to a lack of tx descriptors. */ qi.tqi_qflags = HAL_TXQ_TXEOLINT_ENABLE | HAL_TXQ_TXDESCINT_ENABLE; qnum = ath_hal_setuptxqueue(ah, qtype, &qi); if (qnum == -1) { /* * NB: don't print a message, this happens * normally on parts with too few tx queues */ return NULL; } if (qnum >= N(sc->sc_txq)) { device_printf(sc->sc_dev, "hal qnum %u out of range, max %zu!\n", qnum, N(sc->sc_txq)); ath_hal_releasetxqueue(ah, qnum); return NULL; } if (!ATH_TXQ_SETUP(sc, qnum)) { ath_txq_init(sc, &sc->sc_txq[qnum], qnum); sc->sc_txqsetup |= 1<<qnum; } return &sc->sc_txq[qnum]; #undef N } /* * Setup a hardware data transmit queue for the specified * access control. The hal may not support all requested * queues in which case it will return a reference to a * previously setup queue. We record the mapping from ac's * to h/w queues for use by ath_tx_start and also track * the set of h/w queues being used to optimize work in the * transmit interrupt handler and related routines. */ static int ath_tx_setup(struct ath_softc *sc, int ac, int haltype) { #define N(a) (sizeof(a)/sizeof(a[0])) struct ath_txq *txq; if (ac >= N(sc->sc_ac2q)) { device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n", ac, N(sc->sc_ac2q)); return 0; } txq = ath_txq_setup(sc, HAL_TX_QUEUE_DATA, haltype); if (txq != NULL) { sc->sc_ac2q[ac] = txq; return 1; } else return 0; #undef N } /* * Update WME parameters for a transmit queue. */ static int ath_txq_update(struct ath_softc *sc, int ac) { #define ATH_EXPONENT_TO_VALUE(v) ((1<<v)-1) #define ATH_TXOP_TO_US(v) (v<<5) struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ath_txq *txq = sc->sc_ac2q[ac]; struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; struct ath_hal *ah = sc->sc_ah; HAL_TXQ_INFO qi; ath_hal_gettxqueueprops(ah, txq->axq_qnum, &qi); qi.tqi_aifs = wmep->wmep_aifsn; qi.tqi_cwmin = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); qi.tqi_cwmax = ATH_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); qi.tqi_burstTime = ATH_TXOP_TO_US(wmep->wmep_txopLimit); if (!ath_hal_settxqueueprops(ah, txq->axq_qnum, &qi)) { if_printf(ifp, "unable to update hardware queue " "parameters for %s traffic!\n", ieee80211_wme_acnames[ac]); return 0; } else { ath_hal_resettxqueue(ah, txq->axq_qnum); /* push to h/w */ return 1; } #undef ATH_TXOP_TO_US #undef ATH_EXPONENT_TO_VALUE } /* * Callback from the 802.11 layer to update WME parameters. */ static int ath_wme_update(struct ieee80211com *ic) { struct ath_softc *sc = ic->ic_ifp->if_softc; return !ath_txq_update(sc, WME_AC_BE) || !ath_txq_update(sc, WME_AC_BK) || !ath_txq_update(sc, WME_AC_VI) || !ath_txq_update(sc, WME_AC_VO) ? EIO : 0; } /* * Reclaim resources for a setup queue.
*/ static void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq) { ath_hal_releasetxqueue(sc->sc_ah, txq->axq_qnum); ATH_TXQ_LOCK_DESTROY(txq); sc->sc_txqsetup &= ~(1<<txq->axq_qnum); } /* * Reclaim all tx queue resources. */ static void ath_tx_cleanup(struct ath_softc *sc) { int i; ATH_TXBUF_LOCK_DESTROY(sc); for (i = 0; i < HAL_NUM_TX_QUEUES; i++) if (ATH_TXQ_SETUP(sc, i)) ath_tx_cleanupq(sc, &sc->sc_txq[i]); } /* * Return h/w rate index for an IEEE rate (w/o basic rate bit). */ static int ath_tx_findrix(const HAL_RATE_TABLE *rt, int rate) { int i; for (i = 0; i < rt->rateCount; i++) if ((rt->info[i].dot11Rate & IEEE80211_RATE_VAL) == rate) return i; return 0; /* NB: lowest rate */ } /* * Reclaim mbuf resources. For fragmented frames we * need to claim each frag chained with m_nextpkt. */ static void ath_freetx(struct mbuf *m) { struct mbuf *next; do { next = m->m_nextpkt; m->m_nextpkt = NULL; m_freem(m); } while ((m = next) != NULL); } static int ath_tx_dmasetup(struct ath_softc *sc, struct ath_buf *bf, struct mbuf *m0) { struct mbuf *m; int error; /* * Load the DMA map so any coalescing is done. This * also calculates the number of descriptors we need. */ error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error == EFBIG) { /* XXX packet requires too many descriptors */ bf->bf_nseg = ATH_TXDESC+1; } else if (error != 0) { sc->sc_stats.ast_tx_busdma++; ath_freetx(m0); return error; } /* * Discard null packets and check for packets that * require too many TX descriptors. We try to convert * the latter to a cluster. */ if (bf->bf_nseg > ATH_TXDESC) { /* too many desc's, linearize */ sc->sc_stats.ast_tx_linear++; m = m_collapse(m0, M_DONTWAIT, ATH_TXDESC); if (m == NULL) { ath_freetx(m0); sc->sc_stats.ast_tx_nombuf++; return ENOMEM; } m0 = m; error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error != 0) { sc->sc_stats.ast_tx_busdma++; ath_freetx(m0); return error; } KASSERT(bf->bf_nseg <= ATH_TXDESC, ("too many segments after defrag; nseg %u", bf->bf_nseg)); } else if (bf->bf_nseg == 0) { /* null packet, discard */ sc->sc_stats.ast_tx_nodata++; ath_freetx(m0); return EIO; } DPRINTF(sc, ATH_DEBUG_XMIT, "%s: m %p len %u\n", __func__, m0, m0->m_pkthdr.len); bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); bf->bf_m = m0; return 0; } static void ath_tx_handoff(struct ath_softc *sc, struct ath_txq *txq, struct ath_buf *bf) { struct ath_hal *ah = sc->sc_ah; struct ath_desc *ds, *ds0; int i; /* * Fillin the remainder of the descriptor info. */ ds0 = ds = bf->bf_desc; for (i = 0; i < bf->bf_nseg; i++, ds++) { ds->ds_data = bf->bf_segs[i].ds_addr; if (i == bf->bf_nseg - 1) ds->ds_link = 0; else ds->ds_link = bf->bf_daddr + sizeof(*ds) * (i + 1); ath_hal_filltxdesc(ah, ds , bf->bf_segs[i].ds_len /* segment length */ , i == 0 /* first segment */ , i == bf->bf_nseg - 1 /* last segment */ , ds0 /* first descriptor */ ); DPRINTF(sc, ATH_DEBUG_XMIT, "%s: %d: %08x %08x %08x %08x %08x %08x\n", __func__, i, ds->ds_link, ds->ds_data, ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]); } /* * Insert the frame on the outbound list and pass it on * to the hardware. Multicast frames buffered for power * save stations and transmit from the CAB queue are stored * on a s/w only queue and loaded on to the CAB queue in * the SWBA handler since frames only go out on DTIM and * to avoid possible races.
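* For a normal h/w queue the buffer is appended to axq_q and either * TXDP is primed (no descriptor to chain to) or the previous * descriptor's link field is patched, after which ath_hal_txstart * (re)starts DMA. For the s/w mcast queue (ATH_TXQ_SWQ) the previous * frame is flagged MORE_DATA and nothing is handed to the h/w here.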
*/ ATH_TXQ_LOCK(txq); if (txq->axq_qnum != ATH_TXQ_SWQ) { ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); if (txq->axq_link == NULL) { ath_hal_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); DPRINTF(sc, ATH_DEBUG_XMIT, "%s: TXDP[%u] = %p (%p) depth %d\n", __func__, txq->axq_qnum, (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth); } else { *txq->axq_link = bf->bf_daddr; DPRINTF(sc, ATH_DEBUG_XMIT, "%s: link[%u](%p)=%p (%p) depth %d\n", __func__, txq->axq_qnum, txq->axq_link, (caddr_t)bf->bf_daddr, bf->bf_desc, txq->axq_depth); } txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link; ath_hal_txstart(ah, txq->axq_qnum); } else { if (txq->axq_link != NULL) { struct ath_buf *last = ATH_TXQ_LAST(txq); struct ieee80211_frame *wh; /* mark previous frame */ wh = mtod(last->bf_m, struct ieee80211_frame *); wh->i_fc[1] |= IEEE80211_FC1_MORE_DATA; bus_dmamap_sync(sc->sc_dmat, last->bf_dmamap, BUS_DMASYNC_PREWRITE); /* link descriptor */ *txq->axq_link = bf->bf_daddr; } ATH_TXQ_INSERT_TAIL(txq, bf, bf_list); txq->axq_link = &bf->bf_desc[bf->bf_nseg - 1].ds_link; } ATH_TXQ_UNLOCK(txq); } static int ath_tx_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0) { struct ieee80211vap *vap = ni->ni_vap; struct ath_vap *avp = ATH_VAP(vap); struct ath_hal *ah = sc->sc_ah; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams; int error, iswep, ismcast, isfrag, ismrr; int keyix, hdrlen, pktlen, try0; u_int8_t rix, txrate, ctsrate; u_int8_t cix = 0xff; /* NB: silence compiler */ struct ath_desc *ds; struct ath_txq *txq; struct ieee80211_frame *wh; u_int subtype, flags, ctsduration; HAL_PKT_TYPE atype; const HAL_RATE_TABLE *rt; HAL_BOOL shortPreamble; struct ath_node *an; u_int pri; wh = mtod(m0, struct ieee80211_frame *); iswep = wh->i_fc[1] & IEEE80211_FC1_WEP; ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); isfrag = m0->m_flags & M_FRAG; hdrlen = ieee80211_anyhdrsize(wh); /* * Packet length must not include any * pad bytes; deduct them here. */ pktlen = m0->m_pkthdr.len - (hdrlen & 3); if (iswep) { const struct ieee80211_cipher *cip; struct ieee80211_key *k; /* * Construct the 802.11 header+trailer for an encrypted * frame. The only reason this can fail is because of an * unknown or unsupported cipher/key type. */ k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { /* * This can happen when the key is yanked after the * frame was queued. Just discard the frame; the * 802.11 layer counts failures and provides * debugging/diagnostics. */ ath_freetx(m0); return EIO; } /* * Adjust the packet + header lengths for the crypto * additions and calculate the h/w key index. When * a s/w mic is done the frame will have had any mic * added to it prior to entry so m0->m_pkthdr.len will * account for it. Otherwise we need to add it to the * packet length. */ cip = k->wk_cipher; hdrlen += cip->ic_header; pktlen += cip->ic_header + cip->ic_trailer; /* NB: frags always have any TKIP MIC done in s/w */ if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && !isfrag) pktlen += cip->ic_miclen; keyix = k->wk_keyix; /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } else if (ni->ni_ucastkey.wk_cipher == &ieee80211_cipher_none) { /* * Use station key cache slot, if assigned. */ keyix = ni->ni_ucastkey.wk_keyix; if (keyix == IEEE80211_KEYIX_NONE) keyix = HAL_TXKEYIX_INVALID; } else keyix = HAL_TXKEYIX_INVALID; pktlen += IEEE80211_CRC_LEN; /* * Load the DMA map so any coalescing is done. 
This * also calculates the number of descriptors we need. */ error = ath_tx_dmasetup(sc, bf, m0); if (error != 0) return error; bf->bf_node = ni; /* NB: held reference */ m0 = bf->bf_m; /* NB: may have changed */ wh = mtod(m0, struct ieee80211_frame *); /* setup descriptors */ ds = bf->bf_desc; rt = sc->sc_currates; KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); /* * NB: the 802.11 layer marks whether or not we should * use short preamble based on the current mode and * negotiated parameters. */ if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) { shortPreamble = AH_TRUE; sc->sc_stats.ast_tx_shortpre++; } else { shortPreamble = AH_FALSE; } an = ATH_NODE(ni); flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ ismrr = 0; /* default no multi-rate retry*/ pri = M_WME_GETAC(m0); /* honor classification */ /* XXX use txparams instead of fixed values */ /* * Calculate Atheros packet type from IEEE80211 packet header, * setup for rate calculations, and select h/w transmit queue. */ switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { case IEEE80211_FC0_TYPE_MGT: subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; if (subtype == IEEE80211_FC0_SUBTYPE_BEACON) atype = HAL_PKT_TYPE_BEACON; else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) atype = HAL_PKT_TYPE_PROBE_RESP; else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM) atype = HAL_PKT_TYPE_ATIM; else atype = HAL_PKT_TYPE_NORMAL; /* XXX */ rix = an->an_mgmtrix; txrate = rt->info[rix].rateCode; if (shortPreamble) txrate |= rt->info[rix].shortPreamble; try0 = ATH_TXMGTTRY; flags |= HAL_TXDESC_INTREQ; /* force interrupt */ break; case IEEE80211_FC0_TYPE_CTL: atype = HAL_PKT_TYPE_PSPOLL; /* stop setting of duration */ rix = an->an_mgmtrix; txrate = rt->info[rix].rateCode; if (shortPreamble) txrate |= rt->info[rix].shortPreamble; try0 = ATH_TXMGTTRY; flags |= HAL_TXDESC_INTREQ; /* force interrupt */ break; case IEEE80211_FC0_TYPE_DATA: atype = HAL_PKT_TYPE_NORMAL; /* default */ /* * Data frames: multicast frames go out at a fixed rate, * EAPOL frames use the mgmt frame rate; otherwise consult * the rate control module for the rate to use. */ if (ismcast) { rix = an->an_mcastrix; txrate = rt->info[rix].rateCode; if (shortPreamble) txrate |= rt->info[rix].shortPreamble; try0 = 1; } else if (m0->m_flags & M_EAPOL) { /* XXX? maybe always use long preamble? */ rix = an->an_mgmtrix; txrate = rt->info[rix].rateCode; if (shortPreamble) txrate |= rt->info[rix].shortPreamble; try0 = ATH_TXMAXTRY; /* XXX?too many? */ } else { ath_rate_findrate(sc, an, shortPreamble, pktlen, &rix, &try0, &txrate); sc->sc_txrate = txrate; /* for LED blinking */ sc->sc_lastdatarix = rix; /* for fast frames */ if (try0 != ATH_TXMAXTRY) ismrr = 1; } if (cap->cap_wmeParams[pri].wmep_noackPolicy) flags |= HAL_TXDESC_NOACK; break; default: if_printf(ifp, "bogus frame type 0x%x (%s)\n", wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); /* XXX statistic */ ath_freetx(m0); return EIO; } txq = sc->sc_ac2q[pri]; /* * When servicing one or more stations in power-save mode * (or) if there is some mcast data waiting on the mcast * queue (to prevent out of order delivery) multicast * frames must be buffered until after the beacon. */ if (ismcast && (vap->iv_ps_sta || avp->av_mcastq.axq_depth)) txq = &avp->av_mcastq; /* * Calculate miscellaneous flags. 
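* Broad/multicast frames are never ack'd so NOACK is set; unicast * frames longer than the vap's RTS threshold get RTSENA unless the * destination is fast-frame capable.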
*/ if (ismcast) { flags |= HAL_TXDESC_NOACK; /* no ack on broad/multicast */ } else if (pktlen > vap->iv_rtsthreshold && (ni->ni_ath_flags & IEEE80211_NODE_FF) == 0) { flags |= HAL_TXDESC_RTSENA; /* RTS based on frame length */ cix = rt->info[rix].controlRate; sc->sc_stats.ast_tx_rts++; } if (flags & HAL_TXDESC_NOACK) /* NB: avoid double counting */ sc->sc_stats.ast_tx_noack++; /* * If 802.11g protection is enabled, determine whether * to use RTS/CTS or just CTS. Note that this is only * done for OFDM unicast frames. */ if ((ic->ic_flags & IEEE80211_F_USEPROT) && rt->info[rix].phy == IEEE80211_T_OFDM && (flags & HAL_TXDESC_NOACK) == 0) { /* XXX fragments must use CCK rates w/ protection */ if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) flags |= HAL_TXDESC_RTSENA; else if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) flags |= HAL_TXDESC_CTSENA; if (isfrag) { /* * For frags it would be desirable to use the * highest CCK rate for RTS/CTS. But stations * farther away may detect it at a lower CCK rate * so use the configured protection rate instead * (for now). */ cix = rt->info[sc->sc_protrix].controlRate; } else cix = rt->info[sc->sc_protrix].controlRate; sc->sc_stats.ast_tx_protect++; } /* * Calculate duration. This logically belongs in the 802.11 * layer but it lacks sufficient information to calculate it. */ if ((flags & HAL_TXDESC_NOACK) == 0 && (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_CTL) { u_int16_t dur; if (shortPreamble) dur = rt->info[rix].spAckDuration; else dur = rt->info[rix].lpAckDuration; if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) { dur += dur; /* additional SIFS+ACK */ KASSERT(m0->m_nextpkt != NULL, ("no fragment")); /* * Include the size of next fragment so NAV is * updated properly. The last fragment uses only * the ACK duration */ dur += ath_hal_computetxtime(ah, rt, m0->m_nextpkt->m_pkthdr.len, rix, shortPreamble); } if (isfrag) { /* * Force hardware to use computed duration for next * fragment by disabling multi-rate retry which updates * duration based on the multi-rate duration table. */ ismrr = 0; try0 = ATH_TXMGTTRY; /* XXX? */ } *(u_int16_t *)wh->i_dur = htole16(dur); } /* * Calculate RTS/CTS rate and duration if needed. */ ctsduration = 0; if (flags & (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)) { /* * CTS transmit rate is derived from the transmit rate * by looking in the h/w rate table. We must also factor * in whether or not a short preamble is to be used. */ /* NB: cix is set above where RTS/CTS is enabled */ KASSERT(cix != 0xff, ("cix not setup")); ctsrate = rt->info[cix].rateCode; /* * Compute the transmit duration based on the frame * size and the size of an ACK frame. We call into the * HAL to do the computation since it depends on the * characteristics of the actual PHY being used. * * NB: CTS is assumed the same size as an ACK so we can * use the precalculated ACK durations. */ if (shortPreamble) { ctsrate |= rt->info[cix].shortPreamble; if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ ctsduration += rt->info[cix].spAckDuration; ctsduration += ath_hal_computetxtime(ah, rt, pktlen, rix, AH_TRUE); if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ ctsduration += rt->info[rix].spAckDuration; } else { if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ ctsduration += rt->info[cix].lpAckDuration; ctsduration += ath_hal_computetxtime(ah, rt, pktlen, rix, AH_FALSE); if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ ctsduration += rt->info[rix].lpAckDuration; } /* * Must disable multi-rate retry when using RTS/CTS. 
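* The ctsduration computed above is tied to the series 0 rate (rix); * letting the h/w fall back to other rates would invalidate it, so * limit the frame to ATH_TXMGTTRY tries at that one rate.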
*/ ismrr = 0; try0 = ATH_TXMGTTRY; /* XXX */ } else ctsrate = 0; /* * At this point we are committed to sending the frame * and we don't need to look at m_nextpkt; clear it in * case this frame is part of frag chain. */ m0->m_nextpkt = NULL; if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len, sc->sc_hwmap[txrate].ieeerate, -1); if (bpf_peers_present(ifp->if_bpf)) { u_int64_t tsf = ath_hal_gettsf64(ah); sc->sc_tx_th.wt_tsf = htole64(tsf); sc->sc_tx_th.wt_flags = sc->sc_hwmap[txrate].txflags; if (iswep) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; if (isfrag) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_FRAG; sc->sc_tx_th.wt_rate = sc->sc_hwmap[txrate].ieeerate; sc->sc_tx_th.wt_txpower = ni->ni_txpower; sc->sc_tx_th.wt_antenna = sc->sc_txantenna; bpf_mtap2(ifp->if_bpf, &sc->sc_tx_th, sc->sc_tx_th_len, m0); } /* * Determine if a tx interrupt should be generated for * this descriptor. We take a tx interrupt to reap * descriptors when the h/w hits an EOL condition or * when the descriptor is specifically marked to generate * an interrupt. We periodically mark descriptors in this * way to insure timely replenishing of the supply needed * for sending frames. Defering interrupts reduces system * load and potentially allows more concurrent work to be * done but if done to aggressively can cause senders to * backup. * * NB: use >= to deal with sc_txintrperiod changing * dynamically through sysctl. */ if (flags & HAL_TXDESC_INTREQ) { txq->axq_intrcnt = 0; } else if (++txq->axq_intrcnt >= sc->sc_txintrperiod) { flags |= HAL_TXDESC_INTREQ; txq->axq_intrcnt = 0; } /* * Formulate first tx descriptor with tx controls. */ /* XXX check return value? */ ath_hal_setuptxdesc(ah, ds , pktlen /* packet length */ , hdrlen /* header length */ , atype /* Atheros packet type */ , ni->ni_txpower /* txpower */ , txrate, try0 /* series 0 rate/tries */ , keyix /* key cache index */ , sc->sc_txantenna /* antenna mode */ , flags /* flags */ , ctsrate /* rts/cts rate */ , ctsduration /* rts/cts duration */ ); bf->bf_flags = flags; /* * Setup the multi-rate retry state only when we're * going to use it. This assumes ath_hal_setuptxdesc * initializes the descriptors (so we don't have to) * when the hardware supports multi-rate retry and * we don't use it. */ if (ismrr) ath_rate_setupxtxdesc(sc, an, ds, shortPreamble, rix); ath_tx_handoff(sc, txq, bf); return 0; } /* * Process completed xmit descriptors from the specified queue. 
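* Buffers are reaped from the head of the queue until the hal reports * HAL_EINPROGRESS. Each completed frame updates statistics, is fed to * the rate control module, has any M_TXCB callback run, drops its node * reference, and is returned to the sc_txbuf free list. The return * value counts ack'd frames; callers use it to refresh sc_lastrx and * suppress phantom beacon-miss processing.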
*/ static int ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) { struct ath_hal *ah = sc->sc_ah; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ath_buf *bf; struct ath_desc *ds, *ds0; struct ath_tx_status *ts; struct ieee80211_node *ni; struct ath_node *an; int sr, lr, pri, nacked; HAL_STATUS status; DPRINTF(sc, ATH_DEBUG_TX_PROC, "%s: tx queue %u head %p link %p\n", __func__, txq->axq_qnum, (caddr_t)(uintptr_t) ath_hal_gettxbuf(sc->sc_ah, txq->axq_qnum), txq->axq_link); nacked = 0; for (;;) { ATH_TXQ_LOCK(txq); txq->axq_intrcnt = 0; /* reset periodic desc intr count */ bf = STAILQ_FIRST(&txq->axq_q); if (bf == NULL) { ATH_TXQ_UNLOCK(txq); break; } ds0 = &bf->bf_desc[0]; ds = &bf->bf_desc[bf->bf_nseg - 1]; ts = &bf->bf_status.ds_txstat; status = ath_hal_txprocdesc(ah, ds, ts); #ifdef ATH_DEBUG if (sc->sc_debug & ATH_DEBUG_XMIT_DESC) ath_printtxbuf(bf, txq->axq_qnum, 0, status == HAL_OK); #endif if (status == HAL_EINPROGRESS) { ATH_TXQ_UNLOCK(txq); break; } ATH_TXQ_REMOVE_HEAD(txq, bf_list); if (txq->axq_depth == 0) txq->axq_link = NULL; ATH_TXQ_UNLOCK(txq); ni = bf->bf_node; if (ni != NULL) { an = ATH_NODE(ni); if (ts->ts_status == 0) { u_int8_t txant = ts->ts_antenna; sc->sc_stats.ast_ant_tx[txant]++; sc->sc_ant_tx[txant]++; if (ts->ts_rate & HAL_TXSTAT_ALTRATE) sc->sc_stats.ast_tx_altrate++; sc->sc_stats.ast_tx_rssi = ts->ts_rssi; ATH_RSSI_LPF(sc->sc_halstats.ns_avgtxrssi, ts->ts_rssi); pri = M_WME_GETAC(bf->bf_m); if (pri >= WME_AC_VO) ic->ic_wme.wme_hipri_traffic++; ni->ni_inact = ni->ni_inact_reload; } else { if (ts->ts_status & HAL_TXERR_XRETRY) sc->sc_stats.ast_tx_xretries++; if (ts->ts_status & HAL_TXERR_FIFO) sc->sc_stats.ast_tx_fifoerr++; if (ts->ts_status & HAL_TXERR_FILT) sc->sc_stats.ast_tx_filtered++; if (bf->bf_m->m_flags & M_FF) sc->sc_stats.ast_ff_txerr++; } sr = ts->ts_shortretry; lr = ts->ts_longretry; sc->sc_stats.ast_tx_shortretry += sr; sc->sc_stats.ast_tx_longretry += lr; /* * Hand the descriptor to the rate control algorithm. */ if ((ts->ts_status & HAL_TXERR_FILT) == 0 && (bf->bf_flags & HAL_TXDESC_NOACK) == 0) { /* * If frame was ack'd update the last rx time * used to workaround phantom bmiss interrupts. */ if (ts->ts_status == 0) nacked++; ath_rate_tx_complete(sc, an, bf); } /* * Do any tx complete callback. Note this must * be done before releasing the node reference. */ if (bf->bf_m->m_flags & M_TXCB) ieee80211_process_callback(ni, bf->bf_m, ts->ts_status); /* * Reclaim reference to node. * * NB: the node may be reclaimed here if, for example * this is a DEAUTH message that was sent and the * node was timed out due to inactivity. */ ieee80211_free_node(ni); } bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); m_freem(bf->bf_m); bf->bf_m = NULL; bf->bf_node = NULL; ATH_TXBUF_LOCK(sc); STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); ATH_TXBUF_UNLOCK(sc); } /* * Flush fast-frame staging queue when traffic slows. 
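* Once the h/w queue is nearly empty there is little to gain from * holding partially filled fast frames in the staging queue, so push * any staged frames out now.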
*/ if (txq->axq_depth <= 1) ath_ff_stageq_flush(sc, txq, ath_ff_always); return nacked; } static __inline int txqactive(struct ath_hal *ah, int qnum) { u_int32_t txqs = 1<<qnum; ath_hal_gettxintrtxqs(ah, &txqs); return (txqs & (1<<qnum)); } /* * Deferred processing of transmit interrupt; special-cased * for a single hardware transmit queue (e.g. 5210 and 5211). */ static void ath_tx_proc_q0(void *arg, int npending) { struct ath_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; if (txqactive(sc->sc_ah, 0) && ath_tx_processq(sc, &sc->sc_txq[0])) sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum)) ath_tx_processq(sc, sc->sc_cabq); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_timer = 0; if (sc->sc_softled) ath_led_event(sc, ATH_LED_TX); ath_start(ifp); } /* * Deferred processing of transmit interrupt; special-cased * for four hardware queues, 0-3 (e.g. 5212 w/ WME support). */ static void ath_tx_proc_q0123(void *arg, int npending) { struct ath_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; int nacked; /* * Process each active queue. */ nacked = 0; if (txqactive(sc->sc_ah, 0)) nacked += ath_tx_processq(sc, &sc->sc_txq[0]); if (txqactive(sc->sc_ah, 1)) nacked += ath_tx_processq(sc, &sc->sc_txq[1]); if (txqactive(sc->sc_ah, 2)) nacked += ath_tx_processq(sc, &sc->sc_txq[2]); if (txqactive(sc->sc_ah, 3)) nacked += ath_tx_processq(sc, &sc->sc_txq[3]); if (txqactive(sc->sc_ah, sc->sc_cabq->axq_qnum)) ath_tx_processq(sc, sc->sc_cabq); if (nacked) sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_timer = 0; if (sc->sc_softled) ath_led_event(sc, ATH_LED_TX); ath_start(ifp); } /* * Deferred processing of transmit interrupt. */ static void ath_tx_proc(void *arg, int npending) { struct ath_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; int i, nacked; /* * Process each active queue. */ nacked = 0; for (i = 0; i < HAL_NUM_TX_QUEUES; i++) if (ATH_TXQ_SETUP(sc, i) && txqactive(sc->sc_ah, i)) nacked += ath_tx_processq(sc, &sc->sc_txq[i]); if (nacked) sc->sc_lastrx = ath_hal_gettsf64(sc->sc_ah); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_timer = 0; if (sc->sc_softled) ath_led_event(sc, ATH_LED_TX); ath_start(ifp); } static void ath_tx_draintxq(struct ath_softc *sc, struct ath_txq *txq) { #ifdef ATH_DEBUG struct ath_hal *ah = sc->sc_ah; #endif struct ieee80211_node *ni; struct ath_buf *bf; u_int ix; /* * NB: this assumes output has been stopped and * we do not need to block ath_tx_tasklet */ for (ix = 0;; ix++) { ATH_TXQ_LOCK(txq); bf = STAILQ_FIRST(&txq->axq_q); if (bf == NULL) { txq->axq_link = NULL; ATH_TXQ_UNLOCK(txq); break; } ATH_TXQ_REMOVE_HEAD(txq, bf_list); ATH_TXQ_UNLOCK(txq); #ifdef ATH_DEBUG if (sc->sc_debug & ATH_DEBUG_RESET) { struct ieee80211com *ic = sc->sc_ifp->if_l2com; ath_printtxbuf(bf, txq->axq_qnum, ix, ath_hal_txprocdesc(ah, bf->bf_desc, &bf->bf_status.ds_txstat) == HAL_OK); ieee80211_dump_pkt(ic, mtod(bf->bf_m, caddr_t), bf->bf_m->m_len, 0, -1); } #endif /* ATH_DEBUG */ bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); ni = bf->bf_node; bf->bf_node = NULL; if (ni != NULL) { /* * Do any callback and reclaim the node reference. */ if (bf->bf_m->m_flags & M_TXCB) ieee80211_process_callback(ni, bf->bf_m, -1); ieee80211_free_node(ni); } m_freem(bf->bf_m); bf->bf_m = NULL; ATH_TXBUF_LOCK(sc); STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); ATH_TXBUF_UNLOCK(sc); } } static void ath_tx_stopdma(struct ath_softc *sc, struct ath_txq *txq) { struct ath_hal *ah = sc->sc_ah; DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", __func__, txq->axq_qnum, (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, txq->axq_qnum), txq->axq_link); (void) ath_hal_stoptxdma(ah, txq->axq_qnum); } /* * Drain the transmit queues and reclaim resources.
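* Beacon queue DMA and each data queue's DMA are stopped first (skipped * when the device is marked invalid and must not be touched), then * every queued buffer is reclaimed via ath_tx_draintxq and the * OACTIVE/watchdog state is cleared.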
*/ static void ath_draintxq(struct ath_softc *sc) { struct ath_hal *ah = sc->sc_ah; struct ifnet *ifp = sc->sc_ifp; int i; /* XXX return value */ if (!sc->sc_invalid) { /* don't touch the hardware if marked invalid */ DPRINTF(sc, ATH_DEBUG_RESET, "%s: tx queue [%u] %p, link %p\n", __func__, sc->sc_bhalq, (caddr_t)(uintptr_t) ath_hal_gettxbuf(ah, sc->sc_bhalq), NULL); (void) ath_hal_stoptxdma(ah, sc->sc_bhalq); for (i = 0; i < HAL_NUM_TX_QUEUES; i++) if (ATH_TXQ_SETUP(sc, i)) ath_tx_stopdma(sc, &sc->sc_txq[i]); } for (i = 0; i < HAL_NUM_TX_QUEUES; i++) if (ATH_TXQ_SETUP(sc, i)) ath_tx_draintxq(sc, &sc->sc_txq[i]); #ifdef ATH_DEBUG if (sc->sc_debug & ATH_DEBUG_RESET) { struct ath_buf *bf = STAILQ_FIRST(&sc->sc_bbuf); if (bf != NULL && bf->bf_m != NULL) { ath_printtxbuf(bf, sc->sc_bhalq, 0, ath_hal_txprocdesc(ah, bf->bf_desc, &bf->bf_status.ds_txstat) == HAL_OK); ieee80211_dump_pkt(ifp->if_l2com, mtod(bf->bf_m, caddr_t), bf->bf_m->m_len, 0, -1); } } #endif /* ATH_DEBUG */ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_timer = 0; } /* * Disable the receive h/w in preparation for a reset. */ static void ath_stoprecv(struct ath_softc *sc) { #define PA2DESC(_sc, _pa) \ ((struct ath_desc *)((caddr_t)(_sc)->sc_rxdma.dd_desc + \ ((_pa) - (_sc)->sc_rxdma.dd_desc_paddr))) struct ath_hal *ah = sc->sc_ah; ath_hal_stoppcurecv(ah); /* disable PCU */ ath_hal_setrxfilter(ah, 0); /* clear recv filter */ ath_hal_stopdmarecv(ah); /* disable DMA engine */ DELAY(3000); /* 3ms is long enough for 1 frame */ #ifdef ATH_DEBUG if (sc->sc_debug & (ATH_DEBUG_RESET | ATH_DEBUG_FATAL)) { struct ath_buf *bf; u_int ix; printf("%s: rx queue %p, link %p\n", __func__, (caddr_t)(uintptr_t) ath_hal_getrxbuf(ah), sc->sc_rxlink); ix = 0; STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { struct ath_desc *ds = bf->bf_desc; struct ath_rx_status *rs = &bf->bf_status.ds_rxstat; HAL_STATUS status = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr, PA2DESC(sc, ds->ds_link), rs); if (status == HAL_OK || (sc->sc_debug & ATH_DEBUG_FATAL)) ath_printrxbuf(bf, ix, status == HAL_OK); ix++; } } #endif if (sc->sc_rxpending != NULL) { m_freem(sc->sc_rxpending); sc->sc_rxpending = NULL; } sc->sc_rxlink = NULL; /* just in case */ #undef PA2DESC } /* * Enable the receive h/w following a reset. */ static int ath_startrecv(struct ath_softc *sc) { struct ath_hal *ah = sc->sc_ah; struct ath_buf *bf; sc->sc_rxlink = NULL; sc->sc_rxpending = NULL; STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { int error = ath_rxbuf_init(sc, bf); if (error != 0) { DPRINTF(sc, ATH_DEBUG_RECV, "%s: ath_rxbuf_init failed %d\n", __func__, error); return error; } } bf = STAILQ_FIRST(&sc->sc_rxbuf); ath_hal_putrxbuf(ah, bf->bf_daddr); ath_hal_rxena(ah); /* enable recv descriptors */ ath_mode_init(sc); /* set filters, etc. */ ath_hal_startpcurecv(ah); /* re-enable PCU/DMA engine */ return 0; } /* * Update internal state after a channel change. */ static void ath_chan_change(struct ath_softc *sc, struct ieee80211_channel *chan) { enum ieee80211_phymode mode; /* * Change channels and update the h/w rate map * if we're switching; e.g. 11a to 11b/g. 
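* Half- and quarter-rate channels select their own phy modes; otherwise * the mode is derived with ieee80211_chan2mode. The radiotap headers * are also refreshed so captures carry the new channel settings.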
*/ if (IEEE80211_IS_CHAN_HALF(chan)) mode = IEEE80211_MODE_HALF; else if (IEEE80211_IS_CHAN_QUARTER(chan)) mode = IEEE80211_MODE_QUARTER; else mode = ieee80211_chan2mode(chan); if (mode != sc->sc_curmode) ath_setcurmode(sc, mode); sc->sc_rx_th.wr_chan_flags = htole32(chan->ic_flags); sc->sc_tx_th.wt_chan_flags = sc->sc_rx_th.wr_chan_flags; sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq); sc->sc_tx_th.wt_chan_freq = sc->sc_rx_th.wr_chan_freq; sc->sc_rx_th.wr_chan_ieee = chan->ic_ieee; sc->sc_tx_th.wt_chan_ieee = sc->sc_rx_th.wr_chan_ieee; sc->sc_rx_th.wr_chan_maxpow = chan->ic_maxregpower; sc->sc_tx_th.wt_chan_maxpow = sc->sc_rx_th.wr_chan_maxpow; } /* * Set/change channels. If the channel is really being changed, * it's done by reseting the chip. To accomplish this we must * first cleanup any pending DMA, then restart stuff after a la * ath_init. */ static int ath_chan_set(struct ath_softc *sc, struct ieee80211_channel *chan) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ath_hal *ah = sc->sc_ah; HAL_CHANNEL hchan; /* * Convert to a HAL channel description with * the flags constrained to reflect the current * operating mode. */ ath_mapchan(&hchan, chan); DPRINTF(sc, ATH_DEBUG_RESET, "%s: %u (%u MHz, hal flags 0x%x) -> %u (%u MHz, hal flags 0x%x)\n", __func__, ath_hal_mhz2ieee(ah, sc->sc_curchan.channel, sc->sc_curchan.channelFlags), sc->sc_curchan.channel, sc->sc_curchan.channelFlags, ath_hal_mhz2ieee(ah, hchan.channel, hchan.channelFlags), hchan.channel, hchan.channelFlags); if (hchan.channel != sc->sc_curchan.channel || hchan.channelFlags != sc->sc_curchan.channelFlags) { HAL_STATUS status; /* * To switch channels clear any pending DMA operations; * wait long enough for the RX fifo to drain, reset the * hardware at the new frequency, and then re-enable * the relevant bits of the h/w. */ ath_hal_intrset(ah, 0); /* disable interrupts */ ath_draintxq(sc); /* clear pending tx frames */ ath_stoprecv(sc); /* turn off frame recv */ if (!ath_hal_reset(ah, sc->sc_opmode, &hchan, AH_TRUE, &status)) { if_printf(ifp, "%s: unable to reset " "channel %u (%u Mhz, flags 0x%x hal flags 0x%x), " "hal status %u\n", __func__, ieee80211_chan2ieee(ic, chan), chan->ic_freq, chan->ic_flags, hchan.channelFlags, status); return EIO; } sc->sc_curchan = hchan; sc->sc_diversity = ath_hal_getdiversity(ah); sc->sc_calinterval = 1; sc->sc_caltries = 0; /* * Re-enable rx framework. */ if (ath_startrecv(sc) != 0) { if_printf(ifp, "%s: unable to restart recv logic\n", __func__); return EIO; } /* * Change channels and update the h/w rate map * if we're switching; e.g. 11a to 11b/g. */ ath_chan_change(sc, chan); /* * Re-enable interrupts. */ ath_hal_intrset(ah, sc->sc_imask); } return 0; } /* * Periodically recalibrate the PHY to account * for temperature/environment changes. */ static void ath_calibrate(void *arg) { struct ath_softc *sc = arg; struct ath_hal *ah = sc->sc_ah; HAL_BOOL iqCalDone; sc->sc_stats.ast_per_cal++; if (ath_hal_getrfgain(ah) == HAL_RFGAIN_NEED_CHANGE) { /* * Rfgain is out of bounds, reset the chip * to load new gain values. */ DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: rfgain change\n", __func__); sc->sc_stats.ast_per_rfgain++; ath_reset(sc->sc_ifp); } if (!ath_hal_calibrate(ah, &sc->sc_curchan, &iqCalDone)) { DPRINTF(sc, ATH_DEBUG_ANY, "%s: calibration of channel %u failed\n", __func__, sc->sc_curchan.channel); sc->sc_stats.ast_per_calfail++; } /* * Calibrate noise floor data again in case of change. 
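* The callout then re-arms itself: while the IQ calibration has not * converged we poll at the short sc_calinterval, doubling it once more * than 4 attempts have been made, and settle at ath_calinterval when * calibration completes.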
*/ ath_hal_process_noisefloor(ah); /* * Poll more frequently when the IQ calibration is in * progress to speedup loading the final settings. * We temper this aggressive polling with an exponential * back off after 4 tries up to ath_calinterval. */ if (iqCalDone || sc->sc_calinterval >= ath_calinterval) { sc->sc_caltries = 0; sc->sc_calinterval = ath_calinterval; } else if (sc->sc_caltries > 4) { sc->sc_caltries = 0; sc->sc_calinterval <<= 1; if (sc->sc_calinterval > ath_calinterval) sc->sc_calinterval = ath_calinterval; } KASSERT(0 < sc->sc_calinterval && sc->sc_calinterval <= ath_calinterval, ("bad calibration interval %u", sc->sc_calinterval)); DPRINTF(sc, ATH_DEBUG_CALIBRATE, "%s: next +%u (%siqCalDone tries %u)\n", __func__, sc->sc_calinterval, iqCalDone ? "" : "!", sc->sc_caltries); sc->sc_caltries++; callout_reset(&sc->sc_cal_ch, sc->sc_calinterval * hz, ath_calibrate, sc); } static void ath_scan_start(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct ath_softc *sc = ifp->if_softc; struct ath_hal *ah = sc->sc_ah; u_int32_t rfilt; /* XXX calibration timer? */ sc->sc_scanning = 1; sc->sc_syncbeacon = 0; rfilt = ath_calcrxfilter(sc); ath_hal_setrxfilter(ah, rfilt); ath_hal_setassocid(ah, ifp->if_broadcastaddr, 0); DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0\n", __func__, rfilt, ether_sprintf(ifp->if_broadcastaddr)); } static void ath_scan_end(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct ath_softc *sc = ifp->if_softc; struct ath_hal *ah = sc->sc_ah; u_int32_t rfilt; sc->sc_scanning = 0; rfilt = ath_calcrxfilter(sc); ath_hal_setrxfilter(ah, rfilt); ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); ath_hal_process_noisefloor(ah); DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid); } static void ath_set_channel(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct ath_softc *sc = ifp->if_softc; (void) ath_chan_set(sc, ic->ic_curchan); /* * If we are returning to our bss channel then mark state * so the next recv'd beacon's tsf will be used to sync the * beacon timers. Note that since we only hear beacons in * sta/ibss mode this has no effect in other operating modes. */ if (!sc->sc_scanning && ic->ic_curchan == ic->ic_bsschan) sc->sc_syncbeacon = 1; } /* * Walk the vap list and check if there any vap's in RUN state. 
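* Used on transitions to INIT state to decide whether interrupts, the * task queue thread, and beacon processing can be shut down for the * whole device (see ath_newstate).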
*/ static int ath_isanyrunningvaps(struct ieee80211vap *this) { struct ieee80211com *ic = this->iv_ic; struct ieee80211vap *vap; IEEE80211_LOCK_ASSERT(ic); TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { if (vap != this && vap->iv_state == IEEE80211_S_RUN) return 1; } return 0; } static int ath_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct ieee80211com *ic = vap->iv_ic; struct ath_softc *sc = ic->ic_ifp->if_softc; struct ath_vap *avp = ATH_VAP(vap); struct ath_hal *ah = sc->sc_ah; struct ieee80211_node *ni = NULL; int i, error, stamode; u_int32_t rfilt; static const HAL_LED_STATE leds[] = { HAL_LED_INIT, /* IEEE80211_S_INIT */ HAL_LED_SCAN, /* IEEE80211_S_SCAN */ HAL_LED_AUTH, /* IEEE80211_S_AUTH */ HAL_LED_ASSOC, /* IEEE80211_S_ASSOC */ HAL_LED_RUN, /* IEEE80211_S_CAC */ HAL_LED_RUN, /* IEEE80211_S_RUN */ HAL_LED_RUN, /* IEEE80211_S_CSA */ HAL_LED_RUN, /* IEEE80211_S_SLEEP */ }; DPRINTF(sc, ATH_DEBUG_STATE, "%s: %s -> %s\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]); callout_stop(&sc->sc_cal_ch); ath_hal_setledstate(ah, leds[nstate]); /* set LED */ if (nstate == IEEE80211_S_SCAN) { /* * Scanning: turn off beacon miss and don't beacon. * Mark beacon state so when we reach RUN state we'll * [re]setup beacons. Unblock the task q thread so * deferred interrupt processing is done. */ ath_hal_intrset(ah, sc->sc_imask &~ (HAL_INT_SWBA | HAL_INT_BMISS)); sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); sc->sc_beacons = 0; taskqueue_unblock(sc->sc_tq); } ni = vap->iv_bss; rfilt = ath_calcrxfilter(sc); stamode = (vap->iv_opmode == IEEE80211_M_STA || vap->iv_opmode == IEEE80211_M_IBSS); if (stamode && nstate == IEEE80211_S_RUN) { sc->sc_curaid = ni->ni_associd; IEEE80211_ADDR_COPY(sc->sc_curbssid, ni->ni_bssid); ath_hal_setassocid(ah, sc->sc_curbssid, sc->sc_curaid); } DPRINTF(sc, ATH_DEBUG_STATE, "%s: RX filter 0x%x bssid %s aid 0x%x\n", __func__, rfilt, ether_sprintf(sc->sc_curbssid), sc->sc_curaid); ath_hal_setrxfilter(ah, rfilt); /* XXX is this to restore keycache on resume? */ if (vap->iv_opmode != IEEE80211_M_STA && (vap->iv_flags & IEEE80211_F_PRIVACY)) { for (i = 0; i < IEEE80211_WEP_NKID; i++) if (ath_hal_keyisvalid(ah, i)) ath_hal_keysetmac(ah, i, ni->ni_bssid); } /* * Notify the rate control algorithm so rates * are setup should ath_beacon_alloc be called. */ ath_rate_newstate(vap, nstate); /* * Invoke the parent method to do net80211 work. */ error = avp->av_newstate(vap, nstate, arg); if (error != 0) goto bad; if (nstate == IEEE80211_S_RUN) { /* NB: collect bss node again, it may have changed */ ni = vap->iv_bss; DPRINTF(sc, ATH_DEBUG_STATE, "%s(RUN): iv_flags 0x%08x bintvl %d bssid %s " "capinfo 0x%04x chan %d\n", __func__, vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan)); switch (vap->iv_opmode) { case IEEE80211_M_HOSTAP: case IEEE80211_M_IBSS: /* * Allocate and setup the beacon frame. * * Stop any previous beacon DMA. This may be * necessary, for example, when an ibss merge * causes reconfiguration; there will be a state * transition from RUN->RUN that means we may * be called with beacon transmission active. */ ath_hal_stoptxdma(ah, sc->sc_bhalq); error = ath_beacon_alloc(sc, ni); if (error != 0) goto bad; /* * If joining an adhoc network defer beacon timer * configuration to the next beacon frame so we * have a current TSF to use. 
Otherwise we're * starting an ibss/bss so there's no need to delay; * if this is the first vap moving to RUN state, then * beacon state needs to be [re]configured. */ if (vap->iv_opmode == IEEE80211_M_IBSS && ni->ni_tstamp.tsf != 0) { sc->sc_syncbeacon = 1; } else if (!sc->sc_beacons) { ath_beacon_config(sc, vap); sc->sc_beacons = 1; } break; case IEEE80211_M_STA: /* * Fakeup since we're not called by net80211. */ ath_newassoc(ni, 1); /* * Defer beacon timer configuration to the next * beacon frame so we have a current TSF to use * (any TSF collected when scanning is likely old). */ sc->sc_syncbeacon = 1; break; case IEEE80211_M_MONITOR: /* * Monitor mode vaps have only INIT->RUN and RUN->RUN * transitions so we must re-enable interrupts here to * handle the case of a single monitor mode vap. */ ath_hal_intrset(ah, sc->sc_imask); break; case IEEE80211_M_WDS: break; default: break; } /* * Let the hal process statistics collected during a * scan so it can provide calibrated noise floor data. */ ath_hal_process_noisefloor(ah); /* * Reset rssi stats; maybe not the best place... */ sc->sc_halstats.ns_avgbrssi = ATH_RSSI_DUMMY_MARKER; sc->sc_halstats.ns_avgrssi = ATH_RSSI_DUMMY_MARKER; sc->sc_halstats.ns_avgtxrssi = ATH_RSSI_DUMMY_MARKER; /* * Finally, start any timers and the task q thread * (in case we didn't go through SCAN state). */ if (sc->sc_calinterval != 0) { /* start periodic recalibration timer */ callout_reset(&sc->sc_cal_ch, sc->sc_calinterval * hz, ath_calibrate, sc); } taskqueue_unblock(sc->sc_tq); } else if (nstate == IEEE80211_S_INIT) { /* * If there are no vaps left in RUN state then * shutdown host/driver operation: * o disable interrupts * o disable the task queue thread * o mark beacon processing as stopped */ if (!ath_isanyrunningvaps(vap)) { sc->sc_imask &= ~(HAL_INT_SWBA | HAL_INT_BMISS); /* disable interrupts */ ath_hal_intrset(ah, sc->sc_imask &~ HAL_INT_GLOBAL); taskqueue_block(sc->sc_tq); sc->sc_beacons = 0; } } bad: return error; } /* * Allocate a key cache slot to the station so we can * setup a mapping from key index to node. The key cache * slot is needed for managing antenna state and for * compression when stations do not use crypto. We do * it uniliaterally here; if crypto is employed this slot * will be reassigned. */ static void ath_setup_stationkey(struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; ieee80211_keyix keyix, rxkeyix; if (!ath_key_alloc(vap, &ni->ni_ucastkey, &keyix, &rxkeyix)) { /* * Key cache is full; we'll fall back to doing * the more expensive lookup in software. Note * this also means no h/w compression. */ /* XXX msg+statistic */ } else { /* XXX locking? */ ni->ni_ucastkey.wk_keyix = keyix; ni->ni_ucastkey.wk_rxkeyix = rxkeyix; IEEE80211_ADDR_COPY(ni->ni_ucastkey.wk_macaddr, ni->ni_macaddr); /* NB: this will create a pass-thru key entry */ ath_keyset(sc, &ni->ni_ucastkey, vap->iv_bss); } } /* * Setup driver-specific state for a newly associated node. * Note that we're called also on a re-associate, the isnew * param tells us if this is the first time or not. */ static void ath_newassoc(struct ieee80211_node *ni, int isnew) { struct ath_node *an = ATH_NODE(ni); struct ieee80211vap *vap = ni->ni_vap; struct ath_softc *sc = vap->iv_ic->ic_ifp->if_softc; const struct ieee80211_txparam *tp; enum ieee80211_phymode mode; /* * Deduce netband of station to simplify setting up xmit * parameters. 
Note this allows us to assign different * parameters to each station in a mixed bss (b/g, n/[abg]). */ if (ni->ni_flags & IEEE80211_NODE_HT) { if (IEEE80211_IS_CHAN_5GHZ(ni->ni_chan)) mode = IEEE80211_MODE_11NA; else mode = IEEE80211_MODE_11NG; } else if (IEEE80211_IS_CHAN_A(ni->ni_chan)) mode = IEEE80211_MODE_11A; else if (ni->ni_flags & IEEE80211_NODE_ERP) mode = IEEE80211_MODE_11G; else mode = IEEE80211_MODE_11B; tp = &vap->iv_txparms[mode]; an->an_tp = tp; an->an_mcastrix = ath_tx_findrix(sc->sc_rates[mode], tp->mcastrate); an->an_mgmtrix = ath_tx_findrix(sc->sc_rates[mode], tp->mgmtrate); ath_rate_newassoc(sc, an, isnew); if (isnew && (vap->iv_flags & IEEE80211_F_PRIVACY) == 0 && sc->sc_hasclrkey && ni->ni_ucastkey.wk_keyix == IEEE80211_KEYIX_NONE) ath_setup_stationkey(ni); } static int getchannels(struct ath_softc *sc, int *nchans, struct ieee80211_channel chans[], int cc, int ecm, int outdoor) { struct ath_hal *ah = sc->sc_ah; HAL_CHANNEL *halchans; int i, nhalchans, error; halchans = malloc(IEEE80211_CHAN_MAX * sizeof(HAL_CHANNEL), M_TEMP, M_NOWAIT | M_ZERO); if (halchans == NULL) { device_printf(sc->sc_dev, "%s: unable to allocate channel table\n", __func__); return ENOMEM; } error = 0; if (!ath_hal_init_channels(ah, halchans, IEEE80211_CHAN_MAX, &nhalchans, NULL, 0, NULL, CTRY_DEFAULT, HAL_MODE_ALL, AH_FALSE, AH_TRUE)) { error = EINVAL; goto done; } if (nchans == NULL) /* no table requested */ goto done; /* * Convert HAL channels to ieee80211 ones. */ for (i = 0; i < nhalchans; i++) { HAL_CHANNEL *c = &halchans[i]; struct ieee80211_channel *ichan = &chans[i]; ichan->ic_ieee = ath_hal_mhz2ieee(ah, c->channel, c->channelFlags); if (bootverbose) device_printf(sc->sc_dev, "hal channel %u/%x -> %u " "maxpow %d minpow %d maxreg %d\n", c->channel, c->channelFlags, ichan->ic_ieee, c->maxTxPower, c->minTxPower, c->maxRegTxPower); ichan->ic_freq = c->channel; if ((c->channelFlags & CHANNEL_PUREG) == CHANNEL_PUREG) { /* * Except for AR5211, HAL's PUREG means mixed * DSSS and OFDM. */ ichan->ic_flags = c->channelFlags &~ CHANNEL_PUREG; ichan->ic_flags |= IEEE80211_CHAN_G; } else { ichan->ic_flags = c->channelFlags; } if (ath_hal_isgsmsku(ah)) { /* remap to true frequencies */ ichan->ic_freq = 922 + (2422 - ichan->ic_freq); ichan->ic_flags |= IEEE80211_CHAN_GSM; ichan->ic_ieee = ieee80211_mhz2ieee(ichan->ic_freq, ichan->ic_flags); } ichan->ic_maxregpower = c->maxRegTxPower; /* dBm */ /* XXX: old hal's don't provide maxTxPower for some parts */ ichan->ic_maxpower = (c->maxTxPower != 0) ? c->maxTxPower : 2*c->maxRegTxPower; /* 1/2 dBm */ ichan->ic_minpower = c->minTxPower; /* 1/2 dBm */ } *nchans = nhalchans; done: free(halchans, M_TEMP); return error; } static int ath_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, int nchans, struct ieee80211_channel chans[]) { struct ath_softc *sc = ic->ic_ifp->if_softc; struct ath_hal *ah = sc->sc_ah; u_int32_t ord; int error; (void) ath_hal_getregdomain(ah, &ord); /* XXX map sku->rd */ ath_hal_setregdomain(ah, rd->regdomain); error = getchannels(sc, &nchans, chans, rd->country, rd->ecm ? AH_TRUE : AH_FALSE, rd->location == 'O' ? AH_TRUE : AH_FALSE); if (error != 0) { /* * Restore previous state. */ ath_hal_setregdomain(ah, ord); (void) getchannels(sc, NULL, NULL, ic->ic_regdomain.country, ic->ic_regdomain.ecm ? AH_TRUE : AH_FALSE, ic->ic_regdomain.location == 'O' ? 
AH_TRUE : AH_FALSE); return error; } return 0; } static void ath_getradiocaps(struct ieee80211com *ic, int *nchans, struct ieee80211_channel chans[]) { struct ath_softc *sc = ic->ic_ifp->if_softc; struct ath_hal *ah = sc->sc_ah; u_int32_t ord; (void) ath_hal_getregdomain(ah, &ord); ath_hal_setregdomain(ah, 0); /* XXX not quite right but close enough for now */ getchannels(sc, nchans, chans, CTRY_DEBUG, AH_TRUE, AH_FALSE); /* NB: restore previous state */ ath_hal_setregdomain(ah, ord); (void) getchannels(sc, NULL, NULL, ic->ic_regdomain.country, ic->ic_regdomain.ecm ? AH_TRUE : AH_FALSE, ic->ic_regdomain.location == 'O' ? AH_TRUE : AH_FALSE); } static int ath_mapregdomain(struct ath_softc *sc, u_int32_t rd) { /* map Atheros rd's to SKU's */ return rd; } static int ath_getchannels(struct ath_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ath_hal *ah = sc->sc_ah; int error; /* * Convert HAL channels to ieee80211 ones. */ error = getchannels(sc, &ic->ic_nchans, ic->ic_channels, CTRY_DEFAULT, AH_TRUE, AH_FALSE); (void) ath_hal_getregdomain(ah, &sc->sc_eerd); ath_hal_getcountrycode(ah, &sc->sc_eecc); /* NB: cannot fail */ if (error) { if_printf(ifp, "%s: unable to collect channel list from hal, " "error %d\n", __func__, error); if (error == EINVAL) { if_printf(ifp, "%s: regdomain likely %u country code %u\n", __func__, sc->sc_eerd, sc->sc_eecc); } return error; } ic->ic_regdomain.regdomain = ath_mapregdomain(sc, sc->sc_eerd); ic->ic_regdomain.country = sc->sc_eecc; ic->ic_regdomain.ecm = 1; ic->ic_regdomain.location = 'I'; ic->ic_regdomain.isocc[0] = ' '; /* XXX don't know */ ic->ic_regdomain.isocc[1] = ' '; return 0; } static void ath_led_done(void *arg) { struct ath_softc *sc = arg; sc->sc_blinking = 0; } /* * Turn the LED off: flip the pin and then set a timer so no * update will happen for the specified duration. */ static void ath_led_off(void *arg) { struct ath_softc *sc = arg; ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !sc->sc_ledon); callout_reset(&sc->sc_ledtimer, sc->sc_ledoff, ath_led_done, sc); } /* * Blink the LED according to the specified on/off times. 
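* The LED is driven on, the off interval is saved in sc_ledoff, and * sc_ledtimer is armed to turn it off after "on" ticks; ath_led_done * then clears sc_blinking so the next event may start a new cycle.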
*/ static void ath_led_blink(struct ath_softc *sc, int on, int off) { DPRINTF(sc, ATH_DEBUG_LED, "%s: on %u off %u\n", __func__, on, off); ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, sc->sc_ledon); sc->sc_blinking = 1; sc->sc_ledoff = off; callout_reset(&sc->sc_ledtimer, on, ath_led_off, sc); } static void ath_led_event(struct ath_softc *sc, int event) { sc->sc_ledevent = ticks; /* time of last event */ if (sc->sc_blinking) /* don't interrupt active blink */ return; switch (event) { case ATH_LED_POLL: ath_led_blink(sc, sc->sc_hwmap[0].ledon, sc->sc_hwmap[0].ledoff); break; case ATH_LED_TX: ath_led_blink(sc, sc->sc_hwmap[sc->sc_txrate].ledon, sc->sc_hwmap[sc->sc_txrate].ledoff); break; case ATH_LED_RX: ath_led_blink(sc, sc->sc_hwmap[sc->sc_rxrate].ledon, sc->sc_hwmap[sc->sc_rxrate].ledoff); break; } } static int ath_rate_setup(struct ath_softc *sc, u_int mode) { struct ath_hal *ah = sc->sc_ah; const HAL_RATE_TABLE *rt; switch (mode) { case IEEE80211_MODE_11A: rt = ath_hal_getratetable(ah, HAL_MODE_11A); break; case IEEE80211_MODE_HALF: rt = ath_hal_getratetable(ah, HAL_MODE_11A_HALF_RATE); break; case IEEE80211_MODE_QUARTER: rt = ath_hal_getratetable(ah, HAL_MODE_11A_QUARTER_RATE); break; case IEEE80211_MODE_11B: rt = ath_hal_getratetable(ah, HAL_MODE_11B); break; case IEEE80211_MODE_11G: rt = ath_hal_getratetable(ah, HAL_MODE_11G); break; case IEEE80211_MODE_TURBO_A: rt = ath_hal_getratetable(ah, HAL_MODE_108A); #if HAL_ABI_VERSION < 0x07013100 if (rt == NULL) /* XXX bandaid for old hal's */ rt = ath_hal_getratetable(ah, HAL_MODE_TURBO); #endif break; case IEEE80211_MODE_TURBO_G: rt = ath_hal_getratetable(ah, HAL_MODE_108G); break; case IEEE80211_MODE_STURBO_A: rt = ath_hal_getratetable(ah, HAL_MODE_TURBO); break; case IEEE80211_MODE_11NA: rt = ath_hal_getratetable(ah, HAL_MODE_11NA_HT20); break; case IEEE80211_MODE_11NG: rt = ath_hal_getratetable(ah, HAL_MODE_11NG_HT20); break; default: DPRINTF(sc, ATH_DEBUG_ANY, "%s: invalid mode %u\n", __func__, mode); return 0; } sc->sc_rates[mode] = rt; return (rt != NULL); } static void ath_setcurmode(struct ath_softc *sc, enum ieee80211_phymode mode) { #define N(a) (sizeof(a)/sizeof(a[0])) /* NB: on/off times from the Atheros NDIS driver, w/ permission */ static const struct { u_int rate; /* tx/rx 802.11 rate */ u_int16_t timeOn; /* LED on time (ms) */ u_int16_t timeOff; /* LED off time (ms) */ } blinkrates[] = { { 108, 40, 10 }, { 96, 44, 11 }, { 72, 50, 13 }, { 48, 57, 14 }, { 36, 67, 16 }, { 24, 80, 20 }, { 22, 100, 25 }, { 18, 133, 34 }, { 12, 160, 40 }, { 10, 200, 50 }, { 6, 240, 58 }, { 4, 267, 66 }, { 2, 400, 100 }, { 0, 500, 130 }, /* XXX half/quarter rates */ }; const HAL_RATE_TABLE *rt; int i, j; memset(sc->sc_rixmap, 0xff, sizeof(sc->sc_rixmap)); rt = sc->sc_rates[mode]; KASSERT(rt != NULL, ("no h/w rate set for phy mode %u", mode)); for (i = 0; i < rt->rateCount; i++) sc->sc_rixmap[rt->info[i].dot11Rate & IEEE80211_RATE_VAL] = i; memset(sc->sc_hwmap, 0, sizeof(sc->sc_hwmap)); for (i = 0; i < 32; i++) { u_int8_t ix = rt->rateCodeToIndex[i]; if (ix == 0xff) { sc->sc_hwmap[i].ledon = (500 * hz) / 1000; sc->sc_hwmap[i].ledoff = (130 * hz) / 1000; continue; } sc->sc_hwmap[i].ieeerate = rt->info[ix].dot11Rate & IEEE80211_RATE_VAL; if (rt->info[ix].phy == IEEE80211_T_HT) sc->sc_hwmap[i].ieeerate |= 0x80; /* MCS */ sc->sc_hwmap[i].txflags = IEEE80211_RADIOTAP_F_DATAPAD; if (rt->info[ix].shortPreamble || rt->info[ix].phy == IEEE80211_T_OFDM) sc->sc_hwmap[i].txflags |= IEEE80211_RADIOTAP_F_SHORTPRE; /* NB: receive frames include FCS */ 
sc->sc_hwmap[i].rxflags = sc->sc_hwmap[i].txflags | IEEE80211_RADIOTAP_F_FCS; /* setup blink rate table to avoid per-packet lookup */ for (j = 0; j < N(blinkrates)-1; j++) if (blinkrates[j].rate == sc->sc_hwmap[i].ieeerate) break; /* NB: this uses the last entry if the rate isn't found */ /* XXX beware of overlow */ sc->sc_hwmap[i].ledon = (blinkrates[j].timeOn * hz) / 1000; sc->sc_hwmap[i].ledoff = (blinkrates[j].timeOff * hz) / 1000; } sc->sc_currates = rt; sc->sc_curmode = mode; /* * All protection frames are transmited at 2Mb/s for * 11g, otherwise at 1Mb/s. */ if (mode == IEEE80211_MODE_11G) sc->sc_protrix = ath_tx_findrix(rt, 2*2); else sc->sc_protrix = ath_tx_findrix(rt, 2*1); /* NB: caller is responsible for reseting rate control state */ #undef N } #ifdef ATH_DEBUG static void ath_printrxbuf(const struct ath_buf *bf, u_int ix, int done) { const struct ath_rx_status *rs = &bf->bf_status.ds_rxstat; const struct ath_desc *ds; int i; for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) { printf("R[%2u] (DS.V:%p DS.P:%p) L:%08x D:%08x%s\n" " %08x %08x %08x %08x\n", ix, ds, (const struct ath_desc *)bf->bf_daddr + i, ds->ds_link, ds->ds_data, !done ? "" : (rs->rs_status == 0) ? " *" : " !", ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1]); } } static void ath_printtxbuf(const struct ath_buf *bf, u_int qnum, u_int ix, int done) { const struct ath_tx_status *ts = &bf->bf_status.ds_txstat; const struct ath_desc *ds; int i; printf("Q%u[%3u]", qnum, ix); for (i = 0, ds = bf->bf_desc; i < bf->bf_nseg; i++, ds++) { printf(" (DS.V:%p DS.P:%p) L:%08x D:%08x F:04%x%s\n" " %08x %08x %08x %08x %08x %08x\n", ds, (const struct ath_desc *)bf->bf_daddr + i, ds->ds_link, ds->ds_data, bf->bf_flags, !done ? "" : (ts->ts_status == 0) ? " *" : " !", ds->ds_ctl0, ds->ds_ctl1, ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3]); } } #endif /* ATH_DEBUG */ static void ath_watchdog(struct ifnet *ifp) { struct ath_softc *sc = ifp->if_softc; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && !sc->sc_invalid) { if_printf(ifp, "device timeout\n"); ath_reset(ifp); ifp->if_oerrors++; sc->sc_stats.ast_watchdog++; } } #ifdef ATH_DIAGAPI /* * Diagnostic interface to the HAL. This is used by various * tools to do things like retrieve register contents for * debugging. The mechanism is intentionally opaque so that * it can change frequently w/o concern for compatiblity. */ static int ath_ioctl_diag(struct ath_softc *sc, struct ath_diag *ad) { struct ath_hal *ah = sc->sc_ah; u_int id = ad->ad_id & ATH_DIAG_ID; void *indata = NULL; void *outdata = NULL; u_int32_t insize = ad->ad_in_size; u_int32_t outsize = ad->ad_out_size; int error = 0; if (ad->ad_id & ATH_DIAG_IN) { /* * Copy in data. */ indata = malloc(insize, M_TEMP, M_NOWAIT); if (indata == NULL) { error = ENOMEM; goto bad; } error = copyin(ad->ad_in_data, indata, insize); if (error) goto bad; } if (ad->ad_id & ATH_DIAG_DYN) { /* * Allocate a buffer for the results (otherwise the HAL * returns a pointer to a buffer where we can read the * results). Note that we depend on the HAL leaving this * pointer for us to use below in reclaiming the buffer; * may want to be more defensive. 
*/ outdata = malloc(outsize, M_TEMP, M_NOWAIT); if (outdata == NULL) { error = ENOMEM; goto bad; } } if (ath_hal_getdiagstate(ah, id, indata, insize, &outdata, &outsize)) { if (outsize < ad->ad_out_size) ad->ad_out_size = outsize; if (outdata != NULL) error = copyout(outdata, ad->ad_out_data, ad->ad_out_size); } else { error = EINVAL; } bad: if ((ad->ad_id & ATH_DIAG_IN) && indata != NULL) free(indata, M_TEMP); if ((ad->ad_id & ATH_DIAG_DYN) && outdata != NULL) free(outdata, M_TEMP); return error; } #endif /* ATH_DIAGAPI */ static int ath_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { #define IS_RUNNING(ifp) \ ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) struct ath_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct ifreq *ifr = (struct ifreq *)data; int error = 0; switch (cmd) { case SIOCSIFFLAGS: ATH_LOCK(sc); if (IS_RUNNING(ifp)) { /* * To avoid rescanning another access point, * do not call ath_init() here. Instead, * only reflect promisc mode settings. */ ath_mode_init(sc); } else if (ifp->if_flags & IFF_UP) { /* * Beware of being called during attach/detach * to reset promiscuous mode. In that case we * will still be marked UP but not RUNNING. * However trying to re-init the interface * is the wrong thing to do as we've already * torn down much of our state. There's * probably a better way to deal with this. */ if (!sc->sc_invalid) ath_init(sc); /* XXX lose error */ } else { ath_stop_locked(ifp); #ifdef notyet /* XXX must wakeup in places like ath_vap_delete */ if (!sc->sc_invalid) ath_hal_setpower(sc->sc_ah, HAL_PM_FULL_SLEEP); #endif } ATH_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; case SIOCGATHSTATS: /* NB: embed these numbers to get a consistent view */ sc->sc_stats.ast_tx_packets = ifp->if_opackets; sc->sc_stats.ast_rx_packets = ifp->if_ipackets; #if 0 ieee80211_getsignal(ic, &sc->sc_stats.ast_rx_rssi, &sc->sc_stats.ast_rx_noise); #endif sc->sc_stats.ast_tx_rate = sc->sc_hwmap[sc->sc_txrate].ieeerate; return copyout(&sc->sc_stats, ifr->ifr_data, sizeof (sc->sc_stats)); #ifdef ATH_DIAGAPI case SIOCGATHDIAG: error = ath_ioctl_diag(sc, (struct ath_diag *) ifr); break; #endif case SIOCGIFADDR: error = ether_ioctl(ifp, cmd, data); break; default: error = EINVAL; break; } return error; #undef IS_RUNNING } static int ath_sysctl_slottime(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; u_int slottime = ath_hal_getslottime(sc->sc_ah); int error; error = sysctl_handle_int(oidp, &slottime, 0, req); if (error || !req->newptr) return error; return !ath_hal_setslottime(sc->sc_ah, slottime) ? EINVAL : 0; } static int ath_sysctl_acktimeout(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; u_int acktimeout = ath_hal_getacktimeout(sc->sc_ah); int error; error = sysctl_handle_int(oidp, &acktimeout, 0, req); if (error || !req->newptr) return error; return !ath_hal_setacktimeout(sc->sc_ah, acktimeout) ? EINVAL : 0; } static int ath_sysctl_ctstimeout(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; u_int ctstimeout = ath_hal_getctstimeout(sc->sc_ah); int error; error = sysctl_handle_int(oidp, &ctstimeout, 0, req); if (error || !req->newptr) return error; return !ath_hal_setctstimeout(sc->sc_ah, ctstimeout) ? 
EINVAL : 0; } static int ath_sysctl_softled(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; int softled = sc->sc_softled; int error; error = sysctl_handle_int(oidp, &softled, 0, req); if (error || !req->newptr) return error; softled = (softled != 0); if (softled != sc->sc_softled) { if (softled) { /* NB: handle any sc_ledpin change */ ath_hal_gpioCfgOutput(sc->sc_ah, sc->sc_ledpin); ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !sc->sc_ledon); } sc->sc_softled = softled; } return 0; } static int ath_sysctl_ledpin(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; int ledpin = sc->sc_ledpin; int error; error = sysctl_handle_int(oidp, &ledpin, 0, req); if (error || !req->newptr) return error; if (ledpin != sc->sc_ledpin) { sc->sc_ledpin = ledpin; if (sc->sc_softled) { ath_hal_gpioCfgOutput(sc->sc_ah, sc->sc_ledpin); ath_hal_gpioset(sc->sc_ah, sc->sc_ledpin, !sc->sc_ledon); } } return 0; } static int ath_sysctl_txantenna(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; u_int txantenna = ath_hal_getantennaswitch(sc->sc_ah); int error; error = sysctl_handle_int(oidp, &txantenna, 0, req); if (!error && req->newptr) { /* XXX assumes 2 antenna ports */ if (txantenna < HAL_ANT_VARIABLE || txantenna > HAL_ANT_FIXED_B) return EINVAL; ath_hal_setantennaswitch(sc->sc_ah, txantenna); /* * NB: with the switch locked this isn't meaningful, * but set it anyway so things like radiotap get * consistent info in their data. */ sc->sc_txantenna = txantenna; } return error; } static int ath_sysctl_rxantenna(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; u_int defantenna = ath_hal_getdefantenna(sc->sc_ah); int error; error = sysctl_handle_int(oidp, &defantenna, 0, req); if (!error && req->newptr) ath_hal_setdefantenna(sc->sc_ah, defantenna); return error; } static int ath_sysctl_diversity(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; u_int diversity = ath_hal_getdiversity(sc->sc_ah); int error; error = sysctl_handle_int(oidp, &diversity, 0, req); if (error || !req->newptr) return error; if (!ath_hal_setdiversity(sc->sc_ah, diversity)) return EINVAL; sc->sc_diversity = diversity; return 0; } static int ath_sysctl_diag(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; u_int32_t diag; int error; if (!ath_hal_getdiag(sc->sc_ah, &diag)) return EINVAL; error = sysctl_handle_int(oidp, &diag, 0, req); if (error || !req->newptr) return error; return !ath_hal_setdiag(sc->sc_ah, diag) ? EINVAL : 0; } static int ath_sysctl_tpscale(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; struct ifnet *ifp = sc->sc_ifp; u_int32_t scale; int error; (void) ath_hal_gettpscale(sc->sc_ah, &scale); error = sysctl_handle_int(oidp, &scale, 0, req); if (error || !req->newptr) return error; return !ath_hal_settpscale(sc->sc_ah, scale) ? EINVAL : (ifp->if_drv_flags & IFF_DRV_RUNNING) ? ath_reset(ifp) : 0; } static int ath_sysctl_tpc(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; u_int tpc = ath_hal_gettpc(sc->sc_ah); int error; error = sysctl_handle_int(oidp, &tpc, 0, req); if (error || !req->newptr) return error; return !ath_hal_settpc(sc->sc_ah, tpc) ? EINVAL : 0; } static int ath_sysctl_rfkill(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; struct ifnet *ifp = sc->sc_ifp; struct ath_hal *ah = sc->sc_ah; u_int rfkill = ath_hal_getrfkill(ah); int error; error = sysctl_handle_int(oidp, &rfkill, 0, req); if (error || !req->newptr) return error; if (rfkill == ath_hal_getrfkill(ah)) /* unchanged */ return 0; if (!ath_hal_setrfkill(ah, rfkill)) return EINVAL; return (ifp->if_drv_flags & IFF_DRV_RUNNING) ? 
ath_reset(ifp) : 0; } static int ath_sysctl_rfsilent(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; u_int rfsilent; int error; (void) ath_hal_getrfsilent(sc->sc_ah, &rfsilent); error = sysctl_handle_int(oidp, &rfsilent, 0, req); if (error || !req->newptr) return error; if (!ath_hal_setrfsilent(sc->sc_ah, rfsilent)) return EINVAL; sc->sc_rfsilentpin = rfsilent & 0x1c; sc->sc_rfsilentpol = (rfsilent & 0x2) != 0; return 0; } static int ath_sysctl_tpack(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; u_int32_t tpack; int error; (void) ath_hal_gettpack(sc->sc_ah, &tpack); error = sysctl_handle_int(oidp, &tpack, 0, req); if (error || !req->newptr) return error; return !ath_hal_settpack(sc->sc_ah, tpack) ? EINVAL : 0; } static int ath_sysctl_tpcts(SYSCTL_HANDLER_ARGS) { struct ath_softc *sc = arg1; u_int32_t tpcts; int error; (void) ath_hal_gettpcts(sc->sc_ah, &tpcts); error = sysctl_handle_int(oidp, &tpcts, 0, req); if (error || !req->newptr) return error; return !ath_hal_settpcts(sc->sc_ah, tpcts) ? EINVAL : 0; } static void ath_sysctlattach(struct ath_softc *sc) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); struct ath_hal *ah = sc->sc_ah; SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "countrycode", CTLFLAG_RD, &sc->sc_eecc, 0, "EEPROM country code"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "regdomain", CTLFLAG_RD, &sc->sc_eerd, 0, "EEPROM regdomain code"); #ifdef ATH_DEBUG sc->sc_debug = ath_debug; SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs"); #endif SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "slottime", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_slottime, "I", "802.11 slot time (us)"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "acktimeout", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_acktimeout, "I", "802.11 ACK timeout (us)"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "ctstimeout", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_ctstimeout, "I", "802.11 CTS timeout (us)"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "softled", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_softled, "I", "enable/disable software LED support"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "ledpin", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_ledpin, "I", "GPIO pin connected to LED"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "ledon", CTLFLAG_RW, &sc->sc_ledon, 0, "setting to turn LED on"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "ledidle", CTLFLAG_RW, &sc->sc_ledidle, 0, "idle time for inactivity LED (ticks)"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "txantenna", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_txantenna, "I", "antenna switch"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "rxantenna", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_rxantenna, "I", "default/rx antenna"); if (ath_hal_hasdiversity(ah)) SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "diversity", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_diversity, "I", "antenna diversity"); sc->sc_txintrperiod = ATH_TXINTR_PERIOD; SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "txintrperiod", CTLFLAG_RW, &sc->sc_txintrperiod, 0, "tx descriptor batching"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "diag", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_diag, "I", "h/w diagnostic control"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tpscale", 
CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_tpscale, "I", "tx power scaling"); if (ath_hal_hastpc(ah)) { SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tpc", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_tpc, "I", "enable/disable per-packet TPC"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tpack", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_tpack, "I", "tx power for ack frames"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tpcts", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_tpcts, "I", "tx power for cts frames"); } if (ath_hal_hasfastframes(sc->sc_ah)) { sc->sc_fftxqmin = ATH_FF_TXQMIN; SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "fftxqmin", CTLFLAG_RW, &sc->sc_fftxqmin, 0, "min frames before fast-frame staging"); sc->sc_fftxqmax = ATH_FF_TXQMAX; SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "fftxqmax", CTLFLAG_RW, &sc->sc_fftxqmax, 0, "max queued frames before tail drop"); } if (ath_hal_hasrfsilent(ah)) { SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "rfsilent", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_rfsilent, "I", "h/w RF silent config"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "rfkill", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ath_sysctl_rfkill, "I", "enable/disable RF kill switch"); } sc->sc_monpass = HAL_RXERR_DECRYPT | HAL_RXERR_MIC; SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "monpass", CTLFLAG_RW, &sc->sc_monpass, 0, "mask of error frames to pass when monitoring"); } static void ath_bpfattach(struct ath_softc *sc) { struct ifnet *ifp = sc->sc_ifp; bpfattach(ifp, DLT_IEEE802_11_RADIO, sizeof(struct ieee80211_frame) + sizeof(sc->sc_tx_th)); /* * Initialize constant fields. * XXX make header lengths a multiple of 32-bits so subsequent * headers are properly aligned; this is a kludge to keep * certain applications happy. * * NB: the channel is setup each time we transition to the * RUN state to avoid filling it in for each frame. */ sc->sc_tx_th_len = roundup(sizeof(sc->sc_tx_th), sizeof(u_int32_t)); sc->sc_tx_th.wt_ihdr.it_len = htole16(sc->sc_tx_th_len); sc->sc_tx_th.wt_ihdr.it_present = htole32(ATH_TX_RADIOTAP_PRESENT); sc->sc_rx_th_len = roundup(sizeof(sc->sc_rx_th), sizeof(u_int32_t)); sc->sc_rx_th.wr_ihdr.it_len = htole16(sc->sc_rx_th_len); sc->sc_rx_th.wr_ihdr.it_present = htole32(ATH_RX_RADIOTAP_PRESENT); } static int ath_tx_raw_start(struct ath_softc *sc, struct ieee80211_node *ni, struct ath_buf *bf, struct mbuf *m0, const struct ieee80211_bpf_params *params) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ath_hal *ah = sc->sc_ah; int error, ismcast, ismrr; int hdrlen, pktlen, try0, txantenna; u_int8_t rix, cix, txrate, ctsrate, rate1, rate2, rate3; struct ieee80211_frame *wh; u_int flags, ctsduration; HAL_PKT_TYPE atype; const HAL_RATE_TABLE *rt; struct ath_desc *ds; u_int pri; wh = mtod(m0, struct ieee80211_frame *); ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); hdrlen = ieee80211_anyhdrsize(wh); /* * Packet length must not include any * pad bytes; deduct them here. 
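 *
 * The header is assumed to already be padded to a 32-bit boundary
 * (hence the DATAPAD XXX below), so (hdrlen & 3) gives the pad byte
 * count: e.g. a 26-byte QoS data header is carried as 28 bytes, so 2
 * bytes are deducted, and IEEE80211_CRC_LEN (4) is added for the FCS.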
*/ /* XXX honor IEEE80211_BPF_DATAPAD */ pktlen = m0->m_pkthdr.len - (hdrlen & 3) + IEEE80211_CRC_LEN; error = ath_tx_dmasetup(sc, bf, m0); if (error != 0) return error; m0 = bf->bf_m; /* NB: may have changed */ wh = mtod(m0, struct ieee80211_frame *); bf->bf_node = ni; /* NB: held reference */ flags = HAL_TXDESC_CLRDMASK; /* XXX needed for crypto errs */ flags |= HAL_TXDESC_INTREQ; /* force interrupt */ if (params->ibp_flags & IEEE80211_BPF_RTS) flags |= HAL_TXDESC_RTSENA; else if (params->ibp_flags & IEEE80211_BPF_CTS) flags |= HAL_TXDESC_CTSENA; /* XXX leave ismcast to injector? */ if ((params->ibp_flags & IEEE80211_BPF_NOACK) || ismcast) flags |= HAL_TXDESC_NOACK; rt = sc->sc_currates; KASSERT(rt != NULL, ("no rate table, mode %u", sc->sc_curmode)); rix = ath_tx_findrix(rt, params->ibp_rate0); txrate = rt->info[rix].rateCode; if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) txrate |= rt->info[rix].shortPreamble; sc->sc_txrate = txrate; try0 = params->ibp_try0; ismrr = (params->ibp_try1 != 0); txantenna = params->ibp_pri >> 2; if (txantenna == 0) /* XXX? */ txantenna = sc->sc_txantenna; ctsduration = 0; if (flags & (HAL_TXDESC_CTSENA | HAL_TXDESC_RTSENA)) { cix = ath_tx_findrix(rt, params->ibp_ctsrate); ctsrate = rt->info[cix].rateCode; if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) { ctsrate |= rt->info[cix].shortPreamble; if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ ctsduration += rt->info[cix].spAckDuration; ctsduration += ath_hal_computetxtime(ah, rt, pktlen, rix, AH_TRUE); if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ ctsduration += rt->info[rix].spAckDuration; } else { if (flags & HAL_TXDESC_RTSENA) /* SIFS + CTS */ ctsduration += rt->info[cix].lpAckDuration; ctsduration += ath_hal_computetxtime(ah, rt, pktlen, rix, AH_FALSE); if ((flags & HAL_TXDESC_NOACK) == 0) /* SIFS + ACK */ ctsduration += rt->info[rix].lpAckDuration; } ismrr = 0; /* XXX */ } else ctsrate = 0; pri = params->ibp_pri & 3; /* * NB: we mark all packets as type PSPOLL so the h/w won't * set the sequence number, duration, etc. */ atype = HAL_PKT_TYPE_PSPOLL; if (IFF_DUMPPKTS(sc, ATH_DEBUG_XMIT)) ieee80211_dump_pkt(ic, mtod(m0, caddr_t), m0->m_len, sc->sc_hwmap[txrate].ieeerate, -1); if (bpf_peers_present(ifp->if_bpf)) { u_int64_t tsf = ath_hal_gettsf64(ah); sc->sc_tx_th.wt_tsf = htole64(tsf); sc->sc_tx_th.wt_flags = sc->sc_hwmap[txrate].txflags; if (wh->i_fc[1] & IEEE80211_FC1_WEP) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; sc->sc_tx_th.wt_rate = sc->sc_hwmap[txrate].ieeerate; sc->sc_tx_th.wt_txpower = ni->ni_txpower; sc->sc_tx_th.wt_antenna = sc->sc_txantenna; bpf_mtap2(ifp->if_bpf, &sc->sc_tx_th, sc->sc_tx_th_len, m0); } /* * Formulate first tx descriptor with tx controls. */ ds = bf->bf_desc; /* XXX check return value? 
*/ ath_hal_setuptxdesc(ah, ds , pktlen /* packet length */ , hdrlen /* header length */ , atype /* Atheros packet type */ , params->ibp_power /* txpower */ , txrate, try0 /* series 0 rate/tries */ , HAL_TXKEYIX_INVALID /* key cache index */ , txantenna /* antenna mode */ , flags /* flags */ , ctsrate /* rts/cts rate */ , ctsduration /* rts/cts duration */ ); bf->bf_flags = flags; if (ismrr) { rix = ath_tx_findrix(rt, params->ibp_rate1); rate1 = rt->info[rix].rateCode; if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) rate1 |= rt->info[rix].shortPreamble; if (params->ibp_try2) { rix = ath_tx_findrix(rt, params->ibp_rate2); rate2 = rt->info[rix].rateCode; if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) rate2 |= rt->info[rix].shortPreamble; } else rate2 = 0; if (params->ibp_try3) { rix = ath_tx_findrix(rt, params->ibp_rate3); rate3 = rt->info[rix].rateCode; if (params->ibp_flags & IEEE80211_BPF_SHORTPRE) rate3 |= rt->info[rix].shortPreamble; } else rate3 = 0; ath_hal_setupxtxdesc(ah, ds , rate1, params->ibp_try1 /* series 1 */ , rate2, params->ibp_try2 /* series 2 */ , rate3, params->ibp_try3 /* series 3 */ ); } /* NB: no buffered multicast in power save support */ ath_tx_handoff(sc, sc->sc_ac2q[pri], bf); return 0; } static int ath_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = ic->ic_ifp; struct ath_softc *sc = ifp->if_softc; struct ath_buf *bf; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->sc_invalid) { ieee80211_free_node(ni); m_freem(m); return ENETDOWN; } /* * Grab a TX buffer and associated resources. */ ATH_TXBUF_LOCK(sc); bf = STAILQ_FIRST(&sc->sc_txbuf); if (bf != NULL) STAILQ_REMOVE_HEAD(&sc->sc_txbuf, bf_list); ATH_TXBUF_UNLOCK(sc); if (bf == NULL) { DPRINTF(sc, ATH_DEBUG_XMIT, "%s: out of xmit buffers\n", __func__); sc->sc_stats.ast_tx_qstop++; ifp->if_drv_flags |= IFF_DRV_OACTIVE; ieee80211_free_node(ni); m_freem(m); return ENOBUFS; } ifp->if_opackets++; sc->sc_stats.ast_tx_raw++; if (params == NULL) { /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. */ if (ath_tx_start(sc, ni, bf, m)) goto bad; } else { /* * Caller supplied explicit parameters to use in * sending the frame. */ if (ath_tx_raw_start(sc, ni, bf, m, params)) goto bad; } ifp->if_timer = 5; return 0; bad: ifp->if_oerrors++; ATH_TXBUF_LOCK(sc); STAILQ_INSERT_TAIL(&sc->sc_txbuf, bf, bf_list); ATH_TXBUF_UNLOCK(sc); ieee80211_free_node(ni); return EIO; /* XXX */ } /* * Announce various information on device/driver attach. */ static void ath_announce(struct ath_softc *sc) { #define HAL_MODE_DUALBAND (HAL_MODE_11A|HAL_MODE_11B) struct ifnet *ifp = sc->sc_ifp; struct ath_hal *ah = sc->sc_ah; u_int modes, cc; if_printf(ifp, "mac %d.%d phy %d.%d", ah->ah_macVersion, ah->ah_macRev, ah->ah_phyRev >> 4, ah->ah_phyRev & 0xf); /* * Print radio revision(s). We check the wireless modes * to avoid falsely printing revs for inoperable parts. * Dual-band radio revs are returned in the 5Ghz rev number. 
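 *
 * The analog rev bytes carry the major revision in the high nibble
 * and the minor revision in the low nibble, hence the ">> 4"/"& 0xf"
 * split below; e.g. an ah_analog5GhzRev of 0x36 prints as "radio 3.6".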
*/ ath_hal_getcountrycode(ah, &cc); modes = ath_hal_getwirelessmodes(ah, cc); if ((modes & HAL_MODE_DUALBAND) == HAL_MODE_DUALBAND) { if (ah->ah_analog5GhzRev && ah->ah_analog2GhzRev) printf(" 5ghz radio %d.%d 2ghz radio %d.%d", ah->ah_analog5GhzRev >> 4, ah->ah_analog5GhzRev & 0xf, ah->ah_analog2GhzRev >> 4, ah->ah_analog2GhzRev & 0xf); else printf(" radio %d.%d", ah->ah_analog5GhzRev >> 4, ah->ah_analog5GhzRev & 0xf); } else printf(" radio %d.%d", ah->ah_analog5GhzRev >> 4, ah->ah_analog5GhzRev & 0xf); printf("\n"); if (bootverbose) { int i; for (i = 0; i <= WME_AC_VO; i++) { struct ath_txq *txq = sc->sc_ac2q[i]; if_printf(ifp, "Use hw queue %u for %s traffic\n", txq->axq_qnum, ieee80211_wme_acnames[i]); } if_printf(ifp, "Use hw queue %u for CAB traffic\n", sc->sc_cabq->axq_qnum); if_printf(ifp, "Use hw queue %u for beacons\n", sc->sc_bhalq); } if (ath_rxbuf != ATH_RXBUF) if_printf(ifp, "using %u rx buffers\n", ath_rxbuf); if (ath_txbuf != ATH_TXBUF) if_printf(ifp, "using %u tx buffers\n", ath_txbuf); #undef HAL_MODE_DUALBAND } diff --git a/sys/dev/iwi/if_iwi.c b/sys/dev/iwi/if_iwi.c index 4f81c1b48e4d..81e0220f2a59 100644 --- a/sys/dev/iwi/if_iwi.c +++ b/sys/dev/iwi/if_iwi.c @@ -1,3695 +1,3696 @@ /*- * Copyright (c) 2004, 2005 * Damien Bergamini . All rights reserved. * Copyright (c) 2005-2006 Sam Leffler, Errno Consulting * Copyright (c) 2007 Andrew Thompson * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /*- * Intel(R) PRO/Wireless 2200BG/2225BG/2915ABG driver * http://www.intel.com/network/connectivity/products/wireless/prowireless_mobile.htm */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define IWI_DEBUG #ifdef IWI_DEBUG #define DPRINTF(x) do { if (iwi_debug > 0) printf x; } while (0) #define DPRINTFN(n, x) do { if (iwi_debug >= (n)) printf x; } while (0) int iwi_debug = 0; SYSCTL_INT(_debug, OID_AUTO, iwi, CTLFLAG_RW, &iwi_debug, 0, "iwi debug level"); static const char *iwi_fw_states[] = { "IDLE", /* IWI_FW_IDLE */ "LOADING", /* IWI_FW_LOADING */ "ASSOCIATING", /* IWI_FW_ASSOCIATING */ "DISASSOCIATING", /* IWI_FW_DISASSOCIATING */ "SCANNING", /* IWI_FW_SCANNING */ }; #else #define DPRINTF(x) #define DPRINTFN(n, x) #endif MODULE_DEPEND(iwi, pci, 1, 1, 1); MODULE_DEPEND(iwi, wlan, 1, 1, 1); MODULE_DEPEND(iwi, firmware, 1, 1, 1); enum { IWI_LED_TX, IWI_LED_RX, IWI_LED_POLL, }; struct iwi_ident { uint16_t vendor; uint16_t device; const char *name; }; static const struct iwi_ident iwi_ident_table[] = { { 0x8086, 0x4220, "Intel(R) PRO/Wireless 2200BG" }, { 0x8086, 0x4221, "Intel(R) PRO/Wireless 2225BG" }, { 0x8086, 0x4223, "Intel(R) PRO/Wireless 2915ABG" }, { 0x8086, 0x4224, "Intel(R) PRO/Wireless 2915ABG" }, { 0, 0, NULL } }; static struct ieee80211vap *iwi_vap_create(struct ieee80211com *, const char name[IFNAMSIZ], int unit, int opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]); static void iwi_vap_delete(struct ieee80211vap *); static void iwi_dma_map_addr(void *, bus_dma_segment_t *, int, int); static int iwi_alloc_cmd_ring(struct iwi_softc *, struct iwi_cmd_ring *, int); static void iwi_reset_cmd_ring(struct iwi_softc *, struct iwi_cmd_ring *); static void iwi_free_cmd_ring(struct iwi_softc *, struct iwi_cmd_ring *); static int iwi_alloc_tx_ring(struct iwi_softc *, struct iwi_tx_ring *, int, bus_addr_t, bus_addr_t); static void iwi_reset_tx_ring(struct iwi_softc *, struct iwi_tx_ring *); static void iwi_free_tx_ring(struct iwi_softc *, struct iwi_tx_ring *); static int iwi_alloc_rx_ring(struct iwi_softc *, struct iwi_rx_ring *, int); static void iwi_reset_rx_ring(struct iwi_softc *, struct iwi_rx_ring *); static void iwi_free_rx_ring(struct iwi_softc *, struct iwi_rx_ring *); -static struct ieee80211_node *iwi_node_alloc(struct ieee80211_node_table *); +static struct ieee80211_node *iwi_node_alloc(struct ieee80211vap *, + const uint8_t [IEEE80211_ADDR_LEN]); static void iwi_node_free(struct ieee80211_node *); static void iwi_media_status(struct ifnet *, struct ifmediareq *); static int iwi_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void iwi_wme_init(struct iwi_softc *); static int iwi_wme_setparams(struct iwi_softc *, struct ieee80211com *); static int iwi_wme_update(struct ieee80211com *); static uint16_t iwi_read_prom_word(struct iwi_softc *, uint8_t); static void iwi_frame_intr(struct iwi_softc *, struct iwi_rx_data *, int, struct iwi_frame *); static void iwi_authsuccess(void *, int); static void iwi_assocsuccess(void *, int); static void iwi_assocfailed(void *, int); static void iwi_notification_intr(struct iwi_softc *, struct iwi_notif 
*); static void iwi_rx_intr(struct iwi_softc *); static void iwi_tx_intr(struct iwi_softc *, struct iwi_tx_ring *); static void iwi_intr(void *); static int iwi_cmd(struct iwi_softc *, uint8_t, void *, uint8_t); static void iwi_write_ibssnode(struct iwi_softc *, const u_int8_t [], int); static int iwi_tx_start(struct ifnet *, struct mbuf *, struct ieee80211_node *, int); static int iwi_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void iwi_start_locked(struct ifnet *); static void iwi_start(struct ifnet *); static void iwi_watchdog(void *); static int iwi_ioctl(struct ifnet *, u_long, caddr_t); static void iwi_stop_master(struct iwi_softc *); static int iwi_reset(struct iwi_softc *); static int iwi_load_ucode(struct iwi_softc *, const struct iwi_fw *); static int iwi_load_firmware(struct iwi_softc *, const struct iwi_fw *); static void iwi_release_fw_dma(struct iwi_softc *sc); static int iwi_config(struct iwi_softc *); static int iwi_get_firmware(struct iwi_softc *, enum ieee80211_opmode); static void iwi_put_firmware(struct iwi_softc *); static int iwi_scanchan(struct iwi_softc *, unsigned long, int); static void iwi_scan_start(struct ieee80211com *); static void iwi_scan_end(struct ieee80211com *); static void iwi_scanabort(void *, int); static void iwi_set_channel(struct ieee80211com *); static void iwi_scan_curchan(struct ieee80211_scan_state *, unsigned long maxdwell); #if 0 static void iwi_scan_allchan(struct ieee80211com *, unsigned long maxdwell); #endif static void iwi_scan_mindwell(struct ieee80211_scan_state *); static void iwi_ops(void *, int); static int iwi_queue_cmd(struct iwi_softc *, int, unsigned long); static int iwi_auth_and_assoc(struct iwi_softc *, struct ieee80211vap *); static int iwi_disassociate(struct iwi_softc *, int quiet); static void iwi_init_locked(struct iwi_softc *); static void iwi_init(void *); static int iwi_init_fw_dma(struct iwi_softc *, int); static void iwi_stop_locked(void *); static void iwi_stop(struct iwi_softc *); static void iwi_restart(void *, int); static int iwi_getrfkill(struct iwi_softc *); static void iwi_radio_on(void *, int); static void iwi_radio_off(void *, int); static void iwi_sysctlattach(struct iwi_softc *); static void iwi_led_event(struct iwi_softc *, int); static void iwi_ledattach(struct iwi_softc *); static int iwi_probe(device_t); static int iwi_attach(device_t); static int iwi_detach(device_t); static int iwi_shutdown(device_t); static int iwi_suspend(device_t); static int iwi_resume(device_t); static device_method_t iwi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, iwi_probe), DEVMETHOD(device_attach, iwi_attach), DEVMETHOD(device_detach, iwi_detach), DEVMETHOD(device_shutdown, iwi_shutdown), DEVMETHOD(device_suspend, iwi_suspend), DEVMETHOD(device_resume, iwi_resume), { 0, 0 } }; static driver_t iwi_driver = { "iwi", iwi_methods, sizeof (struct iwi_softc) }; static devclass_t iwi_devclass; DRIVER_MODULE(iwi, pci, iwi_driver, iwi_devclass, 0, 0); static __inline uint8_t MEM_READ_1(struct iwi_softc *sc, uint32_t addr) { CSR_WRITE_4(sc, IWI_CSR_INDIRECT_ADDR, addr); return CSR_READ_1(sc, IWI_CSR_INDIRECT_DATA); } static __inline uint32_t MEM_READ_4(struct iwi_softc *sc, uint32_t addr) { CSR_WRITE_4(sc, IWI_CSR_INDIRECT_ADDR, addr); return CSR_READ_4(sc, IWI_CSR_INDIRECT_DATA); } static int iwi_probe(device_t dev) { const struct iwi_ident *ident; for (ident = iwi_ident_table; ident->name != NULL; ident++) { if (pci_get_vendor(dev) == ident->vendor && 
pci_get_device(dev) == ident->device) { device_set_desc(dev, ident->name); return 0; } } return ENXIO; } /* Base Address Register */ #define IWI_PCI_BAR0 0x10 static int iwi_attach(device_t dev) { struct iwi_softc *sc = device_get_softc(dev); struct ifnet *ifp; struct ieee80211com *ic; uint16_t val; int i, error; uint8_t bands; sc->sc_dev = dev; ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); return ENXIO; } ic = ifp->if_l2com; IWI_LOCK_INIT(sc); IWI_CMD_LOCK_INIT(sc); sc->sc_unr = new_unrhdr(1, IWI_MAX_IBSSNODE-1, &sc->sc_mtx); sc->sc_tq = taskqueue_create("iwi_taskq", M_NOWAIT | M_ZERO, taskqueue_thread_enqueue, &sc->sc_tq); taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq", device_get_nameunit(dev)); sc->sc_tq2 = taskqueue_create("iwi_taskq2", M_NOWAIT | M_ZERO, taskqueue_thread_enqueue, &sc->sc_tq2); taskqueue_start_threads(&sc->sc_tq2, 1, PI_NET, "%s taskq2", device_get_nameunit(dev)); TASK_INIT(&sc->sc_radiontask, 0, iwi_radio_on, sc); TASK_INIT(&sc->sc_radiofftask, 0, iwi_radio_off, sc); TASK_INIT(&sc->sc_restarttask, 0, iwi_restart, sc); TASK_INIT(&sc->sc_opstask, 0, iwi_ops, sc); TASK_INIT(&sc->sc_scanaborttask, 0, iwi_scanabort, sc); callout_init_mtx(&sc->sc_wdtimer, &sc->sc_mtx, 0); callout_init_mtx(&sc->sc_rftimer, &sc->sc_mtx, 0); if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { device_printf(dev, "chip is in D%d power mode " "-- setting to D0\n", pci_get_powerstate(dev)); pci_set_powerstate(dev, PCI_POWERSTATE_D0); } pci_write_config(dev, 0x41, 0, 1); /* enable bus-mastering */ pci_enable_busmaster(dev); sc->mem_rid = IWI_PCI_BAR0; sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, RF_ACTIVE); if (sc->mem == NULL) { device_printf(dev, "could not allocate memory resource\n"); goto fail; } sc->sc_st = rman_get_bustag(sc->mem); sc->sc_sh = rman_get_bushandle(sc->mem); sc->irq_rid = 0; sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE | RF_SHAREABLE); if (sc->irq == NULL) { device_printf(dev, "could not allocate interrupt resource\n"); goto fail; } if (iwi_reset(sc) != 0) { device_printf(dev, "could not reset adapter\n"); goto fail; } /* * Allocate rings. 
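 *
 * One command ring, four tx rings (one per WME access category, each
 * with its own read/write index CSRs at IWI_CSR_TX1_RIDX/WIDX + i*4)
 * and a single rx ring.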
*/ if (iwi_alloc_cmd_ring(sc, &sc->cmdq, IWI_CMD_RING_COUNT) != 0) { device_printf(dev, "could not allocate Cmd ring\n"); goto fail; } for (i = 0; i < 4; i++) { error = iwi_alloc_tx_ring(sc, &sc->txq[i], IWI_TX_RING_COUNT, IWI_CSR_TX1_RIDX + i * 4, IWI_CSR_TX1_WIDX + i * 4); if (error != 0) { device_printf(dev, "could not allocate Tx ring %d\n", i+i); goto fail; } } if (iwi_alloc_rx_ring(sc, &sc->rxq, IWI_RX_RING_COUNT) != 0) { device_printf(dev, "could not allocate Rx ring\n"); goto fail; } iwi_wme_init(sc); ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = iwi_init; ifp->if_ioctl = iwi_ioctl; ifp->if_start = iwi_start; IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN; IFQ_SET_READY(&ifp->if_snd); ic->ic_ifp = ifp; ic->ic_opmode = IEEE80211_M_STA; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_IBSS /* IBSS mode supported */ | IEEE80211_C_MONITOR /* monitor mode supported */ | IEEE80211_C_PMGT /* power save supported */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_WPA /* 802.11i */ | IEEE80211_C_WME /* 802.11e */ #if 0 | IEEE80211_C_BGSCAN /* capable of bg scanning */ #endif ; /* read MAC address from EEPROM */ val = iwi_read_prom_word(sc, IWI_EEPROM_MAC + 0); ic->ic_myaddr[0] = val & 0xff; ic->ic_myaddr[1] = val >> 8; val = iwi_read_prom_word(sc, IWI_EEPROM_MAC + 1); ic->ic_myaddr[2] = val & 0xff; ic->ic_myaddr[3] = val >> 8; val = iwi_read_prom_word(sc, IWI_EEPROM_MAC + 2); ic->ic_myaddr[4] = val & 0xff; ic->ic_myaddr[5] = val >> 8; bands = 0; setbit(&bands, IEEE80211_MODE_11B); setbit(&bands, IEEE80211_MODE_11G); if (pci_get_device(dev) >= 0x4223) setbit(&bands, IEEE80211_MODE_11A); ieee80211_init_channels(ic, NULL, &bands); ieee80211_ifattach(ic); /* override default methods */ ic->ic_node_alloc = iwi_node_alloc; sc->sc_node_free = ic->ic_node_free; ic->ic_node_free = iwi_node_free; ic->ic_raw_xmit = iwi_raw_xmit; ic->ic_scan_start = iwi_scan_start; ic->ic_scan_end = iwi_scan_end; ic->ic_set_channel = iwi_set_channel; ic->ic_scan_curchan = iwi_scan_curchan; ic->ic_scan_mindwell = iwi_scan_mindwell; ic->ic_wme.wme_update = iwi_wme_update; ic->ic_vap_create = iwi_vap_create; ic->ic_vap_delete = iwi_vap_delete; bpfattach(ifp, DLT_IEEE802_11_RADIO, sizeof (struct ieee80211_frame) + sizeof (sc->sc_txtap)); sc->sc_rxtap_len = sizeof sc->sc_rxtap; sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len); sc->sc_rxtap.wr_ihdr.it_present = htole32(IWI_RX_RADIOTAP_PRESENT); sc->sc_txtap_len = sizeof sc->sc_txtap; sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len); sc->sc_txtap.wt_ihdr.it_present = htole32(IWI_TX_RADIOTAP_PRESENT); iwi_sysctlattach(sc); iwi_ledattach(sc); /* * Hook our interrupt after all initialization is complete. 
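 * The IRQ was allocated RF_SHAREABLE above, so iwi_intr must not be
 * able to run against partially initialized state.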
*/ error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, iwi_intr, sc, &sc->sc_ih); if (error != 0) { device_printf(dev, "could not set up interrupt\n"); goto fail; } if (bootverbose) ieee80211_announce(ic); return 0; fail: /* XXX fix */ iwi_detach(dev); return ENXIO; } static int iwi_detach(device_t dev) { struct iwi_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; iwi_stop(sc); bpfdetach(ifp); ieee80211_ifdetach(ic); /* NB: do early to drain any pending tasks */ taskqueue_free(sc->sc_tq); taskqueue_free(sc->sc_tq2); iwi_put_firmware(sc); iwi_release_fw_dma(sc); iwi_free_cmd_ring(sc, &sc->cmdq); iwi_free_tx_ring(sc, &sc->txq[0]); iwi_free_tx_ring(sc, &sc->txq[1]); iwi_free_tx_ring(sc, &sc->txq[2]); iwi_free_tx_ring(sc, &sc->txq[3]); iwi_free_rx_ring(sc, &sc->rxq); bus_teardown_intr(dev, sc->irq, sc->sc_ih); bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq); bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem); delete_unrhdr(sc->sc_unr); IWI_LOCK_DESTROY(sc); IWI_CMD_LOCK_DESTROY(sc); if_free(ifp); return 0; } static struct ieee80211vap * iwi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, int opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ifnet *ifp = ic->ic_ifp; struct iwi_softc *sc = ifp->if_softc; struct iwi_vap *ivp; struct ieee80211vap *vap; int i; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return NULL; /* * Get firmware image (and possibly dma memory) on mode change. */ if (iwi_get_firmware(sc, opmode)) return NULL; /* allocate DMA memory for mapping firmware image */ i = sc->fw_fw.size; if (sc->fw_boot.size > i) i = sc->fw_boot.size; /* XXX do we dma the ucode as well ? 
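 * (i ends up as the largest of the boot, ucode and runtime image
 * sizes, so the single DMA area set up by iwi_init_fw_dma below is
 * big enough for whichever image is being staged.)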
*/ if (sc->fw_uc.size > i) i = sc->fw_uc.size; if (iwi_init_fw_dma(sc, i)) return NULL; ivp = (struct iwi_vap *) malloc(sizeof(struct iwi_vap), M_80211_VAP, M_NOWAIT | M_ZERO); if (ivp == NULL) return NULL; vap = &ivp->iwi_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac); /* override the default, the setting comes from the linux driver */ vap->iv_bmissthreshold = 24; /* override with driver methods */ ivp->iwi_newstate = vap->iv_newstate; vap->iv_newstate = iwi_newstate; TASK_INIT(&ivp->iwi_authsuccess_task, 0, iwi_authsuccess, vap); TASK_INIT(&ivp->iwi_assocsuccess_task, 0, iwi_assocsuccess, vap); TASK_INIT(&ivp->iwi_assocfailed_task, 0, iwi_assocfailed, vap); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, iwi_media_status); ic->ic_opmode = opmode; return vap; } static void iwi_vap_delete(struct ieee80211vap *vap) { struct iwi_vap *ivp = IWI_VAP(vap); ieee80211_vap_detach(vap); free(ivp, M_80211_VAP); } static void iwi_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error != 0) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); *(bus_addr_t *)arg = segs[0].ds_addr; } static int iwi_alloc_cmd_ring(struct iwi_softc *sc, struct iwi_cmd_ring *ring, int count) { int error; ring->count = count; ring->queued = 0; ring->cur = ring->next = 0; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, count * IWI_CMD_DESC_SIZE, 1, count * IWI_CMD_DESC_SIZE, 0, NULL, NULL, &ring->desc_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc, count * IWI_CMD_DESC_SIZE, iwi_dma_map_addr, &ring->physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load desc DMA map\n"); goto fail; } return 0; fail: iwi_free_cmd_ring(sc, ring); return error; } static void iwi_reset_cmd_ring(struct iwi_softc *sc, struct iwi_cmd_ring *ring) { ring->queued = 0; ring->cur = ring->next = 0; } static void iwi_free_cmd_ring(struct iwi_softc *sc, struct iwi_cmd_ring *ring) { if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dmat, ring->desc_map); bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map); } if (ring->desc_dmat != NULL) bus_dma_tag_destroy(ring->desc_dmat); } static int iwi_alloc_tx_ring(struct iwi_softc *sc, struct iwi_tx_ring *ring, int count, bus_addr_t csr_ridx, bus_addr_t csr_widx) { int i, error; ring->count = count; ring->queued = 0; ring->cur = ring->next = 0; ring->csr_ridx = csr_ridx; ring->csr_widx = csr_widx; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, count * IWI_TX_DESC_SIZE, 1, count * IWI_TX_DESC_SIZE, 0, NULL, NULL, &ring->desc_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc, count * IWI_TX_DESC_SIZE, iwi_dma_map_addr, &ring->physaddr, 0); 
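	/*
	 * On success iwi_dma_map_addr has recorded the single segment's
	 * bus address in ring->physaddr; on failure the partially built
	 * ring is torn back down through the fail: path below.
	 */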
if (error != 0) { device_printf(sc->sc_dev, "could not load desc DMA map\n"); goto fail; } ring->data = malloc(count * sizeof (struct iwi_tx_data), M_DEVBUF, M_NOWAIT | M_ZERO); if (ring->data == NULL) { device_printf(sc->sc_dev, "could not allocate soft data\n"); error = ENOMEM; goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, IWI_MAX_NSEG, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create data DMA tag\n"); goto fail; } for (i = 0; i < count; i++) { error = bus_dmamap_create(ring->data_dmat, 0, &ring->data[i].map); if (error != 0) { device_printf(sc->sc_dev, "could not create DMA map\n"); goto fail; } } return 0; fail: iwi_free_tx_ring(sc, ring); return error; } static void iwi_reset_tx_ring(struct iwi_softc *sc, struct iwi_tx_ring *ring) { struct iwi_tx_data *data; int i; for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; } } ring->queued = 0; ring->cur = ring->next = 0; } static void iwi_free_tx_ring(struct iwi_softc *sc, struct iwi_tx_ring *ring) { struct iwi_tx_data *data; int i; if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dmat, ring->desc_map); bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map); } if (ring->desc_dmat != NULL) bus_dma_tag_destroy(ring->desc_dmat); if (ring->data != NULL) { for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->ni != NULL) ieee80211_free_node(data->ni); if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } free(ring->data, M_DEVBUF); } if (ring->data_dmat != NULL) bus_dma_tag_destroy(ring->data_dmat); } static int iwi_alloc_rx_ring(struct iwi_softc *sc, struct iwi_rx_ring *ring, int count) { struct iwi_rx_data *data; int i, error; ring->count = count; ring->cur = 0; ring->data = malloc(count * sizeof (struct iwi_rx_data), M_DEVBUF, M_NOWAIT | M_ZERO); if (ring->data == NULL) { device_printf(sc->sc_dev, "could not allocate soft data\n"); error = ENOMEM; goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create data DMA tag\n"); goto fail; } for (i = 0; i < count; i++) { data = &ring->data[i]; error = bus_dmamap_create(ring->data_dmat, 0, &data->map); if (error != 0) { device_printf(sc->sc_dev, "could not create DMA map\n"); goto fail; } data->m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (data->m == NULL) { device_printf(sc->sc_dev, "could not allocate rx mbuf\n"); error = ENOMEM; goto fail; } error = bus_dmamap_load(ring->data_dmat, data->map, mtod(data->m, void *), MCLBYTES, iwi_dma_map_addr, &data->physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load rx buf DMA map"); goto fail; } data->reg = IWI_CSR_RX_BASE + i * 4; } return 0; fail: iwi_free_rx_ring(sc, ring); return error; } static void iwi_reset_rx_ring(struct iwi_softc *sc, struct iwi_rx_ring *ring) { ring->cur 
= 0; } static void iwi_free_rx_ring(struct iwi_softc *sc, struct iwi_rx_ring *ring) { struct iwi_rx_data *data; int i; if (ring->data != NULL) { for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } free(ring->data, M_DEVBUF); } if (ring->data_dmat != NULL) bus_dma_tag_destroy(ring->data_dmat); } static int iwi_shutdown(device_t dev) { struct iwi_softc *sc = device_get_softc(dev); iwi_stop(sc); iwi_put_firmware(sc); /* ??? XXX */ return 0; } static int iwi_suspend(device_t dev) { struct iwi_softc *sc = device_get_softc(dev); iwi_stop(sc); return 0; } static int iwi_resume(device_t dev) { struct iwi_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->sc_ifp; pci_write_config(dev, 0x41, 0, 1); if (ifp->if_flags & IFF_UP) iwi_init(sc); return 0; } static struct ieee80211_node * -iwi_node_alloc(struct ieee80211_node_table *nt) +iwi_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) { struct iwi_node *in; in = malloc(sizeof (struct iwi_node), M_80211_NODE, M_NOWAIT | M_ZERO); if (in == NULL) return NULL; - + /* XXX assign sta table entry for adhoc */ in->in_station = -1; return &in->in_node; } static void iwi_node_free(struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; struct iwi_softc *sc = ic->ic_ifp->if_softc; struct iwi_node *in = (struct iwi_node *)ni; if (in->in_station != -1) { DPRINTF(("%s mac %6D station %u\n", __func__, ni->ni_macaddr, ":", in->in_station)); free_unr(sc->sc_unr, in->in_station); } sc->sc_node_free(ni); } /* * Convert h/w rate code to IEEE rate code. */ static int iwi_cvtrate(int iwirate) { switch (iwirate) { case IWI_RATE_DS1: return 2; case IWI_RATE_DS2: return 4; case IWI_RATE_DS5: return 11; case IWI_RATE_DS11: return 22; case IWI_RATE_OFDM6: return 12; case IWI_RATE_OFDM9: return 18; case IWI_RATE_OFDM12: return 24; case IWI_RATE_OFDM18: return 36; case IWI_RATE_OFDM24: return 48; case IWI_RATE_OFDM36: return 72; case IWI_RATE_OFDM48: return 96; case IWI_RATE_OFDM54: return 108; } return 0; } /* * The firmware automatically adapts the transmit speed. We report its current * value here. */ static void iwi_media_status(struct ifnet *ifp, struct ifmediareq *imr) { struct ieee80211vap *vap = ifp->if_softc; struct ieee80211com *ic = vap->iv_ic; struct iwi_softc *sc = ic->ic_ifp->if_softc; /* read current transmission rate from adapter */ vap->iv_bss->ni_txrate = iwi_cvtrate(CSR_READ_4(sc, IWI_CSR_CURRENT_TX_RATE)); ieee80211_media_status(ifp, imr); } static int iwi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct iwi_vap *ivp = IWI_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct ifnet *ifp = ic->ic_ifp; struct iwi_softc *sc = ifp->if_softc; IWI_LOCK_DECL; DPRINTF(("%s: %s -> %s flags 0x%x\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate], sc->flags)); switch (nstate) { case IEEE80211_S_INIT: IWI_LOCK(sc); /* * NB: don't try to do this if iwi_stop_master has * shutdown the firmware and disabled interrupts. 
*/ if (vap->iv_state == IEEE80211_S_RUN && (sc->flags & IWI_FLAG_FW_INITED)) iwi_queue_cmd(sc, IWI_DISASSOC, 1); IWI_UNLOCK(sc); break; case IEEE80211_S_AUTH: iwi_queue_cmd(sc, IWI_AUTH, arg); return EINPROGRESS; case IEEE80211_S_RUN: if (vap->iv_opmode == IEEE80211_M_IBSS && vap->iv_state == IEEE80211_S_SCAN) { /* * XXX when joining an ibss network we are called * with a SCAN -> RUN transition on scan complete. * Use that to call iwi_auth_and_assoc. On completing * the join we are then called again with an * AUTH -> RUN transition and we want to do nothing. * This is all totally bogus and needs to be redone. */ iwi_queue_cmd(sc, IWI_ASSOC, 0); return EINPROGRESS; } break; case IEEE80211_S_ASSOC: /* * If we are transitioning from AUTH then just wait * for the ASSOC status to come back from the firmware. * Otherwise we need to issue the association request. */ if (vap->iv_state == IEEE80211_S_AUTH) break; iwi_queue_cmd(sc, IWI_ASSOC, arg); return EINPROGRESS; default: break; } return ivp->iwi_newstate(vap, nstate, arg); } /* * WME parameters coming from IEEE 802.11e specification. These values are * already declared in ieee80211_proto.c, but they are static so they can't * be reused here. */ static const struct wmeParams iwi_wme_cck_params[WME_NUM_AC] = { { 0, 3, 5, 7, 0 }, /* WME_AC_BE */ { 0, 3, 5, 10, 0 }, /* WME_AC_BK */ { 0, 2, 4, 5, 188 }, /* WME_AC_VI */ { 0, 2, 3, 4, 102 } /* WME_AC_VO */ }; static const struct wmeParams iwi_wme_ofdm_params[WME_NUM_AC] = { { 0, 3, 4, 6, 0 }, /* WME_AC_BE */ { 0, 3, 4, 10, 0 }, /* WME_AC_BK */ { 0, 2, 3, 4, 94 }, /* WME_AC_VI */ { 0, 2, 2, 3, 47 } /* WME_AC_VO */ }; #define IWI_EXP2(v) htole16((1 << (v)) - 1) #define IWI_USEC(v) htole16(IEEE80211_TXOP_TO_US(v)) static void iwi_wme_init(struct iwi_softc *sc) { const struct wmeParams *wmep; int ac; memset(sc->wme, 0, sizeof sc->wme); for (ac = 0; ac < WME_NUM_AC; ac++) { /* set WME values for CCK modulation */ wmep = &iwi_wme_cck_params[ac]; sc->wme[1].aifsn[ac] = wmep->wmep_aifsn; sc->wme[1].cwmin[ac] = IWI_EXP2(wmep->wmep_logcwmin); sc->wme[1].cwmax[ac] = IWI_EXP2(wmep->wmep_logcwmax); sc->wme[1].burst[ac] = IWI_USEC(wmep->wmep_txopLimit); sc->wme[1].acm[ac] = wmep->wmep_acm; /* set WME values for OFDM modulation */ wmep = &iwi_wme_ofdm_params[ac]; sc->wme[2].aifsn[ac] = wmep->wmep_aifsn; sc->wme[2].cwmin[ac] = IWI_EXP2(wmep->wmep_logcwmin); sc->wme[2].cwmax[ac] = IWI_EXP2(wmep->wmep_logcwmax); sc->wme[2].burst[ac] = IWI_USEC(wmep->wmep_txopLimit); sc->wme[2].acm[ac] = wmep->wmep_acm; } } static int iwi_wme_setparams(struct iwi_softc *sc, struct ieee80211com *ic) { const struct wmeParams *wmep; int ac; for (ac = 0; ac < WME_NUM_AC; ac++) { /* set WME values for current operating mode */ wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; sc->wme[0].aifsn[ac] = wmep->wmep_aifsn; sc->wme[0].cwmin[ac] = IWI_EXP2(wmep->wmep_logcwmin); sc->wme[0].cwmax[ac] = IWI_EXP2(wmep->wmep_logcwmax); sc->wme[0].burst[ac] = IWI_USEC(wmep->wmep_txopLimit); sc->wme[0].acm[ac] = wmep->wmep_acm; } DPRINTF(("Setting WME parameters\n")); return iwi_cmd(sc, IWI_CMD_SET_WME_PARAMS, sc->wme, sizeof sc->wme); } #undef IWI_USEC #undef IWI_EXP2 static int iwi_wme_update(struct ieee80211com *ic) { struct iwi_softc *sc = ic->ic_ifp->if_softc; /* * We may be called to update the WME parameters in * the adapter at various places. If we're already * associated then initiate the request immediately * (via the taskqueue); otherwise we assume the params * will get sent down to the adapter as part of the * work iwi_auth_and_assoc does. 
*/ return iwi_queue_cmd(sc, IWI_SET_WME, 0); } static int iwi_wme_setie(struct iwi_softc *sc) { struct ieee80211_wme_info wme; memset(&wme, 0, sizeof wme); wme.wme_id = IEEE80211_ELEMID_VENDOR; wme.wme_len = sizeof (struct ieee80211_wme_info) - 2; wme.wme_oui[0] = 0x00; wme.wme_oui[1] = 0x50; wme.wme_oui[2] = 0xf2; wme.wme_type = WME_OUI_TYPE; wme.wme_subtype = WME_INFO_OUI_SUBTYPE; wme.wme_version = WME_VERSION; wme.wme_info = 0; DPRINTF(("Setting WME IE (len=%u)\n", wme.wme_len)); return iwi_cmd(sc, IWI_CMD_SET_WMEIE, &wme, sizeof wme); } /* * Read 16 bits at address 'addr' from the serial EEPROM. */ static uint16_t iwi_read_prom_word(struct iwi_softc *sc, uint8_t addr) { uint32_t tmp; uint16_t val; int n; /* clock C once before the first command */ IWI_EEPROM_CTL(sc, 0); IWI_EEPROM_CTL(sc, IWI_EEPROM_S); IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_C); IWI_EEPROM_CTL(sc, IWI_EEPROM_S); /* write start bit (1) */ IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_D); IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_D | IWI_EEPROM_C); /* write READ opcode (10) */ IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_D); IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_D | IWI_EEPROM_C); IWI_EEPROM_CTL(sc, IWI_EEPROM_S); IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_C); /* write address A7-A0 */ for (n = 7; n >= 0; n--) { IWI_EEPROM_CTL(sc, IWI_EEPROM_S | (((addr >> n) & 1) << IWI_EEPROM_SHIFT_D)); IWI_EEPROM_CTL(sc, IWI_EEPROM_S | (((addr >> n) & 1) << IWI_EEPROM_SHIFT_D) | IWI_EEPROM_C); } IWI_EEPROM_CTL(sc, IWI_EEPROM_S); /* read data Q15-Q0 */ val = 0; for (n = 15; n >= 0; n--) { IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_C); IWI_EEPROM_CTL(sc, IWI_EEPROM_S); tmp = MEM_READ_4(sc, IWI_MEM_EEPROM_CTL); val |= ((tmp & IWI_EEPROM_Q) >> IWI_EEPROM_SHIFT_Q) << n; } IWI_EEPROM_CTL(sc, 0); /* clear Chip Select and clock C */ IWI_EEPROM_CTL(sc, IWI_EEPROM_S); IWI_EEPROM_CTL(sc, 0); IWI_EEPROM_CTL(sc, IWI_EEPROM_C); return val; } static void iwi_setcurchan(struct iwi_softc *sc, int chan) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; sc->curchan = chan; sc->sc_rxtap.wr_chan_freq = sc->sc_txtap.wt_chan_freq = htole16(ic->ic_curchan->ic_freq); sc->sc_rxtap.wr_chan_flags = sc->sc_txtap.wt_chan_flags = htole16(ic->ic_curchan->ic_flags); } static void iwi_frame_intr(struct iwi_softc *sc, struct iwi_rx_data *data, int i, struct iwi_frame *frame) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct mbuf *mnew, *m; struct ieee80211_node *ni; int type, error, framelen; IWI_LOCK_DECL; framelen = le16toh(frame->len); if (framelen < IEEE80211_MIN_LEN || framelen > MCLBYTES) { /* * XXX >MCLBYTES is bogus as it means the h/w dma'd * out of bounds; need to figure out how to limit * frame size in the firmware */ /* XXX stat */ DPRINTFN(1, ("drop rx frame len=%u chan=%u rssi=%u rssi_dbm=%u\n", le16toh(frame->len), frame->chan, frame->rssi, frame->rssi_dbm)); return; } DPRINTFN(5, ("received frame len=%u chan=%u rssi=%u rssi_dbm=%u\n", le16toh(frame->len), frame->chan, frame->rssi, frame->rssi_dbm)); if (frame->chan != sc->curchan) iwi_setcurchan(sc, frame->chan); /* * Try to allocate a new mbuf for this ring element and load it before * processing the current mbuf. If the ring element cannot be loaded, * drop the received packet and reuse the old mbuf. In the unlikely * case that the old mbuf can't be reloaded either, explicitly panic. 
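 *
 * Allocating and loading the replacement first means the ring slot
 * never loses its mapped buffer: either the new mbuf goes in and the
 * old one is handed up the stack, or the frame is dropped and the old
 * mbuf is reloaded in place.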
*/ mnew = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (mnew == NULL) { ifp->if_ierrors++; return; } bus_dmamap_unload(sc->rxq.data_dmat, data->map); error = bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(mnew, void *), MCLBYTES, iwi_dma_map_addr, &data->physaddr, 0); if (error != 0) { m_freem(mnew); /* try to reload the old mbuf */ error = bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(data->m, void *), MCLBYTES, iwi_dma_map_addr, &data->physaddr, 0); if (error != 0) { /* very unlikely that it will fail... */ panic("%s: could not load old rx mbuf", device_get_name(sc->sc_dev)); } ifp->if_ierrors++; return; } /* * New mbuf successfully loaded, update Rx ring and continue * processing. */ m = data->m; data->m = mnew; CSR_WRITE_4(sc, data->reg, data->physaddr); /* finalize mbuf */ m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = sizeof (struct iwi_hdr) + sizeof (struct iwi_frame) + framelen; m_adj(m, sizeof (struct iwi_hdr) + sizeof (struct iwi_frame)); if (bpf_peers_present(ifp->if_bpf)) { struct iwi_rx_radiotap_header *tap = &sc->sc_rxtap; tap->wr_flags = 0; tap->wr_rate = iwi_cvtrate(frame->rate); tap->wr_antsignal = frame->signal; tap->wr_antenna = frame->antenna; bpf_mtap2(ifp->if_bpf, tap, sc->sc_rxtap_len, m); } IWI_UNLOCK(sc); ni = ieee80211_find_rxnode(ic, mtod(m, struct ieee80211_frame_min *)); if (ni != NULL) { type = ieee80211_input(ni, m, frame->rssi_dbm, 0, 0); ieee80211_free_node(ni); } else type = ieee80211_input_all(ic, m, frame->rssi_dbm, 0, 0); IWI_LOCK(sc); if (sc->sc_softled) { /* * Blink for any data frame. Otherwise do a * heartbeat-style blink when idle. The latter * is mainly for station mode where we depend on * periodic beacon frames to trigger the poll event. */ if (type == IEEE80211_FC0_TYPE_DATA) { sc->sc_rxrate = frame->rate; iwi_led_event(sc, IWI_LED_RX); } else if (ticks - sc->sc_ledevent >= sc->sc_ledidle) iwi_led_event(sc, IWI_LED_POLL); } } /* * Check for an association response frame to see if QoS * has been negotiated. We parse just enough to figure * out if we're supposed to use QoS. The proper solution * is to pass the frame up so ieee80211_input can do the * work but that's made hard by how things currently are * done in the driver. */ static void iwi_checkforqos(struct ieee80211vap *vap, const struct ieee80211_frame *wh, int len) { #define SUBTYPE(wh) ((wh)->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) const uint8_t *frm, *efrm, *wme; struct ieee80211_node *ni; uint16_t capinfo, status, associd; /* NB: +8 for capinfo, status, associd, and first ie */ if (!(sizeof(*wh)+8 < len && len < IEEE80211_MAX_LEN) || SUBTYPE(wh) != IEEE80211_FC0_SUBTYPE_ASSOC_RESP) return; /* * asresp frame format * [2] capability information * [2] status * [2] association ID * [tlv] supported rates * [tlv] extended supported rates * [tlv] WME */ frm = (const uint8_t *)&wh[1]; efrm = ((const uint8_t *) wh) + len; capinfo = le16toh(*(const uint16_t *)frm); frm += 2; status = le16toh(*(const uint16_t *)frm); frm += 2; associd = le16toh(*(const uint16_t *)frm); frm += 2; wme = NULL; while (frm < efrm) { IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1], return); switch (*frm) { case IEEE80211_ELEMID_VENDOR: if (iswmeoui(frm)) wme = frm; break; } frm += frm[1] + 2; } ni = vap->iv_bss; ni->ni_capinfo = capinfo; ni->ni_associd = associd; if (wme != NULL) ni->ni_flags |= IEEE80211_NODE_QOS; else ni->ni_flags &= ~IEEE80211_NODE_QOS; #undef SUBTYPE } /* * Task queue callbacks for iwi_notification_intr used to avoid LOR's. 
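 *
 * The notifications are processed from iwi_intr with the driver lock
 * held; deferring the ieee80211_new_state() calls to taskqueue_swi
 * keeps the net80211 state machine from being entered in that context.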
*/ static void iwi_authsuccess(void *arg, int npending) { struct ieee80211vap *vap = arg; ieee80211_new_state(vap, IEEE80211_S_ASSOC, -1); } static void iwi_assocsuccess(void *arg, int npending) { struct ieee80211vap *vap = arg; ieee80211_new_state(vap, IEEE80211_S_RUN, -1); } static void iwi_assocfailed(void *arg, int npending) { struct ieee80211vap *vap = arg; ieee80211_new_state(vap, IEEE80211_S_SCAN, -1); } static void iwi_notification_intr(struct iwi_softc *sc, struct iwi_notif *notif) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct iwi_notif_scan_channel *chan; struct iwi_notif_scan_complete *scan; struct iwi_notif_authentication *auth; struct iwi_notif_association *assoc; struct iwi_notif_beacon_state *beacon; switch (notif->type) { case IWI_NOTIF_TYPE_SCAN_CHANNEL: chan = (struct iwi_notif_scan_channel *)(notif + 1); DPRINTFN(3, ("Scan of channel %u complete (%u)\n", ieee80211_ieee2mhz(chan->nchan, 0), chan->nchan)); /* Reset the timer, the scan is still going */ sc->sc_state_timer = 3; break; case IWI_NOTIF_TYPE_SCAN_COMPLETE: scan = (struct iwi_notif_scan_complete *)(notif + 1); DPRINTFN(2, ("Scan completed (%u, %u)\n", scan->nchan, scan->status)); IWI_STATE_END(sc, IWI_FW_SCANNING); if (scan->status == IWI_SCAN_COMPLETED) { /* NB: don't need to defer, net80211 does it for us */ ieee80211_scan_next(vap); } break; case IWI_NOTIF_TYPE_AUTHENTICATION: auth = (struct iwi_notif_authentication *)(notif + 1); switch (auth->state) { case IWI_AUTH_SUCCESS: DPRINTFN(2, ("Authentication succeeeded\n")); taskqueue_enqueue(taskqueue_swi, &IWI_VAP(vap)->iwi_authsuccess_task); break; case IWI_AUTH_FAIL: /* * These are delivered as an unsolicited deauth * (e.g. due to inactivity) or in response to an * associate request. 
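 * The vap state below distinguishes the two: anything other than RUN
 * means the authentication attempt itself failed, while RUN means we
 * were deauthenticated after associating.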
*/ sc->flags &= ~IWI_FLAG_ASSOCIATED; if (vap->iv_state != IEEE80211_S_RUN) { DPRINTFN(2, ("Authentication failed\n")); vap->iv_stats.is_rx_auth_fail++; IWI_STATE_END(sc, IWI_FW_ASSOCIATING); } else { DPRINTFN(2, ("Deauthenticated\n")); vap->iv_stats.is_rx_deauth++; } taskqueue_enqueue(taskqueue_swi, &IWI_VAP(vap)->iwi_assocfailed_task); break; case IWI_AUTH_SENT_1: case IWI_AUTH_RECV_2: case IWI_AUTH_SEQ1_PASS: break; case IWI_AUTH_SEQ1_FAIL: DPRINTFN(2, ("Initial authentication handshake failed; " "you probably need shared key\n")); vap->iv_stats.is_rx_auth_fail++; IWI_STATE_END(sc, IWI_FW_ASSOCIATING); /* XXX retry shared key when in auto */ break; default: device_printf(sc->sc_dev, "unknown authentication state %u\n", auth->state); break; } break; case IWI_NOTIF_TYPE_ASSOCIATION: assoc = (struct iwi_notif_association *)(notif + 1); switch (assoc->state) { case IWI_AUTH_SUCCESS: /* re-association, do nothing */ break; case IWI_ASSOC_SUCCESS: DPRINTFN(2, ("Association succeeded\n")); sc->flags |= IWI_FLAG_ASSOCIATED; IWI_STATE_END(sc, IWI_FW_ASSOCIATING); iwi_checkforqos(vap, (const struct ieee80211_frame *)(assoc+1), le16toh(notif->len) - sizeof(*assoc)); taskqueue_enqueue(taskqueue_swi, &IWI_VAP(vap)->iwi_assocsuccess_task); break; case IWI_ASSOC_INIT: sc->flags &= ~IWI_FLAG_ASSOCIATED; switch (sc->fw_state) { case IWI_FW_ASSOCIATING: DPRINTFN(2, ("Association failed\n")); IWI_STATE_END(sc, IWI_FW_ASSOCIATING); taskqueue_enqueue(taskqueue_swi, &IWI_VAP(vap)->iwi_assocfailed_task); break; case IWI_FW_DISASSOCIATING: DPRINTFN(2, ("Dissassociated\n")); IWI_STATE_END(sc, IWI_FW_DISASSOCIATING); vap->iv_stats.is_rx_disassoc++; taskqueue_enqueue(taskqueue_swi, &IWI_VAP(vap)->iwi_assocfailed_task); break; } break; default: device_printf(sc->sc_dev, "unknown association state %u\n", assoc->state); break; } break; case IWI_NOTIF_TYPE_BEACON: /* XXX check struct length */ beacon = (struct iwi_notif_beacon_state *)(notif + 1); DPRINTFN(5, ("Beacon state (%u, %u)\n", beacon->state, le32toh(beacon->number))); if (beacon->state == IWI_BEACON_MISS) { /* * The firmware notifies us of every beacon miss * so we need to track the count against the * configured threshold before notifying the * 802.11 layer. * XXX try to roam, drop assoc only on much higher count */ if (le32toh(beacon->number) >= vap->iv_bmissthreshold) { DPRINTF(("Beacon miss: %u >= %u\n", le32toh(beacon->number), vap->iv_bmissthreshold)); vap->iv_stats.is_beacon_miss++; /* * It's pointless to notify the 802.11 layer * as it'll try to send a probe request (which * we'll discard) and then timeout and drop us * into scan state. Instead tell the firmware * to disassociate and then on completion we'll * kick the state machine to scan. 
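 *
 * The completion arrives as an IWI_ASSOC_INIT notification with
 * fw_state at IWI_FW_DISASSOCIATING, which enqueues
 * iwi_assocfailed_task and so drops the vap back to SCAN.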
*/ iwi_queue_cmd(sc, IWI_DISASSOC, 1); } } break; case IWI_NOTIF_TYPE_CALIBRATION: case IWI_NOTIF_TYPE_NOISE: case IWI_NOTIF_TYPE_LINK_QUALITY: DPRINTFN(5, ("Notification (%u)\n", notif->type)); break; default: DPRINTF(("unknown notification type %u flags 0x%x len %u\n", notif->type, notif->flags, le16toh(notif->len))); break; } } static void iwi_rx_intr(struct iwi_softc *sc) { struct iwi_rx_data *data; struct iwi_hdr *hdr; uint32_t hw; hw = CSR_READ_4(sc, IWI_CSR_RX_RIDX); for (; sc->rxq.cur != hw;) { data = &sc->rxq.data[sc->rxq.cur]; bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); hdr = mtod(data->m, struct iwi_hdr *); switch (hdr->type) { case IWI_HDR_TYPE_FRAME: iwi_frame_intr(sc, data, sc->rxq.cur, (struct iwi_frame *)(hdr + 1)); break; case IWI_HDR_TYPE_NOTIF: iwi_notification_intr(sc, (struct iwi_notif *)(hdr + 1)); break; default: device_printf(sc->sc_dev, "unknown hdr type %u\n", hdr->type); } DPRINTFN(15, ("rx done idx=%u\n", sc->rxq.cur)); sc->rxq.cur = (sc->rxq.cur + 1) % IWI_RX_RING_COUNT; } /* tell the firmware what we have processed */ hw = (hw == 0) ? IWI_RX_RING_COUNT - 1 : hw - 1; CSR_WRITE_4(sc, IWI_CSR_RX_WIDX, hw); } static void iwi_tx_intr(struct iwi_softc *sc, struct iwi_tx_ring *txq) { struct ifnet *ifp = sc->sc_ifp; struct iwi_tx_data *data; uint32_t hw; hw = CSR_READ_4(sc, txq->csr_ridx); for (; txq->next != hw;) { data = &txq->data[txq->next]; bus_dmamap_sync(txq->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txq->data_dmat, data->map); if (data->m->m_flags & M_TXCB) ieee80211_process_callback(data->ni, data->m, 0/*XXX*/); m_freem(data->m); data->m = NULL; ieee80211_free_node(data->ni); data->ni = NULL; DPRINTFN(15, ("tx done idx=%u\n", txq->next)); ifp->if_opackets++; txq->queued--; txq->next = (txq->next + 1) % IWI_TX_RING_COUNT; } sc->sc_tx_timer = 0; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (sc->sc_softled) iwi_led_event(sc, IWI_LED_TX); iwi_start_locked(ifp); } static void iwi_intr(void *arg) { struct iwi_softc *sc = arg; uint32_t r; IWI_LOCK_DECL; IWI_LOCK(sc); if ((r = CSR_READ_4(sc, IWI_CSR_INTR)) == 0 || r == 0xffffffff) { IWI_UNLOCK(sc); return; } /* acknowledge interrupts */ CSR_WRITE_4(sc, IWI_CSR_INTR, r); if (r & IWI_INTR_FATAL_ERROR) { device_printf(sc->sc_dev, "firmware error\n"); taskqueue_enqueue(sc->sc_tq2, &sc->sc_restarttask); sc->flags &= ~IWI_FLAG_BUSY; sc->sc_busy_timer = 0; wakeup(sc); } if (r & IWI_INTR_FW_INITED) { if (!(r & (IWI_INTR_FATAL_ERROR | IWI_INTR_PARITY_ERROR))) wakeup(sc); } if (r & IWI_INTR_RADIO_OFF) taskqueue_enqueue(sc->sc_tq, &sc->sc_radiofftask); if (r & IWI_INTR_CMD_DONE) { sc->flags &= ~IWI_FLAG_BUSY; sc->sc_busy_timer = 0; wakeup(sc); } if (r & IWI_INTR_TX1_DONE) iwi_tx_intr(sc, &sc->txq[0]); if (r & IWI_INTR_TX2_DONE) iwi_tx_intr(sc, &sc->txq[1]); if (r & IWI_INTR_TX3_DONE) iwi_tx_intr(sc, &sc->txq[2]); if (r & IWI_INTR_TX4_DONE) iwi_tx_intr(sc, &sc->txq[3]); if (r & IWI_INTR_RX_DONE) iwi_rx_intr(sc); if (r & IWI_INTR_PARITY_ERROR) { /* XXX rate-limit */ device_printf(sc->sc_dev, "parity error\n"); } IWI_UNLOCK(sc); } static int iwi_cmd(struct iwi_softc *sc, uint8_t type, void *data, uint8_t len) { struct iwi_cmd_desc *desc; IWI_LOCK_ASSERT(sc); if (sc->flags & IWI_FLAG_BUSY) { device_printf(sc->sc_dev, "%s: cmd %d not sent, busy\n", __func__, type); return EAGAIN; } sc->flags |= IWI_FLAG_BUSY; sc->sc_busy_timer = 2; desc = &sc->cmdq.desc[sc->cmdq.cur]; desc->hdr.type = IWI_HDR_TYPE_COMMAND; desc->hdr.flags = IWI_HDR_FLAG_IRQ; desc->type = type; desc->len = len; 
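	/*
	 * Copy the payload into the descriptor, sync it for the device,
	 * bump the write index, then sleep (bounded to hz below) until
	 * the CMD_DONE interrupt clears IWI_FLAG_BUSY and wakes us.
	 */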
memcpy(desc->data, data, len); bus_dmamap_sync(sc->cmdq.desc_dmat, sc->cmdq.desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(2, ("sending command idx=%u type=%u len=%u\n", sc->cmdq.cur, type, len)); sc->cmdq.cur = (sc->cmdq.cur + 1) % IWI_CMD_RING_COUNT; CSR_WRITE_4(sc, IWI_CSR_CMD_WIDX, sc->cmdq.cur); return msleep(sc, &sc->sc_mtx, 0, "iwicmd", hz); } static void iwi_write_ibssnode(struct iwi_softc *sc, const u_int8_t addr[IEEE80211_ADDR_LEN], int entry) { struct iwi_ibssnode node; /* write node information into NIC memory */ memset(&node, 0, sizeof node); IEEE80211_ADDR_COPY(node.bssid, addr); DPRINTF(("%s mac %6D station %u\n", __func__, node.bssid, ":", entry)); CSR_WRITE_REGION_1(sc, IWI_CSR_NODE_BASE + entry * sizeof node, (uint8_t *)&node, sizeof node); } static int iwi_tx_start(struct ifnet *ifp, struct mbuf *m0, struct ieee80211_node *ni, int ac) { struct iwi_softc *sc = ifp->if_softc; struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct iwi_node *in = (struct iwi_node *)ni; const struct ieee80211_frame *wh; struct ieee80211_key *k; const struct chanAccParams *cap; struct iwi_tx_ring *txq = &sc->txq[ac]; struct iwi_tx_data *data; struct iwi_tx_desc *desc; struct mbuf *mnew; bus_dma_segment_t segs[IWI_MAX_NSEG]; int error, nsegs, hdrlen, i; int ismcast, flags, xflags, staid; IWI_LOCK_ASSERT(sc); wh = mtod(m0, const struct ieee80211_frame *); /* NB: only data frames use this path */ hdrlen = ieee80211_hdrsize(wh); ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); flags = xflags = 0; if (!ismcast) flags |= IWI_DATA_FLAG_NEED_ACK; if (vap->iv_flags & IEEE80211_F_SHPREAMBLE) flags |= IWI_DATA_FLAG_SHPREAMBLE; if (IEEE80211_QOS_HAS_SEQ(wh)) { xflags |= IWI_DATA_XFLAG_QOS; cap = &ic->ic_wme.wme_chanParams; if (!cap->cap_wmeParams[ac].wmep_noackPolicy) flags &= ~IWI_DATA_FLAG_NEED_ACK; } /* * This is only used in IBSS mode where the firmware expects an index * in a h/w table instead of a destination address. */ if (vap->iv_opmode == IEEE80211_M_IBSS) { if (!ismcast) { if (in->in_station == -1) { in->in_station = alloc_unr(sc->sc_unr); if (in->in_station == -1) { /* h/w table is full */ m_freem(m0); ieee80211_free_node(ni); ifp->if_oerrors++; return 0; } iwi_write_ibssnode(sc, ni->ni_macaddr, in->in_station); } staid = in->in_station; } else { /* * Multicast addresses have no associated node * so there will be no station entry. We reserve * entry 0 for one mcast address and use that. * If there are many being used this will be * expensive and we'll need to do a better job * but for now this handles the broadcast case.
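* (Unicast peers, by contrast, get their slot from the unr(9)
* allocator via alloc_unr() above and are written into the firmware
* node table with iwi_write_ibssnode().)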
*/ if (!IEEE80211_ADDR_EQ(wh->i_addr1, sc->sc_mcast)) { IEEE80211_ADDR_COPY(sc->sc_mcast, wh->i_addr1); iwi_write_ibssnode(sc, sc->sc_mcast, 0); } staid = 0; } } else staid = 0; if (wh->i_fc[1] & IEEE80211_FC1_WEP) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (bpf_peers_present(ifp->if_bpf)) { struct iwi_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; bpf_mtap2(ifp->if_bpf, tap, sc->sc_txtap_len, m0); } data = &txq->data[txq->cur]; desc = &txq->desc[txq->cur]; /* save and trim IEEE802.11 header */ m_copydata(m0, 0, hdrlen, (caddr_t)&desc->wh); m_adj(m0, hdrlen); error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0 && error != EFBIG) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (error != 0) { mnew = m_defrag(m0, M_DONTWAIT); if (mnew == NULL) { device_printf(sc->sc_dev, "could not defragment mbuf\n"); m_freem(m0); return ENOBUFS; } m0 = mnew; error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } } data->m = m0; data->ni = ni; desc->hdr.type = IWI_HDR_TYPE_DATA; desc->hdr.flags = IWI_HDR_FLAG_IRQ; desc->station = staid; desc->cmd = IWI_DATA_CMD_TX; desc->len = htole16(m0->m_pkthdr.len); desc->flags = flags; desc->xflags = xflags; #if 0 if (vap->iv_flags & IEEE80211_F_PRIVACY) desc->wep_txkey = vap->iv_def_txkey; else #endif desc->flags |= IWI_DATA_FLAG_NO_WEP; desc->nseg = htole32(nsegs); for (i = 0; i < nsegs; i++) { desc->seg_addr[i] = htole32(segs[i].ds_addr); desc->seg_len[i] = htole16(segs[i].ds_len); } bus_dmamap_sync(txq->data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(5, ("sending data frame txq=%u idx=%u len=%u nseg=%u\n", ac, txq->cur, le16toh(desc->len), nsegs)); txq->queued++; txq->cur = (txq->cur + 1) % IWI_TX_RING_COUNT; CSR_WRITE_4(sc, txq->csr_widx, txq->cur); return 0; } static int iwi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { /* no support; just discard */ m_freem(m); ieee80211_free_node(ni); return 0; } static void iwi_start_locked(struct ifnet *ifp) { struct iwi_softc *sc = ifp->if_softc; struct mbuf *m; struct ieee80211_node *ni; int ac; IWI_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; for (;;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; ac = M_WME_GETAC(m); if (sc->txq[ac].queued > IWI_TX_RING_COUNT - 8) { /* there is no place left in this ring; tail drop */ /* XXX tail drop */ IFQ_DRV_PREPEND(&ifp->if_snd, m); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } BPF_MTAP(ifp, m); ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; m = ieee80211_encap(ni, m); if (m == NULL) { ieee80211_free_node(ni); ifp->if_oerrors++; continue; } if (iwi_tx_start(ifp, m, ni, ac) != 0) { ieee80211_free_node(ni); ifp->if_oerrors++; break; } sc->sc_tx_timer = 5; } } static void iwi_start(struct ifnet *ifp) { struct iwi_softc *sc = ifp->if_softc; IWI_LOCK_DECL; IWI_LOCK(sc); iwi_start_locked(ifp); IWI_UNLOCK(sc); } static void iwi_watchdog(void *arg) { struct iwi_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; IWI_LOCK_ASSERT(sc); if (sc->sc_tx_timer > 0) { if (--sc->sc_tx_timer == 0) { if_printf(ifp, "device timeout\n"); 
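			/*
			 * Counted as an output error below; the restart
			 * task (iwi_restart -> iwi_init) reloads the
			 * firmware and reinitializes the device.
			 */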
ifp->if_oerrors++; taskqueue_enqueue(sc->sc_tq2, &sc->sc_restarttask); } } if (sc->sc_state_timer > 0) { if (--sc->sc_state_timer == 0) { if_printf(ifp, "firmware stuck in state %d, resetting\n", sc->fw_state); taskqueue_enqueue(sc->sc_tq2, &sc->sc_restarttask); if (sc->fw_state == IWI_FW_SCANNING) { struct ieee80211com *ic = ifp->if_l2com; ieee80211_cancel_scan(TAILQ_FIRST(&ic->ic_vaps)); } sc->sc_state_timer = 3; } } if (sc->sc_busy_timer > 0) { if (--sc->sc_busy_timer == 0) { if_printf(ifp, "firmware command timeout, resetting\n"); taskqueue_enqueue(sc->sc_tq2, &sc->sc_restarttask); } } callout_reset(&sc->sc_wdtimer, hz, iwi_watchdog, sc); } static int iwi_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct iwi_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct ifreq *ifr = (struct ifreq *) data; int error = 0, startall = 0; IWI_LOCK_DECL; switch (cmd) { case SIOCSIFFLAGS: IWI_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { iwi_init_locked(sc); startall = 1; } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) iwi_stop_locked(sc); } IWI_UNLOCK(sc); if (startall) ieee80211_start_all(ic); break; case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; case SIOCGIFADDR: error = ether_ioctl(ifp, cmd, data); break; default: error = EINVAL; break; } return error; } static void iwi_stop_master(struct iwi_softc *sc) { uint32_t tmp; int ntries; /* disable interrupts */ CSR_WRITE_4(sc, IWI_CSR_INTR_MASK, 0); CSR_WRITE_4(sc, IWI_CSR_RST, IWI_RST_STOP_MASTER); for (ntries = 0; ntries < 5; ntries++) { if (CSR_READ_4(sc, IWI_CSR_RST) & IWI_RST_MASTER_DISABLED) break; DELAY(10); } if (ntries == 5) device_printf(sc->sc_dev, "timeout waiting for master\n"); tmp = CSR_READ_4(sc, IWI_CSR_RST); CSR_WRITE_4(sc, IWI_CSR_RST, tmp | IWI_RST_PRINCETON_RESET); sc->flags &= ~IWI_FLAG_FW_INITED; } static int iwi_reset(struct iwi_softc *sc) { uint32_t tmp; int i, ntries; iwi_stop_master(sc); tmp = CSR_READ_4(sc, IWI_CSR_CTL); CSR_WRITE_4(sc, IWI_CSR_CTL, tmp | IWI_CTL_INIT); CSR_WRITE_4(sc, IWI_CSR_READ_INT, IWI_READ_INT_INIT_HOST); /* wait for clock stabilization */ for (ntries = 0; ntries < 1000; ntries++) { if (CSR_READ_4(sc, IWI_CSR_CTL) & IWI_CTL_CLOCK_READY) break; DELAY(200); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for clock stabilization\n"); return EIO; } tmp = CSR_READ_4(sc, IWI_CSR_RST); CSR_WRITE_4(sc, IWI_CSR_RST, tmp | IWI_RST_SOFT_RESET); DELAY(10); tmp = CSR_READ_4(sc, IWI_CSR_CTL); CSR_WRITE_4(sc, IWI_CSR_CTL, tmp | IWI_CTL_INIT); /* clear NIC memory */ CSR_WRITE_4(sc, IWI_CSR_AUTOINC_ADDR, 0); for (i = 0; i < 0xc000; i++) CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, 0); return 0; } static const struct iwi_firmware_ohdr * iwi_setup_ofw(struct iwi_softc *sc, struct iwi_fw *fw) { const struct firmware *fp = fw->fp; const struct iwi_firmware_ohdr *hdr; if (fp->datasize < sizeof (struct iwi_firmware_ohdr)) { device_printf(sc->sc_dev, "image '%s' too small\n", fp->name); return NULL; } hdr = (const struct iwi_firmware_ohdr *)fp->data; if ((IWI_FW_GET_MAJOR(le32toh(hdr->version)) != IWI_FW_REQ_MAJOR) || (IWI_FW_GET_MINOR(le32toh(hdr->version)) != IWI_FW_REQ_MINOR)) { device_printf(sc->sc_dev, "version for '%s' %d.%d != %d.%d\n", fp->name, IWI_FW_GET_MAJOR(le32toh(hdr->version)), IWI_FW_GET_MINOR(le32toh(hdr->version)), IWI_FW_REQ_MAJOR, IWI_FW_REQ_MINOR); return NULL; } fw->data = ((const char *) fp->data) + sizeof(struct iwi_firmware_ohdr); fw->size = fp->datasize - sizeof(struct 
iwi_firmware_ohdr); fw->name = fp->name; return hdr; } static const struct iwi_firmware_ohdr * iwi_setup_oucode(struct iwi_softc *sc, struct iwi_fw *fw) { const struct iwi_firmware_ohdr *hdr; hdr = iwi_setup_ofw(sc, fw); if (hdr != NULL && le32toh(hdr->mode) != IWI_FW_MODE_UCODE) { device_printf(sc->sc_dev, "%s is not a ucode image\n", fw->name); hdr = NULL; } return hdr; } static void iwi_getfw(struct iwi_fw *fw, const char *fwname, struct iwi_fw *uc, const char *ucname) { if (fw->fp == NULL) fw->fp = firmware_get(fwname); /* NB: pre-3.0 ucode is packaged separately */ if (uc->fp == NULL && fw->fp != NULL && fw->fp->version < 300) uc->fp = firmware_get(ucname); } /* * Get the required firmware images if not already loaded. * Note that we hold firmware images so long as the device * is marked up in case we need to reload them on device init. * This is necessary because we re-init the device sometimes * from a context where we cannot read from the filesystem * (e.g. from the taskqueue thread when rfkill is re-enabled). * XXX return 0 on success, 1 on error. * * NB: the order of get'ing and put'ing images here is * intentional to support handling firmware images bundled * by operating mode and/or all together in one file with * the boot firmware as "master". */ static int iwi_get_firmware(struct iwi_softc *sc, enum ieee80211_opmode opmode) { const struct iwi_firmware_hdr *hdr; const struct firmware *fp; /* invalidate cached firmware on mode change */ if (sc->fw_mode != opmode) iwi_put_firmware(sc); switch (opmode) { case IEEE80211_M_STA: iwi_getfw(&sc->fw_fw, "iwi_bss", &sc->fw_uc, "iwi_ucode_bss"); break; case IEEE80211_M_IBSS: iwi_getfw(&sc->fw_fw, "iwi_ibss", &sc->fw_uc, "iwi_ucode_ibss"); break; case IEEE80211_M_MONITOR: iwi_getfw(&sc->fw_fw, "iwi_monitor", &sc->fw_uc, "iwi_ucode_monitor"); break; default: break; } fp = sc->fw_fw.fp; if (fp == NULL) { device_printf(sc->sc_dev, "could not load firmware\n"); goto bad; } if (fp->version < 300) { /* * Firmware prior to 3.0 was packaged as separate * boot, firmware, and ucode images. Verify the * ucode image was read in, retrieve the boot image * if needed, and check version stamps for consistency. * The version stamps in the data are also checked * above; this is a bit paranoid but is a cheap * safeguard against mis-packaging. */ if (sc->fw_uc.fp == NULL) { device_printf(sc->sc_dev, "could not load ucode\n"); goto bad; } if (sc->fw_boot.fp == NULL) { sc->fw_boot.fp = firmware_get("iwi_boot"); if (sc->fw_boot.fp == NULL) { device_printf(sc->sc_dev, "could not load boot firmware\n"); goto bad; } } if (sc->fw_boot.fp->version != sc->fw_fw.fp->version || sc->fw_boot.fp->version != sc->fw_uc.fp->version) { device_printf(sc->sc_dev, "firmware version mismatch: " "'%s' is %d, '%s' is %d, '%s' is %d\n", sc->fw_boot.fp->name, sc->fw_boot.fp->version, sc->fw_uc.fp->name, sc->fw_uc.fp->version, sc->fw_fw.fp->name, sc->fw_fw.fp->version ); goto bad; } /* * Check and setup each image. */ if (iwi_setup_oucode(sc, &sc->fw_uc) == NULL || iwi_setup_ofw(sc, &sc->fw_boot) == NULL || iwi_setup_ofw(sc, &sc->fw_fw) == NULL) goto bad; } else { /* * Check and setup combined image. 
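* A >= 3.0 image is one firmware(9) file laid out as
*
*   iwi_firmware_hdr | boot (bsize) | ucode (usize) | firmware (fsize)
*
* so the three iwi_fw descriptors below just point into that one blob.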
*/ if (fp->datasize < sizeof(struct iwi_firmware_hdr)) { device_printf(sc->sc_dev, "image '%s' too small\n", fp->name); goto bad; } hdr = (const struct iwi_firmware_hdr *)fp->data; if (fp->datasize < sizeof(*hdr) + le32toh(hdr->bsize) + le32toh(hdr->usize) + le32toh(hdr->fsize)) { device_printf(sc->sc_dev, "image '%s' too small (2)\n", fp->name); goto bad; } sc->fw_boot.data = ((const char *) fp->data) + sizeof(*hdr); sc->fw_boot.size = le32toh(hdr->bsize); sc->fw_boot.name = fp->name; sc->fw_uc.data = sc->fw_boot.data + sc->fw_boot.size; sc->fw_uc.size = le32toh(hdr->usize); sc->fw_uc.name = fp->name; sc->fw_fw.data = sc->fw_uc.data + sc->fw_uc.size; sc->fw_fw.size = le32toh(hdr->fsize); sc->fw_fw.name = fp->name; } #if 0 device_printf(sc->sc_dev, "boot %d ucode %d fw %d bytes\n", sc->fw_boot.size, sc->fw_uc.size, sc->fw_fw.size); #endif sc->fw_mode = opmode; return 0; bad: iwi_put_firmware(sc); return 1; } static void iwi_put_fw(struct iwi_fw *fw) { if (fw->fp != NULL) { firmware_put(fw->fp, FIRMWARE_UNLOAD); fw->fp = NULL; } fw->data = NULL; fw->size = 0; fw->name = NULL; } /* * Release any cached firmware images. */ static void iwi_put_firmware(struct iwi_softc *sc) { iwi_put_fw(&sc->fw_uc); iwi_put_fw(&sc->fw_fw); iwi_put_fw(&sc->fw_boot); } static int iwi_load_ucode(struct iwi_softc *sc, const struct iwi_fw *fw) { uint32_t tmp; const uint16_t *w; const char *uc = fw->data; size_t size = fw->size; int i, ntries, error; IWI_LOCK_ASSERT(sc); error = 0; CSR_WRITE_4(sc, IWI_CSR_RST, CSR_READ_4(sc, IWI_CSR_RST) | IWI_RST_STOP_MASTER); for (ntries = 0; ntries < 5; ntries++) { if (CSR_READ_4(sc, IWI_CSR_RST) & IWI_RST_MASTER_DISABLED) break; DELAY(10); } if (ntries == 5) { device_printf(sc->sc_dev, "timeout waiting for master\n"); error = EIO; goto fail; } MEM_WRITE_4(sc, 0x3000e0, 0x80000000); DELAY(5000); tmp = CSR_READ_4(sc, IWI_CSR_RST); tmp &= ~IWI_RST_PRINCETON_RESET; CSR_WRITE_4(sc, IWI_CSR_RST, tmp); DELAY(5000); MEM_WRITE_4(sc, 0x3000e0, 0); DELAY(1000); MEM_WRITE_4(sc, IWI_MEM_EEPROM_EVENT, 1); DELAY(1000); MEM_WRITE_4(sc, IWI_MEM_EEPROM_EVENT, 0); DELAY(1000); MEM_WRITE_1(sc, 0x200000, 0x00); MEM_WRITE_1(sc, 0x200000, 0x40); DELAY(1000); /* write microcode into adapter memory */ for (w = (const uint16_t *)uc; size > 0; w++, size -= 2) MEM_WRITE_2(sc, 0x200010, htole16(*w)); MEM_WRITE_1(sc, 0x200000, 0x00); MEM_WRITE_1(sc, 0x200000, 0x80); /* wait until we get an answer */ for (ntries = 0; ntries < 100; ntries++) { if (MEM_READ_1(sc, 0x200000) & 1) break; DELAY(100); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for ucode to initialize\n"); error = EIO; goto fail; } /* read the answer or the firmware will not initialize properly */ for (i = 0; i < 7; i++) MEM_READ_4(sc, 0x200004); MEM_WRITE_1(sc, 0x200000, 0x00); fail: return error; } /* macro to handle unaligned little endian data in firmware image */ #define GETLE32(p) ((p)[0] | (p)[1] << 8 | (p)[2] << 16 | (p)[3] << 24) static int iwi_load_firmware(struct iwi_softc *sc, const struct iwi_fw *fw) { u_char *p, *end; uint32_t sentinel, ctl, src, dst, sum, len, mlen, tmp; int ntries, error; IWI_LOCK_ASSERT(sc); /* copy firmware image to DMA memory */ memcpy(sc->fw_virtaddr, fw->data, fw->size); /* make sure the adapter will get up-to-date values */ bus_dmamap_sync(sc->fw_dmat, sc->fw_map, BUS_DMASYNC_PREWRITE); /* tell the adapter where the command blocks are stored */ MEM_WRITE_4(sc, 0x3000a0, 0x27000); /* * Store command blocks into adapter's internal memory using register * indirections. 
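* Each command block is four 32-bit words pushed through the
* auto-increment window (IWI_CSR_AUTOINC_ADDR/DATA): a control word
* (IWI_CB_DEFAULT_CTL | length), the source address in host DMA
* memory, the destination address in NIC memory, and a simple
* checksum (ctl ^ src ^ dst); a zero control word written at the end
* acts as the sentinel.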
The adapter will read the firmware image through DMA * using information stored in command blocks. */ src = sc->fw_physaddr; p = sc->fw_virtaddr; end = p + fw->size; CSR_WRITE_4(sc, IWI_CSR_AUTOINC_ADDR, 0x27000); while (p < end) { dst = GETLE32(p); p += 4; src += 4; len = GETLE32(p); p += 4; src += 4; p += len; while (len > 0) { mlen = min(len, IWI_CB_MAXDATALEN); ctl = IWI_CB_DEFAULT_CTL | mlen; sum = ctl ^ src ^ dst; /* write a command block */ CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, ctl); CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, src); CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, dst); CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, sum); src += mlen; dst += mlen; len -= mlen; } } /* write a fictive final command block (sentinel) */ sentinel = CSR_READ_4(sc, IWI_CSR_AUTOINC_ADDR); CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, 0); tmp = CSR_READ_4(sc, IWI_CSR_RST); tmp &= ~(IWI_RST_MASTER_DISABLED | IWI_RST_STOP_MASTER); CSR_WRITE_4(sc, IWI_CSR_RST, tmp); /* tell the adapter to start processing command blocks */ MEM_WRITE_4(sc, 0x3000a4, 0x540100); /* wait until the adapter reaches the sentinel */ for (ntries = 0; ntries < 400; ntries++) { if (MEM_READ_4(sc, 0x3000d0) >= sentinel) break; DELAY(100); } /* sync dma, just in case */ bus_dmamap_sync(sc->fw_dmat, sc->fw_map, BUS_DMASYNC_POSTWRITE); if (ntries == 400) { device_printf(sc->sc_dev, "timeout processing command blocks for %s firmware\n", fw->name); return EIO; } /* we're done with command blocks processing */ MEM_WRITE_4(sc, 0x3000a4, 0x540c00); /* allow interrupts so we know when the firmware is ready */ CSR_WRITE_4(sc, IWI_CSR_INTR_MASK, IWI_INTR_MASK); /* tell the adapter to initialize the firmware */ CSR_WRITE_4(sc, IWI_CSR_RST, 0); tmp = CSR_READ_4(sc, IWI_CSR_CTL); CSR_WRITE_4(sc, IWI_CSR_CTL, tmp | IWI_CTL_ALLOW_STANDBY); /* wait at most one second for firmware initialization to complete */ if ((error = msleep(sc, &sc->sc_mtx, 0, "iwiinit", hz)) != 0) { device_printf(sc->sc_dev, "timeout waiting for %s firmware " "initialization to complete\n", fw->name); } return error; } static int iwi_setpowermode(struct iwi_softc *sc, struct ieee80211vap *vap) { uint32_t data; if (vap->iv_flags & IEEE80211_F_PMGTON) { /* XXX set more fine-grained operation */ data = htole32(IWI_POWER_MODE_MAX); } else data = htole32(IWI_POWER_MODE_CAM); DPRINTF(("Setting power mode to %u\n", le32toh(data))); return iwi_cmd(sc, IWI_CMD_SET_POWER_MODE, &data, sizeof data); } static int iwi_setwepkeys(struct iwi_softc *sc, struct ieee80211vap *vap) { struct iwi_wep_key wepkey; struct ieee80211_key *wk; int error, i; for (i = 0; i < IEEE80211_WEP_NKID; i++) { wk = &vap->iv_nw_keys[i]; wepkey.cmd = IWI_WEP_KEY_CMD_SETKEY; wepkey.idx = i; wepkey.len = wk->wk_keylen; memset(wepkey.key, 0, sizeof wepkey.key); memcpy(wepkey.key, wk->wk_key, wk->wk_keylen); DPRINTF(("Setting wep key index %u len %u\n", wepkey.idx, wepkey.len)); error = iwi_cmd(sc, IWI_CMD_SET_WEP_KEY, &wepkey, sizeof wepkey); if (error != 0) return error; } return 0; } static int iwi_config(struct iwi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct iwi_configuration config; struct iwi_rateset rs; struct iwi_txpower power; uint32_t data; int error, i; IWI_LOCK_ASSERT(sc); IEEE80211_ADDR_COPY(ic->ic_myaddr, IF_LLADDR(ifp)); DPRINTF(("Setting MAC address to %6D\n", ic->ic_myaddr, ":")); error = iwi_cmd(sc, IWI_CMD_SET_MAC_ADDRESS, ic->ic_myaddr, IEEE80211_ADDR_LEN); if (error != 0) return error; memset(&config, 0, sizeof config); config.bluetooth_coexistence = sc->bluetooth; 
config.silence_threshold = 0x1e; config.antenna = sc->antenna; config.multicast_enabled = 1; config.answer_pbreq = (ic->ic_opmode == IEEE80211_M_IBSS) ? 1 : 0; config.disable_unicast_decryption = 1; config.disable_multicast_decryption = 1; DPRINTF(("Configuring adapter\n")); error = iwi_cmd(sc, IWI_CMD_SET_CONFIG, &config, sizeof config); if (error != 0) return error; if (ic->ic_opmode == IEEE80211_M_IBSS) { power.mode = IWI_MODE_11B; power.nchan = 11; for (i = 0; i < 11; i++) { power.chan[i].chan = i + 1; power.chan[i].power = IWI_TXPOWER_MAX; } DPRINTF(("Setting .11b channels tx power\n")); error = iwi_cmd(sc, IWI_CMD_SET_TX_POWER, &power, sizeof power); if (error != 0) return error; power.mode = IWI_MODE_11G; DPRINTF(("Setting .11g channels tx power\n")); error = iwi_cmd(sc, IWI_CMD_SET_TX_POWER, &power, sizeof power); if (error != 0) return error; } memset(&rs, 0, sizeof rs); rs.mode = IWI_MODE_11G; rs.type = IWI_RATESET_TYPE_SUPPORTED; rs.nrates = ic->ic_sup_rates[IEEE80211_MODE_11G].rs_nrates; memcpy(rs.rates, ic->ic_sup_rates[IEEE80211_MODE_11G].rs_rates, rs.nrates); DPRINTF(("Setting .11bg supported rates (%u)\n", rs.nrates)); error = iwi_cmd(sc, IWI_CMD_SET_RATES, &rs, sizeof rs); if (error != 0) return error; memset(&rs, 0, sizeof rs); rs.mode = IWI_MODE_11A; rs.type = IWI_RATESET_TYPE_SUPPORTED; rs.nrates = ic->ic_sup_rates[IEEE80211_MODE_11A].rs_nrates; memcpy(rs.rates, ic->ic_sup_rates[IEEE80211_MODE_11A].rs_rates, rs.nrates); DPRINTF(("Setting .11a supported rates (%u)\n", rs.nrates)); error = iwi_cmd(sc, IWI_CMD_SET_RATES, &rs, sizeof rs); if (error != 0) return error; data = htole32(arc4random()); DPRINTF(("Setting initialization vector to %u\n", le32toh(data))); error = iwi_cmd(sc, IWI_CMD_SET_IV, &data, sizeof data); if (error != 0) return error; /* enable adapter */ DPRINTF(("Enabling adapter\n")); return iwi_cmd(sc, IWI_CMD_ENABLE, NULL, 0); } static __inline void set_scan_type(struct iwi_scan_ext *scan, int ix, int scan_type) { uint8_t *st = &scan->scan_type[ix / 2]; if (ix % 2) *st = (*st & 0xf0) | ((scan_type & 0xf) << 0); else *st = (*st & 0x0f) | ((scan_type & 0xf) << 4); } static int scan_type(const struct ieee80211_scan_state *ss, const struct ieee80211_channel *chan) { /* We can only set one essid for a directed scan */ if (ss->ss_nssid != 0) return IWI_SCAN_TYPE_BDIRECTED; if ((ss->ss_flags & IEEE80211_SCAN_ACTIVE) && (chan->ic_flags & IEEE80211_CHAN_PASSIVE) == 0) return IWI_SCAN_TYPE_BROADCAST; return IWI_SCAN_TYPE_PASSIVE; } static __inline int scan_band(const struct ieee80211_channel *c) { return IEEE80211_IS_CHAN_5GHZ(c) ? IWI_CHAN_5GHZ : IWI_CHAN_2GHZ; } /* * Start a scan on the current channel or all channels. */ static int iwi_scanchan(struct iwi_softc *sc, unsigned long maxdwell, int mode) { struct ieee80211com *ic; struct ieee80211_channel *chan; struct ieee80211_scan_state *ss; struct iwi_scan_ext scan; int error = 0; IWI_LOCK_ASSERT(sc); if (sc->fw_state == IWI_FW_SCANNING) { /* * This should not happen as we only trigger scan_next after * completion */ DPRINTF(("%s: called too early - still scanning\n", __func__)); return (EBUSY); } IWI_STATE_BEGIN(sc, IWI_FW_SCANNING); ic = sc->sc_ifp->if_l2com; ss = ic->ic_scan; memset(&scan, 0, sizeof scan); scan.full_scan_index = htole32(++sc->sc_scangen); scan.dwell_time[IWI_SCAN_TYPE_PASSIVE] = htole16(maxdwell); if (ic->ic_flags_ext & IEEE80211_FEXT_BGSCAN) { /* * Use very short dwell times for when we send probe request * frames. Without this bg scans hang. 
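* The broadcast and directed dwell times are pinned to 30 ms below
* while a background scan is in progress, instead of the maxdwell
* value net80211 passed in.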
Ideally this should * be handled with early-termination as done by net80211 but * that's not feasible (aborting a scan is problematic). */ scan.dwell_time[IWI_SCAN_TYPE_BROADCAST] = htole16(30); scan.dwell_time[IWI_SCAN_TYPE_BDIRECTED] = htole16(30); } else { scan.dwell_time[IWI_SCAN_TYPE_BROADCAST] = htole16(maxdwell); scan.dwell_time[IWI_SCAN_TYPE_BDIRECTED] = htole16(maxdwell); } /* We can only set one essid for a directed scan */ if (ss->ss_nssid != 0) { error = iwi_cmd(sc, IWI_CMD_SET_ESSID, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len); if (error) return (error); } if (mode == IWI_SCAN_ALLCHAN) { int i, next, band, b, bstart; /* * Convert scan list to run-length encoded channel list * the firmware requires (preserving the order setup by * net80211). The first entry in each run specifies the * band and the count of items in the run. */ next = 0; /* next open slot */ bstart = 0; /* NB: not needed, silence compiler */ band = -1; /* NB: impossible value */ KASSERT(ss->ss_last > 0, ("no channels")); for (i = 0; i < ss->ss_last; i++) { chan = ss->ss_chans[i]; b = scan_band(chan); if (b != band) { if (band != -1) scan.channels[bstart] = (next - bstart) | band; /* NB: this allocates a slot for the run-len */ band = b, bstart = next++; } if (next >= IWI_SCAN_CHANNELS) { DPRINTF(("truncating scan list\n")); break; } scan.channels[next] = ieee80211_chan2ieee(ic, chan); set_scan_type(&scan, next, scan_type(ss, chan)); next++; } scan.channels[bstart] = (next - bstart) | band; } else { /* Scan the current channel only */ chan = ic->ic_curchan; scan.channels[0] = 1 | scan_band(chan); scan.channels[1] = ieee80211_chan2ieee(ic, chan); set_scan_type(&scan, 1, scan_type(ss, chan)); } #ifdef IWI_DEBUG if (iwi_debug > 0) { static const char *scantype[8] = { "PSTOP", "PASV", "DIR", "BCAST", "BDIR", "5", "6", "7" }; int i; printf("Scan request: index %u dwell %d/%d/%d\n" , le32toh(scan.full_scan_index) , le16toh(scan.dwell_time[IWI_SCAN_TYPE_PASSIVE]) , le16toh(scan.dwell_time[IWI_SCAN_TYPE_BROADCAST]) , le16toh(scan.dwell_time[IWI_SCAN_TYPE_BDIRECTED]) ); i = 0; do { int run = scan.channels[i]; if (run == 0) break; printf("Scan %d %s channels:", run & 0x3f, run & IWI_CHAN_2GHZ ? "2.4GHz" : "5GHz"); for (run &= 0x3f, i++; run > 0; run--, i++) { uint8_t type = scan.scan_type[i/2]; printf(" %u/%s", scan.channels[i], scantype[(i & 1 ? 
type : type>>4) & 7]); } printf("\n"); } while (i < IWI_SCAN_CHANNELS); } #endif return (iwi_cmd(sc, IWI_CMD_SCAN_EXT, &scan, sizeof scan)); } static void iwi_scanabort(void *arg, int npending) { struct iwi_softc *sc = arg; IWI_LOCK_DECL; IWI_LOCK(sc); sc->flags &= ~IWI_FLAG_CHANNEL_SCAN; /* NB: make sure we're still scanning */ if (sc->fw_state == IWI_FW_SCANNING) iwi_cmd(sc, IWI_CMD_ABORT_SCAN, NULL, 0); IWI_UNLOCK(sc); } static int iwi_set_sensitivity(struct iwi_softc *sc, int8_t rssi_dbm) { struct iwi_sensitivity sens; DPRINTF(("Setting sensitivity to %d\n", rssi_dbm)); memset(&sens, 0, sizeof sens); sens.rssi = htole16(rssi_dbm); return iwi_cmd(sc, IWI_CMD_SET_SENSITIVITY, &sens, sizeof sens); } static int iwi_auth_and_assoc(struct iwi_softc *sc, struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct ifnet *ifp = vap->iv_ifp; struct ieee80211_node *ni = vap->iv_bss; struct iwi_configuration config; struct iwi_associate *assoc = &sc->assoc; struct iwi_rateset rs; uint16_t capinfo; uint32_t data; int error, mode; IWI_LOCK_ASSERT(sc); if (sc->flags & IWI_FLAG_ASSOCIATED) { DPRINTF(("Already associated\n")); return (-1); } IWI_STATE_BEGIN(sc, IWI_FW_ASSOCIATING); error = 0; mode = 0; if (IEEE80211_IS_CHAN_A(ic->ic_curchan)) mode = IWI_MODE_11A; else if (IEEE80211_IS_CHAN_G(ic->ic_curchan)) mode = IWI_MODE_11G; if (IEEE80211_IS_CHAN_B(ic->ic_curchan)) mode = IWI_MODE_11B; if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { memset(&config, 0, sizeof config); config.bluetooth_coexistence = sc->bluetooth; config.antenna = sc->antenna; config.multicast_enabled = 1; if (mode == IWI_MODE_11G) config.use_protection = 1; config.answer_pbreq = (vap->iv_opmode == IEEE80211_M_IBSS) ? 1 : 0; config.disable_unicast_decryption = 1; config.disable_multicast_decryption = 1; DPRINTF(("Configuring adapter\n")); error = iwi_cmd(sc, IWI_CMD_SET_CONFIG, &config, sizeof config); if (error != 0) goto done; } #ifdef IWI_DEBUG if (iwi_debug > 0) { printf("Setting ESSID to "); ieee80211_print_essid(ni->ni_essid, ni->ni_esslen); printf("\n"); } #endif error = iwi_cmd(sc, IWI_CMD_SET_ESSID, ni->ni_essid, ni->ni_esslen); if (error != 0) goto done; error = iwi_setpowermode(sc, vap); if (error != 0) goto done; data = htole32(vap->iv_rtsthreshold); DPRINTF(("Setting RTS threshold to %u\n", le32toh(data))); error = iwi_cmd(sc, IWI_CMD_SET_RTS_THRESHOLD, &data, sizeof data); if (error != 0) goto done; data = htole32(vap->iv_fragthreshold); DPRINTF(("Setting fragmentation threshold to %u\n", le32toh(data))); error = iwi_cmd(sc, IWI_CMD_SET_FRAG_THRESHOLD, &data, sizeof data); if (error != 0) goto done; /* the rate set has already been "negotiated" */ memset(&rs, 0, sizeof rs); rs.mode = mode; rs.type = IWI_RATESET_TYPE_NEGOTIATED; rs.nrates = ni->ni_rates.rs_nrates; if (rs.nrates > IWI_RATESET_SIZE) { DPRINTF(("Truncating negotiated rate set from %u\n", rs.nrates)); rs.nrates = IWI_RATESET_SIZE; } memcpy(rs.rates, ni->ni_rates.rs_rates, rs.nrates); DPRINTF(("Setting negotiated rates (%u)\n", rs.nrates)); error = iwi_cmd(sc, IWI_CMD_SET_RATES, &rs, sizeof rs); if (error != 0) goto done; memset(assoc, 0, sizeof *assoc); if ((vap->iv_flags & IEEE80211_F_WME) && ni->ni_ies.wme_ie != NULL) { /* NB: don't treat WME setup as failure */ if (iwi_wme_setparams(sc, ic) == 0 && iwi_wme_setie(sc) == 0) assoc->policy |= htole16(IWI_POLICY_WME); /* XXX complain on failure? 
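* Failure here just means the firmware association is requested
* without the IWI_POLICY_WME bit set.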
*/ } if (vap->iv_appie_wpa != NULL) { struct ieee80211_appie *ie = vap->iv_appie_wpa; DPRINTF(("Setting optional IE (len=%u)\n", ie->ie_len)); error = iwi_cmd(sc, IWI_CMD_SET_OPTIE, ie->ie_data, ie->ie_len); if (error != 0) goto done; } error = iwi_set_sensitivity(sc, ic->ic_node_getrssi(ni)); if (error != 0) goto done; assoc->mode = mode; assoc->chan = ic->ic_curchan->ic_ieee; /* * NB: do not arrange for shared key auth w/o privacy * (i.e. a wep key); it causes a firmware error. */ if ((vap->iv_flags & IEEE80211_F_PRIVACY) && ni->ni_authmode == IEEE80211_AUTH_SHARED) { assoc->auth = IWI_AUTH_SHARED; /* * It's possible to have privacy marked but no default * key setup. This typically is due to a user app bug * but if we blindly grab the key the firmware will * barf so avoid it for now. */ if (vap->iv_def_txkey != IEEE80211_KEYIX_NONE) assoc->auth |= vap->iv_def_txkey << 4; error = iwi_setwepkeys(sc, vap); if (error != 0) goto done; } if (vap->iv_flags & IEEE80211_F_WPA) assoc->policy |= htole16(IWI_POLICY_WPA); if (vap->iv_opmode == IEEE80211_M_IBSS && ni->ni_tstamp.tsf == 0) assoc->type = IWI_HC_IBSS_START; else assoc->type = IWI_HC_ASSOC; memcpy(assoc->tstamp, ni->ni_tstamp.data, 8); if (vap->iv_opmode == IEEE80211_M_IBSS) capinfo = IEEE80211_CAPINFO_IBSS; else capinfo = IEEE80211_CAPINFO_ESS; if (vap->iv_flags & IEEE80211_F_PRIVACY) capinfo |= IEEE80211_CAPINFO_PRIVACY; if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) capinfo |= IEEE80211_CAPINFO_SHORT_PREAMBLE; if (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_SLOTTIME) capinfo |= IEEE80211_CAPINFO_SHORT_SLOTTIME; assoc->capinfo = htole16(capinfo); assoc->lintval = htole16(ic->ic_lintval); assoc->intval = htole16(ni->ni_intval); IEEE80211_ADDR_COPY(assoc->bssid, ni->ni_bssid); if (vap->iv_opmode == IEEE80211_M_IBSS) IEEE80211_ADDR_COPY(assoc->dst, ifp->if_broadcastaddr); else IEEE80211_ADDR_COPY(assoc->dst, ni->ni_bssid); DPRINTF(("%s bssid %6D dst %6D channel %u policy 0x%x " "auth %u capinfo 0x%x lintval %u bintval %u\n", assoc->type == IWI_HC_IBSS_START ? "Start" : "Join", assoc->bssid, ":", assoc->dst, ":", assoc->chan, le16toh(assoc->policy), assoc->auth, le16toh(assoc->capinfo), le16toh(assoc->lintval), le16toh(assoc->intval))); error = iwi_cmd(sc, IWI_CMD_ASSOCIATE, assoc, sizeof *assoc); done: if (error) IWI_STATE_END(sc, IWI_FW_ASSOCIATING); return (error); } static int iwi_disassociate(struct iwi_softc *sc, int quiet) { struct iwi_associate *assoc = &sc->assoc; if ((sc->flags & IWI_FLAG_ASSOCIATED) == 0) { DPRINTF(("Not associated\n")); return (-1); } IWI_STATE_BEGIN(sc, IWI_FW_DISASSOCIATING); if (quiet) assoc->type = IWI_HC_DISASSOC_QUIET; else assoc->type = IWI_HC_DISASSOC; DPRINTF(("Trying to disassociate from %6D channel %u\n", assoc->bssid, ":", assoc->chan)); return iwi_cmd(sc, IWI_CMD_ASSOCIATE, assoc, sizeof *assoc); } /* * release dma resources for the firmware */ static void iwi_release_fw_dma(struct iwi_softc *sc) { if (sc->fw_flags & IWI_FW_HAVE_PHY) bus_dmamap_unload(sc->fw_dmat, sc->fw_map); if (sc->fw_flags & IWI_FW_HAVE_MAP) bus_dmamem_free(sc->fw_dmat, sc->fw_virtaddr, sc->fw_map); if (sc->fw_flags & IWI_FW_HAVE_DMAT) bus_dma_tag_destroy(sc->fw_dmat); sc->fw_flags = 0; sc->fw_dma_size = 0; sc->fw_dmat = NULL; sc->fw_map = NULL; sc->fw_physaddr = 0; sc->fw_virtaddr = NULL; } /* * allocate the dma descriptor for the firmware. * Return 0 on success, 1 on error. * Must be called unlocked, protected by IWI_FLAG_FW_LOADING. 
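* The buffer is sized for the largest image seen so far and is only
* reallocated when a bigger one shows up (the fw_dma_size check
* below); iwi_dma_map_addr records the physical address used for the
* command blocks built in iwi_load_firmware().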
*/ static int iwi_init_fw_dma(struct iwi_softc *sc, int size) { if (sc->fw_dma_size >= size) return 0; if (bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 1, size, 0, NULL, NULL, &sc->fw_dmat) != 0) { device_printf(sc->sc_dev, "could not create firmware DMA tag\n"); goto error; } sc->fw_flags |= IWI_FW_HAVE_DMAT; if (bus_dmamem_alloc(sc->fw_dmat, &sc->fw_virtaddr, 0, &sc->fw_map) != 0) { device_printf(sc->sc_dev, "could not allocate firmware DMA memory\n"); goto error; } sc->fw_flags |= IWI_FW_HAVE_MAP; if (bus_dmamap_load(sc->fw_dmat, sc->fw_map, sc->fw_virtaddr, size, iwi_dma_map_addr, &sc->fw_physaddr, 0) != 0) { device_printf(sc->sc_dev, "could not load firmware DMA map\n"); goto error; } sc->fw_flags |= IWI_FW_HAVE_PHY; sc->fw_dma_size = size; return 0; error: iwi_release_fw_dma(sc); return 1; } static void iwi_init_locked(struct iwi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct iwi_rx_data *data; int i; IWI_LOCK_ASSERT(sc); if (sc->fw_state == IWI_FW_LOADING) { device_printf(sc->sc_dev, "%s: already loading\n", __func__); return; /* XXX: condvar? */ } iwi_stop_locked(sc); IWI_STATE_BEGIN(sc, IWI_FW_LOADING); taskqueue_unblock(sc->sc_tq); taskqueue_unblock(sc->sc_tq2); if (iwi_reset(sc) != 0) { device_printf(sc->sc_dev, "could not reset adapter\n"); goto fail; } if (iwi_load_firmware(sc, &sc->fw_boot) != 0) { device_printf(sc->sc_dev, "could not load boot firmware %s\n", sc->fw_boot.name); goto fail; } if (iwi_load_ucode(sc, &sc->fw_uc) != 0) { device_printf(sc->sc_dev, "could not load microcode %s\n", sc->fw_uc.name); goto fail; } iwi_stop_master(sc); CSR_WRITE_4(sc, IWI_CSR_CMD_BASE, sc->cmdq.physaddr); CSR_WRITE_4(sc, IWI_CSR_CMD_SIZE, sc->cmdq.count); CSR_WRITE_4(sc, IWI_CSR_CMD_WIDX, sc->cmdq.cur); CSR_WRITE_4(sc, IWI_CSR_TX1_BASE, sc->txq[0].physaddr); CSR_WRITE_4(sc, IWI_CSR_TX1_SIZE, sc->txq[0].count); CSR_WRITE_4(sc, IWI_CSR_TX1_WIDX, sc->txq[0].cur); CSR_WRITE_4(sc, IWI_CSR_TX2_BASE, sc->txq[1].physaddr); CSR_WRITE_4(sc, IWI_CSR_TX2_SIZE, sc->txq[1].count); CSR_WRITE_4(sc, IWI_CSR_TX2_WIDX, sc->txq[1].cur); CSR_WRITE_4(sc, IWI_CSR_TX3_BASE, sc->txq[2].physaddr); CSR_WRITE_4(sc, IWI_CSR_TX3_SIZE, sc->txq[2].count); CSR_WRITE_4(sc, IWI_CSR_TX3_WIDX, sc->txq[2].cur); CSR_WRITE_4(sc, IWI_CSR_TX4_BASE, sc->txq[3].physaddr); CSR_WRITE_4(sc, IWI_CSR_TX4_SIZE, sc->txq[3].count); CSR_WRITE_4(sc, IWI_CSR_TX4_WIDX, sc->txq[3].cur); for (i = 0; i < sc->rxq.count; i++) { data = &sc->rxq.data[i]; CSR_WRITE_4(sc, data->reg, data->physaddr); } CSR_WRITE_4(sc, IWI_CSR_RX_WIDX, sc->rxq.count - 1); if (iwi_load_firmware(sc, &sc->fw_fw) != 0) { device_printf(sc->sc_dev, "could not load main firmware %s\n", sc->fw_fw.name); goto fail; } sc->flags |= IWI_FLAG_FW_INITED; IWI_STATE_END(sc, IWI_FW_LOADING); if (iwi_config(sc) != 0) { device_printf(sc->sc_dev, "unable to enable adapter\n"); goto fail2; } callout_reset(&sc->sc_wdtimer, hz, iwi_watchdog, sc); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_drv_flags |= IFF_DRV_RUNNING; return; fail: IWI_STATE_END(sc, IWI_FW_LOADING); fail2: iwi_stop_locked(sc); } static void iwi_init(void *priv) { struct iwi_softc *sc = priv; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; IWI_LOCK_DECL; IWI_LOCK(sc); iwi_init_locked(sc); IWI_UNLOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) ieee80211_start_all(ic); } static void iwi_stop_locked(void *priv) { struct iwi_softc *sc = priv; struct ifnet *ifp = sc->sc_ifp; IWI_LOCK_ASSERT(sc); ifp->if_drv_flags &= 
~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); taskqueue_block(sc->sc_tq); taskqueue_block(sc->sc_tq2); if (sc->sc_softled) { callout_stop(&sc->sc_ledtimer); sc->sc_blinking = 0; } callout_stop(&sc->sc_wdtimer); callout_stop(&sc->sc_rftimer); iwi_stop_master(sc); CSR_WRITE_4(sc, IWI_CSR_RST, IWI_RST_SOFT_RESET); /* reset rings */ iwi_reset_cmd_ring(sc, &sc->cmdq); iwi_reset_tx_ring(sc, &sc->txq[0]); iwi_reset_tx_ring(sc, &sc->txq[1]); iwi_reset_tx_ring(sc, &sc->txq[2]); iwi_reset_tx_ring(sc, &sc->txq[3]); iwi_reset_rx_ring(sc, &sc->rxq); memset(sc->sc_cmd, 0, sizeof(sc->sc_cmd)); sc->sc_tx_timer = 0; sc->sc_state_timer = 0; sc->sc_busy_timer = 0; sc->flags &= ~(IWI_FLAG_BUSY | IWI_FLAG_ASSOCIATED); sc->fw_state = IWI_FW_IDLE; wakeup(sc); } static void iwi_stop(struct iwi_softc *sc) { IWI_LOCK_DECL; IWI_LOCK(sc); iwi_stop_locked(sc); IWI_UNLOCK(sc); } static void iwi_restart(void *arg, int npending) { struct iwi_softc *sc = arg; iwi_init(sc); } /* * Return whether or not the radio is enabled in hardware * (i.e. the rfkill switch is "off"). */ static int iwi_getrfkill(struct iwi_softc *sc) { return (CSR_READ_4(sc, IWI_CSR_IO) & IWI_IO_RADIO_ENABLED) == 0; } static void iwi_radio_on(void *arg, int pending) { struct iwi_softc *sc = arg; struct ieee80211com *ic = sc->sc_ifp->if_l2com; device_printf(sc->sc_dev, "radio turned on\n"); iwi_init(sc); ieee80211_notify_radio(ic, 1); } static void iwi_rfkill_poll(void *arg) { struct iwi_softc *sc = arg; IWI_LOCK_ASSERT(sc); /* * Check for a change in rfkill state. We get an * interrupt when a radio is disabled but not when * it is enabled so we must poll for the latter. */ if (!iwi_getrfkill(sc)) { taskqueue_unblock(sc->sc_tq); taskqueue_enqueue(sc->sc_tq, &sc->sc_radiontask); return; } callout_reset(&sc->sc_rftimer, 2*hz, iwi_rfkill_poll, sc); } static void iwi_radio_off(void *arg, int pending) { struct iwi_softc *sc = arg; struct ieee80211com *ic = sc->sc_ifp->if_l2com; IWI_LOCK_DECL; device_printf(sc->sc_dev, "radio turned off\n"); ieee80211_notify_radio(ic, 0); IWI_LOCK(sc); iwi_stop_locked(sc); iwi_rfkill_poll(sc); IWI_UNLOCK(sc); } static int iwi_sysctl_stats(SYSCTL_HANDLER_ARGS) { struct iwi_softc *sc = arg1; uint32_t size, buf[128]; memset(buf, 0, sizeof buf); if (!(sc->flags & IWI_FLAG_FW_INITED)) return SYSCTL_OUT(req, buf, sizeof buf); size = min(CSR_READ_4(sc, IWI_CSR_TABLE0_SIZE), 128 - 1); CSR_READ_REGION_4(sc, IWI_CSR_TABLE0_BASE, &buf[1], size); return SYSCTL_OUT(req, buf, size); } static int iwi_sysctl_radio(SYSCTL_HANDLER_ARGS) { struct iwi_softc *sc = arg1; int val = !iwi_getrfkill(sc); return SYSCTL_OUT(req, &val, sizeof val); } /* * Add sysctl knobs. */ static void iwi_sysctlattach(struct iwi_softc *sc) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "radio", CTLTYPE_INT | CTLFLAG_RD, sc, 0, iwi_sysctl_radio, "I", "radio transmitter switch state (0=off, 1=on)"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "stats", CTLTYPE_OPAQUE | CTLFLAG_RD, sc, 0, iwi_sysctl_stats, "S", "statistics"); sc->bluetooth = 0; SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "bluetooth", CTLFLAG_RW, &sc->bluetooth, 0, "bluetooth coexistence"); sc->antenna = IWI_ANTENNA_AUTO; SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "antenna", CTLFLAG_RW, &sc->antenna, 0, "antenna (0=auto)"); } /* * LED support. * * Different cards have different capabilities. Some have three * led's while others have only one. 
The linux ipw driver defines * led's for link state (associated or not), band (11a, 11g, 11b), * and for link activity. We use one led and vary the blink rate * according to the tx/rx traffic a la the ath driver. */ static __inline uint32_t iwi_toggle_event(uint32_t r) { return r &~ (IWI_RST_STANDBY | IWI_RST_GATE_ODMA | IWI_RST_GATE_IDMA | IWI_RST_GATE_ADMA); } static uint32_t iwi_read_event(struct iwi_softc *sc) { return MEM_READ_4(sc, IWI_MEM_EEPROM_EVENT); } static void iwi_write_event(struct iwi_softc *sc, uint32_t v) { MEM_WRITE_4(sc, IWI_MEM_EEPROM_EVENT, v); } static void iwi_led_done(void *arg) { struct iwi_softc *sc = arg; sc->sc_blinking = 0; } /* * Turn the activity LED off: flip the pin and then set a timer so no * update will happen for the specified duration. */ static void iwi_led_off(void *arg) { struct iwi_softc *sc = arg; uint32_t v; v = iwi_read_event(sc); v &= ~sc->sc_ledpin; iwi_write_event(sc, iwi_toggle_event(v)); callout_reset(&sc->sc_ledtimer, sc->sc_ledoff, iwi_led_done, sc); } /* * Blink the LED according to the specified on/off times. */ static void iwi_led_blink(struct iwi_softc *sc, int on, int off) { uint32_t v; v = iwi_read_event(sc); v |= sc->sc_ledpin; iwi_write_event(sc, iwi_toggle_event(v)); sc->sc_blinking = 1; sc->sc_ledoff = off; callout_reset(&sc->sc_ledtimer, on, iwi_led_off, sc); } static void iwi_led_event(struct iwi_softc *sc, int event) { #define N(a) (sizeof(a)/sizeof(a[0])) /* NB: on/off times from the Atheros NDIS driver, w/ permission */ static const struct { u_int rate; /* tx/rx iwi rate */ u_int16_t timeOn; /* LED on time (ms) */ u_int16_t timeOff; /* LED off time (ms) */ } blinkrates[] = { { IWI_RATE_OFDM54, 40, 10 }, { IWI_RATE_OFDM48, 44, 11 }, { IWI_RATE_OFDM36, 50, 13 }, { IWI_RATE_OFDM24, 57, 14 }, { IWI_RATE_OFDM18, 67, 16 }, { IWI_RATE_OFDM12, 80, 20 }, { IWI_RATE_DS11, 100, 25 }, { IWI_RATE_OFDM9, 133, 34 }, { IWI_RATE_OFDM6, 160, 40 }, { IWI_RATE_DS5, 200, 50 }, { 6, 240, 58 }, /* XXX 3Mb/s if it existed */ { IWI_RATE_DS2, 267, 66 }, { IWI_RATE_DS1, 400, 100 }, { 0, 500, 130 }, /* unknown rate/polling */ }; uint32_t txrate; int j = 0; /* XXX silence compiler */ sc->sc_ledevent = ticks; /* time of last event */ if (sc->sc_blinking) /* don't interrupt active blink */ return; switch (event) { case IWI_LED_POLL: j = N(blinkrates)-1; break; case IWI_LED_TX: /* read current transmission rate from adapter */ txrate = CSR_READ_4(sc, IWI_CSR_CURRENT_TX_RATE); if (blinkrates[sc->sc_txrix].rate != txrate) { for (j = 0; j < N(blinkrates)-1; j++) if (blinkrates[j].rate == txrate) break; sc->sc_txrix = j; } else j = sc->sc_txrix; break; case IWI_LED_RX: if (blinkrates[sc->sc_rxrix].rate != sc->sc_rxrate) { for (j = 0; j < N(blinkrates)-1; j++) if (blinkrates[j].rate == sc->sc_rxrate) break; sc->sc_rxrix = j; } else j = sc->sc_rxrix; break; } /* XXX beware of overflow */ iwi_led_blink(sc, (blinkrates[j].timeOn * hz) / 1000, (blinkrates[j].timeOff * hz) / 1000); #undef N } static int iwi_sysctl_softled(SYSCTL_HANDLER_ARGS) { struct iwi_softc *sc = arg1; int softled = sc->sc_softled; int error; error = sysctl_handle_int(oidp, &softled, 0, req); if (error || !req->newptr) return error; softled = (softled != 0); if (softled != sc->sc_softled) { if (softled) { uint32_t v = iwi_read_event(sc); v &= ~sc->sc_ledpin; iwi_write_event(sc, iwi_toggle_event(v)); } sc->sc_softled = softled; } return 0; } static void iwi_ledattach(struct iwi_softc *sc) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid *tree = 
device_get_sysctl_tree(sc->sc_dev); sc->sc_blinking = 0; sc->sc_ledstate = 1; sc->sc_ledidle = (2700*hz)/1000; /* 2.7sec */ callout_init_mtx(&sc->sc_ledtimer, &sc->sc_mtx, 0); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "softled", CTLTYPE_INT | CTLFLAG_RW, sc, 0, iwi_sysctl_softled, "I", "enable/disable software LED support"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "ledpin", CTLFLAG_RW, &sc->sc_ledpin, 0, "pin setting to turn activity LED on"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "ledidle", CTLFLAG_RW, &sc->sc_ledidle, 0, "idle time for inactivity LED (ticks)"); /* XXX for debugging */ SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "nictype", CTLFLAG_RD, &sc->sc_nictype, 0, "NIC type from EEPROM"); sc->sc_ledpin = IWI_RST_LED_ACTIVITY; sc->sc_softled = 1; sc->sc_nictype = (iwi_read_prom_word(sc, IWI_EEPROM_NIC) >> 8) & 0xff; if (sc->sc_nictype == 1) { /* * NB: led's are reversed. */ sc->sc_ledpin = IWI_RST_LED_ASSOCIATED; } } static void iwi_ops(void *arg0, int npending) { static const char *opnames[] = { [IWI_CMD_FREE] = "FREE", [IWI_SCAN_START] = "SCAN_START", [IWI_SET_CHANNEL] = "SET_CHANNEL", [IWI_AUTH] = "AUTH", [IWI_ASSOC] = "ASSOC", [IWI_DISASSOC] = "DISASSOC", [IWI_SCAN_CURCHAN] = "SCAN_CURCHAN", [IWI_SCAN_ALLCHAN] = "SCAN_ALLCHAN", [IWI_SET_WME] = "SET_WME", }; struct iwi_softc *sc = arg0; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); IWI_LOCK_DECL; int cmd; unsigned long arg; again: IWI_CMD_LOCK(sc); cmd = sc->sc_cmd[sc->sc_cmd_cur]; if (cmd == IWI_CMD_FREE) { /* No more commands to process */ IWI_CMD_UNLOCK(sc); return; } arg = sc->sc_arg[sc->sc_cmd_cur]; sc->sc_cmd[sc->sc_cmd_cur] = IWI_CMD_FREE; /* free the slot */ sc->sc_cmd_cur = (sc->sc_cmd_cur + 1) % IWI_CMD_MAXOPS; IWI_CMD_UNLOCK(sc); IWI_LOCK(sc); while (sc->fw_state != IWI_FW_IDLE || (sc->flags & IWI_FLAG_BUSY)) { msleep(sc, &sc->sc_mtx, 0, "iwicmd", hz/10); } if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { IWI_UNLOCK(sc); return; } DPRINTF(("%s: %s arg %lu\n", __func__, opnames[cmd], arg)); switch (cmd) { case IWI_AUTH: case IWI_ASSOC: if (cmd == IWI_AUTH) vap->iv_state = IEEE80211_S_AUTH; else vap->iv_state = IEEE80211_S_ASSOC; iwi_auth_and_assoc(sc, vap); /* NB: completion done in iwi_notification_intr */ break; case IWI_DISASSOC: iwi_disassociate(sc, 0); break; case IWI_SET_WME: if (vap->iv_state == IEEE80211_S_RUN) (void) iwi_wme_setparams(sc, ic); break; case IWI_SCAN_START: sc->flags |= IWI_FLAG_CHANNEL_SCAN; break; case IWI_SCAN_CURCHAN: case IWI_SCAN_ALLCHAN: if (!(sc->flags & IWI_FLAG_CHANNEL_SCAN)) { DPRINTF(("%s: ic_scan_curchan while not scanning\n", __func__)); goto done; } if (iwi_scanchan(sc, arg, cmd)) ieee80211_cancel_scan(vap); break; } done: IWI_UNLOCK(sc); /* Take another pass */ goto again; } static int iwi_queue_cmd(struct iwi_softc *sc, int cmd, unsigned long arg) { IWI_CMD_LOCK(sc); if (sc->sc_cmd[sc->sc_cmd_next] != 0) { IWI_CMD_UNLOCK(sc); DPRINTF(("%s: command %d dropped\n", __func__, cmd)); return (EBUSY); } sc->sc_cmd[sc->sc_cmd_next] = cmd; sc->sc_arg[sc->sc_cmd_next] = arg; sc->sc_cmd_next = (sc->sc_cmd_next + 1) % IWI_CMD_MAXOPS; taskqueue_enqueue(sc->sc_tq, &sc->sc_opstask); IWI_CMD_UNLOCK(sc); return (0); } static void iwi_scan_start(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct iwi_softc *sc = ifp->if_softc; iwi_queue_cmd(sc, IWI_SCAN_START, 0); } static void iwi_set_channel(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct 
iwi_softc *sc = ifp->if_softc; if (sc->fw_state == IWI_FW_IDLE) iwi_setcurchan(sc, ic->ic_curchan->ic_ieee); } static void iwi_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) { struct ieee80211vap *vap = ss->ss_vap; struct ifnet *ifp = vap->iv_ic->ic_ifp; struct iwi_softc *sc = ifp->if_softc; iwi_queue_cmd(sc, IWI_SCAN_CURCHAN, maxdwell); } #if 0 static void iwi_scan_allchan(struct ieee80211com *ic, unsigned long maxdwell) { struct ifnet *ifp = ic->ic_ifp; struct iwi_softc *sc = ifp->if_softc; iwi_queue_cmd(sc, IWI_SCAN_ALLCHAN, maxdwell); } #endif static void iwi_scan_mindwell(struct ieee80211_scan_state *ss) { /* NB: don't try to abort scan; wait for firmware to finish */ } static void iwi_scan_end(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct iwi_softc *sc = ifp->if_softc; taskqueue_enqueue(sc->sc_tq2, &sc->sc_scanaborttask); } diff --git a/sys/dev/iwn/if_iwn.c b/sys/dev/iwn/if_iwn.c index ee7bda581596..332c2ce6b043 100644 --- a/sys/dev/iwn/if_iwn.c +++ b/sys/dev/iwn/if_iwn.c @@ -1,4630 +1,4631 @@ /*- * Copyright (c) 2007 * Damien Bergamini * Copyright (c) 2008 * Benjamin Close * Copyright (c) 2008 Sam Leffler, Errno Consulting * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* * Driver for Intel Wireless WiFi Link 4965AGN 802.11 network adapters. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int iwn_probe(device_t); static int iwn_attach(device_t); static int iwn_detach(device_t); static int iwn_cleanup(device_t); static struct ieee80211vap *iwn_vap_create(struct ieee80211com *, const char name[IFNAMSIZ], int unit, int opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]); static void iwn_vap_delete(struct ieee80211vap *); static int iwn_shutdown(device_t); static int iwn_suspend(device_t); static int iwn_resume(device_t); static int iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *, void **, bus_size_t, bus_size_t, int); static void iwn_dma_contig_free(struct iwn_dma_info *); int iwn_alloc_shared(struct iwn_softc *); void iwn_free_shared(struct iwn_softc *); int iwn_alloc_kw(struct iwn_softc *); void iwn_free_kw(struct iwn_softc *); int iwn_alloc_fwmem(struct iwn_softc *); void iwn_free_fwmem(struct iwn_softc *); struct iwn_rbuf *iwn_alloc_rbuf(struct iwn_softc *); void iwn_free_rbuf(void *, void *); int iwn_alloc_rpool(struct iwn_softc *); void iwn_free_rpool(struct iwn_softc *); int iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); void iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); void iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); int iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *, int); void iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); void iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); -struct ieee80211_node *iwn_node_alloc(struct ieee80211_node_table *); +static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *, + const uint8_t [IEEE80211_ADDR_LEN]); void iwn_newassoc(struct ieee80211_node *, int); int iwn_media_change(struct ifnet *); int iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int); void iwn_mem_lock(struct iwn_softc *); void iwn_mem_unlock(struct iwn_softc *); uint32_t iwn_mem_read(struct iwn_softc *, uint32_t); void iwn_mem_write(struct iwn_softc *, uint32_t, uint32_t); void iwn_mem_write_region_4(struct iwn_softc *, uint32_t, const uint32_t *, int); int iwn_eeprom_lock(struct iwn_softc *); void iwn_eeprom_unlock(struct iwn_softc *); int iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int); int iwn_transfer_microcode(struct iwn_softc *, const uint8_t *, int); int iwn_transfer_firmware(struct iwn_softc *); int iwn_load_firmware(struct iwn_softc *); void iwn_unload_firmware(struct iwn_softc *); static void iwn_timer_timeout(void *); static void iwn_calib_reset(struct iwn_softc *); void iwn_ampdu_rx_start(struct iwn_softc *, struct iwn_rx_desc *); void iwn_rx_intr(struct iwn_softc *, struct iwn_rx_desc *, struct iwn_rx_data *); void iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *); void iwn_tx_intr(struct iwn_softc *, struct iwn_rx_desc *); void iwn_cmd_intr(struct iwn_softc *, struct iwn_rx_desc *); static void iwn_bmiss(void *, int); void iwn_notif_intr(struct iwn_softc *); void iwn_intr(void *); void iwn_read_eeprom(struct iwn_softc *); static void iwn_read_eeprom_channels(struct iwn_softc *); void iwn_print_power_group(struct iwn_softc *, int); uint8_t iwn_plcp_signal(int); int iwn_tx_data(struct 
iwn_softc *, struct mbuf *, struct ieee80211_node *, struct iwn_tx_ring *); void iwn_start(struct ifnet *); void iwn_start_locked(struct ifnet *); static int iwn_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void iwn_watchdog(struct iwn_softc *); int iwn_ioctl(struct ifnet *, u_long, caddr_t); int iwn_cmd(struct iwn_softc *, int, const void *, int, int); int iwn_set_link_quality(struct iwn_softc *, uint8_t, const struct ieee80211_channel *, int); int iwn_set_key(struct ieee80211com *, struct ieee80211_node *, const struct ieee80211_key *); int iwn_wme_update(struct ieee80211com *); void iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t); int iwn_set_critical_temp(struct iwn_softc *); void iwn_enable_tsf(struct iwn_softc *, struct ieee80211_node *); void iwn_power_calibration(struct iwn_softc *, int); int iwn_set_txpower(struct iwn_softc *, struct ieee80211_channel *, int); int8_t iwn_get_rssi(struct iwn_softc *, const struct iwn_rx_stat *); int iwn_get_noise(const struct iwn_rx_general_stats *); int iwn_get_temperature(struct iwn_softc *); int iwn_init_sensitivity(struct iwn_softc *); void iwn_compute_differential_gain(struct iwn_softc *, const struct iwn_rx_general_stats *); void iwn_tune_sensitivity(struct iwn_softc *, const struct iwn_rx_stats *); int iwn_send_sensitivity(struct iwn_softc *); int iwn_auth(struct iwn_softc *); int iwn_run(struct iwn_softc *); int iwn_scan(struct iwn_softc *); int iwn_config(struct iwn_softc *); void iwn_post_alive(struct iwn_softc *); void iwn_stop_master(struct iwn_softc *); int iwn_reset(struct iwn_softc *); void iwn_hw_config(struct iwn_softc *); void iwn_init_locked(struct iwn_softc *); void iwn_init(void *); void iwn_stop_locked(struct iwn_softc *); void iwn_stop(struct iwn_softc *); static void iwn_scan_start(struct ieee80211com *); static void iwn_scan_end(struct ieee80211com *); static void iwn_set_channel(struct ieee80211com *); static void iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long); static void iwn_scan_mindwell(struct ieee80211_scan_state *); static void iwn_ops(void *, int); static int iwn_queue_cmd( struct iwn_softc *, int, int, int); static void iwn_bpfattach(struct iwn_softc *); static void iwn_sysctlattach(struct iwn_softc *); #define IWN_DEBUG #ifdef IWN_DEBUG enum { IWN_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ IWN_DEBUG_RECV = 0x00000002, /* basic recv operation */ IWN_DEBUG_STATE = 0x00000004, /* 802.11 state transitions */ IWN_DEBUG_TXPOW = 0x00000008, /* tx power processing */ IWN_DEBUG_RESET = 0x00000010, /* reset processing */ IWN_DEBUG_OPS = 0x00000020, /* iwn_ops processing */ IWN_DEBUG_BEACON = 0x00000040, /* beacon handling */ IWN_DEBUG_WATCHDOG = 0x00000080, /* watchdog timeout */ IWN_DEBUG_INTR = 0x00000100, /* ISR */ IWN_DEBUG_CALIBRATE = 0x00000200, /* periodic calibration */ IWN_DEBUG_NODE = 0x00000400, /* node management */ IWN_DEBUG_LED = 0x00000800, /* led management */ IWN_DEBUG_CMD = 0x00001000, /* cmd submission */ IWN_DEBUG_FATAL = 0x80000000, /* fatal errors */ IWN_DEBUG_ANY = 0xffffffff }; #define DPRINTF(sc, m, fmt, ...) do { \ if (sc->sc_debug & (m)) \ printf(fmt, __VA_ARGS__); \ } while (0) static const char *iwn_ops_str(int); static const char *iwn_intr_str(uint8_t); #else #define DPRINTF(sc, m, fmt, ...) 
do { (void) sc; } while (0) #endif struct iwn_ident { uint16_t vendor; uint16_t device; const char *name; }; static const struct iwn_ident iwn_ident_table [] = { { 0x8086, 0x4229, "Intel(R) PRO/Wireless 4965BGN" }, { 0x8086, 0x422D, "Intel(R) PRO/Wireless 4965BGN" }, { 0x8086, 0x4230, "Intel(R) PRO/Wireless 4965BGN" }, { 0x8086, 0x4233, "Intel(R) PRO/Wireless 4965BGN" }, { 0, 0, NULL } }; static int iwn_probe(device_t dev) { const struct iwn_ident *ident; for (ident = iwn_ident_table; ident->name != NULL; ident++) { if (pci_get_vendor(dev) == ident->vendor && pci_get_device(dev) == ident->device) { device_set_desc(dev, ident->name); return 0; } } return ENXIO; } static int iwn_attach(device_t dev) { struct iwn_softc *sc = (struct iwn_softc *)device_get_softc(dev); struct ieee80211com *ic; struct ifnet *ifp; int i, error; sc->sc_dev = dev; /* XXX */ if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { device_printf(dev, "chip is in D%d power mode " "-- setting to D0\n", pci_get_powerstate(dev)); pci_set_powerstate(dev, PCI_POWERSTATE_D0); } /* clear device specific PCI configuration register 0x41 */ pci_write_config(dev, 0x41, 0, 1); /* enable bus-mastering */ pci_enable_busmaster(dev); sc->mem_rid= PCIR_BAR(0); sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, RF_ACTIVE); if (sc->mem == NULL ) { device_printf(dev, "could not allocate memory resources\n"); error = ENOMEM; return error; } sc->sc_st = rman_get_bustag(sc->mem); sc->sc_sh = rman_get_bushandle(sc->mem); sc->irq_rid = 0; sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE | RF_SHAREABLE); if (sc->irq == NULL) { device_printf(dev, "could not allocate interrupt resource\n"); error = ENOMEM; return error; } IWN_LOCK_INIT(sc); IWN_CMD_LOCK_INIT(sc); callout_init_mtx(&sc->sc_timer_to, &sc->sc_mtx, 0); /* * Create the taskqueues used by the driver. Primarily * sc_tq handles most of the tasks. */ sc->sc_tq = taskqueue_create("iwn_taskq", M_NOWAIT | M_ZERO, taskqueue_thread_enqueue, &sc->sc_tq); taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq", device_get_nameunit(dev)); TASK_INIT(&sc->sc_ops_task, 0, iwn_ops, sc ); TASK_INIT(&sc->sc_bmiss_task, 0, iwn_bmiss, sc); /* * Put adapter into a known state. */ error = iwn_reset(sc); if (error != 0) { device_printf(dev, "could not reset adapter, error %d\n", error); goto fail; } /* * Allocate DMA memory for firmware transfers. */ error = iwn_alloc_fwmem(sc); if (error != 0) { device_printf(dev, "could not allocate firmware memory, error %d\n", error); goto fail; } /* * Allocate a "keep warm" page. */ error = iwn_alloc_kw(sc); if (error != 0) { device_printf(dev, "could not allocate keep-warm page, error %d\n", error); goto fail; } /* * Allocate shared area (communication area). */ error = iwn_alloc_shared(sc); if (error != 0) { device_printf(dev, "could not allocate shared area, error %d\n", error); goto fail; } /* * Allocate Tx rings.
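* One descriptor ring per hardware Tx queue (IWN_NTXQUEUES of them);
* the Rx ring is allocated right after.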
*/ for (i = 0; i < IWN_NTXQUEUES; i++) { error = iwn_alloc_tx_ring(sc, &sc->txq[i], i); if (error != 0) { device_printf(dev, "could not allocate Tx ring %d, error %d\n", i, error); goto fail; } } error = iwn_alloc_rx_ring(sc, &sc->rxq); if (error != 0 ){ device_printf(dev, "could not allocate Rx ring, error %d\n", error); goto fail; } ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { device_printf(dev, "can not allocate ifnet structure\n"); goto fail; } ic = ifp->if_l2com; ic->ic_ifp = ifp; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_MONITOR /* monitor mode supported */ | IEEE80211_C_TXPMGT /* tx power management */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_WPA | IEEE80211_C_SHPREAMBLE /* short preamble supported */ #if 0 | IEEE80211_C_BGSCAN /* background scanning */ | IEEE80211_C_IBSS /* ibss/adhoc mode */ #endif | IEEE80211_C_WME /* WME */ ; #if 0 /* XXX disable until HT channel setup works */ ic->ic_htcaps = IEEE80211_HTCAP_SMPS_ENA /* SM PS mode enabled */ | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width */ | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */ | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */ | IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */ | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */ /* s/w capabilities */ | IEEE80211_HTC_HT /* HT operation */ | IEEE80211_HTC_AMPDU /* tx A-MPDU */ | IEEE80211_HTC_AMSDU /* tx A-MSDU */ ; #endif /* read supported channels and MAC address from EEPROM */ iwn_read_eeprom(sc); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = iwn_init; ifp->if_ioctl = iwn_ioctl; ifp->if_start = iwn_start; IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN; IFQ_SET_READY(&ifp->if_snd); ieee80211_ifattach(ic); ic->ic_vap_create = iwn_vap_create; ic->ic_vap_delete = iwn_vap_delete; ic->ic_raw_xmit = iwn_raw_xmit; ic->ic_node_alloc = iwn_node_alloc; ic->ic_newassoc = iwn_newassoc; ic->ic_wme.wme_update = iwn_wme_update; ic->ic_scan_start = iwn_scan_start; ic->ic_scan_end = iwn_scan_end; ic->ic_set_channel = iwn_set_channel; ic->ic_scan_curchan = iwn_scan_curchan; ic->ic_scan_mindwell = iwn_scan_mindwell; iwn_bpfattach(sc); iwn_sysctlattach(sc); /* * Hook our interrupt after all initialization is complete. 
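 * NB: the IRQ was allocated RF_SHAREABLE, so iwn_intr() may be invoked
 * for another device's interrupt as soon as it is hooked; deferring
 * bus_setup_intr() until the rings, locks and taskqueue above exist
 * keeps a stray interrupt from touching a half-initialized softc.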
*/ error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, iwn_intr, sc, &sc->sc_ih); if (error != 0) { device_printf(dev, "could not set up interrupt, error %d\n", error); goto fail; } ieee80211_announce(ic); return 0; fail: iwn_cleanup(dev); return error; } static int iwn_detach(device_t dev) { iwn_cleanup(dev); return 0; } /* * Clean up any device resources that were allocated. */ int iwn_cleanup(device_t dev) { struct iwn_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; int i; if (ifp != NULL) { iwn_stop(sc); callout_drain(&sc->sc_timer_to); bpfdetach(ifp); ieee80211_ifdetach(ic); } iwn_unload_firmware(sc); iwn_free_rx_ring(sc, &sc->rxq); for (i = 0; i < IWN_NTXQUEUES; i++) iwn_free_tx_ring(sc, &sc->txq[i]); iwn_free_kw(sc); iwn_free_fwmem(sc); if (sc->irq != NULL) { bus_teardown_intr(dev, sc->irq, sc->sc_ih); bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq); } if (sc->mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem); if (ifp != NULL) if_free(ifp); taskqueue_free(sc->sc_tq); IWN_CMD_LOCK_DESTROY(sc); IWN_LOCK_DESTROY(sc); return 0; } static struct ieee80211vap * iwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, int opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct iwn_vap *ivp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return NULL; ivp = (struct iwn_vap *) malloc(sizeof(struct iwn_vap), M_80211_VAP, M_NOWAIT | M_ZERO); if (ivp == NULL) return NULL; vap = &ivp->iv_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac); vap->iv_bmissthreshold = 10; /* override default */ /* override with driver methods */ ivp->iv_newstate = vap->iv_newstate; vap->iv_newstate = iwn_newstate; ieee80211_amrr_init(&ivp->iv_amrr, vap, IEEE80211_AMRR_MIN_SUCCESS_THRESHOLD, IEEE80211_AMRR_MAX_SUCCESS_THRESHOLD, 500 /*ms*/); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status); ic->ic_opmode = opmode; return vap; } static void iwn_vap_delete(struct ieee80211vap *vap) { struct iwn_vap *ivp = IWN_VAP(vap); ieee80211_amrr_cleanup(&ivp->iv_amrr); ieee80211_vap_detach(vap); free(ivp, M_80211_VAP); } static int iwn_shutdown(device_t dev) { struct iwn_softc *sc = device_get_softc(dev); iwn_stop(sc); return 0; } static int iwn_suspend(device_t dev) { struct iwn_softc *sc = device_get_softc(dev); iwn_stop(sc); return 0; } static int iwn_resume(device_t dev) { struct iwn_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->sc_ifp; pci_write_config(dev, 0x41, 0, 1); if (ifp->if_flags & IFF_UP) iwn_init(sc); return 0; } static void iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { if (error != 0) return; KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); *(bus_addr_t *)arg = segs[0].ds_addr; } static int iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma, void **kvap, bus_size_t size, bus_size_t alignment, int flags) { int error, lalignment, i; /* * FreeBSD can't guarantee 16k alignment at the moment (11/2007) so * we allocate an extra 12k with 4k alignment and walk through * it trying to find where the alignment is. It's a nasty fix for * a bigger problem. 
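 * Roughly: for a 16k request we ask for size + 12k at 4k alignment; a
 * 4k-aligned address sits 0, 4k, 8k or 12k past the previous 16k
 * boundary, so advancing the pointer by 4k at most three times must land
 * on a 16k boundary, e.g. ...9000 -> ...a000 -> ...b000 -> ...c000.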
*/ DPRINTF(sc, IWN_DEBUG_RESET, "Size: %zd - alignment %zd\n", size, alignment); if (alignment == 0x4000) { size += 12*1024; lalignment = 4096; DPRINTF(sc, IWN_DEBUG_RESET, "%s\n", "Attempting to find a 16k boundary"); } else lalignment = alignment; dma->size = size; dma->tag = NULL; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), lalignment, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 1, size, flags, NULL, NULL, &dma->tag); if (error != 0) { device_printf(sc->sc_dev, "%s: bus_dma_tag_create failed, error %d\n", __func__, error); goto fail; } error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, flags | BUS_DMA_ZERO, &dma->map); if (error != 0) { device_printf(sc->sc_dev, "%s: bus_dmamem_alloc failed, error %d\n", __func__, error); goto fail; } if (alignment == 0x4000) { for (i = 0; i < 3 && (((uintptr_t)dma->vaddr) & 0x3fff); i++) { DPRINTF(sc, IWN_DEBUG_RESET, "%s\n", "Memory Unaligned, shifting pointer by 4k"); dma->vaddr += 4096; size -= 4096; } if ((((uintptr_t)dma->vaddr ) & (alignment-1))) { DPRINTF(sc, IWN_DEBUG_ANY, "%s: failed to align memory, vaddr %p, align %zd\n", __func__, dma->vaddr, alignment); error = ENOMEM; goto fail; } } error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, iwn_dma_map_addr, &dma->paddr, flags); if (error != 0) { device_printf(sc->sc_dev, "%s: bus_dmamap_load failed, error %d\n", __func__, error); goto fail; } if (kvap != NULL) *kvap = dma->vaddr; return 0; fail: iwn_dma_contig_free(dma); return error; } static void iwn_dma_contig_free(struct iwn_dma_info *dma) { if (dma->tag != NULL) { if (dma->map != NULL) { if (dma->paddr == 0) { bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(dma->tag, dma->map); } bus_dmamem_free(dma->tag, &dma->vaddr, dma->map); } bus_dma_tag_destroy(dma->tag); } } int iwn_alloc_shared(struct iwn_softc *sc) { /* must be aligned on a 1KB boundary */ return iwn_dma_contig_alloc(sc, &sc->shared_dma, (void **)&sc->shared, sizeof (struct iwn_shared), 1024, BUS_DMA_NOWAIT); } void iwn_free_shared(struct iwn_softc *sc) { iwn_dma_contig_free(&sc->shared_dma); } int iwn_alloc_kw(struct iwn_softc *sc) { /* must be aligned on a 4k boundary */ return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, PAGE_SIZE, PAGE_SIZE, BUS_DMA_NOWAIT); } void iwn_free_kw(struct iwn_softc *sc) { iwn_dma_contig_free(&sc->kw_dma); } int iwn_alloc_fwmem(struct iwn_softc *sc) { /* allocate enough contiguous space to store text and data */ return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL, IWN_FW_MAIN_TEXT_MAXSZ + IWN_FW_MAIN_DATA_MAXSZ, 16, BUS_DMA_NOWAIT); } void iwn_free_fwmem(struct iwn_softc *sc) { iwn_dma_contig_free(&sc->fw_dma); } int iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) { int i, error; ring->cur = 0; error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, IWN_RX_RING_COUNT * sizeof (uint32_t), IWN_RING_DMA_ALIGN, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "%s: could not allocate rx ring DMA memory, error %d\n", __func__, error); goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MJUMPAGESIZE, 1, MJUMPAGESIZE, BUS_DMA_NOWAIT, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "%s: bus_dma_tag_create_failed, error %d\n", __func__, error); goto fail; } /* * Setup Rx buffers. 
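 * NB: each of the IWN_RX_RING_COUNT slots gets its own DMA map and a
 * page-sized (MJUMPAGESIZE) cluster; the ring descriptor stores the
 * cluster's bus address shifted right by 8 since the hardware expects
 * 256-byte aligned buffers addressed in units of 256 bytes (e.g. a
 * paddr of 0x1234500 is written to the descriptor as 0x12345).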
*/ for (i = 0; i < IWN_RX_RING_COUNT; i++) { struct iwn_rx_data *data = &ring->data[i]; struct mbuf *m; bus_addr_t paddr; error = bus_dmamap_create(ring->data_dmat, 0, &data->map); if (error != 0) { device_printf(sc->sc_dev, "%s: bus_dmamap_create failed, error %d\n", __func__, error); goto fail; } m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); if (m == NULL) { device_printf(sc->sc_dev, "%s: could not allocate rx mbuf\n", __func__); error = ENOMEM; goto fail; } /* map page */ error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m, caddr_t), MJUMPAGESIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); if (error != 0 && error != EFBIG) { device_printf(sc->sc_dev, "%s: bus_dmamap_load failed, error %d\n", __func__, error); m_freem(m); error = ENOMEM; /* XXX unique code */ goto fail; } bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); data->m = m; /* Rx buffers are aligned on a 256-byte boundary */ ring->desc[i] = htole32(paddr >> 8); } bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, BUS_DMASYNC_PREWRITE); return 0; fail: iwn_free_rx_ring(sc, ring); return error; } void iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) { int ntries; iwn_mem_lock(sc); IWN_WRITE(sc, IWN_RX_CONFIG, 0); for (ntries = 0; ntries < 100; ntries++) { if (IWN_READ(sc, IWN_RX_STATUS) & IWN_RX_IDLE) break; DELAY(10); } #ifdef IWN_DEBUG if (ntries == 100) DPRINTF(sc, IWN_DEBUG_ANY, "%s\n", "timeout resetting Rx ring"); #endif iwn_mem_unlock(sc); ring->cur = 0; } void iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) { int i; iwn_dma_contig_free(&ring->desc_dma); for (i = 0; i < IWN_RX_RING_COUNT; i++) if (ring->data[i].m != NULL) m_freem(ring->data[i].m); } int iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid) { bus_size_t size; int i, error; ring->qid = qid; ring->queued = 0; ring->cur = 0; size = IWN_TX_RING_COUNT * sizeof(struct iwn_tx_desc); error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, size, IWN_RING_DMA_ALIGN, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "%s: could not allocate tx ring DMA memory, error %d\n", __func__, error); goto fail; } size = IWN_TX_RING_COUNT * sizeof(struct iwn_tx_cmd); error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd, size, 4, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "%s: could not allocate tx cmd DMA memory, error %d\n", __func__, error); goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, IWN_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "%s: bus_dma_tag_create_failed, error %d\n", __func__, error); goto fail; } for (i = 0; i < IWN_TX_RING_COUNT; i++) { struct iwn_tx_data *data = &ring->data[i]; error = bus_dmamap_create(ring->data_dmat, 0, &data->map); if (error != 0) { device_printf(sc->sc_dev, "%s: bus_dmamap_create failed, error %d\n", __func__, error); goto fail; } bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); } return 0; fail: iwn_free_tx_ring(sc, ring); return error; } void iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring) { uint32_t tmp; int i, ntries; iwn_mem_lock(sc); IWN_WRITE(sc, IWN_TX_CONFIG(ring->qid), 0); for (ntries = 0; ntries < 20; ntries++) { tmp = IWN_READ(sc, IWN_TX_STATUS); if ((tmp & IWN_TX_IDLE(ring->qid)) == IWN_TX_IDLE(ring->qid)) break; DELAY(10); } #ifdef IWN_DEBUG if (ntries == 20) DPRINTF(sc, 
IWN_DEBUG_RESET, "%s: timeout resetting Tx ring %d\n", __func__, ring->qid); #endif iwn_mem_unlock(sc); for (i = 0; i < IWN_TX_RING_COUNT; i++) { struct iwn_tx_data *data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } } ring->queued = 0; ring->cur = 0; } void iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring) { int i; iwn_dma_contig_free(&ring->desc_dma); iwn_dma_contig_free(&ring->cmd_dma); if (ring->data != NULL) { for (i = 0; i < IWN_TX_RING_COUNT; i++) { struct iwn_tx_data *data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } } } } struct ieee80211_node * -iwn_node_alloc(struct ieee80211_node_table *ic) +iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) { return malloc(sizeof (struct iwn_node), M_80211_NODE,M_NOWAIT | M_ZERO); } void iwn_newassoc(struct ieee80211_node *ni, int isnew) { struct ieee80211vap *vap = ni->ni_vap; ieee80211_amrr_node_init(&IWN_VAP(vap)->iv_amrr, &IWN_NODE(ni)->amn, ni); } int iwn_media_change(struct ifnet *ifp) { int error = ieee80211_media_change(ifp); /* NB: only the fixed rate can change and that doesn't need a reset */ return (error == ENETRESET ? 0 : error); } int iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct iwn_vap *ivp = IWN_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct iwn_softc *sc = ic->ic_ifp->if_softc; int error; DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]); IWN_LOCK(sc); callout_stop(&sc->sc_timer_to); IWN_UNLOCK(sc); /* * Some state transitions require issuing a configure request * to the adapter. This must be done in a blocking context * so we toss control to the task q thread where the state * change will be finished after the command completes. */ if (nstate == IEEE80211_S_AUTH && vap->iv_state != IEEE80211_S_AUTH) { /* !AUTH -> AUTH requires adapter config */ error = iwn_queue_cmd(sc, IWN_AUTH, arg, IWN_QUEUE_NORMAL); return (error != 0 ? error : EINPROGRESS); } if (nstate == IEEE80211_S_RUN && vap->iv_state != IEEE80211_S_RUN) { /* * !RUN -> RUN requires setting the association id * which is done with a firmware cmd. We also defer * starting the timers until that work is done. */ error = iwn_queue_cmd(sc, IWN_RUN, arg, IWN_QUEUE_NORMAL); return (error != 0 ? error : EINPROGRESS); } if (nstate == IEEE80211_S_RUN) { /* * RUN -> RUN transition; just restart the timers. */ iwn_calib_reset(sc); } return ivp->iv_newstate(vap, nstate, arg); } /* * Grab exclusive access to NIC memory. */ void iwn_mem_lock(struct iwn_softc *sc) { uint32_t tmp; int ntries; tmp = IWN_READ(sc, IWN_GPIO_CTL); IWN_WRITE(sc, IWN_GPIO_CTL, tmp | IWN_GPIO_MAC); /* spin until we actually get the lock */ for (ntries = 0; ntries < 1000; ntries++) { if ((IWN_READ(sc, IWN_GPIO_CTL) & (IWN_GPIO_CLOCK | IWN_GPIO_SLEEP)) == IWN_GPIO_CLOCK) break; DELAY(10); } if (ntries == 1000) device_printf(sc->sc_dev, "%s: could not lock memory\n", __func__); } /* * Release lock on NIC memory. 
*/ void iwn_mem_unlock(struct iwn_softc *sc) { uint32_t tmp = IWN_READ(sc, IWN_GPIO_CTL); IWN_WRITE(sc, IWN_GPIO_CTL, tmp & ~IWN_GPIO_MAC); } uint32_t iwn_mem_read(struct iwn_softc *sc, uint32_t addr) { IWN_WRITE(sc, IWN_READ_MEM_ADDR, IWN_MEM_4 | addr); return IWN_READ(sc, IWN_READ_MEM_DATA); } void iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data) { IWN_WRITE(sc, IWN_WRITE_MEM_ADDR, IWN_MEM_4 | addr); IWN_WRITE(sc, IWN_WRITE_MEM_DATA, data); } void iwn_mem_write_region_4(struct iwn_softc *sc, uint32_t addr, const uint32_t *data, int wlen) { for (; wlen > 0; wlen--, data++, addr += 4) iwn_mem_write(sc, addr, *data); } int iwn_eeprom_lock(struct iwn_softc *sc) { uint32_t tmp; int ntries; tmp = IWN_READ(sc, IWN_HWCONFIG); IWN_WRITE(sc, IWN_HWCONFIG, tmp | IWN_HW_EEPROM_LOCKED); /* spin until we actually get the lock */ for (ntries = 0; ntries < 100; ntries++) { if (IWN_READ(sc, IWN_HWCONFIG) & IWN_HW_EEPROM_LOCKED) return 0; DELAY(10); } return ETIMEDOUT; } void iwn_eeprom_unlock(struct iwn_softc *sc) { uint32_t tmp = IWN_READ(sc, IWN_HWCONFIG); IWN_WRITE(sc, IWN_HWCONFIG, tmp & ~IWN_HW_EEPROM_LOCKED); } /* * Read `len' bytes from the EEPROM. We access the EEPROM through the MAC * instead of using the traditional bit-bang method. */ int iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int len) { uint8_t *out = data; uint32_t val; int ntries, tmp; iwn_mem_lock(sc); for (; len > 0; len -= 2, addr++) { IWN_WRITE(sc, IWN_EEPROM_CTL, addr << 2); tmp = IWN_READ(sc, IWN_EEPROM_CTL); IWN_WRITE(sc, IWN_EEPROM_CTL, tmp & ~IWN_EEPROM_MSK ); for (ntries = 0; ntries < 10; ntries++) { if ((val = IWN_READ(sc, IWN_EEPROM_CTL)) & IWN_EEPROM_READY) break; DELAY(5); } if (ntries == 10) { device_printf(sc->sc_dev,"could not read EEPROM\n"); return ETIMEDOUT; } *out++ = val >> 16; if (len > 1) *out++ = val >> 24; } iwn_mem_unlock(sc); return 0; } /* * The firmware boot code is small and is intended to be copied directly into * the NIC internal memory. 
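 * NB: iwn_transfer_microcode() below does the whole handshake: copy the
 * image into NIC memory, program the source/destination/size registers,
 * set IWN_UC_RUN and poll until it clears, then set IWN_UC_ENABLE.
 * A rough sketch of the sequence (nwords is illustrative, error
 * handling omitted):
 *	iwn_mem_write_region_4(sc, IWN_MEM_UCODE_BASE, ucode, nwords);
 *	iwn_mem_write(sc, IWN_MEM_UCODE_SIZE, nwords);
 *	iwn_mem_write(sc, IWN_MEM_UCODE_CTL, IWN_UC_RUN);
 *	(poll IWN_MEM_UCODE_CTL until IWN_UC_RUN clears)
 *	iwn_mem_write(sc, IWN_MEM_UCODE_CTL, IWN_UC_ENABLE);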
*/ int iwn_transfer_microcode(struct iwn_softc *sc, const uint8_t *ucode, int size) { int ntries; size /= sizeof (uint32_t); iwn_mem_lock(sc); /* copy microcode image into NIC memory */ iwn_mem_write_region_4(sc, IWN_MEM_UCODE_BASE, (const uint32_t *)ucode, size); iwn_mem_write(sc, IWN_MEM_UCODE_SRC, 0); iwn_mem_write(sc, IWN_MEM_UCODE_DST, IWN_FW_TEXT); iwn_mem_write(sc, IWN_MEM_UCODE_SIZE, size); /* run microcode */ iwn_mem_write(sc, IWN_MEM_UCODE_CTL, IWN_UC_RUN); /* wait for transfer to complete */ for (ntries = 0; ntries < 1000; ntries++) { if (!(iwn_mem_read(sc, IWN_MEM_UCODE_CTL) & IWN_UC_RUN)) break; DELAY(10); } if (ntries == 1000) { iwn_mem_unlock(sc); device_printf(sc->sc_dev, "%s: could not load boot firmware\n", __func__); return ETIMEDOUT; } iwn_mem_write(sc, IWN_MEM_UCODE_CTL, IWN_UC_ENABLE); iwn_mem_unlock(sc); return 0; } int iwn_load_firmware(struct iwn_softc *sc) { int error; KASSERT(sc->fw_fp == NULL, ("firmware already loaded")); IWN_UNLOCK(sc); /* load firmware image from disk */ sc->fw_fp = firmware_get("iwnfw"); if (sc->fw_fp == NULL) { device_printf(sc->sc_dev, "%s: could not load firmware image \"iwnfw\"\n", __func__); error = EINVAL; } else error = 0; IWN_LOCK(sc); return error; } int iwn_transfer_firmware(struct iwn_softc *sc) { struct iwn_dma_info *dma = &sc->fw_dma; const struct iwn_firmware_hdr *hdr; const uint8_t *init_text, *init_data, *main_text, *main_data; const uint8_t *boot_text; uint32_t init_textsz, init_datasz, main_textsz, main_datasz; uint32_t boot_textsz; int error = 0; const struct firmware *fp = sc->fw_fp; /* extract firmware header information */ if (fp->datasize < sizeof (struct iwn_firmware_hdr)) { device_printf(sc->sc_dev, "%s: truncated firmware header: %zu bytes, expecting %zu\n", __func__, fp->datasize, sizeof (struct iwn_firmware_hdr)); error = EINVAL; goto fail; } hdr = (const struct iwn_firmware_hdr *)fp->data; main_textsz = le32toh(hdr->main_textsz); main_datasz = le32toh(hdr->main_datasz); init_textsz = le32toh(hdr->init_textsz); init_datasz = le32toh(hdr->init_datasz); boot_textsz = le32toh(hdr->boot_textsz); /* sanity-check firmware segment sizes */ if (main_textsz > IWN_FW_MAIN_TEXT_MAXSZ || main_datasz > IWN_FW_MAIN_DATA_MAXSZ || init_textsz > IWN_FW_INIT_TEXT_MAXSZ || init_datasz > IWN_FW_INIT_DATA_MAXSZ || boot_textsz > IWN_FW_BOOT_TEXT_MAXSZ || (boot_textsz & 3) != 0) { device_printf(sc->sc_dev, "%s: invalid firmware header, main [%d,%d], init [%d,%d] " "boot %d\n", __func__, main_textsz, main_datasz, init_textsz, init_datasz, boot_textsz); error = EINVAL; goto fail; } /* check that all firmware segments are present */ if (fp->datasize < sizeof (struct iwn_firmware_hdr) + main_textsz + main_datasz + init_textsz + init_datasz + boot_textsz) { device_printf(sc->sc_dev, "%s: firmware file too short: " "%zu bytes, main [%d, %d], init [%d,%d] boot %d\n", __func__, fp->datasize, main_textsz, main_datasz, init_textsz, init_datasz, boot_textsz); error = EINVAL; goto fail; } /* get pointers to firmware segments */ main_text = (const uint8_t *)(hdr + 1); main_data = main_text + main_textsz; init_text = main_data + main_datasz; init_data = init_text + init_textsz; boot_text = init_data + init_datasz; /* copy initialization images into pre-allocated DMA-safe memory */ memcpy(dma->vaddr, init_data, init_datasz); memcpy(dma->vaddr + IWN_FW_INIT_DATA_MAXSZ, init_text, init_textsz); /* tell adapter where to find initialization images */ iwn_mem_lock(sc); iwn_mem_write(sc, IWN_MEM_DATA_BASE, dma->paddr >> 4); iwn_mem_write(sc, 
IWN_MEM_DATA_SIZE, init_datasz); iwn_mem_write(sc, IWN_MEM_TEXT_BASE, (dma->paddr + IWN_FW_INIT_DATA_MAXSZ) >> 4); iwn_mem_write(sc, IWN_MEM_TEXT_SIZE, init_textsz); iwn_mem_unlock(sc); /* load firmware boot code */ error = iwn_transfer_microcode(sc, boot_text, boot_textsz); if (error != 0) { device_printf(sc->sc_dev, "%s: could not load boot firmware, error %d\n", __func__, error); goto fail; } /* now press "execute" ;-) */ IWN_WRITE(sc, IWN_RESET, 0); /* wait at most one second for first alive notification */ error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz); if (error != 0) { /* this isn't what was supposed to happen.. */ device_printf(sc->sc_dev, "%s: timeout waiting for first alive notice, error %d\n", __func__, error); goto fail; } /* copy runtime images into pre-allocated DMA-safe memory */ memcpy(dma->vaddr, main_data, main_datasz); memcpy(dma->vaddr + IWN_FW_MAIN_DATA_MAXSZ, main_text, main_textsz); /* tell adapter where to find runtime images */ iwn_mem_lock(sc); iwn_mem_write(sc, IWN_MEM_DATA_BASE, dma->paddr >> 4); iwn_mem_write(sc, IWN_MEM_DATA_SIZE, main_datasz); iwn_mem_write(sc, IWN_MEM_TEXT_BASE, (dma->paddr + IWN_FW_MAIN_DATA_MAXSZ) >> 4); iwn_mem_write(sc, IWN_MEM_TEXT_SIZE, IWN_FW_UPDATED | main_textsz); iwn_mem_unlock(sc); /* wait at most one second for second alive notification */ error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz); if (error != 0) { /* this isn't what was supposed to happen.. */ device_printf(sc->sc_dev, "%s: timeout waiting for second alive notice, error %d\n", __func__, error); goto fail; } return 0; fail: return error; } void iwn_unload_firmware(struct iwn_softc *sc) { if (sc->fw_fp != NULL) { firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); sc->fw_fp = NULL; } } static void iwn_timer_timeout(void *arg) { struct iwn_softc *sc = arg; IWN_LOCK_ASSERT(sc); if (sc->calib_cnt && --sc->calib_cnt == 0) { DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n", "send statistics request"); (void) iwn_cmd(sc, IWN_CMD_GET_STATISTICS, NULL, 0, 1); sc->calib_cnt = 60; /* do calibration every 60s */ } iwn_watchdog(sc); /* NB: piggyback tx watchdog */ callout_reset(&sc->sc_timer_to, hz, iwn_timer_timeout, sc); } static void iwn_calib_reset(struct iwn_softc *sc) { callout_reset(&sc->sc_timer_to, hz, iwn_timer_timeout, sc); sc->calib_cnt = 60; /* do calibration every 60s */ } void iwn_ampdu_rx_start(struct iwn_softc *sc, struct iwn_rx_desc *desc) { struct iwn_rx_stat *stat; DPRINTF(sc, IWN_DEBUG_RECV, "%s\n", "received AMPDU stats"); /* save Rx statistics, they will be used on IWN_AMPDU_RX_DONE */ stat = (struct iwn_rx_stat *)(desc + 1); memcpy(&sc->last_rx_stat, stat, sizeof (*stat)); sc->last_rx_valid = 1; } static __inline int maprate(int iwnrate) { switch (iwnrate) { /* CCK rates */ case 10: return 2; case 20: return 4; case 55: return 11; case 110: return 22; /* OFDM rates */ case 0xd: return 12; case 0xf: return 18; case 0x5: return 24; case 0x7: return 36; case 0x9: return 48; case 0xb: return 72; case 0x1: return 96; case 0x3: return 108; /* XXX MCS */ } /* unknown rate: should not happen */ return 0; } void iwn_rx_intr(struct iwn_softc *sc, struct iwn_rx_desc *desc, struct iwn_rx_data *data) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct iwn_rx_ring *ring = &sc->rxq; struct ieee80211_frame *wh; struct ieee80211_node *ni; struct mbuf *m, *mnew; struct iwn_rx_stat *stat; caddr_t head; uint32_t *tail; int8_t rssi, nf; int len, error; bus_addr_t paddr; if (desc->type == IWN_AMPDU_RX_DONE) { /* check for prior AMPDU_RX_START */ if 
(!sc->last_rx_valid) { DPRINTF(sc, IWN_DEBUG_ANY, "%s: missing AMPDU_RX_START\n", __func__); ifp->if_ierrors++; return; } sc->last_rx_valid = 0; stat = &sc->last_rx_stat; } else stat = (struct iwn_rx_stat *)(desc + 1); if (stat->cfg_phy_len > IWN_STAT_MAXLEN) { device_printf(sc->sc_dev, "%s: invalid rx statistic header, len %d\n", __func__, stat->cfg_phy_len); ifp->if_ierrors++; return; } if (desc->type == IWN_AMPDU_RX_DONE) { struct iwn_rx_ampdu *ampdu = (struct iwn_rx_ampdu *)(desc + 1); head = (caddr_t)(ampdu + 1); len = le16toh(ampdu->len); } else { head = (caddr_t)(stat + 1) + stat->cfg_phy_len; len = le16toh(stat->len); } /* discard Rx frames with bad CRC early */ tail = (uint32_t *)(head + len); if ((le32toh(*tail) & IWN_RX_NOERROR) != IWN_RX_NOERROR) { DPRINTF(sc, IWN_DEBUG_RECV, "%s: rx flags error %x\n", __func__, le32toh(*tail)); ifp->if_ierrors++; return; } if (len < sizeof (struct ieee80211_frame)) { DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n", __func__, len); ic->ic_stats.is_rx_tooshort++; ifp->if_ierrors++; return; } /* XXX don't need mbuf, just dma buffer */ mnew = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); if (mnew == NULL) { DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n", __func__); ic->ic_stats.is_rx_nobuf++; ifp->if_ierrors++; return; } error = bus_dmamap_load(ring->data_dmat, data->map, mtod(mnew, caddr_t), MJUMPAGESIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); if (error != 0 && error != EFBIG) { device_printf(sc->sc_dev, "%s: bus_dmamap_load failed, error %d\n", __func__, error); m_freem(mnew); ic->ic_stats.is_rx_nobuf++; /* XXX need stat */ ifp->if_ierrors++; return; } bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); /* finalize mbuf and swap in new one */ m = data->m; m->m_pkthdr.rcvif = ifp; m->m_data = head; m->m_pkthdr.len = m->m_len = len; data->m = mnew; /* update Rx descriptor */ ring->desc[ring->cur] = htole32(paddr >> 8); rssi = iwn_get_rssi(sc, stat); /* grab a reference to the source node */ wh = mtod(m, struct ieee80211_frame *); ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN && (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? 
sc->noise : -95; if (bpf_peers_present(ifp->if_bpf)) { struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap; tap->wr_flags = 0; tap->wr_dbm_antsignal = rssi; tap->wr_dbm_antnoise = nf; tap->wr_rate = maprate(stat->rate); tap->wr_tsft = htole64(stat->tstamp); if (stat->flags & htole16(IWN_CONFIG_SHPREAMBLE)) tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; bpf_mtap2(ifp->if_bpf, tap, sc->sc_rxtap_len, m); } IWN_UNLOCK(sc); /* send the frame to the 802.11 layer */ if (ni != NULL) { (void) ieee80211_input(ni, m, rssi - nf, nf, 0); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi - nf, nf, 0); IWN_LOCK(sc); } void iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct iwn_calib_state *calib = &sc->calib; struct iwn_stats *stats = (struct iwn_stats *)(desc + 1); /* beacon stats are meaningful only when associated and not scanning */ if (vap->iv_state != IEEE80211_S_RUN || (ic->ic_flags & IEEE80211_F_SCAN)) return; DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: cmd %d\n", __func__, desc->type); iwn_calib_reset(sc); /* test if temperature has changed */ if (stats->general.temp != sc->rawtemp) { int temp; sc->rawtemp = stats->general.temp; temp = iwn_get_temperature(sc); DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n", __func__, temp); /* update Tx power if need be */ iwn_power_calibration(sc, temp); } if (desc->type != IWN_BEACON_STATISTICS) return; /* reply to a statistics request */ sc->noise = iwn_get_noise(&stats->rx.general); DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise); /* test that RSSI and noise are present in stats report */ if (stats->rx.general.flags != htole32(1)) { DPRINTF(sc, IWN_DEBUG_ANY, "%s\n", "received statistics without RSSI"); return; } if (calib->state == IWN_CALIB_STATE_ASSOC) iwn_compute_differential_gain(sc, &stats->rx.general); else if (calib->state == IWN_CALIB_STATE_RUN) iwn_tune_sensitivity(sc, &stats->rx); } void iwn_tx_intr(struct iwn_softc *sc, struct iwn_rx_desc *desc) { struct ifnet *ifp = sc->sc_ifp; struct iwn_tx_ring *ring = &sc->txq[desc->qid & 0xf]; struct iwn_tx_data *data = &ring->data[desc->idx]; struct iwn_tx_stat *stat = (struct iwn_tx_stat *)(desc + 1); struct iwn_node *wn = IWN_NODE(data->ni); struct mbuf *m; struct ieee80211_node *ni; uint32_t status; KASSERT(data->ni != NULL, ("no node")); DPRINTF(sc, IWN_DEBUG_XMIT, "%s: " "qid %d idx %d retries %d nkill %d rate %x duration %d status %x\n", __func__, desc->qid, desc->idx, stat->ntries, stat->nkill, stat->rate, le16toh(stat->duration), le32toh(stat->status)); /* * Update rate control statistics for the node. */ status = le32toh(stat->status) & 0xff; if (status & 0x80) { DPRINTF(sc, IWN_DEBUG_ANY, "%s: status 0x%x\n", __func__, le32toh(stat->status)); ifp->if_oerrors++; ieee80211_amrr_tx_complete(&wn->amn, IEEE80211_AMRR_FAILURE, stat->ntries); } else { ieee80211_amrr_tx_complete(&wn->amn, IEEE80211_AMRR_SUCCESS, stat->ntries); } bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m = data->m, data->m = NULL; ni = data->ni, data->ni = NULL; if (m->m_flags & M_TXCB) { /* * Channels marked for "radar" require traffic to be received * to unlock before we can transmit. Until traffic is seen * any attempt to transmit is returned immediately with status * set to IWN_TX_FAIL_TX_LOCKED. 
Unfortunately this can easily * happen on first authenticate after scanning. To workaround * this we ignore a failure of this sort in AUTH state so the * 802.11 layer will fall back to using a timeout to wait for * the AUTH reply. This allows the firmware time to see * traffic so a subsequent retry of AUTH succeeds. It's * unclear why the firmware does not maintain state for * channels recently visited as this would allow immediate * use of the channel after a scan (where we see traffic). */ if (status == IWN_TX_FAIL_TX_LOCKED && ni->ni_vap->iv_state == IEEE80211_S_AUTH) ieee80211_process_callback(ni, m, 0); else ieee80211_process_callback(ni, m, (status & IWN_TX_FAIL) != 0); } m_freem(m); ieee80211_free_node(ni); ring->queued--; sc->sc_tx_timer = 0; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; iwn_start_locked(ifp); } void iwn_cmd_intr(struct iwn_softc *sc, struct iwn_rx_desc *desc) { struct iwn_tx_ring *ring = &sc->txq[4]; struct iwn_tx_data *data; if ((desc->qid & 0xf) != 4) return; /* not a command ack */ data = &ring->data[desc->idx]; /* if the command was mapped in a mbuf, free it */ if (data->m != NULL) { bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } wakeup(&ring->cmd[desc->idx]); } static void iwn_bmiss(void *arg, int npending) { struct iwn_softc *sc = arg; struct ieee80211com *ic = sc->sc_ifp->if_l2com; ieee80211_beacon_miss(ic); } void iwn_notif_intr(struct iwn_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint16_t hw; hw = le16toh(sc->shared->closed_count) & 0xfff; while (sc->rxq.cur != hw) { struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur]; struct iwn_rx_desc *desc = (void *)data->m->m_ext.ext_buf; DPRINTF(sc, IWN_DEBUG_RECV, "%s: qid %x idx %d flags %x type %d(%s) len %d\n", __func__, desc->qid, desc->idx, desc->flags, desc->type, iwn_intr_str(desc->type), le16toh(desc->len)); if (!(desc->qid & 0x80)) /* reply to a command */ iwn_cmd_intr(sc, desc); switch (desc->type) { case IWN_RX_DONE: case IWN_AMPDU_RX_DONE: iwn_rx_intr(sc, desc, data); break; case IWN_AMPDU_RX_START: iwn_ampdu_rx_start(sc, desc); break; case IWN_TX_DONE: /* a 802.11 frame has been transmitted */ iwn_tx_intr(sc, desc); break; case IWN_RX_STATISTICS: case IWN_BEACON_STATISTICS: iwn_rx_statistics(sc, desc); break; case IWN_BEACON_MISSED: { struct iwn_beacon_missed *miss = (struct iwn_beacon_missed *)(desc + 1); int misses = le32toh(miss->consecutive); /* XXX not sure why we're notified w/ zero */ if (misses == 0) break; DPRINTF(sc, IWN_DEBUG_STATE, "%s: beacons missed %d/%d\n", __func__, misses, le32toh(miss->total)); /* * If more than 5 consecutive beacons are missed, * reinitialize the sensitivity state machine. 
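 * For reference, the thresholds below: more than 5 consecutive misses
 * only re-runs iwn_init_sensitivity(), and once the count reaches
 * vap->iv_bmissthreshold (set to 10 in iwn_vap_create()) the bmiss task
 * is queued so net80211 can run its normal beacon-miss handling.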
*/ if (vap->iv_state == IEEE80211_S_RUN && misses > 5) (void) iwn_init_sensitivity(sc); if (misses >= vap->iv_bmissthreshold) taskqueue_enqueue(taskqueue_swi, &sc->sc_bmiss_task); break; } case IWN_UC_READY: { struct iwn_ucode_info *uc = (struct iwn_ucode_info *)(desc + 1); /* the microcontroller is ready */ DPRINTF(sc, IWN_DEBUG_RESET, "microcode alive notification version=%d.%d " "subtype=%x alive=%x\n", uc->major, uc->minor, uc->subtype, le32toh(uc->valid)); if (le32toh(uc->valid) != 1) { device_printf(sc->sc_dev, "microcontroller initialization failed\n"); break; } if (uc->subtype == IWN_UCODE_INIT) { /* save microcontroller's report */ memcpy(&sc->ucode_info, uc, sizeof (*uc)); } break; } case IWN_STATE_CHANGED: { uint32_t *status = (uint32_t *)(desc + 1); /* * State change allows hardware switch change to be * noted. However, we handle this in iwn_intr as we * get both the enable/disable intr. */ DPRINTF(sc, IWN_DEBUG_INTR, "state changed to %x\n", le32toh(*status)); break; } case IWN_START_SCAN: { struct iwn_start_scan *scan = (struct iwn_start_scan *)(desc + 1); DPRINTF(sc, IWN_DEBUG_ANY, "%s: scanning channel %d status %x\n", __func__, scan->chan, le32toh(scan->status)); break; } case IWN_STOP_SCAN: { struct iwn_stop_scan *scan = (struct iwn_stop_scan *)(desc + 1); DPRINTF(sc, IWN_DEBUG_STATE, "scan finished nchan=%d status=%d chan=%d\n", scan->nchan, scan->status, scan->chan); iwn_queue_cmd(sc, IWN_SCAN_NEXT, 0, IWN_QUEUE_NORMAL); break; } } sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT; } /* tell the firmware what we have processed */ hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1; IWN_WRITE(sc, IWN_RX_WIDX, hw & ~7); } void iwn_intr(void *arg) { struct iwn_softc *sc = arg; uint32_t r1, r2; IWN_LOCK(sc); /* disable interrupts */ IWN_WRITE(sc, IWN_MASK, 0); r1 = IWN_READ(sc, IWN_INTR); r2 = IWN_READ(sc, IWN_INTR_STATUS); if (r1 == 0 && r2 == 0) { IWN_WRITE(sc, IWN_MASK, IWN_INTR_MASK); goto done; /* not for us */ } if (r1 == 0xffffffff) goto done; /* hardware gone */ /* ack interrupts */ IWN_WRITE(sc, IWN_INTR, r1); IWN_WRITE(sc, IWN_INTR_STATUS, r2); DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=%x reg2=%x\n", r1, r2); if (r1 & IWN_RF_TOGGLED) { uint32_t tmp = IWN_READ(sc, IWN_GPIO_CTL); device_printf(sc->sc_dev, "RF switch: radio %s\n", (tmp & IWN_GPIO_RF_ENABLED) ? "enabled" : "disabled"); if (tmp & IWN_GPIO_RF_ENABLED) iwn_queue_cmd(sc, IWN_RADIO_ENABLE, 0, IWN_QUEUE_CLEAR); else iwn_queue_cmd(sc, IWN_RADIO_DISABLE, 0, IWN_QUEUE_CLEAR); } if (r1 & IWN_CT_REACHED) device_printf(sc->sc_dev, "critical temperature reached!\n"); if (r1 & (IWN_SW_ERROR | IWN_HW_ERROR)) { device_printf(sc->sc_dev, "error, INTR=%b STATUS=0x%x\n", r1, IWN_INTR_BITS, r2); iwn_queue_cmd(sc, IWN_REINIT, 0, IWN_QUEUE_CLEAR); goto done; } if ((r1 & (IWN_RX_INTR | IWN_SW_RX_INTR)) || (r2 & IWN_RX_STATUS_INTR)) iwn_notif_intr(sc); if (r1 & IWN_ALIVE_INTR) wakeup(sc); /* re-enable interrupts */ IWN_WRITE(sc, IWN_MASK, IWN_INTR_MASK); done: IWN_UNLOCK(sc); } uint8_t iwn_plcp_signal(int rate) { switch (rate) { /* CCK rates (returned values are device-dependent) */ case 2: return 10; case 4: return 20; case 11: return 55; case 22: return 110; /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 
14 Table 80) */ /* R1-R4, (u)ral is R4-R1 */ case 12: return 0xd; case 18: return 0xf; case 24: return 0x5; case 36: return 0x7; case 48: return 0x9; case 72: return 0xb; case 96: return 0x1; case 108: return 0x3; case 120: return 0x3; } /* unknown rate (should not get there) */ return 0; } /* determine if a given rate is CCK or OFDM */ #define IWN_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22) int iwn_tx_data(struct iwn_softc *sc, struct mbuf *m0, struct ieee80211_node *ni, struct iwn_tx_ring *ring) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = sc->sc_ifp; const struct ieee80211_txparam *tp; struct iwn_tx_desc *desc; struct iwn_tx_data *data; struct iwn_tx_cmd *cmd; struct iwn_cmd_data *tx; struct ieee80211_frame *wh; struct ieee80211_key *k; bus_addr_t paddr; uint32_t flags; uint16_t timeout; uint8_t type; u_int hdrlen; struct mbuf *mnew; int rate, error, pad, nsegs, i, ismcast, id; bus_dma_segment_t segs[IWN_MAX_SCATTER]; IWN_LOCK_ASSERT(sc); wh = mtod(m0, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); hdrlen = ieee80211_anyhdrsize(wh); /* pick a tx rate */ /* XXX ni_chan */ tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; if (type == IEEE80211_FC0_TYPE_MGT) rate = tp->mgmtrate; else if (ismcast) rate = tp->mcastrate; else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) rate = tp->ucastrate; else { (void) ieee80211_amrr_choose(ni, &IWN_NODE(ni)->amn); rate = ni->ni_txrate; } if (wh->i_fc[1] & IEEE80211_FC1_WEP) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } else k = NULL; if (bpf_peers_present(ifp->if_bpf)) { struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; if (k != NULL) tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; bpf_mtap2(ifp->if_bpf, tap, sc->sc_txtap_len, m0); } flags = IWN_TX_AUTO_SEQ; /* XXX honor ACM */ if (!ismcast) flags |= IWN_TX_NEED_ACK; if (ismcast || type != IEEE80211_FC0_TYPE_DATA) id = IWN_ID_BROADCAST; else id = IWN_ID_BSS; /* check if RTS/CTS or CTS-to-self protection must be used */ if (!ismcast) { /* multicast frames are not sent at OFDM rates in 802.11b/g */ if (m0->m_pkthdr.len+IEEE80211_CRC_LEN > vap->iv_rtsthreshold) { flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP; } else if ((ic->ic_flags & IEEE80211_F_USEPROT) && IWN_RATE_IS_OFDM(rate)) { if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP; else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP; } } if (type == IEEE80211_FC0_TYPE_MGT) { uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; /* tell h/w to set timestamp in probe responses */ if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) flags |= IWN_TX_INSERT_TSTAMP; if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) timeout = htole16(3); else timeout = htole16(2); } else timeout = htole16(0); if (hdrlen & 3) { /* first segment's length must be a multiple of 4 */ flags |= IWN_TX_NEED_PADDING; pad = 4 - (hdrlen & 3); } else pad = 0; desc = &ring->desc[ring->cur]; data = &ring->data[ring->cur]; cmd = &ring->cmd[ring->cur]; cmd->code = IWN_CMD_TX_DATA; cmd->flags = 0; cmd->qid = ring->qid; cmd->idx = ring->cur; tx = (struct iwn_cmd_data *)cmd->data; /* NB: no need to bzero tx, all fields are reinitialized here */ tx->id 
= id; tx->flags = htole32(flags); tx->len = htole16(m0->m_pkthdr.len); tx->rate = iwn_plcp_signal(rate); tx->rts_ntries = 60; /* XXX? */ tx->data_ntries = 15; /* XXX? */ tx->lifetime = htole32(IWN_LIFETIME_INFINITE); tx->timeout = timeout; if (k != NULL) { /* XXX fill in */; } else tx->security = 0; /* XXX alternate between Ant A and Ant B ? */ tx->rflags = IWN_RFLAG_ANT_B; if (tx->id == IWN_ID_BROADCAST) { tx->ridx = IWN_MAX_TX_RETRIES - 1; if (!IWN_RATE_IS_OFDM(rate)) tx->rflags |= IWN_RFLAG_CCK; } else { tx->ridx = 0; /* tell adapter to ignore rflags */ tx->flags |= htole32(IWN_TX_USE_NODE_RATE); } /* copy and trim IEEE802.11 header */ memcpy((uint8_t *)(tx + 1), wh, hdrlen); m_adj(m0, hdrlen); error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m0, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { if (error == EFBIG) { /* too many fragments, linearize */ mnew = m_collapse(m0, M_DONTWAIT, IWN_MAX_SCATTER); if (mnew == NULL) { IWN_UNLOCK(sc); device_printf(sc->sc_dev, "%s: could not defrag mbuf\n", __func__); m_freem(m0); return ENOBUFS; } m0 = mnew; error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m0, segs, &nsegs, BUS_DMA_NOWAIT); } if (error != 0) { IWN_UNLOCK(sc); device_printf(sc->sc_dev, "%s: bus_dmamap_load_mbuf_sg failed, error %d\n", __func__, error); m_freem(m0); return error; } } data->m = m0; data->ni = ni; DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", __func__, ring->qid, ring->cur, m0->m_pkthdr.len, nsegs); paddr = ring->cmd_dma.paddr + ring->cur * sizeof (struct iwn_tx_cmd); tx->loaddr = htole32(paddr + 4 + offsetof(struct iwn_cmd_data, ntries)); tx->hiaddr = 0; /* limit to 32-bit physical addresses */ /* first scatter/gather segment is used by the tx data command */ IWN_SET_DESC_NSEGS(desc, 1 + nsegs); IWN_SET_DESC_SEG(desc, 0, paddr, 4 + sizeof (*tx) + hdrlen + pad); for (i = 1; i <= nsegs; i++) { IWN_SET_DESC_SEG(desc, i, segs[i - 1].ds_addr, segs[i - 1].ds_len); } sc->shared->len[ring->qid][ring->cur] = htole16(hdrlen + m0->m_pkthdr.len + 8); if (ring->cur < IWN_TX_WINDOW) sc->shared->len[ring->qid][ring->cur + IWN_TX_RING_COUNT] = htole16(hdrlen + m0->m_pkthdr.len + 8); ring->queued++; /* kick Tx ring */ ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; IWN_WRITE(sc, IWN_TX_WIDX, ring->qid << 8 | ring->cur); ifp->if_opackets++; sc->sc_tx_timer = 5; return 0; } void iwn_start(struct ifnet *ifp) { struct iwn_softc *sc = ifp->if_softc; IWN_LOCK(sc); iwn_start_locked(ifp); IWN_UNLOCK(sc); } void iwn_start_locked(struct ifnet *ifp) { struct iwn_softc *sc = ifp->if_softc; struct ieee80211_node *ni; struct iwn_tx_ring *txq; struct mbuf *m; int pri; IWN_LOCK_ASSERT(sc); for (;;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; pri = M_WME_GETAC(m); txq = &sc->txq[pri]; m = ieee80211_encap(ni, m); if (m == NULL) { ifp->if_oerrors++; ieee80211_free_node(ni); continue; } if (txq->queued >= IWN_TX_RING_COUNT - 8) { /* XXX not right */ /* ring is nearly full, stop flow */ ifp->if_drv_flags |= IFF_DRV_OACTIVE; } if (iwn_tx_data(sc, m, ni, txq) != 0) { ifp->if_oerrors++; ieee80211_free_node(ni); IWN_UNLOCK(sc); break; } } } static int iwn_tx_handoff(struct iwn_softc *sc, struct iwn_tx_ring *ring, struct iwn_tx_cmd *cmd, struct iwn_cmd_data *tx, struct ieee80211_node *ni, struct mbuf *m0, u_int hdrlen, int pad) { struct ifnet *ifp = sc->sc_ifp; struct iwn_tx_desc *desc; struct iwn_tx_data *data; bus_addr_t paddr; struct mbuf *mnew; int error, nsegs, i; bus_dma_segment_t 
segs[IWN_MAX_SCATTER]; /* copy and trim IEEE802.11 header */ memcpy((uint8_t *)(tx + 1), mtod(m0, uint8_t *), hdrlen); m_adj(m0, hdrlen); desc = &ring->desc[ring->cur]; data = &ring->data[ring->cur]; error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m0, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { if (error == EFBIG) { /* too many fragments, linearize */ mnew = m_collapse(m0, M_DONTWAIT, IWN_MAX_SCATTER); if (mnew == NULL) { IWN_UNLOCK(sc); device_printf(sc->sc_dev, "%s: could not defrag mbuf\n", __func__); m_freem(m0); return ENOBUFS; } m0 = mnew; error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m0, segs, &nsegs, BUS_DMA_NOWAIT); } if (error != 0) { IWN_UNLOCK(sc); device_printf(sc->sc_dev, "%s: bus_dmamap_load_mbuf_sg failed, error %d\n", __func__, error); m_freem(m0); return error; } } data->m = m0; data->ni = ni; DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d\n", __func__, ring->qid, ring->cur, m0->m_pkthdr.len, nsegs); paddr = ring->cmd_dma.paddr + ring->cur * sizeof (struct iwn_tx_cmd); tx->loaddr = htole32(paddr + 4 + offsetof(struct iwn_cmd_data, ntries)); tx->hiaddr = 0; /* limit to 32-bit physical addresses */ /* first scatter/gather segment is used by the tx data command */ IWN_SET_DESC_NSEGS(desc, 1 + nsegs); IWN_SET_DESC_SEG(desc, 0, paddr, 4 + sizeof (*tx) + hdrlen + pad); for (i = 1; i <= nsegs; i++) { IWN_SET_DESC_SEG(desc, i, segs[i - 1].ds_addr, segs[i - 1].ds_len); } sc->shared->len[ring->qid][ring->cur] = htole16(hdrlen + m0->m_pkthdr.len + 8); if (ring->cur < IWN_TX_WINDOW) sc->shared->len[ring->qid][ring->cur + IWN_TX_RING_COUNT] = htole16(hdrlen + m0->m_pkthdr.len + 8); ring->queued++; /* kick Tx ring */ ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; IWN_WRITE(sc, IWN_TX_WIDX, ring->qid << 8 | ring->cur); ifp->if_opackets++; sc->sc_tx_timer = 5; return 0; } static int iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m0, struct ieee80211_node *ni, struct iwn_tx_ring *ring, const struct ieee80211_bpf_params *params) { struct ifnet *ifp = sc->sc_ifp; struct iwn_tx_cmd *cmd; struct iwn_cmd_data *tx; struct ieee80211_frame *wh; uint32_t flags; uint8_t type, subtype; u_int hdrlen; int rate, pad; IWN_LOCK_ASSERT(sc); wh = mtod(m0, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; hdrlen = ieee80211_anyhdrsize(wh); flags = IWN_TX_AUTO_SEQ; if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) flags |= IWN_TX_NEED_ACK; if (params->ibp_flags & IEEE80211_BPF_RTS) flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP; if (params->ibp_flags & IEEE80211_BPF_CTS) flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP; if (type == IEEE80211_FC0_TYPE_MGT && subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) { /* tell h/w to set timestamp in probe responses */ flags |= IWN_TX_INSERT_TSTAMP; } if (hdrlen & 3) { /* first segment's length must be a multiple of 4 */ flags |= IWN_TX_NEED_PADDING; pad = 4 - (hdrlen & 3); } else pad = 0; /* pick a tx rate */ rate = params->ibp_rate0; if (bpf_peers_present(ifp->if_bpf)) { struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; bpf_mtap2(ifp->if_bpf, tap, sc->sc_txtap_len, m0); } cmd = &ring->cmd[ring->cur]; cmd->code = IWN_CMD_TX_DATA; cmd->flags = 0; cmd->qid = ring->qid; cmd->idx = ring->cur; tx = (struct iwn_cmd_data *)cmd->data; /* NB: no need to bzero tx, all fields are reinitialized here */ tx->id = IWN_ID_BROADCAST; tx->flags = htole32(flags); tx->len = htole16(m0->m_pkthdr.len); tx->rate = 
iwn_plcp_signal(rate); tx->rts_ntries = params->ibp_try1; /* XXX? */ tx->data_ntries = params->ibp_try0; tx->lifetime = htole32(IWN_LIFETIME_INFINITE); /* XXX use try count? */ if (type == IEEE80211_FC0_TYPE_MGT) { if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) tx->timeout = htole16(3); else tx->timeout = htole16(2); } else tx->timeout = htole16(0); tx->security = 0; /* XXX alternate between Ant A and Ant B ? */ tx->rflags = IWN_RFLAG_ANT_B; /* XXX params->ibp_pri >> 2 */ tx->ridx = IWN_MAX_TX_RETRIES - 1; if (!IWN_RATE_IS_OFDM(rate)) tx->rflags |= IWN_RFLAG_CCK; return iwn_tx_handoff(sc, ring, cmd, tx, ni, m0, hdrlen, pad); } static int iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = ic->ic_ifp; struct iwn_softc *sc = ifp->if_softc; struct iwn_tx_ring *txq; int error; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { ieee80211_free_node(ni); m_freem(m); return ENETDOWN; } IWN_LOCK(sc); if (params == NULL) txq = &sc->txq[M_WME_GETAC(m)]; else txq = &sc->txq[params->ibp_pri & 3]; if (txq->queued >= IWN_TX_RING_COUNT - 8) { /* XXX not right */ /* ring is nearly full, stop flow */ ifp->if_drv_flags |= IFF_DRV_OACTIVE; } if (params == NULL) { /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. */ error = iwn_tx_data(sc, m, ni, txq); } else { /* * Caller supplied explicit parameters to use in * sending the frame. */ error = iwn_tx_data_raw(sc, m, ni, txq, params); } if (error != 0) { /* NB: m is reclaimed on tx failure */ ieee80211_free_node(ni); ifp->if_oerrors++; } IWN_UNLOCK(sc); return error; } static void iwn_watchdog(struct iwn_softc *sc) { if (sc->sc_tx_timer > 0 && --sc->sc_tx_timer == 0) { struct ifnet *ifp = sc->sc_ifp; if_printf(ifp, "device timeout\n"); iwn_queue_cmd(sc, IWN_REINIT, 0, IWN_QUEUE_CLEAR); } } int iwn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct iwn_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct ifreq *ifr = (struct ifreq *) data; int error = 0, startall = 0; switch (cmd) { case SIOCSIFFLAGS: IWN_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { iwn_init_locked(sc); startall = 1; } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) iwn_stop_locked(sc); } IWN_UNLOCK(sc); if (startall) ieee80211_start_all(ic); break; case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; case SIOCGIFADDR: error = ether_ioctl(ifp, cmd, data); break; default: error = EINVAL; break; } return error; } void iwn_read_eeprom(struct iwn_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; char domain[4]; uint16_t val; int i, error; if ((error = iwn_eeprom_lock(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not lock EEPROM, error %d\n", __func__, error); return; } /* read and print regulatory domain */ iwn_read_prom_data(sc, IWN_EEPROM_DOMAIN, domain, 4); device_printf(sc->sc_dev,"Reg Domain: %.4s", domain); /* read and print MAC address */ iwn_read_prom_data(sc, IWN_EEPROM_MAC, ic->ic_myaddr, 6); printf(", address %s\n", ether_sprintf(ic->ic_myaddr)); /* read the list of authorized channels */ iwn_read_eeprom_channels(sc); /* read maximum allowed Tx power for 2GHz and 5GHz bands */ iwn_read_prom_data(sc, IWN_EEPROM_MAXPOW, &val, 2); sc->maxpwr2GHz = val & 0xff; sc->maxpwr5GHz = val >> 8; /* check that EEPROM values are correct */ if (sc->maxpwr5GHz < 20 || 
sc->maxpwr5GHz > 50) sc->maxpwr5GHz = 38; if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50) sc->maxpwr2GHz = 38; DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n", sc->maxpwr2GHz, sc->maxpwr5GHz); /* read voltage at which samples were taken */ iwn_read_prom_data(sc, IWN_EEPROM_VOLTAGE, &val, 2); sc->eeprom_voltage = (int16_t)le16toh(val); DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n", sc->eeprom_voltage); /* read power groups */ iwn_read_prom_data(sc, IWN_EEPROM_BANDS, sc->bands, sizeof sc->bands); #ifdef IWN_DEBUG if (sc->sc_debug & IWN_DEBUG_ANY) { for (i = 0; i < IWN_NBANDS; i++) iwn_print_power_group(sc, i); } #endif iwn_eeprom_unlock(sc); } struct iwn_chan_band { uint32_t addr; /* offset in EEPROM */ uint32_t flags; /* net80211 flags */ uint8_t nchan; #define IWN_MAX_CHAN_PER_BAND 14 uint8_t chan[IWN_MAX_CHAN_PER_BAND]; }; static void iwn_read_eeprom_band(struct iwn_softc *sc, const struct iwn_chan_band *band) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct iwn_eeprom_chan channels[IWN_MAX_CHAN_PER_BAND]; struct ieee80211_channel *c; int i, chan, flags; iwn_read_prom_data(sc, band->addr, channels, band->nchan * sizeof (struct iwn_eeprom_chan)); for (i = 0; i < band->nchan; i++) { if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) { DPRINTF(sc, IWN_DEBUG_RESET, "skip chan %d flags 0x%x maxpwr %d\n", band->chan[i], channels[i].flags, channels[i].maxpwr); continue; } chan = band->chan[i]; /* translate EEPROM flags to net80211 */ flags = 0; if ((channels[i].flags & IWN_EEPROM_CHAN_ACTIVE) == 0) flags |= IEEE80211_CHAN_PASSIVE; if ((channels[i].flags & IWN_EEPROM_CHAN_IBSS) == 0) flags |= IEEE80211_CHAN_NOADHOC; if (channels[i].flags & IWN_EEPROM_CHAN_RADAR) { flags |= IEEE80211_CHAN_DFS; /* XXX apparently IBSS may still be marked */ flags |= IEEE80211_CHAN_NOADHOC; } DPRINTF(sc, IWN_DEBUG_RESET, "add chan %d flags 0x%x maxpwr %d\n", chan, channels[i].flags, channels[i].maxpwr); c = &ic->ic_channels[ic->ic_nchans++]; c->ic_ieee = chan; c->ic_freq = ieee80211_ieee2mhz(chan, band->flags); c->ic_maxregpower = channels[i].maxpwr; c->ic_maxpower = 2*c->ic_maxregpower; if (band->flags & IEEE80211_CHAN_2GHZ) { /* G =>'s B is supported */ c->ic_flags = IEEE80211_CHAN_B | flags; c = &ic->ic_channels[ic->ic_nchans++]; c[0] = c[-1]; c->ic_flags = IEEE80211_CHAN_G | flags; } else { /* 5GHz band */ c->ic_flags = IEEE80211_CHAN_A | flags; } /* XXX no constraints on using HT20 */ /* add HT20, HT40 added separately */ c = &ic->ic_channels[ic->ic_nchans++]; c[0] = c[-1]; c->ic_flags |= IEEE80211_CHAN_HT20; /* XXX NARROW =>'s 1/2 and 1/4 width? */ } } static void iwn_read_eeprom_ht40(struct iwn_softc *sc, const struct iwn_chan_band *band) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct iwn_eeprom_chan channels[IWN_MAX_CHAN_PER_BAND]; struct ieee80211_channel *c, *cent, *extc; int i; iwn_read_prom_data(sc, band->addr, channels, band->nchan * sizeof (struct iwn_eeprom_chan)); for (i = 0; i < band->nchan; i++) { if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID) || !(channels[i].flags & IWN_EEPROM_CHAN_WIDE)) { DPRINTF(sc, IWN_DEBUG_RESET, "skip chan %d flags 0x%x maxpwr %d\n", band->chan[i], channels[i].flags, channels[i].maxpwr); continue; } /* * Each entry defines an HT40 channel pair; find the * center channel, then the extension channel above. 
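 * E.g. an entry for channel 36 in the 5GHz HT40 band yields the pair
 * (36, 40): the channel 36 entry is duplicated with IEEE80211_CHAN_HT40U
 * (extension above) and the channel 40 entry with IEEE80211_CHAN_HT40D
 * (extension below), each recording the other as ic_extieee.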
*/ cent = ieee80211_find_channel_byieee(ic, band->chan[i], band->flags & ~IEEE80211_CHAN_HT); if (cent == NULL) { /* XXX shouldn't happen */ device_printf(sc->sc_dev, "%s: no entry for channel %d\n", __func__, band->chan[i]); continue; } extc = ieee80211_find_channel(ic, cent->ic_freq+20, band->flags & ~IEEE80211_CHAN_HT); if (extc == NULL) { DPRINTF(sc, IWN_DEBUG_RESET, "skip chan %d, extension channel not found\n", band->chan[i]); continue; } DPRINTF(sc, IWN_DEBUG_RESET, "add ht40 chan %d flags 0x%x maxpwr %d\n", band->chan[i], channels[i].flags, channels[i].maxpwr); c = &ic->ic_channels[ic->ic_nchans++]; c[0] = cent[0]; c->ic_extieee = extc->ic_ieee; c->ic_flags &= ~IEEE80211_CHAN_HT; c->ic_flags |= IEEE80211_CHAN_HT40U; c = &ic->ic_channels[ic->ic_nchans++]; c[0] = extc[0]; c->ic_extieee = cent->ic_ieee; c->ic_flags &= ~IEEE80211_CHAN_HT; c->ic_flags |= IEEE80211_CHAN_HT40D; } } static void iwn_read_eeprom_channels(struct iwn_softc *sc) { #define N(a) (sizeof(a)/sizeof(a[0])) static const struct iwn_chan_band iwn_bands[] = { { IWN_EEPROM_BAND1, IEEE80211_CHAN_G, 14, { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 } }, { IWN_EEPROM_BAND2, IEEE80211_CHAN_A, 13, { 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16 } }, { IWN_EEPROM_BAND3, IEEE80211_CHAN_A, 12, { 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64 } }, { IWN_EEPROM_BAND4, IEEE80211_CHAN_A, 11, { 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 } }, { IWN_EEPROM_BAND5, IEEE80211_CHAN_A, 6, { 145, 149, 153, 157, 161, 165 } }, { IWN_EEPROM_BAND6, IEEE80211_CHAN_G | IEEE80211_CHAN_HT40, 7, { 1, 2, 3, 4, 5, 6, 7 } }, { IWN_EEPROM_BAND7, IEEE80211_CHAN_A | IEEE80211_CHAN_HT40, 11, { 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157 } } }; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; int i; /* read the list of authorized channels */ for (i = 0; i < N(iwn_bands)-2; i++) iwn_read_eeprom_band(sc, &iwn_bands[i]); for (; i < N(iwn_bands); i++) iwn_read_eeprom_ht40(sc, &iwn_bands[i]); ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans); #undef N } #ifdef IWN_DEBUG void iwn_print_power_group(struct iwn_softc *sc, int i) { struct iwn_eeprom_band *band = &sc->bands[i]; struct iwn_eeprom_chan_samples *chans = band->chans; int j, c; printf("===band %d===\n", i); printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi); printf("chan1 num=%d\n", chans[0].num); for (c = 0; c < IWN_NTXCHAINS; c++) { for (j = 0; j < IWN_NSAMPLES; j++) { printf("chain %d, sample %d: temp=%d gain=%d " "power=%d pa_det=%d\n", c, j, chans[0].samples[c][j].temp, chans[0].samples[c][j].gain, chans[0].samples[c][j].power, chans[0].samples[c][j].pa_det); } } printf("chan2 num=%d\n", chans[1].num); for (c = 0; c < IWN_NTXCHAINS; c++) { for (j = 0; j < IWN_NSAMPLES; j++) { printf("chain %d, sample %d: temp=%d gain=%d " "power=%d pa_det=%d\n", c, j, chans[1].samples[c][j].temp, chans[1].samples[c][j].gain, chans[1].samples[c][j].power, chans[1].samples[c][j].pa_det); } } } #endif /* * Send a command to the firmware. 
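 * NB: commands have Tx ring 4 to themselves; the payload is copied into
 * the pre-mapped iwn_tx_cmd slot, so no mbuf or extra DMA mapping is
 * needed here.  With async == 0 the caller sleeps on the command slot
 * until iwn_cmd_intr() wakes it (e.g. iwn_set_critical_temp()); async
 * callers such as iwn_set_led() return immediately.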
*/ int iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async) { struct iwn_tx_ring *ring = &sc->txq[4]; struct iwn_tx_desc *desc; struct iwn_tx_cmd *cmd; bus_addr_t paddr; IWN_LOCK_ASSERT(sc); KASSERT(size <= sizeof cmd->data, ("Command too big")); desc = &ring->desc[ring->cur]; cmd = &ring->cmd[ring->cur]; cmd->code = code; cmd->flags = 0; cmd->qid = ring->qid; cmd->idx = ring->cur; memcpy(cmd->data, buf, size); paddr = ring->cmd_dma.paddr + ring->cur * sizeof (struct iwn_tx_cmd); IWN_SET_DESC_NSEGS(desc, 1); IWN_SET_DESC_SEG(desc, 0, paddr, 4 + size); sc->shared->len[ring->qid][ring->cur] = htole16(8); if (ring->cur < IWN_TX_WINDOW) { sc->shared->len[ring->qid][ring->cur + IWN_TX_RING_COUNT] = htole16(8); } DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n", __func__, iwn_intr_str(cmd->code), cmd->code, cmd->flags, cmd->qid, cmd->idx); /* kick cmd ring */ ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; IWN_WRITE(sc, IWN_TX_WIDX, ring->qid << 8 | ring->cur); return async ? 0 : msleep(cmd, &sc->sc_mtx, PCATCH, "iwncmd", hz); } static const uint8_t iwn_ridx_to_plcp[] = { 10, 20, 55, 110, /* CCK */ 0xd, 0xf, 0x5, 0x7, 0x9, 0xb, 0x1, 0x3, 0x3 /* OFDM R1-R4 */ }; static const uint8_t iwn_siso_mcs_to_plcp[] = { 0, 0, 0, 0, /* CCK */ 0, 0, 1, 2, 3, 4, 5, 6, 7 /* HT */ }; static const uint8_t iwn_mimo_mcs_to_plcp[] = { 0, 0, 0, 0, /* CCK */ 8, 8, 9, 10, 11, 12, 13, 14, 15 /* HT */ }; static const uint8_t iwn_prev_ridx[] = { /* NB: allow fallback from CCK11 to OFDM9 and from OFDM6 to CCK5 */ 0, 0, 1, 5, /* CCK */ 2, 4, 3, 6, 7, 8, 9, 10, 10 /* OFDM */ }; /* * Configure hardware link parameters for the specified * node operating on the specified channel. */ int iwn_set_link_quality(struct iwn_softc *sc, uint8_t id, const struct ieee80211_channel *c, int async) { struct iwn_cmd_link_quality lq; int i, ridx; memset(&lq, 0, sizeof(lq)); lq.id = id; if (IEEE80211_IS_CHAN_HT(c)) { lq.mimo = 1; lq.ssmask = 0x1; } else lq.ssmask = 0x2; if (id == IWN_ID_BSS) ridx = IWN_RATE_OFDM54; else if (IEEE80211_IS_CHAN_A(c)) ridx = IWN_RATE_OFDM6; else ridx = IWN_RATE_CCK1; for (i = 0; i < IWN_MAX_TX_RETRIES; i++) { /* XXX toggle antenna for retry patterns */ if (IEEE80211_IS_CHAN_HT40(c)) { lq.table[i].rate = iwn_mimo_mcs_to_plcp[ridx] | IWN_RATE_MCS; lq.table[i].rflags = IWN_RFLAG_HT | IWN_RFLAG_HT40 | IWN_RFLAG_ANT_A; /* XXX shortGI */ } else if (IEEE80211_IS_CHAN_HT(c)) { lq.table[i].rate = iwn_siso_mcs_to_plcp[ridx] | IWN_RATE_MCS; lq.table[i].rflags = IWN_RFLAG_HT | IWN_RFLAG_ANT_A; /* XXX shortGI */ } else { lq.table[i].rate = iwn_ridx_to_plcp[ridx]; if (ridx <= IWN_RATE_CCK11) lq.table[i].rflags = IWN_RFLAG_CCK; lq.table[i].rflags |= IWN_RFLAG_ANT_B; } ridx = iwn_prev_ridx[ridx]; } lq.dsmask = 0x3; lq.ampdu_disable = 3; lq.ampdu_limit = htole16(4000); #ifdef IWN_DEBUG if (sc->sc_debug & IWN_DEBUG_STATE) { printf("%s: set link quality for node %d, mimo %d ssmask %d\n", __func__, id, lq.mimo, lq.ssmask); printf("%s:", __func__); for (i = 0; i < IWN_MAX_TX_RETRIES; i++) printf(" %d:%x", lq.table[i].rate, lq.table[i].rflags); printf("\n"); } #endif return iwn_cmd(sc, IWN_CMD_TX_LINK_QUALITY, &lq, sizeof(lq), async); } #if 0 /* * Install a pairwise key into the hardware. 
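* NB: currently compiled out; only CCMP pairwise keys would be programmed, group keys and other ciphers are not handled here.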
*/ int iwn_set_key(struct ieee80211com *ic, struct ieee80211_node *ni, const struct ieee80211_key *k) { struct iwn_softc *sc = ic->ic_softc; struct iwn_node_info node; if (k->k_flags & IEEE80211_KEY_GROUP) return 0; memset(&node, 0, sizeof node); switch (k->k_cipher) { case IEEE80211_CIPHER_CCMP: node.security = htole16(IWN_CIPHER_CCMP); memcpy(node.key, k->k_key, k->k_len); break; default: return 0; } node.id = IWN_ID_BSS; IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); node.control = IWN_NODE_UPDATE; node.flags = IWN_FLAG_SET_KEY; return iwn_cmd(sc, IWN_CMD_ADD_NODE, &node, sizeof node, 1); } #endif int iwn_wme_update(struct ieee80211com *ic) { #define IWN_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ #define IWN_TXOP_TO_US(v) (v<<5) struct iwn_softc *sc = ic->ic_ifp->if_softc; struct iwn_edca_params cmd; int i; memset(&cmd, 0, sizeof cmd); cmd.flags = htole32(IWN_EDCA_UPDATE); for (i = 0; i < WME_NUM_AC; i++) { const struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[i]; cmd.ac[i].aifsn = wmep->wmep_aifsn; cmd.ac[i].cwmin = htole16(IWN_EXP2(wmep->wmep_logcwmin)); cmd.ac[i].cwmax = htole16(IWN_EXP2(wmep->wmep_logcwmax)); cmd.ac[i].txoplimit = htole16(IWN_TXOP_TO_US(wmep->wmep_txopLimit)); } IWN_LOCK(sc); (void) iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1 /*async*/); IWN_UNLOCK(sc); return 0; #undef IWN_TXOP_TO_US #undef IWN_EXP2 } void iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on) { struct iwn_cmd_led led; led.which = which; led.unit = htole32(100000); /* on/off in unit of 100ms */ led.off = off; led.on = on; (void) iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1); } /* * Set the critical temperature at which the firmware will automatically stop * the radio transmitter. */ int iwn_set_critical_temp(struct iwn_softc *sc) { struct iwn_ucode_info *uc = &sc->ucode_info; struct iwn_critical_temp crit; uint32_t r1, r2, r3, temp; r1 = le32toh(uc->temp[0].chan20MHz); r2 = le32toh(uc->temp[1].chan20MHz); r3 = le32toh(uc->temp[2].chan20MHz); /* inverse function of iwn_get_temperature() */ temp = r2 + (IWN_CTOK(110) * (r3 - r1)) / 259; IWN_WRITE(sc, IWN_UCODE_CLR, IWN_CTEMP_STOP_RF); memset(&crit, 0, sizeof crit); crit.tempR = htole32(temp); DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %u\n", temp); return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0); } void iwn_enable_tsf(struct iwn_softc *sc, struct ieee80211_node *ni) { struct iwn_cmd_tsf tsf; uint64_t val, mod; memset(&tsf, 0, sizeof tsf); memcpy(&tsf.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); tsf.bintval = htole16(ni->ni_intval); tsf.lintval = htole16(10); /* XXX all wrong */ /* compute remaining time until next beacon */ val = (uint64_t)ni->ni_intval * 1024; /* msecs -> usecs */ DPRINTF(sc, IWN_DEBUG_ANY, "%s: val = %ju %s\n", __func__, val, val == 0 ? 
"correcting" : ""); if (val == 0) val = 1; mod = le64toh(tsf.tstamp) % val; tsf.binitval = htole32((uint32_t)(val - mod)); DPRINTF(sc, IWN_DEBUG_RESET, "TSF bintval=%u tstamp=%ju, init=%u\n", ni->ni_intval, le64toh(tsf.tstamp), (uint32_t)(val - mod)); if (iwn_cmd(sc, IWN_CMD_TSF, &tsf, sizeof tsf, 1) != 0) device_printf(sc->sc_dev, "%s: could not enable TSF\n", __func__); } void iwn_power_calibration(struct iwn_softc *sc, int temp) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; #if 0 KASSERT(ic->ic_state == IEEE80211_S_RUN, ("not running")); #endif DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n", __func__, sc->temp, temp); /* adjust Tx power if need be (delta >= 3°C) */ if (abs(temp - sc->temp) < 3) return; sc->temp = temp; DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: set Tx power for channel %d\n", __func__, ieee80211_chan2ieee(ic, ic->ic_bsschan)); if (iwn_set_txpower(sc, ic->ic_bsschan, 1) != 0) { /* just warn, too bad for the automatic calibration... */ device_printf(sc->sc_dev, "%s: could not adjust Tx power\n", __func__); } } /* * Set Tx power for a given channel (each rate has its own power settings). * This function takes into account the regulatory information from EEPROM, * the current temperature and the current voltage. */ int iwn_set_txpower(struct iwn_softc *sc, struct ieee80211_channel *ch, int async) { /* fixed-point arithmetic division using a n-bit fractional part */ #define fdivround(a, b, n) \ ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) /* linear interpolation */ #define interpolate(x, x1, y1, x2, y2, n) \ ((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 }; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct iwn_ucode_info *uc = &sc->ucode_info; struct iwn_cmd_txpower cmd; struct iwn_eeprom_chan_samples *chans; const uint8_t *rf_gain, *dsp_gain; int32_t vdiff, tdiff; int i, c, grp, maxpwr; u_int chan; /* get channel number */ chan = ieee80211_chan2ieee(ic, ch); memset(&cmd, 0, sizeof cmd); cmd.band = IEEE80211_IS_CHAN_5GHZ(ch) ? 
0 : 1; cmd.chan = chan; if (IEEE80211_IS_CHAN_5GHZ(ch)) { maxpwr = sc->maxpwr5GHz; rf_gain = iwn_rf_gain_5ghz; dsp_gain = iwn_dsp_gain_5ghz; } else { maxpwr = sc->maxpwr2GHz; rf_gain = iwn_rf_gain_2ghz; dsp_gain = iwn_dsp_gain_2ghz; } /* compute voltage compensation */ vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7; if (vdiff > 0) vdiff *= 2; if (abs(vdiff) > 2) vdiff = 0; DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n", __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage); /* get channel's attenuation group */ if (chan <= 20) /* 1-20 */ grp = 4; else if (chan <= 43) /* 34-43 */ grp = 0; else if (chan <= 70) /* 44-70 */ grp = 1; else if (chan <= 124) /* 71-124 */ grp = 2; else /* 125-200 */ grp = 3; DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, "%s: chan %d, attenuation group=%d\n", __func__, chan, grp); /* get channel's sub-band */ for (i = 0; i < IWN_NBANDS; i++) if (sc->bands[i].lo != 0 && sc->bands[i].lo <= chan && chan <= sc->bands[i].hi) break; chans = sc->bands[i].chans; DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, "%s: chan %d sub-band=%d\n", __func__, chan, i); for (c = 0; c < IWN_NTXCHAINS; c++) { uint8_t power, gain, temp; int maxchpwr, pwr, ridx, idx; power = interpolate(chan, chans[0].num, chans[0].samples[c][1].power, chans[1].num, chans[1].samples[c][1].power, 1); gain = interpolate(chan, chans[0].num, chans[0].samples[c][1].gain, chans[1].num, chans[1].samples[c][1].gain, 1); temp = interpolate(chan, chans[0].num, chans[0].samples[c][1].temp, chans[1].num, chans[1].samples[c][1].temp, 1); DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, "%s: Tx chain %d: power=%d gain=%d temp=%d\n", __func__, c, power, gain, temp); /* compute temperature compensation */ tdiff = ((sc->temp - temp) * 2) / tdiv[grp]; DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n", __func__, tdiff, sc->temp, temp); for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) { maxchpwr = ch->ic_maxpower; if ((ridx / 8) & 1) { /* MIMO: decrease Tx power (-3dB) */ maxchpwr -= 6; } pwr = maxpwr - 10; /* decrease power for highest OFDM rates */ if ((ridx % 8) == 5) /* 48Mbit/s */ pwr -= 5; else if ((ridx % 8) == 6) /* 54Mbit/s */ pwr -= 7; else if ((ridx % 8) == 7) /* 60Mbit/s */ pwr -= 10; if (pwr > maxchpwr) pwr = maxchpwr; idx = gain - (pwr - power) - tdiff - vdiff; if ((ridx / 8) & 1) /* MIMO */ idx += (int32_t)le32toh(uc->atten[grp][c]); if (cmd.band == 0) idx += 9; /* 5GHz */ if (ridx == IWN_RIDX_MAX) idx += 5; /* CCK */ /* make sure idx stays in a valid range */ if (idx < 0) idx = 0; else if (idx > IWN_MAX_PWR_INDEX) idx = IWN_MAX_PWR_INDEX; DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, "%s: Tx chain %d, rate idx %d: power=%d\n", __func__, c, ridx, idx); cmd.power[ridx].rf_gain[c] = rf_gain[idx]; cmd.power[ridx].dsp_gain[c] = dsp_gain[idx]; } } DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, "%s: set tx power for chan %d\n", __func__, chan); return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async); #undef interpolate #undef fdivround } /* * Get the best (maximum) RSSI among the * connected antennas and convert to dBm. 
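* The strongest of the per-antenna samples is used; dBm is obtained by subtracting the AGC reading and the fixed IWN_RSSI_TO_DBM offset.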
*/ int8_t iwn_get_rssi(struct iwn_softc *sc, const struct iwn_rx_stat *stat) { int mask, agc, rssi; mask = (le16toh(stat->antenna) >> 4) & 0x7; agc = (le16toh(stat->agc) >> 7) & 0x7f; rssi = 0; #if 0 if (mask & (1 << 0)) /* Ant A */ rssi = max(rssi, stat->rssi[0]); if (mask & (1 << 1)) /* Ant B */ rssi = max(rssi, stat->rssi[2]); if (mask & (1 << 2)) /* Ant C */ rssi = max(rssi, stat->rssi[4]); #else rssi = max(rssi, stat->rssi[0]); rssi = max(rssi, stat->rssi[2]); rssi = max(rssi, stat->rssi[4]); #endif DPRINTF(sc, IWN_DEBUG_RECV, "%s: agc %d mask 0x%x rssi %d %d %d " "result %d\n", __func__, agc, mask, stat->rssi[0], stat->rssi[2], stat->rssi[4], rssi - agc - IWN_RSSI_TO_DBM); return rssi - agc - IWN_RSSI_TO_DBM; } /* * Get the average noise among Rx antennas (in dBm). */ int iwn_get_noise(const struct iwn_rx_general_stats *stats) { int i, total, nbant, noise; total = nbant = 0; for (i = 0; i < 3; i++) { noise = le32toh(stats->noise[i]) & 0xff; if (noise != 0) { total += noise; nbant++; } } /* there should be at least one antenna but check anyway */ return (nbant == 0) ? -127 : (total / nbant) - 107; } /* * Read temperature (in degC) from the on-board thermal sensor. */ int iwn_get_temperature(struct iwn_softc *sc) { struct iwn_ucode_info *uc = &sc->ucode_info; int32_t r1, r2, r3, r4, temp; r1 = le32toh(uc->temp[0].chan20MHz); r2 = le32toh(uc->temp[1].chan20MHz); r3 = le32toh(uc->temp[2].chan20MHz); r4 = le32toh(sc->rawtemp); if (r1 == r3) /* prevents division by 0 (should not happen) */ return 0; /* sign-extend 23-bit R4 value to 32-bit */ r4 = (r4 << 8) >> 8; /* compute temperature */ temp = (259 * (r4 - r2)) / (r3 - r1); temp = (temp * 97) / 100 + 8; return IWN_KTOC(temp); } /* * Initialize sensitivity calibration state machine. */ int iwn_init_sensitivity(struct iwn_softc *sc) { struct iwn_calib_state *calib = &sc->calib; struct iwn_phy_calib_cmd cmd; int error; /* reset calibration state */ memset(calib, 0, sizeof (*calib)); calib->state = IWN_CALIB_STATE_INIT; calib->cck_state = IWN_CCK_STATE_HIFA; /* initial values taken from the reference driver */ calib->corr_ofdm_x1 = 105; calib->corr_ofdm_mrc_x1 = 220; calib->corr_ofdm_x4 = 90; calib->corr_ofdm_mrc_x4 = 170; calib->corr_cck_x4 = 125; calib->corr_cck_mrc_x4 = 200; calib->energy_cck = 100; /* write initial sensitivity values */ error = iwn_send_sensitivity(sc); if (error != 0) return error; memset(&cmd, 0, sizeof cmd); cmd.code = IWN_SET_DIFF_GAIN; /* differential gains initially set to 0 for all 3 antennas */ DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: calibrate phy\n", __func__); return iwn_cmd(sc, IWN_PHY_CALIB, &cmd, sizeof cmd, 1); } /* * Collect noise and RSSI statistics for the first 20 beacons received * after association and use them to determine connected antennas and * set differential gains. 
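* An antenna counts as connected when its accumulated RSSI is within 15 units per beacon of the best one; each connected chain then gets a gain offset (capped at 3) derived from its noise excess over the quietest connected chain.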
*/ void iwn_compute_differential_gain(struct iwn_softc *sc, const struct iwn_rx_general_stats *stats) { struct iwn_calib_state *calib = &sc->calib; struct iwn_phy_calib_cmd cmd; int i, val; /* accumulate RSSI and noise for all 3 antennas */ for (i = 0; i < 3; i++) { calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff; calib->noise[i] += le32toh(stats->noise[i]) & 0xff; } /* we update differential gain only once after 20 beacons */ if (++calib->nbeacons < 20) return; /* determine antenna with highest average RSSI */ val = max(calib->rssi[0], calib->rssi[1]); val = max(calib->rssi[2], val); /* determine which antennas are connected */ sc->antmsk = 0; for (i = 0; i < 3; i++) if (val - calib->rssi[i] <= 15 * 20) sc->antmsk |= 1 << i; /* if neither Ant A and Ant B are connected.. */ if ((sc->antmsk & (1 << 0 | 1 << 1)) == 0) sc->antmsk |= 1 << 1; /* ..mark Ant B as connected! */ /* get minimal noise among connected antennas */ val = INT_MAX; /* ok, there's at least one */ for (i = 0; i < 3; i++) if (sc->antmsk & (1 << i)) val = min(calib->noise[i], val); memset(&cmd, 0, sizeof cmd); cmd.code = IWN_SET_DIFF_GAIN; /* set differential gains for connected antennas */ for (i = 0; i < 3; i++) { if (sc->antmsk & (1 << i)) { cmd.gain[i] = (calib->noise[i] - val) / 30; /* limit differential gain to 3 */ cmd.gain[i] = min(cmd.gain[i], 3); cmd.gain[i] |= IWN_GAIN_SET; } } DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: set differential gains Ant A/B/C: %x/%x/%x (%x)\n", __func__,cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->antmsk); if (iwn_cmd(sc, IWN_PHY_CALIB, &cmd, sizeof cmd, 1) == 0) calib->state = IWN_CALIB_STATE_RUN; } /* * Tune RF Rx sensitivity based on the number of false alarms detected * during the last beacon period. */ void iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats) { #define inc_clip(val, inc, max) \ if ((val) < (max)) { \ if ((val) < (max) - (inc)) \ (val) += (inc); \ else \ (val) = (max); \ needs_update = 1; \ } #define dec_clip(val, dec, min) \ if ((val) > (min)) { \ if ((val) > (min) + (dec)) \ (val) -= (dec); \ else \ (val) = (min); \ needs_update = 1; \ } struct iwn_calib_state *calib = &sc->calib; uint32_t val, rxena, fa; uint32_t energy[3], energy_min; uint8_t noise[3], noise_ref; int i, needs_update = 0; /* check that we've been enabled long enough */ if ((rxena = le32toh(stats->general.load)) == 0) return; /* compute number of false alarms since last call for OFDM */ fa = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm; fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm; fa *= 200 * 1024; /* 200TU */ /* save counters values for next call */ calib->bad_plcp_ofdm = le32toh(stats->ofdm.bad_plcp); calib->fa_ofdm = le32toh(stats->ofdm.fa); if (fa > 50 * rxena) { /* high false alarm count, decrease sensitivity */ DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: OFDM high false alarm count: %u\n", __func__, fa); inc_clip(calib->corr_ofdm_x1, 1, 140); inc_clip(calib->corr_ofdm_mrc_x1, 1, 270); inc_clip(calib->corr_ofdm_x4, 1, 120); inc_clip(calib->corr_ofdm_mrc_x4, 1, 210); } else if (fa < 5 * rxena) { /* low false alarm count, increase sensitivity */ DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: OFDM low false alarm count: %u\n", __func__, fa); dec_clip(calib->corr_ofdm_x1, 1, 105); dec_clip(calib->corr_ofdm_mrc_x1, 1, 220); dec_clip(calib->corr_ofdm_x4, 1, 85); dec_clip(calib->corr_ofdm_mrc_x4, 1, 170); } /* compute maximum noise among 3 antennas */ for (i = 0; i < 3; i++) noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff; val = max(noise[0], noise[1]); val = max(noise[2], 
val); /* insert it into our samples table */ calib->noise_samples[calib->cur_noise_sample] = val; calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20; /* compute maximum noise among last 20 samples */ noise_ref = calib->noise_samples[0]; for (i = 1; i < 20; i++) noise_ref = max(noise_ref, calib->noise_samples[i]); /* compute maximum energy among 3 antennas */ for (i = 0; i < 3; i++) energy[i] = le32toh(stats->general.energy[i]); val = min(energy[0], energy[1]); val = min(energy[2], val); /* insert it into our samples table */ calib->energy_samples[calib->cur_energy_sample] = val; calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10; /* compute minimum energy among last 10 samples */ energy_min = calib->energy_samples[0]; for (i = 1; i < 10; i++) energy_min = max(energy_min, calib->energy_samples[i]); energy_min += 6; /* compute number of false alarms since last call for CCK */ fa = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck; fa += le32toh(stats->cck.fa) - calib->fa_cck; fa *= 200 * 1024; /* 200TU */ /* save counters values for next call */ calib->bad_plcp_cck = le32toh(stats->cck.bad_plcp); calib->fa_cck = le32toh(stats->cck.fa); if (fa > 50 * rxena) { /* high false alarm count, decrease sensitivity */ DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: CCK high false alarm count: %u\n", __func__, fa); calib->cck_state = IWN_CCK_STATE_HIFA; calib->low_fa = 0; if (calib->corr_cck_x4 > 160) { calib->noise_ref = noise_ref; if (calib->energy_cck > 2) dec_clip(calib->energy_cck, 2, energy_min); } if (calib->corr_cck_x4 < 160) { calib->corr_cck_x4 = 161; needs_update = 1; } else inc_clip(calib->corr_cck_x4, 3, 200); inc_clip(calib->corr_cck_mrc_x4, 3, 400); } else if (fa < 5 * rxena) { /* low false alarm count, increase sensitivity */ DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: CCK low false alarm count: %u\n", __func__, fa); calib->cck_state = IWN_CCK_STATE_LOFA; calib->low_fa++; if (calib->cck_state != 0 && ((calib->noise_ref - noise_ref) > 2 || calib->low_fa > 100)) { inc_clip(calib->energy_cck, 2, 97); dec_clip(calib->corr_cck_x4, 3, 125); dec_clip(calib->corr_cck_mrc_x4, 3, 200); } } else { /* not worth to increase or decrease sensitivity */ DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: CCK normal false alarm count: %u\n", __func__, fa); calib->low_fa = 0; calib->noise_ref = noise_ref; if (calib->cck_state == IWN_CCK_STATE_HIFA) { /* previous interval had many false alarms */ dec_clip(calib->energy_cck, 8, energy_min); } calib->cck_state = IWN_CCK_STATE_INIT; } if (needs_update) (void)iwn_send_sensitivity(sc); #undef dec_clip #undef inc_clip } int iwn_send_sensitivity(struct iwn_softc *sc) { struct iwn_calib_state *calib = &sc->calib; struct iwn_sensitivity_cmd cmd; memset(&cmd, 0, sizeof cmd); cmd.which = IWN_SENSITIVITY_WORKTBL; /* OFDM modulation */ cmd.corr_ofdm_x1 = htole16(calib->corr_ofdm_x1); cmd.corr_ofdm_mrc_x1 = htole16(calib->corr_ofdm_mrc_x1); cmd.corr_ofdm_x4 = htole16(calib->corr_ofdm_x4); cmd.corr_ofdm_mrc_x4 = htole16(calib->corr_ofdm_mrc_x4); cmd.energy_ofdm = htole16(100); cmd.energy_ofdm_th = htole16(62); /* CCK modulation */ cmd.corr_cck_x4 = htole16(calib->corr_cck_x4); cmd.corr_cck_mrc_x4 = htole16(calib->corr_cck_mrc_x4); cmd.energy_cck = htole16(calib->energy_cck); /* Barker modulation: use default values */ cmd.corr_barker = htole16(190); cmd.corr_barker_mrc = htole16(390); DPRINTF(sc, IWN_DEBUG_RESET, "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__, calib->corr_ofdm_x1, calib->corr_ofdm_mrc_x1, calib->corr_ofdm_x4, calib->corr_ofdm_mrc_x4, 
calib->corr_cck_x4, calib->corr_cck_mrc_x4, calib->energy_cck); return iwn_cmd(sc, IWN_SENSITIVITY, &cmd, sizeof cmd, 1); } int iwn_auth(struct iwn_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); /*XXX*/ struct ieee80211_node *ni = vap->iv_bss; struct iwn_node_info node; int error; sc->calib.state = IWN_CALIB_STATE_INIT; /* update adapter's configuration */ sc->config.associd = 0; IEEE80211_ADDR_COPY(sc->config.bssid, ni->ni_bssid); sc->config.chan = htole16(ieee80211_chan2ieee(ic, ni->ni_chan)); sc->config.flags = htole32(IWN_CONFIG_TSF); if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) sc->config.flags |= htole32(IWN_CONFIG_AUTO | IWN_CONFIG_24GHZ); if (IEEE80211_IS_CHAN_A(ni->ni_chan)) { sc->config.cck_mask = 0; sc->config.ofdm_mask = 0x15; } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) { sc->config.cck_mask = 0x03; sc->config.ofdm_mask = 0; } else { /* XXX assume 802.11b/g */ sc->config.cck_mask = 0x0f; sc->config.ofdm_mask = 0x15; } if (ic->ic_flags & IEEE80211_F_SHSLOT) sc->config.flags |= htole32(IWN_CONFIG_SHSLOT); if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) sc->config.flags |= htole32(IWN_CONFIG_SHPREAMBLE); sc->config.filter &= ~htole32(IWN_FILTER_BSS); DPRINTF(sc, IWN_DEBUG_STATE, "%s: config chan %d mode %d flags 0x%x cck 0x%x ofdm 0x%x " "ht_single 0x%x ht_dual 0x%x rxchain 0x%x " "myaddr %6D wlap %6D bssid %6D associd %d filter 0x%x\n", __func__, le16toh(sc->config.chan), sc->config.mode, le32toh(sc->config.flags), sc->config.cck_mask, sc->config.ofdm_mask, sc->config.ht_single_mask, sc->config.ht_dual_mask, le16toh(sc->config.rxchain), sc->config.myaddr, ":", sc->config.wlap, ":", sc->config.bssid, ":", le16toh(sc->config.associd), le32toh(sc->config.filter)); error = iwn_cmd(sc, IWN_CMD_CONFIGURE, &sc->config, sizeof (struct iwn_config), 1); if (error != 0) { device_printf(sc->sc_dev, "%s: could not configure, error %d\n", __func__, error); return error; } sc->sc_curchan = ic->ic_curchan; /* configuration has changed, set Tx power accordingly */ error = iwn_set_txpower(sc, ni->ni_chan, 1); if (error != 0) { device_printf(sc->sc_dev, "%s: could not set Tx power, error %d\n", __func__, error); return error; } /* * Reconfiguring clears the adapter's nodes table so we must * add the broadcast node again. */ memset(&node, 0, sizeof node); IEEE80211_ADDR_COPY(node.macaddr, ifp->if_broadcastaddr); node.id = IWN_ID_BROADCAST; DPRINTF(sc, IWN_DEBUG_STATE, "%s: add broadcast node\n", __func__); error = iwn_cmd(sc, IWN_CMD_ADD_NODE, &node, sizeof node, 1); if (error != 0) { device_printf(sc->sc_dev, "%s: could not add broadcast node, error %d\n", __func__, error); return error; } error = iwn_set_link_quality(sc, node.id, ic->ic_curchan, 1); if (error != 0) { device_printf(sc->sc_dev, "%s: could not setup MRR for broadcast node, error %d\n", __func__, error); return error; } return 0; } /* * Configure the adapter for associated state. 
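* This pushes the association id and the negotiated slot/preamble/HT parameters to the firmware, re-adds the BSS node and (re)starts sensitivity calibration.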
*/ int iwn_run(struct iwn_softc *sc) { #define MS(v,x) (((v) & x) >> x##_S) struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); /*XXX*/ struct ieee80211_node *ni = vap->iv_bss; struct iwn_node_info node; int error, maxrxampdu, ampdudensity; sc->calib.state = IWN_CALIB_STATE_INIT; if (ic->ic_opmode == IEEE80211_M_MONITOR) { /* link LED blinks while monitoring */ iwn_set_led(sc, IWN_LED_LINK, 5, 5); return 0; } iwn_enable_tsf(sc, ni); /* update adapter's configuration */ sc->config.associd = htole16(IEEE80211_AID(ni->ni_associd)); /* short preamble/slot time are negotiated when associating */ sc->config.flags &= ~htole32(IWN_CONFIG_SHPREAMBLE | IWN_CONFIG_SHSLOT); if (ic->ic_flags & IEEE80211_F_SHSLOT) sc->config.flags |= htole32(IWN_CONFIG_SHSLOT); if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) sc->config.flags |= htole32(IWN_CONFIG_SHPREAMBLE); if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) { sc->config.flags &= ~htole32(IWN_CONFIG_HT); if (IEEE80211_IS_CHAN_HT40U(ni->ni_chan)) sc->config.flags |= htole32(IWN_CONFIG_HT40U); else if (IEEE80211_IS_CHAN_HT40D(ni->ni_chan)) sc->config.flags |= htole32(IWN_CONFIG_HT40D); else sc->config.flags |= htole32(IWN_CONFIG_HT20); sc->config.rxchain = htole16( (3 << IWN_RXCHAIN_VALID_S) | (3 << IWN_RXCHAIN_MIMO_CNT_S) | (1 << IWN_RXCHAIN_CNT_S) | IWN_RXCHAIN_MIMO_FORCE); maxrxampdu = MS(ni->ni_htparam, IEEE80211_HTCAP_MAXRXAMPDU); ampdudensity = MS(ni->ni_htparam, IEEE80211_HTCAP_MPDUDENSITY); } else maxrxampdu = ampdudensity = 0; sc->config.filter |= htole32(IWN_FILTER_BSS); DPRINTF(sc, IWN_DEBUG_STATE, "%s: config chan %d mode %d flags 0x%x cck 0x%x ofdm 0x%x " "ht_single 0x%x ht_dual 0x%x rxchain 0x%x " "myaddr %6D wlap %6D bssid %6D associd %d filter 0x%x\n", __func__, le16toh(sc->config.chan), sc->config.mode, le32toh(sc->config.flags), sc->config.cck_mask, sc->config.ofdm_mask, sc->config.ht_single_mask, sc->config.ht_dual_mask, le16toh(sc->config.rxchain), sc->config.myaddr, ":", sc->config.wlap, ":", sc->config.bssid, ":", le16toh(sc->config.associd), le32toh(sc->config.filter)); error = iwn_cmd(sc, IWN_CMD_CONFIGURE, &sc->config, sizeof (struct iwn_config), 1); if (error != 0) { device_printf(sc->sc_dev, "%s: could not update configuration, error %d\n", __func__, error); return error; } sc->sc_curchan = ni->ni_chan; /* configuration has changed, set Tx power accordingly */ error = iwn_set_txpower(sc, ni->ni_chan, 1); if (error != 0) { device_printf(sc->sc_dev, "%s: could not set Tx power, error %d\n", __func__, error); return error; } /* add BSS node */ memset(&node, 0, sizeof node); IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); node.id = IWN_ID_BSS; node.htflags = htole32( (maxrxampdu << IWN_MAXRXAMPDU_S) | (ampdudensity << IWN_MPDUDENSITY_S)); DPRINTF(sc, IWN_DEBUG_STATE, "%s: add BSS node, id %d htflags 0x%x\n", __func__, node.id, le32toh(node.htflags)); error = iwn_cmd(sc, IWN_CMD_ADD_NODE, &node, sizeof node, 1); if (error != 0) { device_printf(sc->sc_dev,"could not add BSS node\n"); return error; } error = iwn_set_link_quality(sc, node.id, ni->ni_chan, 1); if (error != 0) { device_printf(sc->sc_dev, "%s: could not setup MRR for node %d, error %d\n", __func__, node.id, error); return error; } if (ic->ic_opmode == IEEE80211_M_STA) { /* fake a join to init the tx rate */ iwn_newassoc(ni, 1); } error = iwn_init_sensitivity(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: could not set sensitivity, error %d\n", __func__, error); return error; } /* start/restart periodic 
calibration timer */ sc->calib.state = IWN_CALIB_STATE_ASSOC; iwn_calib_reset(sc); /* link LED always on while associated */ iwn_set_led(sc, IWN_LED_LINK, 0, 1); return 0; #undef MS } /* * Send a scan request to the firmware. Since this command is huge, we map it * into a mbuf instead of using the pre-allocated set of commands. */ int iwn_scan(struct iwn_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211_scan_state *ss = ic->ic_scan; /*XXX*/ struct iwn_tx_ring *ring = &sc->txq[4]; struct iwn_tx_desc *desc; struct iwn_tx_data *data; struct iwn_tx_cmd *cmd; struct iwn_cmd_data *tx; struct iwn_scan_hdr *hdr; struct iwn_scan_essid *essid; struct iwn_scan_chan *chan; struct ieee80211_frame *wh; struct ieee80211_rateset *rs; struct ieee80211_channel *c; enum ieee80211_phymode mode; uint8_t *frm; int pktlen, error, nrates; bus_addr_t physaddr; desc = &ring->desc[ring->cur]; data = &ring->data[ring->cur]; /* XXX malloc */ data->m = m_getcl(M_DONTWAIT, MT_DATA, 0); if (data->m == NULL) { device_printf(sc->sc_dev, "%s: could not allocate mbuf for scan command\n", __func__); return ENOMEM; } cmd = mtod(data->m, struct iwn_tx_cmd *); cmd->code = IWN_CMD_SCAN; cmd->flags = 0; cmd->qid = ring->qid; cmd->idx = ring->cur; hdr = (struct iwn_scan_hdr *)cmd->data; memset(hdr, 0, sizeof (struct iwn_scan_hdr)); /* XXX use scan state */ /* * Move to the next channel if no packets are received within 5 msecs * after sending the probe request (this helps to reduce the duration * of active scans). */ hdr->quiet = htole16(5); /* timeout in milliseconds */ hdr->plcp_threshold = htole16(1); /* min # of packets */ /* select Ant B and Ant C for scanning */ hdr->rxchain = htole16(0x3e1 | (7 << IWN_RXCHAIN_VALID_S)); tx = (struct iwn_cmd_data *)(hdr + 1); memset(tx, 0, sizeof (struct iwn_cmd_data)); tx->flags = htole32(IWN_TX_AUTO_SEQ | 0x200); /* XXX */ tx->id = IWN_ID_BROADCAST; tx->lifetime = htole32(IWN_LIFETIME_INFINITE); tx->rflags = IWN_RFLAG_ANT_B; if (IEEE80211_IS_CHAN_A(ic->ic_curchan)) { hdr->crc_threshold = htole16(1); /* send probe requests at 6Mbps */ tx->rate = iwn_ridx_to_plcp[IWN_RATE_OFDM6]; } else { hdr->flags = htole32(IWN_CONFIG_24GHZ | IWN_CONFIG_AUTO); /* send probe requests at 1Mbps */ tx->rate = iwn_ridx_to_plcp[IWN_RATE_CCK1]; tx->rflags |= IWN_RFLAG_CCK; } essid = (struct iwn_scan_essid *)(tx + 1); memset(essid, 0, 4 * sizeof (struct iwn_scan_essid)); essid[0].id = IEEE80211_ELEMID_SSID; essid[0].len = ss->ss_ssid[0].len; memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len); /* * Build a probe request frame. Most of the following code is a * copy & paste of what is done in net80211. 
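* The frame carries the SSID and (extended) supported rates elements and is followed by a single channel descriptor for the current channel.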
*/ wh = (struct ieee80211_frame *)&essid[4]; wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_REQ; wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr); IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr); IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr); *(u_int16_t *)&wh->i_dur[0] = 0; /* filled by h/w */ *(u_int16_t *)&wh->i_seq[0] = 0; /* filled by h/w */ frm = (uint8_t *)(wh + 1); /* add SSID IE */ *frm++ = IEEE80211_ELEMID_SSID; *frm++ = ss->ss_ssid[0].len; memcpy(frm, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len); frm += ss->ss_ssid[0].len; mode = ieee80211_chan2mode(ic->ic_curchan); rs = &ic->ic_sup_rates[mode]; /* add supported rates IE */ *frm++ = IEEE80211_ELEMID_RATES; nrates = rs->rs_nrates; if (nrates > IEEE80211_RATE_SIZE) nrates = IEEE80211_RATE_SIZE; *frm++ = nrates; memcpy(frm, rs->rs_rates, nrates); frm += nrates; /* add supported xrates IE */ if (rs->rs_nrates > IEEE80211_RATE_SIZE) { nrates = rs->rs_nrates - IEEE80211_RATE_SIZE; *frm++ = IEEE80211_ELEMID_XRATES; *frm++ = (uint8_t)nrates; memcpy(frm, rs->rs_rates + IEEE80211_RATE_SIZE, nrates); frm += nrates; } /* setup length of probe request */ tx->len = htole16(frm - (uint8_t *)wh); c = ic->ic_curchan; chan = (struct iwn_scan_chan *)frm; chan->chan = ieee80211_chan2ieee(ic, c); chan->flags = 0; if ((c->ic_flags & IEEE80211_CHAN_PASSIVE) == 0) { chan->flags |= IWN_CHAN_ACTIVE; if (ss->ss_nssid > 0) chan->flags |= IWN_CHAN_DIRECT; } chan->dsp_gain = 0x6e; if (IEEE80211_IS_CHAN_5GHZ(c)) { chan->rf_gain = 0x3b; chan->active = htole16(10); chan->passive = htole16(110); } else { chan->rf_gain = 0x28; chan->active = htole16(20); chan->passive = htole16(120); } DPRINTF(sc, IWN_DEBUG_STATE, "%s: chan %u flags 0x%x rf_gain 0x%x " "dsp_gain 0x%x active 0x%x passive 0x%x\n", __func__, chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain, chan->active, chan->passive); hdr->nchan++; chan++; frm += sizeof (struct iwn_scan_chan); hdr->len = htole16(frm - (uint8_t *)hdr); pktlen = frm - (uint8_t *)cmd; error = bus_dmamap_load(ring->data_dmat, data->map, cmd, pktlen, iwn_dma_map_addr, &physaddr, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "%s: could not map scan command, error %d\n", __func__, error); m_freem(data->m); data->m = NULL; return error; } IWN_SET_DESC_NSEGS(desc, 1); IWN_SET_DESC_SEG(desc, 0, physaddr, pktlen); sc->shared->len[ring->qid][ring->cur] = htole16(8); if (ring->cur < IWN_TX_WINDOW) sc->shared->len[ring->qid][ring->cur + IWN_TX_RING_COUNT] = htole16(8); bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); /* kick cmd ring */ ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; IWN_WRITE(sc, IWN_TX_WIDX, ring->qid << 8 | ring->cur); return 0; /* will be notified async. 
of failure/success */ } int iwn_config(struct iwn_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct iwn_power power; struct iwn_bluetooth bluetooth; struct iwn_node_info node; int error; /* set power mode */ memset(&power, 0, sizeof power); power.flags = htole16(IWN_POWER_CAM | 0x8); DPRINTF(sc, IWN_DEBUG_RESET, "%s: set power mode\n", __func__); error = iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &power, sizeof power, 0); if (error != 0) { device_printf(sc->sc_dev, "%s: could not set power mode, error %d\n", __func__, error); return error; } /* configure bluetooth coexistence */ memset(&bluetooth, 0, sizeof bluetooth); bluetooth.flags = 3; bluetooth.lead = 0xaa; bluetooth.kill = 1; DPRINTF(sc, IWN_DEBUG_RESET, "%s: config bluetooth coexistence\n", __func__); error = iwn_cmd(sc, IWN_CMD_BLUETOOTH, &bluetooth, sizeof bluetooth, 0); if (error != 0) { device_printf(sc->sc_dev, "%s: could not configure bluetooth coexistence, error %d\n", __func__, error); return error; } /* configure adapter */ memset(&sc->config, 0, sizeof (struct iwn_config)); IEEE80211_ADDR_COPY(sc->config.myaddr, ic->ic_myaddr); IEEE80211_ADDR_COPY(sc->config.wlap, ic->ic_myaddr); /* set default channel */ sc->config.chan = htole16(ieee80211_chan2ieee(ic, ic->ic_curchan)); sc->config.flags = htole32(IWN_CONFIG_TSF); if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) sc->config.flags |= htole32(IWN_CONFIG_AUTO | IWN_CONFIG_24GHZ); sc->config.filter = 0; switch (ic->ic_opmode) { case IEEE80211_M_STA: sc->config.mode = IWN_MODE_STA; sc->config.filter |= htole32(IWN_FILTER_MULTICAST); break; case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: sc->config.mode = IWN_MODE_IBSS; break; case IEEE80211_M_HOSTAP: sc->config.mode = IWN_MODE_HOSTAP; break; case IEEE80211_M_MONITOR: sc->config.mode = IWN_MODE_MONITOR; sc->config.filter |= htole32(IWN_FILTER_MULTICAST | IWN_FILTER_CTL | IWN_FILTER_PROMISC); break; default: break; } sc->config.cck_mask = 0x0f; /* not yet negotiated */ sc->config.ofdm_mask = 0xff; /* not yet negotiated */ sc->config.ht_single_mask = 0xff; sc->config.ht_dual_mask = 0xff; sc->config.rxchain = htole16(0x2800 | (7 << IWN_RXCHAIN_VALID_S)); DPRINTF(sc, IWN_DEBUG_STATE, "%s: config chan %d mode %d flags 0x%x cck 0x%x ofdm 0x%x " "ht_single 0x%x ht_dual 0x%x rxchain 0x%x " "myaddr %6D wlap %6D bssid %6D associd %d filter 0x%x\n", __func__, le16toh(sc->config.chan), sc->config.mode, le32toh(sc->config.flags), sc->config.cck_mask, sc->config.ofdm_mask, sc->config.ht_single_mask, sc->config.ht_dual_mask, le16toh(sc->config.rxchain), sc->config.myaddr, ":", sc->config.wlap, ":", sc->config.bssid, ":", le16toh(sc->config.associd), le32toh(sc->config.filter)); error = iwn_cmd(sc, IWN_CMD_CONFIGURE, &sc->config, sizeof (struct iwn_config), 0); if (error != 0) { device_printf(sc->sc_dev, "%s: configure command failed, error %d\n", __func__, error); return error; } sc->sc_curchan = ic->ic_curchan; /* configuration has changed, set Tx power accordingly */ error = iwn_set_txpower(sc, ic->ic_curchan, 0); if (error != 0) { device_printf(sc->sc_dev, "%s: could not set Tx power, error %d\n", __func__, error); return error; } /* add broadcast node */ memset(&node, 0, sizeof node); IEEE80211_ADDR_COPY(node.macaddr, ic->ic_ifp->if_broadcastaddr); node.id = IWN_ID_BROADCAST; node.rate = iwn_plcp_signal(2); DPRINTF(sc, IWN_DEBUG_RESET, "%s: add broadcast node\n", __func__); error = iwn_cmd(sc, IWN_CMD_ADD_NODE, &node, sizeof node, 0); if (error != 0) { device_printf(sc->sc_dev, "%s: could not add broadcast 
node, error %d\n", __func__, error); return error; } error = iwn_set_link_quality(sc, node.id, ic->ic_curchan, 0); if (error != 0) { device_printf(sc->sc_dev, "%s: could not setup MRR for node %d, error %d\n", __func__, node.id, error); return error; } error = iwn_set_critical_temp(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: could not set critical temperature, error %d\n", __func__, error); return error; } return 0; } /* * Do post-alive initialization of the NIC (after firmware upload). */ void iwn_post_alive(struct iwn_softc *sc) { uint32_t base; uint16_t offset; int qid; iwn_mem_lock(sc); /* clear SRAM */ base = iwn_mem_read(sc, IWN_SRAM_BASE); for (offset = 0x380; offset < 0x520; offset += 4) { IWN_WRITE(sc, IWN_MEM_WADDR, base + offset); IWN_WRITE(sc, IWN_MEM_WDATA, 0); } /* shared area is aligned on a 1K boundary */ iwn_mem_write(sc, IWN_SRAM_BASE, sc->shared_dma.paddr >> 10); iwn_mem_write(sc, IWN_SELECT_QCHAIN, 0); for (qid = 0; qid < IWN_NTXQUEUES; qid++) { iwn_mem_write(sc, IWN_QUEUE_RIDX(qid), 0); IWN_WRITE(sc, IWN_TX_WIDX, qid << 8 | 0); /* set sched. window size */ IWN_WRITE(sc, IWN_MEM_WADDR, base + IWN_QUEUE_OFFSET(qid)); IWN_WRITE(sc, IWN_MEM_WDATA, 64); /* set sched. frame limit */ IWN_WRITE(sc, IWN_MEM_WADDR, base + IWN_QUEUE_OFFSET(qid) + 4); IWN_WRITE(sc, IWN_MEM_WDATA, 10 << 16); } /* enable interrupts for all 16 queues */ iwn_mem_write(sc, IWN_QUEUE_INTR_MASK, 0xffff); /* identify active Tx rings (0-7) */ iwn_mem_write(sc, IWN_TX_ACTIVE, 0xff); /* mark Tx rings (4 EDCA + cmd + 2 HCCA) as active */ for (qid = 0; qid < 7; qid++) { iwn_mem_write(sc, IWN_TXQ_STATUS(qid), IWN_TXQ_STATUS_ACTIVE | qid << 1); } iwn_mem_unlock(sc); } void iwn_stop_master(struct iwn_softc *sc) { uint32_t tmp; int ntries; tmp = IWN_READ(sc, IWN_RESET); IWN_WRITE(sc, IWN_RESET, tmp | IWN_STOP_MASTER); tmp = IWN_READ(sc, IWN_GPIO_CTL); if ((tmp & IWN_GPIO_PWR_STATUS) == IWN_GPIO_PWR_SLEEP) return; /* already asleep */ for (ntries = 0; ntries < 100; ntries++) { if (IWN_READ(sc, IWN_RESET) & IWN_MASTER_DISABLED) break; DELAY(10); } if (ntries == 100) device_printf(sc->sc_dev, "%s: timeout waiting for master\n", __func__); } int iwn_reset(struct iwn_softc *sc) { uint32_t tmp; int ntries; /* clear any pending interrupts */ IWN_WRITE(sc, IWN_INTR, 0xffffffff); tmp = IWN_READ(sc, IWN_CHICKEN); IWN_WRITE(sc, IWN_CHICKEN, tmp | IWN_CHICKEN_DISLOS); tmp = IWN_READ(sc, IWN_GPIO_CTL); IWN_WRITE(sc, IWN_GPIO_CTL, tmp | IWN_GPIO_INIT); /* wait for clock stabilization */ for (ntries = 0; ntries < 1000; ntries++) { if (IWN_READ(sc, IWN_GPIO_CTL) & IWN_GPIO_CLOCK) break; DELAY(10); } if (ntries == 1000) { device_printf(sc->sc_dev, "%s: timeout waiting for clock stabilization\n", __func__); return ETIMEDOUT; } return 0; } void iwn_hw_config(struct iwn_softc *sc) { uint32_t tmp, hw; /* enable interrupts mitigation */ IWN_WRITE(sc, IWN_INTR_MIT, 512 / 32); /* voodoo from the reference driver */ tmp = pci_read_config(sc->sc_dev, PCIR_REVID,1); if ((tmp & 0x80) && (tmp & 0x7f) < 8) { /* enable "no snoop" field */ tmp = pci_read_config(sc->sc_dev, 0xe8, 1); tmp &= ~IWN_DIS_NOSNOOP; /* clear device specific PCI configuration register 0x41 */ pci_write_config(sc->sc_dev, 0xe8, tmp, 1); } /* disable L1 entry to work around a hardware bug */ tmp = pci_read_config(sc->sc_dev, 0xf0, 1); tmp &= ~IWN_ENA_L1; pci_write_config(sc->sc_dev, 0xf0, tmp, 1 ); hw = IWN_READ(sc, IWN_HWCONFIG); IWN_WRITE(sc, IWN_HWCONFIG, hw | 0x310); iwn_mem_lock(sc); tmp = iwn_mem_read(sc, IWN_MEM_POWER); iwn_mem_write(sc, IWN_MEM_POWER, tmp 
| IWN_POWER_RESET); DELAY(5); tmp = iwn_mem_read(sc, IWN_MEM_POWER); iwn_mem_write(sc, IWN_MEM_POWER, tmp & ~IWN_POWER_RESET); iwn_mem_unlock(sc); } void iwn_init_locked(struct iwn_softc *sc) { struct ifnet *ifp = sc->sc_ifp; uint32_t tmp; int error, qid; IWN_LOCK_ASSERT(sc); /* load the firmware */ if (sc->fw_fp == NULL && (error = iwn_load_firmware(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not load firmware, error %d\n", __func__, error); return; } error = iwn_reset(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: could not reset adapter, error %d\n", __func__, error); return; } iwn_mem_lock(sc); iwn_mem_read(sc, IWN_CLOCK_CTL); iwn_mem_write(sc, IWN_CLOCK_CTL, 0xa00); iwn_mem_read(sc, IWN_CLOCK_CTL); iwn_mem_unlock(sc); DELAY(20); iwn_mem_lock(sc); tmp = iwn_mem_read(sc, IWN_MEM_PCIDEV); iwn_mem_write(sc, IWN_MEM_PCIDEV, tmp | 0x800); iwn_mem_unlock(sc); iwn_mem_lock(sc); tmp = iwn_mem_read(sc, IWN_MEM_POWER); iwn_mem_write(sc, IWN_MEM_POWER, tmp & ~0x03000000); iwn_mem_unlock(sc); iwn_hw_config(sc); /* init Rx ring */ iwn_mem_lock(sc); IWN_WRITE(sc, IWN_RX_CONFIG, 0); IWN_WRITE(sc, IWN_RX_WIDX, 0); /* Rx ring is aligned on a 256-byte boundary */ IWN_WRITE(sc, IWN_RX_BASE, sc->rxq.desc_dma.paddr >> 8); /* shared area is aligned on a 16-byte boundary */ IWN_WRITE(sc, IWN_RW_WIDX_PTR, (sc->shared_dma.paddr + offsetof(struct iwn_shared, closed_count)) >> 4); IWN_WRITE(sc, IWN_RX_CONFIG, 0x80601000); iwn_mem_unlock(sc); IWN_WRITE(sc, IWN_RX_WIDX, (IWN_RX_RING_COUNT - 1) & ~7); iwn_mem_lock(sc); iwn_mem_write(sc, IWN_TX_ACTIVE, 0); /* set physical address of "keep warm" page */ IWN_WRITE(sc, IWN_KW_BASE, sc->kw_dma.paddr >> 4); /* init Tx rings */ for (qid = 0; qid < IWN_NTXQUEUES; qid++) { struct iwn_tx_ring *txq = &sc->txq[qid]; IWN_WRITE(sc, IWN_TX_BASE(qid), txq->desc_dma.paddr >> 8); IWN_WRITE(sc, IWN_TX_CONFIG(qid), 0x80000008); } iwn_mem_unlock(sc); /* clear "radio off" and "disable command" bits (reversed logic) */ IWN_WRITE(sc, IWN_UCODE_CLR, IWN_RADIO_OFF); IWN_WRITE(sc, IWN_UCODE_CLR, IWN_DISABLE_CMD); /* clear any pending interrupts */ IWN_WRITE(sc, IWN_INTR, 0xffffffff); /* enable interrupts */ IWN_WRITE(sc, IWN_MASK, IWN_INTR_MASK); /* not sure why/if this is necessary... */ IWN_WRITE(sc, IWN_UCODE_CLR, IWN_RADIO_OFF); IWN_WRITE(sc, IWN_UCODE_CLR, IWN_RADIO_OFF); /* check that the radio is not disabled by RF switch */ if (!(IWN_READ(sc, IWN_GPIO_CTL) & IWN_GPIO_RF_ENABLED)) { device_printf(sc->sc_dev, "radio is disabled by hardware switch\n"); return; } error = iwn_transfer_firmware(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: could not load firmware, error %d\n", __func__, error); return; } /* firmware has notified us that it is alive.. 
*/ iwn_post_alive(sc); /* ..do post alive initialization */ sc->rawtemp = sc->ucode_info.temp[3].chan20MHz; sc->temp = iwn_get_temperature(sc); DPRINTF(sc, IWN_DEBUG_RESET, "%s: temperature=%d\n", __func__, sc->temp); error = iwn_config(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: could not configure device, error %d\n", __func__, error); return; } ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_drv_flags |= IFF_DRV_RUNNING; } void iwn_init(void *arg) { struct iwn_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; IWN_LOCK(sc); iwn_init_locked(sc); IWN_UNLOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) ieee80211_start_all(ic); } void iwn_stop_locked(struct iwn_softc *sc) { struct ifnet *ifp = sc->sc_ifp; uint32_t tmp; int i; IWN_LOCK_ASSERT(sc); IWN_WRITE(sc, IWN_RESET, IWN_NEVO_RESET); sc->sc_tx_timer = 0; callout_stop(&sc->sc_timer_to); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); /* disable interrupts */ IWN_WRITE(sc, IWN_MASK, 0); IWN_WRITE(sc, IWN_INTR, 0xffffffff); IWN_WRITE(sc, IWN_INTR_STATUS, 0xffffffff); /* Clear any commands left in the taskq command buffer */ memset(sc->sc_cmd, 0, sizeof(sc->sc_cmd)); /* reset all Tx rings */ for (i = 0; i < IWN_NTXQUEUES; i++) iwn_reset_tx_ring(sc, &sc->txq[i]); /* reset Rx ring */ iwn_reset_rx_ring(sc, &sc->rxq); iwn_mem_lock(sc); iwn_mem_write(sc, IWN_MEM_CLOCK2, 0x200); iwn_mem_unlock(sc); DELAY(5); iwn_stop_master(sc); tmp = IWN_READ(sc, IWN_RESET); IWN_WRITE(sc, IWN_RESET, tmp | IWN_SW_RESET); } void iwn_stop(struct iwn_softc *sc) { IWN_LOCK(sc); iwn_stop_locked(sc); IWN_UNLOCK(sc); } /* * Callback from net80211 to start a scan. */ static void iwn_scan_start(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct iwn_softc *sc = ifp->if_softc; iwn_queue_cmd(sc, IWN_SCAN_START, 0, IWN_QUEUE_NORMAL); } /* * Callback from net80211 to terminate a scan. */ static void iwn_scan_end(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct iwn_softc *sc = ifp->if_softc; iwn_queue_cmd(sc, IWN_SCAN_STOP, 0, IWN_QUEUE_NORMAL); } /* * Callback from net80211 to force a channel change. */ static void iwn_set_channel(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct iwn_softc *sc = ifp->if_softc; const struct ieee80211_channel *c = ic->ic_curchan; if (c != sc->sc_curchan) { sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq); sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags); sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); iwn_queue_cmd(sc, IWN_SET_CHAN, 0, IWN_QUEUE_NORMAL); } } /* * Callback from net80211 to start scanning of the current channel. */ static void iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) { struct ieee80211vap *vap = ss->ss_vap; struct iwn_softc *sc = vap->iv_ic->ic_ifp->if_softc; iwn_queue_cmd(sc, IWN_SCAN_CURCHAN, 0, IWN_QUEUE_NORMAL); } /* * Callback from net80211 to handle the minimum dwell time being met. * The intent is to terminate the scan but we just let the firmware * notify us when it's finished as we have no safe way to abort it. */ static void iwn_scan_mindwell(struct ieee80211_scan_state *ss) { /* NB: don't try to abort scan; wait for firmware to finish */ } /* * Carry out work in the taskq context. 
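* Queued commands are processed one at a time with the driver lock held; a successful AUTH/RUN command completes the deferred net80211 state change.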
*/ static void iwn_ops(void *arg0, int pending) { struct iwn_softc *sc = arg0; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap; int cmd, arg, error; enum ieee80211_state nstate; for (;;) { IWN_CMD_LOCK(sc); cmd = sc->sc_cmd[sc->sc_cmd_cur]; if (cmd == 0) { /* No more commands to process */ IWN_CMD_UNLOCK(sc); return; } if ((sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 && cmd != IWN_RADIO_ENABLE ) { IWN_CMD_UNLOCK(sc); return; } arg = sc->sc_cmd_arg[sc->sc_cmd_cur]; sc->sc_cmd[sc->sc_cmd_cur] = 0; /* free the slot */ sc->sc_cmd_cur = (sc->sc_cmd_cur + 1) % IWN_CMD_MAXOPS; IWN_CMD_UNLOCK(sc); IWN_LOCK(sc); /* NB: sync debug printfs on smp */ DPRINTF(sc, IWN_DEBUG_OPS, "%s: %s (cmd 0x%x)\n", __func__, iwn_ops_str(cmd), cmd); vap = TAILQ_FIRST(&ic->ic_vaps); /* XXX */ switch (cmd) { case IWN_SCAN_START: /* make the link LED blink while we're scanning */ iwn_set_led(sc, IWN_LED_LINK, 20, 2); break; case IWN_SCAN_STOP: break; case IWN_SCAN_NEXT: ieee80211_scan_next(vap); break; case IWN_SCAN_CURCHAN: error = iwn_scan(sc); if (error != 0) { IWN_UNLOCK(sc); ieee80211_cancel_scan(vap); IWN_LOCK(sc); return; } break; case IWN_SET_CHAN: error = iwn_config(sc); if (error != 0) { DPRINTF(sc, IWN_DEBUG_STATE, "%s: set chan failed, cancel scan\n", __func__); IWN_UNLOCK(sc); //XXX Handle failed scan correctly ieee80211_cancel_scan(vap); return; } break; case IWN_AUTH: case IWN_RUN: if (cmd == IWN_AUTH) { error = iwn_auth(sc); nstate = IEEE80211_S_AUTH; } else { error = iwn_run(sc); nstate = IEEE80211_S_RUN; } if (error == 0) { IWN_UNLOCK(sc); IEEE80211_LOCK(ic); IWN_VAP(vap)->iv_newstate(vap, nstate, arg); if (vap->iv_newstate_cb != NULL) vap->iv_newstate_cb(vap, nstate, arg); IEEE80211_UNLOCK(ic); IWN_LOCK(sc); } else { device_printf(sc->sc_dev, "%s: %s state change failed, error %d\n", __func__, ieee80211_state_name[nstate], error); } break; case IWN_REINIT: IWN_UNLOCK(sc); iwn_init(sc); IWN_LOCK(sc); ieee80211_notify_radio(ic, 1); break; case IWN_RADIO_ENABLE: KASSERT(sc->fw_fp != NULL, ("Fware Not Loaded, can't load from tq")); IWN_UNLOCK(sc); iwn_init(sc); IWN_LOCK(sc); break; case IWN_RADIO_DISABLE: ieee80211_notify_radio(ic, 0); iwn_stop_locked(sc); break; } IWN_UNLOCK(sc); } } /* * Queue a command for execution in the taskq thread. * This is needed as the net80211 callbacks do not allow * sleeping, since we need to sleep to confirm commands have * been processed by the firmware, we must defer execution to * a sleep enabled thread. 
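* The queue is a small circular buffer (IWN_CMD_MAXOPS slots); a request is dropped with EBUSY when the next slot is still in use, and a clear request resets the buffer before queueing.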
*/ static int iwn_queue_cmd(struct iwn_softc *sc, int cmd, int arg, int clear) { IWN_CMD_LOCK(sc); if (clear) { sc->sc_cmd[0] = cmd; sc->sc_cmd_arg[0] = arg; sc->sc_cmd_cur = 0; sc->sc_cmd_next = 1; } else { if (sc->sc_cmd[sc->sc_cmd_next] != 0) { IWN_CMD_UNLOCK(sc); DPRINTF(sc, IWN_DEBUG_ANY, "%s: command %d dropped\n", __func__, cmd); return EBUSY; } sc->sc_cmd[sc->sc_cmd_next] = cmd; sc->sc_cmd_arg[sc->sc_cmd_next] = arg; sc->sc_cmd_next = (sc->sc_cmd_next + 1) % IWN_CMD_MAXOPS; } taskqueue_enqueue(sc->sc_tq, &sc->sc_ops_task); IWN_CMD_UNLOCK(sc); return 0; } static void iwn_bpfattach(struct iwn_softc *sc) { struct ifnet *ifp = sc->sc_ifp; bpfattach(ifp, DLT_IEEE802_11_RADIO, sizeof (struct ieee80211_frame) + sizeof (sc->sc_txtap)); sc->sc_rxtap_len = sizeof sc->sc_rxtap; sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len); sc->sc_rxtap.wr_ihdr.it_present = htole32(IWN_RX_RADIOTAP_PRESENT); sc->sc_txtap_len = sizeof sc->sc_txtap; sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len); sc->sc_txtap.wt_ihdr.it_present = htole32(IWN_TX_RADIOTAP_PRESENT); } static void iwn_sysctlattach(struct iwn_softc *sc) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); #ifdef IWN_DEBUG sc->sc_debug = 0; SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, 0, "control debugging printfs"); #endif } #ifdef IWN_DEBUG static const char * iwn_ops_str(int cmd) { switch (cmd) { case IWN_SCAN_START: return "SCAN_START"; case IWN_SCAN_CURCHAN: return "SCAN_CURCHAN"; case IWN_SCAN_STOP: return "SCAN_STOP"; case IWN_SET_CHAN: return "SET_CHAN"; case IWN_AUTH: return "AUTH"; case IWN_SCAN_NEXT: return "SCAN_NEXT"; case IWN_RUN: return "RUN"; case IWN_RADIO_ENABLE: return "RADIO_ENABLE"; case IWN_RADIO_DISABLE: return "RADIO_DISABLE"; case IWN_REINIT: return "REINIT"; } return "UNKNOWN COMMAND"; } static const char * iwn_intr_str(uint8_t cmd) { switch (cmd) { /* Notifications */ case IWN_UC_READY: return "UC_READY"; case IWN_ADD_NODE_DONE: return "ADD_NODE_DONE"; case IWN_TX_DONE: return "TX_DONE"; case IWN_START_SCAN: return "START_SCAN"; case IWN_STOP_SCAN: return "STOP_SCAN"; case IWN_RX_STATISTICS: return "RX_STATS"; case IWN_BEACON_STATISTICS: return "BEACON_STATS"; case IWN_STATE_CHANGED: return "STATE_CHANGED"; case IWN_BEACON_MISSED: return "BEACON_MISSED"; case IWN_AMPDU_RX_START: return "AMPDU_RX_START"; case IWN_AMPDU_RX_DONE: return "AMPDU_RX_DONE"; case IWN_RX_DONE: return "RX_DONE"; /* Command Notifications */ case IWN_CMD_CONFIGURE: return "IWN_CMD_CONFIGURE"; case IWN_CMD_ASSOCIATE: return "IWN_CMD_ASSOCIATE"; case IWN_CMD_EDCA_PARAMS: return "IWN_CMD_EDCA_PARAMS"; case IWN_CMD_TSF: return "IWN_CMD_TSF"; case IWN_CMD_TX_LINK_QUALITY: return "IWN_CMD_TX_LINK_QUALITY"; case IWN_CMD_SET_LED: return "IWN_CMD_SET_LED"; case IWN_CMD_SET_POWER_MODE: return "IWN_CMD_SET_POWER_MODE"; case IWN_CMD_SCAN: return "IWN_CMD_SCAN"; case IWN_CMD_TXPOWER: return "IWN_CMD_TXPOWER"; case IWN_CMD_BLUETOOTH: return "IWN_CMD_BLUETOOTH"; case IWN_CMD_SET_CRITICAL_TEMP: return "IWN_CMD_SET_CRITICAL_TEMP"; case IWN_SENSITIVITY: return "IWN_SENSITIVITY"; case IWN_PHY_CALIB: return "IWN_PHY_CALIB"; } return "UNKNOWN INTR NOTIF/CMD"; } #endif /* IWN_DEBUG */ static device_method_t iwn_methods[] = { /* Device interface */ DEVMETHOD(device_probe, iwn_probe), DEVMETHOD(device_attach, iwn_attach), DEVMETHOD(device_detach, iwn_detach), DEVMETHOD(device_shutdown, iwn_shutdown), DEVMETHOD(device_suspend, iwn_suspend), 
DEVMETHOD(device_resume, iwn_resume), { 0, 0 } }; static driver_t iwn_driver = { "iwn", iwn_methods, sizeof (struct iwn_softc) }; static devclass_t iwn_devclass; DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, 0, 0); MODULE_DEPEND(iwn, pci, 1, 1, 1); MODULE_DEPEND(iwn, firmware, 1, 1, 1); MODULE_DEPEND(iwn, wlan, 1, 1, 1); MODULE_DEPEND(iwn, wlan_amrr, 1, 1, 1); diff --git a/sys/dev/ral/rt2560.c b/sys/dev/ral/rt2560.c index d012b8f9dd14..013a0e209fd7 100644 --- a/sys/dev/ral/rt2560.c +++ b/sys/dev/ral/rt2560.c @@ -1,2861 +1,2862 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2005, 2006 * Damien Bergamini * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /*- * Ralink Technology RT2560 chipset driver * http://www.ralinktech.com/ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define RT2560_RSSI(sc, rssi) \ ((rssi) > (RT2560_NOISE_FLOOR + (sc)->rssi_corr) ? \ ((rssi) - RT2560_NOISE_FLOOR - (sc)->rssi_corr) : 0) #define RAL_DEBUG #ifdef RAL_DEBUG #define DPRINTF(sc, fmt, ...) do { \ if (sc->sc_debug > 0) \ printf(fmt, __VA_ARGS__); \ } while (0) #define DPRINTFN(sc, n, fmt, ...) do { \ if (sc->sc_debug >= (n)) \ printf(fmt, __VA_ARGS__); \ } while (0) #else #define DPRINTF(sc, fmt, ...) #define DPRINTFN(sc, n, fmt, ...) 
#endif static struct ieee80211vap *rt2560_vap_create(struct ieee80211com *, const char name[IFNAMSIZ], int unit, int opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]); static void rt2560_vap_delete(struct ieee80211vap *); static void rt2560_dma_map_addr(void *, bus_dma_segment_t *, int, int); static int rt2560_alloc_tx_ring(struct rt2560_softc *, struct rt2560_tx_ring *, int); static void rt2560_reset_tx_ring(struct rt2560_softc *, struct rt2560_tx_ring *); static void rt2560_free_tx_ring(struct rt2560_softc *, struct rt2560_tx_ring *); static int rt2560_alloc_rx_ring(struct rt2560_softc *, struct rt2560_rx_ring *, int); static void rt2560_reset_rx_ring(struct rt2560_softc *, struct rt2560_rx_ring *); static void rt2560_free_rx_ring(struct rt2560_softc *, struct rt2560_rx_ring *); -static struct ieee80211_node *rt2560_node_alloc( - struct ieee80211_node_table *); +static struct ieee80211_node *rt2560_node_alloc(struct ieee80211vap *, + const uint8_t [IEEE80211_ADDR_LEN]); static void rt2560_newassoc(struct ieee80211_node *, int); static int rt2560_newstate(struct ieee80211vap *, enum ieee80211_state, int); static uint16_t rt2560_eeprom_read(struct rt2560_softc *, uint8_t); static void rt2560_encryption_intr(struct rt2560_softc *); static void rt2560_tx_intr(struct rt2560_softc *); static void rt2560_prio_intr(struct rt2560_softc *); static void rt2560_decryption_intr(struct rt2560_softc *); static void rt2560_rx_intr(struct rt2560_softc *); static void rt2560_beacon_update(struct ieee80211vap *, int item); static void rt2560_beacon_expire(struct rt2560_softc *); static void rt2560_wakeup_expire(struct rt2560_softc *); static void rt2560_scan_start(struct ieee80211com *); static void rt2560_scan_end(struct ieee80211com *); static void rt2560_set_channel(struct ieee80211com *); static void rt2560_setup_tx_desc(struct rt2560_softc *, struct rt2560_tx_desc *, uint32_t, int, int, int, bus_addr_t); static int rt2560_tx_bcn(struct rt2560_softc *, struct mbuf *, struct ieee80211_node *); static int rt2560_tx_mgt(struct rt2560_softc *, struct mbuf *, struct ieee80211_node *); static int rt2560_tx_data(struct rt2560_softc *, struct mbuf *, struct ieee80211_node *); static void rt2560_start_locked(struct ifnet *); static void rt2560_start(struct ifnet *); static void rt2560_watchdog(void *); static int rt2560_ioctl(struct ifnet *, u_long, caddr_t); static void rt2560_bbp_write(struct rt2560_softc *, uint8_t, uint8_t); static uint8_t rt2560_bbp_read(struct rt2560_softc *, uint8_t); static void rt2560_rf_write(struct rt2560_softc *, uint8_t, uint32_t); static void rt2560_set_chan(struct rt2560_softc *, struct ieee80211_channel *); #if 0 static void rt2560_disable_rf_tune(struct rt2560_softc *); #endif static void rt2560_enable_tsf_sync(struct rt2560_softc *); static void rt2560_update_plcp(struct rt2560_softc *); static void rt2560_update_slot(struct ifnet *); static void rt2560_set_basicrates(struct rt2560_softc *); static void rt2560_update_led(struct rt2560_softc *, int, int); static void rt2560_set_bssid(struct rt2560_softc *, const uint8_t *); static void rt2560_set_macaddr(struct rt2560_softc *, uint8_t *); static void rt2560_get_macaddr(struct rt2560_softc *, uint8_t *); static void rt2560_update_promisc(struct ifnet *); static const char *rt2560_get_rf(int); static void rt2560_read_config(struct rt2560_softc *); static int rt2560_bbp_init(struct rt2560_softc *); static void rt2560_set_txantenna(struct rt2560_softc *, int); static void 
rt2560_set_rxantenna(struct rt2560_softc *, int); static void rt2560_init_locked(struct rt2560_softc *); static void rt2560_init(void *); static void rt2560_stop_locked(struct rt2560_softc *); static int rt2560_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static const struct { uint32_t reg; uint32_t val; } rt2560_def_mac[] = { RT2560_DEF_MAC }; static const struct { uint8_t reg; uint8_t val; } rt2560_def_bbp[] = { RT2560_DEF_BBP }; static const uint32_t rt2560_rf2522_r2[] = RT2560_RF2522_R2; static const uint32_t rt2560_rf2523_r2[] = RT2560_RF2523_R2; static const uint32_t rt2560_rf2524_r2[] = RT2560_RF2524_R2; static const uint32_t rt2560_rf2525_r2[] = RT2560_RF2525_R2; static const uint32_t rt2560_rf2525_hi_r2[] = RT2560_RF2525_HI_R2; static const uint32_t rt2560_rf2525e_r2[] = RT2560_RF2525E_R2; static const uint32_t rt2560_rf2526_r2[] = RT2560_RF2526_R2; static const uint32_t rt2560_rf2526_hi_r2[] = RT2560_RF2526_HI_R2; static const struct { uint8_t chan; uint32_t r1, r2, r4; } rt2560_rf5222[] = { RT2560_RF5222 }; int rt2560_attach(device_t dev, int id) { struct rt2560_softc *sc = device_get_softc(dev); struct ieee80211com *ic; struct ifnet *ifp; int error; uint8_t bands; sc->sc_dev = dev; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); callout_init_mtx(&sc->watchdog_ch, &sc->sc_mtx, 0); /* retrieve RT2560 rev. no */ sc->asic_rev = RAL_READ(sc, RT2560_CSR0); /* retrieve RF rev. no and various other things from EEPROM */ rt2560_read_config(sc); device_printf(dev, "MAC/BBP RT2560 (rev 0x%02x), RF %s\n", sc->asic_rev, rt2560_get_rf(sc->rf_rev)); /* * Allocate Tx and Rx rings. */ error = rt2560_alloc_tx_ring(sc, &sc->txq, RT2560_TX_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Tx ring\n"); goto fail1; } error = rt2560_alloc_tx_ring(sc, &sc->atimq, RT2560_ATIM_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate ATIM ring\n"); goto fail2; } error = rt2560_alloc_tx_ring(sc, &sc->prioq, RT2560_PRIO_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Prio ring\n"); goto fail3; } error = rt2560_alloc_tx_ring(sc, &sc->bcnq, RT2560_BEACON_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Beacon ring\n"); goto fail4; } error = rt2560_alloc_rx_ring(sc, &sc->rxq, RT2560_RX_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Rx ring\n"); goto fail5; } ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { device_printf(sc->sc_dev, "can not if_alloc()\n"); goto fail6; } ic = ifp->if_l2com; /* retrieve MAC address */ rt2560_get_macaddr(sc, ic->ic_myaddr); ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = rt2560_init; ifp->if_ioctl = rt2560_ioctl; ifp->if_start = rt2560_start; IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN; IFQ_SET_READY(&ifp->if_snd); ic->ic_ifp = ifp; ic->ic_opmode = IEEE80211_M_STA; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode */ | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ | IEEE80211_C_HOSTAP /* hostap mode */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_AHDEMO /* adhoc demo mode */ | IEEE80211_C_WDS /* 4-address traffic works */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short 
slot time supported */ | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ #ifdef notyet | IEEE80211_C_TXFRAG /* handle tx frags */ #endif ; bands = 0; setbit(&bands, IEEE80211_MODE_11B); setbit(&bands, IEEE80211_MODE_11G); if (sc->rf_rev == RT2560_RF_5222) setbit(&bands, IEEE80211_MODE_11A); ieee80211_init_channels(ic, NULL, &bands); ieee80211_ifattach(ic); ic->ic_newassoc = rt2560_newassoc; ic->ic_raw_xmit = rt2560_raw_xmit; ic->ic_updateslot = rt2560_update_slot; ic->ic_update_promisc = rt2560_update_promisc; ic->ic_node_alloc = rt2560_node_alloc; ic->ic_scan_start = rt2560_scan_start; ic->ic_scan_end = rt2560_scan_end; ic->ic_set_channel = rt2560_set_channel; ic->ic_vap_create = rt2560_vap_create; ic->ic_vap_delete = rt2560_vap_delete; bpfattach(ifp, DLT_IEEE802_11_RADIO, sizeof (struct ieee80211_frame) + sizeof (sc->sc_txtap)); sc->sc_rxtap_len = sizeof sc->sc_rxtap; sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len); sc->sc_rxtap.wr_ihdr.it_present = htole32(RT2560_RX_RADIOTAP_PRESENT); sc->sc_txtap_len = sizeof sc->sc_txtap; sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len); sc->sc_txtap.wt_ihdr.it_present = htole32(RT2560_TX_RADIOTAP_PRESENT); /* * Add a few sysctl knobs. */ #ifdef RAL_DEBUG SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, 0, "debug msgs"); #endif SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "txantenna", CTLFLAG_RW, &sc->tx_ant, 0, "tx antenna (0=auto)"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rxantenna", CTLFLAG_RW, &sc->rx_ant, 0, "rx antenna (0=auto)"); if (bootverbose) ieee80211_announce(ic); return 0; fail6: rt2560_free_rx_ring(sc, &sc->rxq); fail5: rt2560_free_tx_ring(sc, &sc->bcnq); fail4: rt2560_free_tx_ring(sc, &sc->prioq); fail3: rt2560_free_tx_ring(sc, &sc->atimq); fail2: rt2560_free_tx_ring(sc, &sc->txq); fail1: mtx_destroy(&sc->sc_mtx); return ENXIO; } int rt2560_detach(void *xsc) { struct rt2560_softc *sc = xsc; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; rt2560_stop(sc); bpfdetach(ifp); ieee80211_ifdetach(ic); rt2560_free_tx_ring(sc, &sc->txq); rt2560_free_tx_ring(sc, &sc->atimq); rt2560_free_tx_ring(sc, &sc->prioq); rt2560_free_tx_ring(sc, &sc->bcnq); rt2560_free_rx_ring(sc, &sc->rxq); if_free(ifp); mtx_destroy(&sc->sc_mtx); return 0; } static struct ieee80211vap * rt2560_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, int opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ifnet *ifp = ic->ic_ifp; struct rt2560_vap *rvp; struct ieee80211vap *vap; switch (opmode) { case IEEE80211_M_STA: case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: case IEEE80211_M_MONITOR: case IEEE80211_M_HOSTAP: if (!TAILQ_EMPTY(&ic->ic_vaps)) { if_printf(ifp, "only 1 vap supported\n"); return NULL; } if (opmode == IEEE80211_M_STA) flags |= IEEE80211_CLONE_NOBEACONS; break; case IEEE80211_M_WDS: if (TAILQ_EMPTY(&ic->ic_vaps) || ic->ic_opmode != IEEE80211_M_HOSTAP) { if_printf(ifp, "wds only supported in ap mode\n"); return NULL; } /* * Silently remove any request for a unique * bssid; WDS vap's always share the local * mac address. 
*/ flags &= ~IEEE80211_CLONE_BSSID; break; default: if_printf(ifp, "unknown opmode %d\n", opmode); return NULL; } rvp = (struct rt2560_vap *) malloc(sizeof(struct rt2560_vap), M_80211_VAP, M_NOWAIT | M_ZERO); if (rvp == NULL) return NULL; vap = &rvp->ral_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac); /* override state transition machine */ rvp->ral_newstate = vap->iv_newstate; vap->iv_newstate = rt2560_newstate; vap->iv_update_beacon = rt2560_beacon_update; ieee80211_amrr_init(&rvp->amrr, vap, IEEE80211_AMRR_MIN_SUCCESS_THRESHOLD, IEEE80211_AMRR_MAX_SUCCESS_THRESHOLD, 500 /* ms */); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status); if (TAILQ_FIRST(&ic->ic_vaps) == vap) ic->ic_opmode = opmode; return vap; } static void rt2560_vap_delete(struct ieee80211vap *vap) { struct rt2560_vap *rvp = RT2560_VAP(vap); ieee80211_amrr_cleanup(&rvp->amrr); ieee80211_vap_detach(vap); free(rvp, M_80211_VAP); } void rt2560_resume(void *xsc) { struct rt2560_softc *sc = xsc; struct ifnet *ifp = sc->sc_ifp; if (ifp->if_flags & IFF_UP) rt2560_init(sc); } static void rt2560_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error != 0) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); *(bus_addr_t *)arg = segs[0].ds_addr; } static int rt2560_alloc_tx_ring(struct rt2560_softc *sc, struct rt2560_tx_ring *ring, int count) { int i, error; ring->count = count; ring->queued = 0; ring->cur = ring->next = 0; ring->cur_encrypt = ring->next_encrypt = 0; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, count * RT2560_TX_DESC_SIZE, 1, count * RT2560_TX_DESC_SIZE, 0, NULL, NULL, &ring->desc_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc, count * RT2560_TX_DESC_SIZE, rt2560_dma_map_addr, &ring->physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load desc DMA map\n"); goto fail; } ring->data = malloc(count * sizeof (struct rt2560_tx_data), M_DEVBUF, M_NOWAIT | M_ZERO); if (ring->data == NULL) { device_printf(sc->sc_dev, "could not allocate soft data\n"); error = ENOMEM; goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, RT2560_MAX_SCATTER, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create data DMA tag\n"); goto fail; } for (i = 0; i < count; i++) { error = bus_dmamap_create(ring->data_dmat, 0, &ring->data[i].map); if (error != 0) { device_printf(sc->sc_dev, "could not create DMA map\n"); goto fail; } } return 0; fail: rt2560_free_tx_ring(sc, ring); return error; } static void rt2560_reset_tx_ring(struct rt2560_softc *sc, struct rt2560_tx_ring *ring) { struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; int i; for (i = 0; i < ring->count; i++) { desc = &ring->desc[i]; data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; } desc->flags = 0; } 
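	/*
	 * NB: the PREWRITE sync below pushes the cleared descriptor flags
	 * back to the chip-visible ring before the ring indices are rewound,
	 * so stale TX_BUSY/TX_VALID bits cannot be picked up once the ring
	 * is reused.
	 */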
bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); ring->queued = 0; ring->cur = ring->next = 0; ring->cur_encrypt = ring->next_encrypt = 0; } static void rt2560_free_tx_ring(struct rt2560_softc *sc, struct rt2560_tx_ring *ring) { struct rt2560_tx_data *data; int i; if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dmat, ring->desc_map); bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map); } if (ring->desc_dmat != NULL) bus_dma_tag_destroy(ring->desc_dmat); if (ring->data != NULL) { for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->ni != NULL) ieee80211_free_node(data->ni); if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } free(ring->data, M_DEVBUF); } if (ring->data_dmat != NULL) bus_dma_tag_destroy(ring->data_dmat); } static int rt2560_alloc_rx_ring(struct rt2560_softc *sc, struct rt2560_rx_ring *ring, int count) { struct rt2560_rx_desc *desc; struct rt2560_rx_data *data; bus_addr_t physaddr; int i, error; ring->count = count; ring->cur = ring->next = 0; ring->cur_decrypt = 0; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, count * RT2560_RX_DESC_SIZE, 1, count * RT2560_RX_DESC_SIZE, 0, NULL, NULL, &ring->desc_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc, count * RT2560_RX_DESC_SIZE, rt2560_dma_map_addr, &ring->physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load desc DMA map\n"); goto fail; } ring->data = malloc(count * sizeof (struct rt2560_rx_data), M_DEVBUF, M_NOWAIT | M_ZERO); if (ring->data == NULL) { device_printf(sc->sc_dev, "could not allocate soft data\n"); error = ENOMEM; goto fail; } /* * Pre-allocate Rx buffers and populate Rx ring. 
*/ error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create data DMA tag\n"); goto fail; } for (i = 0; i < count; i++) { desc = &sc->rxq.desc[i]; data = &sc->rxq.data[i]; error = bus_dmamap_create(ring->data_dmat, 0, &data->map); if (error != 0) { device_printf(sc->sc_dev, "could not create DMA map\n"); goto fail; } data->m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (data->m == NULL) { device_printf(sc->sc_dev, "could not allocate rx mbuf\n"); error = ENOMEM; goto fail; } error = bus_dmamap_load(ring->data_dmat, data->map, mtod(data->m, void *), MCLBYTES, rt2560_dma_map_addr, &physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load rx buf DMA map"); goto fail; } desc->flags = htole32(RT2560_RX_BUSY); desc->physaddr = htole32(physaddr); } bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); return 0; fail: rt2560_free_rx_ring(sc, ring); return error; } static void rt2560_reset_rx_ring(struct rt2560_softc *sc, struct rt2560_rx_ring *ring) { int i; for (i = 0; i < ring->count; i++) { ring->desc[i].flags = htole32(RT2560_RX_BUSY); ring->data[i].drop = 0; } bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); ring->cur = ring->next = 0; ring->cur_decrypt = 0; } static void rt2560_free_rx_ring(struct rt2560_softc *sc, struct rt2560_rx_ring *ring) { struct rt2560_rx_data *data; int i; if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dmat, ring->desc_map); bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map); } if (ring->desc_dmat != NULL) bus_dma_tag_destroy(ring->desc_dmat); if (ring->data != NULL) { for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } free(ring->data, M_DEVBUF); } if (ring->data_dmat != NULL) bus_dma_tag_destroy(ring->data_dmat); } static struct ieee80211_node * -rt2560_node_alloc(struct ieee80211_node_table *nt) +rt2560_node_alloc(struct ieee80211vap *vap, + const uint8_t mac[IEEE80211_ADDR_LEN]) { struct rt2560_node *rn; rn = malloc(sizeof (struct rt2560_node), M_80211_NODE, M_NOWAIT | M_ZERO); return (rn != NULL) ? 
&rn->ni : NULL; } static void rt2560_newassoc(struct ieee80211_node *ni, int isnew) { struct ieee80211vap *vap = ni->ni_vap; ieee80211_amrr_node_init(&RT2560_VAP(vap)->amrr, &RT2560_NODE(ni)->amrr, ni); } static int rt2560_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct rt2560_vap *rvp = RT2560_VAP(vap); struct ifnet *ifp = vap->iv_ic->ic_ifp; struct rt2560_softc *sc = ifp->if_softc; int error; if (nstate == IEEE80211_S_INIT && vap->iv_state == IEEE80211_S_RUN) { /* abort TSF synchronization */ RAL_WRITE(sc, RT2560_CSR14, 0); /* turn association led off */ rt2560_update_led(sc, 0, 0); } error = rvp->ral_newstate(vap, nstate, arg); if (error == 0 && nstate == IEEE80211_S_RUN) { struct ieee80211_node *ni = vap->iv_bss; struct mbuf *m; if (vap->iv_opmode != IEEE80211_M_MONITOR) { rt2560_update_plcp(sc); rt2560_set_basicrates(sc); rt2560_set_bssid(sc, ni->ni_bssid); } if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_IBSS) { m = ieee80211_beacon_alloc(ni, &rvp->ral_bo); if (m == NULL) { if_printf(ifp, "could not allocate beacon\n"); return ENOBUFS; } ieee80211_ref_node(ni); error = rt2560_tx_bcn(sc, m, ni); if (error != 0) return error; } /* turn assocation led on */ rt2560_update_led(sc, 1, 0); if (vap->iv_opmode != IEEE80211_M_MONITOR) { if (vap->iv_opmode == IEEE80211_M_STA) { /* fake a join to init the tx rate */ rt2560_newassoc(ni, 1); } rt2560_enable_tsf_sync(sc); } } return error; } /* * Read 16 bits at address 'addr' from the serial EEPROM (either 93C46 or * 93C66). */ static uint16_t rt2560_eeprom_read(struct rt2560_softc *sc, uint8_t addr) { uint32_t tmp; uint16_t val; int n; /* clock C once before the first command */ RT2560_EEPROM_CTL(sc, 0); RT2560_EEPROM_CTL(sc, RT2560_S); RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_C); RT2560_EEPROM_CTL(sc, RT2560_S); /* write start bit (1) */ RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_D); RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_D | RT2560_C); /* write READ opcode (10) */ RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_D); RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_D | RT2560_C); RT2560_EEPROM_CTL(sc, RT2560_S); RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_C); /* write address (A5-A0 or A7-A0) */ n = (RAL_READ(sc, RT2560_CSR21) & RT2560_93C46) ? 5 : 7; for (; n >= 0; n--) { RT2560_EEPROM_CTL(sc, RT2560_S | (((addr >> n) & 1) << RT2560_SHIFT_D)); RT2560_EEPROM_CTL(sc, RT2560_S | (((addr >> n) & 1) << RT2560_SHIFT_D) | RT2560_C); } RT2560_EEPROM_CTL(sc, RT2560_S); /* read data Q15-Q0 */ val = 0; for (n = 15; n >= 0; n--) { RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_C); tmp = RAL_READ(sc, RT2560_CSR21); val |= ((tmp & RT2560_Q) >> RT2560_SHIFT_Q) << n; RT2560_EEPROM_CTL(sc, RT2560_S); } RT2560_EEPROM_CTL(sc, 0); /* clear Chip Select and clock C */ RT2560_EEPROM_CTL(sc, RT2560_S); RT2560_EEPROM_CTL(sc, 0); RT2560_EEPROM_CTL(sc, RT2560_C); return val; } /* * Some frames were processed by the hardware cipher engine and are ready for * transmission. 
*/ static void rt2560_encryption_intr(struct rt2560_softc *sc) { struct rt2560_tx_desc *desc; int hw; /* retrieve last descriptor index processed by cipher engine */ hw = RAL_READ(sc, RT2560_SECCSR1) - sc->txq.physaddr; hw /= RT2560_TX_DESC_SIZE; bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map, BUS_DMASYNC_POSTREAD); while (sc->txq.next_encrypt != hw) { if (sc->txq.next_encrypt == sc->txq.cur_encrypt) { printf("hw encrypt %d, cur_encrypt %d\n", hw, sc->txq.cur_encrypt); break; } desc = &sc->txq.desc[sc->txq.next_encrypt]; if ((le32toh(desc->flags) & RT2560_TX_BUSY) || (le32toh(desc->flags) & RT2560_TX_CIPHER_BUSY)) break; /* for TKIP, swap eiv field to fix a bug in ASIC */ if ((le32toh(desc->flags) & RT2560_TX_CIPHER_MASK) == RT2560_TX_CIPHER_TKIP) desc->eiv = bswap32(desc->eiv); /* mark the frame ready for transmission */ desc->flags |= htole32(RT2560_TX_VALID); desc->flags |= htole32(RT2560_TX_BUSY); DPRINTFN(sc, 15, "encryption done idx=%u\n", sc->txq.next_encrypt); sc->txq.next_encrypt = (sc->txq.next_encrypt + 1) % RT2560_TX_RING_COUNT; } bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map, BUS_DMASYNC_PREWRITE); /* kick Tx */ RAL_WRITE(sc, RT2560_TXCSR0, RT2560_KICK_TX); } static void rt2560_tx_intr(struct rt2560_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; struct rt2560_node *rn; struct mbuf *m; uint32_t flags; int retrycnt; bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map, BUS_DMASYNC_POSTREAD); for (;;) { desc = &sc->txq.desc[sc->txq.next]; data = &sc->txq.data[sc->txq.next]; flags = le32toh(desc->flags); if ((flags & RT2560_TX_BUSY) || (flags & RT2560_TX_CIPHER_BUSY) || !(flags & RT2560_TX_VALID)) break; rn = (struct rt2560_node *)data->ni; m = data->m; switch (flags & RT2560_TX_RESULT_MASK) { case RT2560_TX_SUCCESS: DPRINTFN(sc, 10, "%s\n", "data frame sent successfully"); if (data->rix != IEEE80211_FIXED_RATE_NONE) ieee80211_amrr_tx_complete(&rn->amrr, IEEE80211_AMRR_SUCCESS, 0); ifp->if_opackets++; break; case RT2560_TX_SUCCESS_RETRY: retrycnt = RT2560_TX_RETRYCNT(flags); DPRINTFN(sc, 9, "data frame sent after %u retries\n", retrycnt); if (data->rix != IEEE80211_FIXED_RATE_NONE) ieee80211_amrr_tx_complete(&rn->amrr, IEEE80211_AMRR_SUCCESS, retrycnt); ifp->if_opackets++; break; case RT2560_TX_FAIL_RETRY: retrycnt = RT2560_TX_RETRYCNT(flags); DPRINTFN(sc, 9, "data frame failed after %d retries\n", retrycnt); if (data->rix != IEEE80211_FIXED_RATE_NONE) ieee80211_amrr_tx_complete(&rn->amrr, IEEE80211_AMRR_FAILURE, retrycnt); ifp->if_oerrors++; break; case RT2560_TX_FAIL_INVALID: case RT2560_TX_FAIL_OTHER: default: device_printf(sc->sc_dev, "sending data frame failed " "0x%08x\n", flags); ifp->if_oerrors++; } bus_dmamap_sync(sc->txq.data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->txq.data_dmat, data->map); m_freem(m); data->m = NULL; ieee80211_free_node(data->ni); data->ni = NULL; /* descriptor is no longer valid */ desc->flags &= ~htole32(RT2560_TX_VALID); DPRINTFN(sc, 15, "tx done idx=%u\n", sc->txq.next); sc->txq.queued--; sc->txq.next = (sc->txq.next + 1) % RT2560_TX_RING_COUNT; } bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map, BUS_DMASYNC_PREWRITE); if (sc->prioq.queued == 0 && sc->txq.queued == 0) sc->sc_tx_timer = 0; if (sc->txq.queued < RT2560_TX_RING_COUNT - 1) { sc->sc_flags &= ~RT2560_F_DATA_OACTIVE; if ((sc->sc_flags & (RT2560_F_DATA_OACTIVE | RT2560_F_PRIO_OACTIVE)) == 0) ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; rt2560_start_locked(ifp); } } static void rt2560_prio_intr(struct 
rt2560_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; struct ieee80211_node *ni; struct mbuf *m; int flags; bus_dmamap_sync(sc->prioq.desc_dmat, sc->prioq.desc_map, BUS_DMASYNC_POSTREAD); for (;;) { desc = &sc->prioq.desc[sc->prioq.next]; data = &sc->prioq.data[sc->prioq.next]; flags = le32toh(desc->flags); if ((flags & RT2560_TX_BUSY) || (flags & RT2560_TX_VALID) == 0) break; switch (flags & RT2560_TX_RESULT_MASK) { case RT2560_TX_SUCCESS: DPRINTFN(sc, 10, "%s\n", "mgt frame sent successfully"); break; case RT2560_TX_SUCCESS_RETRY: DPRINTFN(sc, 9, "mgt frame sent after %u retries\n", (flags >> 5) & 0x7); break; case RT2560_TX_FAIL_RETRY: DPRINTFN(sc, 9, "%s\n", "sending mgt frame failed (too much retries)"); break; case RT2560_TX_FAIL_INVALID: case RT2560_TX_FAIL_OTHER: default: device_printf(sc->sc_dev, "sending mgt frame failed " "0x%08x\n", flags); break; } bus_dmamap_sync(sc->prioq.data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->prioq.data_dmat, data->map); m = data->m; data->m = NULL; ni = data->ni; data->ni = NULL; /* descriptor is no longer valid */ desc->flags &= ~htole32(RT2560_TX_VALID); DPRINTFN(sc, 15, "prio done idx=%u\n", sc->prioq.next); sc->prioq.queued--; sc->prioq.next = (sc->prioq.next + 1) % RT2560_PRIO_RING_COUNT; if (m->m_flags & M_TXCB) ieee80211_process_callback(ni, m, (flags & RT2560_TX_RESULT_MASK) &~ (RT2560_TX_SUCCESS | RT2560_TX_SUCCESS_RETRY)); m_freem(m); ieee80211_free_node(ni); } bus_dmamap_sync(sc->prioq.desc_dmat, sc->prioq.desc_map, BUS_DMASYNC_PREWRITE); if (sc->prioq.queued == 0 && sc->txq.queued == 0) sc->sc_tx_timer = 0; if (sc->prioq.queued < RT2560_PRIO_RING_COUNT) { sc->sc_flags &= ~RT2560_F_PRIO_OACTIVE; if ((sc->sc_flags & (RT2560_F_DATA_OACTIVE | RT2560_F_PRIO_OACTIVE)) == 0) ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; rt2560_start_locked(ifp); } } /* * Some frames were processed by the hardware cipher engine and are ready for * handoff to the IEEE802.11 layer. */ static void rt2560_decryption_intr(struct rt2560_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct rt2560_rx_desc *desc; struct rt2560_rx_data *data; bus_addr_t physaddr; struct ieee80211_frame *wh; struct ieee80211_node *ni; struct mbuf *mnew, *m; int hw, error; /* retrieve last decriptor index processed by cipher engine */ hw = RAL_READ(sc, RT2560_SECCSR0) - sc->rxq.physaddr; hw /= RT2560_RX_DESC_SIZE; bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_POSTREAD); for (; sc->rxq.cur_decrypt != hw;) { desc = &sc->rxq.desc[sc->rxq.cur_decrypt]; data = &sc->rxq.data[sc->rxq.cur_decrypt]; if ((le32toh(desc->flags) & RT2560_RX_BUSY) || (le32toh(desc->flags) & RT2560_RX_CIPHER_BUSY)) break; if (data->drop) { ifp->if_ierrors++; goto skip; } if ((le32toh(desc->flags) & RT2560_RX_CIPHER_MASK) != 0 && (le32toh(desc->flags) & RT2560_RX_ICV_ERROR)) { ifp->if_ierrors++; goto skip; } /* * Try to allocate a new mbuf for this ring element and load it * before processing the current mbuf. If the ring element * cannot be loaded, drop the received packet and reuse the old * mbuf. In the unlikely case that the old mbuf can't be * reloaded either, explicitly panic. 
*/ mnew = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (mnew == NULL) { ifp->if_ierrors++; goto skip; } bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rxq.data_dmat, data->map); error = bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(mnew, void *), MCLBYTES, rt2560_dma_map_addr, &physaddr, 0); if (error != 0) { m_freem(mnew); /* try to reload the old mbuf */ error = bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(data->m, void *), MCLBYTES, rt2560_dma_map_addr, &physaddr, 0); if (error != 0) { /* very unlikely that it will fail... */ panic("%s: could not load old rx mbuf", device_get_name(sc->sc_dev)); } ifp->if_ierrors++; goto skip; } /* * New mbuf successfully loaded, update Rx ring and continue * processing. */ m = data->m; data->m = mnew; desc->physaddr = htole32(physaddr); /* finalize mbuf */ m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = (le32toh(desc->flags) >> 16) & 0xfff; if (bpf_peers_present(ifp->if_bpf)) { struct rt2560_rx_radiotap_header *tap = &sc->sc_rxtap; uint32_t tsf_lo, tsf_hi; /* get timestamp (low and high 32 bits) */ tsf_hi = RAL_READ(sc, RT2560_CSR17); tsf_lo = RAL_READ(sc, RT2560_CSR16); tap->wr_tsf = htole64(((uint64_t)tsf_hi << 32) | tsf_lo); tap->wr_flags = 0; tap->wr_rate = ieee80211_plcp2rate(desc->rate, (desc->flags & htole32(RT2560_RX_OFDM)) ? IEEE80211_T_OFDM : IEEE80211_T_CCK); tap->wr_antenna = sc->rx_ant; tap->wr_antsignal = RT2560_RSSI(sc, desc->rssi); bpf_mtap2(ifp->if_bpf, tap, sc->sc_rxtap_len, m); } sc->sc_flags |= RT2560_F_INPUT_RUNNING; RAL_UNLOCK(sc); wh = mtod(m, struct ieee80211_frame *); ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); if (ni != NULL) { (void) ieee80211_input(ni, m, RT2560_RSSI(sc, desc->rssi), RT2560_NOISE_FLOOR, 0); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, RT2560_RSSI(sc, desc->rssi), RT2560_NOISE_FLOOR, 0); RAL_LOCK(sc); sc->sc_flags &= ~RT2560_F_INPUT_RUNNING; skip: desc->flags = htole32(RT2560_RX_BUSY); DPRINTFN(sc, 15, "decryption done idx=%u\n", sc->rxq.cur_decrypt); sc->rxq.cur_decrypt = (sc->rxq.cur_decrypt + 1) % RT2560_RX_RING_COUNT; } bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_PREWRITE); } /* * Some frames were received. Pass them to the hardware cipher engine before * sending them to the 802.11 layer. */ static void rt2560_rx_intr(struct rt2560_softc *sc) { struct rt2560_rx_desc *desc; struct rt2560_rx_data *data; bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_POSTREAD); for (;;) { desc = &sc->rxq.desc[sc->rxq.cur]; data = &sc->rxq.data[sc->rxq.cur]; if ((le32toh(desc->flags) & RT2560_RX_BUSY) || (le32toh(desc->flags) & RT2560_RX_CIPHER_BUSY)) break; data->drop = 0; if ((le32toh(desc->flags) & RT2560_RX_PHY_ERROR) || (le32toh(desc->flags) & RT2560_RX_CRC_ERROR)) { /* * This should not happen since we did not request * to receive those frames when we filled RXCSR0. 
*/ DPRINTFN(sc, 5, "PHY or CRC error flags 0x%08x\n", le32toh(desc->flags)); data->drop = 1; } if (((le32toh(desc->flags) >> 16) & 0xfff) > MCLBYTES) { DPRINTFN(sc, 5, "%s\n", "bad length"); data->drop = 1; } /* mark the frame for decryption */ desc->flags |= htole32(RT2560_RX_CIPHER_BUSY); DPRINTFN(sc, 15, "rx done idx=%u\n", sc->rxq.cur); sc->rxq.cur = (sc->rxq.cur + 1) % RT2560_RX_RING_COUNT; } bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_PREWRITE); /* kick decrypt */ RAL_WRITE(sc, RT2560_SECCSR0, RT2560_KICK_DECRYPT); } static void rt2560_beacon_update(struct ieee80211vap *vap, int item) { struct rt2560_vap *rvp = RT2560_VAP(vap); struct ieee80211_beacon_offsets *bo = &rvp->ral_bo; setbit(bo->bo_flags, item); } /* * This function is called periodically in IBSS mode when a new beacon must be * sent out. */ static void rt2560_beacon_expire(struct rt2560_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct rt2560_vap *rvp = RT2560_VAP(vap); struct rt2560_tx_data *data; if (ic->ic_opmode != IEEE80211_M_IBSS && ic->ic_opmode != IEEE80211_M_HOSTAP) return; data = &sc->bcnq.data[sc->bcnq.next]; /* * Don't send beacon if bsschan isn't set */ if (data->ni == NULL) return; bus_dmamap_sync(sc->bcnq.data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->bcnq.data_dmat, data->map); /* XXX 1 =>'s mcast frames which means all PS sta's will wakeup! */ ieee80211_beacon_update(data->ni, &rvp->ral_bo, data->m, 1); rt2560_tx_bcn(sc, data->m, data->ni); DPRINTFN(sc, 15, "%s", "beacon expired\n"); sc->bcnq.next = (sc->bcnq.next + 1) % RT2560_BEACON_RING_COUNT; } /* ARGSUSED */ static void rt2560_wakeup_expire(struct rt2560_softc *sc) { DPRINTFN(sc, 2, "%s", "wakeup expired\n"); } void rt2560_intr(void *arg) { struct rt2560_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; uint32_t r; RAL_LOCK(sc); /* disable interrupts */ RAL_WRITE(sc, RT2560_CSR8, 0xffffffff); /* don't re-enable interrupts if we're shutting down */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { RAL_UNLOCK(sc); return; } r = RAL_READ(sc, RT2560_CSR7); RAL_WRITE(sc, RT2560_CSR7, r); if (r & RT2560_BEACON_EXPIRE) rt2560_beacon_expire(sc); if (r & RT2560_WAKEUP_EXPIRE) rt2560_wakeup_expire(sc); if (r & RT2560_ENCRYPTION_DONE) rt2560_encryption_intr(sc); if (r & RT2560_TX_DONE) rt2560_tx_intr(sc); if (r & RT2560_PRIO_DONE) rt2560_prio_intr(sc); if (r & RT2560_DECRYPTION_DONE) rt2560_decryption_intr(sc); if (r & RT2560_RX_DONE) { rt2560_rx_intr(sc); rt2560_encryption_intr(sc); } /* re-enable interrupts */ RAL_WRITE(sc, RT2560_CSR8, RT2560_INTR_MASK); RAL_UNLOCK(sc); } #define RAL_SIFS 10 /* us */ #define RT2560_TXRX_TURNAROUND 10 /* us */ static uint8_t rt2560_plcp_signal(int rate) { switch (rate) { /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 
14 Table 80) */ case 12: return 0xb; case 18: return 0xf; case 24: return 0xa; case 36: return 0xe; case 48: return 0x9; case 72: return 0xd; case 96: return 0x8; case 108: return 0xc; /* CCK rates (NB: not IEEE std, device-specific) */ case 2: return 0x0; case 4: return 0x1; case 11: return 0x2; case 22: return 0x3; } return 0xff; /* XXX unsupported/unknown rate */ } static void rt2560_setup_tx_desc(struct rt2560_softc *sc, struct rt2560_tx_desc *desc, uint32_t flags, int len, int rate, int encrypt, bus_addr_t physaddr) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint16_t plcp_length; int remainder; desc->flags = htole32(flags); desc->flags |= htole32(len << 16); desc->physaddr = htole32(physaddr); desc->wme = htole16( RT2560_AIFSN(2) | RT2560_LOGCWMIN(3) | RT2560_LOGCWMAX(8)); /* setup PLCP fields */ desc->plcp_signal = rt2560_plcp_signal(rate); desc->plcp_service = 4; len += IEEE80211_CRC_LEN; if (ieee80211_rate2phytype(sc->sc_rates, rate) == IEEE80211_T_OFDM) { desc->flags |= htole32(RT2560_TX_OFDM); plcp_length = len & 0xfff; desc->plcp_length_hi = plcp_length >> 6; desc->plcp_length_lo = plcp_length & 0x3f; } else { plcp_length = (16 * len + rate - 1) / rate; if (rate == 22) { remainder = (16 * len) % 22; if (remainder != 0 && remainder < 7) desc->plcp_service |= RT2560_PLCP_LENGEXT; } desc->plcp_length_hi = plcp_length >> 8; desc->plcp_length_lo = plcp_length & 0xff; if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) desc->plcp_signal |= 0x08; } if (!encrypt) desc->flags |= htole32(RT2560_TX_VALID); desc->flags |= encrypt ? htole32(RT2560_TX_CIPHER_BUSY) : htole32(RT2560_TX_BUSY); } static int rt2560_tx_bcn(struct rt2560_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = sc->sc_ifp; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; bus_dma_segment_t segs[RT2560_MAX_SCATTER]; int nsegs, rate, error; desc = &sc->bcnq.desc[sc->bcnq.cur]; data = &sc->bcnq.data[sc->bcnq.cur]; /* XXX maybe a separate beacon rate? 
*/ rate = vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)].mgmtrate; error = bus_dmamap_load_mbuf_sg(sc->bcnq.data_dmat, data->map, m0, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (bpf_peers_present(ifp->if_bpf)) { struct rt2560_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; tap->wt_chan_freq = htole16(ic->ic_curchan->ic_freq); tap->wt_chan_flags = htole16(ic->ic_curchan->ic_flags); tap->wt_antenna = sc->tx_ant; bpf_mtap2(ifp->if_bpf, tap, sc->sc_txtap_len, m0); } data->m = m0; data->ni = ni; rt2560_setup_tx_desc(sc, desc, RT2560_TX_IFS_NEWBACKOFF | RT2560_TX_TIMESTAMP, m0->m_pkthdr.len, rate, 0, segs->ds_addr); DPRINTFN(sc, 10, "sending beacon frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, sc->bcnq.cur, rate); bus_dmamap_sync(sc->bcnq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->bcnq.desc_dmat, sc->bcnq.desc_map, BUS_DMASYNC_PREWRITE); sc->bcnq.cur = (sc->bcnq.cur + 1) % RT2560_BEACON_RING_COUNT; return 0; } static int rt2560_tx_mgt(struct rt2560_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = sc->sc_ifp; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; struct ieee80211_frame *wh; struct ieee80211_key *k; bus_dma_segment_t segs[RT2560_MAX_SCATTER]; uint16_t dur; uint32_t flags = 0; int nsegs, rate, error; desc = &sc->prioq.desc[sc->prioq.cur]; data = &sc->prioq.data[sc->prioq.cur]; rate = vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)].mgmtrate; wh = mtod(m0, struct ieee80211_frame *); if (wh->i_fc[1] & IEEE80211_FC1_WEP) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } } error = bus_dmamap_load_mbuf_sg(sc->prioq.data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (bpf_peers_present(ifp->if_bpf)) { struct rt2560_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; tap->wt_chan_freq = htole16(ic->ic_curchan->ic_freq); tap->wt_chan_flags = htole16(ic->ic_curchan->ic_flags); tap->wt_antenna = sc->tx_ant; bpf_mtap2(ifp->if_bpf, tap, sc->sc_txtap_len, m0); } data->m = m0; data->ni = ni; /* management frames are not taken into account for amrr */ data->rix = IEEE80211_FIXED_RATE_NONE; wh = mtod(m0, struct ieee80211_frame *); if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RT2560_TX_ACK; dur = ieee80211_ack_duration(sc->sc_rates, rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); *(uint16_t *)wh->i_dur = htole16(dur); /* tell hardware to add timestamp for probe responses */ if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_MGT && (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == IEEE80211_FC0_SUBTYPE_PROBE_RESP) flags |= RT2560_TX_TIMESTAMP; } rt2560_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate, 0, segs->ds_addr); bus_dmamap_sync(sc->prioq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->prioq.desc_dmat, sc->prioq.desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(sc, 10, "sending mgt frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, sc->prioq.cur, rate); /* kick prio */ sc->prioq.queued++; sc->prioq.cur = (sc->prioq.cur + 1) % RT2560_PRIO_RING_COUNT; RAL_WRITE(sc, RT2560_TXCSR0, RT2560_KICK_PRIO); return 0; } static int rt2560_sendprot(struct rt2560_softc *sc, const struct mbuf *m, struct ieee80211_node *ni, int 
prot, int rate) { struct ieee80211com *ic = ni->ni_ic; const struct ieee80211_frame *wh; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; struct mbuf *mprot; int protrate, ackrate, pktlen, flags, isshort, error; uint16_t dur; bus_dma_segment_t segs[RT2560_MAX_SCATTER]; int nsegs; KASSERT(prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY, ("protection %d", prot)); wh = mtod(m, const struct ieee80211_frame *); pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN; protrate = ieee80211_ctl_rate(sc->sc_rates, rate); ackrate = ieee80211_ack_rate(sc->sc_rates, rate); isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; dur = ieee80211_compute_duration(sc->sc_rates, pktlen, rate, isshort) + ieee80211_ack_duration(sc->sc_rates, rate, isshort); flags = RT2560_TX_MORE_FRAG; if (prot == IEEE80211_PROT_RTSCTS) { /* NB: CTS is the same size as an ACK */ dur += ieee80211_ack_duration(sc->sc_rates, rate, isshort); flags |= RT2560_TX_ACK; mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, dur); } else { mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur); } if (mprot == NULL) { /* XXX stat + msg */ return ENOBUFS; } desc = &sc->txq.desc[sc->txq.cur_encrypt]; data = &sc->txq.data[sc->txq.cur_encrypt]; error = bus_dmamap_load_mbuf_sg(sc->txq.data_dmat, data->map, mprot, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(mprot); return error; } data->m = mprot; data->ni = ieee80211_ref_node(ni); /* ctl frames are not taken into account for amrr */ data->rix = IEEE80211_FIXED_RATE_NONE; rt2560_setup_tx_desc(sc, desc, flags, mprot->m_pkthdr.len, protrate, 1, segs->ds_addr); bus_dmamap_sync(sc->txq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); sc->txq.queued++; sc->txq.cur_encrypt = (sc->txq.cur_encrypt + 1) % RT2560_TX_RING_COUNT; return 0; } static int rt2560_tx_raw(struct rt2560_softc *sc, struct mbuf *m0, struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; bus_dma_segment_t segs[RT2560_MAX_SCATTER]; uint32_t flags; int nsegs, rate, error; desc = &sc->prioq.desc[sc->prioq.cur]; data = &sc->prioq.data[sc->prioq.cur]; rate = params->ibp_rate0 & IEEE80211_RATE_VAL; /* XXX validate */ if (rate == 0) { /* XXX fall back to mcast/mgmt rate? */ m_freem(m0); return EINVAL; } flags = 0; if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) flags |= RT2560_TX_ACK; if (params->ibp_flags & (IEEE80211_BPF_RTS|IEEE80211_BPF_CTS)) { error = rt2560_sendprot(sc, m0, ni, params->ibp_flags & IEEE80211_BPF_RTS ? 
IEEE80211_PROT_RTSCTS : IEEE80211_PROT_CTSONLY, rate); if (error) { m_freem(m0); return error; } flags |= RT2560_TX_LONG_RETRY | RT2560_TX_IFS_SIFS; } error = bus_dmamap_load_mbuf_sg(sc->prioq.data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (bpf_peers_present(ifp->if_bpf)) { struct rt2560_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; tap->wt_chan_freq = htole16(ic->ic_curchan->ic_freq); tap->wt_chan_flags = htole16(ic->ic_curchan->ic_flags); tap->wt_antenna = sc->tx_ant; bpf_mtap2(ifp->if_bpf, tap, sc->sc_txtap_len, m0); } data->m = m0; data->ni = ni; /* XXX need to setup descriptor ourself */ rt2560_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate, (params->ibp_flags & IEEE80211_BPF_CRYPTO) != 0, segs->ds_addr); bus_dmamap_sync(sc->prioq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->prioq.desc_dmat, sc->prioq.desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(sc, 10, "sending raw frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, sc->prioq.cur, rate); /* kick prio */ sc->prioq.queued++; sc->prioq.cur = (sc->prioq.cur + 1) % RT2560_PRIO_RING_COUNT; RAL_WRITE(sc, RT2560_TXCSR0, RT2560_KICK_PRIO); return 0; } static int rt2560_tx_data(struct rt2560_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = sc->sc_ifp; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp; struct ieee80211_key *k; struct mbuf *mnew; bus_dma_segment_t segs[RT2560_MAX_SCATTER]; uint16_t dur; uint32_t flags; int nsegs, rate, error; wh = mtod(m0, struct ieee80211_frame *); tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { rate = tp->mcastrate; } else if (m0->m_flags & M_EAPOL) { rate = tp->mgmtrate; } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) { rate = tp->ucastrate; } else { (void) ieee80211_amrr_choose(ni, &RT2560_NODE(ni)->amrr); rate = ni->ni_txrate; } if (wh->i_fc[1] & IEEE80211_FC1_WEP) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } flags = 0; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { int prot = IEEE80211_PROT_NONE; if (m0->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) prot = IEEE80211_PROT_RTSCTS; else if ((ic->ic_flags & IEEE80211_F_USEPROT) && ieee80211_rate2phytype(sc->sc_rates, rate) == IEEE80211_T_OFDM) prot = ic->ic_protmode; if (prot != IEEE80211_PROT_NONE) { error = rt2560_sendprot(sc, m0, ni, prot, rate); if (error) { m_freem(m0); return error; } flags |= RT2560_TX_LONG_RETRY | RT2560_TX_IFS_SIFS; } } data = &sc->txq.data[sc->txq.cur_encrypt]; desc = &sc->txq.desc[sc->txq.cur_encrypt]; error = bus_dmamap_load_mbuf_sg(sc->txq.data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0 && error != EFBIG) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (error != 0) { mnew = m_defrag(m0, M_DONTWAIT); if (mnew == NULL) { device_printf(sc->sc_dev, "could not defragment mbuf\n"); m_freem(m0); return ENOBUFS; } m0 = mnew; error = bus_dmamap_load_mbuf_sg(sc->txq.data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } /* 
packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (bpf_peers_present(ifp->if_bpf)) { struct rt2560_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; tap->wt_antenna = sc->tx_ant; bpf_mtap2(ifp->if_bpf, tap, sc->sc_txtap_len, m0); } data->m = m0; data->ni = ni; /* remember link conditions for rate adaptation algorithm */ if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) { data->rix = ni->ni_txrate; /* XXX probably need last rssi value and not avg */ data->rssi = ic->ic_node_getrssi(ni); } else data->rix = IEEE80211_FIXED_RATE_NONE; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RT2560_TX_ACK; dur = ieee80211_ack_duration(sc->sc_rates, rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); *(uint16_t *)wh->i_dur = htole16(dur); } rt2560_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate, 1, segs->ds_addr); bus_dmamap_sync(sc->txq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(sc, 10, "sending data frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, sc->txq.cur_encrypt, rate); /* kick encrypt */ sc->txq.queued++; sc->txq.cur_encrypt = (sc->txq.cur_encrypt + 1) % RT2560_TX_RING_COUNT; RAL_WRITE(sc, RT2560_SECCSR1, RT2560_KICK_ENCRYPT); return 0; } static void rt2560_start_locked(struct ifnet *ifp) { struct rt2560_softc *sc = ifp->if_softc; struct mbuf *m; struct ieee80211_node *ni; RAL_LOCK_ASSERT(sc); for (;;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; if (sc->txq.queued >= RT2560_TX_RING_COUNT - 1) { IFQ_DRV_PREPEND(&ifp->if_snd, m); ifp->if_drv_flags |= IFF_DRV_OACTIVE; sc->sc_flags |= RT2560_F_DATA_OACTIVE; break; } ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; m = ieee80211_encap(ni, m); if (m == NULL) { ieee80211_free_node(ni); ifp->if_oerrors++; continue; } if (rt2560_tx_data(sc, m, ni) != 0) { ieee80211_free_node(ni); ifp->if_oerrors++; break; } sc->sc_tx_timer = 5; } } static void rt2560_start(struct ifnet *ifp) { struct rt2560_softc *sc = ifp->if_softc; RAL_LOCK(sc); rt2560_start_locked(ifp); RAL_UNLOCK(sc); } static void rt2560_watchdog(void *arg) { struct rt2560_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; RAL_LOCK_ASSERT(sc); KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running")); if (sc->sc_invalid) /* card ejected */ return; rt2560_encryption_intr(sc); rt2560_tx_intr(sc); if (sc->sc_tx_timer > 0 && --sc->sc_tx_timer == 0) { if_printf(ifp, "device timeout\n"); rt2560_init_locked(sc); ifp->if_oerrors++; /* NB: callout is reset in rt2560_init() */ return; } callout_reset(&sc->watchdog_ch, hz, rt2560_watchdog, sc); } static int rt2560_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct rt2560_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct ifreq *ifr = (struct ifreq *) data; int error = 0, startall = 0; switch (cmd) { case SIOCSIFFLAGS: RAL_LOCK(sc); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { rt2560_init_locked(sc); startall = 1; } else rt2560_update_promisc(ifp); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) rt2560_stop_locked(sc); } RAL_UNLOCK(sc); if (startall) ieee80211_start_all(ic); break; case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; case SIOCGIFADDR: error = ether_ioctl(ifp, cmd, data); break; default: error = EINVAL; break; } return error; } static void rt2560_bbp_write(struct rt2560_softc *sc, uint8_t reg, uint8_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 100; 
ntries++) { if (!(RAL_READ(sc, RT2560_BBPCSR) & RT2560_BBP_BUSY)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to BBP\n"); return; } tmp = RT2560_BBP_WRITE | RT2560_BBP_BUSY | reg << 8 | val; RAL_WRITE(sc, RT2560_BBPCSR, tmp); DPRINTFN(sc, 15, "BBP R%u <- 0x%02x\n", reg, val); } static uint8_t rt2560_bbp_read(struct rt2560_softc *sc, uint8_t reg) { uint32_t val; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2560_BBPCSR) & RT2560_BBP_BUSY)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not read from BBP\n"); return 0; } val = RT2560_BBP_BUSY | reg << 8; RAL_WRITE(sc, RT2560_BBPCSR, val); for (ntries = 0; ntries < 100; ntries++) { val = RAL_READ(sc, RT2560_BBPCSR); if (!(val & RT2560_BBP_BUSY)) return val & 0xff; DELAY(1); } device_printf(sc->sc_dev, "could not read from BBP\n"); return 0; } static void rt2560_rf_write(struct rt2560_softc *sc, uint8_t reg, uint32_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2560_RFCSR) & RT2560_RF_BUSY)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to RF\n"); return; } tmp = RT2560_RF_BUSY | RT2560_RF_20BIT | (val & 0xfffff) << 2 | (reg & 0x3); RAL_WRITE(sc, RT2560_RFCSR, tmp); /* remember last written value in sc */ sc->rf_regs[reg] = val; DPRINTFN(sc, 15, "RF R[%u] <- 0x%05x\n", reg & 0x3, val & 0xfffff); } static void rt2560_set_chan(struct rt2560_softc *sc, struct ieee80211_channel *c) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint8_t power, tmp; u_int i, chan; chan = ieee80211_chan2ieee(ic, c); KASSERT(chan != 0 && chan != IEEE80211_CHAN_ANY, ("chan 0x%x", chan)); sc->sc_rates = ieee80211_get_ratetable(c); if (IEEE80211_IS_CHAN_2GHZ(c)) power = min(sc->txpow[chan - 1], 31); else power = 31; /* adjust txpower using ifconfig settings */ power -= (100 - ic->ic_txpowlimit) / 8; DPRINTFN(sc, 2, "setting channel to %u, txpower to %u\n", chan, power); switch (sc->rf_rev) { case RT2560_RF_2522: rt2560_rf_write(sc, RAL_RF1, 0x00814); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2522_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x00040); break; case RT2560_RF_2523: rt2560_rf_write(sc, RAL_RF1, 0x08804); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2523_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x38044); rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); break; case RT2560_RF_2524: rt2560_rf_write(sc, RAL_RF1, 0x0c808); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2524_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x00040); rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); break; case RT2560_RF_2525: rt2560_rf_write(sc, RAL_RF1, 0x08808); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2525_hi_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x18044); rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); rt2560_rf_write(sc, RAL_RF1, 0x08808); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2525_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x18044); rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); break; case RT2560_RF_2525E: rt2560_rf_write(sc, RAL_RF1, 0x08808); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2525e_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x18044); rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00286 : 0x00282); break; case RT2560_RF_2526: rt2560_rf_write(sc, RAL_RF2, rt2560_rf2526_hi_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF4, (chan & 1) ? 
0x00386 : 0x00381); rt2560_rf_write(sc, RAL_RF1, 0x08804); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2526_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x18044); rt2560_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381); break; /* dual-band RF */ case RT2560_RF_5222: for (i = 0; rt2560_rf5222[i].chan != chan; i++); rt2560_rf_write(sc, RAL_RF1, rt2560_rf5222[i].r1); rt2560_rf_write(sc, RAL_RF2, rt2560_rf5222[i].r2); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x00040); rt2560_rf_write(sc, RAL_RF4, rt2560_rf5222[i].r4); break; default: printf("unknown ral rev=%d\n", sc->rf_rev); } /* XXX */ if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) { /* set Japan filter bit for channel 14 */ tmp = rt2560_bbp_read(sc, 70); tmp &= ~RT2560_JAPAN_FILTER; if (chan == 14) tmp |= RT2560_JAPAN_FILTER; rt2560_bbp_write(sc, 70, tmp); /* clear CRC errors */ RAL_READ(sc, RT2560_CNT0); } } static void rt2560_set_channel(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct rt2560_softc *sc = ifp->if_softc; RAL_LOCK(sc); rt2560_set_chan(sc, ic->ic_curchan); sc->sc_txtap.wt_chan_freq = htole16(ic->ic_curchan->ic_freq); sc->sc_txtap.wt_chan_flags = htole16(ic->ic_curchan->ic_flags); sc->sc_rxtap.wr_chan_freq = htole16(ic->ic_curchan->ic_freq); sc->sc_rxtap.wr_chan_flags = htole16(ic->ic_curchan->ic_flags); RAL_UNLOCK(sc); } #if 0 /* * Disable RF auto-tuning. */ static void rt2560_disable_rf_tune(struct rt2560_softc *sc) { uint32_t tmp; if (sc->rf_rev != RT2560_RF_2523) { tmp = sc->rf_regs[RAL_RF1] & ~RAL_RF1_AUTOTUNE; rt2560_rf_write(sc, RAL_RF1, tmp); } tmp = sc->rf_regs[RAL_RF3] & ~RAL_RF3_AUTOTUNE; rt2560_rf_write(sc, RAL_RF3, tmp); DPRINTFN(sc, 2, "%s", "disabling RF autotune\n"); } #endif /* * Refer to IEEE Std 802.11-1999 pp. 123 for more information on TSF * synchronization. */ static void rt2560_enable_tsf_sync(struct rt2560_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint16_t logcwmin, preload; uint32_t tmp; /* first, disable TSF synchronization */ RAL_WRITE(sc, RT2560_CSR14, 0); tmp = 16 * vap->iv_bss->ni_intval; RAL_WRITE(sc, RT2560_CSR12, tmp); RAL_WRITE(sc, RT2560_CSR13, 0); logcwmin = 5; preload = (vap->iv_opmode == IEEE80211_M_STA) ? 384 : 1024; tmp = logcwmin << 16 | preload; RAL_WRITE(sc, RT2560_BCNOCSR, tmp); /* finally, enable TSF synchronization */ tmp = RT2560_ENABLE_TSF | RT2560_ENABLE_TBCN; if (ic->ic_opmode == IEEE80211_M_STA) tmp |= RT2560_ENABLE_TSF_SYNC(1); else tmp |= RT2560_ENABLE_TSF_SYNC(2) | RT2560_ENABLE_BEACON_GENERATOR; RAL_WRITE(sc, RT2560_CSR14, tmp); DPRINTF(sc, "%s", "enabling TSF synchronization\n"); } static void rt2560_update_plcp(struct rt2560_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; /* no short preamble for 1Mbps */ RAL_WRITE(sc, RT2560_PLCP1MCSR, 0x00700400); if (!(ic->ic_flags & IEEE80211_F_SHPREAMBLE)) { /* values taken from the reference driver */ RAL_WRITE(sc, RT2560_PLCP2MCSR, 0x00380401); RAL_WRITE(sc, RT2560_PLCP5p5MCSR, 0x00150402); RAL_WRITE(sc, RT2560_PLCP11MCSR, 0x000b8403); } else { /* same values as above or'ed 0x8 */ RAL_WRITE(sc, RT2560_PLCP2MCSR, 0x00380409); RAL_WRITE(sc, RT2560_PLCP5p5MCSR, 0x0015040a); RAL_WRITE(sc, RT2560_PLCP11MCSR, 0x000b840b); } DPRINTF(sc, "updating PLCP for %s preamble\n", (ic->ic_flags & IEEE80211_F_SHPREAMBLE) ? "short" : "long"); } /* * This function can be called by ieee80211_set_shortslottime(). Refer to * IEEE Std 802.11-1999 pp. 85 to know how these values are computed. 
*/ static void rt2560_update_slot(struct ifnet *ifp) { struct rt2560_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; uint8_t slottime; uint16_t tx_sifs, tx_pifs, tx_difs, eifs; uint32_t tmp; #ifndef FORCE_SLOTTIME slottime = (ic->ic_flags & IEEE80211_F_SHSLOT) ? 9 : 20; #else /* * Setting slot time according to "short slot time" capability * in beacon/probe_resp seems to cause problem to acknowledge * certain AP's data frames transimitted at CCK/DS rates: the * problematic AP keeps retransmitting data frames, probably * because MAC level acks are not received by hardware. * So we cheat a little bit here by claiming we are capable of * "short slot time" but setting hardware slot time to the normal * slot time. ral(4) does not seem to have trouble to receive * frames transmitted using short slot time even if hardware * slot time is set to normal slot time. If we didn't use this * trick, we would have to claim that short slot time is not * supported; this would give relative poor RX performance * (-1Mb~-2Mb lower) and the _whole_ BSS would stop using short * slot time. */ slottime = 20; #endif /* update the MAC slot boundaries */ tx_sifs = RAL_SIFS - RT2560_TXRX_TURNAROUND; tx_pifs = tx_sifs + slottime; tx_difs = tx_sifs + 2 * slottime; eifs = (ic->ic_curmode == IEEE80211_MODE_11B) ? 364 : 60; tmp = RAL_READ(sc, RT2560_CSR11); tmp = (tmp & ~0x1f00) | slottime << 8; RAL_WRITE(sc, RT2560_CSR11, tmp); tmp = tx_pifs << 16 | tx_sifs; RAL_WRITE(sc, RT2560_CSR18, tmp); tmp = eifs << 16 | tx_difs; RAL_WRITE(sc, RT2560_CSR19, tmp); DPRINTF(sc, "setting slottime to %uus\n", slottime); } static void rt2560_set_basicrates(struct rt2560_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; /* update basic rate set */ if (ic->ic_curmode == IEEE80211_MODE_11B) { /* 11b basic rates: 1, 2Mbps */ RAL_WRITE(sc, RT2560_ARSP_PLCP_1, 0x3); } else if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) { /* 11a basic rates: 6, 12, 24Mbps */ RAL_WRITE(sc, RT2560_ARSP_PLCP_1, 0x150); } else { /* 11g basic rates: 1, 2, 5.5, 11, 6, 12, 24Mbps */ RAL_WRITE(sc, RT2560_ARSP_PLCP_1, 0x15f); } } static void rt2560_update_led(struct rt2560_softc *sc, int led1, int led2) { uint32_t tmp; /* set ON period to 70ms and OFF period to 30ms */ tmp = led1 << 16 | led2 << 17 | 70 << 8 | 30; RAL_WRITE(sc, RT2560_LEDCSR, tmp); } static void rt2560_set_bssid(struct rt2560_softc *sc, const uint8_t *bssid) { uint32_t tmp; tmp = bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24; RAL_WRITE(sc, RT2560_CSR5, tmp); tmp = bssid[4] | bssid[5] << 8; RAL_WRITE(sc, RT2560_CSR6, tmp); DPRINTF(sc, "setting BSSID to %6D\n", bssid, ":"); } static void rt2560_set_macaddr(struct rt2560_softc *sc, uint8_t *addr) { uint32_t tmp; tmp = addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24; RAL_WRITE(sc, RT2560_CSR3, tmp); tmp = addr[4] | addr[5] << 8; RAL_WRITE(sc, RT2560_CSR4, tmp); DPRINTF(sc, "setting MAC address to %6D\n", addr, ":"); } static void rt2560_get_macaddr(struct rt2560_softc *sc, uint8_t *addr) { uint32_t tmp; tmp = RAL_READ(sc, RT2560_CSR3); addr[0] = tmp & 0xff; addr[1] = (tmp >> 8) & 0xff; addr[2] = (tmp >> 16) & 0xff; addr[3] = (tmp >> 24); tmp = RAL_READ(sc, RT2560_CSR4); addr[4] = tmp & 0xff; addr[5] = (tmp >> 8) & 0xff; } static void rt2560_update_promisc(struct ifnet *ifp) { struct rt2560_softc *sc = ifp->if_softc; uint32_t tmp; tmp = RAL_READ(sc, RT2560_RXCSR0); tmp &= ~RT2560_DROP_NOT_TO_ME; if (!(ifp->if_flags & IFF_PROMISC)) tmp |= RT2560_DROP_NOT_TO_ME; RAL_WRITE(sc, 
RT2560_RXCSR0, tmp); DPRINTF(sc, "%s promiscuous mode\n", (ifp->if_flags & IFF_PROMISC) ? "entering" : "leaving"); } static const char * rt2560_get_rf(int rev) { switch (rev) { case RT2560_RF_2522: return "RT2522"; case RT2560_RF_2523: return "RT2523"; case RT2560_RF_2524: return "RT2524"; case RT2560_RF_2525: return "RT2525"; case RT2560_RF_2525E: return "RT2525e"; case RT2560_RF_2526: return "RT2526"; case RT2560_RF_5222: return "RT5222"; default: return "unknown"; } } static void rt2560_read_config(struct rt2560_softc *sc) { uint16_t val; int i; val = rt2560_eeprom_read(sc, RT2560_EEPROM_CONFIG0); sc->rf_rev = (val >> 11) & 0x7; sc->hw_radio = (val >> 10) & 0x1; sc->led_mode = (val >> 6) & 0x7; sc->rx_ant = (val >> 4) & 0x3; sc->tx_ant = (val >> 2) & 0x3; sc->nb_ant = val & 0x3; /* read default values for BBP registers */ for (i = 0; i < 16; i++) { val = rt2560_eeprom_read(sc, RT2560_EEPROM_BBP_BASE + i); if (val == 0 || val == 0xffff) continue; sc->bbp_prom[i].reg = val >> 8; sc->bbp_prom[i].val = val & 0xff; } /* read Tx power for all b/g channels */ for (i = 0; i < 14 / 2; i++) { val = rt2560_eeprom_read(sc, RT2560_EEPROM_TXPOWER + i); sc->txpow[i * 2] = val & 0xff; sc->txpow[i * 2 + 1] = val >> 8; } for (i = 0; i < 14; ++i) { if (sc->txpow[i] > 31) sc->txpow[i] = 24; } val = rt2560_eeprom_read(sc, RT2560_EEPROM_CALIBRATE); if ((val & 0xff) == 0xff) sc->rssi_corr = RT2560_DEFAULT_RSSI_CORR; else sc->rssi_corr = val & 0xff; DPRINTF(sc, "rssi correction %d, calibrate 0x%02x\n", sc->rssi_corr, val); } static void rt2560_scan_start(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct rt2560_softc *sc = ifp->if_softc; /* abort TSF synchronization */ RAL_WRITE(sc, RT2560_CSR14, 0); rt2560_set_bssid(sc, ifp->if_broadcastaddr); } static void rt2560_scan_end(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct rt2560_softc *sc = ifp->if_softc; struct ieee80211vap *vap = ic->ic_scan->ss_vap; rt2560_enable_tsf_sync(sc); /* XXX keep local copy */ rt2560_set_bssid(sc, vap->iv_bss->ni_bssid); } static int rt2560_bbp_init(struct rt2560_softc *sc) { #define N(a) (sizeof (a) / sizeof ((a)[0])) int i, ntries; /* wait for BBP to be ready */ for (ntries = 0; ntries < 100; ntries++) { if (rt2560_bbp_read(sc, RT2560_BBP_VERSION) != 0) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for BBP\n"); return EIO; } /* initialize BBP registers to default values */ for (i = 0; i < N(rt2560_def_bbp); i++) { rt2560_bbp_write(sc, rt2560_def_bbp[i].reg, rt2560_def_bbp[i].val); } /* initialize BBP registers to values stored in EEPROM */ for (i = 0; i < 16; i++) { if (sc->bbp_prom[i].reg == 0 && sc->bbp_prom[i].val == 0) break; rt2560_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val); } rt2560_bbp_write(sc, 17, 0x48); /* XXX restore bbp17 */ return 0; #undef N } static void rt2560_set_txantenna(struct rt2560_softc *sc, int antenna) { uint32_t tmp; uint8_t tx; tx = rt2560_bbp_read(sc, RT2560_BBP_TX) & ~RT2560_BBP_ANTMASK; if (antenna == 1) tx |= RT2560_BBP_ANTA; else if (antenna == 2) tx |= RT2560_BBP_ANTB; else tx |= RT2560_BBP_DIVERSITY; /* need to force I/Q flip for RF 2525e, 2526 and 5222 */ if (sc->rf_rev == RT2560_RF_2525E || sc->rf_rev == RT2560_RF_2526 || sc->rf_rev == RT2560_RF_5222) tx |= RT2560_BBP_FLIPIQ; rt2560_bbp_write(sc, RT2560_BBP_TX, tx); /* update values for CCK and OFDM in BBPCSR1 */ tmp = RAL_READ(sc, RT2560_BBPCSR1) & ~0x00070007; tmp |= (tx & 0x7) << 16 | (tx & 0x7); RAL_WRITE(sc, RT2560_BBPCSR1, tmp); } static void 
rt2560_set_rxantenna(struct rt2560_softc *sc, int antenna) { uint8_t rx; rx = rt2560_bbp_read(sc, RT2560_BBP_RX) & ~RT2560_BBP_ANTMASK; if (antenna == 1) rx |= RT2560_BBP_ANTA; else if (antenna == 2) rx |= RT2560_BBP_ANTB; else rx |= RT2560_BBP_DIVERSITY; /* need to force no I/Q flip for RF 2525e and 2526 */ if (sc->rf_rev == RT2560_RF_2525E || sc->rf_rev == RT2560_RF_2526) rx &= ~RT2560_BBP_FLIPIQ; rt2560_bbp_write(sc, RT2560_BBP_RX, rx); } static void rt2560_init_locked(struct rt2560_softc *sc) { #define N(a) (sizeof (a) / sizeof ((a)[0])) struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t tmp; int i; RAL_LOCK_ASSERT(sc); rt2560_stop_locked(sc); /* setup tx rings */ tmp = RT2560_PRIO_RING_COUNT << 24 | RT2560_ATIM_RING_COUNT << 16 | RT2560_TX_RING_COUNT << 8 | RT2560_TX_DESC_SIZE; /* rings must be initialized in this exact order */ RAL_WRITE(sc, RT2560_TXCSR2, tmp); RAL_WRITE(sc, RT2560_TXCSR3, sc->txq.physaddr); RAL_WRITE(sc, RT2560_TXCSR5, sc->prioq.physaddr); RAL_WRITE(sc, RT2560_TXCSR4, sc->atimq.physaddr); RAL_WRITE(sc, RT2560_TXCSR6, sc->bcnq.physaddr); /* setup rx ring */ tmp = RT2560_RX_RING_COUNT << 8 | RT2560_RX_DESC_SIZE; RAL_WRITE(sc, RT2560_RXCSR1, tmp); RAL_WRITE(sc, RT2560_RXCSR2, sc->rxq.physaddr); /* initialize MAC registers to default values */ for (i = 0; i < N(rt2560_def_mac); i++) RAL_WRITE(sc, rt2560_def_mac[i].reg, rt2560_def_mac[i].val); IEEE80211_ADDR_COPY(ic->ic_myaddr, IF_LLADDR(ifp)); rt2560_set_macaddr(sc, ic->ic_myaddr); /* set basic rate set (will be updated later) */ RAL_WRITE(sc, RT2560_ARSP_PLCP_1, 0x153); rt2560_update_slot(ifp); rt2560_update_plcp(sc); rt2560_update_led(sc, 0, 0); RAL_WRITE(sc, RT2560_CSR1, RT2560_RESET_ASIC); RAL_WRITE(sc, RT2560_CSR1, RT2560_HOST_READY); if (rt2560_bbp_init(sc) != 0) { rt2560_stop(sc); RAL_UNLOCK(sc); return; } rt2560_set_txantenna(sc, sc->tx_ant); rt2560_set_rxantenna(sc, sc->rx_ant); /* set default BSS channel */ rt2560_set_chan(sc, ic->ic_curchan); /* kick Rx */ tmp = RT2560_DROP_PHY_ERROR | RT2560_DROP_CRC_ERROR; if (ic->ic_opmode != IEEE80211_M_MONITOR) { tmp |= RT2560_DROP_CTL | RT2560_DROP_VERSION_ERROR; if (ic->ic_opmode != IEEE80211_M_HOSTAP) tmp |= RT2560_DROP_TODS; if (!(ifp->if_flags & IFF_PROMISC)) tmp |= RT2560_DROP_NOT_TO_ME; } RAL_WRITE(sc, RT2560_RXCSR0, tmp); /* clear old FCS and Rx FIFO errors */ RAL_READ(sc, RT2560_CNT0); RAL_READ(sc, RT2560_CNT4); /* clear any pending interrupts */ RAL_WRITE(sc, RT2560_CSR7, 0xffffffff); /* enable interrupts */ RAL_WRITE(sc, RT2560_CSR8, RT2560_INTR_MASK); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_drv_flags |= IFF_DRV_RUNNING; callout_reset(&sc->watchdog_ch, hz, rt2560_watchdog, sc); #undef N } static void rt2560_init(void *priv) { struct rt2560_softc *sc = priv; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; RAL_LOCK(sc); rt2560_init_locked(sc); RAL_UNLOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) ieee80211_start_all(ic); /* start all vap's */ } static void rt2560_stop_locked(struct rt2560_softc *sc) { struct ifnet *ifp = sc->sc_ifp; volatile int *flags = &sc->sc_flags; RAL_LOCK_ASSERT(sc); while (*flags & RT2560_F_INPUT_RUNNING) msleep(sc, &sc->sc_mtx, 0, "ralrunning", hz/10); callout_stop(&sc->watchdog_ch); sc->sc_tx_timer = 0; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); /* abort Tx */ RAL_WRITE(sc, RT2560_TXCSR0, RT2560_ABORT_TX); /* disable Rx */ RAL_WRITE(sc, RT2560_RXCSR0, RT2560_DISABLE_RX); /* reset ASIC (imply reset BBP) */ 
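		/*
		 * The chip is brought back up by rt2560_init_locked(), which
		 * pulses RT2560_RESET_ASIC again and then asserts
		 * RT2560_HOST_READY.
		 */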
RAL_WRITE(sc, RT2560_CSR1, RT2560_RESET_ASIC); RAL_WRITE(sc, RT2560_CSR1, 0); /* disable interrupts */ RAL_WRITE(sc, RT2560_CSR8, 0xffffffff); /* reset Tx and Rx rings */ rt2560_reset_tx_ring(sc, &sc->txq); rt2560_reset_tx_ring(sc, &sc->atimq); rt2560_reset_tx_ring(sc, &sc->prioq); rt2560_reset_tx_ring(sc, &sc->bcnq); rt2560_reset_rx_ring(sc, &sc->rxq); } sc->sc_flags &= ~(RT2560_F_PRIO_OACTIVE | RT2560_F_DATA_OACTIVE); } void rt2560_stop(void *arg) { struct rt2560_softc *sc = arg; RAL_LOCK(sc); rt2560_stop_locked(sc); RAL_UNLOCK(sc); } static int rt2560_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = ic->ic_ifp; struct rt2560_softc *sc = ifp->if_softc; RAL_LOCK(sc); /* prevent management frames from being sent if we're not ready */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { RAL_UNLOCK(sc); m_freem(m); ieee80211_free_node(ni); return ENETDOWN; } if (sc->prioq.queued >= RT2560_PRIO_RING_COUNT) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; sc->sc_flags |= RT2560_F_PRIO_OACTIVE; RAL_UNLOCK(sc); m_freem(m); ieee80211_free_node(ni); return ENOBUFS; /* XXX */ } ifp->if_opackets++; if (params == NULL) { /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. */ if (rt2560_tx_mgt(sc, m, ni) != 0) goto bad; } else { /* * Caller supplied explicit parameters to use in * sending the frame. */ if (rt2560_tx_raw(sc, m, ni, params)) goto bad; } sc->sc_tx_timer = 5; RAL_UNLOCK(sc); return 0; bad: ifp->if_oerrors++; ieee80211_free_node(ni); RAL_UNLOCK(sc); return EIO; /* XXX */ } diff --git a/sys/dev/ral/rt2661.c b/sys/dev/ral/rt2661.c index cc89d165f2fc..ae31d5e8a13e 100644 --- a/sys/dev/ral/rt2661.c +++ b/sys/dev/ral/rt2661.c @@ -1,2896 +1,2897 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2006 * Damien Bergamini * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /*- * Ralink Technology RT2561, RT2561S and RT2661 chipset driver * http://www.ralinktech.com/ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define RAL_DEBUG #ifdef RAL_DEBUG #define DPRINTF(sc, fmt, ...) do { \ if (sc->sc_debug > 0) \ printf(fmt, __VA_ARGS__); \ } while (0) #define DPRINTFN(sc, n, fmt, ...) do { \ if (sc->sc_debug >= (n)) \ printf(fmt, __VA_ARGS__); \ } while (0) #else #define DPRINTF(sc, fmt, ...) #define DPRINTFN(sc, n, fmt, ...) 
#endif static struct ieee80211vap *rt2661_vap_create(struct ieee80211com *, const char name[IFNAMSIZ], int unit, int opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]); static void rt2661_vap_delete(struct ieee80211vap *); static void rt2661_dma_map_addr(void *, bus_dma_segment_t *, int, int); static int rt2661_alloc_tx_ring(struct rt2661_softc *, struct rt2661_tx_ring *, int); static void rt2661_reset_tx_ring(struct rt2661_softc *, struct rt2661_tx_ring *); static void rt2661_free_tx_ring(struct rt2661_softc *, struct rt2661_tx_ring *); static int rt2661_alloc_rx_ring(struct rt2661_softc *, struct rt2661_rx_ring *, int); static void rt2661_reset_rx_ring(struct rt2661_softc *, struct rt2661_rx_ring *); static void rt2661_free_rx_ring(struct rt2661_softc *, struct rt2661_rx_ring *); -static struct ieee80211_node *rt2661_node_alloc( - struct ieee80211_node_table *); +static struct ieee80211_node *rt2661_node_alloc(struct ieee80211vap *, + const uint8_t [IEEE80211_ADDR_LEN]); static void rt2661_newassoc(struct ieee80211_node *, int); static int rt2661_newstate(struct ieee80211vap *, enum ieee80211_state, int); static uint16_t rt2661_eeprom_read(struct rt2661_softc *, uint8_t); static void rt2661_rx_intr(struct rt2661_softc *); static void rt2661_tx_intr(struct rt2661_softc *); static void rt2661_tx_dma_intr(struct rt2661_softc *, struct rt2661_tx_ring *); static void rt2661_mcu_beacon_expire(struct rt2661_softc *); static void rt2661_mcu_wakeup(struct rt2661_softc *); static void rt2661_mcu_cmd_intr(struct rt2661_softc *); static void rt2661_scan_start(struct ieee80211com *); static void rt2661_scan_end(struct ieee80211com *); static void rt2661_set_channel(struct ieee80211com *); static void rt2661_setup_tx_desc(struct rt2661_softc *, struct rt2661_tx_desc *, uint32_t, uint16_t, int, int, const bus_dma_segment_t *, int, int); static int rt2661_tx_data(struct rt2661_softc *, struct mbuf *, struct ieee80211_node *, int); static int rt2661_tx_mgt(struct rt2661_softc *, struct mbuf *, struct ieee80211_node *); static void rt2661_start_locked(struct ifnet *); static void rt2661_start(struct ifnet *); static int rt2661_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void rt2661_watchdog(void *); static int rt2661_ioctl(struct ifnet *, u_long, caddr_t); static void rt2661_bbp_write(struct rt2661_softc *, uint8_t, uint8_t); static uint8_t rt2661_bbp_read(struct rt2661_softc *, uint8_t); static void rt2661_rf_write(struct rt2661_softc *, uint8_t, uint32_t); static int rt2661_tx_cmd(struct rt2661_softc *, uint8_t, uint16_t); static void rt2661_select_antenna(struct rt2661_softc *); static void rt2661_enable_mrr(struct rt2661_softc *); static void rt2661_set_txpreamble(struct rt2661_softc *); static void rt2661_set_basicrates(struct rt2661_softc *, const struct ieee80211_rateset *); static void rt2661_select_band(struct rt2661_softc *, struct ieee80211_channel *); static void rt2661_set_chan(struct rt2661_softc *, struct ieee80211_channel *); static void rt2661_set_bssid(struct rt2661_softc *, const uint8_t *); static void rt2661_set_macaddr(struct rt2661_softc *, const uint8_t *); static void rt2661_update_promisc(struct ifnet *); static int rt2661_wme_update(struct ieee80211com *) __unused; static void rt2661_update_slot(struct ifnet *); static const char *rt2661_get_rf(int); static void rt2661_read_eeprom(struct rt2661_softc *, struct ieee80211com *); static int rt2661_bbp_init(struct rt2661_softc *); 
static void rt2661_init_locked(struct rt2661_softc *); static void rt2661_init(void *); static void rt2661_stop_locked(struct rt2661_softc *); static void rt2661_stop(void *); static int rt2661_load_microcode(struct rt2661_softc *); #ifdef notyet static void rt2661_rx_tune(struct rt2661_softc *); static void rt2661_radar_start(struct rt2661_softc *); static int rt2661_radar_stop(struct rt2661_softc *); #endif static int rt2661_prepare_beacon(struct rt2661_softc *, struct ieee80211vap *); static void rt2661_enable_tsf_sync(struct rt2661_softc *); static int rt2661_get_rssi(struct rt2661_softc *, uint8_t); static const struct { uint32_t reg; uint32_t val; } rt2661_def_mac[] = { RT2661_DEF_MAC }; static const struct { uint8_t reg; uint8_t val; } rt2661_def_bbp[] = { RT2661_DEF_BBP }; static const struct rfprog { uint8_t chan; uint32_t r1, r2, r3, r4; } rt2661_rf5225_1[] = { RT2661_RF5225_1 }, rt2661_rf5225_2[] = { RT2661_RF5225_2 }; int rt2661_attach(device_t dev, int id) { struct rt2661_softc *sc = device_get_softc(dev); struct ieee80211com *ic; struct ifnet *ifp; uint32_t val; int error, ac, ntries; uint8_t bands; sc->sc_id = id; sc->sc_dev = dev; ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { device_printf(sc->sc_dev, "can not if_alloc()\n"); return ENOMEM; } ic = ifp->if_l2com; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); callout_init_mtx(&sc->watchdog_ch, &sc->sc_mtx, 0); /* wait for NIC to initialize */ for (ntries = 0; ntries < 1000; ntries++) { if ((val = RAL_READ(sc, RT2661_MAC_CSR0)) != 0) break; DELAY(1000); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for NIC to initialize\n"); error = EIO; goto fail1; } /* retrieve RF rev. no and various other things from EEPROM */ rt2661_read_eeprom(sc, ic); device_printf(dev, "MAC/BBP RT%X, RF %s\n", val, rt2661_get_rf(sc->rf_rev)); /* * Allocate Tx and Rx rings. 
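	 * There is one data Tx ring per WME access category (four in all),
	 * plus a dedicated management ring and a single Rx ring.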
*/ for (ac = 0; ac < 4; ac++) { error = rt2661_alloc_tx_ring(sc, &sc->txq[ac], RT2661_TX_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Tx ring %d\n", ac); goto fail2; } } error = rt2661_alloc_tx_ring(sc, &sc->mgtq, RT2661_MGT_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Mgt ring\n"); goto fail2; } error = rt2661_alloc_rx_ring(sc, &sc->rxq, RT2661_RX_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Rx ring\n"); goto fail3; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = rt2661_init; ifp->if_ioctl = rt2661_ioctl; ifp->if_start = rt2661_start; IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN; IFQ_SET_READY(&ifp->if_snd); ic->ic_ifp = ifp; ic->ic_opmode = IEEE80211_M_STA; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode */ | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ | IEEE80211_C_HOSTAP /* hostap mode */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_AHDEMO /* adhoc demo mode */ | IEEE80211_C_WDS /* 4-address traffic works */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ #ifdef notyet | IEEE80211_C_TXFRAG /* handle tx frags */ | IEEE80211_C_WME /* 802.11e */ #endif ; bands = 0; setbit(&bands, IEEE80211_MODE_11B); setbit(&bands, IEEE80211_MODE_11G); if (sc->rf_rev == RT2661_RF_5225 || sc->rf_rev == RT2661_RF_5325) setbit(&bands, IEEE80211_MODE_11A); ieee80211_init_channels(ic, NULL, &bands); ieee80211_ifattach(ic); ic->ic_newassoc = rt2661_newassoc; ic->ic_node_alloc = rt2661_node_alloc; #if 0 ic->ic_wme.wme_update = rt2661_wme_update; #endif ic->ic_scan_start = rt2661_scan_start; ic->ic_scan_end = rt2661_scan_end; ic->ic_set_channel = rt2661_set_channel; ic->ic_updateslot = rt2661_update_slot; ic->ic_update_promisc = rt2661_update_promisc; ic->ic_raw_xmit = rt2661_raw_xmit; ic->ic_vap_create = rt2661_vap_create; ic->ic_vap_delete = rt2661_vap_delete; sc->sc_rates = ieee80211_get_ratetable(ic->ic_curchan); bpfattach(ifp, DLT_IEEE802_11_RADIO, sizeof (struct ieee80211_frame) + sizeof (sc->sc_txtap)); sc->sc_rxtap_len = sizeof sc->sc_rxtap; sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len); sc->sc_rxtap.wr_ihdr.it_present = htole32(RT2661_RX_RADIOTAP_PRESENT); sc->sc_txtap_len = sizeof sc->sc_txtap; sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len); sc->sc_txtap.wt_ihdr.it_present = htole32(RT2661_TX_RADIOTAP_PRESENT); #ifdef RAL_DEBUG SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, 0, "debug msgs"); #endif if (bootverbose) ieee80211_announce(ic); return 0; fail3: rt2661_free_tx_ring(sc, &sc->mgtq); fail2: while (--ac >= 0) rt2661_free_tx_ring(sc, &sc->txq[ac]); fail1: mtx_destroy(&sc->sc_mtx); if_free(ifp); return error; } int rt2661_detach(void *xsc) { struct rt2661_softc *sc = xsc; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; RAL_LOCK(sc); rt2661_stop_locked(sc); RAL_UNLOCK(sc); bpfdetach(ifp); ieee80211_ifdetach(ic); rt2661_free_tx_ring(sc, &sc->txq[0]); rt2661_free_tx_ring(sc, &sc->txq[1]); rt2661_free_tx_ring(sc, &sc->txq[2]); rt2661_free_tx_ring(sc, &sc->txq[3]); 
rt2661_free_tx_ring(sc, &sc->mgtq); rt2661_free_rx_ring(sc, &sc->rxq); if_free(ifp); mtx_destroy(&sc->sc_mtx); return 0; } static struct ieee80211vap * rt2661_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, int opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ifnet *ifp = ic->ic_ifp; struct rt2661_vap *rvp; struct ieee80211vap *vap; switch (opmode) { case IEEE80211_M_STA: case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: case IEEE80211_M_MONITOR: case IEEE80211_M_HOSTAP: if (!TAILQ_EMPTY(&ic->ic_vaps)) { if_printf(ifp, "only 1 vap supported\n"); return NULL; } if (opmode == IEEE80211_M_STA) flags |= IEEE80211_CLONE_NOBEACONS; break; case IEEE80211_M_WDS: if (TAILQ_EMPTY(&ic->ic_vaps) || ic->ic_opmode != IEEE80211_M_HOSTAP) { if_printf(ifp, "wds only supported in ap mode\n"); return NULL; } /* * Silently remove any request for a unique * bssid; WDS vap's always share the local * mac address. */ flags &= ~IEEE80211_CLONE_BSSID; break; default: if_printf(ifp, "unknown opmode %d\n", opmode); return NULL; } rvp = (struct rt2661_vap *) malloc(sizeof(struct rt2661_vap), M_80211_VAP, M_NOWAIT | M_ZERO); if (rvp == NULL) return NULL; vap = &rvp->ral_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac); /* override state transition machine */ rvp->ral_newstate = vap->iv_newstate; vap->iv_newstate = rt2661_newstate; #if 0 vap->iv_update_beacon = rt2661_beacon_update; #endif ieee80211_amrr_init(&rvp->amrr, vap, IEEE80211_AMRR_MIN_SUCCESS_THRESHOLD, IEEE80211_AMRR_MAX_SUCCESS_THRESHOLD, 500 /* ms */); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status); if (TAILQ_FIRST(&ic->ic_vaps) == vap) ic->ic_opmode = opmode; return vap; } static void rt2661_vap_delete(struct ieee80211vap *vap) { struct rt2661_vap *rvp = RT2661_VAP(vap); ieee80211_amrr_cleanup(&rvp->amrr); ieee80211_vap_detach(vap); free(rvp, M_80211_VAP); } void rt2661_shutdown(void *xsc) { struct rt2661_softc *sc = xsc; rt2661_stop(sc); } void rt2661_suspend(void *xsc) { struct rt2661_softc *sc = xsc; rt2661_stop(sc); } void rt2661_resume(void *xsc) { struct rt2661_softc *sc = xsc; struct ifnet *ifp = sc->sc_ifp; if (ifp->if_flags & IFF_UP) rt2661_init(sc); } static void rt2661_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error != 0) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); *(bus_addr_t *)arg = segs[0].ds_addr; } static int rt2661_alloc_tx_ring(struct rt2661_softc *sc, struct rt2661_tx_ring *ring, int count) { int i, error; ring->count = count; ring->queued = 0; ring->cur = ring->next = ring->stat = 0; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, count * RT2661_TX_DESC_SIZE, 1, count * RT2661_TX_DESC_SIZE, 0, NULL, NULL, &ring->desc_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc, count * RT2661_TX_DESC_SIZE, rt2661_dma_map_addr, &ring->physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load desc DMA map\n"); goto fail; } ring->data = malloc(count * sizeof (struct rt2661_tx_data), M_DEVBUF, M_NOWAIT | M_ZERO); if (ring->data == 
NULL) { device_printf(sc->sc_dev, "could not allocate soft data\n"); error = ENOMEM; goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, RT2661_MAX_SCATTER, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create data DMA tag\n"); goto fail; } for (i = 0; i < count; i++) { error = bus_dmamap_create(ring->data_dmat, 0, &ring->data[i].map); if (error != 0) { device_printf(sc->sc_dev, "could not create DMA map\n"); goto fail; } } return 0; fail: rt2661_free_tx_ring(sc, ring); return error; } static void rt2661_reset_tx_ring(struct rt2661_softc *sc, struct rt2661_tx_ring *ring) { struct rt2661_tx_desc *desc; struct rt2661_tx_data *data; int i; for (i = 0; i < ring->count; i++) { desc = &ring->desc[i]; data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; } desc->flags = 0; } bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); ring->queued = 0; ring->cur = ring->next = ring->stat = 0; } static void rt2661_free_tx_ring(struct rt2661_softc *sc, struct rt2661_tx_ring *ring) { struct rt2661_tx_data *data; int i; if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dmat, ring->desc_map); bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map); } if (ring->desc_dmat != NULL) bus_dma_tag_destroy(ring->desc_dmat); if (ring->data != NULL) { for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->ni != NULL) ieee80211_free_node(data->ni); if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } free(ring->data, M_DEVBUF); } if (ring->data_dmat != NULL) bus_dma_tag_destroy(ring->data_dmat); } static int rt2661_alloc_rx_ring(struct rt2661_softc *sc, struct rt2661_rx_ring *ring, int count) { struct rt2661_rx_desc *desc; struct rt2661_rx_data *data; bus_addr_t physaddr; int i, error; ring->count = count; ring->cur = ring->next = 0; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, count * RT2661_RX_DESC_SIZE, 1, count * RT2661_RX_DESC_SIZE, 0, NULL, NULL, &ring->desc_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc, count * RT2661_RX_DESC_SIZE, rt2661_dma_map_addr, &ring->physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load desc DMA map\n"); goto fail; } ring->data = malloc(count * sizeof (struct rt2661_rx_data), M_DEVBUF, M_NOWAIT | M_ZERO); if (ring->data == NULL) { device_printf(sc->sc_dev, "could not allocate soft data\n"); error = ENOMEM; goto fail; } /* * Pre-allocate Rx buffers and populate Rx ring. 
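	 * Each descriptor gets its own DMA map and a cluster mbuf and is
	 * marked RT2661_RX_BUSY, so the hardware owns the entire ring as
	 * soon as it is handed over.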
*/ error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create data DMA tag\n"); goto fail; } for (i = 0; i < count; i++) { desc = &sc->rxq.desc[i]; data = &sc->rxq.data[i]; error = bus_dmamap_create(ring->data_dmat, 0, &data->map); if (error != 0) { device_printf(sc->sc_dev, "could not create DMA map\n"); goto fail; } data->m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (data->m == NULL) { device_printf(sc->sc_dev, "could not allocate rx mbuf\n"); error = ENOMEM; goto fail; } error = bus_dmamap_load(ring->data_dmat, data->map, mtod(data->m, void *), MCLBYTES, rt2661_dma_map_addr, &physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load rx buf DMA map"); goto fail; } desc->flags = htole32(RT2661_RX_BUSY); desc->physaddr = htole32(physaddr); } bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); return 0; fail: rt2661_free_rx_ring(sc, ring); return error; } static void rt2661_reset_rx_ring(struct rt2661_softc *sc, struct rt2661_rx_ring *ring) { int i; for (i = 0; i < ring->count; i++) ring->desc[i].flags = htole32(RT2661_RX_BUSY); bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); ring->cur = ring->next = 0; } static void rt2661_free_rx_ring(struct rt2661_softc *sc, struct rt2661_rx_ring *ring) { struct rt2661_rx_data *data; int i; if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dmat, ring->desc_map); bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map); } if (ring->desc_dmat != NULL) bus_dma_tag_destroy(ring->desc_dmat); if (ring->data != NULL) { for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } free(ring->data, M_DEVBUF); } if (ring->data_dmat != NULL) bus_dma_tag_destroy(ring->data_dmat); } static struct ieee80211_node * -rt2661_node_alloc(struct ieee80211_node_table *nt) +rt2661_node_alloc(struct ieee80211vap *vap, + const uint8_t mac[IEEE80211_ADDR_LEN]) { struct rt2661_node *rn; rn = malloc(sizeof (struct rt2661_node), M_80211_NODE, M_NOWAIT | M_ZERO); return (rn != NULL) ? 
&rn->ni : NULL; } static void rt2661_newassoc(struct ieee80211_node *ni, int isnew) { struct ieee80211vap *vap = ni->ni_vap; ieee80211_amrr_node_init(&RT2661_VAP(vap)->amrr, &RT2661_NODE(ni)->amrr, ni); } static int rt2661_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct rt2661_vap *rvp = RT2661_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct rt2661_softc *sc = ic->ic_ifp->if_softc; int error; if (nstate == IEEE80211_S_INIT && vap->iv_state == IEEE80211_S_RUN) { uint32_t tmp; /* abort TSF synchronization */ tmp = RAL_READ(sc, RT2661_TXRX_CSR9); RAL_WRITE(sc, RT2661_TXRX_CSR9, tmp & ~0x00ffffff); } error = rvp->ral_newstate(vap, nstate, arg); if (error == 0 && nstate == IEEE80211_S_RUN) { struct ieee80211_node *ni = vap->iv_bss; if (vap->iv_opmode != IEEE80211_M_MONITOR) { rt2661_enable_mrr(sc); rt2661_set_txpreamble(sc); rt2661_set_basicrates(sc, &ni->ni_rates); rt2661_set_bssid(sc, ni->ni_bssid); } if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_IBSS) { error = rt2661_prepare_beacon(sc, vap); if (error != 0) return error; } if (vap->iv_opmode != IEEE80211_M_MONITOR) { if (vap->iv_opmode == IEEE80211_M_STA) { /* fake a join to init the tx rate */ rt2661_newassoc(ni, 1); } rt2661_enable_tsf_sync(sc); } } return error; } /* * Read 16 bits at address 'addr' from the serial EEPROM (either 93C46 or * 93C66). */ static uint16_t rt2661_eeprom_read(struct rt2661_softc *sc, uint8_t addr) { uint32_t tmp; uint16_t val; int n; /* clock C once before the first command */ RT2661_EEPROM_CTL(sc, 0); RT2661_EEPROM_CTL(sc, RT2661_S); RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_C); RT2661_EEPROM_CTL(sc, RT2661_S); /* write start bit (1) */ RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_D); RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_D | RT2661_C); /* write READ opcode (10) */ RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_D); RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_D | RT2661_C); RT2661_EEPROM_CTL(sc, RT2661_S); RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_C); /* write address (A5-A0 or A7-A0) */ n = (RAL_READ(sc, RT2661_E2PROM_CSR) & RT2661_93C46) ? 5 : 7; for (; n >= 0; n--) { RT2661_EEPROM_CTL(sc, RT2661_S | (((addr >> n) & 1) << RT2661_SHIFT_D)); RT2661_EEPROM_CTL(sc, RT2661_S | (((addr >> n) & 1) << RT2661_SHIFT_D) | RT2661_C); } RT2661_EEPROM_CTL(sc, RT2661_S); /* read data Q15-Q0 */ val = 0; for (n = 15; n >= 0; n--) { RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_C); tmp = RAL_READ(sc, RT2661_E2PROM_CSR); val |= ((tmp & RT2661_Q) >> RT2661_SHIFT_Q) << n; RT2661_EEPROM_CTL(sc, RT2661_S); } RT2661_EEPROM_CTL(sc, 0); /* clear Chip Select and clock C */ RT2661_EEPROM_CTL(sc, RT2661_S); RT2661_EEPROM_CTL(sc, 0); RT2661_EEPROM_CTL(sc, RT2661_C); return val; } static void rt2661_tx_intr(struct rt2661_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct rt2661_tx_ring *txq; struct rt2661_tx_data *data; struct rt2661_node *rn; uint32_t val; int qid, retrycnt; for (;;) { struct ieee80211_node *ni; struct mbuf *m; val = RAL_READ(sc, RT2661_STA_CSR4); if (!(val & RT2661_TX_STAT_VALID)) break; /* retrieve the queue in which this frame was sent */ qid = RT2661_TX_QID(val); txq = (qid <= 3) ? 
&sc->txq[qid] : &sc->mgtq; /* retrieve rate control algorithm context */ data = &txq->data[txq->stat]; m = data->m; data->m = NULL; ni = data->ni; data->ni = NULL; /* if no frame has been sent, ignore */ if (ni == NULL) continue; rn = RT2661_NODE(ni); switch (RT2661_TX_RESULT(val)) { case RT2661_TX_SUCCESS: retrycnt = RT2661_TX_RETRYCNT(val); DPRINTFN(sc, 10, "data frame sent successfully after " "%d retries\n", retrycnt); if (data->rix != IEEE80211_FIXED_RATE_NONE) ieee80211_amrr_tx_complete(&rn->amrr, IEEE80211_AMRR_SUCCESS, retrycnt); ifp->if_opackets++; break; case RT2661_TX_RETRY_FAIL: retrycnt = RT2661_TX_RETRYCNT(val); DPRINTFN(sc, 9, "%s\n", "sending data frame failed (too much retries)"); if (data->rix != IEEE80211_FIXED_RATE_NONE) ieee80211_amrr_tx_complete(&rn->amrr, IEEE80211_AMRR_FAILURE, retrycnt); ifp->if_oerrors++; break; default: /* other failure */ device_printf(sc->sc_dev, "sending data frame failed 0x%08x\n", val); ifp->if_oerrors++; } DPRINTFN(sc, 15, "tx done q=%d idx=%u\n", qid, txq->stat); txq->queued--; if (++txq->stat >= txq->count) /* faster than % count */ txq->stat = 0; if (m->m_flags & M_TXCB) ieee80211_process_callback(ni, m, RT2661_TX_RESULT(val) != RT2661_TX_SUCCESS); m_freem(m); ieee80211_free_node(ni); } sc->sc_tx_timer = 0; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; rt2661_start_locked(ifp); } static void rt2661_tx_dma_intr(struct rt2661_softc *sc, struct rt2661_tx_ring *txq) { struct rt2661_tx_desc *desc; struct rt2661_tx_data *data; bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_POSTREAD); for (;;) { desc = &txq->desc[txq->next]; data = &txq->data[txq->next]; if ((le32toh(desc->flags) & RT2661_TX_BUSY) || !(le32toh(desc->flags) & RT2661_TX_VALID)) break; bus_dmamap_sync(txq->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txq->data_dmat, data->map); /* descriptor is no longer valid */ desc->flags &= ~htole32(RT2661_TX_VALID); DPRINTFN(sc, 15, "tx dma done q=%p idx=%u\n", txq, txq->next); if (++txq->next >= txq->count) /* faster than % count */ txq->next = 0; } bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_PREWRITE); } static void rt2661_rx_intr(struct rt2661_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct rt2661_rx_desc *desc; struct rt2661_rx_data *data; bus_addr_t physaddr; struct ieee80211_frame *wh; struct ieee80211_node *ni; struct mbuf *mnew, *m; int error; bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_POSTREAD); for (;;) { int rssi; desc = &sc->rxq.desc[sc->rxq.cur]; data = &sc->rxq.data[sc->rxq.cur]; if (le32toh(desc->flags) & RT2661_RX_BUSY) break; if ((le32toh(desc->flags) & RT2661_RX_PHY_ERROR) || (le32toh(desc->flags) & RT2661_RX_CRC_ERROR)) { /* * This should not happen since we did not request * to receive those frames when we filled TXRX_CSR0. */ DPRINTFN(sc, 5, "PHY or CRC error flags 0x%08x\n", le32toh(desc->flags)); ifp->if_ierrors++; goto skip; } if ((le32toh(desc->flags) & RT2661_RX_CIPHER_MASK) != 0) { ifp->if_ierrors++; goto skip; } /* * Try to allocate a new mbuf for this ring element and load it * before processing the current mbuf. If the ring element * cannot be loaded, drop the received packet and reuse the old * mbuf. In the unlikely case that the old mbuf can't be * reloaded either, explicitly panic. 
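		 * This keeps the ring fully populated: a descriptor is never
		 * handed back to the hardware without a loaded buffer behind
		 * it.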
*/ mnew = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (mnew == NULL) { ifp->if_ierrors++; goto skip; } bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rxq.data_dmat, data->map); error = bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(mnew, void *), MCLBYTES, rt2661_dma_map_addr, &physaddr, 0); if (error != 0) { m_freem(mnew); /* try to reload the old mbuf */ error = bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(data->m, void *), MCLBYTES, rt2661_dma_map_addr, &physaddr, 0); if (error != 0) { /* very unlikely that it will fail... */ panic("%s: could not load old rx mbuf", device_get_name(sc->sc_dev)); } ifp->if_ierrors++; goto skip; } /* * New mbuf successfully loaded, update Rx ring and continue * processing. */ m = data->m; data->m = mnew; desc->physaddr = htole32(physaddr); /* finalize mbuf */ m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = (le32toh(desc->flags) >> 16) & 0xfff; rssi = rt2661_get_rssi(sc, desc->rssi); if (bpf_peers_present(ifp->if_bpf)) { struct rt2661_rx_radiotap_header *tap = &sc->sc_rxtap; uint32_t tsf_lo, tsf_hi; /* get timestamp (low and high 32 bits) */ tsf_hi = RAL_READ(sc, RT2661_TXRX_CSR13); tsf_lo = RAL_READ(sc, RT2661_TXRX_CSR12); tap->wr_tsf = htole64(((uint64_t)tsf_hi << 32) | tsf_lo); tap->wr_flags = 0; tap->wr_rate = ieee80211_plcp2rate(desc->rate, (desc->flags & htole32(RT2661_RX_OFDM)) ? IEEE80211_T_OFDM : IEEE80211_T_CCK); tap->wr_antsignal = rssi < 0 ? 0 : rssi; bpf_mtap2(ifp->if_bpf, tap, sc->sc_rxtap_len, m); } sc->sc_flags |= RAL_INPUT_RUNNING; RAL_UNLOCK(sc); wh = mtod(m, struct ieee80211_frame *); /* send the frame to the 802.11 layer */ ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); if (ni != NULL) { /* Error happened during RSSI conversion. 
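		 * Fall back to a fixed placeholder value in that case.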
*/ if (rssi < 0) rssi = -30; /* XXX ignored by net80211 */ (void) ieee80211_input(ni, m, rssi, RT2661_NOISE_FLOOR, 0); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi, RT2661_NOISE_FLOOR, 0); RAL_LOCK(sc); sc->sc_flags &= ~RAL_INPUT_RUNNING; skip: desc->flags |= htole32(RT2661_RX_BUSY); DPRINTFN(sc, 15, "rx intr idx=%u\n", sc->rxq.cur); sc->rxq.cur = (sc->rxq.cur + 1) % RT2661_RX_RING_COUNT; } bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_PREWRITE); } /* ARGSUSED */ static void rt2661_mcu_beacon_expire(struct rt2661_softc *sc) { /* do nothing */ } static void rt2661_mcu_wakeup(struct rt2661_softc *sc) { RAL_WRITE(sc, RT2661_MAC_CSR11, 5 << 16); RAL_WRITE(sc, RT2661_SOFT_RESET_CSR, 0x7); RAL_WRITE(sc, RT2661_IO_CNTL_CSR, 0x18); RAL_WRITE(sc, RT2661_PCI_USEC_CSR, 0x20); /* send wakeup command to MCU */ rt2661_tx_cmd(sc, RT2661_MCU_CMD_WAKEUP, 0); } static void rt2661_mcu_cmd_intr(struct rt2661_softc *sc) { RAL_READ(sc, RT2661_M2H_CMD_DONE_CSR); RAL_WRITE(sc, RT2661_M2H_CMD_DONE_CSR, 0xffffffff); } void rt2661_intr(void *arg) { struct rt2661_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; uint32_t r1, r2; RAL_LOCK(sc); /* disable MAC and MCU interrupts */ RAL_WRITE(sc, RT2661_INT_MASK_CSR, 0xffffff7f); RAL_WRITE(sc, RT2661_MCU_INT_MASK_CSR, 0xffffffff); /* don't re-enable interrupts if we're shutting down */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { RAL_UNLOCK(sc); return; } r1 = RAL_READ(sc, RT2661_INT_SOURCE_CSR); RAL_WRITE(sc, RT2661_INT_SOURCE_CSR, r1); r2 = RAL_READ(sc, RT2661_MCU_INT_SOURCE_CSR); RAL_WRITE(sc, RT2661_MCU_INT_SOURCE_CSR, r2); if (r1 & RT2661_MGT_DONE) rt2661_tx_dma_intr(sc, &sc->mgtq); if (r1 & RT2661_RX_DONE) rt2661_rx_intr(sc); if (r1 & RT2661_TX0_DMA_DONE) rt2661_tx_dma_intr(sc, &sc->txq[0]); if (r1 & RT2661_TX1_DMA_DONE) rt2661_tx_dma_intr(sc, &sc->txq[1]); if (r1 & RT2661_TX2_DMA_DONE) rt2661_tx_dma_intr(sc, &sc->txq[2]); if (r1 & RT2661_TX3_DMA_DONE) rt2661_tx_dma_intr(sc, &sc->txq[3]); if (r1 & RT2661_TX_DONE) rt2661_tx_intr(sc); if (r2 & RT2661_MCU_CMD_DONE) rt2661_mcu_cmd_intr(sc); if (r2 & RT2661_MCU_BEACON_EXPIRE) rt2661_mcu_beacon_expire(sc); if (r2 & RT2661_MCU_WAKEUP) rt2661_mcu_wakeup(sc); /* re-enable MAC and MCU interrupts */ RAL_WRITE(sc, RT2661_INT_MASK_CSR, 0x0000ff10); RAL_WRITE(sc, RT2661_MCU_INT_MASK_CSR, 0); RAL_UNLOCK(sc); } static uint8_t rt2661_plcp_signal(int rate) { switch (rate) { /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */ case 12: return 0xb; case 18: return 0xf; case 24: return 0xa; case 36: return 0xe; case 48: return 0x9; case 72: return 0xd; case 96: return 0x8; case 108: return 0xc; /* CCK rates (NB: not IEEE std, device-specific) */ case 2: return 0x0; case 4: return 0x1; case 11: return 0x2; case 22: return 0x3; } return 0xff; /* XXX unsupported/unknown rate */ } static void rt2661_setup_tx_desc(struct rt2661_softc *sc, struct rt2661_tx_desc *desc, uint32_t flags, uint16_t xflags, int len, int rate, const bus_dma_segment_t *segs, int nsegs, int ac) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint16_t plcp_length; int i, remainder; desc->flags = htole32(flags); desc->flags |= htole32(len << 16); desc->flags |= htole32(RT2661_TX_BUSY | RT2661_TX_VALID); desc->xflags = htole16(xflags); desc->xflags |= htole16(nsegs << 13); desc->wme = htole16( RT2661_QID(ac) | RT2661_AIFSN(2) | RT2661_LOGCWMIN(4) | RT2661_LOGCWMAX(10)); /* * Remember in which queue this frame was sent. This field is driver * private data only. 
It will be made available by the NIC in STA_CSR4 * on Tx interrupts. */ desc->qid = ac; /* setup PLCP fields */ desc->plcp_signal = rt2661_plcp_signal(rate); desc->plcp_service = 4; len += IEEE80211_CRC_LEN; if (ieee80211_rate2phytype(sc->sc_rates, rate) == IEEE80211_T_OFDM) { desc->flags |= htole32(RT2661_TX_OFDM); plcp_length = len & 0xfff; desc->plcp_length_hi = plcp_length >> 6; desc->plcp_length_lo = plcp_length & 0x3f; } else { plcp_length = (16 * len + rate - 1) / rate; if (rate == 22) { remainder = (16 * len) % 22; if (remainder != 0 && remainder < 7) desc->plcp_service |= RT2661_PLCP_LENGEXT; } desc->plcp_length_hi = plcp_length >> 8; desc->plcp_length_lo = plcp_length & 0xff; if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) desc->plcp_signal |= 0x08; } /* RT2x61 supports scatter with up to 5 segments */ for (i = 0; i < nsegs; i++) { desc->addr[i] = htole32(segs[i].ds_addr); desc->len [i] = htole16(segs[i].ds_len); } } static int rt2661_tx_mgt(struct rt2661_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = sc->sc_ifp; struct rt2661_tx_desc *desc; struct rt2661_tx_data *data; struct ieee80211_frame *wh; struct ieee80211_key *k; bus_dma_segment_t segs[RT2661_MAX_SCATTER]; uint16_t dur; uint32_t flags = 0; /* XXX HWSEQ */ int nsegs, rate, error; desc = &sc->mgtq.desc[sc->mgtq.cur]; data = &sc->mgtq.data[sc->mgtq.cur]; rate = vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)].mgmtrate; wh = mtod(m0, struct ieee80211_frame *); if (wh->i_fc[1] & IEEE80211_FC1_WEP) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } } error = bus_dmamap_load_mbuf_sg(sc->mgtq.data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (bpf_peers_present(ifp->if_bpf)) { struct rt2661_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; bpf_mtap2(ifp->if_bpf, tap, sc->sc_txtap_len, m0); } data->m = m0; data->ni = ni; /* management frames are not taken into account for amrr */ data->rix = IEEE80211_FIXED_RATE_NONE; wh = mtod(m0, struct ieee80211_frame *); if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RT2661_TX_NEED_ACK; dur = ieee80211_ack_duration(sc->sc_rates, rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); *(uint16_t *)wh->i_dur = htole16(dur); /* tell hardware to add timestamp in probe responses */ if ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP)) flags |= RT2661_TX_TIMESTAMP; } rt2661_setup_tx_desc(sc, desc, flags, 0 /* XXX HWSEQ */, m0->m_pkthdr.len, rate, segs, nsegs, RT2661_QID_MGT); bus_dmamap_sync(sc->mgtq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->mgtq.desc_dmat, sc->mgtq.desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(sc, 10, "sending mgt frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, sc->mgtq.cur, rate); /* kick mgt */ sc->mgtq.queued++; sc->mgtq.cur = (sc->mgtq.cur + 1) % RT2661_MGT_RING_COUNT; RAL_WRITE(sc, RT2661_TX_CNTL_CSR, RT2661_KICK_MGT); return 0; } static int rt2661_sendprot(struct rt2661_softc *sc, int ac, const struct mbuf *m, struct ieee80211_node *ni, int prot, int rate) { struct ieee80211com *ic = ni->ni_ic; struct rt2661_tx_ring *txq = &sc->txq[ac]; const struct ieee80211_frame *wh; struct rt2661_tx_desc *desc; struct rt2661_tx_data *data; struct mbuf *mprot; int protrate, ackrate, pktlen, 
flags, isshort, error; uint16_t dur; bus_dma_segment_t segs[RT2661_MAX_SCATTER]; int nsegs; KASSERT(prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY, ("protection %d", prot)); wh = mtod(m, const struct ieee80211_frame *); pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN; protrate = ieee80211_ctl_rate(sc->sc_rates, rate); ackrate = ieee80211_ack_rate(sc->sc_rates, rate); isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; dur = ieee80211_compute_duration(sc->sc_rates, pktlen, rate, isshort) + ieee80211_ack_duration(sc->sc_rates, rate, isshort); flags = RT2661_TX_MORE_FRAG; if (prot == IEEE80211_PROT_RTSCTS) { /* NB: CTS is the same size as an ACK */ dur += ieee80211_ack_duration(sc->sc_rates, rate, isshort); flags |= RT2661_TX_NEED_ACK; mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, dur); } else { mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur); } if (mprot == NULL) { /* XXX stat + msg */ return ENOBUFS; } data = &txq->data[txq->cur]; desc = &txq->desc[txq->cur]; error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, mprot, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(mprot); return error; } data->m = mprot; data->ni = ieee80211_ref_node(ni); /* ctl frames are not taken into account for amrr */ data->rix = IEEE80211_FIXED_RATE_NONE; rt2661_setup_tx_desc(sc, desc, flags, 0, mprot->m_pkthdr.len, protrate, segs, 1, ac); bus_dmamap_sync(txq->data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_PREWRITE); txq->queued++; txq->cur = (txq->cur + 1) % RT2661_TX_RING_COUNT; return 0; } static int rt2661_tx_data(struct rt2661_softc *sc, struct mbuf *m0, struct ieee80211_node *ni, int ac) { struct ieee80211vap *vap = ni->ni_vap; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct rt2661_tx_ring *txq = &sc->txq[ac]; struct rt2661_tx_desc *desc; struct rt2661_tx_data *data; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp; struct ieee80211_key *k; const struct chanAccParams *cap; struct mbuf *mnew; bus_dma_segment_t segs[RT2661_MAX_SCATTER]; uint16_t dur; uint32_t flags; int error, nsegs, rate, noack = 0; wh = mtod(m0, struct ieee80211_frame *); tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { rate = tp->mcastrate; } else if (m0->m_flags & M_EAPOL) { rate = tp->mgmtrate; } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) { rate = tp->ucastrate; } else { (void) ieee80211_amrr_choose(ni, &RT2661_NODE(ni)->amrr); rate = ni->ni_txrate; } rate &= IEEE80211_RATE_VAL; if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) { cap = &ic->ic_wme.wme_chanParams; noack = cap->cap_wmeParams[ac].wmep_noackPolicy; } if (wh->i_fc[1] & IEEE80211_FC1_WEP) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } flags = 0; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { int prot = IEEE80211_PROT_NONE; if (m0->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) prot = IEEE80211_PROT_RTSCTS; else if ((ic->ic_flags & IEEE80211_F_USEPROT) && ieee80211_rate2phytype(sc->sc_rates, rate) == IEEE80211_T_OFDM) prot = ic->ic_protmode; if (prot != IEEE80211_PROT_NONE) { error = rt2661_sendprot(sc, ac, m0, ni, prot, rate); if (error) { m_freem(m0); return error; } flags |= RT2661_TX_LONG_RETRY | RT2661_TX_IFS; } } data = &txq->data[txq->cur]; desc = 
&txq->desc[txq->cur]; error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0 && error != EFBIG) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (error != 0) { mnew = m_defrag(m0, M_DONTWAIT); if (mnew == NULL) { device_printf(sc->sc_dev, "could not defragment mbuf\n"); m_freem(m0); return ENOBUFS; } m0 = mnew; error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } /* packet header have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (bpf_peers_present(ifp->if_bpf)) { struct rt2661_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; tap->wt_chan_freq = htole16(ic->ic_curchan->ic_freq); tap->wt_chan_flags = htole16(ic->ic_curchan->ic_flags); bpf_mtap2(ifp->if_bpf, tap, sc->sc_txtap_len, m0); } data->m = m0; data->ni = ni; /* remember link conditions for rate adaptation algorithm */ if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) { data->rix = ni->ni_txrate; /* XXX probably need last rssi value and not avg */ data->rssi = ic->ic_node_getrssi(ni); } else data->rix = IEEE80211_FIXED_RATE_NONE; if (!noack && !IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RT2661_TX_NEED_ACK; dur = ieee80211_ack_duration(sc->sc_rates, rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); *(uint16_t *)wh->i_dur = htole16(dur); } rt2661_setup_tx_desc(sc, desc, flags, 0, m0->m_pkthdr.len, rate, segs, nsegs, ac); bus_dmamap_sync(txq->data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(sc, 10, "sending data frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, txq->cur, rate); /* kick Tx */ txq->queued++; txq->cur = (txq->cur + 1) % RT2661_TX_RING_COUNT; RAL_WRITE(sc, RT2661_TX_CNTL_CSR, 1 << ac); return 0; } static void rt2661_start_locked(struct ifnet *ifp) { struct rt2661_softc *sc = ifp->if_softc; struct mbuf *m; struct ieee80211_node *ni; int ac; RAL_LOCK_ASSERT(sc); /* prevent management frames from being sent if we're not ready */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING) || sc->sc_invalid) return; for (;;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; ac = M_WME_GETAC(m); if (sc->txq[ac].queued >= RT2661_TX_RING_COUNT - 1) { /* there is no place left in this ring */ IFQ_DRV_PREPEND(&ifp->if_snd, m); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; m = ieee80211_encap(ni, m); if (m == NULL) { ieee80211_free_node(ni); ifp->if_oerrors++; continue; } if (rt2661_tx_data(sc, m, ni, ac) != 0) { ieee80211_free_node(ni); ifp->if_oerrors++; break; } sc->sc_tx_timer = 5; } } static void rt2661_start(struct ifnet *ifp) { struct rt2661_softc *sc = ifp->if_softc; RAL_LOCK(sc); rt2661_start_locked(ifp); RAL_UNLOCK(sc); } static int rt2661_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = ic->ic_ifp; struct rt2661_softc *sc = ifp->if_softc; RAL_LOCK(sc); /* prevent management frames from being sent if we're not ready */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { RAL_UNLOCK(sc); m_freem(m); ieee80211_free_node(ni); return ENETDOWN; } if (sc->mgtq.queued >= RT2661_MGT_RING_COUNT) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; RAL_UNLOCK(sc); m_freem(m); ieee80211_free_node(ni); return ENOBUFS; /* XXX */ } 
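	/*
	 * There is room in the management ring: account for the frame and
	 * queue it through the management Tx path.
	 */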
ifp->if_opackets++; /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. * XXX raw path */ if (rt2661_tx_mgt(sc, m, ni) != 0) goto bad; sc->sc_tx_timer = 5; RAL_UNLOCK(sc); return 0; bad: ifp->if_oerrors++; ieee80211_free_node(ni); RAL_UNLOCK(sc); return EIO; /* XXX */ } static void rt2661_watchdog(void *arg) { struct rt2661_softc *sc = (struct rt2661_softc *)arg; struct ifnet *ifp = sc->sc_ifp; RAL_LOCK_ASSERT(sc); KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("not running")); if (sc->sc_invalid) /* card ejected */ return; if (sc->sc_tx_timer > 0 && --sc->sc_tx_timer == 0) { if_printf(ifp, "device timeout\n"); rt2661_init_locked(sc); ifp->if_oerrors++; /* NB: callout is reset in rt2661_init() */ return; } callout_reset(&sc->watchdog_ch, hz, rt2661_watchdog, sc); } static int rt2661_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct rt2661_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct ifreq *ifr = (struct ifreq *) data; int error = 0, startall = 0; switch (cmd) { case SIOCSIFFLAGS: RAL_LOCK(sc); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { rt2661_init_locked(sc); startall = 1; } else rt2661_update_promisc(ifp); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) rt2661_stop_locked(sc); } RAL_UNLOCK(sc); if (startall) ieee80211_start_all(ic); break; case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; case SIOCGIFADDR: error = ether_ioctl(ifp, cmd, data); break; default: error = EINVAL; break; } return error; } static void rt2661_bbp_write(struct rt2661_softc *sc, uint8_t reg, uint8_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2661_PHY_CSR3) & RT2661_BBP_BUSY)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to BBP\n"); return; } tmp = RT2661_BBP_BUSY | (reg & 0x7f) << 8 | val; RAL_WRITE(sc, RT2661_PHY_CSR3, tmp); DPRINTFN(sc, 15, "BBP R%u <- 0x%02x\n", reg, val); } static uint8_t rt2661_bbp_read(struct rt2661_softc *sc, uint8_t reg) { uint32_t val; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2661_PHY_CSR3) & RT2661_BBP_BUSY)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not read from BBP\n"); return 0; } val = RT2661_BBP_BUSY | RT2661_BBP_READ | reg << 8; RAL_WRITE(sc, RT2661_PHY_CSR3, val); for (ntries = 0; ntries < 100; ntries++) { val = RAL_READ(sc, RT2661_PHY_CSR3); if (!(val & RT2661_BBP_BUSY)) return val & 0xff; DELAY(1); } device_printf(sc->sc_dev, "could not read from BBP\n"); return 0; } static void rt2661_rf_write(struct rt2661_softc *sc, uint8_t reg, uint32_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2661_PHY_CSR4) & RT2661_RF_BUSY)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to RF\n"); return; } tmp = RT2661_RF_BUSY | RT2661_RF_21BIT | (val & 0x1fffff) << 2 | (reg & 3); RAL_WRITE(sc, RT2661_PHY_CSR4, tmp); /* remember last written value in sc */ sc->rf_regs[reg] = val; DPRINTFN(sc, 15, "RF R[%u] <- 0x%05x\n", reg & 3, val & 0x1fffff); } static int rt2661_tx_cmd(struct rt2661_softc *sc, uint8_t cmd, uint16_t arg) { if (RAL_READ(sc, RT2661_H2M_MAILBOX_CSR) & RT2661_H2M_BUSY) return EIO; /* there is already a command pending */ RAL_WRITE(sc, RT2661_H2M_MAILBOX_CSR, RT2661_H2M_BUSY | RT2661_TOKEN_NO_INTR << 16 | arg); RAL_WRITE(sc, RT2661_HOST_CMD_CSR, RT2661_KICK_CMD | cmd); return 0; } static void 
rt2661_select_antenna(struct rt2661_softc *sc) { uint8_t bbp4, bbp77; uint32_t tmp; bbp4 = rt2661_bbp_read(sc, 4); bbp77 = rt2661_bbp_read(sc, 77); /* TBD */ /* make sure Rx is disabled before switching antenna */ tmp = RAL_READ(sc, RT2661_TXRX_CSR0); RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp | RT2661_DISABLE_RX); rt2661_bbp_write(sc, 4, bbp4); rt2661_bbp_write(sc, 77, bbp77); /* restore Rx filter */ RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp); } /* * Enable multi-rate retries for frames sent at OFDM rates. * In 802.11b/g mode, allow fallback to CCK rates. */ static void rt2661_enable_mrr(struct rt2661_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t tmp; tmp = RAL_READ(sc, RT2661_TXRX_CSR4); tmp &= ~RT2661_MRR_CCK_FALLBACK; if (!IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan)) tmp |= RT2661_MRR_CCK_FALLBACK; tmp |= RT2661_MRR_ENABLED; RAL_WRITE(sc, RT2661_TXRX_CSR4, tmp); } static void rt2661_set_txpreamble(struct rt2661_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t tmp; tmp = RAL_READ(sc, RT2661_TXRX_CSR4); tmp &= ~RT2661_SHORT_PREAMBLE; if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) tmp |= RT2661_SHORT_PREAMBLE; RAL_WRITE(sc, RT2661_TXRX_CSR4, tmp); } static void rt2661_set_basicrates(struct rt2661_softc *sc, const struct ieee80211_rateset *rs) { #define RV(r) ((r) & IEEE80211_RATE_VAL) struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t mask = 0; uint8_t rate; int i, j; for (i = 0; i < rs->rs_nrates; i++) { rate = rs->rs_rates[i]; if (!(rate & IEEE80211_RATE_BASIC)) continue; /* * Find h/w rate index. We know it exists because the rate * set has already been negotiated. */ for (j = 0; ic->ic_sup_rates[IEEE80211_MODE_11G].rs_rates[j] != RV(rate); j++); mask |= 1 << j; } RAL_WRITE(sc, RT2661_TXRX_CSR5, mask); DPRINTF(sc, "Setting basic rate mask to 0x%x\n", mask); #undef RV } /* * Reprogram MAC/BBP to switch to a new band. Values taken from the reference * driver. 
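 * BBP registers 17/96/104 and 35/97/98 receive band-specific values,
 * registers 75/86/88 are additionally programmed when an external LNA
 * is present for the band in use, and PHY_CSR0 selects the 2GHz or
 * 5GHz power amplifier.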
*/ static void rt2661_select_band(struct rt2661_softc *sc, struct ieee80211_channel *c) { uint8_t bbp17, bbp35, bbp96, bbp97, bbp98, bbp104; uint32_t tmp; /* update all BBP registers that depend on the band */ bbp17 = 0x20; bbp96 = 0x48; bbp104 = 0x2c; bbp35 = 0x50; bbp97 = 0x48; bbp98 = 0x48; if (IEEE80211_IS_CHAN_5GHZ(c)) { bbp17 += 0x08; bbp96 += 0x10; bbp104 += 0x0c; bbp35 += 0x10; bbp97 += 0x10; bbp98 += 0x10; } if ((IEEE80211_IS_CHAN_2GHZ(c) && sc->ext_2ghz_lna) || (IEEE80211_IS_CHAN_5GHZ(c) && sc->ext_5ghz_lna)) { bbp17 += 0x10; bbp96 += 0x10; bbp104 += 0x10; } rt2661_bbp_write(sc, 17, bbp17); rt2661_bbp_write(sc, 96, bbp96); rt2661_bbp_write(sc, 104, bbp104); if ((IEEE80211_IS_CHAN_2GHZ(c) && sc->ext_2ghz_lna) || (IEEE80211_IS_CHAN_5GHZ(c) && sc->ext_5ghz_lna)) { rt2661_bbp_write(sc, 75, 0x80); rt2661_bbp_write(sc, 86, 0x80); rt2661_bbp_write(sc, 88, 0x80); } rt2661_bbp_write(sc, 35, bbp35); rt2661_bbp_write(sc, 97, bbp97); rt2661_bbp_write(sc, 98, bbp98); tmp = RAL_READ(sc, RT2661_PHY_CSR0); tmp &= ~(RT2661_PA_PE_2GHZ | RT2661_PA_PE_5GHZ); if (IEEE80211_IS_CHAN_2GHZ(c)) tmp |= RT2661_PA_PE_2GHZ; else tmp |= RT2661_PA_PE_5GHZ; RAL_WRITE(sc, RT2661_PHY_CSR0, tmp); } static void rt2661_set_chan(struct rt2661_softc *sc, struct ieee80211_channel *c) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; const struct rfprog *rfprog; uint8_t bbp3, bbp94 = RT2661_BBPR94_DEFAULT; int8_t power; u_int i, chan; chan = ieee80211_chan2ieee(ic, c); KASSERT(chan != 0 && chan != IEEE80211_CHAN_ANY, ("chan 0x%x", chan)); sc->sc_rates = ieee80211_get_ratetable(c); /* select the appropriate RF settings based on what EEPROM says */ rfprog = (sc->rfprog == 0) ? rt2661_rf5225_1 : rt2661_rf5225_2; /* find the settings for this channel (we know it exists) */ for (i = 0; rfprog[i].chan != chan; i++); power = sc->txpow[i]; if (power < 0) { bbp94 += power; power = 0; } else if (power > 31) { bbp94 += power - 31; power = 31; } /* * If we are switching from the 2GHz band to the 5GHz band or * vice-versa, BBP registers need to be reprogrammed. 
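	 * The test below compares channel flags rather than frequencies, so
	 * any change of band (or PHY mode) re-runs the band selection and
	 * antenna setup.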
*/ if (c->ic_flags != sc->sc_curchan->ic_flags) { rt2661_select_band(sc, c); rt2661_select_antenna(sc); } sc->sc_curchan = c; rt2661_rf_write(sc, RAL_RF1, rfprog[i].r1); rt2661_rf_write(sc, RAL_RF2, rfprog[i].r2); rt2661_rf_write(sc, RAL_RF3, rfprog[i].r3 | power << 7); rt2661_rf_write(sc, RAL_RF4, rfprog[i].r4 | sc->rffreq << 10); DELAY(200); rt2661_rf_write(sc, RAL_RF1, rfprog[i].r1); rt2661_rf_write(sc, RAL_RF2, rfprog[i].r2); rt2661_rf_write(sc, RAL_RF3, rfprog[i].r3 | power << 7 | 1); rt2661_rf_write(sc, RAL_RF4, rfprog[i].r4 | sc->rffreq << 10); DELAY(200); rt2661_rf_write(sc, RAL_RF1, rfprog[i].r1); rt2661_rf_write(sc, RAL_RF2, rfprog[i].r2); rt2661_rf_write(sc, RAL_RF3, rfprog[i].r3 | power << 7); rt2661_rf_write(sc, RAL_RF4, rfprog[i].r4 | sc->rffreq << 10); /* enable smart mode for MIMO-capable RFs */ bbp3 = rt2661_bbp_read(sc, 3); bbp3 &= ~RT2661_SMART_MODE; if (sc->rf_rev == RT2661_RF_5325 || sc->rf_rev == RT2661_RF_2529) bbp3 |= RT2661_SMART_MODE; rt2661_bbp_write(sc, 3, bbp3); if (bbp94 != RT2661_BBPR94_DEFAULT) rt2661_bbp_write(sc, 94, bbp94); /* 5GHz radio needs a 1ms delay here */ if (IEEE80211_IS_CHAN_5GHZ(c)) DELAY(1000); } static void rt2661_set_bssid(struct rt2661_softc *sc, const uint8_t *bssid) { uint32_t tmp; tmp = bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24; RAL_WRITE(sc, RT2661_MAC_CSR4, tmp); tmp = bssid[4] | bssid[5] << 8 | RT2661_ONE_BSSID << 16; RAL_WRITE(sc, RT2661_MAC_CSR5, tmp); } static void rt2661_set_macaddr(struct rt2661_softc *sc, const uint8_t *addr) { uint32_t tmp; tmp = addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24; RAL_WRITE(sc, RT2661_MAC_CSR2, tmp); tmp = addr[4] | addr[5] << 8; RAL_WRITE(sc, RT2661_MAC_CSR3, tmp); } static void rt2661_update_promisc(struct ifnet *ifp) { struct rt2661_softc *sc = ifp->if_softc; uint32_t tmp; tmp = RAL_READ(sc, RT2661_TXRX_CSR0); tmp &= ~RT2661_DROP_NOT_TO_ME; if (!(ifp->if_flags & IFF_PROMISC)) tmp |= RT2661_DROP_NOT_TO_ME; RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp); DPRINTF(sc, "%s promiscuous mode\n", (ifp->if_flags & IFF_PROMISC) ? "entering" : "leaving"); } /* * Update QoS (802.11e) settings for each h/w Tx ring. */ static int rt2661_wme_update(struct ieee80211com *ic) { struct rt2661_softc *sc = ic->ic_ifp->if_softc; const struct wmeParams *wmep; wmep = ic->ic_wme.wme_chanParams.cap_wmeParams; /* XXX: not sure about shifts. */ /* XXX: the reference driver plays with AC_VI settings too. */ /* update TxOp */ RAL_WRITE(sc, RT2661_AC_TXOP_CSR0, wmep[WME_AC_BE].wmep_txopLimit << 16 | wmep[WME_AC_BK].wmep_txopLimit); RAL_WRITE(sc, RT2661_AC_TXOP_CSR1, wmep[WME_AC_VI].wmep_txopLimit << 16 | wmep[WME_AC_VO].wmep_txopLimit); /* update CWmin */ RAL_WRITE(sc, RT2661_CWMIN_CSR, wmep[WME_AC_BE].wmep_logcwmin << 12 | wmep[WME_AC_BK].wmep_logcwmin << 8 | wmep[WME_AC_VI].wmep_logcwmin << 4 | wmep[WME_AC_VO].wmep_logcwmin); /* update CWmax */ RAL_WRITE(sc, RT2661_CWMAX_CSR, wmep[WME_AC_BE].wmep_logcwmax << 12 | wmep[WME_AC_BK].wmep_logcwmax << 8 | wmep[WME_AC_VI].wmep_logcwmax << 4 | wmep[WME_AC_VO].wmep_logcwmax); /* update Aifsn */ RAL_WRITE(sc, RT2661_AIFSN_CSR, wmep[WME_AC_BE].wmep_aifsn << 12 | wmep[WME_AC_BK].wmep_aifsn << 8 | wmep[WME_AC_VI].wmep_aifsn << 4 | wmep[WME_AC_VO].wmep_aifsn); return 0; } static void rt2661_update_slot(struct ifnet *ifp) { struct rt2661_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; uint8_t slottime; uint32_t tmp; slottime = (ic->ic_flags & IEEE80211_F_SHSLOT) ? 
9 : 20; tmp = RAL_READ(sc, RT2661_MAC_CSR9); tmp = (tmp & ~0xff) | slottime; RAL_WRITE(sc, RT2661_MAC_CSR9, tmp); } static const char * rt2661_get_rf(int rev) { switch (rev) { case RT2661_RF_5225: return "RT5225"; case RT2661_RF_5325: return "RT5325 (MIMO XR)"; case RT2661_RF_2527: return "RT2527"; case RT2661_RF_2529: return "RT2529 (MIMO XR)"; default: return "unknown"; } } static void rt2661_read_eeprom(struct rt2661_softc *sc, struct ieee80211com *ic) { uint16_t val; int i; /* read MAC address */ val = rt2661_eeprom_read(sc, RT2661_EEPROM_MAC01); ic->ic_myaddr[0] = val & 0xff; ic->ic_myaddr[1] = val >> 8; val = rt2661_eeprom_read(sc, RT2661_EEPROM_MAC23); ic->ic_myaddr[2] = val & 0xff; ic->ic_myaddr[3] = val >> 8; val = rt2661_eeprom_read(sc, RT2661_EEPROM_MAC45); ic->ic_myaddr[4] = val & 0xff; ic->ic_myaddr[5] = val >> 8; val = rt2661_eeprom_read(sc, RT2661_EEPROM_ANTENNA); /* XXX: test if different from 0xffff? */ sc->rf_rev = (val >> 11) & 0x1f; sc->hw_radio = (val >> 10) & 0x1; sc->rx_ant = (val >> 4) & 0x3; sc->tx_ant = (val >> 2) & 0x3; sc->nb_ant = val & 0x3; DPRINTF(sc, "RF revision=%d\n", sc->rf_rev); val = rt2661_eeprom_read(sc, RT2661_EEPROM_CONFIG2); sc->ext_5ghz_lna = (val >> 6) & 0x1; sc->ext_2ghz_lna = (val >> 4) & 0x1; DPRINTF(sc, "External 2GHz LNA=%d\nExternal 5GHz LNA=%d\n", sc->ext_2ghz_lna, sc->ext_5ghz_lna); val = rt2661_eeprom_read(sc, RT2661_EEPROM_RSSI_2GHZ_OFFSET); if ((val & 0xff) != 0xff) sc->rssi_2ghz_corr = (int8_t)(val & 0xff); /* signed */ /* Only [-10, 10] is valid */ if (sc->rssi_2ghz_corr < -10 || sc->rssi_2ghz_corr > 10) sc->rssi_2ghz_corr = 0; val = rt2661_eeprom_read(sc, RT2661_EEPROM_RSSI_5GHZ_OFFSET); if ((val & 0xff) != 0xff) sc->rssi_5ghz_corr = (int8_t)(val & 0xff); /* signed */ /* Only [-10, 10] is valid */ if (sc->rssi_5ghz_corr < -10 || sc->rssi_5ghz_corr > 10) sc->rssi_5ghz_corr = 0; /* adjust RSSI correction for external low-noise amplifier */ if (sc->ext_2ghz_lna) sc->rssi_2ghz_corr -= 14; if (sc->ext_5ghz_lna) sc->rssi_5ghz_corr -= 14; DPRINTF(sc, "RSSI 2GHz corr=%d\nRSSI 5GHz corr=%d\n", sc->rssi_2ghz_corr, sc->rssi_5ghz_corr); val = rt2661_eeprom_read(sc, RT2661_EEPROM_FREQ_OFFSET); if ((val >> 8) != 0xff) sc->rfprog = (val >> 8) & 0x3; if ((val & 0xff) != 0xff) sc->rffreq = val & 0xff; DPRINTF(sc, "RF prog=%d\nRF freq=%d\n", sc->rfprog, sc->rffreq); /* read Tx power for all a/b/g channels */ for (i = 0; i < 19; i++) { val = rt2661_eeprom_read(sc, RT2661_EEPROM_TXPOWER + i); sc->txpow[i * 2] = (int8_t)(val >> 8); /* signed */ DPRINTF(sc, "Channel=%d Tx power=%d\n", rt2661_rf5225_1[i * 2].chan, sc->txpow[i * 2]); sc->txpow[i * 2 + 1] = (int8_t)(val & 0xff); /* signed */ DPRINTF(sc, "Channel=%d Tx power=%d\n", rt2661_rf5225_1[i * 2 + 1].chan, sc->txpow[i * 2 + 1]); } /* read vendor-specific BBP values */ for (i = 0; i < 16; i++) { val = rt2661_eeprom_read(sc, RT2661_EEPROM_BBP_BASE + i); if (val == 0 || val == 0xffff) continue; /* skip invalid entries */ sc->bbp_prom[i].reg = val >> 8; sc->bbp_prom[i].val = val & 0xff; DPRINTF(sc, "BBP R%d=%02x\n", sc->bbp_prom[i].reg, sc->bbp_prom[i].val); } } static int rt2661_bbp_init(struct rt2661_softc *sc) { #define N(a) (sizeof (a) / sizeof ((a)[0])) int i, ntries; uint8_t val; /* wait for BBP to be ready */ for (ntries = 0; ntries < 100; ntries++) { val = rt2661_bbp_read(sc, 0); if (val != 0 && val != 0xff) break; DELAY(100); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for BBP\n"); return EIO; } /* initialize BBP registers to default values */ for (i = 0; i < 
N(rt2661_def_bbp); i++) { rt2661_bbp_write(sc, rt2661_def_bbp[i].reg, rt2661_def_bbp[i].val); } /* write vendor-specific BBP values (from EEPROM) */ for (i = 0; i < 16; i++) { if (sc->bbp_prom[i].reg == 0) continue; rt2661_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val); } return 0; #undef N } static void rt2661_init_locked(struct rt2661_softc *sc) { #define N(a) (sizeof (a) / sizeof ((a)[0])) struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t tmp, sta[3]; int i, error, ntries; RAL_LOCK_ASSERT(sc); if ((sc->sc_flags & RAL_FW_LOADED) == 0) { error = rt2661_load_microcode(sc); if (error != 0) { if_printf(ifp, "%s: could not load 8051 microcode, error %d\n", __func__, error); return; } sc->sc_flags |= RAL_FW_LOADED; } rt2661_stop_locked(sc); /* initialize Tx rings */ RAL_WRITE(sc, RT2661_AC1_BASE_CSR, sc->txq[1].physaddr); RAL_WRITE(sc, RT2661_AC0_BASE_CSR, sc->txq[0].physaddr); RAL_WRITE(sc, RT2661_AC2_BASE_CSR, sc->txq[2].physaddr); RAL_WRITE(sc, RT2661_AC3_BASE_CSR, sc->txq[3].physaddr); /* initialize Mgt ring */ RAL_WRITE(sc, RT2661_MGT_BASE_CSR, sc->mgtq.physaddr); /* initialize Rx ring */ RAL_WRITE(sc, RT2661_RX_BASE_CSR, sc->rxq.physaddr); /* initialize Tx rings sizes */ RAL_WRITE(sc, RT2661_TX_RING_CSR0, RT2661_TX_RING_COUNT << 24 | RT2661_TX_RING_COUNT << 16 | RT2661_TX_RING_COUNT << 8 | RT2661_TX_RING_COUNT); RAL_WRITE(sc, RT2661_TX_RING_CSR1, RT2661_TX_DESC_WSIZE << 16 | RT2661_TX_RING_COUNT << 8 | /* XXX: HCCA ring unused */ RT2661_MGT_RING_COUNT); /* initialize Rx rings */ RAL_WRITE(sc, RT2661_RX_RING_CSR, RT2661_RX_DESC_BACK << 16 | RT2661_RX_DESC_WSIZE << 8 | RT2661_RX_RING_COUNT); /* XXX: some magic here */ RAL_WRITE(sc, RT2661_TX_DMA_DST_CSR, 0xaa); /* load base addresses of all 5 Tx rings (4 data + 1 mgt) */ RAL_WRITE(sc, RT2661_LOAD_TX_RING_CSR, 0x1f); /* load base address of Rx ring */ RAL_WRITE(sc, RT2661_RX_CNTL_CSR, 2); /* initialize MAC registers to default values */ for (i = 0; i < N(rt2661_def_mac); i++) RAL_WRITE(sc, rt2661_def_mac[i].reg, rt2661_def_mac[i].val); IEEE80211_ADDR_COPY(ic->ic_myaddr, IF_LLADDR(ifp)); rt2661_set_macaddr(sc, ic->ic_myaddr); /* set host ready */ RAL_WRITE(sc, RT2661_MAC_CSR1, 3); RAL_WRITE(sc, RT2661_MAC_CSR1, 0); /* wait for BBP/RF to wakeup */ for (ntries = 0; ntries < 1000; ntries++) { if (RAL_READ(sc, RT2661_MAC_CSR12) & 8) break; DELAY(1000); } if (ntries == 1000) { printf("timeout waiting for BBP/RF to wakeup\n"); rt2661_stop_locked(sc); return; } if (rt2661_bbp_init(sc) != 0) { rt2661_stop_locked(sc); return; } /* select default channel */ sc->sc_curchan = ic->ic_curchan; rt2661_select_band(sc, sc->sc_curchan); rt2661_select_antenna(sc); rt2661_set_chan(sc, sc->sc_curchan); /* update Rx filter */ tmp = RAL_READ(sc, RT2661_TXRX_CSR0) & 0xffff; tmp |= RT2661_DROP_PHY_ERROR | RT2661_DROP_CRC_ERROR; if (ic->ic_opmode != IEEE80211_M_MONITOR) { tmp |= RT2661_DROP_CTL | RT2661_DROP_VER_ERROR | RT2661_DROP_ACKCTS; if (ic->ic_opmode != IEEE80211_M_HOSTAP) tmp |= RT2661_DROP_TODS; if (!(ifp->if_flags & IFF_PROMISC)) tmp |= RT2661_DROP_NOT_TO_ME; } RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp); /* clear STA registers */ RAL_READ_REGION_4(sc, RT2661_STA_CSR0, sta, N(sta)); /* initialize ASIC */ RAL_WRITE(sc, RT2661_MAC_CSR1, 4); /* clear any pending interrupt */ RAL_WRITE(sc, RT2661_INT_SOURCE_CSR, 0xffffffff); /* enable interrupts */ RAL_WRITE(sc, RT2661_INT_MASK_CSR, 0x0000ff10); RAL_WRITE(sc, RT2661_MCU_INT_MASK_CSR, 0); /* kick Rx */ RAL_WRITE(sc, RT2661_RX_CNTL_CSR, 1); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 
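/* bring-up complete: mark the interface running and arm the watchdog */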
ifp->if_drv_flags |= IFF_DRV_RUNNING; callout_reset(&sc->watchdog_ch, hz, rt2661_watchdog, sc); #undef N } static void rt2661_init(void *priv) { struct rt2661_softc *sc = priv; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; RAL_LOCK(sc); rt2661_init_locked(sc); RAL_UNLOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) ieee80211_start_all(ic); /* start all vap's */ } void rt2661_stop_locked(struct rt2661_softc *sc) { struct ifnet *ifp = sc->sc_ifp; uint32_t tmp; volatile int *flags = &sc->sc_flags; while (*flags & RAL_INPUT_RUNNING) msleep(sc, &sc->sc_mtx, 0, "ralrunning", hz/10); callout_stop(&sc->watchdog_ch); sc->sc_tx_timer = 0; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); /* abort Tx (for all 5 Tx rings) */ RAL_WRITE(sc, RT2661_TX_CNTL_CSR, 0x1f << 16); /* disable Rx (value remains after reset!) */ tmp = RAL_READ(sc, RT2661_TXRX_CSR0); RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp | RT2661_DISABLE_RX); /* reset ASIC */ RAL_WRITE(sc, RT2661_MAC_CSR1, 3); RAL_WRITE(sc, RT2661_MAC_CSR1, 0); /* disable interrupts */ RAL_WRITE(sc, RT2661_INT_MASK_CSR, 0xffffffff); RAL_WRITE(sc, RT2661_MCU_INT_MASK_CSR, 0xffffffff); /* clear any pending interrupt */ RAL_WRITE(sc, RT2661_INT_SOURCE_CSR, 0xffffffff); RAL_WRITE(sc, RT2661_MCU_INT_SOURCE_CSR, 0xffffffff); /* reset Tx and Rx rings */ rt2661_reset_tx_ring(sc, &sc->txq[0]); rt2661_reset_tx_ring(sc, &sc->txq[1]); rt2661_reset_tx_ring(sc, &sc->txq[2]); rt2661_reset_tx_ring(sc, &sc->txq[3]); rt2661_reset_tx_ring(sc, &sc->mgtq); rt2661_reset_rx_ring(sc, &sc->rxq); } } void rt2661_stop(void *priv) { struct rt2661_softc *sc = priv; RAL_LOCK(sc); rt2661_stop_locked(sc); RAL_UNLOCK(sc); } static int rt2661_load_microcode(struct rt2661_softc *sc) { struct ifnet *ifp = sc->sc_ifp; const struct firmware *fp; const char *imagename; int ntries, error; RAL_LOCK_ASSERT(sc); switch (sc->sc_id) { case 0x0301: imagename = "rt2561sfw"; break; case 0x0302: imagename = "rt2561fw"; break; case 0x0401: imagename = "rt2661fw"; break; default: if_printf(ifp, "%s: unexpected pci device id 0x%x, " "don't know how to retrieve firmware\n", __func__, sc->sc_id); return EINVAL; } RAL_UNLOCK(sc); fp = firmware_get(imagename); RAL_LOCK(sc); if (fp == NULL) { if_printf(ifp, "%s: unable to retrieve firmware image %s\n", __func__, imagename); return EINVAL; } /* * Load 8051 microcode into NIC. */ /* reset 8051 */ RAL_WRITE(sc, RT2661_MCU_CNTL_CSR, RT2661_MCU_RESET); /* cancel any pending Host to MCU command */ RAL_WRITE(sc, RT2661_H2M_MAILBOX_CSR, 0); RAL_WRITE(sc, RT2661_M2H_CMD_DONE_CSR, 0xffffffff); RAL_WRITE(sc, RT2661_HOST_CMD_CSR, 0); /* write 8051's microcode */ RAL_WRITE(sc, RT2661_MCU_CNTL_CSR, RT2661_MCU_RESET | RT2661_MCU_SEL); RAL_WRITE_REGION_1(sc, RT2661_MCU_CODE_BASE, fp->data, fp->datasize); RAL_WRITE(sc, RT2661_MCU_CNTL_CSR, RT2661_MCU_RESET); /* kick 8051's ass */ RAL_WRITE(sc, RT2661_MCU_CNTL_CSR, 0); /* wait for 8051 to initialize */ for (ntries = 0; ntries < 500; ntries++) { if (RAL_READ(sc, RT2661_MCU_CNTL_CSR) & RT2661_MCU_READY) break; DELAY(100); } if (ntries == 500) { if_printf(ifp, "%s: timeout waiting for MCU to initialize\n", __func__); error = EIO; } else error = 0; firmware_put(fp, FIRMWARE_UNLOAD); return error; } #ifdef notyet /* * Dynamically tune Rx sensitivity (BBP register 17) based on average RSSI and * false CCA count. This function is called periodically (every seconds) when * in the RUN state. Values taken from the reference driver. 
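* Lower BBP register 17 values generally mean higher Rx sensitivity: the code below clamps the value to a band- and LNA-dependent window, derives it directly from the average RSSI when the signal is strong, and otherwise nudges it up or down according to the false CCA count.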
*/ static void rt2661_rx_tune(struct rt2661_softc *sc) { uint8_t bbp17; uint16_t cca; int lo, hi, dbm; /* * Tuning range depends on operating band and on the presence of an * external low-noise amplifier. */ lo = 0x20; if (IEEE80211_IS_CHAN_5GHZ(sc->sc_curchan)) lo += 0x08; if ((IEEE80211_IS_CHAN_2GHZ(sc->sc_curchan) && sc->ext_2ghz_lna) || (IEEE80211_IS_CHAN_5GHZ(sc->sc_curchan) && sc->ext_5ghz_lna)) lo += 0x10; hi = lo + 0x20; /* retrieve false CCA count since last call (clear on read) */ cca = RAL_READ(sc, RT2661_STA_CSR1) & 0xffff; if (dbm >= -35) { bbp17 = 0x60; } else if (dbm >= -58) { bbp17 = hi; } else if (dbm >= -66) { bbp17 = lo + 0x10; } else if (dbm >= -74) { bbp17 = lo + 0x08; } else { /* RSSI < -74dBm, tune using false CCA count */ bbp17 = sc->bbp17; /* current value */ hi -= 2 * (-74 - dbm); if (hi < lo) hi = lo; if (bbp17 > hi) { bbp17 = hi; } else if (cca > 512) { if (++bbp17 > hi) bbp17 = hi; } else if (cca < 100) { if (--bbp17 < lo) bbp17 = lo; } } if (bbp17 != sc->bbp17) { rt2661_bbp_write(sc, 17, bbp17); sc->bbp17 = bbp17; } } /* * Enter/Leave radar detection mode. * This is for 802.11h additional regulatory domains. */ static void rt2661_radar_start(struct rt2661_softc *sc) { uint32_t tmp; /* disable Rx */ tmp = RAL_READ(sc, RT2661_TXRX_CSR0); RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp | RT2661_DISABLE_RX); rt2661_bbp_write(sc, 82, 0x20); rt2661_bbp_write(sc, 83, 0x00); rt2661_bbp_write(sc, 84, 0x40); /* save current BBP registers values */ sc->bbp18 = rt2661_bbp_read(sc, 18); sc->bbp21 = rt2661_bbp_read(sc, 21); sc->bbp22 = rt2661_bbp_read(sc, 22); sc->bbp16 = rt2661_bbp_read(sc, 16); sc->bbp17 = rt2661_bbp_read(sc, 17); sc->bbp64 = rt2661_bbp_read(sc, 64); rt2661_bbp_write(sc, 18, 0xff); rt2661_bbp_write(sc, 21, 0x3f); rt2661_bbp_write(sc, 22, 0x3f); rt2661_bbp_write(sc, 16, 0xbd); rt2661_bbp_write(sc, 17, sc->ext_5ghz_lna ? 0x44 : 0x34); rt2661_bbp_write(sc, 64, 0x21); /* restore Rx filter */ RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp); } static int rt2661_radar_stop(struct rt2661_softc *sc) { uint8_t bbp66; /* read radar detection result */ bbp66 = rt2661_bbp_read(sc, 66); /* restore BBP registers values */ rt2661_bbp_write(sc, 16, sc->bbp16); rt2661_bbp_write(sc, 17, sc->bbp17); rt2661_bbp_write(sc, 18, sc->bbp18); rt2661_bbp_write(sc, 21, sc->bbp21); rt2661_bbp_write(sc, 22, sc->bbp22); rt2661_bbp_write(sc, 64, sc->bbp64); return bbp66 == 1; } #endif static int rt2661_prepare_beacon(struct rt2661_softc *sc, struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_beacon_offsets bo; struct rt2661_tx_desc desc; struct mbuf *m0; int rate; m0 = ieee80211_beacon_alloc(vap->iv_bss, &bo); if (m0 == NULL) { device_printf(sc->sc_dev, "could not allocate beacon frame\n"); return ENOBUFS; } /* send beacons at the lowest available rate */ rate = IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan) ? 12 : 2; rt2661_setup_tx_desc(sc, &desc, RT2661_TX_TIMESTAMP, RT2661_TX_HWSEQ, m0->m_pkthdr.len, rate, NULL, 0, RT2661_QID_MGT); /* copy the first 24 bytes of Tx descriptor into NIC memory */ RAL_WRITE_REGION_1(sc, RT2661_HW_BEACON_BASE0, (uint8_t *)&desc, 24); /* copy beacon header and payload into NIC memory */ RAL_WRITE_REGION_1(sc, RT2661_HW_BEACON_BASE0 + 24, mtod(m0, uint8_t *), m0->m_pkthdr.len); m_freem(m0); return 0; } /* * Enable TSF synchronization and tell h/w to start sending beacons for IBSS * and HostAP operating modes. 
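* TXRX_CSR9 takes the beacon interval in 1/16 ms units; TSF mode 1 is used when operating as a station and TSF mode 2 plus beacon generation otherwise (IBSS/HostAP).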
*/ static void rt2661_enable_tsf_sync(struct rt2661_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t tmp; if (vap->iv_opmode != IEEE80211_M_STA) { /* * Change default 16ms TBTT adjustment to 8ms. * Must be done before enabling beacon generation. */ RAL_WRITE(sc, RT2661_TXRX_CSR10, 1 << 12 | 8); } tmp = RAL_READ(sc, RT2661_TXRX_CSR9) & 0xff000000; /* set beacon interval (in 1/16ms unit) */ tmp |= vap->iv_bss->ni_intval * 16; tmp |= RT2661_TSF_TICKING | RT2661_ENABLE_TBTT; if (vap->iv_opmode == IEEE80211_M_STA) tmp |= RT2661_TSF_MODE(1); else tmp |= RT2661_TSF_MODE(2) | RT2661_GENERATE_BEACON; RAL_WRITE(sc, RT2661_TXRX_CSR9, tmp); } /* * Retrieve the "Received Signal Strength Indicator" from the raw values * contained in Rx descriptors. The computation depends on which band the * frame was received. Correction values taken from the reference driver. */ static int rt2661_get_rssi(struct rt2661_softc *sc, uint8_t raw) { int lna, agc, rssi; lna = (raw >> 5) & 0x3; agc = raw & 0x1f; if (lna == 0) { /* * No mapping available. * * NB: Since RSSI is relative to noise floor, -1 is * adequate for caller to know error happened. */ return -1; } rssi = (2 * agc) - RT2661_NOISE_FLOOR; if (IEEE80211_IS_CHAN_2GHZ(sc->sc_curchan)) { rssi += sc->rssi_2ghz_corr; if (lna == 1) rssi -= 64; else if (lna == 2) rssi -= 74; else if (lna == 3) rssi -= 90; } else { rssi += sc->rssi_5ghz_corr; if (lna == 1) rssi -= 64; else if (lna == 2) rssi -= 86; else if (lna == 3) rssi -= 100; } return rssi; } static void rt2661_scan_start(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct rt2661_softc *sc = ifp->if_softc; uint32_t tmp; /* abort TSF synchronization */ tmp = RAL_READ(sc, RT2661_TXRX_CSR9); RAL_WRITE(sc, RT2661_TXRX_CSR9, tmp & ~0xffffff); rt2661_set_bssid(sc, ifp->if_broadcastaddr); } static void rt2661_scan_end(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct rt2661_softc *sc = ifp->if_softc; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); rt2661_enable_tsf_sync(sc); /* XXX keep local copy */ rt2661_set_bssid(sc, vap->iv_bss->ni_bssid); } static void rt2661_set_channel(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct rt2661_softc *sc = ifp->if_softc; RAL_LOCK(sc); rt2661_set_chan(sc, ic->ic_curchan); sc->sc_txtap.wt_chan_freq = htole16(ic->ic_curchan->ic_freq); sc->sc_txtap.wt_chan_flags = htole16(ic->ic_curchan->ic_flags); sc->sc_rxtap.wr_chan_freq = htole16(ic->ic_curchan->ic_freq); sc->sc_rxtap.wr_chan_flags = htole16(ic->ic_curchan->ic_flags); RAL_UNLOCK(sc); } diff --git a/sys/dev/usb/if_rum.c b/sys/dev/usb/if_rum.c index 3c68d49b54db..2f26991926b8 100644 --- a/sys/dev/usb/if_rum.c +++ b/sys/dev/usb/if_rum.c @@ -1,2555 +1,2557 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2005-2007 Damien Bergamini * Copyright (c) 2006 Niall O'Higgins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /*- * Ralink Technology RT2501USB/RT2601USB chipset driver * http://www.ralinktech.com.tw/ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "usbdevs.h" #include #include #include #ifdef USB_DEBUG #define DPRINTF(x) do { if (rumdebug > 0) printf x; } while (0) #define DPRINTFN(n, x) do { if (rumdebug >= (n)) printf x; } while (0) int rumdebug = 0; SYSCTL_NODE(_hw_usb, OID_AUTO, rum, CTLFLAG_RW, 0, "USB rum"); SYSCTL_INT(_hw_usb_rum, OID_AUTO, debug, CTLFLAG_RW, &rumdebug, 0, "rum debug level"); #else #define DPRINTF(x) #define DPRINTFN(n, x) #endif /* various supported device vendors/products */ static const struct usb_devno rum_devs[] = { { USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_HWU54DM }, { USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_RT2573_2 }, { USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_RT2573_3 }, { USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_RT2573_4 }, { USB_VENDOR_ABOCOM, USB_PRODUCT_ABOCOM_WUG2700 }, { USB_VENDOR_AMIT, USB_PRODUCT_AMIT_CGWLUSB2GO }, { USB_VENDOR_ASUS, USB_PRODUCT_ASUS_RT2573_1 }, { USB_VENDOR_ASUS, USB_PRODUCT_ASUS_RT2573_2 }, { USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F5D7050A }, { USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F5D9050V3 }, { USB_VENDOR_CISCOLINKSYS, USB_PRODUCT_CISCOLINKSYS_WUSB54GC }, { USB_VENDOR_CISCOLINKSYS, USB_PRODUCT_CISCOLINKSYS_WUSB54GR }, { USB_VENDOR_CONCEPTRONIC2, USB_PRODUCT_CONCEPTRONIC2_C54RU2 }, { USB_VENDOR_COREGA, USB_PRODUCT_COREGA_CGWLUSB2GL }, { USB_VENDOR_COREGA, USB_PRODUCT_COREGA_CGWLUSB2GPX }, { USB_VENDOR_DICKSMITH, USB_PRODUCT_DICKSMITH_CWD854F }, { USB_VENDOR_DICKSMITH, USB_PRODUCT_DICKSMITH_RT2573 }, { USB_VENDOR_DLINK2, USB_PRODUCT_DLINK2_DWLG122C1 }, { USB_VENDOR_DLINK2, USB_PRODUCT_DLINK2_WUA1340 }, { USB_VENDOR_GIGABYTE, USB_PRODUCT_GIGABYTE_GNWB01GS }, { USB_VENDOR_GIGABYTE, USB_PRODUCT_GIGABYTE_GNWI05GS }, { USB_VENDOR_GIGASET, USB_PRODUCT_GIGASET_RT2573 }, { USB_VENDOR_GOODWAY, USB_PRODUCT_GOODWAY_RT2573 }, { USB_VENDOR_GUILLEMOT, USB_PRODUCT_GUILLEMOT_HWGUSB254LB }, { USB_VENDOR_GUILLEMOT, USB_PRODUCT_GUILLEMOT_HWGUSB254V2AP }, { USB_VENDOR_HUAWEI3COM, USB_PRODUCT_HUAWEI3COM_WUB320G }, { USB_VENDOR_MELCO, USB_PRODUCT_MELCO_G54HP }, { USB_VENDOR_MELCO, USB_PRODUCT_MELCO_SG54HP }, { USB_VENDOR_MSI, USB_PRODUCT_MSI_RT2573_1 }, { USB_VENDOR_MSI, USB_PRODUCT_MSI_RT2573_2 }, { USB_VENDOR_MSI, USB_PRODUCT_MSI_RT2573_3 }, { USB_VENDOR_MSI, USB_PRODUCT_MSI_RT2573_4 }, { USB_VENDOR_NOVATECH, USB_PRODUCT_NOVATECH_RT2573 }, { USB_VENDOR_PLANEX2, USB_PRODUCT_PLANEX2_GWUS54HP }, { USB_VENDOR_PLANEX2, USB_PRODUCT_PLANEX2_GWUS54MINI2 }, { USB_VENDOR_PLANEX2, USB_PRODUCT_PLANEX2_GWUSMM }, { USB_VENDOR_QCOM, USB_PRODUCT_QCOM_RT2573 }, { USB_VENDOR_QCOM, USB_PRODUCT_QCOM_RT2573_2 }, { USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT2573 }, { USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT2573_2 }, { USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT2671 }, { USB_VENDOR_SITECOMEU, USB_PRODUCT_SITECOMEU_WL113R2 }, { USB_VENDOR_SITECOMEU, USB_PRODUCT_SITECOMEU_WL172 }, { USB_VENDOR_SPARKLAN, 
USB_PRODUCT_SPARKLAN_RT2573 }, { USB_VENDOR_SURECOM, USB_PRODUCT_SURECOM_RT2573 } }; MODULE_DEPEND(rum, wlan, 1, 1, 1); MODULE_DEPEND(rum, wlan_amrr, 1, 1, 1); MODULE_DEPEND(rum, usb, 1, 1, 1); static struct ieee80211vap *rum_vap_create(struct ieee80211com *, const char name[IFNAMSIZ], int unit, int opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]); static void rum_vap_delete(struct ieee80211vap *); static int rum_alloc_tx_list(struct rum_softc *); static void rum_free_tx_list(struct rum_softc *); static int rum_alloc_rx_list(struct rum_softc *); static void rum_free_rx_list(struct rum_softc *); static void rum_task(void *); static void rum_scantask(void *); static int rum_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void rum_txeof(usbd_xfer_handle, usbd_private_handle, usbd_status); static void rum_rxeof(usbd_xfer_handle, usbd_private_handle, usbd_status); static void rum_setup_tx_desc(struct rum_softc *, struct rum_tx_desc *, uint32_t, uint16_t, int, int); static int rum_tx_mgt(struct rum_softc *, struct mbuf *, struct ieee80211_node *); static int rum_tx_raw(struct rum_softc *, struct mbuf *, struct ieee80211_node *, const struct ieee80211_bpf_params *); static int rum_tx_data(struct rum_softc *, struct mbuf *, struct ieee80211_node *); static void rum_start(struct ifnet *); static void rum_watchdog(void *); static int rum_ioctl(struct ifnet *, u_long, caddr_t); static void rum_eeprom_read(struct rum_softc *, uint16_t, void *, int); static uint32_t rum_read(struct rum_softc *, uint16_t); static void rum_read_multi(struct rum_softc *, uint16_t, void *, int); static void rum_write(struct rum_softc *, uint16_t, uint32_t); static void rum_write_multi(struct rum_softc *, uint16_t, void *, size_t); static void rum_bbp_write(struct rum_softc *, uint8_t, uint8_t); static uint8_t rum_bbp_read(struct rum_softc *, uint8_t); static void rum_rf_write(struct rum_softc *, uint8_t, uint32_t); static void rum_select_antenna(struct rum_softc *); static void rum_enable_mrr(struct rum_softc *); static void rum_set_txpreamble(struct rum_softc *); static void rum_set_basicrates(struct rum_softc *); static void rum_select_band(struct rum_softc *, struct ieee80211_channel *); static void rum_set_chan(struct rum_softc *, struct ieee80211_channel *); static void rum_enable_tsf_sync(struct rum_softc *); static void rum_update_slot(struct ifnet *); static void rum_set_bssid(struct rum_softc *, const uint8_t *); static void rum_set_macaddr(struct rum_softc *, const uint8_t *); static void rum_update_promisc(struct rum_softc *); static const char *rum_get_rf(int); static void rum_read_eeprom(struct rum_softc *); static int rum_bbp_init(struct rum_softc *); static void rum_init_locked(struct rum_softc *); static void rum_init(void *); static void rum_stop(void *); static int rum_load_microcode(struct rum_softc *, const u_char *, size_t); static int rum_prepare_beacon(struct rum_softc *, struct ieee80211vap *); static int rum_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); -static struct ieee80211_node *rum_node_alloc(struct ieee80211_node_table *); +static struct ieee80211_node *rum_node_alloc(struct ieee80211vap *, + const uint8_t mac[IEEE80211_ADDR_LEN]); static void rum_newassoc(struct ieee80211_node *, int); static void rum_scan_start(struct ieee80211com *); static void rum_scan_end(struct ieee80211com *); static void rum_set_channel(struct ieee80211com *); static int rum_get_rssi(struct rum_softc *, 
uint8_t); static void rum_amrr_start(struct rum_softc *, struct ieee80211_node *); static void rum_amrr_timeout(void *); static void rum_amrr_update(usbd_xfer_handle, usbd_private_handle, usbd_status); static const struct { uint32_t reg; uint32_t val; } rum_def_mac[] = { { RT2573_TXRX_CSR0, 0x025fb032 }, { RT2573_TXRX_CSR1, 0x9eaa9eaf }, { RT2573_TXRX_CSR2, 0x8a8b8c8d }, { RT2573_TXRX_CSR3, 0x00858687 }, { RT2573_TXRX_CSR7, 0x2e31353b }, { RT2573_TXRX_CSR8, 0x2a2a2a2c }, { RT2573_TXRX_CSR15, 0x0000000f }, { RT2573_MAC_CSR6, 0x00000fff }, { RT2573_MAC_CSR8, 0x016c030a }, { RT2573_MAC_CSR10, 0x00000718 }, { RT2573_MAC_CSR12, 0x00000004 }, { RT2573_MAC_CSR13, 0x00007f00 }, { RT2573_SEC_CSR0, 0x00000000 }, { RT2573_SEC_CSR1, 0x00000000 }, { RT2573_SEC_CSR5, 0x00000000 }, { RT2573_PHY_CSR1, 0x000023b0 }, { RT2573_PHY_CSR5, 0x00040a06 }, { RT2573_PHY_CSR6, 0x00080606 }, { RT2573_PHY_CSR7, 0x00000408 }, { RT2573_AIFSN_CSR, 0x00002273 }, { RT2573_CWMIN_CSR, 0x00002344 }, { RT2573_CWMAX_CSR, 0x000034aa } }; static const struct { uint8_t reg; uint8_t val; } rum_def_bbp[] = { { 3, 0x80 }, { 15, 0x30 }, { 17, 0x20 }, { 21, 0xc8 }, { 22, 0x38 }, { 23, 0x06 }, { 24, 0xfe }, { 25, 0x0a }, { 26, 0x0d }, { 32, 0x0b }, { 34, 0x12 }, { 37, 0x07 }, { 39, 0xf8 }, { 41, 0x60 }, { 53, 0x10 }, { 54, 0x18 }, { 60, 0x10 }, { 61, 0x04 }, { 62, 0x04 }, { 75, 0xfe }, { 86, 0xfe }, { 88, 0xfe }, { 90, 0x0f }, { 99, 0x00 }, { 102, 0x16 }, { 107, 0x04 } }; static const struct rfprog { uint8_t chan; uint32_t r1, r2, r3, r4; } rum_rf5226[] = { { 1, 0x00b03, 0x001e1, 0x1a014, 0x30282 }, { 2, 0x00b03, 0x001e1, 0x1a014, 0x30287 }, { 3, 0x00b03, 0x001e2, 0x1a014, 0x30282 }, { 4, 0x00b03, 0x001e2, 0x1a014, 0x30287 }, { 5, 0x00b03, 0x001e3, 0x1a014, 0x30282 }, { 6, 0x00b03, 0x001e3, 0x1a014, 0x30287 }, { 7, 0x00b03, 0x001e4, 0x1a014, 0x30282 }, { 8, 0x00b03, 0x001e4, 0x1a014, 0x30287 }, { 9, 0x00b03, 0x001e5, 0x1a014, 0x30282 }, { 10, 0x00b03, 0x001e5, 0x1a014, 0x30287 }, { 11, 0x00b03, 0x001e6, 0x1a014, 0x30282 }, { 12, 0x00b03, 0x001e6, 0x1a014, 0x30287 }, { 13, 0x00b03, 0x001e7, 0x1a014, 0x30282 }, { 14, 0x00b03, 0x001e8, 0x1a014, 0x30284 }, { 34, 0x00b03, 0x20266, 0x36014, 0x30282 }, { 38, 0x00b03, 0x20267, 0x36014, 0x30284 }, { 42, 0x00b03, 0x20268, 0x36014, 0x30286 }, { 46, 0x00b03, 0x20269, 0x36014, 0x30288 }, { 36, 0x00b03, 0x00266, 0x26014, 0x30288 }, { 40, 0x00b03, 0x00268, 0x26014, 0x30280 }, { 44, 0x00b03, 0x00269, 0x26014, 0x30282 }, { 48, 0x00b03, 0x0026a, 0x26014, 0x30284 }, { 52, 0x00b03, 0x0026b, 0x26014, 0x30286 }, { 56, 0x00b03, 0x0026c, 0x26014, 0x30288 }, { 60, 0x00b03, 0x0026e, 0x26014, 0x30280 }, { 64, 0x00b03, 0x0026f, 0x26014, 0x30282 }, { 100, 0x00b03, 0x0028a, 0x2e014, 0x30280 }, { 104, 0x00b03, 0x0028b, 0x2e014, 0x30282 }, { 108, 0x00b03, 0x0028c, 0x2e014, 0x30284 }, { 112, 0x00b03, 0x0028d, 0x2e014, 0x30286 }, { 116, 0x00b03, 0x0028e, 0x2e014, 0x30288 }, { 120, 0x00b03, 0x002a0, 0x2e014, 0x30280 }, { 124, 0x00b03, 0x002a1, 0x2e014, 0x30282 }, { 128, 0x00b03, 0x002a2, 0x2e014, 0x30284 }, { 132, 0x00b03, 0x002a3, 0x2e014, 0x30286 }, { 136, 0x00b03, 0x002a4, 0x2e014, 0x30288 }, { 140, 0x00b03, 0x002a6, 0x2e014, 0x30280 }, { 149, 0x00b03, 0x002a8, 0x2e014, 0x30287 }, { 153, 0x00b03, 0x002a9, 0x2e014, 0x30289 }, { 157, 0x00b03, 0x002ab, 0x2e014, 0x30281 }, { 161, 0x00b03, 0x002ac, 0x2e014, 0x30283 }, { 165, 0x00b03, 0x002ad, 0x2e014, 0x30285 } }, rum_rf5225[] = { { 1, 0x00b33, 0x011e1, 0x1a014, 0x30282 }, { 2, 0x00b33, 0x011e1, 0x1a014, 0x30287 }, { 3, 0x00b33, 0x011e2, 0x1a014, 0x30282 }, { 4, 0x00b33, 
0x011e2, 0x1a014, 0x30287 }, { 5, 0x00b33, 0x011e3, 0x1a014, 0x30282 }, { 6, 0x00b33, 0x011e3, 0x1a014, 0x30287 }, { 7, 0x00b33, 0x011e4, 0x1a014, 0x30282 }, { 8, 0x00b33, 0x011e4, 0x1a014, 0x30287 }, { 9, 0x00b33, 0x011e5, 0x1a014, 0x30282 }, { 10, 0x00b33, 0x011e5, 0x1a014, 0x30287 }, { 11, 0x00b33, 0x011e6, 0x1a014, 0x30282 }, { 12, 0x00b33, 0x011e6, 0x1a014, 0x30287 }, { 13, 0x00b33, 0x011e7, 0x1a014, 0x30282 }, { 14, 0x00b33, 0x011e8, 0x1a014, 0x30284 }, { 34, 0x00b33, 0x01266, 0x26014, 0x30282 }, { 38, 0x00b33, 0x01267, 0x26014, 0x30284 }, { 42, 0x00b33, 0x01268, 0x26014, 0x30286 }, { 46, 0x00b33, 0x01269, 0x26014, 0x30288 }, { 36, 0x00b33, 0x01266, 0x26014, 0x30288 }, { 40, 0x00b33, 0x01268, 0x26014, 0x30280 }, { 44, 0x00b33, 0x01269, 0x26014, 0x30282 }, { 48, 0x00b33, 0x0126a, 0x26014, 0x30284 }, { 52, 0x00b33, 0x0126b, 0x26014, 0x30286 }, { 56, 0x00b33, 0x0126c, 0x26014, 0x30288 }, { 60, 0x00b33, 0x0126e, 0x26014, 0x30280 }, { 64, 0x00b33, 0x0126f, 0x26014, 0x30282 }, { 100, 0x00b33, 0x0128a, 0x2e014, 0x30280 }, { 104, 0x00b33, 0x0128b, 0x2e014, 0x30282 }, { 108, 0x00b33, 0x0128c, 0x2e014, 0x30284 }, { 112, 0x00b33, 0x0128d, 0x2e014, 0x30286 }, { 116, 0x00b33, 0x0128e, 0x2e014, 0x30288 }, { 120, 0x00b33, 0x012a0, 0x2e014, 0x30280 }, { 124, 0x00b33, 0x012a1, 0x2e014, 0x30282 }, { 128, 0x00b33, 0x012a2, 0x2e014, 0x30284 }, { 132, 0x00b33, 0x012a3, 0x2e014, 0x30286 }, { 136, 0x00b33, 0x012a4, 0x2e014, 0x30288 }, { 140, 0x00b33, 0x012a6, 0x2e014, 0x30280 }, { 149, 0x00b33, 0x012a8, 0x2e014, 0x30287 }, { 153, 0x00b33, 0x012a9, 0x2e014, 0x30289 }, { 157, 0x00b33, 0x012ab, 0x2e014, 0x30281 }, { 161, 0x00b33, 0x012ac, 0x2e014, 0x30283 }, { 165, 0x00b33, 0x012ad, 0x2e014, 0x30285 } }; static int rum_match(device_t self) { struct usb_attach_arg *uaa = device_get_ivars(self); if (uaa->iface != NULL) return UMATCH_NONE; return (usb_lookup(rum_devs, uaa->vendor, uaa->product) != NULL) ? UMATCH_VENDOR_PRODUCT : UMATCH_NONE; } static int rum_attach(device_t self) { struct rum_softc *sc = device_get_softc(self); struct usb_attach_arg *uaa = device_get_ivars(self); struct ieee80211com *ic; struct ifnet *ifp; const uint8_t *ucode = NULL; usb_interface_descriptor_t *id; usb_endpoint_descriptor_t *ed; usbd_status error; int i, ntries, size; uint8_t bands; uint32_t tmp; sc->sc_udev = uaa->device; sc->sc_dev = self; if (usbd_set_config_no(sc->sc_udev, RT2573_CONFIG_NO, 0) != 0) { device_printf(self, "could not set configuration no\n"); return ENXIO; } /* get the first interface handle */ error = usbd_device2interface_handle(sc->sc_udev, RT2573_IFACE_INDEX, &sc->sc_iface); if (error != 0) { device_printf(self, "could not get interface handle\n"); return ENXIO; } /* * Find endpoints. 
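* Walk the interface's endpoint descriptors and record the addresses of the bulk-in and bulk-out endpoints; attach fails if either one is missing.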
*/ id = usbd_get_interface_descriptor(sc->sc_iface); sc->sc_rx_no = sc->sc_tx_no = -1; for (i = 0; i < id->bNumEndpoints; i++) { ed = usbd_interface2endpoint_descriptor(sc->sc_iface, i); if (ed == NULL) { device_printf(self, "no endpoint descriptor for iface %d\n", i); return ENXIO; } if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) sc->sc_rx_no = ed->bEndpointAddress; else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT && UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) sc->sc_tx_no = ed->bEndpointAddress; } if (sc->sc_rx_no == -1 || sc->sc_tx_no == -1) { device_printf(self, "missing endpoint\n"); return ENXIO; } ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { device_printf(self, "can not if_alloc()\n"); return ENXIO; } ic = ifp->if_l2com; mtx_init(&sc->sc_mtx, device_get_nameunit(sc->sc_dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); usb_init_task(&sc->sc_task, rum_task, sc); usb_init_task(&sc->sc_scantask, rum_scantask, sc); callout_init(&sc->watchdog_ch, 0); /* retrieve RT2573 rev. no */ for (ntries = 0; ntries < 1000; ntries++) { if ((tmp = rum_read(sc, RT2573_MAC_CSR0)) != 0) break; DELAY(1000); } if (ntries == 1000) { device_printf(self, "timeout waiting for chip to settle\n"); goto bad; } /* retrieve MAC address and various other things from EEPROM */ rum_read_eeprom(sc); device_printf(self, "MAC/BBP RT2573 (rev 0x%05x), RF %s\n", tmp, rum_get_rf(sc->rf_rev)); ucode = rt2573_ucode; size = sizeof rt2573_ucode; error = rum_load_microcode(sc, ucode, size); if (error != 0) { device_printf(self, "could not load 8051 microcode\n"); goto bad; } ifp->if_softc = sc; if_initname(ifp, "rum", device_get_unit(sc->sc_dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_NEEDSGIANT; /* USB stack is still under Giant lock */ ifp->if_init = rum_init; ifp->if_ioctl = rum_ioctl; ifp->if_start = rum_start; IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN; IFQ_SET_READY(&ifp->if_snd); ic->ic_ifp = ifp; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_IBSS /* IBSS mode supported */ | IEEE80211_C_MONITOR /* monitor mode supported */ | IEEE80211_C_HOSTAP /* HostAp mode supported */ | IEEE80211_C_TXPMGT /* tx power management */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_BGSCAN /* bg scanning supported */ | IEEE80211_C_WPA /* 802.11i */ ; bands = 0; setbit(&bands, IEEE80211_MODE_11B); setbit(&bands, IEEE80211_MODE_11G); if (sc->rf_rev == RT2573_RF_5225 || sc->rf_rev == RT2573_RF_5226) setbit(&bands, IEEE80211_MODE_11A); ieee80211_init_channels(ic, NULL, &bands); ieee80211_ifattach(ic); ic->ic_newassoc = rum_newassoc; ic->ic_raw_xmit = rum_raw_xmit; ic->ic_node_alloc = rum_node_alloc; ic->ic_scan_start = rum_scan_start; ic->ic_scan_end = rum_scan_end; ic->ic_set_channel = rum_set_channel; ic->ic_vap_create = rum_vap_create; ic->ic_vap_delete = rum_vap_delete; sc->sc_rates = ieee80211_get_ratetable(ic->ic_curchan); bpfattach(ifp, DLT_IEEE802_11_RADIO, sizeof (struct ieee80211_frame) + sizeof(sc->sc_txtap)); sc->sc_rxtap_len = sizeof sc->sc_rxtap; sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len); sc->sc_rxtap.wr_ihdr.it_present = htole32(RT2573_RX_RADIOTAP_PRESENT); sc->sc_txtap_len = sizeof sc->sc_txtap; sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len); sc->sc_txtap.wt_ihdr.it_present 
= htole32(RT2573_TX_RADIOTAP_PRESENT); if (bootverbose) ieee80211_announce(ic); return 0; bad: mtx_destroy(&sc->sc_mtx); if_free(ifp); return ENXIO; } static int rum_detach(device_t self) { struct rum_softc *sc = device_get_softc(self); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; rum_stop(sc); bpfdetach(ifp); ieee80211_ifdetach(ic); usb_rem_task(sc->sc_udev, &sc->sc_task); usb_rem_task(sc->sc_udev, &sc->sc_scantask); callout_stop(&sc->watchdog_ch); if (sc->amrr_xfer != NULL) { usbd_free_xfer(sc->amrr_xfer); sc->amrr_xfer = NULL; } if (sc->sc_rx_pipeh != NULL) { usbd_abort_pipe(sc->sc_rx_pipeh); usbd_close_pipe(sc->sc_rx_pipeh); } if (sc->sc_tx_pipeh != NULL) { usbd_abort_pipe(sc->sc_tx_pipeh); usbd_close_pipe(sc->sc_tx_pipeh); } rum_free_rx_list(sc); rum_free_tx_list(sc); if_free(ifp); mtx_destroy(&sc->sc_mtx); return 0; } static struct ieee80211vap * rum_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, int opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct rum_vap *rvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return NULL; rvp = (struct rum_vap *) malloc(sizeof(struct rum_vap), M_80211_VAP, M_NOWAIT | M_ZERO); if (rvp == NULL) return NULL; vap = &rvp->vap; /* enable s/w bmiss handling for sta mode */ ieee80211_vap_setup(ic, vap, name, unit, opmode, flags | IEEE80211_CLONE_NOBEACONS, bssid, mac); /* override state transition machine */ rvp->newstate = vap->iv_newstate; vap->iv_newstate = rum_newstate; callout_init(&rvp->amrr_ch, 0); ieee80211_amrr_init(&rvp->amrr, vap, IEEE80211_AMRR_MIN_SUCCESS_THRESHOLD, IEEE80211_AMRR_MAX_SUCCESS_THRESHOLD, 1000 /* 1 sec */); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status); ic->ic_opmode = opmode; return vap; } static void rum_vap_delete(struct ieee80211vap *vap) { struct rum_vap *rvp = RUM_VAP(vap); callout_stop(&rvp->amrr_ch); ieee80211_amrr_cleanup(&rvp->amrr); ieee80211_vap_detach(vap); free(rvp, M_80211_VAP); } static int rum_alloc_tx_list(struct rum_softc *sc) { struct rum_tx_data *data; int i, error; sc->tx_queued = sc->tx_cur = 0; for (i = 0; i < RUM_TX_LIST_COUNT; i++) { data = &sc->tx_data[i]; data->sc = sc; data->xfer = usbd_alloc_xfer(sc->sc_udev); if (data->xfer == NULL) { device_printf(sc->sc_dev, "could not allocate tx xfer\n"); error = ENOMEM; goto fail; } data->buf = usbd_alloc_buffer(data->xfer, RT2573_TX_DESC_SIZE + MCLBYTES); if (data->buf == NULL) { device_printf(sc->sc_dev, "could not allocate tx buffer\n"); error = ENOMEM; goto fail; } /* clean Tx descriptor */ bzero(data->buf, RT2573_TX_DESC_SIZE); } return 0; fail: rum_free_tx_list(sc); return error; } static void rum_free_tx_list(struct rum_softc *sc) { struct rum_tx_data *data; int i; for (i = 0; i < RUM_TX_LIST_COUNT; i++) { data = &sc->tx_data[i]; if (data->xfer != NULL) { usbd_free_xfer(data->xfer); data->xfer = NULL; } if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; } } } static int rum_alloc_rx_list(struct rum_softc *sc) { struct rum_rx_data *data; int i, error; for (i = 0; i < RUM_RX_LIST_COUNT; i++) { data = &sc->rx_data[i]; data->sc = sc; data->xfer = usbd_alloc_xfer(sc->sc_udev); if (data->xfer == NULL) { device_printf(sc->sc_dev, "could not allocate rx xfer\n"); error = ENOMEM; goto fail; } if (usbd_alloc_buffer(data->xfer, MCLBYTES) == NULL) { device_printf(sc->sc_dev, "could not allocate rx buffer\n"); error = ENOMEM; goto fail; } data->m = 
m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (data->m == NULL) { device_printf(sc->sc_dev, "could not allocate rx mbuf\n"); error = ENOMEM; goto fail; } data->buf = mtod(data->m, uint8_t *); } return 0; fail: rum_free_tx_list(sc); return error; } static void rum_free_rx_list(struct rum_softc *sc) { struct rum_rx_data *data; int i; for (i = 0; i < RUM_RX_LIST_COUNT; i++) { data = &sc->rx_data[i]; if (data->xfer != NULL) { usbd_free_xfer(data->xfer); data->xfer = NULL; } if (data->m != NULL) { m_freem(data->m); data->m = NULL; } } } static void rum_task(void *arg) { struct rum_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct rum_vap *rvp = RUM_VAP(vap); const struct ieee80211_txparam *tp; enum ieee80211_state ostate; struct ieee80211_node *ni; uint32_t tmp; ostate = vap->iv_state; RUM_LOCK(sc); switch (sc->sc_state) { case IEEE80211_S_INIT: if (ostate == IEEE80211_S_RUN) { /* abort TSF synchronization */ tmp = rum_read(sc, RT2573_TXRX_CSR9); rum_write(sc, RT2573_TXRX_CSR9, tmp & ~0x00ffffff); } break; case IEEE80211_S_RUN: ni = vap->iv_bss; if (vap->iv_opmode != IEEE80211_M_MONITOR) { rum_update_slot(ic->ic_ifp); rum_enable_mrr(sc); rum_set_txpreamble(sc); rum_set_basicrates(sc); rum_set_bssid(sc, ni->ni_bssid); } if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_IBSS) rum_prepare_beacon(sc, vap); if (vap->iv_opmode != IEEE80211_M_MONITOR) rum_enable_tsf_sync(sc); /* enable automatic rate adaptation */ tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_bsschan)]; if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) rum_amrr_start(sc, ni); break; default: break; } RUM_UNLOCK(sc); IEEE80211_LOCK(ic); rvp->newstate(vap, sc->sc_state, sc->sc_arg); if (vap->iv_newstate_cb != NULL) vap->iv_newstate_cb(vap, sc->sc_state, sc->sc_arg); IEEE80211_UNLOCK(ic); } static int rum_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct rum_vap *rvp = RUM_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct rum_softc *sc = ic->ic_ifp->if_softc; usb_rem_task(sc->sc_udev, &sc->sc_task); usb_rem_task(sc->sc_udev, &sc->sc_scantask); callout_stop(&rvp->amrr_ch); /* do it in a process context */ sc->sc_state = nstate; sc->sc_arg = arg; if (nstate == IEEE80211_S_INIT) { rvp->newstate(vap, nstate, arg); return 0; } else { usb_add_task(sc->sc_udev, &sc->sc_task, USB_TASKQ_DRIVER); return EINPROGRESS; } } static void rum_txeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status) { struct rum_tx_data *data = priv; struct rum_softc *sc = data->sc; struct ifnet *ifp = sc->sc_ifp; if (data->m != NULL && data->m->m_flags & M_TXCB) ieee80211_process_callback(data->ni, data->m, 0/*XXX*/); if (status != USBD_NORMAL_COMPLETION) { if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) return; device_printf(sc->sc_dev, "could not transmit buffer: %s\n", usbd_errstr(status)); if (status == USBD_STALLED) usbd_clear_endpoint_stall_async(sc->sc_tx_pipeh); ifp->if_oerrors++; return; } m_freem(data->m); data->m = NULL; ieee80211_free_node(data->ni); data->ni = NULL; sc->tx_queued--; ifp->if_opackets++; DPRINTFN(10, ("tx done\n")); sc->sc_tx_timer = 0; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; rum_start(ifp); } static void rum_rxeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status) { struct rum_rx_data *data = priv; struct rum_softc *sc = data->sc; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct rum_rx_desc *desc; struct 
ieee80211_node *ni; struct mbuf *mnew, *m; int len, rssi; if (status != USBD_NORMAL_COMPLETION) { if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) return; if (status == USBD_STALLED) usbd_clear_endpoint_stall_async(sc->sc_rx_pipeh); goto skip; } usbd_get_xfer_status(xfer, NULL, NULL, &len, NULL); if (len < RT2573_RX_DESC_SIZE + sizeof (struct ieee80211_frame_min)) { DPRINTF(("%s: xfer too short %d\n", device_get_nameunit(sc->sc_dev), len)); ifp->if_ierrors++; goto skip; } desc = (struct rum_rx_desc *)data->buf; if (le32toh(desc->flags) & RT2573_RX_CRC_ERROR) { /* * This should not happen since we did not request to receive * those frames when we filled RT2573_TXRX_CSR0. */ DPRINTFN(5, ("CRC error\n")); ifp->if_ierrors++; goto skip; } mnew = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (mnew == NULL) { ifp->if_ierrors++; goto skip; } m = data->m; data->m = mnew; data->buf = mtod(data->m, uint8_t *); /* finalize mbuf */ m->m_pkthdr.rcvif = ifp; m->m_data = (caddr_t)(desc + 1); m->m_pkthdr.len = m->m_len = (le32toh(desc->flags) >> 16) & 0xfff; rssi = rum_get_rssi(sc, desc->rssi); if (bpf_peers_present(ifp->if_bpf)) { struct rum_rx_radiotap_header *tap = &sc->sc_rxtap; tap->wr_flags = IEEE80211_RADIOTAP_F_FCS; tap->wr_rate = ieee80211_plcp2rate(desc->rate, (desc->flags & htole32(RT2573_RX_OFDM)) ? IEEE80211_T_OFDM : IEEE80211_T_CCK); tap->wr_chan_freq = htole16(ic->ic_curchan->ic_freq); tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags); tap->wr_antenna = sc->rx_ant; tap->wr_antsignal = rssi; bpf_mtap2(ifp->if_bpf, tap, sc->sc_rxtap_len, m); } ni = ieee80211_find_rxnode(ic, mtod(m, struct ieee80211_frame_min *)); if (ni != NULL) { /* Error happened during RSSI conversion. */ if (rssi < 0) rssi = -30; /* XXX ignored by net80211 */ (void) ieee80211_input(ni, m, rssi, RT2573_NOISE_FLOOR, 0); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi, RT2573_NOISE_FLOOR, 0); DPRINTFN(15, ("rx done\n")); skip: /* setup a new transfer */ usbd_setup_xfer(xfer, sc->sc_rx_pipeh, data, data->buf, MCLBYTES, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, rum_rxeof); usbd_transfer(xfer); } static uint8_t rum_plcp_signal(int rate) { switch (rate) { /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 
14 Table 80) */ case 12: return 0xb; case 18: return 0xf; case 24: return 0xa; case 36: return 0xe; case 48: return 0x9; case 72: return 0xd; case 96: return 0x8; case 108: return 0xc; /* CCK rates (NB: not IEEE std, device-specific) */ case 2: return 0x0; case 4: return 0x1; case 11: return 0x2; case 22: return 0x3; } return 0xff; /* XXX unsupported/unknown rate */ } static void rum_setup_tx_desc(struct rum_softc *sc, struct rum_tx_desc *desc, uint32_t flags, uint16_t xflags, int len, int rate) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint16_t plcp_length; int remainder; desc->flags = htole32(flags); desc->flags |= htole32(RT2573_TX_VALID); desc->flags |= htole32(len << 16); desc->xflags = htole16(xflags); desc->wme = htole16(RT2573_QID(0) | RT2573_AIFSN(2) | RT2573_LOGCWMIN(4) | RT2573_LOGCWMAX(10)); /* setup PLCP fields */ desc->plcp_signal = rum_plcp_signal(rate); desc->plcp_service = 4; len += IEEE80211_CRC_LEN; if (ieee80211_rate2phytype(sc->sc_rates, rate) == IEEE80211_T_OFDM) { desc->flags |= htole32(RT2573_TX_OFDM); plcp_length = len & 0xfff; desc->plcp_length_hi = plcp_length >> 6; desc->plcp_length_lo = plcp_length & 0x3f; } else { plcp_length = (16 * len + rate - 1) / rate; if (rate == 22) { remainder = (16 * len) % 22; if (remainder != 0 && remainder < 7) desc->plcp_service |= RT2573_PLCP_LENGEXT; } desc->plcp_length_hi = plcp_length >> 8; desc->plcp_length_lo = plcp_length & 0xff; if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) desc->plcp_signal |= 0x08; } } #define RUM_TX_TIMEOUT 5000 static int rum_sendprot(struct rum_softc *sc, const struct mbuf *m, struct ieee80211_node *ni, int prot, int rate) { struct ieee80211com *ic = ni->ni_ic; const struct ieee80211_frame *wh; struct rum_tx_desc *desc; struct rum_tx_data *data; struct mbuf *mprot; int protrate, ackrate, pktlen, flags, isshort; uint16_t dur; usbd_status error; KASSERT(prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY, ("protection %d", prot)); wh = mtod(m, const struct ieee80211_frame *); pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN; protrate = ieee80211_ctl_rate(sc->sc_rates, rate); ackrate = ieee80211_ack_rate(sc->sc_rates, rate); isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; dur = ieee80211_compute_duration(sc->sc_rates, pktlen, rate, isshort); + ieee80211_ack_duration(sc->sc_rates, rate, isshort); flags = RT2573_TX_MORE_FRAG; if (prot == IEEE80211_PROT_RTSCTS) { /* NB: CTS is the same size as an ACK */ dur += ieee80211_ack_duration(sc->sc_rates, rate, isshort); flags |= RT2573_TX_NEED_ACK; mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, dur); } else { mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur); } if (mprot == NULL) { /* XXX stat + msg */ return ENOBUFS; } data = &sc->tx_data[sc->tx_cur]; desc = (struct rum_tx_desc *)data->buf; data->m = mprot; data->ni = ieee80211_ref_node(ni); m_copydata(mprot, 0, mprot->m_pkthdr.len, data->buf + RT2573_TX_DESC_SIZE); rum_setup_tx_desc(sc, desc, flags, 0, mprot->m_pkthdr.len, protrate); usbd_setup_xfer(data->xfer, sc->sc_tx_pipeh, data, data->buf, /* NB: no roundup necessary */ RT2573_TX_DESC_SIZE + mprot->m_pkthdr.len, USBD_FORCE_SHORT_XFER | USBD_NO_COPY, RUM_TX_TIMEOUT, rum_txeof); error = usbd_transfer(data->xfer); if (error != USBD_NORMAL_COMPLETION && error != USBD_IN_PROGRESS) { data->m = NULL; data->ni = NULL; return error; } sc->tx_queued++; sc->tx_cur = (sc->tx_cur + 1) % RUM_TX_LIST_COUNT; return 0; } static int rum_tx_mgt(struct rum_softc *sc, struct mbuf *m0, struct 
ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct rum_tx_desc *desc; struct rum_tx_data *data; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp; struct ieee80211_key *k; uint32_t flags = 0; uint16_t dur; usbd_status error; int xferlen; data = &sc->tx_data[sc->tx_cur]; data->m = m0; data->ni = ni; desc = (struct rum_tx_desc *)data->buf; wh = mtod(m0, struct ieee80211_frame *); if (wh->i_fc[1] & IEEE80211_FC1_WEP) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } wh = mtod(m0, struct ieee80211_frame *); } tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RT2573_TX_NEED_ACK; dur = ieee80211_ack_duration(sc->sc_rates, tp->mgmtrate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); *(uint16_t *)wh->i_dur = htole16(dur); /* tell hardware to add timestamp for probe responses */ if ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP)) flags |= RT2573_TX_TIMESTAMP; } if (bpf_peers_present(ifp->if_bpf)) { struct rum_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = tp->mgmtrate; tap->wt_chan_freq = htole16(ic->ic_curchan->ic_freq); tap->wt_chan_flags = htole16(ic->ic_curchan->ic_flags); tap->wt_antenna = sc->tx_ant; bpf_mtap2(ifp->if_bpf, tap, sc->sc_txtap_len, m0); } m_copydata(m0, 0, m0->m_pkthdr.len, data->buf + RT2573_TX_DESC_SIZE); rum_setup_tx_desc(sc, desc, flags, 0, m0->m_pkthdr.len, tp->mgmtrate); /* align end on a 4-bytes boundary */ xferlen = (RT2573_TX_DESC_SIZE + m0->m_pkthdr.len + 3) & ~3; /* * No space left in the last URB to store the extra 4 bytes, force * sending of another URB. */ if ((xferlen % 64) == 0) xferlen += 4; DPRINTFN(10, ("sending mgt frame len=%d rate=%d xfer len=%d\n", m0->m_pkthdr.len + (int)RT2573_TX_DESC_SIZE, tp->mgmtrate, xferlen)); usbd_setup_xfer(data->xfer, sc->sc_tx_pipeh, data, data->buf, xferlen, USBD_FORCE_SHORT_XFER | USBD_NO_COPY, RUM_TX_TIMEOUT, rum_txeof); error = usbd_transfer(data->xfer); if (error != USBD_NORMAL_COMPLETION && error != USBD_IN_PROGRESS) { m_freem(m0); data->m = NULL; data->ni = NULL; return error; } sc->tx_queued++; sc->tx_cur = (sc->tx_cur + 1) % RUM_TX_LIST_COUNT; return 0; } static int rum_tx_raw(struct rum_softc *sc, struct mbuf *m0, struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct rum_tx_desc *desc; struct rum_tx_data *data; uint32_t flags; usbd_status error; int xferlen, rate; KASSERT(params != NULL, ("no raw xmit params")); data = &sc->tx_data[sc->tx_cur]; desc = (struct rum_tx_desc *)data->buf; rate = params->ibp_rate0 & IEEE80211_RATE_VAL; /* XXX validate */ if (rate == 0) { m_freem(m0); return EINVAL; } flags = 0; if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) flags |= RT2573_TX_NEED_ACK; if (params->ibp_flags & (IEEE80211_BPF_RTS|IEEE80211_BPF_CTS)) { error = rum_sendprot(sc, m0, ni, params->ibp_flags & IEEE80211_BPF_RTS ? 
IEEE80211_PROT_RTSCTS : IEEE80211_PROT_CTSONLY, rate); if (error) { m_freem(m0); return error; } flags |= RT2573_TX_LONG_RETRY | RT2573_TX_IFS_SIFS; } if (bpf_peers_present(ifp->if_bpf)) { struct rum_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; tap->wt_chan_freq = htole16(ic->ic_curchan->ic_freq); tap->wt_chan_flags = htole16(ic->ic_curchan->ic_flags); tap->wt_antenna = sc->tx_ant; bpf_mtap2(ifp->if_bpf, tap, sc->sc_txtap_len, m0); } data->m = m0; data->ni = ni; m_copydata(m0, 0, m0->m_pkthdr.len, data->buf + RT2573_TX_DESC_SIZE); /* XXX need to setup descriptor ourself */ rum_setup_tx_desc(sc, desc, flags, 0, m0->m_pkthdr.len, rate); /* align end on a 4-bytes boundary */ xferlen = (RT2573_TX_DESC_SIZE + m0->m_pkthdr.len + 3) & ~3; /* * No space left in the last URB to store the extra 4 bytes, force * sending of another URB. */ if ((xferlen % 64) == 0) xferlen += 4; DPRINTFN(10, ("sending raw frame len=%u rate=%u xfer len=%u\n", m0->m_pkthdr.len, rate, xferlen)); usbd_setup_xfer(data->xfer, sc->sc_tx_pipeh, data, data->buf, xferlen, USBD_FORCE_SHORT_XFER | USBD_NO_COPY, RUM_TX_TIMEOUT, rum_txeof); error = usbd_transfer(data->xfer); if (error != USBD_NORMAL_COMPLETION && error != USBD_IN_PROGRESS) return error; sc->tx_queued++; sc->tx_cur = (sc->tx_cur + 1) % RUM_TX_LIST_COUNT; return 0; } static int rum_tx_data(struct rum_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct rum_tx_desc *desc; struct rum_tx_data *data; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp; struct ieee80211_key *k; uint32_t flags = 0; uint16_t dur; usbd_status error; int rate, xferlen; wh = mtod(m0, struct ieee80211_frame *); tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; if (IEEE80211_IS_MULTICAST(wh->i_addr1)) rate = tp->mcastrate; else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) rate = tp->ucastrate; else { (void) ieee80211_amrr_choose(ni, &RUM_NODE(ni)->amn); rate = ni->ni_txrate; } if (wh->i_fc[1] & IEEE80211_FC1_WEP) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { int prot = IEEE80211_PROT_NONE; if (m0->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) prot = IEEE80211_PROT_RTSCTS; else if ((ic->ic_flags & IEEE80211_F_USEPROT) && ieee80211_rate2phytype(sc->sc_rates, rate) == IEEE80211_T_OFDM) prot = ic->ic_protmode; if (prot != IEEE80211_PROT_NONE) { error = rum_sendprot(sc, m0, ni, prot, rate); if (error) { m_freem(m0); return error; } flags |= RT2573_TX_LONG_RETRY | RT2573_TX_IFS_SIFS; } } data = &sc->tx_data[sc->tx_cur]; desc = (struct rum_tx_desc *)data->buf; data->m = m0; data->ni = ni; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RT2573_TX_NEED_ACK; flags |= RT2573_TX_MORE_FRAG; dur = ieee80211_ack_duration(sc->sc_rates, rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); *(uint16_t *)wh->i_dur = htole16(dur); } if (bpf_peers_present(ifp->if_bpf)) { struct rum_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; tap->wt_chan_freq = htole16(ic->ic_curchan->ic_freq); tap->wt_chan_flags = htole16(ic->ic_curchan->ic_flags); tap->wt_antenna = sc->tx_ant; bpf_mtap2(ifp->if_bpf, tap, sc->sc_txtap_len, m0); } m_copydata(m0, 0, m0->m_pkthdr.len, data->buf + RT2573_TX_DESC_SIZE); rum_setup_tx_desc(sc, desc, 
flags, 0, m0->m_pkthdr.len, rate); /* align end on a 4-bytes boundary */ xferlen = (RT2573_TX_DESC_SIZE + m0->m_pkthdr.len + 3) & ~3; /* * No space left in the last URB to store the extra 4 bytes, force * sending of another URB. */ if ((xferlen % 64) == 0) xferlen += 4; DPRINTFN(10, ("sending frame len=%d rate=%d xfer len=%d\n", m0->m_pkthdr.len + (int)RT2573_TX_DESC_SIZE, rate, xferlen)); usbd_setup_xfer(data->xfer, sc->sc_tx_pipeh, data, data->buf, xferlen, USBD_FORCE_SHORT_XFER | USBD_NO_COPY, RUM_TX_TIMEOUT, rum_txeof); error = usbd_transfer(data->xfer); if (error != USBD_NORMAL_COMPLETION && error != USBD_IN_PROGRESS) { m_freem(m0); data->m = NULL; data->ni = NULL; return error; } sc->tx_queued++; sc->tx_cur = (sc->tx_cur + 1) % RUM_TX_LIST_COUNT; return 0; } static void rum_start(struct ifnet *ifp) { struct rum_softc *sc = ifp->if_softc; struct ieee80211_node *ni; struct mbuf *m; for (;;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; if (sc->tx_queued >= RUM_TX_LIST_COUNT-1) { IFQ_DRV_PREPEND(&ifp->if_snd, m); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; m = ieee80211_encap(ni, m); if (m == NULL) { ieee80211_free_node(ni); continue; } if (rum_tx_data(sc, m, ni) != 0) { ieee80211_free_node(ni); ifp->if_oerrors++; break; } sc->sc_tx_timer = 5; callout_reset(&sc->watchdog_ch, hz, rum_watchdog, sc); } } static void rum_watchdog(void *arg) { struct rum_softc *sc = arg; RUM_LOCK(sc); if (sc->sc_tx_timer > 0) { if (--sc->sc_tx_timer == 0) { device_printf(sc->sc_dev, "device timeout\n"); /*rum_init(ifp); XXX needs a process context! */ sc->sc_ifp->if_oerrors++; RUM_UNLOCK(sc); return; } callout_reset(&sc->watchdog_ch, hz, rum_watchdog, sc); } RUM_UNLOCK(sc); } static int rum_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct rum_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct ifreq *ifr = (struct ifreq *) data; int error = 0, startall = 0; switch (cmd) { case SIOCSIFFLAGS: RUM_LOCK(sc); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { rum_init(sc); startall = 1; } else rum_update_promisc(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) rum_stop(sc); } RUM_UNLOCK(sc); if (startall) ieee80211_start_all(ic); break; case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; case SIOCGIFADDR: error = ether_ioctl(ifp, cmd, data); break; default: error = EINVAL; break; } return error; } static void rum_eeprom_read(struct rum_softc *sc, uint16_t addr, void *buf, int len) { usb_device_request_t req; usbd_status error; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = RT2573_READ_EEPROM; USETW(req.wValue, 0); USETW(req.wIndex, addr); USETW(req.wLength, len); error = usbd_do_request(sc->sc_udev, &req, buf); if (error != 0) { device_printf(sc->sc_dev, "could not read EEPROM: %s\n", usbd_errstr(error)); } } static uint32_t rum_read(struct rum_softc *sc, uint16_t reg) { uint32_t val; rum_read_multi(sc, reg, &val, sizeof val); return le32toh(val); } static void rum_read_multi(struct rum_softc *sc, uint16_t reg, void *buf, int len) { usb_device_request_t req; usbd_status error; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = RT2573_READ_MULTI_MAC; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, len); error = usbd_do_request(sc->sc_udev, &req, buf); if (error != 0) { device_printf(sc->sc_dev, "could not multi read MAC register: %s\n", usbd_errstr(error)); } } static void rum_write(struct rum_softc *sc, uint16_t reg, 
uint32_t val) { uint32_t tmp = htole32(val); rum_write_multi(sc, reg, &tmp, sizeof tmp); } static void rum_write_multi(struct rum_softc *sc, uint16_t reg, void *buf, size_t len) { usb_device_request_t req; usbd_status error; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RT2573_WRITE_MULTI_MAC; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, len); error = usbd_do_request(sc->sc_udev, &req, buf); if (error != 0) { device_printf(sc->sc_dev, "could not multi write MAC register: %s\n", usbd_errstr(error)); } } static void rum_bbp_write(struct rum_softc *sc, uint8_t reg, uint8_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 5; ntries++) { if (!(rum_read(sc, RT2573_PHY_CSR3) & RT2573_BBP_BUSY)) break; } if (ntries == 5) { device_printf(sc->sc_dev, "could not write to BBP\n"); return; } tmp = RT2573_BBP_BUSY | (reg & 0x7f) << 8 | val; rum_write(sc, RT2573_PHY_CSR3, tmp); } static uint8_t rum_bbp_read(struct rum_softc *sc, uint8_t reg) { uint32_t val; int ntries; for (ntries = 0; ntries < 5; ntries++) { if (!(rum_read(sc, RT2573_PHY_CSR3) & RT2573_BBP_BUSY)) break; } if (ntries == 5) { device_printf(sc->sc_dev, "could not read BBP\n"); return 0; } val = RT2573_BBP_BUSY | RT2573_BBP_READ | reg << 8; rum_write(sc, RT2573_PHY_CSR3, val); for (ntries = 0; ntries < 100; ntries++) { val = rum_read(sc, RT2573_PHY_CSR3); if (!(val & RT2573_BBP_BUSY)) return val & 0xff; DELAY(1); } device_printf(sc->sc_dev, "could not read BBP\n"); return 0; } static void rum_rf_write(struct rum_softc *sc, uint8_t reg, uint32_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 5; ntries++) { if (!(rum_read(sc, RT2573_PHY_CSR4) & RT2573_RF_BUSY)) break; } if (ntries == 5) { device_printf(sc->sc_dev, "could not write to RF\n"); return; } tmp = RT2573_RF_BUSY | RT2573_RF_20BIT | (val & 0xfffff) << 2 | (reg & 3); rum_write(sc, RT2573_PHY_CSR4, tmp); /* remember last written value in sc */ sc->rf_regs[reg] = val; DPRINTFN(15, ("RF R[%u] <- 0x%05x\n", reg & 3, val & 0xfffff)); } static void rum_select_antenna(struct rum_softc *sc) { uint8_t bbp4, bbp77; uint32_t tmp; bbp4 = rum_bbp_read(sc, 4); bbp77 = rum_bbp_read(sc, 77); /* TBD */ /* make sure Rx is disabled before switching antenna */ tmp = rum_read(sc, RT2573_TXRX_CSR0); rum_write(sc, RT2573_TXRX_CSR0, tmp | RT2573_DISABLE_RX); rum_bbp_write(sc, 4, bbp4); rum_bbp_write(sc, 77, bbp77); rum_write(sc, RT2573_TXRX_CSR0, tmp); } /* * Enable multi-rate retries for frames sent at OFDM rates. * In 802.11b/g mode, allow fallback to CCK rates. 
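 *
 * This is a read-modify-write of RT2573_TXRX_CSR4: the CCK-fallback
 * bit is cleared first and set again only when the BSS channel is
 * not in the 5GHz band (which has no CCK rates), after which the
 * MRR enable bit is set unconditionally.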
*/ static void rum_enable_mrr(struct rum_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t tmp; tmp = rum_read(sc, RT2573_TXRX_CSR4); tmp &= ~RT2573_MRR_CCK_FALLBACK; if (!IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan)) tmp |= RT2573_MRR_CCK_FALLBACK; tmp |= RT2573_MRR_ENABLED; rum_write(sc, RT2573_TXRX_CSR4, tmp); } static void rum_set_txpreamble(struct rum_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t tmp; tmp = rum_read(sc, RT2573_TXRX_CSR4); tmp &= ~RT2573_SHORT_PREAMBLE; if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) tmp |= RT2573_SHORT_PREAMBLE; rum_write(sc, RT2573_TXRX_CSR4, tmp); } static void rum_set_basicrates(struct rum_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; /* update basic rate set */ if (ic->ic_curmode == IEEE80211_MODE_11B) { /* 11b basic rates: 1, 2Mbps */ rum_write(sc, RT2573_TXRX_CSR5, 0x3); } else if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan)) { /* 11a basic rates: 6, 12, 24Mbps */ rum_write(sc, RT2573_TXRX_CSR5, 0x150); } else { /* 11b/g basic rates: 1, 2, 5.5, 11Mbps */ rum_write(sc, RT2573_TXRX_CSR5, 0xf); } } /* * Reprogram MAC/BBP to switch to a new band. Values taken from the reference * driver. */ static void rum_select_band(struct rum_softc *sc, struct ieee80211_channel *c) { uint8_t bbp17, bbp35, bbp96, bbp97, bbp98, bbp104; uint32_t tmp; /* update all BBP registers that depend on the band */ bbp17 = 0x20; bbp96 = 0x48; bbp104 = 0x2c; bbp35 = 0x50; bbp97 = 0x48; bbp98 = 0x48; if (IEEE80211_IS_CHAN_5GHZ(c)) { bbp17 += 0x08; bbp96 += 0x10; bbp104 += 0x0c; bbp35 += 0x10; bbp97 += 0x10; bbp98 += 0x10; } if ((IEEE80211_IS_CHAN_2GHZ(c) && sc->ext_2ghz_lna) || (IEEE80211_IS_CHAN_5GHZ(c) && sc->ext_5ghz_lna)) { bbp17 += 0x10; bbp96 += 0x10; bbp104 += 0x10; } sc->bbp17 = bbp17; rum_bbp_write(sc, 17, bbp17); rum_bbp_write(sc, 96, bbp96); rum_bbp_write(sc, 104, bbp104); if ((IEEE80211_IS_CHAN_2GHZ(c) && sc->ext_2ghz_lna) || (IEEE80211_IS_CHAN_5GHZ(c) && sc->ext_5ghz_lna)) { rum_bbp_write(sc, 75, 0x80); rum_bbp_write(sc, 86, 0x80); rum_bbp_write(sc, 88, 0x80); } rum_bbp_write(sc, 35, bbp35); rum_bbp_write(sc, 97, bbp97); rum_bbp_write(sc, 98, bbp98); tmp = rum_read(sc, RT2573_PHY_CSR0); tmp &= ~(RT2573_PA_PE_2GHZ | RT2573_PA_PE_5GHZ); if (IEEE80211_IS_CHAN_2GHZ(c)) tmp |= RT2573_PA_PE_2GHZ; else tmp |= RT2573_PA_PE_5GHZ; rum_write(sc, RT2573_PHY_CSR0, tmp); } static void rum_set_chan(struct rum_softc *sc, struct ieee80211_channel *c) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; const struct rfprog *rfprog; uint8_t bbp3, bbp94 = RT2573_BBPR94_DEFAULT; int8_t power; u_int i, chan; chan = ieee80211_chan2ieee(ic, c); if (chan == 0 || chan == IEEE80211_CHAN_ANY) return; /* select the appropriate RF settings based on what EEPROM says */ rfprog = (sc->rf_rev == RT2573_RF_5225 || sc->rf_rev == RT2573_RF_2527) ? rum_rf5225 : rum_rf5226; /* find the settings for this channel (we know it exists) */ for (i = 0; rfprog[i].chan != chan; i++); power = sc->txpow[i]; if (power < 0) { bbp94 += power; power = 0; } else if (power > 31) { bbp94 += power - 31; power = 31; } /* * If we are switching from the 2GHz band to the 5GHz band or * vice-versa, BBP registers need to be reprogrammed. 
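 *
 * Note that the test below compares the full channel flag words
 * rather than just the band bits, so any flag difference (not only
 * a band change) re-runs rum_select_band()/rum_select_antenna();
 * the extra reprogramming should be harmless.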
*/ if (c->ic_flags != ic->ic_curchan->ic_flags) { rum_select_band(sc, c); rum_select_antenna(sc); } ic->ic_curchan = c; rum_rf_write(sc, RT2573_RF1, rfprog[i].r1); rum_rf_write(sc, RT2573_RF2, rfprog[i].r2); rum_rf_write(sc, RT2573_RF3, rfprog[i].r3 | power << 7); rum_rf_write(sc, RT2573_RF4, rfprog[i].r4 | sc->rffreq << 10); rum_rf_write(sc, RT2573_RF1, rfprog[i].r1); rum_rf_write(sc, RT2573_RF2, rfprog[i].r2); rum_rf_write(sc, RT2573_RF3, rfprog[i].r3 | power << 7 | 1); rum_rf_write(sc, RT2573_RF4, rfprog[i].r4 | sc->rffreq << 10); rum_rf_write(sc, RT2573_RF1, rfprog[i].r1); rum_rf_write(sc, RT2573_RF2, rfprog[i].r2); rum_rf_write(sc, RT2573_RF3, rfprog[i].r3 | power << 7); rum_rf_write(sc, RT2573_RF4, rfprog[i].r4 | sc->rffreq << 10); DELAY(10); /* enable smart mode for MIMO-capable RFs */ bbp3 = rum_bbp_read(sc, 3); bbp3 &= ~RT2573_SMART_MODE; if (sc->rf_rev == RT2573_RF_5225 || sc->rf_rev == RT2573_RF_2527) bbp3 |= RT2573_SMART_MODE; rum_bbp_write(sc, 3, bbp3); if (bbp94 != RT2573_BBPR94_DEFAULT) rum_bbp_write(sc, 94, bbp94); } /* * Enable TSF synchronization and tell h/w to start sending beacons for IBSS * and HostAP operating modes. */ static void rum_enable_tsf_sync(struct rum_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t tmp; if (vap->iv_opmode != IEEE80211_M_STA) { /* * Change default 16ms TBTT adjustment to 8ms. * Must be done before enabling beacon generation. */ rum_write(sc, RT2573_TXRX_CSR10, 1 << 12 | 8); } tmp = rum_read(sc, RT2573_TXRX_CSR9) & 0xff000000; /* set beacon interval (in 1/16ms unit) */ tmp |= vap->iv_bss->ni_intval * 16; tmp |= RT2573_TSF_TICKING | RT2573_ENABLE_TBTT; if (vap->iv_opmode == IEEE80211_M_STA) tmp |= RT2573_TSF_MODE(1); else tmp |= RT2573_TSF_MODE(2) | RT2573_GENERATE_BEACON; rum_write(sc, RT2573_TXRX_CSR9, tmp); } static void rum_update_slot(struct ifnet *ifp) { struct rum_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; uint8_t slottime; uint32_t tmp; slottime = (ic->ic_flags & IEEE80211_F_SHSLOT) ? 9 : 20; tmp = rum_read(sc, RT2573_MAC_CSR9); tmp = (tmp & ~0xff) | slottime; rum_write(sc, RT2573_MAC_CSR9, tmp); DPRINTF(("setting slot time to %uus\n", slottime)); } static void rum_set_bssid(struct rum_softc *sc, const uint8_t *bssid) { uint32_t tmp; tmp = bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24; rum_write(sc, RT2573_MAC_CSR4, tmp); tmp = bssid[4] | bssid[5] << 8 | RT2573_ONE_BSSID << 16; rum_write(sc, RT2573_MAC_CSR5, tmp); } static void rum_set_macaddr(struct rum_softc *sc, const uint8_t *addr) { uint32_t tmp; tmp = addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24; rum_write(sc, RT2573_MAC_CSR2, tmp); tmp = addr[4] | addr[5] << 8 | 0xff << 16; rum_write(sc, RT2573_MAC_CSR3, tmp); } static void rum_update_promisc(struct rum_softc *sc) { struct ifnet *ifp = sc->sc_ifp; uint32_t tmp; tmp = rum_read(sc, RT2573_TXRX_CSR0); tmp &= ~RT2573_DROP_NOT_TO_ME; if (!(ifp->if_flags & IFF_PROMISC)) tmp |= RT2573_DROP_NOT_TO_ME; rum_write(sc, RT2573_TXRX_CSR0, tmp); DPRINTF(("%s promiscuous mode\n", (ifp->if_flags & IFF_PROMISC) ? 
"entering" : "leaving")); } static const char * rum_get_rf(int rev) { switch (rev) { case RT2573_RF_2527: return "RT2527 (MIMO XR)"; case RT2573_RF_2528: return "RT2528"; case RT2573_RF_5225: return "RT5225 (MIMO XR)"; case RT2573_RF_5226: return "RT5226"; default: return "unknown"; } } static void rum_read_eeprom(struct rum_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint16_t val; #ifdef RUM_DEBUG int i; #endif /* read MAC address */ rum_eeprom_read(sc, RT2573_EEPROM_ADDRESS, ic->ic_myaddr, 6); rum_eeprom_read(sc, RT2573_EEPROM_ANTENNA, &val, 2); val = le16toh(val); sc->rf_rev = (val >> 11) & 0x1f; sc->hw_radio = (val >> 10) & 0x1; sc->rx_ant = (val >> 4) & 0x3; sc->tx_ant = (val >> 2) & 0x3; sc->nb_ant = val & 0x3; DPRINTF(("RF revision=%d\n", sc->rf_rev)); rum_eeprom_read(sc, RT2573_EEPROM_CONFIG2, &val, 2); val = le16toh(val); sc->ext_5ghz_lna = (val >> 6) & 0x1; sc->ext_2ghz_lna = (val >> 4) & 0x1; DPRINTF(("External 2GHz LNA=%d\nExternal 5GHz LNA=%d\n", sc->ext_2ghz_lna, sc->ext_5ghz_lna)); rum_eeprom_read(sc, RT2573_EEPROM_RSSI_2GHZ_OFFSET, &val, 2); val = le16toh(val); if ((val & 0xff) != 0xff) sc->rssi_2ghz_corr = (int8_t)(val & 0xff); /* signed */ /* Only [-10, 10] is valid */ if (sc->rssi_2ghz_corr < -10 || sc->rssi_2ghz_corr > 10) sc->rssi_2ghz_corr = 0; rum_eeprom_read(sc, RT2573_EEPROM_RSSI_5GHZ_OFFSET, &val, 2); val = le16toh(val); if ((val & 0xff) != 0xff) sc->rssi_5ghz_corr = (int8_t)(val & 0xff); /* signed */ /* Only [-10, 10] is valid */ if (sc->rssi_5ghz_corr < -10 || sc->rssi_5ghz_corr > 10) sc->rssi_5ghz_corr = 0; if (sc->ext_2ghz_lna) sc->rssi_2ghz_corr -= 14; if (sc->ext_5ghz_lna) sc->rssi_5ghz_corr -= 14; DPRINTF(("RSSI 2GHz corr=%d\nRSSI 5GHz corr=%d\n", sc->rssi_2ghz_corr, sc->rssi_5ghz_corr)); rum_eeprom_read(sc, RT2573_EEPROM_FREQ_OFFSET, &val, 2); val = le16toh(val); if ((val & 0xff) != 0xff) sc->rffreq = val & 0xff; DPRINTF(("RF freq=%d\n", sc->rffreq)); /* read Tx power for all a/b/g channels */ rum_eeprom_read(sc, RT2573_EEPROM_TXPOWER, sc->txpow, 14); /* XXX default Tx power for 802.11a channels */ memset(sc->txpow + 14, 24, sizeof (sc->txpow) - 14); #ifdef RUM_DEBUG for (i = 0; i < 14; i++) DPRINTF(("Channel=%d Tx power=%d\n", i + 1, sc->txpow[i])); #endif /* read default values for BBP registers */ rum_eeprom_read(sc, RT2573_EEPROM_BBP_BASE, sc->bbp_prom, 2 * 16); #ifdef RUM_DEBUG for (i = 0; i < 14; i++) { if (sc->bbp_prom[i].reg == 0 || sc->bbp_prom[i].reg == 0xff) continue; DPRINTF(("BBP R%d=%02x\n", sc->bbp_prom[i].reg, sc->bbp_prom[i].val)); } #endif } static int rum_bbp_init(struct rum_softc *sc) { #define N(a) (sizeof (a) / sizeof ((a)[0])) int i, ntries; /* wait for BBP to be ready */ for (ntries = 0; ntries < 100; ntries++) { const uint8_t val = rum_bbp_read(sc, 0); if (val != 0 && val != 0xff) break; DELAY(1000); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for BBP\n"); return EIO; } /* initialize BBP registers to default values */ for (i = 0; i < N(rum_def_bbp); i++) rum_bbp_write(sc, rum_def_bbp[i].reg, rum_def_bbp[i].val); /* write vendor-specific BBP values (from EEPROM) */ for (i = 0; i < 16; i++) { if (sc->bbp_prom[i].reg == 0 || sc->bbp_prom[i].reg == 0xff) continue; rum_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val); } return 0; #undef N } static void rum_init_locked(struct rum_softc *sc) { #define N(a) (sizeof (a) / sizeof ((a)[0])) struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct rum_rx_data *data; uint32_t tmp; usbd_status error; 
int i, ntries; rum_stop(sc); /* initialize MAC registers to default values */ for (i = 0; i < N(rum_def_mac); i++) rum_write(sc, rum_def_mac[i].reg, rum_def_mac[i].val); /* set host ready */ rum_write(sc, RT2573_MAC_CSR1, 3); rum_write(sc, RT2573_MAC_CSR1, 0); /* wait for BBP/RF to wakeup */ for (ntries = 0; ntries < 1000; ntries++) { if (rum_read(sc, RT2573_MAC_CSR12) & 8) break; rum_write(sc, RT2573_MAC_CSR12, 4); /* force wakeup */ DELAY(1000); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for BBP/RF to wakeup\n"); goto fail; } if ((error = rum_bbp_init(sc)) != 0) goto fail; /* select default channel */ rum_select_band(sc, ic->ic_curchan); rum_select_antenna(sc); rum_set_chan(sc, ic->ic_curchan); /* clear STA registers */ rum_read_multi(sc, RT2573_STA_CSR0, sc->sta, sizeof sc->sta); IEEE80211_ADDR_COPY(ic->ic_myaddr, IF_LLADDR(ifp)); rum_set_macaddr(sc, ic->ic_myaddr); /* initialize ASIC */ rum_write(sc, RT2573_MAC_CSR1, 4); /* * Allocate xfer for AMRR statistics requests. */ sc->amrr_xfer = usbd_alloc_xfer(sc->sc_udev); if (sc->amrr_xfer == NULL) { device_printf(sc->sc_dev, "could not allocate AMRR xfer\n"); goto fail; } /* * Open Tx and Rx USB bulk pipes. */ error = usbd_open_pipe(sc->sc_iface, sc->sc_tx_no, USBD_EXCLUSIVE_USE, &sc->sc_tx_pipeh); if (error != 0) { device_printf(sc->sc_dev, "could not open Tx pipe: %s\n", usbd_errstr(error)); goto fail; } error = usbd_open_pipe(sc->sc_iface, sc->sc_rx_no, USBD_EXCLUSIVE_USE, &sc->sc_rx_pipeh); if (error != 0) { device_printf(sc->sc_dev, "could not open Rx pipe: %s\n", usbd_errstr(error)); goto fail; } /* * Allocate Tx and Rx xfer queues. */ error = rum_alloc_tx_list(sc); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Tx list\n"); goto fail; } error = rum_alloc_rx_list(sc); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Rx list\n"); goto fail; } /* * Start up the receive pipe. 
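 *
 * Each of the RUM_RX_LIST_COUNT buffers is queued on the Rx bulk
 * pipe as an MCLBYTES-sized transfer; completions are delivered to
 * rum_rxeof(), which (like ural_rxeof() below) hands the frame to
 * net80211 and re-queues the transfer.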
*/ for (i = 0; i < RUM_RX_LIST_COUNT; i++) { data = &sc->rx_data[i]; usbd_setup_xfer(data->xfer, sc->sc_rx_pipeh, data, data->buf, MCLBYTES, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, rum_rxeof); usbd_transfer(data->xfer); } /* update Rx filter */ tmp = rum_read(sc, RT2573_TXRX_CSR0) & 0xffff; tmp |= RT2573_DROP_PHY_ERROR | RT2573_DROP_CRC_ERROR; if (ic->ic_opmode != IEEE80211_M_MONITOR) { tmp |= RT2573_DROP_CTL | RT2573_DROP_VER_ERROR | RT2573_DROP_ACKCTS; if (ic->ic_opmode != IEEE80211_M_HOSTAP) tmp |= RT2573_DROP_TODS; if (!(ifp->if_flags & IFF_PROMISC)) tmp |= RT2573_DROP_NOT_TO_ME; } rum_write(sc, RT2573_TXRX_CSR0, tmp); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_drv_flags |= IFF_DRV_RUNNING; return; fail: rum_stop(sc); #undef N } static void rum_init(void *priv) { struct rum_softc *sc = priv; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; RUM_LOCK(sc); rum_init_locked(sc); RUM_UNLOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) ieee80211_start_all(ic); /* start all vap's */ } static void rum_stop(void *priv) { struct rum_softc *sc = priv; struct ifnet *ifp = sc->sc_ifp; uint32_t tmp; sc->sc_tx_timer = 0; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); /* disable Rx */ tmp = rum_read(sc, RT2573_TXRX_CSR0); rum_write(sc, RT2573_TXRX_CSR0, tmp | RT2573_DISABLE_RX); /* reset ASIC */ rum_write(sc, RT2573_MAC_CSR1, 3); rum_write(sc, RT2573_MAC_CSR1, 0); if (sc->amrr_xfer != NULL) { usbd_free_xfer(sc->amrr_xfer); sc->amrr_xfer = NULL; } if (sc->sc_rx_pipeh != NULL) { usbd_abort_pipe(sc->sc_rx_pipeh); usbd_close_pipe(sc->sc_rx_pipeh); sc->sc_rx_pipeh = NULL; } if (sc->sc_tx_pipeh != NULL) { usbd_abort_pipe(sc->sc_tx_pipeh); usbd_close_pipe(sc->sc_tx_pipeh); sc->sc_tx_pipeh = NULL; } rum_free_rx_list(sc); rum_free_tx_list(sc); } static int rum_load_microcode(struct rum_softc *sc, const u_char *ucode, size_t size) { usb_device_request_t req; uint16_t reg = RT2573_MCU_CODE_BASE; usbd_status error; /* copy firmware image into NIC */ for (; size >= 4; reg += 4, ucode += 4, size -= 4) rum_write(sc, reg, UGETDW(ucode)); req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RT2573_MCU_CNTL; USETW(req.wValue, RT2573_MCU_RUN); USETW(req.wIndex, 0); USETW(req.wLength, 0); error = usbd_do_request(sc->sc_udev, &req, NULL); if (error != 0) { device_printf(sc->sc_dev, "could not run firmware: %s\n", usbd_errstr(error)); } return error; } static int rum_prepare_beacon(struct rum_softc *sc, struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; const struct ieee80211_txparam *tp; struct rum_tx_desc desc; struct mbuf *m0; m0 = ieee80211_beacon_alloc(vap->iv_bss, &RUM_VAP(vap)->bo); if (m0 == NULL) { return ENOBUFS; } tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_bsschan)]; rum_setup_tx_desc(sc, &desc, RT2573_TX_TIMESTAMP, RT2573_TX_HWSEQ, m0->m_pkthdr.len, tp->mgmtrate); /* copy the first 24 bytes of Tx descriptor into NIC memory */ rum_write_multi(sc, RT2573_HW_BEACON_BASE0, (uint8_t *)&desc, 24); /* copy beacon header and payload into NIC memory */ rum_write_multi(sc, RT2573_HW_BEACON_BASE0 + 24, mtod(m0, uint8_t *), m0->m_pkthdr.len); m_freem(m0); return 0; } static int rum_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ifnet *ifp = ni->ni_ic->ic_ifp; struct rum_softc *sc = ifp->if_softc; /* prevent management frames from being sent if we're not ready */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { m_freem(m); ieee80211_free_node(ni); return ENETDOWN; } if (sc->tx_queued >= RUM_TX_LIST_COUNT-1) 
{ ifp->if_drv_flags |= IFF_DRV_OACTIVE; m_freem(m); ieee80211_free_node(ni); return EIO; } ifp->if_opackets++; if (params == NULL) { /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. */ if (rum_tx_mgt(sc, m, ni) != 0) goto bad; } else { /* * Caller supplied explicit parameters to use in * sending the frame. */ if (rum_tx_raw(sc, m, ni, params) != 0) goto bad; } sc->sc_tx_timer = 5; callout_reset(&sc->watchdog_ch, hz, rum_watchdog, sc); return 0; bad: ifp->if_oerrors++; ieee80211_free_node(ni); return EIO; } static void rum_amrr_start(struct rum_softc *sc, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct rum_vap *rvp = RUM_VAP(vap); /* clear statistic registers (STA_CSR0 to STA_CSR5) */ rum_read_multi(sc, RT2573_STA_CSR0, sc->sta, sizeof sc->sta); ieee80211_amrr_node_init(&rvp->amrr, &RUM_NODE(ni)->amn, ni); callout_reset(&rvp->amrr_ch, hz, rum_amrr_timeout, vap); } static void rum_amrr_timeout(void *arg) { struct ieee80211vap *vap = arg; struct rum_softc *sc = vap->iv_ic->ic_ifp->if_softc; usb_device_request_t req; /* * Asynchronously read statistic registers (cleared by read). */ req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = RT2573_READ_MULTI_MAC; USETW(req.wValue, 0); USETW(req.wIndex, RT2573_STA_CSR0); USETW(req.wLength, sizeof sc->sta); usbd_setup_default_xfer(sc->amrr_xfer, sc->sc_udev, vap, USBD_DEFAULT_TIMEOUT, &req, sc->sta, sizeof sc->sta, 0, rum_amrr_update); (void)usbd_transfer(sc->amrr_xfer); } static void rum_amrr_update(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status) { struct ieee80211vap *vap = priv; struct rum_vap *rvp = RUM_VAP(vap); struct ifnet *ifp = vap->iv_ic->ic_ifp; struct rum_softc *sc = ifp->if_softc; int ok, fail; if (status != USBD_NORMAL_COMPLETION) { device_printf(sc->sc_dev, "could not retrieve Tx statistics - " "cancelling automatic rate control\n"); return; } ok = (le32toh(sc->sta[4]) >> 16) + /* TX ok w/o retry */ (le32toh(sc->sta[5]) & 0xffff); /* TX ok w/ retry */ fail = (le32toh(sc->sta[5]) >> 16); /* TX retry-fail count */ ieee80211_amrr_tx_update(&RUM_NODE(vap->iv_bss)->amn, ok+fail, ok, (le32toh(sc->sta[5]) & 0xffff) + fail); ifp->if_oerrors += fail; /* count TX retry-fail as Tx errors */ callout_reset(&rvp->amrr_ch, hz, rum_amrr_timeout, vap); } /* ARGUSED */ static struct ieee80211_node * -rum_node_alloc(struct ieee80211_node_table *nt __unused) +rum_node_alloc(struct ieee80211vap *vap __unused, + const uint8_t mac[IEEE80211_ADDR_LEN] __unused) { struct rum_node *rn; rn = malloc(sizeof(struct rum_node), M_80211_NODE, M_NOWAIT | M_ZERO); return rn != NULL ? 
&rn->ni : NULL; } static void rum_newassoc(struct ieee80211_node *ni, int isnew) { struct ieee80211vap *vap = ni->ni_vap; ieee80211_amrr_node_init(&RUM_VAP(vap)->amrr, &RUM_NODE(ni)->amn, ni); } static void rum_scan_start(struct ieee80211com *ic) { struct rum_softc *sc = ic->ic_ifp->if_softc; usb_rem_task(sc->sc_udev, &sc->sc_scantask); /* do it in a process context */ sc->sc_scan_action = RUM_SCAN_START; usb_add_task(sc->sc_udev, &sc->sc_scantask, USB_TASKQ_DRIVER); } static void rum_scan_end(struct ieee80211com *ic) { struct rum_softc *sc = ic->ic_ifp->if_softc; usb_rem_task(sc->sc_udev, &sc->sc_scantask); /* do it in a process context */ sc->sc_scan_action = RUM_SCAN_END; usb_add_task(sc->sc_udev, &sc->sc_scantask, USB_TASKQ_DRIVER); } static void rum_set_channel(struct ieee80211com *ic) { struct rum_softc *sc = ic->ic_ifp->if_softc; usb_rem_task(sc->sc_udev, &sc->sc_scantask); /* do it in a process context */ sc->sc_scan_action = RUM_SET_CHANNEL; usb_add_task(sc->sc_udev, &sc->sc_scantask, USB_TASKQ_DRIVER); sc->sc_rates = ieee80211_get_ratetable(ic->ic_curchan); } static void rum_scantask(void *arg) { struct rum_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t tmp; RUM_LOCK(sc); switch (sc->sc_scan_action) { case RUM_SCAN_START: /* abort TSF synchronization */ tmp = rum_read(sc, RT2573_TXRX_CSR9); rum_write(sc, RT2573_TXRX_CSR9, tmp & ~0x00ffffff); rum_set_bssid(sc, ifp->if_broadcastaddr); break; case RUM_SCAN_END: rum_enable_tsf_sync(sc); /* XXX keep local copy */ rum_set_bssid(sc, vap->iv_bss->ni_bssid); break; case RUM_SET_CHANNEL: mtx_lock(&Giant); rum_set_chan(sc, ic->ic_curchan); mtx_unlock(&Giant); break; default: panic("unknown scan action %d\n", sc->sc_scan_action); /* NEVER REACHED */ break; } RUM_UNLOCK(sc); } static int rum_get_rssi(struct rum_softc *sc, uint8_t raw) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; int lna, agc, rssi; lna = (raw >> 5) & 0x3; agc = raw & 0x1f; if (lna == 0) { /* * No RSSI mapping * * NB: Since RSSI is relative to noise floor, -1 is * adequate for caller to know error happened. */ return -1; } rssi = (2 * agc) - RT2573_NOISE_FLOOR; if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { rssi += sc->rssi_2ghz_corr; if (lna == 1) rssi -= 64; else if (lna == 2) rssi -= 74; else if (lna == 3) rssi -= 90; } else { rssi += sc->rssi_5ghz_corr; if (!sc->ext_5ghz_lna && lna != 1) rssi += 4; if (lna == 1) rssi -= 64; else if (lna == 2) rssi -= 86; else if (lna == 3) rssi -= 100; } return rssi; } static device_method_t rum_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rum_match), DEVMETHOD(device_attach, rum_attach), DEVMETHOD(device_detach, rum_detach), { 0, 0 } }; static driver_t rum_driver = { "rum", rum_methods, sizeof(struct rum_softc) }; static devclass_t rum_devclass; DRIVER_MODULE(rum, uhub, rum_driver, rum_devclass, usbd_driver_load, 0); diff --git a/sys/dev/usb/if_ural.c b/sys/dev/usb/if_ural.c index b072fa0acc9e..1205dd1cf6e7 100644 --- a/sys/dev/usb/if_ural.c +++ b/sys/dev/usb/if_ural.c @@ -1,2504 +1,2506 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2005, 2006 * Damien Bergamini * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. 
* * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /*- * Ralink Technology RT2500USB chipset driver * http://www.ralinktech.com/ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "usbdevs.h" #include #include #ifdef USB_DEBUG #define DPRINTF(x) do { if (uraldebug > 0) printf x; } while (0) #define DPRINTFN(n, x) do { if (uraldebug >= (n)) printf x; } while (0) int uraldebug = 0; SYSCTL_NODE(_hw_usb, OID_AUTO, ural, CTLFLAG_RW, 0, "USB ural"); SYSCTL_INT(_hw_usb_ural, OID_AUTO, debug, CTLFLAG_RW, &uraldebug, 0, "ural debug level"); #else #define DPRINTF(x) #define DPRINTFN(n, x) #endif #define URAL_RSSI(rssi) \ ((rssi) > (RAL_NOISE_FLOOR + RAL_RSSI_CORR) ? \ ((rssi) - (RAL_NOISE_FLOOR + RAL_RSSI_CORR)) : 0) /* various supported device vendors/products */ static const struct usb_devno ural_devs[] = { { USB_VENDOR_ASUS, USB_PRODUCT_ASUS_WL167G }, { USB_VENDOR_ASUS, USB_PRODUCT_RALINK_RT2570 }, { USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F5D7050 }, { USB_VENDOR_BELKIN, USB_PRODUCT_BELKIN_F5D7051 }, { USB_VENDOR_CONCEPTRONIC2, USB_PRODUCT_CONCEPTRONIC2_C54RU }, { USB_VENDOR_DLINK, USB_PRODUCT_DLINK_DWLG122 }, { USB_VENDOR_GIGABYTE, USB_PRODUCT_GIGABYTE_GNWBKG }, { USB_VENDOR_GIGABYTE, USB_PRODUCT_GIGABYTE_GN54G }, { USB_VENDOR_GUILLEMOT, USB_PRODUCT_GUILLEMOT_HWGUSB254 }, { USB_VENDOR_CISCOLINKSYS, USB_PRODUCT_CISCOLINKSYS_WUSB54G }, { USB_VENDOR_CISCOLINKSYS, USB_PRODUCT_CISCOLINKSYS_WUSB54GP }, { USB_VENDOR_CISCOLINKSYS, USB_PRODUCT_CISCOLINKSYS_HU200TS }, { USB_VENDOR_MELCO, USB_PRODUCT_MELCO_KG54 }, { USB_VENDOR_MELCO, USB_PRODUCT_MELCO_KG54AI }, { USB_VENDOR_MELCO, USB_PRODUCT_MELCO_KG54YB }, { USB_VENDOR_MELCO, USB_PRODUCT_MELCO_NINWIFI }, { USB_VENDOR_MSI, USB_PRODUCT_MSI_RT2570 }, { USB_VENDOR_MSI, USB_PRODUCT_MSI_RT2570_2 }, { USB_VENDOR_MSI, USB_PRODUCT_MSI_RT2570_3 }, { USB_VENDOR_NOVATECH, USB_PRODUCT_NOVATECH_NV902 }, { USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT2570 }, { USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT2570_2 }, { USB_VENDOR_RALINK, USB_PRODUCT_RALINK_RT2570_3 }, { USB_VENDOR_SIEMENS2, USB_PRODUCT_SIEMENS2_WL54G }, { USB_VENDOR_SMC, USB_PRODUCT_SMC_2862WG }, { USB_VENDOR_SPHAIRON, USB_PRODUCT_SPHAIRON_UB801R}, { USB_VENDOR_SURECOM, USB_PRODUCT_SURECOM_RT2570 }, { USB_VENDOR_VTECH, USB_PRODUCT_VTECH_RT2570 }, { USB_VENDOR_ZINWELL, USB_PRODUCT_ZINWELL_RT2570 } }; MODULE_DEPEND(ural, wlan, 1, 1, 1); MODULE_DEPEND(ural, wlan_amrr, 1, 1, 1); MODULE_DEPEND(ural, usb, 1, 1, 1); static struct ieee80211vap *ural_vap_create(struct ieee80211com *, const char name[IFNAMSIZ], int unit, int opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]); static void ural_vap_delete(struct ieee80211vap *); static int ural_alloc_tx_list(struct ural_softc *); static void ural_free_tx_list(struct ural_softc *); static int ural_alloc_rx_list(struct ural_softc *); 
static void ural_free_rx_list(struct ural_softc *); static void ural_task(void *); static void ural_scantask(void *); static int ural_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void ural_txeof(usbd_xfer_handle, usbd_private_handle, usbd_status); static void ural_rxeof(usbd_xfer_handle, usbd_private_handle, usbd_status); static void ural_setup_tx_desc(struct ural_softc *, struct ural_tx_desc *, uint32_t, int, int); static int ural_tx_bcn(struct ural_softc *, struct mbuf *, struct ieee80211_node *); static int ural_tx_mgt(struct ural_softc *, struct mbuf *, struct ieee80211_node *); static int ural_tx_data(struct ural_softc *, struct mbuf *, struct ieee80211_node *); static void ural_start(struct ifnet *); static void ural_watchdog(void *); static int ural_ioctl(struct ifnet *, u_long, caddr_t); static void ural_set_testmode(struct ural_softc *); static void ural_eeprom_read(struct ural_softc *, uint16_t, void *, int); static uint16_t ural_read(struct ural_softc *, uint16_t); static void ural_read_multi(struct ural_softc *, uint16_t, void *, int); static void ural_write(struct ural_softc *, uint16_t, uint16_t); static void ural_write_multi(struct ural_softc *, uint16_t, void *, int) __unused; static void ural_bbp_write(struct ural_softc *, uint8_t, uint8_t); static uint8_t ural_bbp_read(struct ural_softc *, uint8_t); static void ural_rf_write(struct ural_softc *, uint8_t, uint32_t); -static struct ieee80211_node *ural_node_alloc(struct ieee80211_node_table *); +static struct ieee80211_node *ural_node_alloc(struct ieee80211vap *, + const uint8_t mac[IEEE80211_ADDR_LEN]); static void ural_newassoc(struct ieee80211_node *, int); static void ural_scan_start(struct ieee80211com *); static void ural_scan_end(struct ieee80211com *); static void ural_set_channel(struct ieee80211com *); static void ural_set_chan(struct ural_softc *, struct ieee80211_channel *); static void ural_disable_rf_tune(struct ural_softc *); static void ural_enable_tsf_sync(struct ural_softc *); static void ural_update_slot(struct ifnet *); static void ural_set_txpreamble(struct ural_softc *); static void ural_set_basicrates(struct ural_softc *, const struct ieee80211_channel *); static void ural_set_bssid(struct ural_softc *, const uint8_t *); static void ural_set_macaddr(struct ural_softc *, uint8_t *); static void ural_update_promisc(struct ural_softc *); static const char *ural_get_rf(int); static void ural_read_eeprom(struct ural_softc *); static int ural_bbp_init(struct ural_softc *); static void ural_set_txantenna(struct ural_softc *, int); static void ural_set_rxantenna(struct ural_softc *, int); static void ural_init_locked(struct ural_softc *); static void ural_init(void *); static void ural_stop(void *); static int ural_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void ural_amrr_start(struct ural_softc *, struct ieee80211_node *); static void ural_amrr_timeout(void *); static void ural_amrr_update(usbd_xfer_handle, usbd_private_handle, usbd_status status); /* * Default values for MAC registers; values taken from the reference driver. 
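 * Each { register, value } pair is written as-is at initialization
 * time, cf. the equivalent rum_def_mac loop in rum_init_locked()
 * above.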
*/ static const struct { uint16_t reg; uint16_t val; } ural_def_mac[] = { { RAL_TXRX_CSR5, 0x8c8d }, { RAL_TXRX_CSR6, 0x8b8a }, { RAL_TXRX_CSR7, 0x8687 }, { RAL_TXRX_CSR8, 0x0085 }, { RAL_MAC_CSR13, 0x1111 }, { RAL_MAC_CSR14, 0x1e11 }, { RAL_TXRX_CSR21, 0xe78f }, { RAL_MAC_CSR9, 0xff1d }, { RAL_MAC_CSR11, 0x0002 }, { RAL_MAC_CSR22, 0x0053 }, { RAL_MAC_CSR15, 0x0000 }, { RAL_MAC_CSR8, 0x0780 }, { RAL_TXRX_CSR19, 0x0000 }, { RAL_TXRX_CSR18, 0x005a }, { RAL_PHY_CSR2, 0x0000 }, { RAL_TXRX_CSR0, 0x1ec0 }, { RAL_PHY_CSR4, 0x000f } }; /* * Default values for BBP registers; values taken from the reference driver. */ static const struct { uint8_t reg; uint8_t val; } ural_def_bbp[] = { { 3, 0x02 }, { 4, 0x19 }, { 14, 0x1c }, { 15, 0x30 }, { 16, 0xac }, { 17, 0x48 }, { 18, 0x18 }, { 19, 0xff }, { 20, 0x1e }, { 21, 0x08 }, { 22, 0x08 }, { 23, 0x08 }, { 24, 0x80 }, { 25, 0x50 }, { 26, 0x08 }, { 27, 0x23 }, { 30, 0x10 }, { 31, 0x2b }, { 32, 0xb9 }, { 34, 0x12 }, { 35, 0x50 }, { 39, 0xc4 }, { 40, 0x02 }, { 41, 0x60 }, { 53, 0x10 }, { 54, 0x18 }, { 56, 0x08 }, { 57, 0x10 }, { 58, 0x08 }, { 61, 0x60 }, { 62, 0x10 }, { 75, 0xff } }; /* * Default values for RF register R2 indexed by channel numbers. */ static const uint32_t ural_rf2522_r2[] = { 0x307f6, 0x307fb, 0x30800, 0x30805, 0x3080a, 0x3080f, 0x30814, 0x30819, 0x3081e, 0x30823, 0x30828, 0x3082d, 0x30832, 0x3083e }; static const uint32_t ural_rf2523_r2[] = { 0x00327, 0x00328, 0x00329, 0x0032a, 0x0032b, 0x0032c, 0x0032d, 0x0032e, 0x0032f, 0x00340, 0x00341, 0x00342, 0x00343, 0x00346 }; static const uint32_t ural_rf2524_r2[] = { 0x00327, 0x00328, 0x00329, 0x0032a, 0x0032b, 0x0032c, 0x0032d, 0x0032e, 0x0032f, 0x00340, 0x00341, 0x00342, 0x00343, 0x00346 }; static const uint32_t ural_rf2525_r2[] = { 0x20327, 0x20328, 0x20329, 0x2032a, 0x2032b, 0x2032c, 0x2032d, 0x2032e, 0x2032f, 0x20340, 0x20341, 0x20342, 0x20343, 0x20346 }; static const uint32_t ural_rf2525_hi_r2[] = { 0x2032f, 0x20340, 0x20341, 0x20342, 0x20343, 0x20344, 0x20345, 0x20346, 0x20347, 0x20348, 0x20349, 0x2034a, 0x2034b, 0x2034e }; static const uint32_t ural_rf2525e_r2[] = { 0x2044d, 0x2044e, 0x2044f, 0x20460, 0x20461, 0x20462, 0x20463, 0x20464, 0x20465, 0x20466, 0x20467, 0x20468, 0x20469, 0x2046b }; static const uint32_t ural_rf2526_hi_r2[] = { 0x0022a, 0x0022b, 0x0022b, 0x0022c, 0x0022c, 0x0022d, 0x0022d, 0x0022e, 0x0022e, 0x0022f, 0x0022d, 0x00240, 0x00240, 0x00241 }; static const uint32_t ural_rf2526_r2[] = { 0x00226, 0x00227, 0x00227, 0x00228, 0x00228, 0x00229, 0x00229, 0x0022a, 0x0022a, 0x0022b, 0x0022b, 0x0022c, 0x0022c, 0x0022d }; /* * For dual-band RF, RF registers R1 and R4 also depend on channel number; * values taken from the reference driver. 
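 * The RF5222 table below covers the fourteen 2GHz channels plus the
 * 5GHz channels 36-64, 100-140 and 149-161; single-band RFs only
 * need the per-channel R2 tables above.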
*/ static const struct { uint8_t chan; uint32_t r1; uint32_t r2; uint32_t r4; } ural_rf5222[] = { { 1, 0x08808, 0x0044d, 0x00282 }, { 2, 0x08808, 0x0044e, 0x00282 }, { 3, 0x08808, 0x0044f, 0x00282 }, { 4, 0x08808, 0x00460, 0x00282 }, { 5, 0x08808, 0x00461, 0x00282 }, { 6, 0x08808, 0x00462, 0x00282 }, { 7, 0x08808, 0x00463, 0x00282 }, { 8, 0x08808, 0x00464, 0x00282 }, { 9, 0x08808, 0x00465, 0x00282 }, { 10, 0x08808, 0x00466, 0x00282 }, { 11, 0x08808, 0x00467, 0x00282 }, { 12, 0x08808, 0x00468, 0x00282 }, { 13, 0x08808, 0x00469, 0x00282 }, { 14, 0x08808, 0x0046b, 0x00286 }, { 36, 0x08804, 0x06225, 0x00287 }, { 40, 0x08804, 0x06226, 0x00287 }, { 44, 0x08804, 0x06227, 0x00287 }, { 48, 0x08804, 0x06228, 0x00287 }, { 52, 0x08804, 0x06229, 0x00287 }, { 56, 0x08804, 0x0622a, 0x00287 }, { 60, 0x08804, 0x0622b, 0x00287 }, { 64, 0x08804, 0x0622c, 0x00287 }, { 100, 0x08804, 0x02200, 0x00283 }, { 104, 0x08804, 0x02201, 0x00283 }, { 108, 0x08804, 0x02202, 0x00283 }, { 112, 0x08804, 0x02203, 0x00283 }, { 116, 0x08804, 0x02204, 0x00283 }, { 120, 0x08804, 0x02205, 0x00283 }, { 124, 0x08804, 0x02206, 0x00283 }, { 128, 0x08804, 0x02207, 0x00283 }, { 132, 0x08804, 0x02208, 0x00283 }, { 136, 0x08804, 0x02209, 0x00283 }, { 140, 0x08804, 0x0220a, 0x00283 }, { 149, 0x08808, 0x02429, 0x00281 }, { 153, 0x08808, 0x0242b, 0x00281 }, { 157, 0x08808, 0x0242d, 0x00281 }, { 161, 0x08808, 0x0242f, 0x00281 } }; static device_probe_t ural_match; static device_attach_t ural_attach; static device_detach_t ural_detach; static device_method_t ural_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ural_match), DEVMETHOD(device_attach, ural_attach), DEVMETHOD(device_detach, ural_detach), { 0, 0 } }; static driver_t ural_driver = { "ural", ural_methods, sizeof(struct ural_softc) }; static devclass_t ural_devclass; DRIVER_MODULE(ural, uhub, ural_driver, ural_devclass, usbd_driver_load, 0); static int ural_match(device_t self) { struct usb_attach_arg *uaa = device_get_ivars(self); if (uaa->iface != NULL) return UMATCH_NONE; return (usb_lookup(ural_devs, uaa->vendor, uaa->product) != NULL) ? UMATCH_VENDOR_PRODUCT : UMATCH_NONE; } static int ural_attach(device_t self) { struct ural_softc *sc = device_get_softc(self); struct usb_attach_arg *uaa = device_get_ivars(self); struct ifnet *ifp; struct ieee80211com *ic; usb_interface_descriptor_t *id; usb_endpoint_descriptor_t *ed; usbd_status error; int i; uint8_t bands; sc->sc_udev = uaa->device; sc->sc_dev = self; if (usbd_set_config_no(sc->sc_udev, RAL_CONFIG_NO, 0) != 0) { device_printf(self, "could not set configuration no\n"); return ENXIO; } /* get the first interface handle */ error = usbd_device2interface_handle(sc->sc_udev, RAL_IFACE_INDEX, &sc->sc_iface); if (error != 0) { device_printf(self, "could not get interface handle\n"); return ENXIO; } /* * Find endpoints. 
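 *
 * Walk the interface's endpoint descriptors and record the addresses
 * of the bulk-IN (receive) and bulk-OUT (transmit) endpoints; attach
 * fails with ENXIO if either one is missing.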
*/ id = usbd_get_interface_descriptor(sc->sc_iface); sc->sc_rx_no = sc->sc_tx_no = -1; for (i = 0; i < id->bNumEndpoints; i++) { ed = usbd_interface2endpoint_descriptor(sc->sc_iface, i); if (ed == NULL) { device_printf(self, "no endpoint descriptor for %d\n", i); return ENXIO; } if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_IN && UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) sc->sc_rx_no = ed->bEndpointAddress; else if (UE_GET_DIR(ed->bEndpointAddress) == UE_DIR_OUT && UE_GET_XFERTYPE(ed->bmAttributes) == UE_BULK) sc->sc_tx_no = ed->bEndpointAddress; } if (sc->sc_rx_no == -1 || sc->sc_tx_no == -1) { device_printf(self, "missing endpoint\n"); return ENXIO; } ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { device_printf(sc->sc_dev, "can not if_alloc()\n"); return ENXIO; } ic = ifp->if_l2com; mtx_init(&sc->sc_mtx, device_get_nameunit(sc->sc_dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); usb_init_task(&sc->sc_task, ural_task, sc); usb_init_task(&sc->sc_scantask, ural_scantask, sc); callout_init(&sc->watchdog_ch, 0); /* retrieve RT2570 rev. no */ sc->asic_rev = ural_read(sc, RAL_MAC_CSR0); /* retrieve MAC address and various other things from EEPROM */ ural_read_eeprom(sc); device_printf(sc->sc_dev, "MAC/BBP RT2570 (rev 0x%02x), RF %s\n", sc->asic_rev, ural_get_rf(sc->rf_rev)); ifp->if_softc = sc; if_initname(ifp, "ural", device_get_unit(sc->sc_dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_NEEDSGIANT; /* USB stack is still under Giant lock */ ifp->if_init = ural_init; ifp->if_ioctl = ural_ioctl; ifp->if_start = ural_start; IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN; IFQ_SET_READY(&ifp->if_snd); ic->ic_ifp = ifp; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_IBSS /* IBSS mode supported */ | IEEE80211_C_MONITOR /* monitor mode supported */ | IEEE80211_C_HOSTAP /* HostAp mode supported */ | IEEE80211_C_TXPMGT /* tx power management */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_BGSCAN /* bg scanning supported */ | IEEE80211_C_WPA /* 802.11i */ ; bands = 0; setbit(&bands, IEEE80211_MODE_11B); setbit(&bands, IEEE80211_MODE_11G); if (sc->rf_rev == RAL_RF_5222) setbit(&bands, IEEE80211_MODE_11A); ieee80211_init_channels(ic, NULL, &bands); ieee80211_ifattach(ic); ic->ic_newassoc = ural_newassoc; ic->ic_raw_xmit = ural_raw_xmit; ic->ic_node_alloc = ural_node_alloc; ic->ic_scan_start = ural_scan_start; ic->ic_scan_end = ural_scan_end; ic->ic_set_channel = ural_set_channel; ic->ic_vap_create = ural_vap_create; ic->ic_vap_delete = ural_vap_delete; sc->sc_rates = ieee80211_get_ratetable(ic->ic_curchan); bpfattach(ifp, DLT_IEEE802_11_RADIO, sizeof (struct ieee80211_frame) + sizeof(sc->sc_txtap)); sc->sc_rxtap_len = sizeof sc->sc_rxtap; sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len); sc->sc_rxtap.wr_ihdr.it_present = htole32(RAL_RX_RADIOTAP_PRESENT); sc->sc_txtap_len = sizeof sc->sc_txtap; sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len); sc->sc_txtap.wt_ihdr.it_present = htole32(RAL_TX_RADIOTAP_PRESENT); if (bootverbose) ieee80211_announce(ic); return 0; } static int ural_detach(device_t self) { struct ural_softc *sc = device_get_softc(self); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; ural_stop(sc); bpfdetach(ifp); ieee80211_ifdetach(ic); usb_rem_task(sc->sc_udev, &sc->sc_task); 
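/*
 * Remaining teardown: cancel the scan task and watchdog, release the
 * AMRR transfer, abort and close both bulk pipes, and free the Rx/Tx
 * lists before the ifnet and mutex are destroyed.
 */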
usb_rem_task(sc->sc_udev, &sc->sc_scantask); callout_stop(&sc->watchdog_ch); if (sc->amrr_xfer != NULL) { usbd_free_xfer(sc->amrr_xfer); sc->amrr_xfer = NULL; } if (sc->sc_rx_pipeh != NULL) { usbd_abort_pipe(sc->sc_rx_pipeh); usbd_close_pipe(sc->sc_rx_pipeh); } if (sc->sc_tx_pipeh != NULL) { usbd_abort_pipe(sc->sc_tx_pipeh); usbd_close_pipe(sc->sc_tx_pipeh); } ural_free_rx_list(sc); ural_free_tx_list(sc); if_free(ifp); mtx_destroy(&sc->sc_mtx); return 0; } static struct ieee80211vap * ural_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, int opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ural_vap *uvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return NULL; uvp = (struct ural_vap *) malloc(sizeof(struct ural_vap), M_80211_VAP, M_NOWAIT | M_ZERO); if (uvp == NULL) return NULL; vap = &uvp->vap; /* enable s/w bmiss handling for sta mode */ ieee80211_vap_setup(ic, vap, name, unit, opmode, flags | IEEE80211_CLONE_NOBEACONS, bssid, mac); /* override state transition machine */ uvp->newstate = vap->iv_newstate; vap->iv_newstate = ural_newstate; callout_init(&uvp->amrr_ch, 0); ieee80211_amrr_init(&uvp->amrr, vap, IEEE80211_AMRR_MIN_SUCCESS_THRESHOLD, IEEE80211_AMRR_MAX_SUCCESS_THRESHOLD, 1000 /* 1 sec */); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status); ic->ic_opmode = opmode; return vap; } static void ural_vap_delete(struct ieee80211vap *vap) { struct ural_vap *uvp = URAL_VAP(vap); callout_stop(&uvp->amrr_ch); ieee80211_amrr_cleanup(&uvp->amrr); ieee80211_vap_detach(vap); free(uvp, M_80211_VAP); } static int ural_alloc_tx_list(struct ural_softc *sc) { struct ural_tx_data *data; int i, error; sc->tx_queued = sc->tx_cur = 0; for (i = 0; i < RAL_TX_LIST_COUNT; i++) { data = &sc->tx_data[i]; data->sc = sc; data->xfer = usbd_alloc_xfer(sc->sc_udev); if (data->xfer == NULL) { device_printf(sc->sc_dev, "could not allocate tx xfer\n"); error = ENOMEM; goto fail; } data->buf = usbd_alloc_buffer(data->xfer, RAL_TX_DESC_SIZE + MCLBYTES); if (data->buf == NULL) { device_printf(sc->sc_dev, "could not allocate tx buffer\n"); error = ENOMEM; goto fail; } } return 0; fail: ural_free_tx_list(sc); return error; } static void ural_free_tx_list(struct ural_softc *sc) { struct ural_tx_data *data; int i; for (i = 0; i < RAL_TX_LIST_COUNT; i++) { data = &sc->tx_data[i]; if (data->xfer != NULL) { usbd_free_xfer(data->xfer); data->xfer = NULL; } if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; } } } static int ural_alloc_rx_list(struct ural_softc *sc) { struct ural_rx_data *data; int i, error; for (i = 0; i < RAL_RX_LIST_COUNT; i++) { data = &sc->rx_data[i]; data->sc = sc; data->xfer = usbd_alloc_xfer(sc->sc_udev); if (data->xfer == NULL) { device_printf(sc->sc_dev, "could not allocate rx xfer\n"); error = ENOMEM; goto fail; } if (usbd_alloc_buffer(data->xfer, MCLBYTES) == NULL) { device_printf(sc->sc_dev, "could not allocate rx buffer\n"); error = ENOMEM; goto fail; } data->m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (data->m == NULL) { device_printf(sc->sc_dev, "could not allocate rx mbuf\n"); error = ENOMEM; goto fail; } data->buf = mtod(data->m, uint8_t *); } return 0; fail: ural_free_tx_list(sc); return error; } static void ural_free_rx_list(struct ural_softc *sc) { struct ural_rx_data *data; int i; for (i = 0; i < RAL_RX_LIST_COUNT; i++) { data = &sc->rx_data[i]; if (data->xfer != NULL) { usbd_free_xfer(data->xfer); 
data->xfer = NULL; } if (data->m != NULL) { m_freem(data->m); data->m = NULL; } } } static void ural_task(void *xarg) { struct ural_softc *sc = xarg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct ural_vap *uvp = URAL_VAP(vap); const struct ieee80211_txparam *tp; enum ieee80211_state ostate; struct ieee80211_node *ni; struct mbuf *m; ostate = vap->iv_state; RAL_LOCK(sc); switch (sc->sc_state) { case IEEE80211_S_INIT: if (ostate == IEEE80211_S_RUN) { /* abort TSF synchronization */ ural_write(sc, RAL_TXRX_CSR19, 0); /* force tx led to stop blinking */ ural_write(sc, RAL_MAC_CSR20, 0); } break; case IEEE80211_S_RUN: ni = vap->iv_bss; if (vap->iv_opmode != IEEE80211_M_MONITOR) { ural_update_slot(ic->ic_ifp); ural_set_txpreamble(sc); ural_set_basicrates(sc, ic->ic_bsschan); ural_set_bssid(sc, ni->ni_bssid); } if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_IBSS) { m = ieee80211_beacon_alloc(ni, &uvp->bo); if (m == NULL) { device_printf(sc->sc_dev, "could not allocate beacon\n"); return; } if (ural_tx_bcn(sc, m, ni) != 0) { device_printf(sc->sc_dev, "could not send beacon\n"); return; } } /* make tx led blink on tx (controlled by ASIC) */ ural_write(sc, RAL_MAC_CSR20, 1); if (vap->iv_opmode != IEEE80211_M_MONITOR) ural_enable_tsf_sync(sc); /* enable automatic rate adaptation */ tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_bsschan)]; if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) ural_amrr_start(sc, ni); break; default: break; } RAL_UNLOCK(sc); IEEE80211_LOCK(ic); uvp->newstate(vap, sc->sc_state, sc->sc_arg); if (vap->iv_newstate_cb != NULL) vap->iv_newstate_cb(vap, sc->sc_state, sc->sc_arg); IEEE80211_UNLOCK(ic); } static void ural_scantask(void *arg) { struct ural_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); RAL_LOCK(sc); if (sc->sc_scan_action == URAL_SCAN_START) { /* abort TSF synchronization */ ural_write(sc, RAL_TXRX_CSR19, 0); ural_set_bssid(sc, ifp->if_broadcastaddr); } else if (sc->sc_scan_action == URAL_SET_CHANNEL) { mtx_lock(&Giant); ural_set_chan(sc, ic->ic_curchan); mtx_unlock(&Giant); } else { ural_enable_tsf_sync(sc); /* XXX keep local copy */ ural_set_bssid(sc, vap->iv_bss->ni_bssid); } RAL_UNLOCK(sc); } static int ural_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct ural_vap *uvp = URAL_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct ural_softc *sc = ic->ic_ifp->if_softc; callout_stop(&uvp->amrr_ch); /* do it in a process context */ sc->sc_state = nstate; sc->sc_arg = arg; usb_rem_task(sc->sc_udev, &sc->sc_task); if (nstate == IEEE80211_S_INIT) { uvp->newstate(vap, nstate, arg); return 0; } else { usb_add_task(sc->sc_udev, &sc->sc_task, USB_TASKQ_DRIVER); return EINPROGRESS; } } #define RAL_RXTX_TURNAROUND 5 /* us */ static void ural_txeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status) { struct ural_tx_data *data = priv; struct ural_softc *sc = data->sc; struct ifnet *ifp = sc->sc_ifp; if (data->m->m_flags & M_TXCB) ieee80211_process_callback(data->ni, data->m, status == USBD_NORMAL_COMPLETION ? 
0 : ETIMEDOUT); if (status != USBD_NORMAL_COMPLETION) { if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) return; device_printf(sc->sc_dev, "could not transmit buffer: %s\n", usbd_errstr(status)); if (status == USBD_STALLED) usbd_clear_endpoint_stall_async(sc->sc_rx_pipeh); ifp->if_oerrors++; /* XXX mbuf leak? */ return; } m_freem(data->m); data->m = NULL; ieee80211_free_node(data->ni); data->ni = NULL; sc->tx_queued--; ifp->if_opackets++; DPRINTFN(10, ("tx done\n")); sc->sc_tx_timer = 0; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ural_start(ifp); } static void ural_rxeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status) { struct ural_rx_data *data = priv; struct ural_softc *sc = data->sc; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ural_rx_desc *desc; struct ieee80211_node *ni; struct mbuf *mnew, *m; int len, rssi; if (status != USBD_NORMAL_COMPLETION) { if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) return; if (status == USBD_STALLED) usbd_clear_endpoint_stall_async(sc->sc_rx_pipeh); goto skip; } usbd_get_xfer_status(xfer, NULL, NULL, &len, NULL); if (len < RAL_RX_DESC_SIZE + IEEE80211_MIN_LEN) { DPRINTF(("%s: xfer too short %d\n", device_get_nameunit(sc->sc_dev), len)); ifp->if_ierrors++; goto skip; } /* rx descriptor is located at the end */ desc = (struct ural_rx_desc *)(data->buf + len - RAL_RX_DESC_SIZE); if ((le32toh(desc->flags) & RAL_RX_PHY_ERROR) || (le32toh(desc->flags) & RAL_RX_CRC_ERROR)) { /* * This should not happen since we did not request to receive * those frames when we filled RAL_TXRX_CSR2. */ DPRINTFN(5, ("PHY or CRC error\n")); ifp->if_ierrors++; goto skip; } mnew = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (mnew == NULL) { ifp->if_ierrors++; goto skip; } m = data->m; data->m = mnew; data->buf = mtod(data->m, uint8_t *); /* finalize mbuf */ m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = (le32toh(desc->flags) >> 16) & 0xfff; if (bpf_peers_present(ifp->if_bpf)) { struct ural_rx_radiotap_header *tap = &sc->sc_rxtap; tap->wr_flags = IEEE80211_RADIOTAP_F_FCS; tap->wr_rate = ieee80211_plcp2rate(desc->rate, (desc->flags & htole32(RAL_RX_OFDM)) ? IEEE80211_T_OFDM : IEEE80211_T_CCK); tap->wr_chan_freq = htole16(ic->ic_curchan->ic_freq); tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags); tap->wr_antenna = sc->rx_ant; tap->wr_antsignal = URAL_RSSI(desc->rssi); bpf_mtap2(ifp->if_bpf, tap, sc->sc_rxtap_len, m); } /* Strip trailing 802.11 MAC FCS. */ m_adj(m, -IEEE80211_CRC_LEN); rssi = URAL_RSSI(desc->rssi); ni = ieee80211_find_rxnode(ic, mtod(m, struct ieee80211_frame_min *)); if (ni != NULL) { (void) ieee80211_input(ni, m, rssi, RAL_NOISE_FLOOR, 0); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi, RAL_NOISE_FLOOR, 0); DPRINTFN(15, ("rx done\n")); skip: /* setup a new transfer */ usbd_setup_xfer(xfer, sc->sc_rx_pipeh, data, data->buf, MCLBYTES, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, ural_rxeof); usbd_transfer(xfer); } static uint8_t ural_plcp_signal(int rate) { switch (rate) { /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 
14 Table 80) */ case 12: return 0xb; case 18: return 0xf; case 24: return 0xa; case 36: return 0xe; case 48: return 0x9; case 72: return 0xd; case 96: return 0x8; case 108: return 0xc; /* CCK rates (NB: not IEEE std, device-specific) */ case 2: return 0x0; case 4: return 0x1; case 11: return 0x2; case 22: return 0x3; } return 0xff; /* XXX unsupported/unknown rate */ } static void ural_setup_tx_desc(struct ural_softc *sc, struct ural_tx_desc *desc, uint32_t flags, int len, int rate) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint16_t plcp_length; int remainder; desc->flags = htole32(flags); desc->flags |= htole32(RAL_TX_NEWSEQ); desc->flags |= htole32(len << 16); desc->wme = htole16(RAL_AIFSN(2) | RAL_LOGCWMIN(3) | RAL_LOGCWMAX(5)); desc->wme |= htole16(RAL_IVOFFSET(sizeof (struct ieee80211_frame))); /* setup PLCP fields */ desc->plcp_signal = ural_plcp_signal(rate); desc->plcp_service = 4; len += IEEE80211_CRC_LEN; if (ieee80211_rate2phytype(sc->sc_rates, rate) == IEEE80211_T_OFDM) { desc->flags |= htole32(RAL_TX_OFDM); plcp_length = len & 0xfff; desc->plcp_length_hi = plcp_length >> 6; desc->plcp_length_lo = plcp_length & 0x3f; } else { plcp_length = (16 * len + rate - 1) / rate; if (rate == 22) { remainder = (16 * len) % 22; if (remainder != 0 && remainder < 7) desc->plcp_service |= RAL_PLCP_LENGEXT; } desc->plcp_length_hi = plcp_length >> 8; desc->plcp_length_lo = plcp_length & 0xff; if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) desc->plcp_signal |= 0x08; } desc->iv = 0; desc->eiv = 0; } #define RAL_TX_TIMEOUT 5000 static int ural_tx_bcn(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; const struct ieee80211_txparam *tp; struct ural_tx_desc *desc; usbd_xfer_handle xfer; uint8_t cmd; usbd_status error; uint8_t *buf; int xferlen; tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_bsschan)]; xfer = usbd_alloc_xfer(sc->sc_udev); if (xfer == NULL) return ENOMEM; /* xfer length needs to be a multiple of two! 
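 * The rounding below is the usual add-then-mask alignment idiom;
 * e.g. a 97-byte descriptor-plus-beacon becomes (97 + 1) & ~1 = 98.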
*/ xferlen = (RAL_TX_DESC_SIZE + m0->m_pkthdr.len + 1) & ~1; buf = usbd_alloc_buffer(xfer, xferlen); if (buf == NULL) { usbd_free_xfer(xfer); return ENOMEM; } cmd = 0; usbd_setup_xfer(xfer, sc->sc_tx_pipeh, NULL, &cmd, sizeof cmd, USBD_FORCE_SHORT_XFER, RAL_TX_TIMEOUT, NULL); error = usbd_sync_transfer(xfer); if (error != 0) { usbd_free_xfer(xfer); return error; } desc = (struct ural_tx_desc *)buf; m_copydata(m0, 0, m0->m_pkthdr.len, buf + RAL_TX_DESC_SIZE); ural_setup_tx_desc(sc, desc, RAL_TX_IFS_NEWBACKOFF | RAL_TX_TIMESTAMP, m0->m_pkthdr.len, tp->mgmtrate); DPRINTFN(10, ("sending beacon frame len=%u rate=%u xfer len=%u\n", m0->m_pkthdr.len, tp->mgmtrate, xferlen)); usbd_setup_xfer(xfer, sc->sc_tx_pipeh, NULL, buf, xferlen, USBD_FORCE_SHORT_XFER | USBD_NO_COPY, RAL_TX_TIMEOUT, NULL); error = usbd_sync_transfer(xfer); usbd_free_xfer(xfer); return error; } static int ural_tx_mgt(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = sc->sc_ifp; const struct ieee80211_txparam *tp; struct ural_tx_desc *desc; struct ural_tx_data *data; struct ieee80211_frame *wh; struct ieee80211_key *k; uint32_t flags; uint16_t dur; usbd_status error; int xferlen; data = &sc->tx_data[sc->tx_cur]; desc = (struct ural_tx_desc *)data->buf; tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; wh = mtod(m0, struct ieee80211_frame *); if (wh->i_fc[1] & IEEE80211_FC1_WEP) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } wh = mtod(m0, struct ieee80211_frame *); } data->m = m0; data->ni = ni; flags = 0; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RAL_TX_ACK; dur = ieee80211_ack_duration(sc->sc_rates, tp->mgmtrate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); *(uint16_t *)wh->i_dur = htole16(dur); /* tell hardware to add timestamp for probe responses */ if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_MGT && (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == IEEE80211_FC0_SUBTYPE_PROBE_RESP) flags |= RAL_TX_TIMESTAMP; } if (bpf_peers_present(ifp->if_bpf)) { struct ural_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = tp->mgmtrate; tap->wt_chan_freq = htole16(ic->ic_curchan->ic_freq); tap->wt_chan_flags = htole16(ic->ic_curchan->ic_flags); tap->wt_antenna = sc->tx_ant; bpf_mtap2(ifp->if_bpf, tap, sc->sc_txtap_len, m0); } m_copydata(m0, 0, m0->m_pkthdr.len, data->buf + RAL_TX_DESC_SIZE); ural_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, tp->mgmtrate); /* align end on a 2-bytes boundary */ xferlen = (RAL_TX_DESC_SIZE + m0->m_pkthdr.len + 1) & ~1; /* * No space left in the last URB to store the extra 2 bytes, force * sending of another URB. 
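 * (This presumes the 64-byte bulk max-packet size: if xferlen were an
 * exact multiple of 64 the transfer would end on a full-sized packet,
 * so two pad bytes are added to guarantee a short terminating packet.)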
*/ if ((xferlen % 64) == 0) xferlen += 2; DPRINTFN(10, ("sending mgt frame len=%u rate=%u xfer len=%u\n", m0->m_pkthdr.len, tp->mgmtrate, xferlen)); usbd_setup_xfer(data->xfer, sc->sc_tx_pipeh, data, data->buf, xferlen, USBD_FORCE_SHORT_XFER | USBD_NO_COPY, RAL_TX_TIMEOUT, ural_txeof); error = usbd_transfer(data->xfer); if (error != USBD_NORMAL_COMPLETION && error != USBD_IN_PROGRESS) { m_freem(m0); data->m = NULL; data->ni = NULL; return error; } sc->tx_queued++; sc->tx_cur = (sc->tx_cur + 1) % RAL_TX_LIST_COUNT; return 0; } static int ural_sendprot(struct ural_softc *sc, const struct mbuf *m, struct ieee80211_node *ni, int prot, int rate) { struct ieee80211com *ic = ni->ni_ic; const struct ieee80211_frame *wh; struct ural_tx_desc *desc; struct ural_tx_data *data; struct mbuf *mprot; int protrate, ackrate, pktlen, flags, isshort; uint16_t dur; usbd_status error; KASSERT(prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY, ("protection %d", prot)); wh = mtod(m, const struct ieee80211_frame *); pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN; protrate = ieee80211_ctl_rate(sc->sc_rates, rate); ackrate = ieee80211_ack_rate(sc->sc_rates, rate); isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; dur = ieee80211_compute_duration(sc->sc_rates, pktlen, rate, isshort); + ieee80211_ack_duration(sc->sc_rates, rate, isshort); flags = RAL_TX_RETRY(7); if (prot == IEEE80211_PROT_RTSCTS) { /* NB: CTS is the same size as an ACK */ dur += ieee80211_ack_duration(sc->sc_rates, rate, isshort); flags |= RAL_TX_ACK; mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, dur); } else { mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur); } if (mprot == NULL) { /* XXX stat + msg */ return ENOBUFS; } data = &sc->tx_data[sc->tx_cur]; desc = (struct ural_tx_desc *)data->buf; data->m = mprot; data->ni = ieee80211_ref_node(ni); m_copydata(mprot, 0, mprot->m_pkthdr.len, data->buf + RAL_TX_DESC_SIZE); ural_setup_tx_desc(sc, desc, flags, mprot->m_pkthdr.len, protrate); usbd_setup_xfer(data->xfer, sc->sc_tx_pipeh, data, data->buf, /* NB: no roundup necessary */ RAL_TX_DESC_SIZE + mprot->m_pkthdr.len, USBD_FORCE_SHORT_XFER | USBD_NO_COPY, RAL_TX_TIMEOUT, ural_txeof); error = usbd_transfer(data->xfer); if (error != USBD_NORMAL_COMPLETION && error != USBD_IN_PROGRESS) { data->m = NULL; data->ni = NULL; return error; } sc->tx_queued++; sc->tx_cur = (sc->tx_cur + 1) % RAL_TX_LIST_COUNT; return 0; } static int ural_tx_raw(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ural_tx_desc *desc; struct ural_tx_data *data; uint32_t flags; usbd_status error; int xferlen, rate; KASSERT(params != NULL, ("no raw xmit params")); data = &sc->tx_data[sc->tx_cur]; desc = (struct ural_tx_desc *)data->buf; rate = params->ibp_rate0 & IEEE80211_RATE_VAL; /* XXX validate */ if (rate == 0) { m_freem(m0); return EINVAL; } flags = 0; if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) flags |= RAL_TX_ACK; if (params->ibp_flags & (IEEE80211_BPF_RTS|IEEE80211_BPF_CTS)) { error = ural_sendprot(sc, m0, ni, params->ibp_flags & IEEE80211_BPF_RTS ? 
IEEE80211_PROT_RTSCTS : IEEE80211_PROT_CTSONLY, rate); if (error) { m_freem(m0); return error; } flags |= RAL_TX_IFS_SIFS; } if (bpf_peers_present(ifp->if_bpf)) { struct ural_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; tap->wt_chan_freq = htole16(ic->ic_curchan->ic_freq); tap->wt_chan_flags = htole16(ic->ic_curchan->ic_flags); tap->wt_antenna = sc->tx_ant; bpf_mtap2(ifp->if_bpf, tap, sc->sc_txtap_len, m0); } data->m = m0; data->ni = ni; m_copydata(m0, 0, m0->m_pkthdr.len, data->buf + RAL_TX_DESC_SIZE); /* XXX need to setup descriptor ourself */ ural_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate); /* align end on a 2-bytes boundary */ xferlen = (RAL_TX_DESC_SIZE + m0->m_pkthdr.len + 1) & ~1; /* * No space left in the last URB to store the extra 2 bytes, force * sending of another URB. */ if ((xferlen % 64) == 0) xferlen += 2; DPRINTFN(10, ("sending raw frame len=%u rate=%u xfer len=%u\n", m0->m_pkthdr.len, rate, xferlen)); usbd_setup_xfer(data->xfer, sc->sc_tx_pipeh, data, data->buf, xferlen, USBD_FORCE_SHORT_XFER | USBD_NO_COPY, RAL_TX_TIMEOUT, ural_txeof); error = usbd_transfer(data->xfer); if (error != USBD_NORMAL_COMPLETION && error != USBD_IN_PROGRESS) { m_freem(m0); data->m = NULL; data->ni = NULL; return error; } sc->tx_queued++; sc->tx_cur = (sc->tx_cur + 1) % RAL_TX_LIST_COUNT; return 0; } static int ural_tx_data(struct ural_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = sc->sc_ifp; struct ural_tx_desc *desc; struct ural_tx_data *data; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp; struct ieee80211_key *k; uint32_t flags = 0; uint16_t dur; usbd_status error; int xferlen, rate; wh = mtod(m0, struct ieee80211_frame *); tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; if (IEEE80211_IS_MULTICAST(wh->i_addr1)) rate = tp->mcastrate; else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) rate = tp->ucastrate; else rate = ni->ni_txrate; if (wh->i_fc[1] & IEEE80211_FC1_WEP) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { int prot = IEEE80211_PROT_NONE; if (m0->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) prot = IEEE80211_PROT_RTSCTS; else if ((ic->ic_flags & IEEE80211_F_USEPROT) && ieee80211_rate2phytype(sc->sc_rates, rate) == IEEE80211_T_OFDM) prot = ic->ic_protmode; if (prot != IEEE80211_PROT_NONE) { error = ural_sendprot(sc, m0, ni, prot, rate); if (error) { m_freem(m0); return error; } flags |= RAL_TX_IFS_SIFS; } } data = &sc->tx_data[sc->tx_cur]; desc = (struct ural_tx_desc *)data->buf; data->m = m0; data->ni = ni; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RAL_TX_ACK; flags |= RAL_TX_RETRY(7); dur = ieee80211_ack_duration(sc->sc_rates, rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); *(uint16_t *)wh->i_dur = htole16(dur); } if (bpf_peers_present(ifp->if_bpf)) { struct ural_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; tap->wt_chan_freq = htole16(ic->ic_curchan->ic_freq); tap->wt_chan_flags = htole16(ic->ic_curchan->ic_flags); tap->wt_antenna = sc->tx_ant; bpf_mtap2(ifp->if_bpf, tap, sc->sc_txtap_len, m0); } m_copydata(m0, 0, m0->m_pkthdr.len, data->buf + RAL_TX_DESC_SIZE); ural_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate); /* align end on a 2-bytes boundary */ xferlen = 
(RAL_TX_DESC_SIZE + m0->m_pkthdr.len + 1) & ~1; /* * No space left in the last URB to store the extra 2 bytes, force * sending of another URB. */ if ((xferlen % 64) == 0) xferlen += 2; DPRINTFN(10, ("sending data frame len=%u rate=%u xfer len=%u\n", m0->m_pkthdr.len, rate, xferlen)); usbd_setup_xfer(data->xfer, sc->sc_tx_pipeh, data, data->buf, xferlen, USBD_FORCE_SHORT_XFER | USBD_NO_COPY, RAL_TX_TIMEOUT, ural_txeof); error = usbd_transfer(data->xfer); if (error != USBD_NORMAL_COMPLETION && error != USBD_IN_PROGRESS) { m_freem(m0); data->m = NULL; data->ni = NULL; return error; } sc->tx_queued++; sc->tx_cur = (sc->tx_cur + 1) % RAL_TX_LIST_COUNT; return 0; } static void ural_start(struct ifnet *ifp) { struct ural_softc *sc = ifp->if_softc; struct ieee80211_node *ni; struct mbuf *m; for (;;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; if (sc->tx_queued >= RAL_TX_LIST_COUNT-1) { IFQ_DRV_PREPEND(&ifp->if_snd, m); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; m = ieee80211_encap(ni, m); if (m == NULL) { ieee80211_free_node(ni); continue; } if (ural_tx_data(sc, m, ni) != 0) { ieee80211_free_node(ni); ifp->if_oerrors++; break; } sc->sc_tx_timer = 5; callout_reset(&sc->watchdog_ch, hz, ural_watchdog, sc); } } static void ural_watchdog(void *arg) { struct ural_softc *sc = (struct ural_softc *)arg; RAL_LOCK(sc); if (sc->sc_tx_timer > 0) { if (--sc->sc_tx_timer == 0) { device_printf(sc->sc_dev, "device timeout\n"); /*ural_init(sc); XXX needs a process context! */ sc->sc_ifp->if_oerrors++; RAL_UNLOCK(sc); return; } callout_reset(&sc->watchdog_ch, hz, ural_watchdog, sc); } RAL_UNLOCK(sc); } static int ural_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct ural_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct ifreq *ifr = (struct ifreq *) data; int error = 0, startall = 1; RAL_LOCK(sc); switch (cmd) { case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { ural_init_locked(sc); startall = 1; } else ural_update_promisc(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) ural_stop(sc); } break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; default: error = ether_ioctl(ifp, cmd, data); break; } RAL_UNLOCK(sc); if (startall) ieee80211_start_all(ic); return error; } static void ural_set_testmode(struct ural_softc *sc) { usb_device_request_t req; usbd_status error; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RAL_VENDOR_REQUEST; USETW(req.wValue, 4); USETW(req.wIndex, 1); USETW(req.wLength, 0); error = usbd_do_request(sc->sc_udev, &req, NULL); if (error != 0) { device_printf(sc->sc_dev, "could not set test mode: %s\n", usbd_errstr(error)); } } static void ural_eeprom_read(struct ural_softc *sc, uint16_t addr, void *buf, int len) { usb_device_request_t req; usbd_status error; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = RAL_READ_EEPROM; USETW(req.wValue, 0); USETW(req.wIndex, addr); USETW(req.wLength, len); error = usbd_do_request(sc->sc_udev, &req, buf); if (error != 0) { device_printf(sc->sc_dev, "could not read EEPROM: %s\n", usbd_errstr(error)); } } static uint16_t ural_read(struct ural_softc *sc, uint16_t reg) { usb_device_request_t req; usbd_status error; uint16_t val; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = RAL_READ_MAC; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, sizeof (uint16_t)); error = usbd_do_request(sc->sc_udev, &req, &val); if 
(error != 0) { device_printf(sc->sc_dev, "could not read MAC register: %s\n", usbd_errstr(error)); return 0; } return le16toh(val); } static void ural_read_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len) { usb_device_request_t req; usbd_status error; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = RAL_READ_MULTI_MAC; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, len); error = usbd_do_request(sc->sc_udev, &req, buf); if (error != 0) { device_printf(sc->sc_dev, "could not read MAC register: %s\n", usbd_errstr(error)); } } static void ural_write(struct ural_softc *sc, uint16_t reg, uint16_t val) { usb_device_request_t req; usbd_status error; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RAL_WRITE_MAC; USETW(req.wValue, val); USETW(req.wIndex, reg); USETW(req.wLength, 0); error = usbd_do_request(sc->sc_udev, &req, NULL); if (error != 0) { device_printf(sc->sc_dev, "could not write MAC register: %s\n", usbd_errstr(error)); } } static void ural_write_multi(struct ural_softc *sc, uint16_t reg, void *buf, int len) { usb_device_request_t req; usbd_status error; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RAL_WRITE_MULTI_MAC; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, len); error = usbd_do_request(sc->sc_udev, &req, buf); if (error != 0) { device_printf(sc->sc_dev, "could not write MAC register: %s\n", usbd_errstr(error)); } } static void ural_bbp_write(struct ural_softc *sc, uint8_t reg, uint8_t val) { uint16_t tmp; int ntries; for (ntries = 0; ntries < 5; ntries++) { if (!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY)) break; } if (ntries == 5) { device_printf(sc->sc_dev, "could not write to BBP\n"); return; } tmp = reg << 8 | val; ural_write(sc, RAL_PHY_CSR7, tmp); } static uint8_t ural_bbp_read(struct ural_softc *sc, uint8_t reg) { uint16_t val; int ntries; val = RAL_BBP_WRITE | reg << 8; ural_write(sc, RAL_PHY_CSR7, val); for (ntries = 0; ntries < 5; ntries++) { if (!(ural_read(sc, RAL_PHY_CSR8) & RAL_BBP_BUSY)) break; } if (ntries == 5) { device_printf(sc->sc_dev, "could not read BBP\n"); return 0; } return ural_read(sc, RAL_PHY_CSR7) & 0xff; } static void ural_rf_write(struct ural_softc *sc, uint8_t reg, uint32_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 5; ntries++) { if (!(ural_read(sc, RAL_PHY_CSR10) & RAL_RF_LOBUSY)) break; } if (ntries == 5) { device_printf(sc->sc_dev, "could not write to RF\n"); return; } tmp = RAL_RF_BUSY | RAL_RF_20BIT | (val & 0xfffff) << 2 | (reg & 0x3); ural_write(sc, RAL_PHY_CSR9, tmp & 0xffff); ural_write(sc, RAL_PHY_CSR10, tmp >> 16); /* remember last written value in sc */ sc->rf_regs[reg] = val; DPRINTFN(15, ("RF R[%u] <- 0x%05x\n", reg & 0x3, val & 0xfffff)); } /* ARGUSED */ static struct ieee80211_node * -ural_node_alloc(struct ieee80211_node_table *nt __unused) +ural_node_alloc(struct ieee80211vap *vap __unused, + const uint8_t mac[IEEE80211_ADDR_LEN] __unused) { struct ural_node *un; un = malloc(sizeof(struct ural_node), M_80211_NODE, M_NOWAIT | M_ZERO); return un != NULL ? 
&un->ni : NULL; } static void ural_newassoc(struct ieee80211_node *ni, int isnew) { struct ieee80211vap *vap = ni->ni_vap; ieee80211_amrr_node_init(&URAL_VAP(vap)->amrr, &URAL_NODE(ni)->amn, ni); } static void ural_scan_start(struct ieee80211com *ic) { struct ural_softc *sc = ic->ic_ifp->if_softc; usb_rem_task(sc->sc_udev, &sc->sc_scantask); /* do it in a process context */ sc->sc_scan_action = URAL_SCAN_START; usb_add_task(sc->sc_udev, &sc->sc_scantask, USB_TASKQ_DRIVER); } static void ural_scan_end(struct ieee80211com *ic) { struct ural_softc *sc = ic->ic_ifp->if_softc; usb_rem_task(sc->sc_udev, &sc->sc_scantask); /* do it in a process context */ sc->sc_scan_action = URAL_SCAN_END; usb_add_task(sc->sc_udev, &sc->sc_scantask, USB_TASKQ_DRIVER); } static void ural_set_channel(struct ieee80211com *ic) { struct ural_softc *sc = ic->ic_ifp->if_softc; usb_rem_task(sc->sc_udev, &sc->sc_scantask); /* do it in a process context */ sc->sc_scan_action = URAL_SET_CHANNEL; usb_add_task(sc->sc_udev, &sc->sc_scantask, USB_TASKQ_DRIVER); sc->sc_rates = ieee80211_get_ratetable(ic->ic_curchan); } static void ural_set_chan(struct ural_softc *sc, struct ieee80211_channel *c) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint8_t power, tmp; u_int i, chan; chan = ieee80211_chan2ieee(ic, c); if (chan == 0 || chan == IEEE80211_CHAN_ANY) return; if (IEEE80211_IS_CHAN_2GHZ(c)) power = min(sc->txpow[chan - 1], 31); else power = 31; /* adjust txpower using ifconfig settings */ power -= (100 - ic->ic_txpowlimit) / 8; DPRINTFN(2, ("setting channel to %u, txpower to %u\n", chan, power)); switch (sc->rf_rev) { case RAL_RF_2522: ural_rf_write(sc, RAL_RF1, 0x00814); ural_rf_write(sc, RAL_RF2, ural_rf2522_r2[chan - 1]); ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040); break; case RAL_RF_2523: ural_rf_write(sc, RAL_RF1, 0x08804); ural_rf_write(sc, RAL_RF2, ural_rf2523_r2[chan - 1]); ural_rf_write(sc, RAL_RF3, power << 7 | 0x38044); ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); break; case RAL_RF_2524: ural_rf_write(sc, RAL_RF1, 0x0c808); ural_rf_write(sc, RAL_RF2, ural_rf2524_r2[chan - 1]); ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040); ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); break; case RAL_RF_2525: ural_rf_write(sc, RAL_RF1, 0x08808); ural_rf_write(sc, RAL_RF2, ural_rf2525_hi_r2[chan - 1]); ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044); ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); ural_rf_write(sc, RAL_RF1, 0x08808); ural_rf_write(sc, RAL_RF2, ural_rf2525_r2[chan - 1]); ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044); ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); break; case RAL_RF_2525E: ural_rf_write(sc, RAL_RF1, 0x08808); ural_rf_write(sc, RAL_RF2, ural_rf2525e_r2[chan - 1]); ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044); ural_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00286 : 0x00282); break; case RAL_RF_2526: ural_rf_write(sc, RAL_RF2, ural_rf2526_hi_r2[chan - 1]); ural_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381); ural_rf_write(sc, RAL_RF1, 0x08804); ural_rf_write(sc, RAL_RF2, ural_rf2526_r2[chan - 1]); ural_rf_write(sc, RAL_RF3, power << 7 | 0x18044); ural_rf_write(sc, RAL_RF4, (chan & 1) ? 
0x00386 : 0x00381); break; /* dual-band RF */ case RAL_RF_5222: for (i = 0; ural_rf5222[i].chan != chan; i++); ural_rf_write(sc, RAL_RF1, ural_rf5222[i].r1); ural_rf_write(sc, RAL_RF2, ural_rf5222[i].r2); ural_rf_write(sc, RAL_RF3, power << 7 | 0x00040); ural_rf_write(sc, RAL_RF4, ural_rf5222[i].r4); break; } if (ic->ic_opmode != IEEE80211_M_MONITOR && (ic->ic_flags & IEEE80211_F_SCAN) == 0) { /* set Japan filter bit for channel 14 */ tmp = ural_bbp_read(sc, 70); tmp &= ~RAL_JAPAN_FILTER; if (chan == 14) tmp |= RAL_JAPAN_FILTER; ural_bbp_write(sc, 70, tmp); /* clear CRC errors */ ural_read(sc, RAL_STA_CSR0); DELAY(10000); ural_disable_rf_tune(sc); } /* XXX doesn't belong here */ /* update basic rate set */ ural_set_basicrates(sc, c); } /* * Disable RF auto-tuning. */ static void ural_disable_rf_tune(struct ural_softc *sc) { uint32_t tmp; if (sc->rf_rev != RAL_RF_2523) { tmp = sc->rf_regs[RAL_RF1] & ~RAL_RF1_AUTOTUNE; ural_rf_write(sc, RAL_RF1, tmp); } tmp = sc->rf_regs[RAL_RF3] & ~RAL_RF3_AUTOTUNE; ural_rf_write(sc, RAL_RF3, tmp); DPRINTFN(2, ("disabling RF autotune\n")); } /* * Refer to IEEE Std 802.11-1999 pp. 123 for more information on TSF * synchronization. */ static void ural_enable_tsf_sync(struct ural_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint16_t logcwmin, preload, tmp; /* first, disable TSF synchronization */ ural_write(sc, RAL_TXRX_CSR19, 0); tmp = (16 * vap->iv_bss->ni_intval) << 4; ural_write(sc, RAL_TXRX_CSR18, tmp); logcwmin = (ic->ic_opmode == IEEE80211_M_IBSS) ? 2 : 0; preload = (ic->ic_opmode == IEEE80211_M_IBSS) ? 320 : 6; tmp = logcwmin << 12 | preload; ural_write(sc, RAL_TXRX_CSR20, tmp); /* finally, enable TSF synchronization */ tmp = RAL_ENABLE_TSF | RAL_ENABLE_TBCN; if (ic->ic_opmode == IEEE80211_M_STA) tmp |= RAL_ENABLE_TSF_SYNC(1); else tmp |= RAL_ENABLE_TSF_SYNC(2) | RAL_ENABLE_BEACON_GENERATOR; ural_write(sc, RAL_TXRX_CSR19, tmp); DPRINTF(("enabling TSF synchronization\n")); } static void ural_update_slot(struct ifnet *ifp) { struct ural_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; uint16_t slottime, sifs, eifs; slottime = (ic->ic_flags & IEEE80211_F_SHSLOT) ? 9 : 20; /* * These settings may sound a bit inconsistent but this is what the * reference driver does. 
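 * The SIFS and EIFS values below appear to be copied from the vendor reference code rather than derived from the 802.11 timing tables.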
*/ if (ic->ic_curmode == IEEE80211_MODE_11B) { sifs = 16 - RAL_RXTX_TURNAROUND; eifs = 364; } else { sifs = 10 - RAL_RXTX_TURNAROUND; eifs = 64; } ural_write(sc, RAL_MAC_CSR10, slottime); ural_write(sc, RAL_MAC_CSR11, sifs); ural_write(sc, RAL_MAC_CSR12, eifs); } static void ural_set_txpreamble(struct ural_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint16_t tmp; tmp = ural_read(sc, RAL_TXRX_CSR10); tmp &= ~RAL_SHORT_PREAMBLE; if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) tmp |= RAL_SHORT_PREAMBLE; ural_write(sc, RAL_TXRX_CSR10, tmp); } static void ural_set_basicrates(struct ural_softc *sc, const struct ieee80211_channel *c) { /* XXX wrong, take from rate set */ /* update basic rate set */ if (IEEE80211_IS_CHAN_5GHZ(c)) { /* 11a basic rates: 6, 12, 24Mbps */ ural_write(sc, RAL_TXRX_CSR11, 0x150); } else if (IEEE80211_IS_CHAN_ANYG(c)) { /* 11g basic rates: 1, 2, 5.5, 11, 6, 12, 24Mbps */ ural_write(sc, RAL_TXRX_CSR11, 0x15f); } else { /* 11b basic rates: 1, 2Mbps */ ural_write(sc, RAL_TXRX_CSR11, 0x3); } } static void ural_set_bssid(struct ural_softc *sc, const uint8_t *bssid) { uint16_t tmp; tmp = bssid[0] | bssid[1] << 8; ural_write(sc, RAL_MAC_CSR5, tmp); tmp = bssid[2] | bssid[3] << 8; ural_write(sc, RAL_MAC_CSR6, tmp); tmp = bssid[4] | bssid[5] << 8; ural_write(sc, RAL_MAC_CSR7, tmp); DPRINTF(("setting BSSID to %6D\n", bssid, ":")); } static void ural_set_macaddr(struct ural_softc *sc, uint8_t *addr) { uint16_t tmp; tmp = addr[0] | addr[1] << 8; ural_write(sc, RAL_MAC_CSR2, tmp); tmp = addr[2] | addr[3] << 8; ural_write(sc, RAL_MAC_CSR3, tmp); tmp = addr[4] | addr[5] << 8; ural_write(sc, RAL_MAC_CSR4, tmp); DPRINTF(("setting MAC address to %6D\n", addr, ":")); } static void ural_update_promisc(struct ural_softc *sc) { struct ifnet *ifp = sc->sc_ifp; uint32_t tmp; tmp = ural_read(sc, RAL_TXRX_CSR2); tmp &= ~RAL_DROP_NOT_TO_ME; if (!(ifp->if_flags & IFF_PROMISC)) tmp |= RAL_DROP_NOT_TO_ME; ural_write(sc, RAL_TXRX_CSR2, tmp); DPRINTF(("%s promiscuous mode\n", (ifp->if_flags & IFF_PROMISC) ? 
"entering" : "leaving")); } static const char * ural_get_rf(int rev) { switch (rev) { case RAL_RF_2522: return "RT2522"; case RAL_RF_2523: return "RT2523"; case RAL_RF_2524: return "RT2524"; case RAL_RF_2525: return "RT2525"; case RAL_RF_2525E: return "RT2525e"; case RAL_RF_2526: return "RT2526"; case RAL_RF_5222: return "RT5222"; default: return "unknown"; } } static void ural_read_eeprom(struct ural_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint16_t val; ural_eeprom_read(sc, RAL_EEPROM_CONFIG0, &val, 2); val = le16toh(val); sc->rf_rev = (val >> 11) & 0x7; sc->hw_radio = (val >> 10) & 0x1; sc->led_mode = (val >> 6) & 0x7; sc->rx_ant = (val >> 4) & 0x3; sc->tx_ant = (val >> 2) & 0x3; sc->nb_ant = val & 0x3; /* read MAC address */ ural_eeprom_read(sc, RAL_EEPROM_ADDRESS, ic->ic_myaddr, 6); /* read default values for BBP registers */ ural_eeprom_read(sc, RAL_EEPROM_BBP_BASE, sc->bbp_prom, 2 * 16); /* read Tx power for all b/g channels */ ural_eeprom_read(sc, RAL_EEPROM_TXPOWER, sc->txpow, 14); } static int ural_bbp_init(struct ural_softc *sc) { #define N(a) (sizeof (a) / sizeof ((a)[0])) int i, ntries; /* wait for BBP to be ready */ for (ntries = 0; ntries < 100; ntries++) { if (ural_bbp_read(sc, RAL_BBP_VERSION) != 0) break; DELAY(1000); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for BBP\n"); return EIO; } /* initialize BBP registers to default values */ for (i = 0; i < N(ural_def_bbp); i++) ural_bbp_write(sc, ural_def_bbp[i].reg, ural_def_bbp[i].val); #if 0 /* initialize BBP registers to values stored in EEPROM */ for (i = 0; i < 16; i++) { if (sc->bbp_prom[i].reg == 0xff) continue; ural_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val); } #endif return 0; #undef N } static void ural_set_txantenna(struct ural_softc *sc, int antenna) { uint16_t tmp; uint8_t tx; tx = ural_bbp_read(sc, RAL_BBP_TX) & ~RAL_BBP_ANTMASK; if (antenna == 1) tx |= RAL_BBP_ANTA; else if (antenna == 2) tx |= RAL_BBP_ANTB; else tx |= RAL_BBP_DIVERSITY; /* need to force I/Q flip for RF 2525e, 2526 and 5222 */ if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526 || sc->rf_rev == RAL_RF_5222) tx |= RAL_BBP_FLIPIQ; ural_bbp_write(sc, RAL_BBP_TX, tx); /* update values in PHY_CSR5 and PHY_CSR6 */ tmp = ural_read(sc, RAL_PHY_CSR5) & ~0x7; ural_write(sc, RAL_PHY_CSR5, tmp | (tx & 0x7)); tmp = ural_read(sc, RAL_PHY_CSR6) & ~0x7; ural_write(sc, RAL_PHY_CSR6, tmp | (tx & 0x7)); } static void ural_set_rxantenna(struct ural_softc *sc, int antenna) { uint8_t rx; rx = ural_bbp_read(sc, RAL_BBP_RX) & ~RAL_BBP_ANTMASK; if (antenna == 1) rx |= RAL_BBP_ANTA; else if (antenna == 2) rx |= RAL_BBP_ANTB; else rx |= RAL_BBP_DIVERSITY; /* need to force no I/Q flip for RF 2525e and 2526 */ if (sc->rf_rev == RAL_RF_2525E || sc->rf_rev == RAL_RF_2526) rx &= ~RAL_BBP_FLIPIQ; ural_bbp_write(sc, RAL_BBP_RX, rx); } static void ural_init_locked(struct ural_softc *sc) { #define N(a) (sizeof (a) / sizeof ((a)[0])) struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ural_rx_data *data; uint16_t tmp; usbd_status error; int i, ntries; ural_set_testmode(sc); ural_write(sc, 0x308, 0x00f0); /* XXX magic */ ural_stop(sc); /* initialize MAC registers to default values */ for (i = 0; i < N(ural_def_mac); i++) ural_write(sc, ural_def_mac[i].reg, ural_def_mac[i].val); /* wait for BBP and RF to wake up (this can take a long time!) 
*/ for (ntries = 0; ntries < 100; ntries++) { tmp = ural_read(sc, RAL_MAC_CSR17); if ((tmp & (RAL_BBP_AWAKE | RAL_RF_AWAKE)) == (RAL_BBP_AWAKE | RAL_RF_AWAKE)) break; DELAY(1000); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for BBP/RF to wakeup\n"); goto fail; } /* we're ready! */ ural_write(sc, RAL_MAC_CSR1, RAL_HOST_READY); /* set basic rate set (will be updated later) */ ural_write(sc, RAL_TXRX_CSR11, 0x15f); if (ural_bbp_init(sc) != 0) goto fail; ural_set_chan(sc, ic->ic_curchan); /* clear statistic registers (STA_CSR0 to STA_CSR10) */ ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta); ural_set_txantenna(sc, sc->tx_ant); ural_set_rxantenna(sc, sc->rx_ant); IEEE80211_ADDR_COPY(ic->ic_myaddr, IF_LLADDR(ifp)); ural_set_macaddr(sc, ic->ic_myaddr); /* * Allocate xfer for AMRR statistics requests. */ sc->amrr_xfer = usbd_alloc_xfer(sc->sc_udev); if (sc->amrr_xfer == NULL) { device_printf(sc->sc_dev, "could not allocate AMRR xfer\n"); goto fail; } /* * Open Tx and Rx USB bulk pipes. */ error = usbd_open_pipe(sc->sc_iface, sc->sc_tx_no, USBD_EXCLUSIVE_USE, &sc->sc_tx_pipeh); if (error != 0) { device_printf(sc->sc_dev, "could not open Tx pipe: %s\n", usbd_errstr(error)); goto fail; } error = usbd_open_pipe(sc->sc_iface, sc->sc_rx_no, USBD_EXCLUSIVE_USE, &sc->sc_rx_pipeh); if (error != 0) { device_printf(sc->sc_dev, "could not open Rx pipe: %s\n", usbd_errstr(error)); goto fail; } /* * Allocate Tx and Rx xfer queues. */ error = ural_alloc_tx_list(sc); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Tx list\n"); goto fail; } error = ural_alloc_rx_list(sc); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Rx list\n"); goto fail; } /* * Start up the receive pipe. */ for (i = 0; i < RAL_RX_LIST_COUNT; i++) { data = &sc->rx_data[i]; usbd_setup_xfer(data->xfer, sc->sc_rx_pipeh, data, data->buf, MCLBYTES, USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, ural_rxeof); usbd_transfer(data->xfer); } /* kick Rx */ tmp = RAL_DROP_PHY | RAL_DROP_CRC; if (ic->ic_opmode != IEEE80211_M_MONITOR) { tmp |= RAL_DROP_CTL | RAL_DROP_BAD_VERSION; if (ic->ic_opmode != IEEE80211_M_HOSTAP) tmp |= RAL_DROP_TODS; if (!(ifp->if_flags & IFF_PROMISC)) tmp |= RAL_DROP_NOT_TO_ME; } ural_write(sc, RAL_TXRX_CSR2, tmp); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_drv_flags |= IFF_DRV_RUNNING; return; fail: ural_stop(sc); #undef N } static void ural_init(void *priv) { struct ural_softc *sc = priv; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; RAL_LOCK(sc); ural_init_locked(sc); RAL_UNLOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) ieee80211_start_all(ic); /* start all vap's */ } static void ural_stop(void *priv) { struct ural_softc *sc = priv; struct ifnet *ifp = sc->sc_ifp; sc->sc_tx_timer = 0; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); /* disable Rx */ ural_write(sc, RAL_TXRX_CSR2, RAL_DISABLE_RX); /* reset ASIC and BBP (but won't reset MAC registers!) 
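 * NB: the reset bits in RAL_MAC_CSR1 are asserted and then cleared to take the parts back out of reset.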
*/ ural_write(sc, RAL_MAC_CSR1, RAL_RESET_ASIC | RAL_RESET_BBP); ural_write(sc, RAL_MAC_CSR1, 0); if (sc->amrr_xfer != NULL) { usbd_free_xfer(sc->amrr_xfer); sc->amrr_xfer = NULL; } if (sc->sc_rx_pipeh != NULL) { usbd_abort_pipe(sc->sc_rx_pipeh); usbd_close_pipe(sc->sc_rx_pipeh); sc->sc_rx_pipeh = NULL; } if (sc->sc_tx_pipeh != NULL) { usbd_abort_pipe(sc->sc_tx_pipeh); usbd_close_pipe(sc->sc_tx_pipeh); sc->sc_tx_pipeh = NULL; } ural_free_rx_list(sc); ural_free_tx_list(sc); } static int ural_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = ic->ic_ifp; struct ural_softc *sc = ifp->if_softc; /* prevent management frames from being sent if we're not ready */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { m_freem(m); ieee80211_free_node(ni); return ENETDOWN; } if (sc->tx_queued >= RAL_TX_LIST_COUNT) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; m_freem(m); ieee80211_free_node(ni); return EIO; } ifp->if_opackets++; if (params == NULL) { /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. */ if (ural_tx_mgt(sc, m, ni) != 0) goto bad; } else { /* * Caller supplied explicit parameters to use in * sending the frame. */ if (ural_tx_raw(sc, m, ni, params) != 0) goto bad; } sc->sc_tx_timer = 5; callout_reset(&sc->watchdog_ch, hz, ural_watchdog, sc); return 0; bad: ifp->if_oerrors++; ieee80211_free_node(ni); return EIO; /* XXX */ } static void ural_amrr_start(struct ural_softc *sc, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ural_vap *uvp = URAL_VAP(vap); /* clear statistic registers (STA_CSR0 to STA_CSR10) */ ural_read_multi(sc, RAL_STA_CSR0, sc->sta, sizeof sc->sta); ieee80211_amrr_node_init(&uvp->amrr, &URAL_NODE(ni)->amn, ni); callout_reset(&uvp->amrr_ch, hz, ural_amrr_timeout, vap); } static void ural_amrr_timeout(void *arg) { struct ieee80211vap *vap = arg; struct ural_softc *sc = vap->iv_ic->ic_ifp->if_softc; usb_device_request_t req; /* * Asynchronously read statistic registers (cleared by read). 
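 * The completion callback ural_amrr_update() feeds the returned counters to the AMRR rate control code.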
*/ req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = RAL_READ_MULTI_MAC; USETW(req.wValue, 0); USETW(req.wIndex, RAL_STA_CSR0); USETW(req.wLength, sizeof sc->sta); usbd_setup_default_xfer(sc->amrr_xfer, sc->sc_udev, vap, USBD_DEFAULT_TIMEOUT, &req, sc->sta, sizeof sc->sta, 0, ural_amrr_update); (void)usbd_transfer(sc->amrr_xfer); } static void ural_amrr_update(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status) { struct ieee80211vap *vap = priv; struct ural_vap *uvp = URAL_VAP(vap); struct ifnet *ifp = vap->iv_ic->ic_ifp; struct ural_softc *sc = ifp->if_softc; struct ieee80211_node *ni = vap->iv_bss; int ok, fail; if (status != USBD_NORMAL_COMPLETION) { device_printf(sc->sc_dev, "could not retrieve Tx statistics - " "cancelling automatic rate control\n"); return; } ok = sc->sta[7] + /* TX ok w/o retry */ sc->sta[8]; /* TX ok w/ retry */ fail = sc->sta[9]; /* TX retry-fail count */ ieee80211_amrr_tx_update(&URAL_NODE(ni)->amn, ok+fail, ok, sc->sta[8] + fail); (void) ieee80211_amrr_choose(ni, &URAL_NODE(ni)->amn); ifp->if_oerrors += fail; /* count TX retry-fail as Tx errors */ callout_reset(&uvp->amrr_ch, hz, ural_amrr_timeout, vap); } diff --git a/sys/dev/usb/if_zyd.c b/sys/dev/usb/if_zyd.c index 70629697e342..ecd7df766d15 100644 --- a/sys/dev/usb/if_zyd.c +++ b/sys/dev/usb/if_zyd.c @@ -1,2791 +1,2793 @@ /* $OpenBSD: if_zyd.c,v 1.52 2007/02/11 00:08:04 jsg Exp $ */ /* $NetBSD: if_zyd.c,v 1.7 2007/06/21 04:04:29 kiyohara Exp $ */ /* $FreeBSD$ */ /*- * Copyright (c) 2006 by Damien Bergamini * Copyright (c) 2006 by Florian Stoehr * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* * ZyDAS ZD1211/ZD1211B USB WLAN driver. 
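 * The matching firmware image (ZD1211 or ZD1211B) is uploaded at attach time, before the rest of the device is brought up.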
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "usbdevs.h" #include #include #include #include #include #define ZYD_DEBUG #ifdef ZYD_DEBUG #define DPRINTF(x) do { if (zyddebug > 0) printf x; } while (0) #define DPRINTFN(n, x) do { if (zyddebug > (n)) printf x; } while (0) int zyddebug = 0; #else #define DPRINTF(x) #define DPRINTFN(n, x) #endif static const struct zyd_phy_pair zyd_def_phy[] = ZYD_DEF_PHY; static const struct zyd_phy_pair zyd_def_phyB[] = ZYD_DEF_PHYB; /* various supported device vendors/products */ #define ZYD_ZD1211_DEV(v, p) \ { { USB_VENDOR_##v, USB_PRODUCT_##v##_##p }, ZYD_ZD1211 } #define ZYD_ZD1211B_DEV(v, p) \ { { USB_VENDOR_##v, USB_PRODUCT_##v##_##p }, ZYD_ZD1211B } static const struct zyd_type { struct usb_devno dev; uint8_t rev; #define ZYD_ZD1211 0 #define ZYD_ZD1211B 1 } zyd_devs[] = { ZYD_ZD1211_DEV(3COM2, 3CRUSB10075), ZYD_ZD1211_DEV(ABOCOM, WL54), ZYD_ZD1211_DEV(ASUS, WL159G), ZYD_ZD1211_DEV(CYBERTAN, TG54USB), ZYD_ZD1211_DEV(DRAYTEK, VIGOR550), ZYD_ZD1211_DEV(PLANEX2, GWUS54GD), ZYD_ZD1211_DEV(PLANEX2, GWUS54GZL), ZYD_ZD1211_DEV(PLANEX3, GWUS54GZ), ZYD_ZD1211_DEV(PLANEX3, GWUS54MINI), ZYD_ZD1211_DEV(SAGEM, XG760A), ZYD_ZD1211_DEV(SENAO, NUB8301), ZYD_ZD1211_DEV(SITECOMEU, WL113), ZYD_ZD1211_DEV(SWEEX, ZD1211), ZYD_ZD1211_DEV(TEKRAM, QUICKWLAN), ZYD_ZD1211_DEV(TEKRAM, ZD1211_1), ZYD_ZD1211_DEV(TEKRAM, ZD1211_2), ZYD_ZD1211_DEV(TWINMOS, G240), ZYD_ZD1211_DEV(UMEDIA, ALL0298V2), ZYD_ZD1211_DEV(UMEDIA, TEW429UB_A), ZYD_ZD1211_DEV(UMEDIA, TEW429UB), ZYD_ZD1211_DEV(WISTRONNEWEB, UR055G), ZYD_ZD1211_DEV(ZCOM, ZD1211), ZYD_ZD1211_DEV(ZYDAS, ZD1211), ZYD_ZD1211_DEV(ZYXEL, AG225H), ZYD_ZD1211_DEV(ZYXEL, ZYAIRG220), ZYD_ZD1211_DEV(ZYXEL, G200V2), ZYD_ZD1211B_DEV(ACCTON, SMCWUSBG), ZYD_ZD1211B_DEV(ACCTON, ZD1211B), ZYD_ZD1211B_DEV(ASUS, A9T_WIFI), ZYD_ZD1211B_DEV(BELKIN, F5D7050_V4000), ZYD_ZD1211B_DEV(BELKIN, ZD1211B), ZYD_ZD1211B_DEV(CISCOLINKSYS, WUSBF54G), ZYD_ZD1211B_DEV(FIBERLINE, WL430U), ZYD_ZD1211B_DEV(MELCO, KG54L), ZYD_ZD1211B_DEV(PHILIPS, SNU5600), ZYD_ZD1211B_DEV(PLANEX2, GW_US54GXS), ZYD_ZD1211B_DEV(SAGEM, XG76NA), ZYD_ZD1211B_DEV(SITECOMEU, ZD1211B), ZYD_ZD1211B_DEV(UMEDIA, TEW429UBC1), #if 0 /* Shall we needs? 
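 * (these are ZD1211B dongles with generic/unknown vendor IDs; they are left disabled here)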
*/ ZYD_ZD1211B_DEV(UNKNOWN1, ZD1211B_1), ZYD_ZD1211B_DEV(UNKNOWN1, ZD1211B_2), ZYD_ZD1211B_DEV(UNKNOWN2, ZD1211B), ZYD_ZD1211B_DEV(UNKNOWN3, ZD1211B), #endif ZYD_ZD1211B_DEV(USR, USR5423), ZYD_ZD1211B_DEV(VTECH, ZD1211B), ZYD_ZD1211B_DEV(ZCOM, ZD1211B), ZYD_ZD1211B_DEV(ZYDAS, ZD1211B), ZYD_ZD1211B_DEV(ZYXEL, M202), ZYD_ZD1211B_DEV(ZYXEL, G220V2), }; #define zyd_lookup(v, p) \ ((const struct zyd_type *)usb_lookup(zyd_devs, v, p)) static device_probe_t zyd_match; static device_attach_t zyd_attach; static device_detach_t zyd_detach; static struct ieee80211vap *zyd_vap_create(struct ieee80211com *, const char name[IFNAMSIZ], int unit, int opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]); static void zyd_vap_delete(struct ieee80211vap *); static int zyd_attachhook(struct zyd_softc *); static int zyd_complete_attach(struct zyd_softc *); static int zyd_open_pipes(struct zyd_softc *); static void zyd_close_pipes(struct zyd_softc *); static int zyd_alloc_tx_list(struct zyd_softc *); static void zyd_free_tx_list(struct zyd_softc *); static int zyd_alloc_rx_list(struct zyd_softc *); static void zyd_free_rx_list(struct zyd_softc *); -static struct ieee80211_node *zyd_node_alloc(struct ieee80211_node_table *); +static struct ieee80211_node *zyd_node_alloc(struct ieee80211vap *, + const uint8_t mac[IEEE80211_ADDR_LEN]); static void zyd_task(void *); static int zyd_newstate(struct ieee80211vap *, enum ieee80211_state, int); static int zyd_cmd(struct zyd_softc *, uint16_t, const void *, int, void *, int, u_int); static int zyd_read16(struct zyd_softc *, uint16_t, uint16_t *); static int zyd_read32(struct zyd_softc *, uint16_t, uint32_t *); static int zyd_write16(struct zyd_softc *, uint16_t, uint16_t); static int zyd_write32(struct zyd_softc *, uint16_t, uint32_t); static int zyd_rfwrite(struct zyd_softc *, uint32_t); static void zyd_lock_phy(struct zyd_softc *); static void zyd_unlock_phy(struct zyd_softc *); static int zyd_rfmd_init(struct zyd_rf *); static int zyd_rfmd_switch_radio(struct zyd_rf *, int); static int zyd_rfmd_set_channel(struct zyd_rf *, uint8_t); static int zyd_al2230_init(struct zyd_rf *); static int zyd_al2230_switch_radio(struct zyd_rf *, int); static int zyd_al2230_set_channel(struct zyd_rf *, uint8_t); static int zyd_al2230_init_b(struct zyd_rf *); static int zyd_al7230B_init(struct zyd_rf *); static int zyd_al7230B_switch_radio(struct zyd_rf *, int); static int zyd_al7230B_set_channel(struct zyd_rf *, uint8_t); static int zyd_al2210_init(struct zyd_rf *); static int zyd_al2210_switch_radio(struct zyd_rf *, int); static int zyd_al2210_set_channel(struct zyd_rf *, uint8_t); static int zyd_gct_init(struct zyd_rf *); static int zyd_gct_switch_radio(struct zyd_rf *, int); static int zyd_gct_set_channel(struct zyd_rf *, uint8_t); static int zyd_maxim_init(struct zyd_rf *); static int zyd_maxim_switch_radio(struct zyd_rf *, int); static int zyd_maxim_set_channel(struct zyd_rf *, uint8_t); static int zyd_maxim2_init(struct zyd_rf *); static int zyd_maxim2_switch_radio(struct zyd_rf *, int); static int zyd_maxim2_set_channel(struct zyd_rf *, uint8_t); static int zyd_rf_attach(struct zyd_softc *, uint8_t); static const char *zyd_rf_name(uint8_t); static int zyd_hw_init(struct zyd_softc *); static int zyd_read_eeprom(struct zyd_softc *); static int zyd_set_macaddr(struct zyd_softc *, const uint8_t *); static int zyd_set_bssid(struct zyd_softc *, const uint8_t *); static int zyd_switch_radio(struct zyd_softc *, int); static void 
zyd_set_led(struct zyd_softc *, int, int); static void zyd_set_multi(void *); static void zyd_update_mcast(struct ifnet *); static int zyd_set_rxfilter(struct zyd_softc *); static void zyd_set_chan(struct zyd_softc *, struct ieee80211_channel *); static int zyd_set_beacon_interval(struct zyd_softc *, int); static void zyd_intr(usbd_xfer_handle, usbd_private_handle, usbd_status); static void zyd_rx_data(struct zyd_softc *, const uint8_t *, uint16_t); static void zyd_rxeof(usbd_xfer_handle, usbd_private_handle, usbd_status); static void zyd_txeof(usbd_xfer_handle, usbd_private_handle, usbd_status); static int zyd_tx_mgt(struct zyd_softc *, struct mbuf *, struct ieee80211_node *); static int zyd_tx_data(struct zyd_softc *, struct mbuf *, struct ieee80211_node *); static void zyd_start(struct ifnet *); static int zyd_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void zyd_watchdog(void *); static int zyd_ioctl(struct ifnet *, u_long, caddr_t); static void zyd_init_locked(struct zyd_softc *); static void zyd_init(void *); static void zyd_stop(struct zyd_softc *, int); static int zyd_loadfirmware(struct zyd_softc *, u_char *, size_t); static void zyd_newassoc(struct ieee80211_node *, int); static void zyd_scantask(void *); static void zyd_scan_start(struct ieee80211com *); static void zyd_scan_end(struct ieee80211com *); static void zyd_set_channel(struct ieee80211com *); static int zyd_match(device_t dev) { struct usb_attach_arg *uaa = device_get_ivars(dev); if (!uaa->iface) return UMATCH_NONE; return (zyd_lookup(uaa->vendor, uaa->product) != NULL) ? UMATCH_VENDOR_PRODUCT : UMATCH_NONE; } static int zyd_attachhook(struct zyd_softc *sc) { u_char *firmware; int len, error; if (sc->mac_rev == ZYD_ZD1211) { firmware = (u_char *)zd1211_firmware; len = sizeof(zd1211_firmware); } else { firmware = (u_char *)zd1211b_firmware; len = sizeof(zd1211b_firmware); } error = zyd_loadfirmware(sc, firmware, len); if (error != 0) { device_printf(sc->sc_dev, "could not load firmware (error=%d)\n", error); return error; } sc->sc_flags |= ZD1211_FWLOADED; /* complete the attach process */ return zyd_complete_attach(sc); } static int zyd_attach(device_t dev) { int error = ENXIO; struct zyd_softc *sc = device_get_softc(dev); struct usb_attach_arg *uaa = device_get_ivars(dev); usb_device_descriptor_t* ddesc; struct ifnet *ifp; sc->sc_dev = dev; ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); return ENXIO; } sc->sc_udev = uaa->device; sc->sc_flags = 0; sc->mac_rev = zyd_lookup(uaa->vendor, uaa->product)->rev; ddesc = usbd_get_device_descriptor(sc->sc_udev); if (UGETW(ddesc->bcdDevice) < 0x4330) { device_printf(dev, "device version mismatch: 0x%x " "(only >= 43.30 supported)\n", UGETW(ddesc->bcdDevice)); goto bad; } ifp->if_softc = sc; if_initname(ifp, "zyd", device_get_unit(sc->sc_dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST | IFF_NEEDSGIANT; /* USB stack is still under Giant lock */ ifp->if_init = zyd_init; ifp->if_ioctl = zyd_ioctl; ifp->if_start = zyd_start; IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); IFQ_SET_READY(&ifp->if_snd); STAILQ_INIT(&sc->sc_rqh); error = zyd_attachhook(sc); if (error != 0) { bad: if_free(ifp); return error; } return 0; } static int zyd_complete_attach(struct zyd_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; usbd_status error; uint8_t bands; mtx_init(&sc->sc_mtx, device_get_nameunit(sc->sc_dev), MTX_NETWORK_LOCK, MTX_DEF | 
MTX_RECURSE); usb_init_task(&sc->sc_scantask, zyd_scantask, sc); usb_init_task(&sc->sc_task, zyd_task, sc); usb_init_task(&sc->sc_mcasttask, zyd_set_multi, sc); callout_init(&sc->sc_watchdog_ch, 0); error = usbd_set_config_no(sc->sc_udev, ZYD_CONFIG_NO, 1); if (error != 0) { device_printf(sc->sc_dev, "setting config no failed\n"); error = ENXIO; goto fail; } error = usbd_device2interface_handle(sc->sc_udev, ZYD_IFACE_INDEX, &sc->sc_iface); if (error != 0) { device_printf(sc->sc_dev, "getting interface handle failed\n"); error = ENXIO; goto fail; } if ((error = zyd_open_pipes(sc)) != 0) { device_printf(sc->sc_dev, "could not open pipes\n"); goto fail; } if ((error = zyd_read_eeprom(sc)) != 0) { device_printf(sc->sc_dev, "could not read EEPROM\n"); goto fail; } if ((error = zyd_rf_attach(sc, sc->rf_rev)) != 0) { device_printf(sc->sc_dev, "could not attach RF, rev 0x%x\n", sc->rf_rev); goto fail; } if ((error = zyd_hw_init(sc)) != 0) { device_printf(sc->sc_dev, "hardware initialization failed\n"); goto fail; } device_printf(sc->sc_dev, "HMAC ZD1211%s, FW %02x.%02x, RF %s, PA %x, address %s\n", (sc->mac_rev == ZYD_ZD1211) ? "": "B", sc->fw_rev >> 8, sc->fw_rev & 0xff, zyd_rf_name(sc->rf_rev), sc->pa_rev, ether_sprintf(ic->ic_myaddr)); IEEE80211_ADDR_COPY(sc->sc_bssid, ic->ic_myaddr); ic->ic_ifp = ifp; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ ic->ic_opmode = IEEE80211_M_STA; /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ | IEEE80211_C_WPA /* 802.11i */ ; bands = 0; setbit(&bands, IEEE80211_MODE_11B); setbit(&bands, IEEE80211_MODE_11G); ieee80211_init_channels(ic, NULL, &bands); ieee80211_ifattach(ic); ic->ic_newassoc = zyd_newassoc; ic->ic_raw_xmit = zyd_raw_xmit; ic->ic_node_alloc = zyd_node_alloc; ic->ic_scan_start = zyd_scan_start; ic->ic_scan_end = zyd_scan_end; ic->ic_set_channel = zyd_set_channel; ic->ic_vap_create = zyd_vap_create; ic->ic_vap_delete = zyd_vap_delete; ic->ic_update_mcast = zyd_update_mcast; bpfattach(ifp, DLT_IEEE802_11_RADIO, sizeof(struct ieee80211_frame) + sizeof(sc->sc_txtap)); sc->sc_rxtap_len = sizeof(sc->sc_rxtap); sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len); sc->sc_rxtap.wr_ihdr.it_present = htole32(ZYD_RX_RADIOTAP_PRESENT); sc->sc_txtap_len = sizeof(sc->sc_txtap); sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len); sc->sc_txtap.wt_ihdr.it_present = htole32(ZYD_TX_RADIOTAP_PRESENT); if (bootverbose) ieee80211_announce(ic); usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev, sc->sc_dev); return error; fail: mtx_destroy(&sc->sc_mtx); return error; } static int zyd_detach(device_t dev) { struct zyd_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; if (!device_is_attached(dev)) return 0; /* protect a race when we have listeners related with the driver. 
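 * (i.e., mark the interface down and stop the hardware before detaching from the net80211 layer)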
*/ ifp->if_flags &= ~IFF_UP; zyd_stop(sc, 1); bpfdetach(ifp); ieee80211_ifdetach(ic); usb_rem_task(sc->sc_udev, &sc->sc_scantask); usb_rem_task(sc->sc_udev, &sc->sc_task); callout_stop(&sc->sc_watchdog_ch); zyd_close_pipes(sc); if_free(ifp); mtx_destroy(&sc->sc_mtx); usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev, sc->sc_dev); return 0; } static struct ieee80211vap * zyd_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, int opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct zyd_vap *zvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return NULL; zvp = (struct zyd_vap *) malloc(sizeof(struct zyd_vap), M_80211_VAP, M_NOWAIT | M_ZERO); if (zvp == NULL) return NULL; vap = &zvp->vap; /* enable s/w bmiss handling for sta mode */ ieee80211_vap_setup(ic, vap, name, unit, opmode, flags | IEEE80211_CLONE_NOBEACONS, bssid, mac); /* override state transition machine */ zvp->newstate = vap->iv_newstate; vap->iv_newstate = zyd_newstate; ieee80211_amrr_init(&zvp->amrr, vap, IEEE80211_AMRR_MIN_SUCCESS_THRESHOLD, IEEE80211_AMRR_MAX_SUCCESS_THRESHOLD, 1000 /* 1 sec */); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status); ic->ic_opmode = opmode; return vap; } static void zyd_vap_delete(struct ieee80211vap *vap) { struct zyd_vap *zvp = ZYD_VAP(vap); ieee80211_amrr_cleanup(&zvp->amrr); ieee80211_vap_detach(vap); free(zvp, M_80211_VAP); } static int zyd_open_pipes(struct zyd_softc *sc) { usb_endpoint_descriptor_t *edesc; int isize; usbd_status error; /* interrupt in */ edesc = usbd_get_endpoint_descriptor(sc->sc_iface, 0x83); if (edesc == NULL) return EINVAL; isize = UGETW(edesc->wMaxPacketSize); if (isize == 0) /* should not happen */ return EINVAL; sc->ibuf = malloc(isize, M_USBDEV, M_NOWAIT); if (sc->ibuf == NULL) return ENOMEM; error = usbd_open_pipe_intr(sc->sc_iface, 0x83, USBD_SHORT_XFER_OK, &sc->zyd_ep[ZYD_ENDPT_IIN], sc, sc->ibuf, isize, zyd_intr, USBD_DEFAULT_INTERVAL); if (error != 0) { device_printf(sc->sc_dev, "open rx intr pipe failed: %s\n", usbd_errstr(error)); goto fail; } /* interrupt out (not necessarily an interrupt pipe) */ error = usbd_open_pipe(sc->sc_iface, 0x04, USBD_EXCLUSIVE_USE, &sc->zyd_ep[ZYD_ENDPT_IOUT]); if (error != 0) { device_printf(sc->sc_dev, "open tx intr pipe failed: %s\n", usbd_errstr(error)); goto fail; } /* bulk in */ error = usbd_open_pipe(sc->sc_iface, 0x82, USBD_EXCLUSIVE_USE, &sc->zyd_ep[ZYD_ENDPT_BIN]); if (error != 0) { device_printf(sc->sc_dev, "open rx pipe failed: %s\n", usbd_errstr(error)); goto fail; } /* bulk out */ error = usbd_open_pipe(sc->sc_iface, 0x01, USBD_EXCLUSIVE_USE, &sc->zyd_ep[ZYD_ENDPT_BOUT]); if (error != 0) { device_printf(sc->sc_dev, "open tx pipe failed: %s\n", usbd_errstr(error)); goto fail; } return 0; fail: zyd_close_pipes(sc); return ENXIO; } static void zyd_close_pipes(struct zyd_softc *sc) { int i; for (i = 0; i < ZYD_ENDPT_CNT; i++) { if (sc->zyd_ep[i] != NULL) { usbd_abort_pipe(sc->zyd_ep[i]); usbd_close_pipe(sc->zyd_ep[i]); sc->zyd_ep[i] = NULL; } } if (sc->ibuf != NULL) { free(sc->ibuf, M_USBDEV); sc->ibuf = NULL; } } static int zyd_alloc_tx_list(struct zyd_softc *sc) { int i, error; sc->tx_queued = 0; for (i = 0; i < ZYD_TX_LIST_CNT; i++) { struct zyd_tx_data *data = &sc->tx_data[i]; data->sc = sc; /* backpointer for callbacks */ data->xfer = usbd_alloc_xfer(sc->sc_udev); if (data->xfer == NULL) { device_printf(sc->sc_dev, "could not allocate tx xfer\n"); error = 
ENOMEM; goto fail; } data->buf = usbd_alloc_buffer(data->xfer, ZYD_MAX_TXBUFSZ); if (data->buf == NULL) { device_printf(sc->sc_dev, "could not allocate tx buffer\n"); error = ENOMEM; goto fail; } /* clear Tx descriptor */ bzero(data->buf, sizeof(struct zyd_tx_desc)); } return 0; fail: zyd_free_tx_list(sc); return error; } static void zyd_free_tx_list(struct zyd_softc *sc) { int i; for (i = 0; i < ZYD_TX_LIST_CNT; i++) { struct zyd_tx_data *data = &sc->tx_data[i]; if (data->xfer != NULL) { usbd_free_xfer(data->xfer); data->xfer = NULL; } if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; } } } static int zyd_alloc_rx_list(struct zyd_softc *sc) { int i, error; for (i = 0; i < ZYD_RX_LIST_CNT; i++) { struct zyd_rx_data *data = &sc->rx_data[i]; data->sc = sc; /* backpointer for callbacks */ data->xfer = usbd_alloc_xfer(sc->sc_udev); if (data->xfer == NULL) { device_printf(sc->sc_dev, "could not allocate rx xfer\n"); error = ENOMEM; goto fail; } data->buf = usbd_alloc_buffer(data->xfer, ZYX_MAX_RXBUFSZ); if (data->buf == NULL) { device_printf(sc->sc_dev, "could not allocate rx buffer\n"); error = ENOMEM; goto fail; } } return 0; fail: zyd_free_rx_list(sc); return error; } static void zyd_free_rx_list(struct zyd_softc *sc) { int i; for (i = 0; i < ZYD_RX_LIST_CNT; i++) { struct zyd_rx_data *data = &sc->rx_data[i]; if (data->xfer != NULL) { usbd_free_xfer(data->xfer); data->xfer = NULL; } } } /* ARGUSED */ static struct ieee80211_node * -zyd_node_alloc(struct ieee80211_node_table *nt __unused) +zyd_node_alloc(struct ieee80211vap *vap __unused, + const uint8_t mac[IEEE80211_ADDR_LEN] __unused) { struct zyd_node *zn; zn = malloc(sizeof(struct zyd_node), M_80211_NODE, M_NOWAIT | M_ZERO); return zn != NULL ? &zn->ni : NULL; } static void zyd_task(void *arg) { struct zyd_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct zyd_vap *zvp = ZYD_VAP(vap); switch (sc->sc_state) { case IEEE80211_S_RUN: { struct ieee80211_node *ni = vap->iv_bss; zyd_set_chan(sc, ic->ic_curchan); if (vap->iv_opmode != IEEE80211_M_MONITOR) { /* turn link LED on */ zyd_set_led(sc, ZYD_LED1, 1); /* make data LED blink upon Tx */ zyd_write32(sc, sc->fwbase + ZYD_FW_LINK_STATUS, 1); IEEE80211_ADDR_COPY(sc->sc_bssid, ni->ni_bssid); zyd_set_bssid(sc, sc->sc_bssid); } if (vap->iv_opmode == IEEE80211_M_STA) { /* fake a join to init the tx rate */ zyd_newassoc(ni, 1); } break; } default: break; } IEEE80211_LOCK(ic); zvp->newstate(vap, sc->sc_state, sc->sc_arg); if (vap->iv_newstate_cb != NULL) vap->iv_newstate_cb(vap, sc->sc_state, sc->sc_arg); IEEE80211_UNLOCK(ic); } static int zyd_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct zyd_vap *zvp = ZYD_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct zyd_softc *sc = ic->ic_ifp->if_softc; usb_rem_task(sc->sc_udev, &sc->sc_task); /* do it in a process context */ sc->sc_state = nstate; sc->sc_arg = arg; if (nstate == IEEE80211_S_INIT) { zvp->newstate(vap, nstate, arg); return 0; } else { usb_add_task(sc->sc_udev, &sc->sc_task, USB_TASKQ_DRIVER); return EINPROGRESS; } } static int zyd_cmd(struct zyd_softc *sc, uint16_t code, const void *idata, int ilen, void *odata, int olen, u_int flags) { usbd_xfer_handle xfer; struct zyd_cmd cmd; struct rq rq; uint16_t xferflags; usbd_status error; if ((xfer = usbd_alloc_xfer(sc->sc_udev)) == NULL) return ENOMEM; cmd.code = htole16(code); bcopy(idata, cmd.data, ilen); xferflags = 
USBD_FORCE_SHORT_XFER; if (!(flags & ZYD_CMD_FLAG_READ)) xferflags |= USBD_SYNCHRONOUS; else { rq.idata = idata; rq.odata = odata; rq.len = olen / sizeof(struct zyd_pair); STAILQ_INSERT_TAIL(&sc->sc_rqh, &rq, rq); } usbd_setup_xfer(xfer, sc->zyd_ep[ZYD_ENDPT_IOUT], 0, &cmd, sizeof(uint16_t) + ilen, xferflags, ZYD_INTR_TIMEOUT, NULL); error = usbd_transfer(xfer); if (error != USBD_IN_PROGRESS && error != 0) { device_printf(sc->sc_dev, "could not send command (error=%s)\n", usbd_errstr(error)); (void)usbd_free_xfer(xfer); return EIO; } if (!(flags & ZYD_CMD_FLAG_READ)) { (void)usbd_free_xfer(xfer); return 0; /* write: don't wait for reply */ } /* wait at most one second for command reply */ error = tsleep(odata, PCATCH, "zydcmd", hz); if (error == EWOULDBLOCK) device_printf(sc->sc_dev, "zyd_read sleep timeout\n"); STAILQ_REMOVE(&sc->sc_rqh, &rq, rq, rq); (void)usbd_free_xfer(xfer); return error; } static int zyd_read16(struct zyd_softc *sc, uint16_t reg, uint16_t *val) { struct zyd_pair tmp; int error; reg = htole16(reg); error = zyd_cmd(sc, ZYD_CMD_IORD, ®, sizeof(reg), &tmp, sizeof(tmp), ZYD_CMD_FLAG_READ); if (error == 0) *val = le16toh(tmp.val); return error; } static int zyd_read32(struct zyd_softc *sc, uint16_t reg, uint32_t *val) { struct zyd_pair tmp[2]; uint16_t regs[2]; int error; regs[0] = htole16(ZYD_REG32_HI(reg)); regs[1] = htole16(ZYD_REG32_LO(reg)); error = zyd_cmd(sc, ZYD_CMD_IORD, regs, sizeof(regs), tmp, sizeof(tmp), ZYD_CMD_FLAG_READ); if (error == 0) *val = le16toh(tmp[0].val) << 16 | le16toh(tmp[1].val); return error; } static int zyd_write16(struct zyd_softc *sc, uint16_t reg, uint16_t val) { struct zyd_pair pair; pair.reg = htole16(reg); pair.val = htole16(val); return zyd_cmd(sc, ZYD_CMD_IOWR, &pair, sizeof(pair), NULL, 0, 0); } static int zyd_write32(struct zyd_softc *sc, uint16_t reg, uint32_t val) { struct zyd_pair pair[2]; pair[0].reg = htole16(ZYD_REG32_HI(reg)); pair[0].val = htole16(val >> 16); pair[1].reg = htole16(ZYD_REG32_LO(reg)); pair[1].val = htole16(val & 0xffff); return zyd_cmd(sc, ZYD_CMD_IOWR, pair, sizeof(pair), NULL, 0, 0); } static int zyd_rfwrite(struct zyd_softc *sc, uint32_t val) { struct zyd_rf *rf = &sc->sc_rf; struct zyd_rfwrite req; uint16_t cr203; int i; (void)zyd_read16(sc, ZYD_CR203, &cr203); cr203 &= ~(ZYD_RF_IF_LE | ZYD_RF_CLK | ZYD_RF_DATA); req.code = htole16(2); req.width = htole16(rf->width); for (i = 0; i < rf->width; i++) { req.bit[i] = htole16(cr203); if (val & (1 << (rf->width - 1 - i))) req.bit[i] |= htole16(ZYD_RF_DATA); } return zyd_cmd(sc, ZYD_CMD_RFCFG, &req, 4 + 2 * rf->width, NULL, 0, 0); } static void zyd_lock_phy(struct zyd_softc *sc) { uint32_t tmp; (void)zyd_read32(sc, ZYD_MAC_MISC, &tmp); tmp &= ~ZYD_UNLOCK_PHY_REGS; (void)zyd_write32(sc, ZYD_MAC_MISC, tmp); } static void zyd_unlock_phy(struct zyd_softc *sc) { uint32_t tmp; (void)zyd_read32(sc, ZYD_MAC_MISC, &tmp); tmp |= ZYD_UNLOCK_PHY_REGS; (void)zyd_write32(sc, ZYD_MAC_MISC, tmp); } /* * RFMD RF methods. 
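 * Each RF backend below provides init, switch_radio and set_channel methods; zyd_rf_attach() picks one based on the detected RF revision.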
*/ static int zyd_rfmd_init(struct zyd_rf *rf) { #define N(a) (sizeof(a) / sizeof((a)[0])) struct zyd_softc *sc = rf->rf_sc; static const struct zyd_phy_pair phyini[] = ZYD_RFMD_PHY; static const uint32_t rfini[] = ZYD_RFMD_RF; int i, error; /* init RF-dependent PHY registers */ for (i = 0; i < N(phyini); i++) { error = zyd_write16(sc, phyini[i].reg, phyini[i].val); if (error != 0) return error; } /* init RFMD radio */ for (i = 0; i < N(rfini); i++) { if ((error = zyd_rfwrite(sc, rfini[i])) != 0) return error; } return 0; #undef N } static int zyd_rfmd_switch_radio(struct zyd_rf *rf, int on) { struct zyd_softc *sc = rf->rf_sc; (void)zyd_write16(sc, ZYD_CR10, on ? 0x89 : 0x15); (void)zyd_write16(sc, ZYD_CR11, on ? 0x00 : 0x81); return 0; } static int zyd_rfmd_set_channel(struct zyd_rf *rf, uint8_t chan) { struct zyd_softc *sc = rf->rf_sc; static const struct { uint32_t r1, r2; } rfprog[] = ZYD_RFMD_CHANTABLE; (void)zyd_rfwrite(sc, rfprog[chan - 1].r1); (void)zyd_rfwrite(sc, rfprog[chan - 1].r2); return 0; } /* * AL2230 RF methods. */ static int zyd_al2230_init(struct zyd_rf *rf) { #define N(a) (sizeof(a) / sizeof((a)[0])) struct zyd_softc *sc = rf->rf_sc; static const struct zyd_phy_pair phyini[] = ZYD_AL2230_PHY; static const uint32_t rfini[] = ZYD_AL2230_RF; int i, error; /* init RF-dependent PHY registers */ for (i = 0; i < N(phyini); i++) { error = zyd_write16(sc, phyini[i].reg, phyini[i].val); if (error != 0) return error; } /* init AL2230 radio */ for (i = 0; i < N(rfini); i++) { if ((error = zyd_rfwrite(sc, rfini[i])) != 0) return error; } return 0; #undef N } static int zyd_al2230_init_b(struct zyd_rf *rf) { #define N(a) (sizeof(a) / sizeof((a)[0])) struct zyd_softc *sc = rf->rf_sc; static const struct zyd_phy_pair phyini[] = ZYD_AL2230_PHY_B; static const uint32_t rfini[] = ZYD_AL2230_RF_B; int i, error; /* init RF-dependent PHY registers */ for (i = 0; i < N(phyini); i++) { error = zyd_write16(sc, phyini[i].reg, phyini[i].val); if (error != 0) return error; } /* init AL2230 radio */ for (i = 0; i < N(rfini); i++) { if ((error = zyd_rfwrite(sc, rfini[i])) != 0) return error; } return 0; #undef N } static int zyd_al2230_switch_radio(struct zyd_rf *rf, int on) { struct zyd_softc *sc = rf->rf_sc; int on251 = (sc->mac_rev == ZYD_ZD1211) ? 0x3f : 0x7f; (void)zyd_write16(sc, ZYD_CR11, on ? 0x00 : 0x04); (void)zyd_write16(sc, ZYD_CR251, on ? on251 : 0x2f); return 0; } static int zyd_al2230_set_channel(struct zyd_rf *rf, uint8_t chan) { struct zyd_softc *sc = rf->rf_sc; static const struct { uint32_t r1, r2, r3; } rfprog[] = ZYD_AL2230_CHANTABLE; (void)zyd_rfwrite(sc, rfprog[chan - 1].r1); (void)zyd_rfwrite(sc, rfprog[chan - 1].r2); (void)zyd_rfwrite(sc, rfprog[chan - 1].r3); (void)zyd_write16(sc, ZYD_CR138, 0x28); (void)zyd_write16(sc, ZYD_CR203, 0x06); return 0; } /* * AL7230B RF methods. 
*/ static int zyd_al7230B_init(struct zyd_rf *rf) { #define N(a) (sizeof(a) / sizeof((a)[0])) struct zyd_softc *sc = rf->rf_sc; static const struct zyd_phy_pair phyini_1[] = ZYD_AL7230B_PHY_1; static const struct zyd_phy_pair phyini_2[] = ZYD_AL7230B_PHY_2; static const struct zyd_phy_pair phyini_3[] = ZYD_AL7230B_PHY_3; static const uint32_t rfini_1[] = ZYD_AL7230B_RF_1; static const uint32_t rfini_2[] = ZYD_AL7230B_RF_2; int i, error; /* for AL7230B, PHY and RF need to be initialized in "phases" */ /* init RF-dependent PHY registers, part one */ for (i = 0; i < N(phyini_1); i++) { error = zyd_write16(sc, phyini_1[i].reg, phyini_1[i].val); if (error != 0) return error; } /* init AL7230B radio, part one */ for (i = 0; i < N(rfini_1); i++) { if ((error = zyd_rfwrite(sc, rfini_1[i])) != 0) return error; } /* init RF-dependent PHY registers, part two */ for (i = 0; i < N(phyini_2); i++) { error = zyd_write16(sc, phyini_2[i].reg, phyini_2[i].val); if (error != 0) return error; } /* init AL7230B radio, part two */ for (i = 0; i < N(rfini_2); i++) { if ((error = zyd_rfwrite(sc, rfini_2[i])) != 0) return error; } /* init RF-dependent PHY registers, part three */ for (i = 0; i < N(phyini_3); i++) { error = zyd_write16(sc, phyini_3[i].reg, phyini_3[i].val); if (error != 0) return error; } return 0; #undef N } static int zyd_al7230B_switch_radio(struct zyd_rf *rf, int on) { struct zyd_softc *sc = rf->rf_sc; (void)zyd_write16(sc, ZYD_CR11, on ? 0x00 : 0x04); (void)zyd_write16(sc, ZYD_CR251, on ? 0x3f : 0x2f); return 0; } static int zyd_al7230B_set_channel(struct zyd_rf *rf, uint8_t chan) { #define N(a) (sizeof(a) / sizeof((a)[0])) struct zyd_softc *sc = rf->rf_sc; static const struct { uint32_t r1, r2; } rfprog[] = ZYD_AL7230B_CHANTABLE; static const uint32_t rfsc[] = ZYD_AL7230B_RF_SETCHANNEL; int i, error; (void)zyd_write16(sc, ZYD_CR240, 0x57); (void)zyd_write16(sc, ZYD_CR251, 0x2f); for (i = 0; i < N(rfsc); i++) { if ((error = zyd_rfwrite(sc, rfsc[i])) != 0) return error; } (void)zyd_write16(sc, ZYD_CR128, 0x14); (void)zyd_write16(sc, ZYD_CR129, 0x12); (void)zyd_write16(sc, ZYD_CR130, 0x10); (void)zyd_write16(sc, ZYD_CR38, 0x38); (void)zyd_write16(sc, ZYD_CR136, 0xdf); (void)zyd_rfwrite(sc, rfprog[chan - 1].r1); (void)zyd_rfwrite(sc, rfprog[chan - 1].r2); (void)zyd_rfwrite(sc, 0x3c9000); (void)zyd_write16(sc, ZYD_CR251, 0x3f); (void)zyd_write16(sc, ZYD_CR203, 0x06); (void)zyd_write16(sc, ZYD_CR240, 0x08); return 0; #undef N } /* * AL2210 RF methods. 
*/ static int zyd_al2210_init(struct zyd_rf *rf) { #define N(a) (sizeof(a) / sizeof((a)[0])) struct zyd_softc *sc = rf->rf_sc; static const struct zyd_phy_pair phyini[] = ZYD_AL2210_PHY; static const uint32_t rfini[] = ZYD_AL2210_RF; uint32_t tmp; int i, error; (void)zyd_write32(sc, ZYD_CR18, 2); /* init RF-dependent PHY registers */ for (i = 0; i < N(phyini); i++) { error = zyd_write16(sc, phyini[i].reg, phyini[i].val); if (error != 0) return error; } /* init AL2210 radio */ for (i = 0; i < N(rfini); i++) { if ((error = zyd_rfwrite(sc, rfini[i])) != 0) return error; } (void)zyd_write16(sc, ZYD_CR47, 0x1e); (void)zyd_read32(sc, ZYD_CR_RADIO_PD, &tmp); (void)zyd_write32(sc, ZYD_CR_RADIO_PD, tmp & ~1); (void)zyd_write32(sc, ZYD_CR_RADIO_PD, tmp | 1); (void)zyd_write32(sc, ZYD_CR_RFCFG, 0x05); (void)zyd_write32(sc, ZYD_CR_RFCFG, 0x00); (void)zyd_write16(sc, ZYD_CR47, 0x1e); (void)zyd_write32(sc, ZYD_CR18, 3); return 0; #undef N } static int zyd_al2210_switch_radio(struct zyd_rf *rf, int on) { /* vendor driver does nothing for this RF chip */ return 0; } static int zyd_al2210_set_channel(struct zyd_rf *rf, uint8_t chan) { struct zyd_softc *sc = rf->rf_sc; static const uint32_t rfprog[] = ZYD_AL2210_CHANTABLE; uint32_t tmp; (void)zyd_write32(sc, ZYD_CR18, 2); (void)zyd_write16(sc, ZYD_CR47, 0x1e); (void)zyd_read32(sc, ZYD_CR_RADIO_PD, &tmp); (void)zyd_write32(sc, ZYD_CR_RADIO_PD, tmp & ~1); (void)zyd_write32(sc, ZYD_CR_RADIO_PD, tmp | 1); (void)zyd_write32(sc, ZYD_CR_RFCFG, 0x05); (void)zyd_write32(sc, ZYD_CR_RFCFG, 0x00); (void)zyd_write16(sc, ZYD_CR47, 0x1e); /* actually set the channel */ (void)zyd_rfwrite(sc, rfprog[chan - 1]); (void)zyd_write32(sc, ZYD_CR18, 3); return 0; } /* * GCT RF methods. */ static int zyd_gct_init(struct zyd_rf *rf) { #define N(a) (sizeof(a) / sizeof((a)[0])) struct zyd_softc *sc = rf->rf_sc; static const struct zyd_phy_pair phyini[] = ZYD_GCT_PHY; static const uint32_t rfini[] = ZYD_GCT_RF; int i, error; /* init RF-dependent PHY registers */ for (i = 0; i < N(phyini); i++) { error = zyd_write16(sc, phyini[i].reg, phyini[i].val); if (error != 0) return error; } /* init cgt radio */ for (i = 0; i < N(rfini); i++) { if ((error = zyd_rfwrite(sc, rfini[i])) != 0) return error; } return 0; #undef N } static int zyd_gct_switch_radio(struct zyd_rf *rf, int on) { /* vendor driver does nothing for this RF chip */ return 0; } static int zyd_gct_set_channel(struct zyd_rf *rf, uint8_t chan) { struct zyd_softc *sc = rf->rf_sc; static const uint32_t rfprog[] = ZYD_GCT_CHANTABLE; (void)zyd_rfwrite(sc, 0x1c0000); (void)zyd_rfwrite(sc, rfprog[chan - 1]); (void)zyd_rfwrite(sc, 0x1c0008); return 0; } /* * Maxim RF methods. 
*/ static int zyd_maxim_init(struct zyd_rf *rf) { #define N(a) (sizeof(a) / sizeof((a)[0])) struct zyd_softc *sc = rf->rf_sc; static const struct zyd_phy_pair phyini[] = ZYD_MAXIM_PHY; static const uint32_t rfini[] = ZYD_MAXIM_RF; uint16_t tmp; int i, error; /* init RF-dependent PHY registers */ for (i = 0; i < N(phyini); i++) { error = zyd_write16(sc, phyini[i].reg, phyini[i].val); if (error != 0) return error; } (void)zyd_read16(sc, ZYD_CR203, &tmp); (void)zyd_write16(sc, ZYD_CR203, tmp & ~(1 << 4)); /* init maxim radio */ for (i = 0; i < N(rfini); i++) { if ((error = zyd_rfwrite(sc, rfini[i])) != 0) return error; } (void)zyd_read16(sc, ZYD_CR203, &tmp); (void)zyd_write16(sc, ZYD_CR203, tmp | (1 << 4)); return 0; #undef N } static int zyd_maxim_switch_radio(struct zyd_rf *rf, int on) { /* vendor driver does nothing for this RF chip */ return 0; } static int zyd_maxim_set_channel(struct zyd_rf *rf, uint8_t chan) { #define N(a) (sizeof(a) / sizeof((a)[0])) struct zyd_softc *sc = rf->rf_sc; static const struct zyd_phy_pair phyini[] = ZYD_MAXIM_PHY; static const uint32_t rfini[] = ZYD_MAXIM_RF; static const struct { uint32_t r1, r2; } rfprog[] = ZYD_MAXIM_CHANTABLE; uint16_t tmp; int i, error; /* * Do the same as we do when initializing it, except for the channel * values coming from the two channel tables. */ /* init RF-dependent PHY registers */ for (i = 0; i < N(phyini); i++) { error = zyd_write16(sc, phyini[i].reg, phyini[i].val); if (error != 0) return error; } (void)zyd_read16(sc, ZYD_CR203, &tmp); (void)zyd_write16(sc, ZYD_CR203, tmp & ~(1 << 4)); /* first two values taken from the chantables */ (void)zyd_rfwrite(sc, rfprog[chan - 1].r1); (void)zyd_rfwrite(sc, rfprog[chan - 1].r2); /* init maxim radio - skipping the two first values */ for (i = 2; i < N(rfini); i++) { if ((error = zyd_rfwrite(sc, rfini[i])) != 0) return error; } (void)zyd_read16(sc, ZYD_CR203, &tmp); (void)zyd_write16(sc, ZYD_CR203, tmp | (1 << 4)); return 0; #undef N } /* * Maxim2 RF methods. */ static int zyd_maxim2_init(struct zyd_rf *rf) { #define N(a) (sizeof(a) / sizeof((a)[0])) struct zyd_softc *sc = rf->rf_sc; static const struct zyd_phy_pair phyini[] = ZYD_MAXIM2_PHY; static const uint32_t rfini[] = ZYD_MAXIM2_RF; uint16_t tmp; int i, error; /* init RF-dependent PHY registers */ for (i = 0; i < N(phyini); i++) { error = zyd_write16(sc, phyini[i].reg, phyini[i].val); if (error != 0) return error; } (void)zyd_read16(sc, ZYD_CR203, &tmp); (void)zyd_write16(sc, ZYD_CR203, tmp & ~(1 << 4)); /* init maxim2 radio */ for (i = 0; i < N(rfini); i++) { if ((error = zyd_rfwrite(sc, rfini[i])) != 0) return error; } (void)zyd_read16(sc, ZYD_CR203, &tmp); (void)zyd_write16(sc, ZYD_CR203, tmp | (1 << 4)); return 0; #undef N } static int zyd_maxim2_switch_radio(struct zyd_rf *rf, int on) { /* vendor driver does nothing for this RF chip */ return 0; } static int zyd_maxim2_set_channel(struct zyd_rf *rf, uint8_t chan) { #define N(a) (sizeof(a) / sizeof((a)[0])) struct zyd_softc *sc = rf->rf_sc; static const struct zyd_phy_pair phyini[] = ZYD_MAXIM2_PHY; static const uint32_t rfini[] = ZYD_MAXIM2_RF; static const struct { uint32_t r1, r2; } rfprog[] = ZYD_MAXIM2_CHANTABLE; uint16_t tmp; int i, error; /* * Do the same as we do when initializing it, except for the channel * values coming from the two channel tables. 
*/ /* init RF-dependent PHY registers */ for (i = 0; i < N(phyini); i++) { error = zyd_write16(sc, phyini[i].reg, phyini[i].val); if (error != 0) return error; } (void)zyd_read16(sc, ZYD_CR203, &tmp); (void)zyd_write16(sc, ZYD_CR203, tmp & ~(1 << 4)); /* first two values taken from the chantables */ (void)zyd_rfwrite(sc, rfprog[chan - 1].r1); (void)zyd_rfwrite(sc, rfprog[chan - 1].r2); /* init maxim2 radio - skipping the two first values */ for (i = 2; i < N(rfini); i++) { if ((error = zyd_rfwrite(sc, rfini[i])) != 0) return error; } (void)zyd_read16(sc, ZYD_CR203, &tmp); (void)zyd_write16(sc, ZYD_CR203, tmp | (1 << 4)); return 0; #undef N } static int zyd_rf_attach(struct zyd_softc *sc, uint8_t type) { struct zyd_rf *rf = &sc->sc_rf; rf->rf_sc = sc; switch (type) { case ZYD_RF_RFMD: rf->init = zyd_rfmd_init; rf->switch_radio = zyd_rfmd_switch_radio; rf->set_channel = zyd_rfmd_set_channel; rf->width = 24; /* 24-bit RF values */ break; case ZYD_RF_AL2230: if (sc->mac_rev == ZYD_ZD1211B) rf->init = zyd_al2230_init_b; else rf->init = zyd_al2230_init; rf->switch_radio = zyd_al2230_switch_radio; rf->set_channel = zyd_al2230_set_channel; rf->width = 24; /* 24-bit RF values */ break; case ZYD_RF_AL7230B: rf->init = zyd_al7230B_init; rf->switch_radio = zyd_al7230B_switch_radio; rf->set_channel = zyd_al7230B_set_channel; rf->width = 24; /* 24-bit RF values */ break; case ZYD_RF_AL2210: rf->init = zyd_al2210_init; rf->switch_radio = zyd_al2210_switch_radio; rf->set_channel = zyd_al2210_set_channel; rf->width = 24; /* 24-bit RF values */ break; case ZYD_RF_GCT: rf->init = zyd_gct_init; rf->switch_radio = zyd_gct_switch_radio; rf->set_channel = zyd_gct_set_channel; rf->width = 21; /* 21-bit RF values */ break; case ZYD_RF_MAXIM_NEW: rf->init = zyd_maxim_init; rf->switch_radio = zyd_maxim_switch_radio; rf->set_channel = zyd_maxim_set_channel; rf->width = 18; /* 18-bit RF values */ break; case ZYD_RF_MAXIM_NEW2: rf->init = zyd_maxim2_init; rf->switch_radio = zyd_maxim2_switch_radio; rf->set_channel = zyd_maxim2_set_channel; rf->width = 18; /* 18-bit RF values */ break; default: device_printf(sc->sc_dev, "sorry, radio \"%s\" is not supported yet\n", zyd_rf_name(type)); return EINVAL; } return 0; } static const char * zyd_rf_name(uint8_t type) { static const char * const zyd_rfs[] = { "unknown", "unknown", "UW2451", "UCHIP", "AL2230", "AL7230B", "THETA", "AL2210", "MAXIM_NEW", "GCT", "PV2000", "RALINK", "INTERSIL", "RFMD", "MAXIM_NEW2", "PHILIPS" }; return zyd_rfs[(type > 15) ? 0 : type]; } static int zyd_hw_init(struct zyd_softc *sc) { struct zyd_rf *rf = &sc->sc_rf; const struct zyd_phy_pair *phyp; uint32_t tmp; int error; /* specify that the plug and play is finished */ (void)zyd_write32(sc, ZYD_MAC_AFTER_PNP, 1); (void)zyd_read16(sc, ZYD_FIRMWARE_BASE_ADDR, &sc->fwbase); DPRINTF(("firmware base address=0x%04x\n", sc->fwbase)); /* retrieve firmware revision number */ (void)zyd_read16(sc, sc->fwbase + ZYD_FW_FIRMWARE_REV, &sc->fw_rev); (void)zyd_write32(sc, ZYD_CR_GPI_EN, 0); (void)zyd_write32(sc, ZYD_MAC_CONT_WIN_LIMIT, 0x7f043f); /* disable interrupts */ (void)zyd_write32(sc, ZYD_CR_INTERRUPT, 0); /* PHY init */ zyd_lock_phy(sc); phyp = (sc->mac_rev == ZYD_ZD1211B) ? 
zyd_def_phyB : zyd_def_phy; for (; phyp->reg != 0; phyp++) { if ((error = zyd_write16(sc, phyp->reg, phyp->val)) != 0) goto fail; } if (sc->fix_cr157) { if (zyd_read32(sc, ZYD_EEPROM_PHY_REG, &tmp) == 0) (void)zyd_write32(sc, ZYD_CR157, tmp >> 8); } zyd_unlock_phy(sc); /* HMAC init */ zyd_write32(sc, ZYD_MAC_ACK_EXT, 0x00000020); zyd_write32(sc, ZYD_CR_ADDA_MBIAS_WT, 0x30000808); if (sc->mac_rev == ZYD_ZD1211) { zyd_write32(sc, ZYD_MAC_RETRY, 0x00000002); } else { zyd_write32(sc, ZYD_MACB_MAX_RETRY, 0x02020202); zyd_write32(sc, ZYD_MACB_TXPWR_CTL4, 0x007f003f); zyd_write32(sc, ZYD_MACB_TXPWR_CTL3, 0x007f003f); zyd_write32(sc, ZYD_MACB_TXPWR_CTL2, 0x003f001f); zyd_write32(sc, ZYD_MACB_TXPWR_CTL1, 0x001f000f); zyd_write32(sc, ZYD_MACB_AIFS_CTL1, 0x00280028); zyd_write32(sc, ZYD_MACB_AIFS_CTL2, 0x008C003C); zyd_write32(sc, ZYD_MACB_TXOP, 0x01800824); } zyd_write32(sc, ZYD_MAC_SNIFFER, 0x00000000); zyd_write32(sc, ZYD_MAC_RXFILTER, 0x00000000); zyd_write32(sc, ZYD_MAC_GHTBL, 0x00000000); zyd_write32(sc, ZYD_MAC_GHTBH, 0x80000000); zyd_write32(sc, ZYD_MAC_MISC, 0x000000a4); zyd_write32(sc, ZYD_CR_ADDA_PWR_DWN, 0x0000007f); zyd_write32(sc, ZYD_MAC_BCNCFG, 0x00f00401); zyd_write32(sc, ZYD_MAC_PHY_DELAY2, 0x00000000); zyd_write32(sc, ZYD_MAC_ACK_EXT, 0x00000080); zyd_write32(sc, ZYD_CR_ADDA_PWR_DWN, 0x00000000); zyd_write32(sc, ZYD_MAC_SIFS_ACK_TIME, 0x00000100); zyd_write32(sc, ZYD_MAC_DIFS_EIFS_SIFS, 0x0547c032); zyd_write32(sc, ZYD_CR_RX_PE_DELAY, 0x00000070); zyd_write32(sc, ZYD_CR_PS_CTRL, 0x10000000); zyd_write32(sc, ZYD_MAC_RTSCTSRATE, 0x02030203); zyd_write32(sc, ZYD_MAC_RX_THRESHOLD, 0x000c0640); zyd_write32(sc, ZYD_MAC_BACKOFF_PROTECT, 0x00000114); /* RF chip init */ zyd_lock_phy(sc); error = (*rf->init)(rf); zyd_unlock_phy(sc); if (error != 0) { device_printf(sc->sc_dev, "radio initialization failed, error %d\n", error); goto fail; } /* init beacon interval to 100ms */ if ((error = zyd_set_beacon_interval(sc, 100)) != 0) goto fail; fail: return error; } static int zyd_read_eeprom(struct zyd_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t tmp; uint16_t val; int i; /* read MAC address */ (void)zyd_read32(sc, ZYD_EEPROM_MAC_ADDR_P1, &tmp); ic->ic_myaddr[0] = tmp & 0xff; ic->ic_myaddr[1] = tmp >> 8; ic->ic_myaddr[2] = tmp >> 16; ic->ic_myaddr[3] = tmp >> 24; (void)zyd_read32(sc, ZYD_EEPROM_MAC_ADDR_P2, &tmp); ic->ic_myaddr[4] = tmp & 0xff; ic->ic_myaddr[5] = tmp >> 8; (void)zyd_read32(sc, ZYD_EEPROM_POD, &tmp); sc->rf_rev = tmp & 0x0f; sc->fix_cr47 = (tmp >> 8 ) & 0x01; sc->fix_cr157 = (tmp >> 13) & 0x01; sc->pa_rev = (tmp >> 16) & 0x0f; /* read regulatory domain (currently unused) */ (void)zyd_read32(sc, ZYD_EEPROM_SUBID, &tmp); sc->regdomain = tmp >> 16; DPRINTF(("regulatory domain %x\n", sc->regdomain)); /* XXX propagate to net80211 after mapping to SKU */ /* read Tx power calibration tables */ for (i = 0; i < 7; i++) { (void)zyd_read16(sc, ZYD_EEPROM_PWR_CAL + i, &val); sc->pwr_cal[i * 2] = val >> 8; sc->pwr_cal[i * 2 + 1] = val & 0xff; (void)zyd_read16(sc, ZYD_EEPROM_PWR_INT + i, &val); sc->pwr_int[i * 2] = val >> 8; sc->pwr_int[i * 2 + 1] = val & 0xff; (void)zyd_read16(sc, ZYD_EEPROM_36M_CAL + i, &val); sc->ofdm36_cal[i * 2] = val >> 8; sc->ofdm36_cal[i * 2 + 1] = val & 0xff; (void)zyd_read16(sc, ZYD_EEPROM_48M_CAL + i, &val); sc->ofdm48_cal[i * 2] = val >> 8; sc->ofdm48_cal[i * 2 + 1] = val & 0xff; (void)zyd_read16(sc, ZYD_EEPROM_54M_CAL + i, &val); sc->ofdm54_cal[i * 2] = val >> 8; sc->ofdm54_cal[i * 2 + 1] = val & 0xff; } return 0; } 
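/*
 * Editorial sketch, not part of the original commit: the EEPROM loop in
 * zyd_read_eeprom() above packs two per-channel calibration bytes into
 * each 16-bit EEPROM word, high byte first.  A hypothetical helper that
 * unpacks one such 7-word table into a 14-entry per-channel array,
 * reusing the zyd_read16() accessor defined earlier, might look like
 * this.  The function name and signature are illustrative only.
 */
static int
zyd_read_cal_table(struct zyd_softc *sc, uint16_t base, uint8_t tbl[14])
{
	uint16_t val;
	int i, error;

	for (i = 0; i < 7; i++) {
		/* each 16-bit word holds two consecutive channel entries */
		if ((error = zyd_read16(sc, base + i, &val)) != 0)
			return error;
		tbl[i * 2] = val >> 8;		/* even channel index: high byte */
		tbl[i * 2 + 1] = val & 0xff;	/* odd channel index: low byte */
	}
	return 0;
}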
static int zyd_set_macaddr(struct zyd_softc *sc, const uint8_t *addr) { uint32_t tmp; tmp = addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]; (void)zyd_write32(sc, ZYD_MAC_MACADRL, tmp); tmp = addr[5] << 8 | addr[4]; (void)zyd_write32(sc, ZYD_MAC_MACADRH, tmp); return 0; } static int zyd_set_bssid(struct zyd_softc *sc, const uint8_t *addr) { uint32_t tmp; tmp = addr[3] << 24 | addr[2] << 16 | addr[1] << 8 | addr[0]; (void)zyd_write32(sc, ZYD_MAC_BSSADRL, tmp); tmp = addr[5] << 8 | addr[4]; (void)zyd_write32(sc, ZYD_MAC_BSSADRH, tmp); return 0; } static int zyd_switch_radio(struct zyd_softc *sc, int on) { struct zyd_rf *rf = &sc->sc_rf; int error; zyd_lock_phy(sc); error = (*rf->switch_radio)(rf, on); zyd_unlock_phy(sc); return error; } static void zyd_set_led(struct zyd_softc *sc, int which, int on) { uint32_t tmp; (void)zyd_read32(sc, ZYD_MAC_TX_PE_CONTROL, &tmp); tmp &= ~which; if (on) tmp |= which; (void)zyd_write32(sc, ZYD_MAC_TX_PE_CONTROL, tmp); } static void zyd_set_multi(void *arg) { struct zyd_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ifmultiaddr *ifma; uint32_t low, high; uint8_t v; if (!(ifp->if_flags & IFF_UP)) return; low = 0x00000000; high = 0x80000000; if (ic->ic_opmode == IEEE80211_M_MONITOR || (ifp->if_flags & (IFF_ALLMULTI | IFF_PROMISC))) { low = 0xffffffff; high = 0xffffffff; } else { IF_ADDR_LOCK(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; v = ((uint8_t *)LLADDR((struct sockaddr_dl *) ifma->ifma_addr))[5] >> 2; if (v < 32) low |= 1 << v; else high |= 1 << (v - 32); } IF_ADDR_UNLOCK(ifp); } /* reprogram multicast global hash table */ zyd_write32(sc, ZYD_MAC_GHTBL, low); zyd_write32(sc, ZYD_MAC_GHTBH, high); } static void zyd_update_mcast(struct ifnet *ifp) { struct zyd_softc *sc = ifp->if_softc; usb_add_task(sc->sc_udev, &sc->sc_mcasttask, USB_TASKQ_DRIVER); } static int zyd_set_rxfilter(struct zyd_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; uint32_t rxfilter; switch (ic->ic_opmode) { case IEEE80211_M_STA: rxfilter = ZYD_FILTER_BSS; break; case IEEE80211_M_IBSS: case IEEE80211_M_HOSTAP: rxfilter = ZYD_FILTER_HOSTAP; break; case IEEE80211_M_MONITOR: rxfilter = ZYD_FILTER_MONITOR; break; default: /* should not get there */ return EINVAL; } return zyd_write32(sc, ZYD_MAC_RXFILTER, rxfilter); } static void zyd_set_chan(struct zyd_softc *sc, struct ieee80211_channel *c) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct zyd_rf *rf = &sc->sc_rf; uint32_t tmp; u_int chan; chan = ieee80211_chan2ieee(ic, c); if (chan == 0 || chan == IEEE80211_CHAN_ANY) { /* XXX should NEVER happen */ device_printf(sc->sc_dev, "%s: invalid channel %x\n", __func__, chan); return; } zyd_lock_phy(sc); (*rf->set_channel)(rf, chan); /* update Tx power */ (void)zyd_write16(sc, ZYD_CR31, sc->pwr_int[chan - 1]); if (sc->mac_rev == ZYD_ZD1211B) { (void)zyd_write16(sc, ZYD_CR67, sc->ofdm36_cal[chan - 1]); (void)zyd_write16(sc, ZYD_CR66, sc->ofdm48_cal[chan - 1]); (void)zyd_write16(sc, ZYD_CR65, sc->ofdm54_cal[chan - 1]); (void)zyd_write16(sc, ZYD_CR68, sc->pwr_cal[chan - 1]); (void)zyd_write16(sc, ZYD_CR69, 0x28); (void)zyd_write16(sc, ZYD_CR69, 0x2a); } if (sc->fix_cr47) { /* set CCK baseband gain from EEPROM */ if (zyd_read32(sc, ZYD_EEPROM_PHY_REG, &tmp) == 0) (void)zyd_write16(sc, ZYD_CR47, tmp & 0xff); } (void)zyd_write32(sc, ZYD_CR_CONFIG_PHILIPS, 0); zyd_unlock_phy(sc); sc->sc_rxtap.wr_chan_freq = 
sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); sc->sc_rxtap.wr_chan_flags = sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); } static int zyd_set_beacon_interval(struct zyd_softc *sc, int bintval) { /* XXX this is probably broken.. */ (void)zyd_write32(sc, ZYD_CR_ATIM_WND_PERIOD, bintval - 2); (void)zyd_write32(sc, ZYD_CR_PRE_TBTT, bintval - 1); (void)zyd_write32(sc, ZYD_CR_BCN_INTERVAL, bintval); return 0; } static void zyd_intr(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status) { struct zyd_softc *sc = (struct zyd_softc *)priv; struct zyd_cmd *cmd; uint32_t datalen; if (status != USBD_NORMAL_COMPLETION) { if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) return; if (status == USBD_STALLED) { usbd_clear_endpoint_stall_async( sc->zyd_ep[ZYD_ENDPT_IIN]); } return; } cmd = (struct zyd_cmd *)sc->ibuf; if (le16toh(cmd->code) == ZYD_NOTIF_RETRYSTATUS) { struct zyd_notif_retry *retry = (struct zyd_notif_retry *)cmd->data; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct ieee80211_node *ni; DPRINTF(("retry intr: rate=0x%x addr=%s count=%d (0x%x)\n", le16toh(retry->rate), ether_sprintf(retry->macaddr), le16toh(retry->count) & 0xff, le16toh(retry->count))); /* * Find the node to which the packet was sent and update its * retry statistics. In BSS mode, this node is the AP we're * associated to so no lookup is actually needed. */ ni = ieee80211_find_txnode(vap, retry->macaddr); if (ni != NULL) { ieee80211_amrr_tx_complete(&ZYD_NODE(ni)->amn, IEEE80211_AMRR_FAILURE, 1); ieee80211_free_node(ni); } if (le16toh(retry->count) & 0x100) ifp->if_oerrors++; /* too many retries */ } else if (le16toh(cmd->code) == ZYD_NOTIF_IORD) { struct rq *rqp; if (le16toh(*(uint16_t *)cmd->data) == ZYD_CR_INTERRUPT) return; /* HMAC interrupt */ usbd_get_xfer_status(xfer, NULL, NULL, &datalen, NULL); datalen -= sizeof(cmd->code); datalen -= 2; /* XXX: padding? 
*/ STAILQ_FOREACH(rqp, &sc->sc_rqh, rq) { int i; if (sizeof(struct zyd_pair) * rqp->len != datalen) continue; for (i = 0; i < rqp->len; i++) { if (*(((const uint16_t *)rqp->idata) + i) != (((struct zyd_pair *)cmd->data) + i)->reg) break; } if (i != rqp->len) continue; /* copy answer into caller-supplied buffer */ bcopy(cmd->data, rqp->odata, sizeof(struct zyd_pair) * rqp->len); wakeup(rqp->odata); /* wakeup caller */ return; } return; /* unexpected IORD notification */ } else { device_printf(sc->sc_dev, "unknown notification %x\n", le16toh(cmd->code)); } } static void zyd_rx_data(struct zyd_softc *sc, const uint8_t *buf, uint16_t len) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211_node *ni; const struct zyd_plcphdr *plcp; const struct zyd_rx_stat *stat; struct mbuf *m; int rlen, rssi, nf; if (len < ZYD_MIN_FRAGSZ) { DPRINTF(("%s: frame too short (length=%d)\n", device_get_nameunit(sc->sc_dev), len)); ifp->if_ierrors++; return; } plcp = (const struct zyd_plcphdr *)buf; stat = (const struct zyd_rx_stat *) (buf + len - sizeof(struct zyd_rx_stat)); if (stat->flags & ZYD_RX_ERROR) { DPRINTF(("%s: RX status indicated error (%x)\n", device_get_nameunit(sc->sc_dev), stat->flags)); ifp->if_ierrors++; return; } /* compute actual frame length */ rlen = len - sizeof(struct zyd_plcphdr) - sizeof(struct zyd_rx_stat) - IEEE80211_CRC_LEN; /* allocate a mbuf to store the frame */ if (rlen > MHLEN) m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); else m = m_gethdr(M_DONTWAIT, MT_DATA); if (m == NULL) { DPRINTF(("%s: could not allocate rx mbuf\n", device_get_nameunit(sc->sc_dev))); ifp->if_ierrors++; return; } m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = rlen; bcopy((const uint8_t *)(plcp + 1), mtod(m, uint8_t *), rlen); if (bpf_peers_present(ifp->if_bpf)) { struct zyd_rx_radiotap_header *tap = &sc->sc_rxtap; tap->wr_flags = 0; if (stat->flags & (ZYD_RX_BADCRC16 | ZYD_RX_BADCRC32)) tap->wr_flags |= IEEE80211_RADIOTAP_F_BADFCS; /* XXX toss, no way to express errors */ if (stat->flags & ZYD_RX_DECRYPTERR) tap->wr_flags |= IEEE80211_RADIOTAP_F_BADFCS; tap->wr_rate = ieee80211_plcp2rate(plcp->signal, (stat->flags & ZYD_RX_OFDM) ? IEEE80211_T_OFDM : IEEE80211_T_CCK); tap->wr_antsignal = stat->rssi + -95; tap->wr_antnoise = -95; /* XXX */ bpf_mtap2(ifp->if_bpf, tap, sc->sc_rxtap_len, m); } rssi = stat->rssi > 63 ? 
127 : 2 * stat->rssi; nf = -95; /* XXX */ ni = ieee80211_find_rxnode(ic, mtod(m, struct ieee80211_frame_min *)); if (ni != NULL) { (void) ieee80211_input(ni, m, rssi, nf, 0); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi, nf, 0); } static void zyd_rxeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status) { struct zyd_rx_data *data = priv; struct zyd_softc *sc = data->sc; struct ifnet *ifp = sc->sc_ifp; const struct zyd_rx_desc *desc; int len; if (status != USBD_NORMAL_COMPLETION) { if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) return; if (status == USBD_STALLED) usbd_clear_endpoint_stall(sc->zyd_ep[ZYD_ENDPT_BIN]); goto skip; } usbd_get_xfer_status(xfer, NULL, NULL, &len, NULL); if (len < ZYD_MIN_RXBUFSZ) { DPRINTFN(3, ("%s: xfer too short (length=%d)\n", device_get_nameunit(sc->sc_dev), len)); ifp->if_ierrors++; /* XXX not really errors */ goto skip; } desc = (const struct zyd_rx_desc *) (data->buf + len - sizeof(struct zyd_rx_desc)); if (UGETW(desc->tag) == ZYD_TAG_MULTIFRAME) { const uint8_t *p = data->buf, *end = p + len; int i; DPRINTFN(3, ("received multi-frame transfer\n")); for (i = 0; i < ZYD_MAX_RXFRAMECNT; i++) { const uint16_t len16 = UGETW(desc->len[i]); if (len16 == 0 || p + len16 > end) break; zyd_rx_data(sc, p, len16); /* next frame is aligned on a 32-bit boundary */ p += (len16 + 3) & ~3; } } else { DPRINTFN(3, ("received single-frame transfer\n")); zyd_rx_data(sc, data->buf, len); } skip: /* setup a new transfer */ usbd_setup_xfer(xfer, sc->zyd_ep[ZYD_ENDPT_BIN], data, NULL, ZYX_MAX_RXBUFSZ, USBD_NO_COPY | USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, zyd_rxeof); (void)usbd_transfer(xfer); } static uint8_t zyd_plcp_signal(int rate) { switch (rate) { /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */ case 12: return 0xb; case 18: return 0xf; case 24: return 0xa; case 36: return 0xe; case 48: return 0x9; case 72: return 0xd; case 96: return 0x8; case 108: return 0xc; /* CCK rates (NB: not IEEE std, device-specific) */ case 2: return 0x0; case 4: return 0x1; case 11: return 0x2; case 22: return 0x3; } return 0xff; /* XXX unsupported/unknown rate */ } static int zyd_tx_mgt(struct zyd_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = sc->sc_ifp; struct zyd_tx_desc *desc; struct zyd_tx_data *data; struct ieee80211_frame *wh; struct ieee80211_key *k; int xferlen, totlen, rate; uint16_t pktlen; usbd_status error; data = &sc->tx_data[0]; desc = (struct zyd_tx_desc *)data->buf; rate = IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan) ? 
12 : 2; wh = mtod(m0, struct ieee80211_frame *); if (wh->i_fc[1] & IEEE80211_FC1_WEP) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } } data->ni = ni; data->m = m0; wh = mtod(m0, struct ieee80211_frame *); xferlen = sizeof(struct zyd_tx_desc) + m0->m_pkthdr.len; totlen = m0->m_pkthdr.len + IEEE80211_CRC_LEN; /* fill Tx descriptor */ desc->len = htole16(totlen); desc->flags = ZYD_TX_FLAG_BACKOFF; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { /* multicast frames are not sent at OFDM rates in 802.11b/g */ if (totlen > vap->iv_rtsthreshold) { desc->flags |= ZYD_TX_FLAG_RTS; } else if (ZYD_RATE_IS_OFDM(rate) && (ic->ic_flags & IEEE80211_F_USEPROT)) { if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) desc->flags |= ZYD_TX_FLAG_CTS_TO_SELF; else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) desc->flags |= ZYD_TX_FLAG_RTS; } } else desc->flags |= ZYD_TX_FLAG_MULTICAST; if ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_PS_POLL)) desc->flags |= ZYD_TX_FLAG_TYPE(ZYD_TX_TYPE_PS_POLL); desc->phy = zyd_plcp_signal(rate); if (ZYD_RATE_IS_OFDM(rate)) { desc->phy |= ZYD_TX_PHY_OFDM; if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) desc->phy |= ZYD_TX_PHY_5GHZ; } else if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) desc->phy |= ZYD_TX_PHY_SHPREAMBLE; /* actual transmit length (XXX why +10?) */ pktlen = sizeof(struct zyd_tx_desc) + 10; if (sc->mac_rev == ZYD_ZD1211) pktlen += totlen; desc->pktlen = htole16(pktlen); desc->plcp_length = (16 * totlen + rate - 1) / rate; desc->plcp_service = 0; if (rate == 22) { const int remainder = (16 * totlen) % 22; if (remainder != 0 && remainder < 7) desc->plcp_service |= ZYD_PLCP_LENGEXT; } if (bpf_peers_present(ifp->if_bpf)) { struct zyd_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; bpf_mtap2(ifp->if_bpf, tap, sc->sc_txtap_len, m0); } m_copydata(m0, 0, m0->m_pkthdr.len, data->buf + sizeof(struct zyd_tx_desc)); DPRINTFN(10, ("%s: sending mgt frame len=%zu rate=%u xferlen=%u\n", device_get_nameunit(sc->sc_dev), (size_t)m0->m_pkthdr.len, rate, xferlen)); usbd_setup_xfer(data->xfer, sc->zyd_ep[ZYD_ENDPT_BOUT], data, data->buf, xferlen, USBD_FORCE_SHORT_XFER | USBD_NO_COPY, ZYD_TX_TIMEOUT, zyd_txeof); error = usbd_transfer(data->xfer); if (error != USBD_IN_PROGRESS && error != 0) { ifp->if_oerrors++; return EIO; } sc->tx_queued++; return 0; } static void zyd_txeof(usbd_xfer_handle xfer, usbd_private_handle priv, usbd_status status) { struct zyd_tx_data *data = priv; struct zyd_softc *sc = data->sc; struct ifnet *ifp = sc->sc_ifp; struct ieee80211_node *ni; struct mbuf *m; if (status != USBD_NORMAL_COMPLETION) { if (status == USBD_NOT_STARTED || status == USBD_CANCELLED) return; device_printf(sc->sc_dev, "could not transmit buffer: %s\n", usbd_errstr(status)); if (status == USBD_STALLED) { usbd_clear_endpoint_stall_async( sc->zyd_ep[ZYD_ENDPT_BOUT]); } ifp->if_oerrors++; return; } ni = data->ni; /* update rate control statistics */ ieee80211_amrr_tx_complete(&ZYD_NODE(ni)->amn, IEEE80211_AMRR_SUCCESS, 0); /* * Do any tx complete callback. Note this must * be done before releasing the node reference. */ m = data->m; if (m != NULL && m->m_flags & M_TXCB) { ieee80211_process_callback(ni, m, 0); /* XXX status? 
*/ m_freem(m); data->m = NULL; } ieee80211_free_node(ni); data->ni = NULL; sc->tx_queued--; ifp->if_opackets++; sc->tx_timer = 0; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; zyd_start(ifp); } static int zyd_tx_data(struct zyd_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = sc->sc_ifp; struct zyd_tx_desc *desc; struct zyd_tx_data *data; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp; struct ieee80211_key *k; int xferlen, totlen, rate; uint16_t pktlen; usbd_status error; wh = mtod(m0, struct ieee80211_frame *); data = &sc->tx_data[0]; desc = (struct zyd_tx_desc *)data->buf; desc->flags = ZYD_TX_FLAG_BACKOFF; tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { rate = tp->mcastrate; desc->flags |= ZYD_TX_FLAG_MULTICAST; } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) { rate = tp->ucastrate; } else { (void) ieee80211_amrr_choose(ni, &ZYD_NODE(ni)->amn); rate = ni->ni_txrate; } if (wh->i_fc[1] & IEEE80211_FC1_WEP) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } data->ni = ni; data->m = NULL; xferlen = sizeof(struct zyd_tx_desc) + m0->m_pkthdr.len; totlen = m0->m_pkthdr.len + IEEE80211_CRC_LEN; /* fill Tx descriptor */ desc->len = htole16(totlen); if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { /* multicast frames are not sent at OFDM rates in 802.11b/g */ if (totlen > vap->iv_rtsthreshold) { desc->flags |= ZYD_TX_FLAG_RTS; } else if (ZYD_RATE_IS_OFDM(rate) && (ic->ic_flags & IEEE80211_F_USEPROT)) { if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) desc->flags |= ZYD_TX_FLAG_CTS_TO_SELF; else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) desc->flags |= ZYD_TX_FLAG_RTS; } } if ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_PS_POLL)) desc->flags |= ZYD_TX_FLAG_TYPE(ZYD_TX_TYPE_PS_POLL); desc->phy = zyd_plcp_signal(rate); if (ZYD_RATE_IS_OFDM(rate)) { desc->phy |= ZYD_TX_PHY_OFDM; if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) desc->phy |= ZYD_TX_PHY_5GHZ; } else if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) desc->phy |= ZYD_TX_PHY_SHPREAMBLE; /* actual transmit length (XXX why +10?) 
*/ pktlen = sizeof(struct zyd_tx_desc) + 10; if (sc->mac_rev == ZYD_ZD1211) pktlen += totlen; desc->pktlen = htole16(pktlen); desc->plcp_length = (16 * totlen + rate - 1) / rate; desc->plcp_service = 0; if (rate == 22) { const int remainder = (16 * totlen) % 22; if (remainder != 0 && remainder < 7) desc->plcp_service |= ZYD_PLCP_LENGEXT; } if (bpf_peers_present(ifp->if_bpf)) { struct zyd_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; tap->wt_chan_freq = htole16(ic->ic_curchan->ic_freq); tap->wt_chan_flags = htole16(ic->ic_curchan->ic_flags); bpf_mtap2(ifp->if_bpf, tap, sc->sc_txtap_len, m0); } m_copydata(m0, 0, m0->m_pkthdr.len, data->buf + sizeof(struct zyd_tx_desc)); DPRINTFN(10, ("%s: sending data frame len=%zu rate=%u xferlen=%u\n", device_get_nameunit(sc->sc_dev), (size_t)m0->m_pkthdr.len, rate, xferlen)); m_freem(m0); /* mbuf no longer needed */ usbd_setup_xfer(data->xfer, sc->zyd_ep[ZYD_ENDPT_BOUT], data, data->buf, xferlen, USBD_FORCE_SHORT_XFER | USBD_NO_COPY, ZYD_TX_TIMEOUT, zyd_txeof); error = usbd_transfer(data->xfer); if (error != USBD_IN_PROGRESS && error != 0) { ifp->if_oerrors++; return EIO; } sc->tx_queued++; return 0; } static void zyd_start(struct ifnet *ifp) { struct zyd_softc *sc = ifp->if_softc; struct ieee80211_node *ni; struct mbuf *m; for (;;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; if (sc->tx_queued >= ZYD_TX_LIST_CNT) { IFQ_DRV_PREPEND(&ifp->if_snd, m); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; m = ieee80211_encap(ni, m); if (m == NULL) { ieee80211_free_node(ni); ifp->if_oerrors++; continue; } if (zyd_tx_data(sc, m, ni) != 0) { ieee80211_free_node(ni); ifp->if_oerrors++; break; } sc->tx_timer = 5; callout_reset(&sc->sc_watchdog_ch, hz, zyd_watchdog, sc); } } static int zyd_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = ic->ic_ifp; struct zyd_softc *sc = ifp->if_softc; /* prevent management frames from being sent if we're not ready */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { m_freem(m); ieee80211_free_node(ni); return ENETDOWN; } if (sc->tx_queued >= ZYD_TX_LIST_CNT) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; m_freem(m); ieee80211_free_node(ni); return ENOBUFS; /* XXX */ } ifp->if_opackets++; /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. * XXX raw path */ if (zyd_tx_mgt(sc, m, ni) != 0) goto bad; sc->tx_timer = 5; callout_reset(&sc->sc_watchdog_ch, hz, zyd_watchdog, sc); return 0; bad: ifp->if_oerrors++; ieee80211_free_node(ni); return EIO; /* XXX */ } static void zyd_watchdog(void *arg) { struct zyd_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; if (sc->tx_timer > 0) { if (--sc->tx_timer == 0) { device_printf(sc->sc_dev, "device timeout\n"); /* zyd_init(ifp); XXX needs a process context ? 
*/ ifp->if_oerrors++; return; } callout_reset(&sc->sc_watchdog_ch, hz, zyd_watchdog, sc); } } static int zyd_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct zyd_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct ifreq *ifr = (struct ifreq *) data; int error = 0, startall = 0; switch (cmd) { case SIOCSIFFLAGS: ZYD_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { if ((ifp->if_flags ^ sc->sc_if_flags) & (IFF_ALLMULTI | IFF_PROMISC)) zyd_set_multi(sc); } else { zyd_init_locked(sc); startall = 1; } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) zyd_stop(sc, 1); } sc->sc_if_flags = ifp->if_flags; ZYD_UNLOCK(sc); if (startall) ieee80211_start_all(ic); break; case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; case SIOCGIFADDR: error = ether_ioctl(ifp, cmd, data); break; default: error = EINVAL; break; } return error; } static void zyd_init_locked(struct zyd_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; int i, error; zyd_stop(sc, 0); IEEE80211_ADDR_COPY(ic->ic_myaddr, IF_LLADDR(ifp)); DPRINTF(("setting MAC address to %s\n", ether_sprintf(ic->ic_myaddr))); error = zyd_set_macaddr(sc, ic->ic_myaddr); if (error != 0) return; /* we'll do software WEP decryption for now */ DPRINTF(("setting encryption type\n")); error = zyd_write32(sc, ZYD_MAC_ENCRYPTION_TYPE, ZYD_ENC_SNIFFER); if (error != 0) return; /* promiscuous mode */ (void)zyd_write32(sc, ZYD_MAC_SNIFFER, 0); /* multicast setup */ (void)zyd_set_multi(sc); (void)zyd_set_rxfilter(sc); /* switch radio transmitter ON */ (void)zyd_switch_radio(sc, 1); /* XXX wrong, can't set here */ /* set basic rates */ if (ic->ic_curmode == IEEE80211_MODE_11B) (void)zyd_write32(sc, ZYD_MAC_BAS_RATE, 0x0003); else if (ic->ic_curmode == IEEE80211_MODE_11A) (void)zyd_write32(sc, ZYD_MAC_BAS_RATE, 0x1500); else /* assumes 802.11b/g */ (void)zyd_write32(sc, ZYD_MAC_BAS_RATE, 0x000f); /* set mandatory rates */ if (ic->ic_curmode == IEEE80211_MODE_11B) (void)zyd_write32(sc, ZYD_MAC_MAN_RATE, 0x000f); else if (ic->ic_curmode == IEEE80211_MODE_11A) (void)zyd_write32(sc, ZYD_MAC_MAN_RATE, 0x1500); else /* assumes 802.11b/g */ (void)zyd_write32(sc, ZYD_MAC_MAN_RATE, 0x150f); /* set default BSS channel */ zyd_set_chan(sc, ic->ic_curchan); /* enable interrupts */ (void)zyd_write32(sc, ZYD_CR_INTERRUPT, ZYD_HWINT_MASK); /* * Allocate Tx and Rx xfer queues. */ if ((error = zyd_alloc_tx_list(sc)) != 0) { device_printf(sc->sc_dev, "could not allocate Tx list\n"); goto fail; } if ((error = zyd_alloc_rx_list(sc)) != 0) { device_printf(sc->sc_dev, "could not allocate Rx list\n"); goto fail; } /* * Start up the receive pipe. 
*/ for (i = 0; i < ZYD_RX_LIST_CNT; i++) { struct zyd_rx_data *data = &sc->rx_data[i]; usbd_setup_xfer(data->xfer, sc->zyd_ep[ZYD_ENDPT_BIN], data, NULL, ZYX_MAX_RXBUFSZ, USBD_NO_COPY | USBD_SHORT_XFER_OK, USBD_NO_TIMEOUT, zyd_rxeof); error = usbd_transfer(data->xfer); if (error != USBD_IN_PROGRESS && error != 0) { device_printf(sc->sc_dev, "could not queue Rx transfer\n"); goto fail; } } ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_drv_flags |= IFF_DRV_RUNNING; return; fail: zyd_stop(sc, 1); return; } static void zyd_init(void *priv) { struct zyd_softc *sc = priv; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; ZYD_LOCK(sc); zyd_init_locked(sc); ZYD_UNLOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) ieee80211_start_all(ic); /* start all vap's */ } static void zyd_stop(struct zyd_softc *sc, int disable) { struct ifnet *ifp = sc->sc_ifp; sc->tx_timer = 0; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); /* switch radio transmitter OFF */ (void)zyd_switch_radio(sc, 0); /* disable Rx */ (void)zyd_write32(sc, ZYD_MAC_RXFILTER, 0); /* disable interrupts */ (void)zyd_write32(sc, ZYD_CR_INTERRUPT, 0); usbd_abort_pipe(sc->zyd_ep[ZYD_ENDPT_BIN]); usbd_abort_pipe(sc->zyd_ep[ZYD_ENDPT_BOUT]); zyd_free_rx_list(sc); zyd_free_tx_list(sc); } static int zyd_loadfirmware(struct zyd_softc *sc, u_char *fw, size_t size) { usb_device_request_t req; uint16_t addr; uint8_t stat; DPRINTF(("firmware size=%zu\n", size)); req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = ZYD_DOWNLOADREQ; USETW(req.wIndex, 0); addr = ZYD_FIRMWARE_START_ADDR; while (size > 0) { #if 0 const int mlen = min(size, 4096); #else /* * XXXX: When the transfer size is 4096 bytes, it is not * likely to be able to transfer it. * The cause is port or machine or chip? */ const int mlen = min(size, 64); #endif DPRINTF(("loading firmware block: len=%d, addr=0x%x\n", mlen, addr)); USETW(req.wValue, addr); USETW(req.wLength, mlen); if (usbd_do_request(sc->sc_udev, &req, fw) != 0) return EIO; addr += mlen / 2; fw += mlen; size -= mlen; } /* check whether the upload succeeded */ req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = ZYD_DOWNLOADSTS; USETW(req.wValue, 0); USETW(req.wIndex, 0); USETW(req.wLength, sizeof(stat)); if (usbd_do_request(sc->sc_udev, &req, &stat) != 0) return EIO; return (stat & 0x80) ? 
EIO : 0; } static void zyd_newassoc(struct ieee80211_node *ni, int isnew) { struct ieee80211vap *vap = ni->ni_vap; ieee80211_amrr_node_init(&ZYD_VAP(vap)->amrr, &ZYD_NODE(ni)->amn, ni); } static void zyd_scan_start(struct ieee80211com *ic) { struct zyd_softc *sc = ic->ic_ifp->if_softc; usb_rem_task(sc->sc_udev, &sc->sc_scantask); /* do it in a process context */ sc->sc_scan_action = ZYD_SCAN_START; usb_add_task(sc->sc_udev, &sc->sc_scantask, USB_TASKQ_DRIVER); } static void zyd_scan_end(struct ieee80211com *ic) { struct zyd_softc *sc = ic->ic_ifp->if_softc; usb_rem_task(sc->sc_udev, &sc->sc_scantask); /* do it in a process context */ sc->sc_scan_action = ZYD_SCAN_END; usb_add_task(sc->sc_udev, &sc->sc_scantask, USB_TASKQ_DRIVER); } static void zyd_set_channel(struct ieee80211com *ic) { struct zyd_softc *sc = ic->ic_ifp->if_softc; usb_rem_task(sc->sc_udev, &sc->sc_scantask); /* do it in a process context */ sc->sc_scan_action = ZYD_SET_CHANNEL; usb_add_task(sc->sc_udev, &sc->sc_scantask, USB_TASKQ_DRIVER); } static void zyd_scantask(void *arg) { struct zyd_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; ZYD_LOCK(sc); switch (sc->sc_scan_action) { case ZYD_SCAN_START: /* want broadcast address while scanning */ zyd_set_bssid(sc, ifp->if_broadcastaddr); break; case ZYD_SCAN_END: /* restore previous bssid */ zyd_set_bssid(sc, sc->sc_bssid); break; case ZYD_SET_CHANNEL: mtx_lock(&Giant); zyd_set_chan(sc, ic->ic_curchan); mtx_unlock(&Giant); break; default: device_printf(sc->sc_dev, "unknown scan action %d\n", sc->sc_scan_action); break; } ZYD_UNLOCK(sc); } static device_method_t zyd_methods[] = { /* Device interface */ DEVMETHOD(device_probe, zyd_match), DEVMETHOD(device_attach, zyd_attach), DEVMETHOD(device_detach, zyd_detach), { 0, 0 } }; static driver_t zyd_driver = { "zyd", zyd_methods, sizeof(struct zyd_softc) }; static devclass_t zyd_devclass; DRIVER_MODULE(zyd, uhub, zyd_driver, zyd_devclass, usbd_driver_load, 0); MODULE_DEPEND(zyd, wlan, 1, 1, 1); MODULE_DEPEND(zyd, wlan_amrr, 1, 1, 1); MODULE_DEPEND(zyd, usb, 1, 1, 1); diff --git a/sys/dev/wpi/if_wpi.c b/sys/dev/wpi/if_wpi.c index 959e20c3aa4b..8257e87749de 100644 --- a/sys/dev/wpi/if_wpi.c +++ b/sys/dev/wpi/if_wpi.c @@ -1,3835 +1,3837 @@ /*- * Copyright (c) 2006,2007 * Damien Bergamini * Benjamin Close * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #define VERSION "20071127" #include __FBSDID("$FreeBSD$"); /* * Driver for Intel PRO/Wireless 3945ABG 802.11 network adapters. * * The 3945ABG network adapter doesn't use traditional hardware as * many other adaptors do. Instead at run time the eeprom is set into a known * state and told to load boot firmware. The boot firmware loads an init and a * main binary firmware image into SRAM on the card via DMA. 
 * Once the firmware is loaded, the driver/hw then * communicate by way of circular dma rings via the SRAM to the firmware. * * There are 6 memory rings: 1 command ring, 1 rx data ring & 4 tx data rings. * The 4 tx data rings allow for prioritization (QoS). * * The rx data ring consists of 32 dma buffers. Two registers are used to * indicate where in the ring the driver and the firmware are up to. The * driver sets the initial read index (reg1) and the initial write index (reg2), * the firmware updates the read index (reg1) on rx of a packet and fires an * interrupt. The driver then processes the buffers starting at reg1, indicating * to the firmware which buffers have been accessed by updating reg2. At the * same time it allocates new memory for the processed buffer. * * A similar thing happens with the tx rings. The difference is that the firmware * stops processing buffers once the queue is full, until confirmation * of a successful transmission (tx_intr) has occurred. * * The command ring operates in the same manner as the tx queues. * * All communication direct to the card (ie eeprom) is classed as Stage1 * communication. * * All communication via the firmware to the card is classed as Stage2. * The firmware consists of 2 parts: a bootstrap firmware and a runtime * firmware. The bootstrap firmware and runtime firmware are loaded * from host memory via dma to the card and then told to execute. From this point * on the majority of communication between the driver and the card goes * via the firmware. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define WPI_DEBUG #ifdef WPI_DEBUG #define DPRINTF(x) do { if (wpi_debug != 0) printf x; } while (0) #define DPRINTFN(n, x) do { if (wpi_debug & n) printf x; } while (0) enum { WPI_DEBUG_UNUSED = 0x00000001, /* Unused */ WPI_DEBUG_HW = 0x00000002, /* Stage 1 (eeprom) debugging */ WPI_DEBUG_TX = 0x00000004, /* Stage 2 TX intrp debugging*/ WPI_DEBUG_RX = 0x00000008, /* Stage 2 RX intrp debugging */ WPI_DEBUG_CMD = 0x00000010, /* Stage 2 CMD intrp debugging*/ WPI_DEBUG_FIRMWARE = 0x00000020, /* firmware(9) loading debug */ WPI_DEBUG_DMA = 0x00000040, /* DMA (de)allocations/syncs */ WPI_DEBUG_SCANNING = 0x00000080, /* Stage 2 Scanning debugging */ WPI_DEBUG_NOTIFY = 0x00000100, /* Stage 2 Notif intr debug */ WPI_DEBUG_TEMP = 0x00000200, /* TXPower/Temp Calibration */ WPI_DEBUG_OPS = 0x00000400, /* wpi_ops taskq debug */ WPI_DEBUG_WATCHDOG = 0x00000800, /* Watch dog debug */ WPI_DEBUG_ANY = 0xffffffff }; int wpi_debug = 0; SYSCTL_INT(_debug, OID_AUTO, wpi, CTLFLAG_RW, &wpi_debug, 0, "wpi debug level"); #else #define DPRINTF(x) #define DPRINTFN(n, x) #endif struct wpi_ident { uint16_t vendor; uint16_t device; uint16_t subdevice; const char *name; }; static const struct wpi_ident wpi_ident_table[] = { /* The below entries support ABG regardless of the subid */ { 0x8086, 0x4222, 0x0, "Intel(R) PRO/Wireless 3945ABG" }, { 0x8086, 0x4227, 0x0, "Intel(R) PRO/Wireless 3945ABG" }, /* The below entries only support BG */ { 0x8086, 0x4222, 0x1005, "Intel(R) PRO/Wireless 3945AB" }, { 0x8086, 0x4222, 0x1034, "Intel(R) PRO/Wireless 3945AB" }, { 0x8086, 0x4222, 0x1014, "Intel(R) PRO/Wireless 3945AB" }, { 0x8086, 0x4222, 0x1044, "Intel(R) PRO/Wireless 3945AB" }, { 0, 0, 0, NULL } }; static
struct ieee80211vap *wpi_vap_create(struct ieee80211com *, const char name[IFNAMSIZ], int unit, int opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]); static void wpi_vap_delete(struct ieee80211vap *); static int wpi_dma_contig_alloc(struct wpi_softc *, struct wpi_dma_info *, void **, bus_size_t, bus_size_t, int); static void wpi_dma_contig_free(struct wpi_dma_info *); static void wpi_dma_map_addr(void *, bus_dma_segment_t *, int, int); static int wpi_alloc_shared(struct wpi_softc *); static void wpi_free_shared(struct wpi_softc *); static int wpi_alloc_rx_ring(struct wpi_softc *, struct wpi_rx_ring *); static void wpi_reset_rx_ring(struct wpi_softc *, struct wpi_rx_ring *); static void wpi_free_rx_ring(struct wpi_softc *, struct wpi_rx_ring *); static int wpi_alloc_tx_ring(struct wpi_softc *, struct wpi_tx_ring *, int, int); static void wpi_reset_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); static void wpi_free_tx_ring(struct wpi_softc *, struct wpi_tx_ring *); -static struct ieee80211_node *wpi_node_alloc(struct ieee80211_node_table *); +static struct ieee80211_node *wpi_node_alloc(struct ieee80211vap *, + const uint8_t mac[IEEE80211_ADDR_LEN]); static int wpi_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void wpi_mem_lock(struct wpi_softc *); static void wpi_mem_unlock(struct wpi_softc *); static uint32_t wpi_mem_read(struct wpi_softc *, uint16_t); static void wpi_mem_write(struct wpi_softc *, uint16_t, uint32_t); static void wpi_mem_write_region_4(struct wpi_softc *, uint16_t, const uint32_t *, int); static uint16_t wpi_read_prom_data(struct wpi_softc *, uint32_t, void *, int); static int wpi_alloc_fwmem(struct wpi_softc *); static void wpi_free_fwmem(struct wpi_softc *); static int wpi_load_firmware(struct wpi_softc *); static void wpi_unload_firmware(struct wpi_softc *); static int wpi_load_microcode(struct wpi_softc *, const uint8_t *, int); static void wpi_rx_intr(struct wpi_softc *, struct wpi_rx_desc *, struct wpi_rx_data *); static void wpi_tx_intr(struct wpi_softc *, struct wpi_rx_desc *); static void wpi_cmd_intr(struct wpi_softc *, struct wpi_rx_desc *); static void wpi_bmiss(void *, int); static void wpi_notif_intr(struct wpi_softc *); static void wpi_intr(void *); static void wpi_ops(void *, int); static uint8_t wpi_plcp_signal(int); static int wpi_queue_cmd(struct wpi_softc *, int, int, int); static void wpi_watchdog(void *); static int wpi_tx_data(struct wpi_softc *, struct mbuf *, struct ieee80211_node *, int); static void wpi_start(struct ifnet *); static void wpi_start_locked(struct ifnet *); static int wpi_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void wpi_scan_start(struct ieee80211com *); static void wpi_scan_end(struct ieee80211com *); static void wpi_set_channel(struct ieee80211com *); static void wpi_scan_curchan(struct ieee80211_scan_state *, unsigned long); static void wpi_scan_mindwell(struct ieee80211_scan_state *); static int wpi_ioctl(struct ifnet *, u_long, caddr_t); static void wpi_read_eeprom(struct wpi_softc *); static void wpi_read_eeprom_channels(struct wpi_softc *, int); static void wpi_read_eeprom_group(struct wpi_softc *, int); static int wpi_cmd(struct wpi_softc *, int, const void *, int, int); static int wpi_wme_update(struct ieee80211com *); static int wpi_mrr_setup(struct wpi_softc *); static void wpi_set_led(struct wpi_softc *, uint8_t, uint8_t, uint8_t); static void wpi_enable_tsf(struct wpi_softc *, struct 
ieee80211_node *); #if 0 static int wpi_setup_beacon(struct wpi_softc *, struct ieee80211_node *); #endif static int wpi_auth(struct wpi_softc *, struct ieee80211vap *); static int wpi_run(struct wpi_softc *, struct ieee80211vap *); static int wpi_scan(struct wpi_softc *); static int wpi_config(struct wpi_softc *); static void wpi_stop_master(struct wpi_softc *); static int wpi_power_up(struct wpi_softc *); static int wpi_reset(struct wpi_softc *); static void wpi_hw_config(struct wpi_softc *); static void wpi_init(void *); static void wpi_init_locked(struct wpi_softc *, int); static void wpi_stop(struct wpi_softc *); static void wpi_stop_locked(struct wpi_softc *); static void wpi_newassoc(struct ieee80211_node *, int); static int wpi_set_txpower(struct wpi_softc *, struct ieee80211_channel *, int); static void wpi_calib_timeout(void *); static void wpi_power_calibration(struct wpi_softc *, int); static int wpi_get_power_index(struct wpi_softc *, struct wpi_power_group *, struct ieee80211_channel *, int); static const char *wpi_cmd_str(int); static int wpi_probe(device_t); static int wpi_attach(device_t); static int wpi_detach(device_t); static int wpi_shutdown(device_t); static int wpi_suspend(device_t); static int wpi_resume(device_t); static device_method_t wpi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, wpi_probe), DEVMETHOD(device_attach, wpi_attach), DEVMETHOD(device_detach, wpi_detach), DEVMETHOD(device_shutdown, wpi_shutdown), DEVMETHOD(device_suspend, wpi_suspend), DEVMETHOD(device_resume, wpi_resume), { 0, 0 } }; static driver_t wpi_driver = { "wpi", wpi_methods, sizeof (struct wpi_softc) }; static devclass_t wpi_devclass; DRIVER_MODULE(wpi, pci, wpi_driver, wpi_devclass, 0, 0); static const uint8_t wpi_ridx_to_plcp[] = { /* OFDM: IEEE Std 802.11a-1999, pp. 14 Table 80 */ /* R1-R4 (ral/ural is R4-R1) */ 0xd, 0xf, 0x5, 0x7, 0x9, 0xb, 0x1, 0x3, /* CCK: device-dependent */ 10, 20, 55, 110 }; static const uint8_t wpi_ridx_to_rate[] = { 12, 18, 24, 36, 48, 72, 96, 108, /* OFDM */ 2, 4, 11, 22 /*CCK */ }; static int wpi_probe(device_t dev) { const struct wpi_ident *ident; for (ident = wpi_ident_table; ident->name != NULL; ident++) { if (pci_get_vendor(dev) == ident->vendor && pci_get_device(dev) == ident->device) { device_set_desc(dev, ident->name); return 0; } } return ENXIO; } /** * Load the firmare image from disk to the allocated dma buffer. * we also maintain the reference to the firmware pointer as there * is times where we may need to reload the firmware but we are not * in a context that can access the filesystem (ie taskq cause by restart) * * @return 0 on success, an errno on failure */ static int wpi_load_firmware(struct wpi_softc *sc) { const struct firmware *fp; struct wpi_dma_info *dma = &sc->fw_dma; const struct wpi_firmware_hdr *hdr; const uint8_t *itext, *idata, *rtext, *rdata, *btext; uint32_t itextsz, idatasz, rtextsz, rdatasz, btextsz; int error; DPRINTFN(WPI_DEBUG_FIRMWARE, ("Attempting Loading Firmware from wpi_fw module\n")); WPI_UNLOCK(sc); if (sc->fw_fp == NULL && (sc->fw_fp = firmware_get("wpifw")) == NULL) { device_printf(sc->sc_dev, "could not load firmware image 'wpifw'\n"); error = ENOENT; WPI_LOCK(sc); goto fail; } fp = sc->fw_fp; WPI_LOCK(sc); /* Validate the firmware is minimum a particular version */ if (fp->version < WPI_FW_MINVERSION) { device_printf(sc->sc_dev, "firmware version is too old. 
Need %d, got %d\n", WPI_FW_MINVERSION, fp->version); error = ENXIO; goto fail; } if (fp->datasize < sizeof (struct wpi_firmware_hdr)) { device_printf(sc->sc_dev, "firmware file too short: %zu bytes\n", fp->datasize); error = ENXIO; goto fail; } hdr = (const struct wpi_firmware_hdr *)fp->data; /* | RUNTIME FIRMWARE | INIT FIRMWARE | BOOT FW | |HDR|<--TEXT-->|<--DATA-->|<--TEXT-->|<--DATA-->|<--TEXT-->| */ rtextsz = le32toh(hdr->rtextsz); rdatasz = le32toh(hdr->rdatasz); itextsz = le32toh(hdr->itextsz); idatasz = le32toh(hdr->idatasz); btextsz = le32toh(hdr->btextsz); /* check that all firmware segments are present */ if (fp->datasize < sizeof (struct wpi_firmware_hdr) + rtextsz + rdatasz + itextsz + idatasz + btextsz) { device_printf(sc->sc_dev, "firmware file too short: %zu bytes\n", fp->datasize); error = ENXIO; /* XXX appropriate error code? */ goto fail; } /* get pointers to firmware segments */ rtext = (const uint8_t *)(hdr + 1); rdata = rtext + rtextsz; itext = rdata + rdatasz; idata = itext + itextsz; btext = idata + idatasz; DPRINTFN(WPI_DEBUG_FIRMWARE, ("Firmware Version: Major %d, Minor %d, Driver %d, \n" "runtime (text: %u, data: %u) init (text: %u, data %u) boot (text %u)\n", (le32toh(hdr->version) & 0xff000000) >> 24, (le32toh(hdr->version) & 0x00ff0000) >> 16, (le32toh(hdr->version) & 0x0000ffff), rtextsz, rdatasz, itextsz, idatasz, btextsz)); DPRINTFN(WPI_DEBUG_FIRMWARE,("rtext 0x%x\n", *(const uint32_t *)rtext)); DPRINTFN(WPI_DEBUG_FIRMWARE,("rdata 0x%x\n", *(const uint32_t *)rdata)); DPRINTFN(WPI_DEBUG_FIRMWARE,("itext 0x%x\n", *(const uint32_t *)itext)); DPRINTFN(WPI_DEBUG_FIRMWARE,("idata 0x%x\n", *(const uint32_t *)idata)); DPRINTFN(WPI_DEBUG_FIRMWARE,("btext 0x%x\n", *(const uint32_t *)btext)); /* sanity checks */ if (rtextsz > WPI_FW_MAIN_TEXT_MAXSZ || rdatasz > WPI_FW_MAIN_DATA_MAXSZ || itextsz > WPI_FW_INIT_TEXT_MAXSZ || idatasz > WPI_FW_INIT_DATA_MAXSZ || btextsz > WPI_FW_BOOT_TEXT_MAXSZ || (btextsz & 3) != 0) { device_printf(sc->sc_dev, "firmware invalid\n"); error = EINVAL; goto fail; } /* copy initialization images into pre-allocated DMA-safe memory */ memcpy(dma->vaddr, idata, idatasz); memcpy(dma->vaddr + WPI_FW_INIT_DATA_MAXSZ, itext, itextsz); bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); /* tell adapter where to find initialization images */ wpi_mem_lock(sc); wpi_mem_write(sc, WPI_MEM_DATA_BASE, dma->paddr); wpi_mem_write(sc, WPI_MEM_DATA_SIZE, idatasz); wpi_mem_write(sc, WPI_MEM_TEXT_BASE, dma->paddr + WPI_FW_INIT_DATA_MAXSZ); wpi_mem_write(sc, WPI_MEM_TEXT_SIZE, itextsz); wpi_mem_unlock(sc); /* load firmware boot code */ if ((error = wpi_load_microcode(sc, btext, btextsz)) != 0) { device_printf(sc->sc_dev, "Failed to load microcode\n"); goto fail; } /* now press "execute" */ WPI_WRITE(sc, WPI_RESET, 0); /* wait at most one second for the first alive notification */ if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { device_printf(sc->sc_dev, "timeout waiting for adapter to initialize\n"); goto fail; } /* copy runtime images into pre-allocated DMA-sage memory */ memcpy(dma->vaddr, rdata, rdatasz); memcpy(dma->vaddr + WPI_FW_MAIN_DATA_MAXSZ, rtext, rtextsz); bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); /* tell adapter where to find runtime images */ wpi_mem_lock(sc); wpi_mem_write(sc, WPI_MEM_DATA_BASE, dma->paddr); wpi_mem_write(sc, WPI_MEM_DATA_SIZE, rdatasz); wpi_mem_write(sc, WPI_MEM_TEXT_BASE, dma->paddr + WPI_FW_MAIN_DATA_MAXSZ); wpi_mem_write(sc, WPI_MEM_TEXT_SIZE, WPI_FW_UPDATED | rtextsz); 
wpi_mem_unlock(sc); /* wait at most one second for the first alive notification */ if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "wpiinit", hz)) != 0) { device_printf(sc->sc_dev, "timeout waiting for adapter to initialize2\n"); goto fail; } DPRINTFN(WPI_DEBUG_FIRMWARE, ("Firmware loaded to driver successfully\n")); return error; fail: wpi_unload_firmware(sc); return error; } /** * Free the referenced firmware image */ static void wpi_unload_firmware(struct wpi_softc *sc) { if (sc->fw_fp) { WPI_UNLOCK(sc); firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); WPI_LOCK(sc); sc->fw_fp = NULL; } } static int wpi_attach(device_t dev) { struct wpi_softc *sc = device_get_softc(dev); struct ifnet *ifp; struct ieee80211com *ic; int ac, error, supportsa = 1; uint32_t tmp; const struct wpi_ident *ident; sc->sc_dev = dev; if (bootverbose || wpi_debug) device_printf(sc->sc_dev,"Driver Revision %s\n", VERSION); /* * Some card's only support 802.11b/g not a, check to see if * this is one such card. A 0x0 in the subdevice table indicates * the entire subdevice range is to be ignored. */ for (ident = wpi_ident_table; ident->name != NULL; ident++) { if (ident->subdevice && pci_get_subdevice(dev) == ident->subdevice) { supportsa = 0; break; } } /* * Create the taskqueues used by the driver. Primarily * sc_tq handles most the task */ sc->sc_tq = taskqueue_create("wpi_taskq", M_NOWAIT | M_ZERO, taskqueue_thread_enqueue, &sc->sc_tq); taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq", device_get_nameunit(dev)); /* Create the tasks that can be queued */ TASK_INIT(&sc->sc_opstask, 0, wpi_ops, sc); TASK_INIT(&sc->sc_bmiss_task, 0, wpi_bmiss, sc); WPI_LOCK_INIT(sc); WPI_CMD_LOCK_INIT(sc); callout_init_mtx(&sc->calib_to, &sc->sc_mtx, 0); callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0); if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { device_printf(dev, "chip is in D%d power mode " "-- setting to D0\n", pci_get_powerstate(dev)); pci_set_powerstate(dev, PCI_POWERSTATE_D0); } /* disable the retry timeout register */ pci_write_config(dev, 0x41, 0, 1); /* enable bus-mastering */ pci_enable_busmaster(dev); sc->mem_rid = PCIR_BAR(0); sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, RF_ACTIVE); if (sc->mem == NULL) { device_printf(dev, "could not allocate memory resource\n"); error = ENOMEM; goto fail; } sc->sc_st = rman_get_bustag(sc->mem); sc->sc_sh = rman_get_bushandle(sc->mem); sc->irq_rid = 0; sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE | RF_SHAREABLE); if (sc->irq == NULL) { device_printf(dev, "could not allocate interrupt resource\n"); error = ENOMEM; goto fail; } /* * Allocate DMA memory for firmware transfers. */ if ((error = wpi_alloc_fwmem(sc)) != 0) { printf(": could not allocate firmware memory\n"); error = ENOMEM; goto fail; } /* * Put adapter into a known state. 
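 * wpi_reset() clears any previously loaded firmware and waits for the
 * clocks and EEPROM to become usable.  The rest of attach then follows
 * the usual shape, roughly (a sketch only, error handling elided):
 *
 *	wpi_reset(sc);				known state, clocks stable
 *	wpi_alloc_shared(sc);			host/NIC shared page
 *	wpi_alloc_tx_ring(sc, &sc->txq[ac], ...);  four WME data rings
 *	wpi_alloc_tx_ring(sc, &sc->cmdq, ...);	firmware command ring
 *	wpi_alloc_rx_ring(sc, &sc->rxq);
 *	if_alloc(IFT_IEEE80211); ieee80211_ifattach(ic);
 *	bus_setup_intr(...);			hook the interrupt last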
*/ if ((error = wpi_reset(sc)) != 0) { device_printf(dev, "could not reset adapter\n"); goto fail; } wpi_mem_lock(sc); tmp = wpi_mem_read(sc, WPI_MEM_PCIDEV); if (bootverbose || wpi_debug) device_printf(sc->sc_dev, "Hardware Revision (0x%X)\n", tmp); wpi_mem_unlock(sc); /* Allocate shared page */ if ((error = wpi_alloc_shared(sc)) != 0) { device_printf(dev, "could not allocate shared page\n"); goto fail; } /* tx data queues - 4 for QoS purposes */ for (ac = 0; ac < WME_NUM_AC; ac++) { error = wpi_alloc_tx_ring(sc, &sc->txq[ac], WPI_TX_RING_COUNT, ac); if (error != 0) { device_printf(dev, "could not allocate Tx ring %d\n",ac); goto fail; } } /* command queue to talk to the card's firmware */ error = wpi_alloc_tx_ring(sc, &sc->cmdq, WPI_CMD_RING_COUNT, 4); if (error != 0) { device_printf(dev, "could not allocate command ring\n"); goto fail; } /* receive data queue */ error = wpi_alloc_rx_ring(sc, &sc->rxq); if (error != 0) { device_printf(dev, "could not allocate Rx ring\n"); goto fail; } ifp = sc->sc_ifp = if_alloc(IFT_IEEE80211); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOMEM; goto fail; } ic = ifp->if_l2com; ic->ic_ifp = ifp; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_MONITOR /* monitor mode supported */ | IEEE80211_C_TXPMGT /* tx power management */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_WPA /* 802.11i */ /* XXX looks like WME is partly supported? */ #if 0 | IEEE80211_C_IBSS /* IBSS mode support */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ | IEEE80211_C_WME /* 802.11e */ | IEEE80211_C_HOSTAP /* Host access point mode */ #endif ; /* * Read in the eeprom and also setup the channels for * net80211. We don't set the rates as net80211 does this for us */ wpi_read_eeprom(sc); if (bootverbose || wpi_debug) { device_printf(sc->sc_dev, "Regulatory Domain: %.4s\n", sc->domain); device_printf(sc->sc_dev, "Hardware Type: %c\n", sc->type > 1 ? 'B': '?'); device_printf(sc->sc_dev, "Hardware Revision: %c\n", ((le16toh(sc->rev) & 0xf0) == 0xd0) ? 'D': '?'); device_printf(sc->sc_dev, "SKU %s support 802.11a\n", supportsa ? "does" : "does not"); /* XXX hw_config uses the PCIDEV for the Hardware rev. 
Must check what sc->rev really represents - benjsc 20070615 */ } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = wpi_init; ifp->if_ioctl = wpi_ioctl; ifp->if_start = wpi_start; IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN; IFQ_SET_READY(&ifp->if_snd); ieee80211_ifattach(ic); /* override default methods */ ic->ic_node_alloc = wpi_node_alloc; ic->ic_newassoc = wpi_newassoc; ic->ic_raw_xmit = wpi_raw_xmit; ic->ic_wme.wme_update = wpi_wme_update; ic->ic_scan_start = wpi_scan_start; ic->ic_scan_end = wpi_scan_end; ic->ic_set_channel = wpi_set_channel; ic->ic_scan_curchan = wpi_scan_curchan; ic->ic_scan_mindwell = wpi_scan_mindwell; ic->ic_vap_create = wpi_vap_create; ic->ic_vap_delete = wpi_vap_delete; bpfattach(ifp, DLT_IEEE802_11_RADIO, sizeof (struct ieee80211_frame) + sizeof (sc->sc_txtap)); sc->sc_rxtap_len = sizeof sc->sc_rxtap; sc->sc_rxtap.wr_ihdr.it_len = htole16(sc->sc_rxtap_len); sc->sc_rxtap.wr_ihdr.it_present = htole32(WPI_RX_RADIOTAP_PRESENT); sc->sc_txtap_len = sizeof sc->sc_txtap; sc->sc_txtap.wt_ihdr.it_len = htole16(sc->sc_txtap_len); sc->sc_txtap.wt_ihdr.it_present = htole32(WPI_TX_RADIOTAP_PRESENT); /* * Hook our interrupt after all initialization is complete. */ error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET |INTR_MPSAFE, NULL, wpi_intr, sc, &sc->sc_ih); if (error != 0) { device_printf(dev, "could not set up interrupt\n"); goto fail; } if (bootverbose) ieee80211_announce(ic); #ifdef XXX_DEBUG ieee80211_announce_channels(ic); #endif return 0; fail: wpi_detach(dev); return ENXIO; } static int wpi_detach(device_t dev) { struct wpi_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; int ac; if (ifp != NULL) { wpi_stop(sc); callout_drain(&sc->watchdog_to); callout_drain(&sc->calib_to); bpfdetach(ifp); ieee80211_ifdetach(ic); } WPI_LOCK(sc); if (sc->txq[0].data_dmat) { for (ac = 0; ac < WME_NUM_AC; ac++) wpi_free_tx_ring(sc, &sc->txq[ac]); wpi_free_tx_ring(sc, &sc->cmdq); wpi_free_rx_ring(sc, &sc->rxq); wpi_free_shared(sc); } if (sc->fw_fp != NULL) { wpi_unload_firmware(sc); } if (sc->fw_dma.tag) wpi_free_fwmem(sc); WPI_UNLOCK(sc); if (sc->irq != NULL) { bus_teardown_intr(dev, sc->irq, sc->sc_ih); bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq); } if (sc->mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem); if (ifp != NULL) if_free(ifp); taskqueue_free(sc->sc_tq); WPI_LOCK_DESTROY(sc); WPI_CMD_LOCK_DESTROY(sc); return 0; } static struct ieee80211vap * wpi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, int opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct wpi_vap *wvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return NULL; wvp = (struct wpi_vap *) malloc(sizeof(struct wpi_vap), M_80211_VAP, M_NOWAIT | M_ZERO); if (wvp == NULL) return NULL; vap = &wvp->vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid, mac); /* override with driver methods */ wvp->newstate = vap->iv_newstate; vap->iv_newstate = wpi_newstate; ieee80211_amrr_init(&wvp->amrr, vap, IEEE80211_AMRR_MIN_SUCCESS_THRESHOLD, IEEE80211_AMRR_MAX_SUCCESS_THRESHOLD, 500 /*ms*/); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status); ic->ic_opmode = opmode; return vap; } static void wpi_vap_delete(struct 
ieee80211vap *vap) { struct wpi_vap *wvp = WPI_VAP(vap); ieee80211_amrr_cleanup(&wvp->amrr); ieee80211_vap_detach(vap); free(wvp, M_80211_VAP); } static void wpi_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { if (error != 0) return; KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); *(bus_addr_t *)arg = segs[0].ds_addr; } /* * Allocates a contiguous block of dma memory of the requested size and * alignment. Due to limitations of the FreeBSD dma subsystem as of 20071217, * allocations greater than 4096 may fail. Hence if the requested alignment is * greater we allocate 'alignment' size extra memory and shift the vaddr and * paddr after the dma load. This bypasses the problem at the cost of a little * more memory. */ static int wpi_dma_contig_alloc(struct wpi_softc *sc, struct wpi_dma_info *dma, void **kvap, bus_size_t size, bus_size_t alignment, int flags) { int error; bus_size_t align; bus_size_t reqsize; DPRINTFN(WPI_DEBUG_DMA, ("Size: %zd - alignment %zd\n", size, alignment)); dma->size = size; dma->tag = NULL; if (alignment > 4096) { align = PAGE_SIZE; reqsize = size + alignment; } else { align = alignment; reqsize = size; } error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), align, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, reqsize, 1, reqsize, flags, NULL, NULL, &dma->tag); if (error != 0) { device_printf(sc->sc_dev, "could not create shared page DMA tag\n"); goto fail; } error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr_start, flags | BUS_DMA_ZERO, &dma->map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate shared page DMA memory\n"); goto fail; } error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr_start, reqsize, wpi_dma_map_addr, &dma->paddr_start, flags); /* Save the original pointers so we can free all the memory */ dma->paddr = dma->paddr_start; dma->vaddr = dma->vaddr_start; /* * Check the alignment and increment by 4096 until we get the * requested alignment. Fail if can't obtain the alignment * we requested. */ if ((dma->paddr & (alignment -1 )) != 0) { int i; for (i = 0; i < alignment / 4096; i++) { if ((dma->paddr & (alignment - 1 )) == 0) break; dma->paddr += 4096; dma->vaddr += 4096; } if (i == alignment / 4096) { device_printf(sc->sc_dev, "alignment requirement was not satisfied\n"); goto fail; } } if (error != 0) { device_printf(sc->sc_dev, "could not load shared page DMA map\n"); goto fail; } if (kvap != NULL) *kvap = dma->vaddr; return 0; fail: wpi_dma_contig_free(dma); return error; } static void wpi_dma_contig_free(struct wpi_dma_info *dma) { if (dma->tag) { if (dma->map != NULL) { if (dma->paddr_start != 0) { bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(dma->tag, dma->map); } bus_dmamem_free(dma->tag, &dma->vaddr_start, dma->map); } bus_dma_tag_destroy(dma->tag); } } /* * Allocate a shared page between host and NIC. 
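 * This page is the main mailbox between driver and firmware: the Tx
 * ring base addresses are published in it (see wpi_alloc_tx_ring())
 * and the firmware writes the index of the last Rx descriptor it has
 * filled into its 'next' field (consumed in wpi_notif_intr()).  A
 * minimal sketch, using only fields referenced elsewhere in this file:
 *
 *	sc->shared->txbase[qid] = htole32(ring->desc_dma.paddr);
 *	hw = le32toh(sc->shared->next);		index the firmware reached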
*/ static int wpi_alloc_shared(struct wpi_softc *sc) { int error; error = wpi_dma_contig_alloc(sc, &sc->shared_dma, (void **)&sc->shared, sizeof (struct wpi_shared), PAGE_SIZE, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate shared area DMA memory\n"); } return error; } static void wpi_free_shared(struct wpi_softc *sc) { wpi_dma_contig_free(&sc->shared_dma); } static int wpi_alloc_rx_ring(struct wpi_softc *sc, struct wpi_rx_ring *ring) { int i, error; ring->cur = 0; error = wpi_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, WPI_RX_RING_COUNT * sizeof (uint32_t), WPI_RING_DMA_ALIGN, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "%s: could not allocate rx ring DMA memory, error %d\n", __func__, error); goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MJUMPAGESIZE, 1, MJUMPAGESIZE, BUS_DMA_NOWAIT, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "%s: bus_dma_tag_create_failed, error %d\n", __func__, error); goto fail; } /* * Setup Rx buffers. */ for (i = 0; i < WPI_RX_RING_COUNT; i++) { struct wpi_rx_data *data = &ring->data[i]; struct mbuf *m; bus_addr_t paddr; error = bus_dmamap_create(ring->data_dmat, 0, &data->map); if (error != 0) { device_printf(sc->sc_dev, "%s: bus_dmamap_create failed, error %d\n", __func__, error); goto fail; } m = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); if (m == NULL) { device_printf(sc->sc_dev, "%s: could not allocate rx mbuf\n", __func__); error = ENOMEM; goto fail; } /* map page */ error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m, caddr_t), MJUMPAGESIZE, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); if (error != 0 && error != EFBIG) { device_printf(sc->sc_dev, "%s: bus_dmamap_load failed, error %d\n", __func__, error); m_freem(m); error = ENOMEM; /* XXX unique code */ goto fail; } bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); data->m = m; ring->desc[i] = htole32(paddr); } bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, BUS_DMASYNC_PREWRITE); return 0; fail: wpi_free_rx_ring(sc, ring); return error; } static void wpi_reset_rx_ring(struct wpi_softc *sc, struct wpi_rx_ring *ring) { int ntries; wpi_mem_lock(sc); WPI_WRITE(sc, WPI_RX_CONFIG, 0); for (ntries = 0; ntries < 100; ntries++) { if (WPI_READ(sc, WPI_RX_STATUS) & WPI_RX_IDLE) break; DELAY(10); } wpi_mem_unlock(sc); #ifdef WPI_DEBUG if (ntries == 100) device_printf(sc->sc_dev, "timeout resetting Rx ring\n"); #endif ring->cur = 0; } static void wpi_free_rx_ring(struct wpi_softc *sc, struct wpi_rx_ring *ring) { int i; wpi_dma_contig_free(&ring->desc_dma); for (i = 0; i < WPI_RX_RING_COUNT; i++) if (ring->data[i].m != NULL) m_freem(ring->data[i].m); } static int wpi_alloc_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring, int count, int qid) { struct wpi_tx_data *data; int i, error; ring->qid = qid; ring->count = count; ring->queued = 0; ring->cur = 0; ring->data = NULL; error = wpi_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, count * sizeof (struct wpi_tx_desc), WPI_RING_DMA_ALIGN, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate tx dma memory\n"); goto fail; } /* update shared page with ring's base address */ sc->shared->txbase[qid] = htole32(ring->desc_dma.paddr); error = wpi_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd, count * sizeof (struct wpi_tx_cmd), WPI_RING_DMA_ALIGN, BUS_DMA_NOWAIT); if (error != 0) { 
device_printf(sc->sc_dev, "could not allocate tx command DMA memory\n"); goto fail; } ring->data = malloc(count * sizeof (struct wpi_tx_data), M_DEVBUF, M_NOWAIT | M_ZERO); if (ring->data == NULL) { device_printf(sc->sc_dev, "could not allocate tx data slots\n"); goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, WPI_MAX_SCATTER - 1, MCLBYTES, BUS_DMA_NOWAIT, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create data DMA tag\n"); goto fail; } for (i = 0; i < count; i++) { data = &ring->data[i]; error = bus_dmamap_create(ring->data_dmat, 0, &data->map); if (error != 0) { device_printf(sc->sc_dev, "could not create tx buf DMA map\n"); goto fail; } bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); } return 0; fail: wpi_free_tx_ring(sc, ring); return error; } static void wpi_reset_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) { struct wpi_tx_data *data; int i, ntries; wpi_mem_lock(sc); WPI_WRITE(sc, WPI_TX_CONFIG(ring->qid), 0); for (ntries = 0; ntries < 100; ntries++) { if (WPI_READ(sc, WPI_TX_STATUS) & WPI_TX_IDLE(ring->qid)) break; DELAY(10); } #ifdef WPI_DEBUG if (ntries == 100) device_printf(sc->sc_dev, "timeout resetting Tx ring %d\n", ring->qid); #endif wpi_mem_unlock(sc); for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } } ring->queued = 0; ring->cur = 0; } static void wpi_free_tx_ring(struct wpi_softc *sc, struct wpi_tx_ring *ring) { struct wpi_tx_data *data; int i; wpi_dma_contig_free(&ring->desc_dma); wpi_dma_contig_free(&ring->cmd_dma); if (ring->data != NULL) { for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } } free(ring->data, M_DEVBUF); } if (ring->data_dmat != NULL) bus_dma_tag_destroy(ring->data_dmat); } static int wpi_shutdown(device_t dev) { struct wpi_softc *sc = device_get_softc(dev); WPI_LOCK(sc); wpi_stop_locked(sc); wpi_unload_firmware(sc); WPI_UNLOCK(sc); return 0; } static int wpi_suspend(device_t dev) { struct wpi_softc *sc = device_get_softc(dev); wpi_stop(sc); return 0; } static int wpi_resume(device_t dev) { struct wpi_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->sc_ifp; pci_write_config(dev, 0x41, 0, 1); if (ifp->if_flags & IFF_UP) { wpi_init(ifp->if_softc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) wpi_start(ifp); } return 0; } /* ARGSUSED */ static struct ieee80211_node * -wpi_node_alloc(struct ieee80211_node_table *ic) +wpi_node_alloc(struct ieee80211vap *vap __unused, + const uint8_t mac[IEEE80211_ADDR_LEN] __unused) { struct wpi_node *wn; wn = malloc(sizeof (struct wpi_node), M_80211_NODE, M_NOWAIT | M_ZERO); return &wn->ni; } /** * Called by net80211 when ever there is a change to 80211 state machine */ static int wpi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct wpi_vap *wvp = WPI_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct ifnet *ifp = ic->ic_ifp; struct wpi_softc *sc = ifp->if_softc; int error; DPRINTF(("%s: %s -> %s flags 0x%x\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate], sc->flags)); if (nstate == IEEE80211_S_AUTH) { /* Delay the auth transition until we can update the firmware */ error = wpi_queue_cmd(sc, 
WPI_AUTH, arg, WPI_QUEUE_NORMAL); return (error != 0 ? error : EINPROGRESS); } if (nstate == IEEE80211_S_RUN && vap->iv_state != IEEE80211_S_RUN) { /* set the association id first */ error = wpi_queue_cmd(sc, WPI_RUN, arg, WPI_QUEUE_NORMAL); return (error != 0 ? error : EINPROGRESS); } if (nstate == IEEE80211_S_RUN) { /* RUN -> RUN transition; just restart the timers */ wpi_calib_timeout(sc); /* XXX split out rate control timer */ } return wvp->newstate(vap, nstate, arg); } /* * Grab exclusive access to NIC memory. */ static void wpi_mem_lock(struct wpi_softc *sc) { int ntries; uint32_t tmp; tmp = WPI_READ(sc, WPI_GPIO_CTL); WPI_WRITE(sc, WPI_GPIO_CTL, tmp | WPI_GPIO_MAC); /* spin until we actually get the lock */ for (ntries = 0; ntries < 100; ntries++) { if ((WPI_READ(sc, WPI_GPIO_CTL) & (WPI_GPIO_CLOCK | WPI_GPIO_SLEEP)) == WPI_GPIO_CLOCK) break; DELAY(10); } if (ntries == 100) device_printf(sc->sc_dev, "could not lock memory\n"); } /* * Release lock on NIC memory. */ static void wpi_mem_unlock(struct wpi_softc *sc) { uint32_t tmp = WPI_READ(sc, WPI_GPIO_CTL); WPI_WRITE(sc, WPI_GPIO_CTL, tmp & ~WPI_GPIO_MAC); } static uint32_t wpi_mem_read(struct wpi_softc *sc, uint16_t addr) { WPI_WRITE(sc, WPI_READ_MEM_ADDR, WPI_MEM_4 | addr); return WPI_READ(sc, WPI_READ_MEM_DATA); } static void wpi_mem_write(struct wpi_softc *sc, uint16_t addr, uint32_t data) { WPI_WRITE(sc, WPI_WRITE_MEM_ADDR, WPI_MEM_4 | addr); WPI_WRITE(sc, WPI_WRITE_MEM_DATA, data); } static void wpi_mem_write_region_4(struct wpi_softc *sc, uint16_t addr, const uint32_t *data, int wlen) { for (; wlen > 0; wlen--, data++, addr+=4) wpi_mem_write(sc, addr, *data); } /* * Read data from the EEPROM. We access EEPROM through the MAC instead of * using the traditional bit-bang method. Data is read up until len bytes have * been obtained. */ static uint16_t wpi_read_prom_data(struct wpi_softc *sc, uint32_t addr, void *data, int len) { int ntries; uint32_t val; uint8_t *out = data; wpi_mem_lock(sc); for (; len > 0; len -= 2, addr++) { WPI_WRITE(sc, WPI_EEPROM_CTL, addr << 2); for (ntries = 0; ntries < 10; ntries++) { if ((val = WPI_READ(sc, WPI_EEPROM_CTL)) & WPI_EEPROM_READY) break; DELAY(5); } if (ntries == 10) { device_printf(sc->sc_dev, "could not read EEPROM\n"); return ETIMEDOUT; } *out++= val >> 16; if (len > 1) *out ++= val >> 24; } wpi_mem_unlock(sc); return 0; } /* * The firmware text and data segments are transferred to the NIC using DMA. * The driver just copies the firmware into DMA-safe memory and tells the NIC * where to find it. Once the NIC has copied the firmware into its internal * memory, we can free our local copy in the driver. */ static int wpi_load_microcode(struct wpi_softc *sc, const uint8_t *fw, int size) { int error, ntries; DPRINTFN(WPI_DEBUG_HW,("Loading microcode size 0x%x\n", size)); size /= sizeof(uint32_t); wpi_mem_lock(sc); wpi_mem_write_region_4(sc, WPI_MEM_UCODE_BASE, (const uint32_t *)fw, size); wpi_mem_write(sc, WPI_MEM_UCODE_SRC, 0); wpi_mem_write(sc, WPI_MEM_UCODE_DST, WPI_FW_TEXT); wpi_mem_write(sc, WPI_MEM_UCODE_SIZE, size); /* run microcode */ wpi_mem_write(sc, WPI_MEM_UCODE_CTL, WPI_UC_RUN); /* wait while the adapter is busy copying the firmware */ for (error = 0, ntries = 0; ntries < 1000; ntries++) { uint32_t status = WPI_READ(sc, WPI_TX_STATUS); DPRINTFN(WPI_DEBUG_HW, ("firmware status=0x%x, val=0x%x, result=0x%x\n", status, WPI_TX_IDLE(6), status & WPI_TX_IDLE(6))); if (status & WPI_TX_IDLE(6)) { DPRINTFN(WPI_DEBUG_HW, ("Status Match! 
- ntries = %d\n", ntries)); break; } DELAY(10); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout transferring firmware\n"); error = ETIMEDOUT; } /* start the microcode executing */ wpi_mem_write(sc, WPI_MEM_UCODE_CTL, WPI_UC_ENABLE); wpi_mem_unlock(sc); return (error); } static void wpi_rx_intr(struct wpi_softc *sc, struct wpi_rx_desc *desc, struct wpi_rx_data *data) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct wpi_rx_ring *ring = &sc->rxq; struct wpi_rx_stat *stat; struct wpi_rx_head *head; struct wpi_rx_tail *tail; struct ieee80211_node *ni; struct mbuf *m, *mnew; bus_addr_t paddr; int error; stat = (struct wpi_rx_stat *)(desc + 1); if (stat->len > WPI_STAT_MAXLEN) { device_printf(sc->sc_dev, "invalid rx statistic header\n"); ifp->if_ierrors++; return; } head = (struct wpi_rx_head *)((caddr_t)(stat + 1) + stat->len); tail = (struct wpi_rx_tail *)((caddr_t)(head + 1) + le16toh(head->len)); DPRINTFN(WPI_DEBUG_RX, ("rx intr: idx=%d len=%d stat len=%d rssi=%d " "rate=%x chan=%d tstamp=%ju\n", ring->cur, le32toh(desc->len), le16toh(head->len), (int8_t)stat->rssi, head->rate, head->chan, (uintmax_t)le64toh(tail->tstamp))); /* XXX don't need mbuf, just dma buffer */ mnew = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); if (mnew == NULL) { DPRINTFN(WPI_DEBUG_RX, ("%s: no mbuf to restock ring\n", __func__)); ic->ic_stats.is_rx_nobuf++; ifp->if_ierrors++; return; } error = bus_dmamap_load(ring->data_dmat, data->map, mtod(mnew, caddr_t), MJUMPAGESIZE, wpi_dma_map_addr, &paddr, BUS_DMA_NOWAIT); if (error != 0 && error != EFBIG) { device_printf(sc->sc_dev, "%s: bus_dmamap_load failed, error %d\n", __func__, error); m_freem(mnew); ic->ic_stats.is_rx_nobuf++; /* XXX need stat */ ifp->if_ierrors++; return; } bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); /* finalize mbuf and swap in new one */ m = data->m; m->m_pkthdr.rcvif = ifp; m->m_data = (caddr_t)(head + 1); m->m_pkthdr.len = m->m_len = le16toh(head->len); data->m = mnew; /* update Rx descriptor */ ring->desc[ring->cur] = htole32(paddr); if (bpf_peers_present(ifp->if_bpf)) { struct wpi_rx_radiotap_header *tap = &sc->sc_rxtap; tap->wr_flags = 0; tap->wr_chan_freq = htole16(ic->ic_channels[head->chan].ic_freq); tap->wr_chan_flags = htole16(ic->ic_channels[head->chan].ic_flags); tap->wr_dbm_antsignal = (int8_t)(stat->rssi - WPI_RSSI_OFFSET); tap->wr_dbm_antnoise = (int8_t)le16toh(stat->noise); tap->wr_tsft = tail->tstamp; tap->wr_antenna = (le16toh(head->flags) >> 4) & 0xf; switch (head->rate) { /* CCK rates */ case 10: tap->wr_rate = 2; break; case 20: tap->wr_rate = 4; break; case 55: tap->wr_rate = 11; break; case 110: tap->wr_rate = 22; break; /* OFDM rates */ case 0xd: tap->wr_rate = 12; break; case 0xf: tap->wr_rate = 18; break; case 0x5: tap->wr_rate = 24; break; case 0x7: tap->wr_rate = 36; break; case 0x9: tap->wr_rate = 48; break; case 0xb: tap->wr_rate = 72; break; case 0x1: tap->wr_rate = 96; break; case 0x3: tap->wr_rate = 108; break; /* unknown rate: should not happen */ default: tap->wr_rate = 0; } if (le16toh(head->flags) & 0x4) tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; bpf_mtap2(ifp->if_bpf, tap, sc->sc_rxtap_len, m); } WPI_UNLOCK(sc); ni = ieee80211_find_rxnode(ic, mtod(m, struct ieee80211_frame_min *)); if (ni != NULL) { (void) ieee80211_input(ni, m, stat->rssi, 0, 0); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, stat->rssi, 0, 0); WPI_LOCK(sc); } static void wpi_tx_intr(struct wpi_softc *sc, struct wpi_rx_desc *desc) { struct ifnet 
*ifp = sc->sc_ifp; struct wpi_tx_ring *ring = &sc->txq[desc->qid & 0x3]; struct wpi_tx_data *txdata = &ring->data[desc->idx]; struct wpi_tx_stat *stat = (struct wpi_tx_stat *)(desc + 1); struct wpi_node *wn = (struct wpi_node *)txdata->ni; DPRINTFN(WPI_DEBUG_TX, ("tx done: qid=%d idx=%d retries=%d nkill=%d " "rate=%x duration=%d status=%x\n", desc->qid, desc->idx, stat->ntries, stat->nkill, stat->rate, le32toh(stat->duration), le32toh(stat->status))); /* * Update rate control statistics for the node. * XXX we should not count mgmt frames since they're always sent at * the lowest available bit-rate. * XXX frames w/o ACK shouldn't be used either */ wn->amn.amn_txcnt++; if (stat->ntries > 0) { DPRINTFN(3, ("%d retries\n", stat->ntries)); wn->amn.amn_retrycnt++; } /* XXX oerrors should only count errors !maxtries */ if ((le32toh(stat->status) & 0xff) != 1) ifp->if_oerrors++; else ifp->if_opackets++; bus_dmamap_sync(ring->data_dmat, txdata->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, txdata->map); /* XXX handle M_TXCB? */ m_freem(txdata->m); txdata->m = NULL; ieee80211_free_node(txdata->ni); txdata->ni = NULL; ring->queued--; sc->sc_tx_timer = 0; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; wpi_start_locked(ifp); } static void wpi_cmd_intr(struct wpi_softc *sc, struct wpi_rx_desc *desc) { struct wpi_tx_ring *ring = &sc->cmdq; struct wpi_tx_data *data; DPRINTFN(WPI_DEBUG_CMD, ("cmd notification qid=%x idx=%d flags=%x " "type=%s len=%d\n", desc->qid, desc->idx, desc->flags, wpi_cmd_str(desc->type), le32toh(desc->len))); if ((desc->qid & 7) != 4) return; /* not a command ack */ data = &ring->data[desc->idx]; /* if the command was mapped in a mbuf, free it */ if (data->m != NULL) { bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } sc->flags &= ~WPI_FLAG_BUSY; wakeup(&ring->cmd[desc->idx]); } static void wpi_bmiss(void *arg, int npending) { struct wpi_softc *sc = arg; struct ieee80211com *ic = sc->sc_ifp->if_l2com; ieee80211_beacon_miss(ic); } static void wpi_notif_intr(struct wpi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct wpi_rx_desc *desc; struct wpi_rx_data *data; uint32_t hw; hw = le32toh(sc->shared->next); while (sc->rxq.cur != hw) { data = &sc->rxq.data[sc->rxq.cur]; desc = (void *)data->m->m_ext.ext_buf; DPRINTFN(WPI_DEBUG_NOTIFY, ("notify qid=%x idx=%d flags=%x type=%d len=%d\n", desc->qid, desc->idx, desc->flags, desc->type, le32toh(desc->len))); if (!(desc->qid & 0x80)) /* reply to a command */ wpi_cmd_intr(sc, desc); switch (desc->type) { case WPI_RX_DONE: /* a 802.11 frame was received */ wpi_rx_intr(sc, desc, data); break; case WPI_TX_DONE: /* a 802.11 frame has been transmitted */ wpi_tx_intr(sc, desc); break; case WPI_UC_READY: { struct wpi_ucode_info *uc = (struct wpi_ucode_info *)(desc + 1); /* the microcontroller is ready */ DPRINTF(("microcode alive notification version %x " "alive %x\n", le32toh(uc->version), le32toh(uc->valid))); if (le32toh(uc->valid) != 1) { device_printf(sc->sc_dev, "microcontroller initialization failed\n"); wpi_stop_locked(sc); } break; } case WPI_STATE_CHANGED: { uint32_t *status = (uint32_t *)(desc + 1); /* enabled/disabled notification */ DPRINTF(("state changed to %x\n", le32toh(*status))); if (le32toh(*status) & 1) { device_printf(sc->sc_dev, "Radio transmitter is switched off\n"); sc->flags |= WPI_FLAG_HW_RADIO_OFF; ifp->if_drv_flags &= ~IFF_DRV_RUNNING; /* Disable firmware commands */ WPI_WRITE(sc, WPI_UCODE_SET, WPI_DISABLE_CMD); } break; } case 
WPI_START_SCAN: { struct wpi_start_scan *scan = (struct wpi_start_scan *)(desc + 1); DPRINTFN(WPI_DEBUG_SCANNING, ("scanning channel %d status %x\n", scan->chan, le32toh(scan->status))); break; } case WPI_STOP_SCAN: { struct wpi_stop_scan *scan = (struct wpi_stop_scan *)(desc + 1); struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); DPRINTFN(WPI_DEBUG_SCANNING, ("scan finished nchan=%d status=%d chan=%d\n", scan->nchan, scan->status, scan->chan)); sc->sc_scan_timer = 0; ieee80211_scan_next(vap); break; } case WPI_MISSED_BEACON: { struct wpi_missed_beacon *beacon = (struct wpi_missed_beacon *)(desc + 1); struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); if (le32toh(beacon->consecutive) >= vap->iv_bmissthreshold) { DPRINTF(("Beacon miss: %u >= %u\n", le32toh(beacon->consecutive), vap->iv_bmissthreshold)); taskqueue_enqueue(taskqueue_swi, &sc->sc_bmiss_task); } break; } } sc->rxq.cur = (sc->rxq.cur + 1) % WPI_RX_RING_COUNT; } /* tell the firmware what we have processed */ hw = (hw == 0) ? WPI_RX_RING_COUNT - 1 : hw - 1; WPI_WRITE(sc, WPI_RX_WIDX, hw & ~7); } static void wpi_intr(void *arg) { struct wpi_softc *sc = arg; uint32_t r; WPI_LOCK(sc); r = WPI_READ(sc, WPI_INTR); if (r == 0 || r == 0xffffffff) { WPI_UNLOCK(sc); return; } /* disable interrupts */ WPI_WRITE(sc, WPI_MASK, 0); /* ack interrupts */ WPI_WRITE(sc, WPI_INTR, r); if (r & (WPI_SW_ERROR | WPI_HW_ERROR)) { device_printf(sc->sc_dev, "fatal firmware error\n"); DPRINTFN(6,("(%s)\n", (r & WPI_SW_ERROR) ? "(Software Error)" : "(Hardware Error)")); wpi_queue_cmd(sc, WPI_RESTART, 0, WPI_QUEUE_CLEAR); sc->flags &= ~WPI_FLAG_BUSY; WPI_UNLOCK(sc); return; } if (r & WPI_RX_INTR) wpi_notif_intr(sc); if (r & WPI_ALIVE_INTR) /* firmware initialized */ wakeup(sc); /* re-enable interrupts */ if (sc->sc_ifp->if_flags & IFF_UP) WPI_WRITE(sc, WPI_MASK, WPI_INTR_MASK); WPI_UNLOCK(sc); } static uint8_t wpi_plcp_signal(int rate) { switch (rate) { /* CCK rates (returned values are device-dependent) */ case 2: return 10; case 4: return 20; case 11: return 55; case 22: return 110; /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */ /* R1-R4 (ral/ural is R4-R1) */ case 12: return 0xd; case 18: return 0xf; case 24: return 0x5; case 36: return 0x7; case 48: return 0x9; case 72: return 0xb; case 96: return 0x1; case 108: return 0x3; /* unsupported rates (should not get there) */ default: return 0; } } /* quickly determine if a given rate is CCK or OFDM */ #define WPI_RATE_IS_OFDM(rate) ((rate) >= 12 && (rate) != 22) /* * Construct the data packet for a transmit buffer and acutally put * the buffer onto the transmit ring, kicking the card to process the * the buffer. 
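 *
 * Each hardware descriptor carries up to WPI_MAX_SCATTER segments:
 * segment 0 always points at the wpi_tx_cmd staged in cmd_dma and the
 * remaining segments at the (header-stripped) mbuf data.  In sketch
 * form, the descriptor built below looks like:
 *
 *	desc->flags   = WPI_PAD32(len) << 28 | (1 + nsegs) << 24;
 *	desc->segs[0] = cmd_dma.paddr + cur * sizeof(struct wpi_tx_cmd),
 *			len 4 + sizeof(struct wpi_cmd_data);
 *	desc->segs[i] = i-th mbuf DMA segment (i >= 1);
 *	WPI_WRITE(sc, WPI_TX_WIDX, ring->qid << 8 | ring->cur);  kick ring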
*/ static int wpi_tx_data(struct wpi_softc *sc, struct mbuf *m0, struct ieee80211_node *ni, int ac) { struct ieee80211vap *vap = ni->ni_vap; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; const struct chanAccParams *cap = &ic->ic_wme.wme_chanParams; struct wpi_tx_ring *ring = &sc->txq[ac]; struct wpi_tx_desc *desc; struct wpi_tx_data *data; struct wpi_tx_cmd *cmd; struct wpi_cmd_data *tx; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp; struct ieee80211_key *k; struct mbuf *mnew; int i, error, nsegs, rate, hdrlen, ismcast; bus_dma_segment_t segs[WPI_MAX_SCATTER]; desc = &ring->desc[ring->cur]; data = &ring->data[ring->cur]; wh = mtod(m0, struct ieee80211_frame *); hdrlen = ieee80211_hdrsize(wh); ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); if (wh->i_fc[1] & IEEE80211_FC1_WEP) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } cmd = &ring->cmd[ring->cur]; cmd->code = WPI_CMD_TX_DATA; cmd->flags = 0; cmd->qid = ring->qid; cmd->idx = ring->cur; tx = (struct wpi_cmd_data *)cmd->data; tx->flags = htole32(WPI_TX_AUTO_SEQ); tx->timeout = htole16(0); tx->ofdm_mask = 0xff; tx->cck_mask = 0x0f; tx->lifetime = htole32(WPI_LIFETIME_INFINITE); tx->id = ismcast ? WPI_ID_BROADCAST : WPI_ID_BSS; tx->len = htole16(m0->m_pkthdr.len); if (!ismcast) { if ((ni->ni_flags & IEEE80211_NODE_QOS) == 0 || !cap->cap_wmeParams[ac].wmep_noackPolicy) tx->flags |= htole32(WPI_TX_NEED_ACK); if (m0->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) { tx->flags |= htole32(WPI_TX_NEED_RTS|WPI_TX_FULL_TXOP); tx->rts_ntries = 7; } } /* pick a rate */ tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_MGT) { uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; /* tell h/w to set timestamp in probe responses */ if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) tx->flags |= htole32(WPI_TX_INSERT_TSTAMP); if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) tx->timeout = htole16(3); else tx->timeout = htole16(2); rate = tp->mgmtrate; } else if (ismcast) { rate = tp->mcastrate; } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) { rate = tp->ucastrate; } else { (void) ieee80211_amrr_choose(ni, &WPI_NODE(ni)->amn); rate = ni->ni_txrate; } tx->rate = wpi_plcp_signal(rate); /* be very persistant at sending frames out */ #if 0 tx->data_ntries = tp->maxretry; #else tx->data_ntries = 15; /* XXX way too high */ #endif if (bpf_peers_present(ifp->if_bpf)) { struct wpi_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq); tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags); tap->wt_rate = rate; tap->wt_hwqueue = ac; if (wh->i_fc[1] & IEEE80211_FC1_WEP) tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; bpf_mtap2(ifp->if_bpf, tap, sc->sc_txtap_len, m0); } /* save and trim IEEE802.11 header */ m_copydata(m0, 0, hdrlen, (caddr_t)&tx->wh); m_adj(m0, hdrlen); error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m0, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0 && error != EFBIG) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (error != 0) { /* XXX use m_collapse */ mnew = m_defrag(m0, M_DONTWAIT); if (mnew == NULL) { device_printf(sc->sc_dev, "could not defragment mbuf\n"); m_freem(m0); return ENOBUFS; } m0 = mnew; error = 
bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m0, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } } data->m = m0; data->ni = ni; DPRINTFN(WPI_DEBUG_TX, ("sending data: qid=%d idx=%d len=%d nsegs=%d\n", ring->qid, ring->cur, m0->m_pkthdr.len, nsegs)); /* first scatter/gather segment is used by the tx data command */ desc->flags = htole32(WPI_PAD32(m0->m_pkthdr.len) << 28 | (1 + nsegs) << 24); desc->segs[0].addr = htole32(ring->cmd_dma.paddr + ring->cur * sizeof (struct wpi_tx_cmd)); desc->segs[0].len = htole32(4 + sizeof (struct wpi_cmd_data)); for (i = 1; i <= nsegs; i++) { desc->segs[i].addr = htole32(segs[i - 1].ds_addr); desc->segs[i].len = htole32(segs[i - 1].ds_len); } bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, BUS_DMASYNC_PREWRITE); ring->queued++; /* kick ring */ ring->cur = (ring->cur + 1) % WPI_TX_RING_COUNT; WPI_WRITE(sc, WPI_TX_WIDX, ring->qid << 8 | ring->cur); return 0; } /** * Process data waiting to be sent on the IFNET output queue */ static void wpi_start(struct ifnet *ifp) { struct wpi_softc *sc = ifp->if_softc; WPI_LOCK(sc); wpi_start_locked(ifp); WPI_UNLOCK(sc); } static void wpi_start_locked(struct ifnet *ifp) { struct wpi_softc *sc = ifp->if_softc; struct ieee80211_node *ni; struct mbuf *m; int ac; WPI_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; for (;;) { IFQ_POLL(&ifp->if_snd, m); if (m == NULL) break; /* no QoS encapsulation for EAPOL frames */ ac = M_WME_GETAC(m); if (sc->txq[ac].queued > sc->txq[ac].count - 8) { /* there is no place left in this ring */ IFQ_DRV_PREPEND(&ifp->if_snd, m); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } IFQ_DRV_DEQUEUE(&ifp->if_snd, m); ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; m = ieee80211_encap(ni, m); if (m == NULL) { ieee80211_free_node(ni); ifp->if_oerrors++; continue; } if (wpi_tx_data(sc, m, ni, ac) != 0) { ieee80211_free_node(ni); ifp->if_oerrors++; break; } sc->sc_tx_timer = 5; } } static int wpi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ifnet *ifp = ic->ic_ifp; struct wpi_softc *sc = ifp->if_softc; /* prevent management frames from being sent if we're not ready */ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { m_freem(m); ieee80211_free_node(ni); return ENETDOWN; } WPI_LOCK(sc); /* management frames go into ring 0 */ if (sc->txq[0].queued > sc->txq[0].count - 8) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; m_freem(m); WPI_UNLOCK(sc); ieee80211_free_node(ni); return ENOBUFS; /* XXX */ } ifp->if_opackets++; if (wpi_tx_data(sc, m, ni, 0) != 0) goto bad; sc->sc_tx_timer = 5; callout_reset(&sc->watchdog_to, hz, wpi_watchdog, sc); WPI_UNLOCK(sc); return 0; bad: ifp->if_oerrors++; WPI_UNLOCK(sc); ieee80211_free_node(ni); return EIO; /* XXX */ } static int wpi_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct wpi_softc *sc = ifp->if_softc; struct ieee80211com *ic = ifp->if_l2com; struct ifreq *ifr = (struct ifreq *) data; int error = 0, startall = 0; switch (cmd) { case SIOCSIFFLAGS: WPI_LOCK(sc); if ((ifp->if_flags & IFF_UP)) { if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { wpi_init_locked(sc, 0); startall = 1; } } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) || (sc->flags & WPI_FLAG_HW_RADIO_OFF)) wpi_stop_locked(sc); WPI_UNLOCK(sc); if (startall) ieee80211_start_all(ic); break; case SIOCGIFMEDIA: error = 
ifmedia_ioctl(ifp, ifr, &ic->ic_media, cmd); break; case SIOCGIFADDR: error = ether_ioctl(ifp, cmd, data); break; default: error = EINVAL; break; } return error; } /* * Extract various information from EEPROM. */ static void wpi_read_eeprom(struct wpi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; int i; /* read the hardware capabilities, revision and SKU type */ wpi_read_prom_data(sc, WPI_EEPROM_CAPABILITIES, &sc->cap,1); wpi_read_prom_data(sc, WPI_EEPROM_REVISION, &sc->rev,2); wpi_read_prom_data(sc, WPI_EEPROM_TYPE, &sc->type, 1); /* read the regulatory domain */ wpi_read_prom_data(sc, WPI_EEPROM_DOMAIN, sc->domain, 4); /* read in the hw MAC address */ wpi_read_prom_data(sc, WPI_EEPROM_MAC, ic->ic_myaddr, 6); /* read the list of authorized channels */ for (i = 0; i < WPI_CHAN_BANDS_COUNT; i++) wpi_read_eeprom_channels(sc,i); /* read the power level calibration info for each group */ for (i = 0; i < WPI_POWER_GROUPS_COUNT; i++) wpi_read_eeprom_group(sc,i); } /* * Send a command to the firmware. */ static int wpi_cmd(struct wpi_softc *sc, int code, const void *buf, int size, int async) { struct wpi_tx_ring *ring = &sc->cmdq; struct wpi_tx_desc *desc; struct wpi_tx_cmd *cmd; #ifdef WPI_DEBUG if (!async) { WPI_LOCK_ASSERT(sc); } #endif DPRINTFN(WPI_DEBUG_CMD,("wpi_cmd %d size %d async %d\n", code, size, async)); if (sc->flags & WPI_FLAG_BUSY) { device_printf(sc->sc_dev, "%s: cmd %d not sent, busy\n", __func__, code); return EAGAIN; } sc->flags|= WPI_FLAG_BUSY; KASSERT(size <= sizeof cmd->data, ("command %d too large: %d bytes", code, size)); desc = &ring->desc[ring->cur]; cmd = &ring->cmd[ring->cur]; cmd->code = code; cmd->flags = 0; cmd->qid = ring->qid; cmd->idx = ring->cur; memcpy(cmd->data, buf, size); desc->flags = htole32(WPI_PAD32(size) << 28 | 1 << 24); desc->segs[0].addr = htole32(ring->cmd_dma.paddr + ring->cur * sizeof (struct wpi_tx_cmd)); desc->segs[0].len = htole32(4 + size); /* kick cmd ring */ ring->cur = (ring->cur + 1) % WPI_CMD_RING_COUNT; WPI_WRITE(sc, WPI_TX_WIDX, ring->qid << 8 | ring->cur); if (async) { sc->flags &= ~ WPI_FLAG_BUSY; return 0; } return msleep(cmd, &sc->sc_mtx, PCATCH, "wpicmd", hz); } static int wpi_wme_update(struct ieee80211com *ic) { #define WPI_EXP2(v) htole16((1 << (v)) - 1) #define WPI_USEC(v) htole16(IEEE80211_TXOP_TO_US(v)) struct wpi_softc *sc = ic->ic_ifp->if_softc; const struct wmeParams *wmep; struct wpi_wme_setup wme; int ac; /* don't override default WME values if WME is not actually enabled */ if (!(ic->ic_flags & IEEE80211_F_WME)) return 0; wme.flags = 0; for (ac = 0; ac < WME_NUM_AC; ac++) { wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; wme.ac[ac].aifsn = wmep->wmep_aifsn; wme.ac[ac].cwmin = WPI_EXP2(wmep->wmep_logcwmin); wme.ac[ac].cwmax = WPI_EXP2(wmep->wmep_logcwmax); wme.ac[ac].txop = WPI_USEC(wmep->wmep_txopLimit); DPRINTF(("setting WME for queue %d aifsn=%d cwmin=%d cwmax=%d " "txop=%d\n", ac, wme.ac[ac].aifsn, wme.ac[ac].cwmin, wme.ac[ac].cwmax, wme.ac[ac].txop)); } return wpi_cmd(sc, WPI_CMD_SET_WME, &wme, sizeof wme, 1); #undef WPI_USEC #undef WPI_EXP2 } /* * Configure h/w multi-rate retries. 
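 *
 * The firmware walks a per-rate fallback table: after 'ntries'
 * unacknowledged attempts at a rate it drops to that entry's 'next'
 * rate and tries again.  Schematically, the table built below is:
 *
 *	CCK:	11 -> 5.5 -> 2 -> 1 -> 1 ...
 *	OFDM:	54 -> 48 -> ... -> 9 -> 6 -> (11a ? 6 : CCK 2)
 *
 * with ntries = 1 for every entry, so each rate is tried once before
 * falling back.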
*/ static int wpi_mrr_setup(struct wpi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct wpi_mrr_setup mrr; int i, error; memset(&mrr, 0, sizeof (struct wpi_mrr_setup)); /* CCK rates (not used with 802.11a) */ for (i = WPI_CCK1; i <= WPI_CCK11; i++) { mrr.rates[i].flags = 0; mrr.rates[i].signal = wpi_ridx_to_plcp[i]; /* fallback to the immediate lower CCK rate (if any) */ mrr.rates[i].next = (i == WPI_CCK1) ? WPI_CCK1 : i - 1; /* try one time at this rate before falling back to "next" */ mrr.rates[i].ntries = 1; } /* OFDM rates (not used with 802.11b) */ for (i = WPI_OFDM6; i <= WPI_OFDM54; i++) { mrr.rates[i].flags = 0; mrr.rates[i].signal = wpi_ridx_to_plcp[i]; /* fallback to the immediate lower OFDM rate (if any) */ /* we allow fallback from OFDM/6 to CCK/2 in 11b/g mode */ mrr.rates[i].next = (i == WPI_OFDM6) ? ((ic->ic_curmode == IEEE80211_MODE_11A) ? WPI_OFDM6 : WPI_CCK2) : i - 1; /* try one time at this rate before falling back to "next" */ mrr.rates[i].ntries = 1; } /* setup MRR for control frames */ mrr.which = htole32(WPI_MRR_CTL); error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not setup MRR for control frames\n"); return error; } /* setup MRR for data frames */ mrr.which = htole32(WPI_MRR_DATA); error = wpi_cmd(sc, WPI_CMD_MRR_SETUP, &mrr, sizeof mrr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not setup MRR for data frames\n"); return error; } return 0; } static void wpi_set_led(struct wpi_softc *sc, uint8_t which, uint8_t off, uint8_t on) { struct wpi_cmd_led led; led.which = which; led.unit = htole32(100000); /* on/off in unit of 100ms */ led.off = off; led.on = on; (void)wpi_cmd(sc, WPI_CMD_SET_LED, &led, sizeof led, 1); } static void wpi_enable_tsf(struct wpi_softc *sc, struct ieee80211_node *ni) { struct wpi_cmd_tsf tsf; uint64_t val, mod; memset(&tsf, 0, sizeof tsf); memcpy(&tsf.tstamp, ni->ni_tstamp.data, 8); tsf.bintval = htole16(ni->ni_intval); tsf.lintval = htole16(10); /* compute remaining time until next beacon */ val = (uint64_t)ni->ni_intval * 1024; /* msec -> usec */ mod = le64toh(tsf.tstamp) % val; tsf.binitval = htole32((uint32_t)(val - mod)); if (wpi_cmd(sc, WPI_CMD_TSF, &tsf, sizeof tsf, 1) != 0) device_printf(sc->sc_dev, "could not enable TSF\n"); } #if 0 /* * Build a beacon frame that the firmware will broadcast periodically in * IBSS or HostAP modes. */ static int wpi_setup_beacon(struct wpi_softc *sc, struct ieee80211_node *ni) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct wpi_tx_ring *ring = &sc->cmdq; struct wpi_tx_desc *desc; struct wpi_tx_data *data; struct wpi_tx_cmd *cmd; struct wpi_cmd_beacon *bcn; struct ieee80211_beacon_offsets bo; struct mbuf *m0; bus_addr_t physaddr; int error; desc = &ring->desc[ring->cur]; data = &ring->data[ring->cur]; m0 = ieee80211_beacon_alloc(ic, ni, &bo); if (m0 == NULL) { device_printf(sc->sc_dev, "could not allocate beacon frame\n"); return ENOMEM; } cmd = &ring->cmd[ring->cur]; cmd->code = WPI_CMD_SET_BEACON; cmd->flags = 0; cmd->qid = ring->qid; cmd->idx = ring->cur; bcn = (struct wpi_cmd_beacon *)cmd->data; memset(bcn, 0, sizeof (struct wpi_cmd_beacon)); bcn->id = WPI_ID_BROADCAST; bcn->ofdm_mask = 0xff; bcn->cck_mask = 0x0f; bcn->lifetime = htole32(WPI_LIFETIME_INFINITE); bcn->len = htole16(m0->m_pkthdr.len); bcn->rate = (ic->ic_curmode == IEEE80211_MODE_11A) ? 
wpi_plcp_signal(12) : wpi_plcp_signal(2); bcn->flags = htole32(WPI_TX_AUTO_SEQ | WPI_TX_INSERT_TSTAMP); /* save and trim IEEE802.11 header */ m_copydata(m0, 0, sizeof (struct ieee80211_frame), (caddr_t)&bcn->wh); m_adj(m0, sizeof (struct ieee80211_frame)); /* assume beacon frame is contiguous */ error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m0, void *), m0->m_pkthdr.len, wpi_dma_map_addr, &physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map beacon\n"); m_freem(m0); return error; } data->m = m0; /* first scatter/gather segment is used by the beacon command */ desc->flags = htole32(WPI_PAD32(m0->m_pkthdr.len) << 28 | 2 << 24); desc->segs[0].addr = htole32(ring->cmd_dma.paddr + ring->cur * sizeof (struct wpi_tx_cmd)); desc->segs[0].len = htole32(4 + sizeof (struct wpi_cmd_beacon)); desc->segs[1].addr = htole32(physaddr); desc->segs[1].len = htole32(m0->m_pkthdr.len); /* kick cmd ring */ ring->cur = (ring->cur + 1) % WPI_CMD_RING_COUNT; WPI_WRITE(sc, WPI_TX_WIDX, ring->qid << 8 | ring->cur); return 0; } #endif static int wpi_auth(struct wpi_softc *sc, struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node *ni = vap->iv_bss; struct wpi_node_info node; int error; /* update adapter's configuration */ sc->config.associd = 0; sc->config.filter &= ~htole32(WPI_FILTER_BSS); IEEE80211_ADDR_COPY(sc->config.bssid, ni->ni_bssid); sc->config.chan = ieee80211_chan2ieee(ic, ni->ni_chan); if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) { sc->config.flags |= htole32(WPI_CONFIG_AUTO | WPI_CONFIG_24GHZ); } if (IEEE80211_IS_CHAN_A(ni->ni_chan)) { sc->config.cck_mask = 0; sc->config.ofdm_mask = 0x15; } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) { sc->config.cck_mask = 0x03; sc->config.ofdm_mask = 0; } else { /* XXX assume 802.11b/g */ sc->config.cck_mask = 0x0f; sc->config.ofdm_mask = 0x15; } DPRINTF(("config chan %d flags %x cck %x ofdm %x\n", sc->config.chan, sc->config.flags, sc->config.cck_mask, sc->config.ofdm_mask)); error = wpi_cmd(sc, WPI_CMD_CONFIGURE, &sc->config, sizeof (struct wpi_config), 1); if (error != 0) { device_printf(sc->sc_dev, "could not configure\n"); return error; } /* configuration has changed, set Tx power accordingly */ if ((error = wpi_set_txpower(sc, ni->ni_chan, 1)) != 0) { device_printf(sc->sc_dev, "could not set Tx power\n"); return error; } /* add default node */ memset(&node, 0, sizeof node); IEEE80211_ADDR_COPY(node.bssid, ni->ni_bssid); node.id = WPI_ID_BSS; node.rate = (ic->ic_curmode == IEEE80211_MODE_11A) ? 
wpi_plcp_signal(12) : wpi_plcp_signal(2); node.action = htole32(WPI_ACTION_SET_RATE); node.antenna = WPI_ANTENNA_BOTH; error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 1); if (error != 0) device_printf(sc->sc_dev, "could not add BSS node\n"); return (error); } static int wpi_run(struct wpi_softc *sc, struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node *ni = vap->iv_bss; int error; if (vap->iv_opmode == IEEE80211_M_MONITOR) { /* link LED blinks while monitoring */ wpi_set_led(sc, WPI_LED_LINK, 5, 5); return 0; } wpi_enable_tsf(sc, ni); /* update adapter's configuration */ sc->config.associd = htole16(ni->ni_associd & ~0xc000); /* short preamble/slot time are negotiated when associating */ sc->config.flags &= ~htole32(WPI_CONFIG_SHPREAMBLE | WPI_CONFIG_SHSLOT); if (ic->ic_flags & IEEE80211_F_SHSLOT) sc->config.flags |= htole32(WPI_CONFIG_SHSLOT); if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) sc->config.flags |= htole32(WPI_CONFIG_SHPREAMBLE); sc->config.filter |= htole32(WPI_FILTER_BSS); /* XXX put somewhere HC_QOS_SUPPORT_ASSOC + HC_IBSS_START */ DPRINTF(("config chan %d flags %x\n", sc->config.chan, sc->config.flags)); error = wpi_cmd(sc, WPI_CMD_CONFIGURE, &sc->config, sizeof (struct wpi_config), 1); if (error != 0) { device_printf(sc->sc_dev, "could not update configuration\n"); return error; } error = wpi_set_txpower(sc, ni->ni_chan, 1); if (error != 0) { device_printf(sc->sc_dev, "could set txpower\n"); return error; } if (vap->iv_opmode == IEEE80211_M_STA) { /* fake a join to init the tx rate */ wpi_newassoc(ni, 1); } /* link LED always on while associated */ wpi_set_led(sc, WPI_LED_LINK, 0, 1); /* start automatic rate control timer */ callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); return (error); } /* * Send a scan request to the firmware. Since this command is huge, we map it * into a mbufcluster instead of using the pre-allocated set of commands. Note, * much of this code is similar to that in wpi_cmd but because we must manually * construct the probe & channels, we duplicate what's needed here. XXX In the * future, this function should be modified to use wpi_cmd to help cleanup the * code base. */ static int wpi_scan(struct wpi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211_scan_state *ss = ic->ic_scan; struct wpi_tx_ring *ring = &sc->cmdq; struct wpi_tx_desc *desc; struct wpi_tx_data *data; struct wpi_tx_cmd *cmd; struct wpi_scan_hdr *hdr; struct wpi_scan_chan *chan; struct ieee80211_frame *wh; struct ieee80211_rateset *rs; struct ieee80211_channel *c; enum ieee80211_phymode mode; uint8_t *frm; int nrates, pktlen, error, i, nssid; bus_addr_t physaddr; desc = &ring->desc[ring->cur]; data = &ring->data[ring->cur]; data->m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); if (data->m == NULL) { device_printf(sc->sc_dev, "could not allocate mbuf for scan command\n"); return ENOMEM; } cmd = mtod(data->m, struct wpi_tx_cmd *); cmd->code = WPI_CMD_SCAN; cmd->flags = 0; cmd->qid = ring->qid; cmd->idx = ring->cur; hdr = (struct wpi_scan_hdr *)cmd->data; memset(hdr, 0, sizeof(struct wpi_scan_hdr)); /* * Move to the next channel if no packets are received within 5 msecs * after sending the probe request (this helps to reduce the duration * of active scans). 
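 * (hdr->quiet below carries that 5 msec value.)  The command as a
 * whole is assembled in the mbuf cluster with this rough layout,
 * filled in by the code that follows:
 *
 *	struct wpi_scan_hdr		flags, tx params, quiet time
 *	hdr->scan_essids[]		directed SSID slots
 *	struct ieee80211_frame + IEs	probe request template
 *	struct wpi_scan_chan (per chan)	dwell times, gain settings
 *
 * hdr->tx.len is patched to cover the probe request and hdr->len to
 * cover everything once the channel list has been appended.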
*/ hdr->quiet = htole16(5); hdr->threshold = htole16(1); if (IEEE80211_IS_CHAN_A(ic->ic_curchan)) { /* send probe requests at 6Mbps */ hdr->tx.rate = wpi_ridx_to_plcp[WPI_OFDM6]; /* Enable crc checking */ hdr->promotion = htole16(1); } else { hdr->flags = htole32(WPI_CONFIG_24GHZ | WPI_CONFIG_AUTO); /* send probe requests at 1Mbps */ hdr->tx.rate = wpi_ridx_to_plcp[WPI_CCK1]; } hdr->tx.id = WPI_ID_BROADCAST; hdr->tx.lifetime = htole32(WPI_LIFETIME_INFINITE); hdr->tx.flags = htole32(WPI_TX_AUTO_SEQ); memset(hdr->scan_essids, 0, sizeof(hdr->scan_essids)); nssid = MIN(ss->ss_nssid, WPI_SCAN_MAX_ESSIDS); for (i = 0; i < nssid; i++) { hdr->scan_essids[i].id = IEEE80211_ELEMID_SSID; hdr->scan_essids[i].esslen = MIN(ss->ss_ssid[i].len, 32); memcpy(hdr->scan_essids[i].essid, ss->ss_ssid[i].ssid, hdr->scan_essids[i].esslen); if (wpi_debug & WPI_DEBUG_SCANNING) { printf("Scanning Essid: "); ieee80211_print_essid(hdr->scan_essids[i].essid, hdr->scan_essids[i].esslen); printf("\n"); } } /* * Build a probe request frame. Most of the following code is a * copy & paste of what is done in net80211. */ wh = (struct ieee80211_frame *)&hdr->scan_essids[4]; wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_REQ; wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; IEEE80211_ADDR_COPY(wh->i_addr1, ifp->if_broadcastaddr); IEEE80211_ADDR_COPY(wh->i_addr2, ic->ic_myaddr); IEEE80211_ADDR_COPY(wh->i_addr3, ifp->if_broadcastaddr); *(u_int16_t *)&wh->i_dur[0] = 0; /* filled by h/w */ *(u_int16_t *)&wh->i_seq[0] = 0; /* filled by h/w */ frm = (uint8_t *)(wh + 1); /* add essid IE, the hardware will fill this in for us */ *frm++ = IEEE80211_ELEMID_SSID; *frm++ = 0; mode = ieee80211_chan2mode(ic->ic_curchan); rs = &ic->ic_sup_rates[mode]; /* add supported rates IE */ *frm++ = IEEE80211_ELEMID_RATES; nrates = rs->rs_nrates; if (nrates > IEEE80211_RATE_SIZE) nrates = IEEE80211_RATE_SIZE; *frm++ = nrates; memcpy(frm, rs->rs_rates, nrates); frm += nrates; /* add supported xrates IE */ if (rs->rs_nrates > IEEE80211_RATE_SIZE) { nrates = rs->rs_nrates - IEEE80211_RATE_SIZE; *frm++ = IEEE80211_ELEMID_XRATES; *frm++ = nrates; memcpy(frm, rs->rs_rates + IEEE80211_RATE_SIZE, nrates); frm += nrates; } /* setup length of probe request */ hdr->tx.len = htole16(frm - (uint8_t *)wh); /* * Construct information about the channel that we * want to scan. The firmware expects this to be directly * after the scan probe request */ c = ic->ic_curchan; chan = (struct wpi_scan_chan *)frm; chan->chan = ieee80211_chan2ieee(ic, c); chan->flags = 0; if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) { chan->flags |= WPI_CHAN_ACTIVE; if (nssid != 0) chan->flags |= WPI_CHAN_DIRECT; } chan->gain_dsp = 0x6e; /* Default level */ if (IEEE80211_IS_CHAN_5GHZ(c)) { chan->active = htole16(10); chan->passive = htole16(ss->ss_maxdwell); chan->gain_radio = 0x3b; } else { chan->active = htole16(20); chan->passive = htole16(ss->ss_maxdwell); chan->gain_radio = 0x28; } DPRINTFN(WPI_DEBUG_SCANNING, ("Scanning %u Passive: %d\n", chan->chan, c->ic_flags & IEEE80211_CHAN_PASSIVE)); hdr->nchan++; chan++; frm += sizeof (struct wpi_scan_chan); #if 0 // XXX All Channels.... 
for (c = &ic->ic_channels[1]; c <= &ic->ic_channels[IEEE80211_CHAN_MAX]; c++) { if ((c->ic_flags & ic->ic_curchan->ic_flags) != ic->ic_curchan->ic_flags) continue; chan->chan = ieee80211_chan2ieee(ic, c); chan->flags = 0; if (!(c->ic_flags & IEEE80211_CHAN_PASSIVE)) { chan->flags |= WPI_CHAN_ACTIVE; if (ic->ic_des_ssid[0].len != 0) chan->flags |= WPI_CHAN_DIRECT; } chan->gain_dsp = 0x6e; /* Default level */ if (IEEE80211_IS_CHAN_5GHZ(c)) { chan->active = htole16(10); chan->passive = htole16(110); chan->gain_radio = 0x3b; } else { chan->active = htole16(20); chan->passive = htole16(120); chan->gain_radio = 0x28; } DPRINTFN(WPI_DEBUG_SCANNING, ("Scanning %u Passive: %d\n", chan->chan, c->ic_flags & IEEE80211_CHAN_PASSIVE)); hdr->nchan++; chan++; frm += sizeof (struct wpi_scan_chan); } #endif hdr->len = htole16(frm - (uint8_t *)hdr); pktlen = frm - (uint8_t *)cmd; error = bus_dmamap_load(ring->data_dmat, data->map, cmd, pktlen, wpi_dma_map_addr, &physaddr, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "could not map scan command\n"); m_freem(data->m); data->m = NULL; return error; } desc->flags = htole32(WPI_PAD32(pktlen) << 28 | 1 << 24); desc->segs[0].addr = htole32(physaddr); desc->segs[0].len = htole32(pktlen); bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); /* kick cmd ring */ ring->cur = (ring->cur + 1) % WPI_CMD_RING_COUNT; WPI_WRITE(sc, WPI_TX_WIDX, ring->qid << 8 | ring->cur); sc->sc_scan_timer = 5; return 0; /* will be notified async. of failure/success */ } /** * Configure the card to listen to a particular channel, this transisions the * card in to being able to receive frames from remote devices. */ static int wpi_config(struct wpi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct wpi_power power; struct wpi_bluetooth bluetooth; struct wpi_node_info node; int error; /* set power mode */ memset(&power, 0, sizeof power); power.flags = htole32(WPI_POWER_CAM|0x8); error = wpi_cmd(sc, WPI_CMD_SET_POWER_MODE, &power, sizeof power, 0); if (error != 0) { device_printf(sc->sc_dev, "could not set power mode\n"); return error; } /* configure bluetooth coexistence */ memset(&bluetooth, 0, sizeof bluetooth); bluetooth.flags = 3; bluetooth.lead = 0xaa; bluetooth.kill = 1; error = wpi_cmd(sc, WPI_CMD_BLUETOOTH, &bluetooth, sizeof bluetooth, 0); if (error != 0) { device_printf(sc->sc_dev, "could not configure bluetooth coexistence\n"); return error; } /* configure adapter */ memset(&sc->config, 0, sizeof (struct wpi_config)); IEEE80211_ADDR_COPY(sc->config.myaddr, ic->ic_myaddr); /*set default channel*/ sc->config.chan = htole16(ieee80211_chan2ieee(ic, ic->ic_curchan)); sc->config.flags = htole32(WPI_CONFIG_TSF); if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { sc->config.flags |= htole32(WPI_CONFIG_AUTO | WPI_CONFIG_24GHZ); } sc->config.filter = 0; switch (ic->ic_opmode) { case IEEE80211_M_STA: case IEEE80211_M_WDS: /* No know setup, use STA for now */ sc->config.mode = WPI_MODE_STA; sc->config.filter |= htole32(WPI_FILTER_MULTICAST); break; case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: sc->config.mode = WPI_MODE_IBSS; sc->config.filter |= htole32(WPI_FILTER_BEACON | WPI_FILTER_MULTICAST); break; case IEEE80211_M_HOSTAP: sc->config.mode = WPI_MODE_HOSTAP; break; case IEEE80211_M_MONITOR: sc->config.mode = WPI_MODE_MONITOR; sc->config.filter |= htole32(WPI_FILTER_MULTICAST | WPI_FILTER_CTL | WPI_FILTER_PROMISC); break; } sc->config.cck_mask = 
sc->config.cck_mask = 0x0f; /* not yet negotiated */ sc->config.ofdm_mask = 0xff; /* not yet negotiated */ error = wpi_cmd(sc, WPI_CMD_CONFIGURE, &sc->config, sizeof (struct wpi_config), 0); if (error != 0) { device_printf(sc->sc_dev, "configure command failed\n"); return error; } /* configuration has changed, set Tx power accordingly */ if ((error = wpi_set_txpower(sc, ic->ic_curchan, 0)) != 0) { device_printf(sc->sc_dev, "could not set Tx power\n"); return error; } /* add broadcast node */ memset(&node, 0, sizeof node); IEEE80211_ADDR_COPY(node.bssid, ifp->if_broadcastaddr); node.id = WPI_ID_BROADCAST; node.rate = wpi_plcp_signal(2); error = wpi_cmd(sc, WPI_CMD_ADD_NODE, &node, sizeof node, 0); if (error != 0) { device_printf(sc->sc_dev, "could not add broadcast node\n"); return error; } /* Set up rate scaling */ error = wpi_mrr_setup(sc); if (error != 0) { device_printf(sc->sc_dev, "could not setup MRR\n"); return error; } return 0; } static void wpi_stop_master(struct wpi_softc *sc) { uint32_t tmp; int ntries; DPRINTFN(WPI_DEBUG_HW,("Disabling Firmware execution\n")); tmp = WPI_READ(sc, WPI_RESET); WPI_WRITE(sc, WPI_RESET, tmp | WPI_STOP_MASTER | WPI_NEVO_RESET); tmp = WPI_READ(sc, WPI_GPIO_CTL); if ((tmp & WPI_GPIO_PWR_STATUS) == WPI_GPIO_PWR_SLEEP) return; /* already asleep */ for (ntries = 0; ntries < 100; ntries++) { if (WPI_READ(sc, WPI_RESET) & WPI_MASTER_DISABLED) break; DELAY(10); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for master\n"); } } static int wpi_power_up(struct wpi_softc *sc) { uint32_t tmp; int ntries; wpi_mem_lock(sc); tmp = wpi_mem_read(sc, WPI_MEM_POWER); wpi_mem_write(sc, WPI_MEM_POWER, tmp & ~0x03000000); wpi_mem_unlock(sc); for (ntries = 0; ntries < 5000; ntries++) { if (WPI_READ(sc, WPI_GPIO_STATUS) & WPI_POWERED) break; DELAY(10); } if (ntries == 5000) { device_printf(sc->sc_dev, "timeout waiting for NIC to power up\n"); return ETIMEDOUT; } return 0; } static int wpi_reset(struct wpi_softc *sc) { uint32_t tmp; int ntries; DPRINTFN(WPI_DEBUG_HW, ("Resetting the card - clearing any uploaded firmware\n")); /* clear any pending interrupts */ WPI_WRITE(sc, WPI_INTR, 0xffffffff); tmp = WPI_READ(sc, WPI_PLL_CTL); WPI_WRITE(sc, WPI_PLL_CTL, tmp | WPI_PLL_INIT); tmp = WPI_READ(sc, WPI_CHICKEN); WPI_WRITE(sc, WPI_CHICKEN, tmp | WPI_CHICKEN_RXNOLOS); tmp = WPI_READ(sc, WPI_GPIO_CTL); WPI_WRITE(sc, WPI_GPIO_CTL, tmp | WPI_GPIO_INIT); /* wait for clock stabilization */ for (ntries = 0; ntries < 25000; ntries++) { if (WPI_READ(sc, WPI_GPIO_CTL) & WPI_GPIO_CLOCK) break; DELAY(10); } if (ntries == 25000) { device_printf(sc->sc_dev, "timeout waiting for clock stabilization\n"); return ETIMEDOUT; } /* initialize EEPROM */ tmp = WPI_READ(sc, WPI_EEPROM_STATUS); if ((tmp & WPI_EEPROM_VERSION) == 0) { device_printf(sc->sc_dev, "EEPROM not found\n"); return EIO; } WPI_WRITE(sc, WPI_EEPROM_STATUS, tmp & ~WPI_EEPROM_LOCKED); return 0; } static void wpi_hw_config(struct wpi_softc *sc) { uint32_t rev, hw; /* voodoo from the Linux "driver"..
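 * As far as can be read from the code below: the PCI revision ID picks the
 * ALM MB/MM board flavour, sc->cap selects the MRC SKU, the EEPROM revision
 * marks rev-D silicon and sc->type selects a type-B radio; the merged bits
 * are written back to WPI_HWCONFIG.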
*/ hw = WPI_READ(sc, WPI_HWCONFIG); rev = pci_read_config(sc->sc_dev, PCIR_REVID, 1); if ((rev & 0xc0) == 0x40) hw |= WPI_HW_ALM_MB; else if (!(rev & 0x80)) hw |= WPI_HW_ALM_MM; if (sc->cap == 0x80) hw |= WPI_HW_SKU_MRC; hw &= ~WPI_HW_REV_D; if ((le16toh(sc->rev) & 0xf0) == 0xd0) hw |= WPI_HW_REV_D; if (sc->type > 1) hw |= WPI_HW_TYPE_B; WPI_WRITE(sc, WPI_HWCONFIG, hw); } static void wpi_rfkill_resume(struct wpi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); int ntries; /* enable firmware again */ WPI_WRITE(sc, WPI_UCODE_CLR, WPI_RADIO_OFF); WPI_WRITE(sc, WPI_UCODE_CLR, WPI_DISABLE_CMD); /* wait for thermal sensors to calibrate */ for (ntries = 0; ntries < 1000; ntries++) { if ((sc->temp = (int)WPI_READ(sc, WPI_TEMPERATURE)) != 0) break; DELAY(10); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for thermal calibration\n"); WPI_UNLOCK(sc); return; } DPRINTFN(WPI_DEBUG_TEMP,("temperature %d\n", sc->temp)); if (wpi_config(sc) != 0) { device_printf(sc->sc_dev, "device config failed\n"); WPI_UNLOCK(sc); return; } ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_drv_flags |= IFF_DRV_RUNNING; sc->flags &= ~WPI_FLAG_HW_RADIO_OFF; if (vap != NULL) { if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) { if (vap->iv_opmode != IEEE80211_M_MONITOR) { taskqueue_enqueue(taskqueue_swi, &sc->sc_bmiss_task); wpi_set_led(sc, WPI_LED_LINK, 0, 1); } else wpi_set_led(sc, WPI_LED_LINK, 5, 5); } else { ieee80211_scan_next(vap); wpi_set_led(sc, WPI_LED_LINK, 20, 2); } } callout_reset(&sc->watchdog_to, hz, wpi_watchdog, sc); } static void wpi_init_locked(struct wpi_softc *sc, int force) { struct ifnet *ifp = sc->sc_ifp; uint32_t tmp; int ntries, qid; wpi_stop_locked(sc); (void)wpi_reset(sc); wpi_mem_lock(sc); wpi_mem_write(sc, WPI_MEM_CLOCK1, 0xa00); DELAY(20); tmp = wpi_mem_read(sc, WPI_MEM_PCIDEV); wpi_mem_write(sc, WPI_MEM_PCIDEV, tmp | 0x800); wpi_mem_unlock(sc); (void)wpi_power_up(sc); wpi_hw_config(sc); /* init Rx ring */ wpi_mem_lock(sc); WPI_WRITE(sc, WPI_RX_BASE, sc->rxq.desc_dma.paddr); WPI_WRITE(sc, WPI_RX_RIDX_PTR, sc->shared_dma.paddr + offsetof(struct wpi_shared, next)); WPI_WRITE(sc, WPI_RX_WIDX, (WPI_RX_RING_COUNT - 1) & ~7); WPI_WRITE(sc, WPI_RX_CONFIG, 0xa9601010); wpi_mem_unlock(sc); /* init Tx rings */ wpi_mem_lock(sc); wpi_mem_write(sc, WPI_MEM_MODE, 2); /* bypass mode */ wpi_mem_write(sc, WPI_MEM_RA, 1); /* enable RA0 */ wpi_mem_write(sc, WPI_MEM_TXCFG, 0x3f); /* enable all 6 Tx rings */ wpi_mem_write(sc, WPI_MEM_BYPASS1, 0x10000); wpi_mem_write(sc, WPI_MEM_BYPASS2, 0x30002); wpi_mem_write(sc, WPI_MEM_MAGIC4, 4); wpi_mem_write(sc, WPI_MEM_MAGIC5, 5); WPI_WRITE(sc, WPI_TX_BASE_PTR, sc->shared_dma.paddr); WPI_WRITE(sc, WPI_MSG_CONFIG, 0xffff05a5); for (qid = 0; qid < 6; qid++) { WPI_WRITE(sc, WPI_TX_CTL(qid), 0); WPI_WRITE(sc, WPI_TX_BASE(qid), 0); WPI_WRITE(sc, WPI_TX_CONFIG(qid), 0x80200008); } wpi_mem_unlock(sc); /* clear "radio off" and "disable command" bits (reversed logic) */ WPI_WRITE(sc, WPI_UCODE_CLR, WPI_RADIO_OFF); WPI_WRITE(sc, WPI_UCODE_CLR, WPI_DISABLE_CMD); sc->flags &= ~WPI_FLAG_HW_RADIO_OFF; /* clear any pending interrupts */ WPI_WRITE(sc, WPI_INTR, 0xffffffff); /* enable interrupts */ WPI_WRITE(sc, WPI_MASK, WPI_INTR_MASK); WPI_WRITE(sc, WPI_UCODE_CLR, WPI_RADIO_OFF); WPI_WRITE(sc, WPI_UCODE_CLR, WPI_RADIO_OFF); if ((wpi_load_firmware(sc)) != 0) { device_printf(sc->sc_dev, "A problem occurred loading the firmware to the driver\n"); return; } /* At this point the firmware is 
up and running. If the hardware * RF switch is turned off thermal calibration will fail, though * the card is still happy to continue to accept commands, catch * this case and schedule a task to watch for it to be turned on. */ wpi_mem_lock(sc); tmp = wpi_mem_read(sc, WPI_MEM_HW_RADIO_OFF); wpi_mem_unlock(sc); if (!(tmp & 0x1)) { sc->flags |= WPI_FLAG_HW_RADIO_OFF; device_printf(sc->sc_dev,"Radio Transmitter is switched off\n"); goto out; } /* wait for thermal sensors to calibrate */ for (ntries = 0; ntries < 1000; ntries++) { if ((sc->temp = (int)WPI_READ(sc, WPI_TEMPERATURE)) != 0) break; DELAY(10); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for thermal sensors calibration\n"); return; } DPRINTFN(WPI_DEBUG_TEMP,("temperature %d\n", sc->temp)); if (wpi_config(sc) != 0) { device_printf(sc->sc_dev, "device config failed\n"); return; } ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_drv_flags |= IFF_DRV_RUNNING; out: callout_reset(&sc->watchdog_to, hz, wpi_watchdog, sc); } static void wpi_init(void *arg) { struct wpi_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; WPI_LOCK(sc); wpi_init_locked(sc, 0); WPI_UNLOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) ieee80211_start_all(ic); /* start all vaps */ } static void wpi_stop_locked(struct wpi_softc *sc) { struct ifnet *ifp = sc->sc_ifp; uint32_t tmp; int ac; sc->sc_tx_timer = 0; sc->sc_scan_timer = 0; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); sc->flags &= ~WPI_FLAG_HW_RADIO_OFF; callout_stop(&sc->watchdog_to); callout_stop(&sc->calib_to); /* disable interrupts */ WPI_WRITE(sc, WPI_MASK, 0); WPI_WRITE(sc, WPI_INTR, WPI_INTR_MASK); WPI_WRITE(sc, WPI_INTR_STATUS, 0xff); WPI_WRITE(sc, WPI_INTR_STATUS, 0x00070000); /* Clear any commands left in the command buffer */ memset(sc->sc_cmd, 0, sizeof(sc->sc_cmd)); memset(sc->sc_cmd_arg, 0, sizeof(sc->sc_cmd_arg)); sc->sc_cmd_cur = 0; sc->sc_cmd_next = 0; wpi_mem_lock(sc); wpi_mem_write(sc, WPI_MEM_MODE, 0); wpi_mem_unlock(sc); /* reset all Tx rings */ for (ac = 0; ac < 4; ac++) wpi_reset_tx_ring(sc, &sc->txq[ac]); wpi_reset_tx_ring(sc, &sc->cmdq); /* reset Rx ring */ wpi_reset_rx_ring(sc, &sc->rxq); wpi_mem_lock(sc); wpi_mem_write(sc, WPI_MEM_CLOCK2, 0x200); wpi_mem_unlock(sc); DELAY(5); wpi_stop_master(sc); tmp = WPI_READ(sc, WPI_RESET); WPI_WRITE(sc, WPI_RESET, tmp | WPI_SW_RESET); sc->flags &= ~WPI_FLAG_BUSY; } static void wpi_stop(struct wpi_softc *sc) { WPI_LOCK(sc); wpi_stop_locked(sc); WPI_UNLOCK(sc); } static void wpi_newassoc(struct ieee80211_node *ni, int isnew) { struct ieee80211vap *vap = ni->ni_vap; struct wpi_vap *wvp = WPI_VAP(vap); ieee80211_amrr_node_init(&wvp->amrr, &WPI_NODE(ni)->amn, ni); } static void wpi_calib_timeout(void *arg) { struct wpi_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); int temp; if (vap->iv_state != IEEE80211_S_RUN) return; /* update sensor data */ temp = (int)WPI_READ(sc, WPI_TEMPERATURE); DPRINTFN(WPI_DEBUG_TEMP,("Temp in calibration is: %d\n", temp)); wpi_power_calibration(sc, temp); callout_reset(&sc->calib_to, 60*hz, wpi_calib_timeout, sc); } /* * This function is called periodically (every 60 seconds) to adjust output * power to temperature changes. 
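 * The adjustment is deliberately lazy: readings outside the plausible
 * -260..+25 range are ignored, and a new WPI_CMD_TXPOWER is only issued once
 * the sensor has drifted by more than 6 units from the reading used for the
 * previous calibration (e.g. 10 -> 17 triggers an update, 10 -> 15 does not).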
*/ static void wpi_power_calibration(struct wpi_softc *sc, int temp) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); /* sanity-check read value */ if (temp < -260 || temp > 25) { /* this can't be correct, ignore */ DPRINTFN(WPI_DEBUG_TEMP, ("out-of-range temperature reported: %d\n", temp)); return; } DPRINTFN(WPI_DEBUG_TEMP,("temperature %d->%d\n", sc->temp, temp)); /* adjust Tx power if need be */ if (abs(temp - sc->temp) <= 6) return; sc->temp = temp; if (wpi_set_txpower(sc, vap->iv_bss->ni_chan, 1) != 0) { /* just warn, too bad for the automatic calibration... */ device_printf(sc->sc_dev,"could not adjust Tx power\n"); } } /** * Read the eeprom to find out what channels are valid for the given * band and update net80211 with what we find. */ static void wpi_read_eeprom_channels(struct wpi_softc *sc, int n) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; const struct wpi_chan_band *band = &wpi_bands[n]; struct wpi_eeprom_chan channels[WPI_MAX_CHAN_PER_BAND]; int chan, i, offset, passive; wpi_read_prom_data(sc, band->addr, channels, band->nchan * sizeof (struct wpi_eeprom_chan)); for (i = 0; i < band->nchan; i++) { if (!(channels[i].flags & WPI_EEPROM_CHAN_VALID)) { DPRINTFN(WPI_DEBUG_HW, ("Channel Not Valid: %d, band %d\n", band->chan[i],n)); continue; } passive = 0; chan = band->chan[i]; offset = ic->ic_nchans; /* is active scan allowed on this channel? */ if (!(channels[i].flags & WPI_EEPROM_CHAN_ACTIVE)) { passive = IEEE80211_CHAN_PASSIVE; } if (n == 0) { /* 2GHz band */ ic->ic_channels[offset].ic_ieee = chan; ic->ic_channels[offset].ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_2GHZ); ic->ic_channels[offset].ic_flags = IEEE80211_CHAN_B | passive; offset++; ic->ic_channels[offset].ic_ieee = chan; ic->ic_channels[offset].ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_2GHZ); ic->ic_channels[offset].ic_flags = IEEE80211_CHAN_G | passive; offset++; } else { /* 5GHz band */ /* * Some 3945ABG adapters support channels 7, 8, 11 * and 12 in the 2GHz *and* 5GHz bands. * Because of limitations in our net80211(9) stack, * we can't support these channels in 5GHz band. * XXX not true; just need to map to proper frequency */ if (chan <= 14) continue; ic->ic_channels[offset].ic_ieee = chan; ic->ic_channels[offset].ic_freq = ieee80211_ieee2mhz(chan, IEEE80211_CHAN_5GHZ); ic->ic_channels[offset].ic_flags = IEEE80211_CHAN_A | passive; offset++; } /* save maximum allowed power for this channel */ sc->maxpwr[chan] = channels[i].maxpwr; ic->ic_nchans = offset; #if 0 // XXX We can probably use this an get rid of maxpwr - ben 20070617 ic->ic_channels[chan].ic_maxpower = channels[i].maxpwr; //ic->ic_channels[chan].ic_minpower... //ic->ic_channels[chan].ic_maxregtxpower... 
#endif DPRINTF(("adding chan %d flags=0x%x maxpwr=%d, offset %d\n", chan, channels[i].flags, sc->maxpwr[chan], offset)); } } static void wpi_read_eeprom_group(struct wpi_softc *sc, int n) { struct wpi_power_group *group = &sc->groups[n]; struct wpi_eeprom_group rgroup; int i; wpi_read_prom_data(sc, WPI_EEPROM_POWER_GRP + n * 32, &rgroup, sizeof rgroup); /* save power group information */ group->chan = rgroup.chan; group->maxpwr = rgroup.maxpwr; /* temperature at which the samples were taken */ group->temp = (int16_t)le16toh(rgroup.temp); DPRINTF(("power group %d: chan=%d maxpwr=%d temp=%d\n", n, group->chan, group->maxpwr, group->temp)); for (i = 0; i < WPI_SAMPLES_COUNT; i++) { group->samples[i].index = rgroup.samples[i].index; group->samples[i].power = rgroup.samples[i].power; DPRINTF(("\tsample %d: index=%d power=%d\n", i, group->samples[i].index, group->samples[i].power)); } } /* * Update Tx power to match what is defined for channel `c'. */ static int wpi_set_txpower(struct wpi_softc *sc, struct ieee80211_channel *c, int async) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct wpi_power_group *group; struct wpi_cmd_txpower txpower; u_int chan; int i; /* get channel number */ chan = ieee80211_chan2ieee(ic, c); /* find the power group to which this channel belongs */ if (IEEE80211_IS_CHAN_5GHZ(c)) { for (group = &sc->groups[1]; group < &sc->groups[4]; group++) if (chan <= group->chan) break; } else group = &sc->groups[0]; memset(&txpower, 0, sizeof txpower); txpower.band = IEEE80211_IS_CHAN_5GHZ(c) ? 0 : 1; txpower.channel = htole16(chan); /* set Tx power for all OFDM and CCK rates */ for (i = 0; i <= 11 ; i++) { /* retrieve Tx power for this channel/rate combination */ int idx = wpi_get_power_index(sc, group, c, wpi_ridx_to_rate[i]); txpower.rates[i].rate = wpi_ridx_to_plcp[i]; if (IEEE80211_IS_CHAN_5GHZ(c)) { txpower.rates[i].gain_radio = wpi_rf_gain_5ghz[idx]; txpower.rates[i].gain_dsp = wpi_dsp_gain_5ghz[idx]; } else { txpower.rates[i].gain_radio = wpi_rf_gain_2ghz[idx]; txpower.rates[i].gain_dsp = wpi_dsp_gain_2ghz[idx]; } DPRINTFN(WPI_DEBUG_TEMP,("chan %d/rate %d: power index %d\n", chan, wpi_ridx_to_rate[i], idx)); } return wpi_cmd(sc, WPI_CMD_TXPOWER, &txpower, sizeof txpower, async); } /* * Determine Tx power index for a given channel/rate combination. * This takes into account the regulatory information from EEPROM and the * current temperature. */ static int wpi_get_power_index(struct wpi_softc *sc, struct wpi_power_group *group, struct ieee80211_channel *c, int rate) { /* fixed-point arithmetic division using a n-bit fractional part */ #define fdivround(a, b, n) \ ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) /* linear interpolation */ #define interpolate(x, x1, y1, x2, y2, n) \ ((y1) + fdivround(((x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct wpi_power_sample *sample; int pwr, idx; u_int chan; /* get channel number */ chan = ieee80211_chan2ieee(ic, c); /* default power is group's maximum power - 3dB */ pwr = group->maxpwr / 2; /* decrease power for highest OFDM rates to reduce distortion */ switch (rate) { case 72: /* 36Mb/s */ pwr -= IEEE80211_IS_CHAN_2GHZ(c) ? 0 : 5; break; case 96: /* 48Mb/s */ pwr -= IEEE80211_IS_CHAN_2GHZ(c) ? 7 : 10; break; case 108: /* 54Mb/s */ pwr -= IEEE80211_IS_CHAN_2GHZ(c) ? 
9 : 12; break; } /* never exceed channel's maximum allowed Tx power */ pwr = min(pwr, sc->maxpwr[chan]); /* retrieve power index into gain tables from samples */ for (sample = group->samples; sample < &group->samples[3]; sample++) if (pwr > sample[1].power) break; /* fixed-point linear interpolation using a 19-bit fractional part */ idx = interpolate(pwr, sample[0].power, sample[0].index, sample[1].power, sample[1].index, 19); /* * Adjust power index based on current temperature * - if colder than factory-calibrated: decrease output power * - if warmer than factory-calibrated: increase output power * (e.g. running 20 units warmer than the calibration temperature * subtracts 2 from the index) */ idx -= (sc->temp - group->temp) * 11 / 100; /* decrease power for CCK rates (-5dB) */ if (!WPI_RATE_IS_OFDM(rate)) idx += 10; /* keep power index in a valid range */ if (idx < 0) return 0; if (idx > WPI_MAX_PWR_INDEX) return WPI_MAX_PWR_INDEX; return idx; #undef interpolate #undef fdivround } /** * Called by net80211 framework to indicate that a scan * is starting. This function doesn't actually do the scan; * wpi_scan_curchan starts things off. This function is more * of an early warning from the framework that we should get ready * for the scan. */ static void wpi_scan_start(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct wpi_softc *sc = ifp->if_softc; wpi_queue_cmd(sc, WPI_SCAN_START, 0, WPI_QUEUE_NORMAL); } /** * Called by the net80211 framework; indicates that the * scan has ended. If there is a scan in progress on the card * then it should be aborted. */ static void wpi_scan_end(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct wpi_softc *sc = ifp->if_softc; wpi_queue_cmd(sc, WPI_SCAN_STOP, 0, WPI_QUEUE_NORMAL); } /** * Called by the net80211 framework to indicate to the driver * that the channel should be changed. */ static void wpi_set_channel(struct ieee80211com *ic) { struct ifnet *ifp = ic->ic_ifp; struct wpi_softc *sc = ifp->if_softc; /* * Only need to set the channel in Monitor mode. AP scanning and auth * are already taken care of by their respective firmware commands. */ if (ic->ic_opmode == IEEE80211_M_MONITOR) wpi_queue_cmd(sc, WPI_SET_CHAN, 0, WPI_QUEUE_NORMAL); } /** * Called by net80211 to indicate that we need to scan the current * channel. The channel has previously been set via the wpi_set_channel * callback. */ static void wpi_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) { struct ieee80211vap *vap = ss->ss_vap; struct ifnet *ifp = vap->iv_ic->ic_ifp; struct wpi_softc *sc = ifp->if_softc; wpi_queue_cmd(sc, WPI_SCAN_CURCHAN, 0, WPI_QUEUE_NORMAL); } /** * Called by the net80211 framework to indicate * the minimum dwell time has been met; terminate the scan. * We don't actually terminate the scan as the firmware will notify * us when it's finished and we have no way to interrupt it. */ static void wpi_scan_mindwell(struct ieee80211_scan_state *ss) { /* NB: don't try to abort scan; wait for firmware to finish */ } /** * The ops function is called to perform some actual work. * Because we can't sleep from any of the ic callbacks, we queue an * op task with wpi_queue_cmd and have the taskqueue process that task. * The task that gets queued is an op task, which ends up calling this function.
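 * The pending ops live in a small circular buffer: wpi_queue_cmd() stores a
 * command and its argument at sc_cmd_next (a zero entry marks a free slot,
 * so a full ring makes it fail with EBUSY), while this task consumes entries
 * at sc_cmd_cur; both indices wrap modulo WPI_CMD_MAXOPS.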
*/ static void wpi_ops(void *arg0, int pending) { struct wpi_softc *sc = arg0; struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; int cmd, arg, error; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); again: WPI_CMD_LOCK(sc); cmd = sc->sc_cmd[sc->sc_cmd_cur]; arg = sc->sc_cmd_arg[sc->sc_cmd_cur]; if (cmd == 0) { /* No more commands to process */ WPI_CMD_UNLOCK(sc); return; } sc->sc_cmd[sc->sc_cmd_cur] = 0; /* free the slot */ sc->sc_cmd_arg[sc->sc_cmd_cur] = 0; /* free the slot */ sc->sc_cmd_cur = (sc->sc_cmd_cur + 1) % WPI_CMD_MAXOPS; WPI_CMD_UNLOCK(sc); WPI_LOCK(sc); DPRINTFN(WPI_DEBUG_OPS,("wpi_ops: command: %d\n", cmd)); switch (cmd) { case WPI_RESTART: wpi_init_locked(sc, 0); WPI_UNLOCK(sc); return; case WPI_RF_RESTART: wpi_rfkill_resume(sc); WPI_UNLOCK(sc); return; } if (!(sc->sc_ifp->if_drv_flags & IFF_DRV_RUNNING)) { WPI_UNLOCK(sc); return; } switch (cmd) { case WPI_SCAN_START: /* make the link LED blink while we're scanning */ wpi_set_led(sc, WPI_LED_LINK, 20, 2); sc->flags |= WPI_FLAG_SCANNING; break; case WPI_SCAN_STOP: sc->flags &= ~WPI_FLAG_SCANNING; break; case WPI_SCAN_CURCHAN: if (wpi_scan(sc)) ieee80211_cancel_scan(vap); break; case WPI_SET_CHAN: error = wpi_config(sc); if (error != 0) device_printf(sc->sc_dev, "error %d setting channel\n", error); break; case WPI_AUTH: /* The node must be registered in the firmware before auth */ error = wpi_auth(sc, vap); WPI_UNLOCK(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: could not move to auth state, error %d\n", __func__, error); return; } IEEE80211_LOCK(ic); WPI_VAP(vap)->newstate(vap, IEEE80211_S_AUTH, arg); if (vap->iv_newstate_cb != NULL) vap->iv_newstate_cb(vap, IEEE80211_S_AUTH, arg); IEEE80211_UNLOCK(ic); goto again; case WPI_RUN: error = wpi_run(sc, vap); WPI_UNLOCK(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: could not move to run state, error %d\n", __func__, error); return; } IEEE80211_LOCK(ic); WPI_VAP(vap)->newstate(vap, IEEE80211_S_RUN, arg); if (vap->iv_newstate_cb != NULL) vap->iv_newstate_cb(vap, IEEE80211_S_RUN, arg); IEEE80211_UNLOCK(ic); goto again; } WPI_UNLOCK(sc); /* Take another pass */ goto again; } /** * Queue a command for later execution in a different thread. * This is needed as the net80211 callbacks do not allow * sleeping; since we need to sleep to confirm commands have * been processed by the firmware, we must defer execution to * a sleep-enabled thread. */ static int wpi_queue_cmd(struct wpi_softc *sc, int cmd, int arg, int flush) { WPI_CMD_LOCK(sc); if (flush) { memset(sc->sc_cmd, 0, sizeof (sc->sc_cmd)); memset(sc->sc_cmd_arg, 0, sizeof (sc->sc_cmd_arg)); sc->sc_cmd_cur = 0; sc->sc_cmd_next = 0; } if (sc->sc_cmd[sc->sc_cmd_next] != 0) { WPI_CMD_UNLOCK(sc); DPRINTF(("%s: command %d dropped\n", __func__, cmd)); return (EBUSY); } sc->sc_cmd[sc->sc_cmd_next] = cmd; sc->sc_cmd_arg[sc->sc_cmd_next] = arg; sc->sc_cmd_next = (sc->sc_cmd_next + 1) % WPI_CMD_MAXOPS; taskqueue_enqueue(sc->sc_tq, &sc->sc_opstask); WPI_CMD_UNLOCK(sc); return 0; } /* * Allocate DMA-safe memory for firmware transfer.
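 * The buffer is sized for the largest runtime text plus data images
 * (WPI_FW_MAIN_TEXT_MAXSZ + WPI_FW_MAIN_DATA_MAXSZ); presumably
 * wpi_load_firmware(), which is not part of this hunk, stages the microcode
 * here before handing it to the device.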
*/ static int wpi_alloc_fwmem(struct wpi_softc *sc) { /* allocate enough contiguous space to store text and data */ return wpi_dma_contig_alloc(sc, &sc->fw_dma, NULL, WPI_FW_MAIN_TEXT_MAXSZ + WPI_FW_MAIN_DATA_MAXSZ, 1, BUS_DMA_NOWAIT); } static void wpi_free_fwmem(struct wpi_softc *sc) { wpi_dma_contig_free(&sc->fw_dma); } /** * Called every second, wpi_watchdog used by the watch dog timer * to check that the card is still alive */ static void wpi_watchdog(void *arg) { struct wpi_softc *sc = arg; struct ifnet *ifp = sc->sc_ifp; uint32_t tmp; DPRINTFN(WPI_DEBUG_WATCHDOG,("Watchdog: tick\n")); if (sc->flags & WPI_FLAG_HW_RADIO_OFF) { /* No need to lock firmware memory */ tmp = wpi_mem_read(sc, WPI_MEM_HW_RADIO_OFF); if ((tmp & 0x1) == 0) { /* Radio kill switch is still off */ callout_reset(&sc->watchdog_to, hz, wpi_watchdog, sc); return; } device_printf(sc->sc_dev, "Hardware Switch Enabled\n"); wpi_queue_cmd(sc, WPI_RF_RESTART, 0, WPI_QUEUE_CLEAR); return; } if (sc->sc_tx_timer > 0) { if (--sc->sc_tx_timer == 0) { device_printf(sc->sc_dev,"device timeout\n"); ifp->if_oerrors++; wpi_queue_cmd(sc, WPI_RESTART, 0, WPI_QUEUE_CLEAR); } } if (sc->sc_scan_timer > 0) { struct ifnet *ifp = sc->sc_ifp; struct ieee80211com *ic = ifp->if_l2com; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); if (--sc->sc_scan_timer == 0 && vap != NULL) { device_printf(sc->sc_dev,"scan timeout\n"); ieee80211_cancel_scan(vap); wpi_queue_cmd(sc, WPI_RESTART, 0, WPI_QUEUE_CLEAR); } } if (ifp->if_drv_flags & IFF_DRV_RUNNING) callout_reset(&sc->watchdog_to, hz, wpi_watchdog, sc); } #ifdef WPI_DEBUG static const char *wpi_cmd_str(int cmd) { switch (cmd) { case WPI_DISABLE_CMD: return "WPI_DISABLE_CMD"; case WPI_CMD_CONFIGURE: return "WPI_CMD_CONFIGURE"; case WPI_CMD_ASSOCIATE: return "WPI_CMD_ASSOCIATE"; case WPI_CMD_SET_WME: return "WPI_CMD_SET_WME"; case WPI_CMD_TSF: return "WPI_CMD_TSF"; case WPI_CMD_ADD_NODE: return "WPI_CMD_ADD_NODE"; case WPI_CMD_TX_DATA: return "WPI_CMD_TX_DATA"; case WPI_CMD_MRR_SETUP: return "WPI_CMD_MRR_SETUP"; case WPI_CMD_SET_LED: return "WPI_CMD_SET_LED"; case WPI_CMD_SET_POWER_MODE: return "WPI_CMD_SET_POWER_MODE"; case WPI_CMD_SCAN: return "WPI_CMD_SCAN"; case WPI_CMD_SET_BEACON:return "WPI_CMD_SET_BEACON"; case WPI_CMD_TXPOWER: return "WPI_CMD_TXPOWER"; case WPI_CMD_BLUETOOTH: return "WPI_CMD_BLUETOOTH"; default: KASSERT(1, ("Unknown Command: %d\n", cmd)); return "UNKNOWN CMD"; /* Make the compiler happy */ } } #endif MODULE_DEPEND(wpi, pci, 1, 1, 1); MODULE_DEPEND(wpi, wlan, 1, 1, 1); MODULE_DEPEND(wpi, firmware, 1, 1, 1); MODULE_DEPEND(wpi, wlan_amrr, 1, 1, 1); diff --git a/sys/net80211/ieee80211_node.c b/sys/net80211/ieee80211_node.c index c6aadb2be34a..61cdeab04f54 100644 --- a/sys/net80211/ieee80211_node.c +++ b/sys/net80211/ieee80211_node.c @@ -1,2501 +1,2502 @@ /*- * Copyright (c) 2001 Atsushi Onoe * Copyright (c) 2002-2008 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Association id's are managed with a bit vector. */ #define IEEE80211_AID_SET(_vap, b) \ ((_vap)->iv_aid_bitmap[IEEE80211_AID(b) / 32] |= \ (1 << (IEEE80211_AID(b) % 32))) #define IEEE80211_AID_CLR(_vap, b) \ ((_vap)->iv_aid_bitmap[IEEE80211_AID(b) / 32] &= \ ~(1 << (IEEE80211_AID(b) % 32))) #define IEEE80211_AID_ISSET(_vap, b) \ ((_vap)->iv_aid_bitmap[IEEE80211_AID(b) / 32] & (1 << (IEEE80211_AID(b) % 32))) #ifdef IEEE80211_DEBUG_REFCNT #define REFCNT_LOC "%s (%s:%u) %p<%s> refcnt %d\n", __func__, func, line #else #define REFCNT_LOC "%s %p<%s> refcnt %d\n", __func__ #endif static int ieee80211_sta_join1(struct ieee80211_node *); -static struct ieee80211_node *node_alloc(struct ieee80211_node_table *); +static struct ieee80211_node *node_alloc(struct ieee80211vap *, + const uint8_t [IEEE80211_ADDR_LEN]); static void node_cleanup(struct ieee80211_node *); static void node_free(struct ieee80211_node *); static void node_age(struct ieee80211_node *); static int8_t node_getrssi(const struct ieee80211_node *); static void node_getsignal(const struct ieee80211_node *, int8_t *, int8_t *); static void node_getmimoinfo(const struct ieee80211_node *, struct ieee80211_mimo_info *); static void _ieee80211_free_node(struct ieee80211_node *); static void ieee80211_node_table_init(struct ieee80211com *ic, struct ieee80211_node_table *nt, const char *name, int inact, int keymaxix); static void ieee80211_node_table_reset(struct ieee80211_node_table *, struct ieee80211vap *); static void ieee80211_node_reclaim(struct ieee80211_node *); static void ieee80211_node_table_cleanup(struct ieee80211_node_table *nt); static void ieee80211_erp_timeout(struct ieee80211com *); MALLOC_DEFINE(M_80211_NODE, "80211node", "802.11 node state"); MALLOC_DEFINE(M_80211_NODE_IE, "80211nodeie", "802.11 node ie"); void ieee80211_node_attach(struct ieee80211com *ic) { ieee80211_node_table_init(ic, &ic->ic_sta, "station", IEEE80211_INACT_INIT, ic->ic_max_keyix); callout_init(&ic->ic_inact, CALLOUT_MPSAFE); callout_reset(&ic->ic_inact, IEEE80211_INACT_WAIT*hz, ieee80211_node_timeout, ic); ic->ic_node_alloc = node_alloc; ic->ic_node_free = node_free; ic->ic_node_cleanup = node_cleanup; ic->ic_node_age = node_age; ic->ic_node_drain = node_age; /* NB: same as age */ ic->ic_node_getrssi = node_getrssi; ic->ic_node_getsignal = node_getsignal; ic->ic_node_getmimoinfo = node_getmimoinfo; /* * Set flags to be propagated to all vap's; * these define default behaviour/configuration. 
*/ ic->ic_flags_ext |= IEEE80211_FEXT_INACT; /* inactivity processing */ } void ieee80211_node_detach(struct ieee80211com *ic) { callout_drain(&ic->ic_inact); ieee80211_node_table_cleanup(&ic->ic_sta); } void ieee80211_node_vattach(struct ieee80211vap *vap) { /* NB: driver can override */ vap->iv_max_aid = IEEE80211_AID_DEF; /* default station inactivity timer setings */ vap->iv_inact_init = IEEE80211_INACT_INIT; vap->iv_inact_auth = IEEE80211_INACT_AUTH; vap->iv_inact_run = IEEE80211_INACT_RUN; vap->iv_inact_probe = IEEE80211_INACT_PROBE; } void ieee80211_node_latevattach(struct ieee80211vap *vap) { if (vap->iv_opmode == IEEE80211_M_HOSTAP) { /* XXX should we allow max aid to be zero? */ if (vap->iv_max_aid < IEEE80211_AID_MIN) { vap->iv_max_aid = IEEE80211_AID_MIN; if_printf(vap->iv_ifp, "WARNING: max aid too small, changed to %d\n", vap->iv_max_aid); } MALLOC(vap->iv_aid_bitmap, uint32_t *, howmany(vap->iv_max_aid, 32) * sizeof(uint32_t), M_80211_NODE, M_NOWAIT | M_ZERO); if (vap->iv_aid_bitmap == NULL) { /* XXX no way to recover */ printf("%s: no memory for AID bitmap, max aid %d!\n", __func__, vap->iv_max_aid); vap->iv_max_aid = 0; } } ieee80211_reset_bss(vap); vap->iv_auth = ieee80211_authenticator_get(vap->iv_bss->ni_authmode); } void ieee80211_node_vdetach(struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; ieee80211_node_table_reset(&ic->ic_sta, vap); if (vap->iv_bss != NULL) { ieee80211_free_node(vap->iv_bss); vap->iv_bss = NULL; } if (vap->iv_aid_bitmap != NULL) { FREE(vap->iv_aid_bitmap, M_80211_NODE); vap->iv_aid_bitmap = NULL; } } /* * Port authorize/unauthorize interfaces for use by an authenticator. */ void ieee80211_node_authorize(struct ieee80211_node *ni) { ni->ni_flags |= IEEE80211_NODE_AUTH; ni->ni_inact_reload = ni->ni_vap->iv_inact_run; ni->ni_inact = ni->ni_inact_reload; } void ieee80211_node_unauthorize(struct ieee80211_node *ni) { ni->ni_flags &= ~IEEE80211_NODE_AUTH; ni->ni_inact_reload = ni->ni_vap->iv_inact_auth; if (ni->ni_inact > ni->ni_inact_reload) ni->ni_inact = ni->ni_inact_reload; } /* * Set/change the channel. The rate set is also updated as * to insure a consistent view by drivers. * XXX should be private but hostap needs it to deal with CSA */ void ieee80211_node_set_chan(struct ieee80211_node *ni, struct ieee80211_channel *chan) { struct ieee80211com *ic = ni->ni_ic; KASSERT(chan != IEEE80211_CHAN_ANYC, ("no channel")); ni->ni_chan = chan; if (IEEE80211_IS_CHAN_HT(chan)) { /* * XXX Gotta be careful here; the rate set returned by * ieee80211_get_suprates is actually any HT rate * set so blindly copying it will be bad. We must * install the legacy rate est in ni_rates and the * HT rate set in ni_htrates. */ ni->ni_htrates = *ieee80211_get_suphtrates(ic, chan); } ni->ni_rates = *ieee80211_get_suprates(ic, chan); } static __inline void copy_bss(struct ieee80211_node *nbss, const struct ieee80211_node *obss) { /* propagate useful state */ nbss->ni_authmode = obss->ni_authmode; nbss->ni_txpower = obss->ni_txpower; nbss->ni_vlan = obss->ni_vlan; /* XXX statistics? */ /* XXX legacy WDS bssid? */ } void ieee80211_create_ibss(struct ieee80211vap* vap, struct ieee80211_channel *chan) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node *ni; IEEE80211_DPRINTF(vap, IEEE80211_MSG_SCAN, "%s: creating ibss on channel %u\n", __func__, ieee80211_chan2ieee(ic, chan)); ni = ieee80211_alloc_node(&ic->ic_sta, vap, vap->iv_myaddr); if (ni == NULL) { /* XXX recovery? 
*/ return; } IEEE80211_ADDR_COPY(ni->ni_bssid, vap->iv_myaddr); ni->ni_esslen = vap->iv_des_ssid[0].len; memcpy(ni->ni_essid, vap->iv_des_ssid[0].ssid, ni->ni_esslen); if (vap->iv_bss != NULL) copy_bss(ni, vap->iv_bss); ni->ni_intval = ic->ic_bintval; if (vap->iv_flags & IEEE80211_F_PRIVACY) ni->ni_capinfo |= IEEE80211_CAPINFO_PRIVACY; if (ic->ic_phytype == IEEE80211_T_FH) { ni->ni_fhdwell = 200; /* XXX */ ni->ni_fhindex = 1; } if (vap->iv_opmode == IEEE80211_M_IBSS) { vap->iv_flags |= IEEE80211_F_SIBSS; ni->ni_capinfo |= IEEE80211_CAPINFO_IBSS; /* XXX */ if (vap->iv_flags & IEEE80211_F_DESBSSID) IEEE80211_ADDR_COPY(ni->ni_bssid, vap->iv_des_bssid); else { get_random_bytes(ni->ni_bssid, IEEE80211_ADDR_LEN); /* clear group bit, add local bit */ ni->ni_bssid[0] = (ni->ni_bssid[0] &~ 0x01) | 0x02; } } else if (vap->iv_opmode == IEEE80211_M_AHDEMO) { if (vap->iv_flags & IEEE80211_F_DESBSSID) IEEE80211_ADDR_COPY(ni->ni_bssid, vap->iv_des_bssid); else memset(ni->ni_bssid, 0, IEEE80211_ADDR_LEN); } /* * Fix the channel and related attributes. */ /* clear DFS CAC state on previous channel */ if (ic->ic_bsschan != IEEE80211_CHAN_ANYC && ic->ic_bsschan->ic_freq != chan->ic_freq && IEEE80211_IS_CHAN_CACDONE(ic->ic_bsschan)) ieee80211_dfs_cac_clear(ic, ic->ic_bsschan); ic->ic_bsschan = chan; ieee80211_node_set_chan(ni, chan); ic->ic_curmode = ieee80211_chan2mode(chan); /* * Do mode-specific setup. */ if (IEEE80211_IS_CHAN_FULL(chan)) { if (IEEE80211_IS_CHAN_ANYG(chan)) { /* * Use a mixed 11b/11g basic rate set. */ ieee80211_setbasicrates(&ni->ni_rates, IEEE80211_MODE_11G); if (vap->iv_flags & IEEE80211_F_PUREG) { /* * Also mark OFDM rates basic so 11b * stations do not join (WiFi compliance). */ ieee80211_addbasicrates(&ni->ni_rates, IEEE80211_MODE_11A); } } else if (IEEE80211_IS_CHAN_B(chan)) { /* * Force pure 11b rate set. */ ieee80211_setbasicrates(&ni->ni_rates, IEEE80211_MODE_11B); } } (void) ieee80211_sta_join1(ieee80211_ref_node(ni)); } /* * Reset bss state on transition to the INIT state. * Clear any stations from the table (they have been * deauth'd) and reset the bss node (clears key, rate * etc. state). */ void ieee80211_reset_bss(struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node *ni, *obss; ieee80211_node_table_reset(&ic->ic_sta, vap); /* XXX multi-bss: wrong */ ieee80211_reset_erp(ic); ni = ieee80211_alloc_node(&ic->ic_sta, vap, vap->iv_myaddr); KASSERT(ni != NULL, ("unable to setup inital BSS node")); obss = vap->iv_bss; vap->iv_bss = ieee80211_ref_node(ni); if (obss != NULL) { copy_bss(ni, obss); ni->ni_intval = ic->ic_bintval; ieee80211_free_node(obss); } else IEEE80211_ADDR_COPY(ni->ni_bssid, vap->iv_myaddr); } static int match_ssid(const struct ieee80211_node *ni, int nssid, const struct ieee80211_scan_ssid ssids[]) { int i; for (i = 0; i < nssid; i++) { if (ni->ni_esslen == ssids[i].len && memcmp(ni->ni_essid, ssids[i].ssid, ni->ni_esslen) == 0) return 1; } return 0; } /* * Test a node for suitability/compatibility. 
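 * A candidate is rejected when its channel is not in ic_chan_active, when its
 * ESS/IBSS capability does not match our operating mode, when its privacy
 * capability disagrees with IEEE80211_F_PRIVACY, when any of its basic rates
 * is unsupported, or when the desired SSID/BSSID does not match.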
*/ static int check_bss(struct ieee80211vap *vap, struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; uint8_t rate; if (isclr(ic->ic_chan_active, ieee80211_chan2ieee(ic, ni->ni_chan))) return 0; if (vap->iv_opmode == IEEE80211_M_IBSS) { if ((ni->ni_capinfo & IEEE80211_CAPINFO_IBSS) == 0) return 0; } else { if ((ni->ni_capinfo & IEEE80211_CAPINFO_ESS) == 0) return 0; } if (vap->iv_flags & IEEE80211_F_PRIVACY) { if ((ni->ni_capinfo & IEEE80211_CAPINFO_PRIVACY) == 0) return 0; } else { /* XXX does this mean privacy is supported or required? */ if (ni->ni_capinfo & IEEE80211_CAPINFO_PRIVACY) return 0; } rate = ieee80211_fix_rate(ni, &ni->ni_rates, IEEE80211_F_JOIN | IEEE80211_F_DONEGO | IEEE80211_F_DOFRATE); if (rate & IEEE80211_RATE_BASIC) return 0; if (vap->iv_des_nssid != 0 && !match_ssid(ni, vap->iv_des_nssid, vap->iv_des_ssid)) return 0; if ((vap->iv_flags & IEEE80211_F_DESBSSID) && !IEEE80211_ADDR_EQ(vap->iv_des_bssid, ni->ni_bssid)) return 0; return 1; } #ifdef IEEE80211_DEBUG /* * Display node suitability/compatibility. */ static void check_bss_debug(struct ieee80211vap *vap, struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; uint8_t rate; int fail; fail = 0; if (isclr(ic->ic_chan_active, ieee80211_chan2ieee(ic, ni->ni_chan))) fail |= 0x01; if (vap->iv_opmode == IEEE80211_M_IBSS) { if ((ni->ni_capinfo & IEEE80211_CAPINFO_IBSS) == 0) fail |= 0x02; } else { if ((ni->ni_capinfo & IEEE80211_CAPINFO_ESS) == 0) fail |= 0x02; } if (vap->iv_flags & IEEE80211_F_PRIVACY) { if ((ni->ni_capinfo & IEEE80211_CAPINFO_PRIVACY) == 0) fail |= 0x04; } else { /* XXX does this mean privacy is supported or required? */ if (ni->ni_capinfo & IEEE80211_CAPINFO_PRIVACY) fail |= 0x04; } rate = ieee80211_fix_rate(ni, &ni->ni_rates, IEEE80211_F_JOIN | IEEE80211_F_DONEGO | IEEE80211_F_DOFRATE); if (rate & IEEE80211_RATE_BASIC) fail |= 0x08; if (vap->iv_des_nssid != 0 && !match_ssid(ni, vap->iv_des_nssid, vap->iv_des_ssid)) fail |= 0x10; if ((vap->iv_flags & IEEE80211_F_DESBSSID) && !IEEE80211_ADDR_EQ(vap->iv_des_bssid, ni->ni_bssid)) fail |= 0x20; printf(" %c %s", fail ? '-' : '+', ether_sprintf(ni->ni_macaddr)); printf(" %s%c", ether_sprintf(ni->ni_bssid), fail & 0x20 ? '!' : ' '); printf(" %3d%c", ieee80211_chan2ieee(ic, ni->ni_chan), fail & 0x01 ? '!' : ' '); printf(" %2dM%c", (rate & IEEE80211_RATE_VAL) / 2, fail & 0x08 ? '!' : ' '); printf(" %4s%c", (ni->ni_capinfo & IEEE80211_CAPINFO_ESS) ? "ess" : (ni->ni_capinfo & IEEE80211_CAPINFO_IBSS) ? "ibss" : "????", fail & 0x02 ? '!' : ' '); printf(" %3s%c ", (ni->ni_capinfo & IEEE80211_CAPINFO_PRIVACY) ? "wep" : "no", fail & 0x04 ? '!' : ' '); ieee80211_print_essid(ni->ni_essid, ni->ni_esslen); printf("%s\n", fail & 0x10 ? "!" : ""); } #endif /* IEEE80211_DEBUG */ /* * Handle 802.11 ad hoc network merge. The * convention, set by the Wireless Ethernet Compatibility Alliance * (WECA), is that an 802.11 station will change its BSSID to match * the "oldest" 802.11 ad hoc network, on the same channel, that * has the station's desired SSID. The "oldest" 802.11 network * sends beacons with the greatest TSF timestamp. * * The caller is assumed to validate TSF's before attempting a merge. * * Return !0 if the BSSID changed, 0 otherwise. 
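 *
 * Illustrative sketch only (not part of this change): a driver's beacon rx
 * path is expected to compare timestamps itself and then call this routine,
 * roughly along these lines ("local_tsf" is a hypothetical local variable):
 *
 *	if (vap->iv_opmode == IEEE80211_M_IBSS &&
 *	    vap->iv_state == IEEE80211_S_RUN &&
 *	    le64toh(ni->ni_tstamp.tsf) >= local_tsf)
 *		(void) ieee80211_ibss_merge(ni);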
*/ int ieee80211_ibss_merge(struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; #ifdef IEEE80211_DEBUG struct ieee80211com *ic = ni->ni_ic; #endif if (ni == vap->iv_bss || IEEE80211_ADDR_EQ(ni->ni_bssid, vap->iv_bss->ni_bssid)) { /* unchanged, nothing to do */ return 0; } if (!check_bss(vap, ni)) { /* capabilities mismatch */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC, "%s: merge failed, capabilities mismatch\n", __func__); #ifdef IEEE80211_DEBUG if (ieee80211_msg_assoc(vap)) check_bss_debug(vap, ni); #endif vap->iv_stats.is_ibss_capmismatch++; return 0; } IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC, "%s: new bssid %s: %s preamble, %s slot time%s\n", __func__, ether_sprintf(ni->ni_bssid), ic->ic_flags&IEEE80211_F_SHPREAMBLE ? "short" : "long", ic->ic_flags&IEEE80211_F_SHSLOT ? "short" : "long", ic->ic_flags&IEEE80211_F_USEPROT ? ", protection" : "" ); return ieee80211_sta_join1(ieee80211_ref_node(ni)); } /* * Calculate HT channel promotion flags for all vaps. * This assumes ni_chan have been setup for each vap. */ static int gethtadjustflags(struct ieee80211com *ic) { struct ieee80211vap *vap; int flags; flags = 0; /* XXX locking */ TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { if (vap->iv_state < IEEE80211_S_RUN) continue; switch (vap->iv_opmode) { case IEEE80211_M_WDS: case IEEE80211_M_STA: case IEEE80211_M_AHDEMO: case IEEE80211_M_HOSTAP: case IEEE80211_M_IBSS: flags |= ieee80211_htchanflags(vap->iv_bss->ni_chan); break; default: break; } } return flags; } /* * Check if the current channel needs to change based on whether * any vap's are using HT20/HT40. This is used sync the state of * ic_curchan after a channel width change on a running vap. */ void ieee80211_sync_curchan(struct ieee80211com *ic) { struct ieee80211_channel *c; c = ieee80211_ht_adjust_channel(ic, ic->ic_curchan, gethtadjustflags(ic)); if (c != ic->ic_curchan) { ic->ic_curchan = c; ic->ic_curmode = ieee80211_chan2mode(ic->ic_curchan); ic->ic_set_channel(ic); } } /* * Change the current channel. The request channel may be * promoted if other vap's are operating with HT20/HT40. */ void ieee80211_setcurchan(struct ieee80211com *ic, struct ieee80211_channel *c) { if (ic->ic_htcaps & IEEE80211_HTC_HT) { int flags = gethtadjustflags(ic); /* * Check for channel promotion required to support the * set of running vap's. This assumes we are called * after ni_chan is setup for each vap. */ /* NB: this assumes IEEE80211_FEXT_USEHT40 > IEEE80211_FEXT_HT */ if (flags > ieee80211_htchanflags(c)) c = ieee80211_ht_adjust_channel(ic, c, flags); } ic->ic_bsschan = ic->ic_curchan = c; ic->ic_curmode = ieee80211_chan2mode(ic->ic_curchan); ic->ic_set_channel(ic); } /* * Join the specified IBSS/BSS network. The node is assumed to * be passed in with a held reference. */ static int ieee80211_sta_join1(struct ieee80211_node *selbs) { struct ieee80211vap *vap = selbs->ni_vap; struct ieee80211com *ic = selbs->ni_ic; struct ieee80211_node *obss; int canreassoc; /* * Committed to selbs, setup state. */ obss = vap->iv_bss; /* * Check if old+new node have the same address in which * case we can reassociate when operating in sta mode. */ canreassoc = (obss != NULL && vap->iv_state == IEEE80211_S_RUN && IEEE80211_ADDR_EQ(obss->ni_macaddr, selbs->ni_macaddr)); vap->iv_bss = selbs; /* NB: caller assumed to bump refcnt */ if (obss != NULL) { copy_bss(selbs, obss); ieee80211_node_reclaim(obss); obss = NULL; /* NB: guard against later use */ } /* * Delete unusable rates; we've already checked * that the negotiated rate set is acceptable. 
*/ ieee80211_fix_rate(vap->iv_bss, &vap->iv_bss->ni_rates, IEEE80211_F_DODEL | IEEE80211_F_JOIN); ieee80211_setcurchan(ic, selbs->ni_chan); /* * Set the erp state (mostly the slot time) to deal with * the auto-select case; this should be redundant if the * mode is locked. */ ieee80211_reset_erp(ic); ieee80211_wme_initparams(vap); if (vap->iv_opmode == IEEE80211_M_STA) { if (canreassoc) { /* Reassociate */ ieee80211_new_state(vap, IEEE80211_S_ASSOC, 1); } else { /* * Act as if we received a DEAUTH frame in case we * are invoked from the RUN state. This will cause * us to try to re-authenticate if we are operating * as a station. */ ieee80211_new_state(vap, IEEE80211_S_AUTH, IEEE80211_FC0_SUBTYPE_DEAUTH); } } else ieee80211_new_state(vap, IEEE80211_S_RUN, -1); return 1; } int ieee80211_sta_join(struct ieee80211vap *vap, const struct ieee80211_scan_entry *se) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node *ni; ni = ieee80211_alloc_node(&ic->ic_sta, vap, se->se_macaddr); if (ni == NULL) { /* XXX msg */ return 0; } /* * Expand scan state into node's format. * XXX may not need all this stuff */ IEEE80211_ADDR_COPY(ni->ni_bssid, se->se_bssid); ni->ni_esslen = se->se_ssid[1]; memcpy(ni->ni_essid, se->se_ssid+2, ni->ni_esslen); ni->ni_rstamp = se->se_rstamp; ni->ni_tstamp.tsf = se->se_tstamp.tsf; ni->ni_intval = se->se_intval; ni->ni_capinfo = se->se_capinfo; ni->ni_chan = se->se_chan; ni->ni_timoff = se->se_timoff; ni->ni_fhdwell = se->se_fhdwell; ni->ni_fhindex = se->se_fhindex; ni->ni_erp = se->se_erp; IEEE80211_RSSI_LPF(ni->ni_avgrssi, se->se_rssi); ni->ni_noise = se->se_noise; if (ieee80211_ies_init(&ni->ni_ies, se->se_ies.data, se->se_ies.len)) { ieee80211_ies_expand(&ni->ni_ies); if (ni->ni_ies.ath_ie != NULL) ieee80211_parse_ath(ni, ni->ni_ies.ath_ie); if (ni->ni_ies.htcap_ie != NULL) ieee80211_parse_htcap(ni, ni->ni_ies.htcap_ie); if (ni->ni_ies.htinfo_ie != NULL) ieee80211_parse_htinfo(ni, ni->ni_ies.htinfo_ie); } vap->iv_dtim_period = se->se_dtimperiod; vap->iv_dtim_count = 0; /* NB: must be after ni_chan is setup */ ieee80211_setup_rates(ni, se->se_rates, se->se_xrates, IEEE80211_F_DOSORT); return ieee80211_sta_join1(ieee80211_ref_node(ni)); } /* * Leave the specified IBSS/BSS network. The node is assumed to * be passed in with a held reference. */ void ieee80211_sta_leave(struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; ic->ic_node_cleanup(ni); ieee80211_notify_node_leave(ni); } /* * Send a deauthenticate frame and drop the station. */ void ieee80211_node_deauth(struct ieee80211_node *ni, int reason) { /* NB: bump the refcnt to be sure temporay nodes are not reclaimed */ ieee80211_ref_node(ni); if (ni->ni_associd != 0) IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DEAUTH, reason); ieee80211_node_leave(ni); ieee80211_free_node(ni); } static struct ieee80211_node * -node_alloc(struct ieee80211_node_table *nt) +node_alloc(struct ieee80211vap *vap, const uint8_t macaddr[IEEE80211_ADDR_LEN]) { struct ieee80211_node *ni; MALLOC(ni, struct ieee80211_node *, sizeof(struct ieee80211_node), M_80211_NODE, M_NOWAIT | M_ZERO); return ni; } /* * Initialize an ie blob with the specified data. If previous * data exists re-use the data block. As a side effect we clear * all references to specific ie's; the caller is required to * recalculate them. 
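 * Callers that need the per-ie pointers (wpa_ie, htcap_ie, ...) normally
 * follow a successful ieee80211_ies_init() with ieee80211_ies_expand(), as
 * the station join path above does.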
*/ int ieee80211_ies_init(struct ieee80211_ies *ies, const uint8_t *data, int len) { /* NB: assumes data+len are the last fields */ memset(ies, 0, offsetof(struct ieee80211_ies, data)); if (ies->data != NULL && ies->len != len) { /* data size changed */ FREE(ies->data, M_80211_NODE_IE); ies->data = NULL; } if (ies->data == NULL) { MALLOC(ies->data, uint8_t *, len, M_80211_NODE_IE, M_NOWAIT); if (ies->data == NULL) { ies->len = 0; /* NB: pointers have already been zero'd above */ return 0; } } memcpy(ies->data, data, len); ies->len = len; return 1; } /* * Reclaim storage for an ie blob. */ void ieee80211_ies_cleanup(struct ieee80211_ies *ies) { if (ies->data != NULL) FREE(ies->data, M_80211_NODE_IE); } /* * Expand an ie blob data contents and to fillin individual * ie pointers. The data blob is assumed to be well-formed; * we don't do any validity checking of ie lengths. */ void ieee80211_ies_expand(struct ieee80211_ies *ies) { uint8_t *ie; int ielen; ie = ies->data; ielen = ies->len; while (ielen > 0) { switch (ie[0]) { case IEEE80211_ELEMID_VENDOR: if (iswpaoui(ie)) ies->wpa_ie = ie; else if (iswmeoui(ie)) ies->wme_ie = ie; else if (isatherosoui(ie)) ies->ath_ie = ie; break; case IEEE80211_ELEMID_RSN: ies->rsn_ie = ie; break; case IEEE80211_ELEMID_HTCAP: ies->htcap_ie = ie; break; } ielen -= 2 + ie[1]; ie += 2 + ie[1]; } } /* * Reclaim any resources in a node and reset any critical * state. Typically nodes are free'd immediately after, * but in some cases the storage may be reused so we need * to insure consistent state (should probably fix that). */ static void node_cleanup(struct ieee80211_node *ni) { #define N(a) (sizeof(a)/sizeof(a[0])) struct ieee80211vap *vap = ni->ni_vap; int i; /* NB: preserve ni_table */ if (ni->ni_flags & IEEE80211_NODE_PWR_MGT) { if (vap->iv_opmode != IEEE80211_M_STA) vap->iv_ps_sta--; ni->ni_flags &= ~IEEE80211_NODE_PWR_MGT; IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni, "power save mode off, %u sta's in ps mode", vap->iv_ps_sta); } /* * Cleanup any HT-related state. */ if (ni->ni_flags & IEEE80211_NODE_HT) ieee80211_ht_node_cleanup(ni); /* * Clear AREF flag that marks the authorization refcnt bump * has happened. This is probably not needed as the node * should always be removed from the table so not found but * do it just in case. */ ni->ni_flags &= ~IEEE80211_NODE_AREF; /* * Drain power save queue and, if needed, clear TIM. */ if (ieee80211_node_saveq_drain(ni) != 0 && vap->iv_set_tim != NULL) vap->iv_set_tim(ni, 0); ni->ni_associd = 0; if (ni->ni_challenge != NULL) { FREE(ni->ni_challenge, M_80211_NODE); ni->ni_challenge = NULL; } /* * Preserve SSID, WPA, and WME ie's so the bss node is * reusable during a re-auth/re-assoc state transition. * If we remove these data they will not be recreated * because they come from a probe-response or beacon frame * which cannot be expected prior to the association-response. * This should not be an issue when operating in other modes * as stations leaving always go through a full state transition * which will rebuild this state. * * XXX does this leave us open to inheriting old state? */ for (i = 0; i < N(ni->ni_rxfrag); i++) if (ni->ni_rxfrag[i] != NULL) { m_freem(ni->ni_rxfrag[i]); ni->ni_rxfrag[i] = NULL; } /* * Must be careful here to remove any key map entry w/o a LOR. 
*/ ieee80211_node_delucastkey(ni); #undef N } static void node_free(struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; ic->ic_node_cleanup(ni); ieee80211_ies_cleanup(&ni->ni_ies); IEEE80211_NODE_SAVEQ_DESTROY(ni); IEEE80211_NODE_WDSQ_DESTROY(ni); FREE(ni, M_80211_NODE); } static void node_age(struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; #if 0 IEEE80211_NODE_LOCK_ASSERT(&ic->ic_sta); #endif /* * Age frames on the power save queue. */ if (ieee80211_node_saveq_age(ni) != 0 && IEEE80211_NODE_SAVEQ_QLEN(ni) == 0 && vap->iv_set_tim != NULL) vap->iv_set_tim(ni, 0); /* * Age frames on the wds pending queue. */ if (IEEE80211_NODE_WDSQ_QLEN(ni) != 0) ieee80211_node_wdsq_age(ni); /* * Age out HT resources (e.g. frames on the * A-MPDU reorder queues). */ if (ni->ni_associd != 0 && (ni->ni_flags & IEEE80211_NODE_HT)) ieee80211_ht_node_age(ni); } static int8_t node_getrssi(const struct ieee80211_node *ni) { uint32_t avgrssi = ni->ni_avgrssi; int32_t rssi; if (avgrssi == IEEE80211_RSSI_DUMMY_MARKER) return 0; rssi = IEEE80211_RSSI_GET(avgrssi); return rssi < 0 ? 0 : rssi > 127 ? 127 : rssi; } static void node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise) { *rssi = node_getrssi(ni); *noise = ni->ni_noise; } static void node_getmimoinfo(const struct ieee80211_node *ni, struct ieee80211_mimo_info *info) { /* XXX zero data? */ } struct ieee80211_node * ieee80211_alloc_node(struct ieee80211_node_table *nt, struct ieee80211vap *vap, const uint8_t macaddr[IEEE80211_ADDR_LEN]) { struct ieee80211com *ic = nt->nt_ic; struct ieee80211_node *ni; int hash; - ni = ic->ic_node_alloc(nt); + ni = ic->ic_node_alloc(vap, macaddr); if (ni == NULL) { vap->iv_stats.is_rx_nodealloc++; return NULL; } IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "%s %p<%s> in %s table\n", __func__, ni, ether_sprintf(macaddr), nt->nt_name); IEEE80211_ADDR_COPY(ni->ni_macaddr, macaddr); hash = IEEE80211_NODE_HASH(macaddr); ieee80211_node_initref(ni); /* mark referenced */ ni->ni_chan = IEEE80211_CHAN_ANYC; ni->ni_authmode = IEEE80211_AUTH_OPEN; ni->ni_txpower = ic->ic_txpowlimit; /* max power */ ieee80211_crypto_resetkey(vap, &ni->ni_ucastkey, IEEE80211_KEYIX_NONE); ni->ni_avgrssi = IEEE80211_RSSI_DUMMY_MARKER; ni->ni_inact_reload = nt->nt_inact_init; ni->ni_inact = ni->ni_inact_reload; ni->ni_ath_defkeyix = 0x7fff; IEEE80211_NODE_SAVEQ_INIT(ni, "unknown"); IEEE80211_NODE_WDSQ_INIT(ni, "unknown"); IEEE80211_NODE_LOCK(nt); TAILQ_INSERT_TAIL(&nt->nt_node, ni, ni_list); LIST_INSERT_HEAD(&nt->nt_hash[hash], ni, ni_hash); ni->ni_table = nt; ni->ni_vap = vap; ni->ni_ic = ic; IEEE80211_NODE_UNLOCK(nt); return ni; } /* * Craft a temporary node suitable for sending a management frame * to the specified station. We craft only as much state as we * need to do the work since the node will be immediately reclaimed * once the send completes. 
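 *
 * Typical use (an illustrative sketch, not part of this change) is sending a
 * one-shot management frame to a station we hold no state for:
 *
 *	ni = ieee80211_tmp_node(vap, macaddr);
 *	if (ni != NULL) {
 *		IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DEAUTH,
 *		    IEEE80211_REASON_NOT_AUTHED);
 *		ieee80211_free_node(ni);
 *	}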
*/ struct ieee80211_node * ieee80211_tmp_node(struct ieee80211vap *vap, const uint8_t macaddr[IEEE80211_ADDR_LEN]) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node *ni; - ni = ic->ic_node_alloc(&ic->ic_sta); + ni = ic->ic_node_alloc(vap, macaddr); if (ni != NULL) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "%s %p<%s>\n", __func__, ni, ether_sprintf(macaddr)); ni->ni_table = NULL; /* NB: pedantic */ ni->ni_ic = ic; /* NB: needed to set channel */ ni->ni_vap = vap; IEEE80211_ADDR_COPY(ni->ni_macaddr, macaddr); IEEE80211_ADDR_COPY(ni->ni_bssid, vap->iv_bss->ni_bssid); ieee80211_node_initref(ni); /* mark referenced */ /* NB: required by ieee80211_fix_rate */ ieee80211_node_set_chan(ni, vap->iv_bss->ni_chan); ieee80211_crypto_resetkey(vap, &ni->ni_ucastkey, IEEE80211_KEYIX_NONE); /* XXX optimize away */ IEEE80211_NODE_SAVEQ_INIT(ni, "unknown"); IEEE80211_NODE_WDSQ_INIT(ni, "unknown"); } else { /* XXX msg */ vap->iv_stats.is_rx_nodealloc++; } return ni; } struct ieee80211_node * ieee80211_dup_bss(struct ieee80211vap *vap, const uint8_t macaddr[IEEE80211_ADDR_LEN]) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node *ni; ni = ieee80211_alloc_node(&ic->ic_sta, vap, macaddr); if (ni != NULL) { /* * Inherit from iv_bss. */ ni->ni_authmode = vap->iv_bss->ni_authmode; ni->ni_vlan = vap->iv_bss->ni_vlan; /* XXX?? */ IEEE80211_ADDR_COPY(ni->ni_bssid, vap->iv_bss->ni_bssid); ieee80211_node_set_chan(ni, vap->iv_bss->ni_chan); } return ni; } /* * Create a bss node for a legacy WDS vap. The far end does * not associate so we just create create a new node and * simulate an association. The caller is responsible for * installing the node as the bss node and handling any further * setup work like authorizing the port. */ struct ieee80211_node * ieee80211_node_create_wds(struct ieee80211vap *vap, const uint8_t bssid[IEEE80211_ADDR_LEN], struct ieee80211_channel *chan) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node *ni; /* XXX check if node already in sta table? */ ni = ieee80211_alloc_node(&ic->ic_sta, vap, bssid); if (ni != NULL) { ni->ni_wdsvap = vap; IEEE80211_ADDR_COPY(ni->ni_bssid, bssid); /* * Inherit any manually configured settings. */ ni->ni_authmode = vap->iv_bss->ni_authmode; ni->ni_vlan = vap->iv_bss->ni_vlan; ieee80211_node_set_chan(ni, chan); /* NB: propagate ssid so available to WPA supplicant */ ni->ni_esslen = vap->iv_des_ssid[0].len; memcpy(ni->ni_essid, vap->iv_des_ssid[0].ssid, ni->ni_esslen); /* NB: no associd for peer */ /* * There are no management frames to use to * discover neighbor capabilities, so blindly * propagate the local configuration. */ if (vap->iv_flags & IEEE80211_F_WME) ni->ni_flags |= IEEE80211_NODE_QOS; if (vap->iv_flags & IEEE80211_F_FF) ni->ni_flags |= IEEE80211_NODE_FF; if ((ic->ic_htcaps & IEEE80211_HTC_HT) && (vap->iv_flags_ext & IEEE80211_FEXT_HT)) { /* * Device is HT-capable and HT is enabled for * the vap; setup HT operation. On return * ni_chan will be adjusted to an HT channel. */ ieee80211_ht_wds_init(ni); } else { struct ieee80211_channel *c = ni->ni_chan; /* * Force a legacy channel to be used. 
*/ c = ieee80211_find_channel(ic, c->ic_freq, c->ic_flags &~ IEEE80211_CHAN_HT); KASSERT(c != NULL, ("no legacy channel, %u/%x", ni->ni_chan->ic_freq, ni->ni_chan->ic_flags)); ni->ni_chan = c; } } return ni; } struct ieee80211_node * #ifdef IEEE80211_DEBUG_REFCNT ieee80211_find_node_locked_debug(struct ieee80211_node_table *nt, const uint8_t macaddr[IEEE80211_ADDR_LEN], const char *func, int line) #else ieee80211_find_node_locked(struct ieee80211_node_table *nt, const uint8_t macaddr[IEEE80211_ADDR_LEN]) #endif { struct ieee80211_node *ni; int hash; IEEE80211_NODE_LOCK_ASSERT(nt); hash = IEEE80211_NODE_HASH(macaddr); LIST_FOREACH(ni, &nt->nt_hash[hash], ni_hash) { if (IEEE80211_ADDR_EQ(ni->ni_macaddr, macaddr)) { ieee80211_ref_node(ni); /* mark referenced */ #ifdef IEEE80211_DEBUG_REFCNT IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_NODE, "%s (%s:%u) %p<%s> refcnt %d\n", __func__, func, line, ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)); #endif return ni; } } return NULL; } struct ieee80211_node * #ifdef IEEE80211_DEBUG_REFCNT ieee80211_find_node_debug(struct ieee80211_node_table *nt, const uint8_t macaddr[IEEE80211_ADDR_LEN], const char *func, int line) #else ieee80211_find_node(struct ieee80211_node_table *nt, const uint8_t macaddr[IEEE80211_ADDR_LEN]) #endif { struct ieee80211_node *ni; IEEE80211_NODE_LOCK(nt); ni = ieee80211_find_node_locked(nt, macaddr); IEEE80211_NODE_UNLOCK(nt); return ni; } struct ieee80211_node * #ifdef IEEE80211_DEBUG_REFCNT ieee80211_find_vap_node_locked_debug(struct ieee80211_node_table *nt, const struct ieee80211vap *vap, const uint8_t macaddr[IEEE80211_ADDR_LEN], const char *func, int line) #else ieee80211_find_vap_node_locked(struct ieee80211_node_table *nt, const struct ieee80211vap *vap, const uint8_t macaddr[IEEE80211_ADDR_LEN]) #endif { struct ieee80211_node *ni; int hash; IEEE80211_NODE_LOCK_ASSERT(nt); hash = IEEE80211_NODE_HASH(macaddr); LIST_FOREACH(ni, &nt->nt_hash[hash], ni_hash) { if (ni->ni_vap == vap && IEEE80211_ADDR_EQ(ni->ni_macaddr, macaddr)) { ieee80211_ref_node(ni); /* mark referenced */ #ifdef IEEE80211_DEBUG_REFCNT IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_NODE, "%s (%s:%u) %p<%s> refcnt %d\n", __func__, func, line, ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)); #endif return ni; } } return NULL; } struct ieee80211_node * #ifdef IEEE80211_DEBUG_REFCNT ieee80211_find_vap_node_debug(struct ieee80211_node_table *nt, const struct ieee80211vap *vap, const uint8_t macaddr[IEEE80211_ADDR_LEN], const char *func, int line) #else ieee80211_find_vap_node(struct ieee80211_node_table *nt, const struct ieee80211vap *vap, const uint8_t macaddr[IEEE80211_ADDR_LEN]) #endif { struct ieee80211_node *ni; IEEE80211_NODE_LOCK(nt); ni = ieee80211_find_vap_node_locked(nt, vap, macaddr); IEEE80211_NODE_UNLOCK(nt); return ni; } /* * Fake up a node; this handles node discovery in adhoc mode. * Note that for the driver's benefit we we treat this like * an association so the driver has an opportunity to setup * it's private state. 
*/ struct ieee80211_node * ieee80211_fakeup_adhoc_node(struct ieee80211vap *vap, const uint8_t macaddr[IEEE80211_ADDR_LEN]) { struct ieee80211_node *ni; IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "%s: mac<%s>\n", __func__, ether_sprintf(macaddr)); ni = ieee80211_dup_bss(vap, macaddr); if (ni != NULL) { struct ieee80211com *ic = vap->iv_ic; /* XXX no rate negotiation; just dup */ ni->ni_rates = vap->iv_bss->ni_rates; if (vap->iv_opmode == IEEE80211_M_AHDEMO) { /* * In adhoc demo mode there are no management * frames to use to discover neighbor capabilities, * so blindly propagate the local configuration * so we can do interesting things (e.g. use * WME to disable ACK's). */ if (vap->iv_flags & IEEE80211_F_WME) ni->ni_flags |= IEEE80211_NODE_QOS; if (vap->iv_flags & IEEE80211_F_FF) ni->ni_flags |= IEEE80211_NODE_FF; } if (ic->ic_newassoc != NULL) ic->ic_newassoc(ni, 1); /* XXX not right for 802.1x/WPA */ ieee80211_node_authorize(ni); } return ni; } void ieee80211_init_neighbor(struct ieee80211_node *ni, const struct ieee80211_frame *wh, const struct ieee80211_scanparams *sp) { ni->ni_esslen = sp->ssid[1]; memcpy(ni->ni_essid, sp->ssid + 2, sp->ssid[1]); IEEE80211_ADDR_COPY(ni->ni_bssid, wh->i_addr3); memcpy(ni->ni_tstamp.data, sp->tstamp, sizeof(ni->ni_tstamp)); ni->ni_intval = sp->bintval; ni->ni_capinfo = sp->capinfo; ni->ni_chan = ni->ni_ic->ic_curchan; ni->ni_fhdwell = sp->fhdwell; ni->ni_fhindex = sp->fhindex; ni->ni_erp = sp->erp; ni->ni_timoff = sp->timoff; if (ieee80211_ies_init(&ni->ni_ies, sp->ies, sp->ies_len)) { ieee80211_ies_expand(&ni->ni_ies); if (ni->ni_ies.ath_ie != NULL) ieee80211_parse_ath(ni, ni->ni_ies.ath_ie); } /* NB: must be after ni_chan is setup */ ieee80211_setup_rates(ni, sp->rates, sp->xrates, IEEE80211_F_DOSORT | IEEE80211_F_DOFRATE | IEEE80211_F_DONEGO | IEEE80211_F_DODEL); } /* * Do node discovery in adhoc mode on receipt of a beacon * or probe response frame. Note that for the driver's * benefit we we treat this like an association so the * driver has an opportunity to setup it's private state. */ struct ieee80211_node * ieee80211_add_neighbor(struct ieee80211vap *vap, const struct ieee80211_frame *wh, const struct ieee80211_scanparams *sp) { struct ieee80211_node *ni; IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "%s: mac<%s>\n", __func__, ether_sprintf(wh->i_addr2)); ni = ieee80211_dup_bss(vap, wh->i_addr2);/* XXX alloc_node? */ if (ni != NULL) { struct ieee80211com *ic = vap->iv_ic; ieee80211_init_neighbor(ni, wh, sp); if (ic->ic_newassoc != NULL) ic->ic_newassoc(ni, 1); /* XXX not right for 802.1x/WPA */ ieee80211_node_authorize(ni); } return ni; } #define IS_CTL(wh) \ ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) #define IS_PSPOLL(wh) \ ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == IEEE80211_FC0_SUBTYPE_PS_POLL) #define IS_BAR(wh) \ ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == IEEE80211_FC0_SUBTYPE_BAR) #define IS_PROBEREQ(wh) \ ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) \ == (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_REQ)) #define IS_BCAST_PROBEREQ(wh) \ (IS_PROBEREQ(wh) && IEEE80211_IS_MULTICAST( \ ((const struct ieee80211_frame *)(wh))->i_addr3)) static __inline struct ieee80211_node * _find_rxnode(struct ieee80211_node_table *nt, const struct ieee80211_frame_min *wh) { /* XXX 4-address frames? 
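 * Most control frames carry only a receiver address, so for control
 * frames other than PS-POLL and BAR the lookup below keys on addr1;
 * broadcast probe requests return NULL so a copy can be delivered to
 * every vap rather than to a single node.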
*/ if (IS_CTL(wh) && !IS_PSPOLL(wh) && !IS_BAR(wh) /*&& !IS_RTS(ah)*/) return ieee80211_find_node_locked(nt, wh->i_addr1); if (IS_BCAST_PROBEREQ(wh)) return NULL; /* spam bcast probe req to all vap's */ return ieee80211_find_node_locked(nt, wh->i_addr2); } /* * Locate the node for sender, track state, and then pass the * (referenced) node up to the 802.11 layer for its use. Note * we can return NULL if the sender is not in the table. */ struct ieee80211_node * #ifdef IEEE80211_DEBUG_REFCNT ieee80211_find_rxnode_debug(struct ieee80211com *ic, const struct ieee80211_frame_min *wh, const char *func, int line) #else ieee80211_find_rxnode(struct ieee80211com *ic, const struct ieee80211_frame_min *wh) #endif { struct ieee80211_node_table *nt; struct ieee80211_node *ni; nt = &ic->ic_sta; IEEE80211_NODE_LOCK(nt); ni = _find_rxnode(nt, wh); IEEE80211_NODE_UNLOCK(nt); return ni; } /* * Like ieee80211_find_rxnode but use the supplied h/w * key index as a hint to locate the node in the key * mapping table. If an entry is present at the key * index we return it; otherwise do a normal lookup and * update the mapping table if the station has a unicast * key assigned to it. */ struct ieee80211_node * #ifdef IEEE80211_DEBUG_REFCNT ieee80211_find_rxnode_withkey_debug(struct ieee80211com *ic, const struct ieee80211_frame_min *wh, ieee80211_keyix keyix, const char *func, int line) #else ieee80211_find_rxnode_withkey(struct ieee80211com *ic, const struct ieee80211_frame_min *wh, ieee80211_keyix keyix) #endif { struct ieee80211_node_table *nt; struct ieee80211_node *ni; nt = &ic->ic_sta; IEEE80211_NODE_LOCK(nt); if (nt->nt_keyixmap != NULL && keyix < nt->nt_keyixmax) ni = nt->nt_keyixmap[keyix]; else ni = NULL; if (ni == NULL) { ni = _find_rxnode(nt, wh); if (ni != NULL && nt->nt_keyixmap != NULL) { /* * If the station has a unicast key cache slot * assigned update the key->node mapping table. */ keyix = ni->ni_ucastkey.wk_rxkeyix; /* XXX can keyixmap[keyix] != NULL? */ if (keyix < nt->nt_keyixmax && nt->nt_keyixmap[keyix] == NULL) { IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_NODE, "%s: add key map entry %p<%s> refcnt %d\n", __func__, ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)+1); nt->nt_keyixmap[keyix] = ieee80211_ref_node(ni); } } } else { if (IS_BCAST_PROBEREQ(wh)) ni = NULL; /* spam bcast probe req to all vap's */ else ieee80211_ref_node(ni); } IEEE80211_NODE_UNLOCK(nt); return ni; } #undef IS_BCAST_PROBEREQ #undef IS_PROBEREQ #undef IS_BAR #undef IS_PSPOLL #undef IS_CTL /* * Return a reference to the appropriate node for sending * a data frame. This handles node discovery in adhoc networks. */ struct ieee80211_node * #ifdef IEEE80211_DEBUG_REFCNT ieee80211_find_txnode_debug(struct ieee80211vap *vap, const uint8_t macaddr[IEEE80211_ADDR_LEN], const char *func, int line) #else ieee80211_find_txnode(struct ieee80211vap *vap, const uint8_t macaddr[IEEE80211_ADDR_LEN]) #endif { struct ieee80211_node_table *nt = &vap->iv_ic->ic_sta; struct ieee80211_node *ni; /* * The destination address should be in the node table * unless this is a multicast/broadcast frame. We can * also optimize station mode operation, all frames go * to the bss node. 
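 * Station and WDS vaps, and any multicast destination, simply use the
 * (referenced) bss node.  Hostap vaps refuse to return a node for an
 * unassociated destination, and adhoc/ahdemo vaps fake up a node on
 * demand when the destination is not yet known.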
*/ /* XXX can't hold lock across dup_bss 'cuz of recursive locking */ IEEE80211_NODE_LOCK(nt); if (vap->iv_opmode == IEEE80211_M_STA || vap->iv_opmode == IEEE80211_M_WDS || IEEE80211_IS_MULTICAST(macaddr)) ni = ieee80211_ref_node(vap->iv_bss); else { ni = ieee80211_find_node_locked(nt, macaddr); if (vap->iv_opmode == IEEE80211_M_HOSTAP && (ni != NULL && ni->ni_associd == 0)) { /* * Station is not associated; don't permit the * data frame to be sent by returning NULL. This * is kinda a kludge but the least intrusive way * to add this check into all drivers. */ ieee80211_unref_node(&ni); /* NB: null's ni */ } } IEEE80211_NODE_UNLOCK(nt); if (ni == NULL) { if (vap->iv_opmode == IEEE80211_M_IBSS || vap->iv_opmode == IEEE80211_M_AHDEMO) { /* * In adhoc mode cons up a node for the destination. * Note that we need an additional reference for the * caller to be consistent with * ieee80211_find_node_locked. */ ni = ieee80211_fakeup_adhoc_node(vap, macaddr); if (ni != NULL) (void) ieee80211_ref_node(ni); } else { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_OUTPUT, macaddr, "no node, discard frame (%s)", __func__); vap->iv_stats.is_tx_nonode++; } } return ni; } static void _ieee80211_free_node(struct ieee80211_node *ni) { struct ieee80211_node_table *nt = ni->ni_table; /* * NB: careful about referencing the vap as it may be * gone if the last reference was held by a driver. * We know the com will always be present so it's safe * to use ni_ic below to reclaim resources. */ #if 0 IEEE80211_DPRINTF(vap, IEEE80211_MSG_NODE, "%s %p<%s> in %s table\n", __func__, ni, ether_sprintf(ni->ni_macaddr), nt != NULL ? nt->nt_name : ""); #endif if (ni->ni_associd != 0) { struct ieee80211vap *vap = ni->ni_vap; if (vap->iv_aid_bitmap != NULL) IEEE80211_AID_CLR(vap, ni->ni_associd); } if (nt != NULL) { TAILQ_REMOVE(&nt->nt_node, ni, ni_list); LIST_REMOVE(ni, ni_hash); } ni->ni_ic->ic_node_free(ni); } void #ifdef IEEE80211_DEBUG_REFCNT ieee80211_free_node_debug(struct ieee80211_node *ni, const char *func, int line) #else ieee80211_free_node(struct ieee80211_node *ni) #endif { struct ieee80211_node_table *nt = ni->ni_table; #ifdef IEEE80211_DEBUG_REFCNT IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_NODE, "%s (%s:%u) %p<%s> refcnt %d\n", __func__, func, line, ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)-1); #endif if (nt != NULL) { IEEE80211_NODE_LOCK(nt); if (ieee80211_node_dectestref(ni)) { /* * Last reference, reclaim state. */ _ieee80211_free_node(ni); } else if (ieee80211_node_refcnt(ni) == 1 && nt->nt_keyixmap != NULL) { ieee80211_keyix keyix; /* * Check for a last reference in the key mapping table. */ keyix = ni->ni_ucastkey.wk_rxkeyix; if (keyix < nt->nt_keyixmax && nt->nt_keyixmap[keyix] == ni) { IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_NODE, "%s: %p<%s> clear key map entry", __func__, ni, ether_sprintf(ni->ni_macaddr)); nt->nt_keyixmap[keyix] = NULL; ieee80211_node_decref(ni); /* XXX needed? */ _ieee80211_free_node(ni); } } IEEE80211_NODE_UNLOCK(nt); } else { if (ieee80211_node_dectestref(ni)) _ieee80211_free_node(ni); } } /* * Reclaim a unicast key and clear any key cache state. 
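 * If the key-index mapping table holds a reference to this node
 * (keyed by the unicast rx key index) that entry is cleared as well
 * and the extra node reference is dropped along with the key.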
*/ int ieee80211_node_delucastkey(struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; struct ieee80211_node_table *nt = &ic->ic_sta; struct ieee80211_node *nikey; ieee80211_keyix keyix; int isowned, status; /* * NB: We must beware of LOR here; deleting the key * can cause the crypto layer to block traffic updates * which can generate a LOR against the node table lock; * grab it here and stash the key index for our use below. * * Must also beware of recursion on the node table lock. * When called from node_cleanup we may already have * the node table lock held. Unfortunately there's no * way to separate out this path so we must do this * conditionally. */ isowned = IEEE80211_NODE_IS_LOCKED(nt); if (!isowned) IEEE80211_NODE_LOCK(nt); nikey = NULL; status = 1; /* NB: success */ if (!IEEE80211_KEY_UNDEFINED(&ni->ni_ucastkey)) { keyix = ni->ni_ucastkey.wk_rxkeyix; status = ieee80211_crypto_delkey(ni->ni_vap, &ni->ni_ucastkey); if (nt->nt_keyixmap != NULL && keyix < nt->nt_keyixmax) { nikey = nt->nt_keyixmap[keyix]; nt->nt_keyixmap[keyix] = NULL;; } } if (!isowned) IEEE80211_NODE_UNLOCK(nt); if (nikey != NULL) { KASSERT(nikey == ni, ("key map out of sync, ni %p nikey %p", ni, nikey)); IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_NODE, "%s: delete key map entry %p<%s> refcnt %d\n", __func__, ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)-1); ieee80211_free_node(ni); } return status; } /* * Reclaim a node. If this is the last reference count then * do the normal free work. Otherwise remove it from the node * table and mark it gone by clearing the back-reference. */ static void node_reclaim(struct ieee80211_node_table *nt, struct ieee80211_node *ni) { ieee80211_keyix keyix; IEEE80211_NODE_LOCK_ASSERT(nt); IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_NODE, "%s: remove %p<%s> from %s table, refcnt %d\n", __func__, ni, ether_sprintf(ni->ni_macaddr), nt->nt_name, ieee80211_node_refcnt(ni)-1); /* * Clear any entry in the unicast key mapping table. * We need to do it here so rx lookups don't find it * in the mapping table even if it's not in the hash * table. We cannot depend on the mapping table entry * being cleared because the node may not be free'd. */ keyix = ni->ni_ucastkey.wk_rxkeyix; if (nt->nt_keyixmap != NULL && keyix < nt->nt_keyixmax && nt->nt_keyixmap[keyix] == ni) { IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_NODE, "%s: %p<%s> clear key map entry\n", __func__, ni, ether_sprintf(ni->ni_macaddr)); nt->nt_keyixmap[keyix] = NULL; ieee80211_node_decref(ni); /* NB: don't need free */ } if (!ieee80211_node_dectestref(ni)) { /* * Other references are present, just remove the * node from the table so it cannot be found. When * the references are dropped storage will be * reclaimed. */ TAILQ_REMOVE(&nt->nt_node, ni, ni_list); LIST_REMOVE(ni, ni_hash); ni->ni_table = NULL; /* clear reference */ } else _ieee80211_free_node(ni); } /* * Reclaim a (bss) node. Decrement the refcnt and reclaim * the node if the only other reference to it is in the sta * table. This is effectively ieee80211_free_node followed * by node_reclaim when the refcnt is 1 (after the free). 
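 * The node table lock is taken internally, so this must not be
 * called with the table already locked.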
*/ static void ieee80211_node_reclaim(struct ieee80211_node *ni) { struct ieee80211_node_table *nt = ni->ni_table; KASSERT(nt != NULL, ("reclaim node not in table")); #ifdef IEEE80211_DEBUG_REFCNT IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_NODE, "%s %p<%s> refcnt %d\n", __func__, ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)-1); #endif IEEE80211_NODE_LOCK(nt); if (ieee80211_node_dectestref(ni)) { /* * Last reference, reclaim state. */ _ieee80211_free_node(ni); nt = NULL; } else if (ieee80211_node_refcnt(ni) == 1 && nt->nt_keyixmap != NULL) { ieee80211_keyix keyix; /* * Check for a last reference in the key mapping table. */ keyix = ni->ni_ucastkey.wk_rxkeyix; if (keyix < nt->nt_keyixmax && nt->nt_keyixmap[keyix] == ni) { IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_NODE, "%s: %p<%s> clear key map entry", __func__, ni, ether_sprintf(ni->ni_macaddr)); nt->nt_keyixmap[keyix] = NULL; ieee80211_node_decref(ni); /* XXX needed? */ _ieee80211_free_node(ni); nt = NULL; } } if (nt != NULL && ieee80211_node_refcnt(ni) == 1) { /* * Last reference is in the sta table; complete * the reclaim. This handles bss nodes being * recycled: the node has two references, one for * iv_bss and one for the table. After dropping * the iv_bss ref above we need to reclaim the sta * table reference. */ ieee80211_node_decref(ni); /* NB: be pendantic */ _ieee80211_free_node(ni); } IEEE80211_NODE_UNLOCK(nt); } /* * Node table support. */ static void ieee80211_node_table_init(struct ieee80211com *ic, struct ieee80211_node_table *nt, const char *name, int inact, int keyixmax) { struct ifnet *ifp = ic->ic_ifp; nt->nt_ic = ic; IEEE80211_NODE_LOCK_INIT(nt, ifp->if_xname); IEEE80211_NODE_ITERATE_LOCK_INIT(nt, ifp->if_xname); TAILQ_INIT(&nt->nt_node); nt->nt_name = name; nt->nt_scangen = 1; nt->nt_inact_init = inact; nt->nt_keyixmax = keyixmax; if (nt->nt_keyixmax > 0) { MALLOC(nt->nt_keyixmap, struct ieee80211_node **, keyixmax * sizeof(struct ieee80211_node *), M_80211_NODE, M_NOWAIT | M_ZERO); if (nt->nt_keyixmap == NULL) if_printf(ic->ic_ifp, "Cannot allocate key index map with %u entries\n", keyixmax); } else nt->nt_keyixmap = NULL; } static void ieee80211_node_table_reset(struct ieee80211_node_table *nt, struct ieee80211vap *match) { struct ieee80211_node *ni, *next; IEEE80211_NODE_LOCK(nt); TAILQ_FOREACH_SAFE(ni, &nt->nt_node, ni_list, next) { if (match != NULL && ni->ni_vap != match) continue; /* XXX can this happen? if so need's work */ if (ni->ni_associd != 0) { struct ieee80211vap *vap = ni->ni_vap; if (vap->iv_auth->ia_node_leave != NULL) vap->iv_auth->ia_node_leave(ni); if (vap->iv_aid_bitmap != NULL) IEEE80211_AID_CLR(vap, ni->ni_associd); } ni->ni_wdsvap = NULL; /* clear reference */ node_reclaim(nt, ni); } if (match != NULL && match->iv_opmode == IEEE80211_M_WDS) { /* * Make a separate pass to clear references to this vap * held by DWDS entries. They will not be matched above * because ni_vap will point to the ap vap but we still * need to clear ni_wdsvap when the WDS vap is destroyed * and/or reset. 
*/ TAILQ_FOREACH_SAFE(ni, &nt->nt_node, ni_list, next) if (ni->ni_wdsvap == match) ni->ni_wdsvap = NULL; } IEEE80211_NODE_UNLOCK(nt); } static void ieee80211_node_table_cleanup(struct ieee80211_node_table *nt) { ieee80211_node_table_reset(nt, NULL); if (nt->nt_keyixmap != NULL) { #ifdef DIAGNOSTIC /* XXX verify all entries are NULL */ int i; for (i = 0; i < nt->nt_keyixmax; i++) if (nt->nt_keyixmap[i] != NULL) printf("%s: %s[%u] still active\n", __func__, nt->nt_name, i); #endif FREE(nt->nt_keyixmap, M_80211_NODE); nt->nt_keyixmap = NULL; } IEEE80211_NODE_ITERATE_LOCK_DESTROY(nt); IEEE80211_NODE_LOCK_DESTROY(nt); } /* * Timeout inactive stations and do related housekeeping. * Note that we cannot hold the node lock while sending a * frame as this would lead to a LOR. Instead we use a * generation number to mark nodes that we've scanned and * drop the lock and restart a scan if we have to time out * a node. Since we are single-threaded by virtue of * controlling the inactivity timer we can be sure this will * process each node only once. */ static void ieee80211_timeout_stations(struct ieee80211com *ic) { struct ieee80211_node_table *nt = &ic->ic_sta; struct ieee80211vap *vap; struct ieee80211_node *ni; int gen = 0; IEEE80211_NODE_ITERATE_LOCK(nt); gen = ++nt->nt_scangen; restart: IEEE80211_NODE_LOCK(nt); TAILQ_FOREACH(ni, &nt->nt_node, ni_list) { if (ni->ni_scangen == gen) /* previously handled */ continue; ni->ni_scangen = gen; /* * Ignore entries for which have yet to receive an * authentication frame. These are transient and * will be reclaimed when the last reference to them * goes away (when frame xmits complete). */ vap = ni->ni_vap; /* * Only process stations when in RUN state. This * insures, for example, that we don't timeout an * inactive station during CAC. Note that CSA state * is actually handled in ieee80211_node_timeout as * it applies to more than timeout processing. */ if (vap->iv_state != IEEE80211_S_RUN) continue; /* XXX can vap be NULL? */ if ((vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_STA) && (ni->ni_flags & IEEE80211_NODE_AREF) == 0) continue; /* * Free fragment if not needed anymore * (last fragment older than 1s). * XXX doesn't belong here, move to node_age */ if (ni->ni_rxfrag[0] != NULL && ticks > ni->ni_rxfragstamp + hz) { m_freem(ni->ni_rxfrag[0]); ni->ni_rxfrag[0] = NULL; } if (ni->ni_inact > 0) ni->ni_inact--; /* * Special case ourself; we may be idle for extended periods * of time and regardless reclaiming our state is wrong. * XXX run ic_node_age */ if (ni == vap->iv_bss) continue; if (ni->ni_associd != 0 || (vap->iv_opmode == IEEE80211_M_IBSS || vap->iv_opmode == IEEE80211_M_AHDEMO)) { /* * Age/drain resources held by the station. */ ic->ic_node_age(ni); /* * Probe the station before time it out. We * send a null data frame which may not be * universally supported by drivers (need it * for ps-poll support so it should be...). * * XXX don't probe the station unless we've * received a frame from them (and have * some idea of the rates they are capable * of); this will get fixed more properly * soon with better handling of the rate set. */ if ((vap->iv_flags_ext & IEEE80211_FEXT_INACT) && (0 < ni->ni_inact && ni->ni_inact <= vap->iv_inact_probe) && ni->ni_rates.rs_nrates != 0) { IEEE80211_NOTE(vap, IEEE80211_MSG_INACT | IEEE80211_MSG_NODE, ni, "%s", "probe station due to inactivity"); /* * Grab a reference before unlocking the table * so the node cannot be reclaimed before we * send the frame. 
ieee80211_send_nulldata * understands we've done this and reclaims the * ref for us as needed. */ ieee80211_ref_node(ni); IEEE80211_NODE_UNLOCK(nt); ieee80211_send_nulldata(ni); /* XXX stat? */ goto restart; } } if ((vap->iv_flags_ext & IEEE80211_FEXT_INACT) && ni->ni_inact <= 0) { IEEE80211_NOTE(vap, IEEE80211_MSG_INACT | IEEE80211_MSG_NODE, ni, "station timed out due to inactivity " "(refcnt %u)", ieee80211_node_refcnt(ni)); /* * Send a deauthenticate frame and drop the station. * This is somewhat complicated due to reference counts * and locking. At this point a station will typically * have a reference count of 1. ieee80211_node_leave * will do a "free" of the node which will drop the * reference count. But in the meantime a reference * wil be held by the deauth frame. The actual reclaim * of the node will happen either after the tx is * completed or by ieee80211_node_leave. * * Separately we must drop the node lock before sending * in case the driver takes a lock, as this can result * in a LOR between the node lock and the driver lock. */ ieee80211_ref_node(ni); IEEE80211_NODE_UNLOCK(nt); if (ni->ni_associd != 0) { IEEE80211_SEND_MGMT(ni, IEEE80211_FC0_SUBTYPE_DEAUTH, IEEE80211_REASON_AUTH_EXPIRE); } ieee80211_node_leave(ni); ieee80211_free_node(ni); vap->iv_stats.is_node_timeout++; goto restart; } } IEEE80211_NODE_UNLOCK(nt); IEEE80211_NODE_ITERATE_UNLOCK(nt); } /* * Aggressively reclaim resources. This should be used * only in a critical situation to reclaim mbuf resources. */ void ieee80211_drain(struct ieee80211com *ic) { struct ieee80211_node_table *nt = &ic->ic_sta; struct ieee80211vap *vap; struct ieee80211_node *ni; IEEE80211_NODE_LOCK(nt); TAILQ_FOREACH(ni, &nt->nt_node, ni_list) { /* * Ignore entries for which have yet to receive an * authentication frame. These are transient and * will be reclaimed when the last reference to them * goes away (when frame xmits complete). */ vap = ni->ni_vap; /* * Only process stations when in RUN state. This * insures, for example, that we don't timeout an * inactive station during CAC. Note that CSA state * is actually handled in ieee80211_node_timeout as * it applies to more than timeout processing. */ if (vap->iv_state != IEEE80211_S_RUN) continue; /* XXX can vap be NULL? */ if ((vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_STA) && (ni->ni_flags & IEEE80211_NODE_AREF) == 0) continue; /* * Free fragments. * XXX doesn't belong here, move to node_drain */ if (ni->ni_rxfrag[0] != NULL) { m_freem(ni->ni_rxfrag[0]); ni->ni_rxfrag[0] = NULL; } /* * Drain resources held by the station. */ ic->ic_node_drain(ni); } IEEE80211_NODE_UNLOCK(nt); } /* * Per-ieee80211com inactivity timer callback. */ void ieee80211_node_timeout(void *arg) { struct ieee80211com *ic = arg; /* * Defer timeout processing if a channel switch is pending. * We typically need to be mute so not doing things that * might generate frames is good to handle in one place. * Supressing the station timeout processing may extend the * lifetime of inactive stations (by not decrementing their * idle counters) but this should be ok unless the CSA is * active for an unusually long time. 
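 * When no switch is pending the callout drives scan cache aging,
 * station inactivity processing and the ERP/HT protection timeouts;
 * in either case it re-arms itself IEEE80211_INACT_WAIT seconds out.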
*/ if ((ic->ic_flags & IEEE80211_F_CSAPENDING) == 0) { ieee80211_scan_timeout(ic); ieee80211_timeout_stations(ic); IEEE80211_LOCK(ic); ieee80211_erp_timeout(ic); ieee80211_ht_timeout(ic); IEEE80211_UNLOCK(ic); } callout_reset(&ic->ic_inact, IEEE80211_INACT_WAIT*hz, ieee80211_node_timeout, ic); } void ieee80211_iterate_nodes(struct ieee80211_node_table *nt, ieee80211_iter_func *f, void *arg) { struct ieee80211_node *ni; u_int gen; IEEE80211_NODE_ITERATE_LOCK(nt); gen = ++nt->nt_scangen; restart: IEEE80211_NODE_LOCK(nt); TAILQ_FOREACH(ni, &nt->nt_node, ni_list) { if (ni->ni_scangen != gen) { ni->ni_scangen = gen; (void) ieee80211_ref_node(ni); IEEE80211_NODE_UNLOCK(nt); (*f)(arg, ni); ieee80211_free_node(ni); goto restart; } } IEEE80211_NODE_UNLOCK(nt); IEEE80211_NODE_ITERATE_UNLOCK(nt); } void ieee80211_dump_node(struct ieee80211_node_table *nt, struct ieee80211_node *ni) { printf("0x%p: mac %s refcnt %d\n", ni, ether_sprintf(ni->ni_macaddr), ieee80211_node_refcnt(ni)); printf("\tscangen %u authmode %u flags 0x%x\n", ni->ni_scangen, ni->ni_authmode, ni->ni_flags); printf("\tassocid 0x%x txpower %u vlan %u\n", ni->ni_associd, ni->ni_txpower, ni->ni_vlan); printf("\ttxseq %u rxseq %u fragno %u rxfragstamp %u\n", ni->ni_txseqs[IEEE80211_NONQOS_TID], ni->ni_rxseqs[IEEE80211_NONQOS_TID] >> IEEE80211_SEQ_SEQ_SHIFT, ni->ni_rxseqs[IEEE80211_NONQOS_TID] & IEEE80211_SEQ_FRAG_MASK, ni->ni_rxfragstamp); printf("\trstamp %u rssi %d noise %d intval %u capinfo 0x%x\n", ni->ni_rstamp, node_getrssi(ni), ni->ni_noise, ni->ni_intval, ni->ni_capinfo); printf("\tbssid %s essid \"%.*s\" channel %u:0x%x\n", ether_sprintf(ni->ni_bssid), ni->ni_esslen, ni->ni_essid, ni->ni_chan->ic_freq, ni->ni_chan->ic_flags); printf("\tinact %u txrate %u\n", ni->ni_inact, ni->ni_txrate); printf("\thtcap %x htparam %x htctlchan %u ht2ndchan %u\n", ni->ni_htcap, ni->ni_htparam, ni->ni_htctlchan, ni->ni_ht2ndchan); printf("\thtopmode %x htstbc %x chw %u\n", ni->ni_htopmode, ni->ni_htstbc, ni->ni_chw); } void ieee80211_dump_nodes(struct ieee80211_node_table *nt) { ieee80211_iterate_nodes(nt, (ieee80211_iter_func *) ieee80211_dump_node, nt); } static void ieee80211_notify_erp_locked(struct ieee80211com *ic) { struct ieee80211vap *vap; IEEE80211_LOCK_ASSERT(ic); TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) if (vap->iv_opmode == IEEE80211_M_HOSTAP) ieee80211_beacon_notify(vap, IEEE80211_BEACON_ERP); } void ieee80211_notify_erp(struct ieee80211com *ic) { IEEE80211_LOCK(ic); ieee80211_notify_erp_locked(ic); IEEE80211_UNLOCK(ic); } /* * Handle a station joining an 11g network. */ static void ieee80211_node_join_11g(struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; IEEE80211_LOCK_ASSERT(ic); /* * Station isn't capable of short slot time. Bump * the count of long slot time stations and disable * use of short slot time. Note that the actual switch * over to long slot time use may not occur until the * next beacon transmission (per sec. 7.3.1.4 of 11g). */ if ((ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_SLOTTIME) == 0) { ic->ic_longslotsta++; IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_ASSOC, ni, "station needs long slot time, count %d", ic->ic_longslotsta); /* XXX vap's w/ conflicting needs won't work */ if (!IEEE80211_IS_CHAN_108G(ic->ic_bsschan)) { /* * Don't force slot time when switched to turbo * mode as non-ERP stations won't be present; this * need only be done when on the normal G channel. 
*/ ieee80211_set_shortslottime(ic, 0); } } /* * If the new station is not an ERP station * then bump the counter and enable protection * if configured. */ if (!ieee80211_iserp_rateset(&ni->ni_rates)) { ic->ic_nonerpsta++; IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_ASSOC, ni, "station is !ERP, %d non-ERP stations associated", ic->ic_nonerpsta); /* * If station does not support short preamble * then we must enable use of Barker preamble. */ if ((ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE) == 0) { IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_ASSOC, ni, "%s", "station needs long preamble"); ic->ic_flags |= IEEE80211_F_USEBARKER; ic->ic_flags &= ~IEEE80211_F_SHPREAMBLE; } /* * If protection is configured and this is the first * indication we should use protection, enable it. */ if (ic->ic_protmode != IEEE80211_PROT_NONE && ic->ic_nonerpsta == 1 && (ic->ic_flags_ext & IEEE80211_FEXT_NONERP_PR) == 0) { IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_ASSOC, "%s: enable use of protection\n", __func__); ic->ic_flags |= IEEE80211_F_USEPROT; ieee80211_notify_erp_locked(ic); } } else ni->ni_flags |= IEEE80211_NODE_ERP; } void ieee80211_node_join(struct ieee80211_node *ni, int resp) { struct ieee80211com *ic = ni->ni_ic; struct ieee80211vap *vap = ni->ni_vap; int newassoc; if (ni->ni_associd == 0) { uint16_t aid; KASSERT(vap->iv_aid_bitmap != NULL, ("no aid bitmap")); /* * It would be good to search the bitmap * more efficiently, but this will do for now. */ for (aid = 1; aid < vap->iv_max_aid; aid++) { if (!IEEE80211_AID_ISSET(vap, aid)) break; } if (aid >= vap->iv_max_aid) { IEEE80211_SEND_MGMT(ni, resp, IEEE80211_STATUS_TOOMANY); ieee80211_node_leave(ni); return; } ni->ni_associd = aid | 0xc000; ni->ni_jointime = time_uptime; IEEE80211_LOCK(ic); IEEE80211_AID_SET(vap, ni->ni_associd); vap->iv_sta_assoc++; ic->ic_sta_assoc++; if (IEEE80211_IS_CHAN_HT(ic->ic_bsschan)) ieee80211_ht_node_join(ni); if (IEEE80211_IS_CHAN_ANYG(ic->ic_bsschan) && IEEE80211_IS_CHAN_FULL(ic->ic_bsschan)) ieee80211_node_join_11g(ni); IEEE80211_UNLOCK(ic); newassoc = 1; } else newassoc = 0; IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC | IEEE80211_MSG_DEBUG, ni, "station associated at aid %d: %s preamble, %s slot time%s%s%s%s%s%s", IEEE80211_NODE_AID(ni), ic->ic_flags & IEEE80211_F_SHPREAMBLE ? "short" : "long", ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", ic->ic_flags & IEEE80211_F_USEPROT ? ", protection" : "", ni->ni_flags & IEEE80211_NODE_QOS ? ", QoS" : "", ni->ni_flags & IEEE80211_NODE_HT ? (ni->ni_chw == 20 ? ", HT20" : ", HT40") : "", ni->ni_flags & IEEE80211_NODE_AMPDU ? " (+AMPDU)" : "", IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_FF) ? ", fast-frames" : "", IEEE80211_ATH_CAP(vap, ni, IEEE80211_NODE_TURBOP) ? ", turbo" : "" ); /* give driver a chance to setup state like ni_txrate */ if (ic->ic_newassoc != NULL) ic->ic_newassoc(ni, newassoc); IEEE80211_SEND_MGMT(ni, resp, IEEE80211_STATUS_SUCCESS); /* tell the authenticator about new station */ if (vap->iv_auth->ia_node_join != NULL) vap->iv_auth->ia_node_join(ni); ieee80211_notify_node_join(ni, resp == IEEE80211_FC0_SUBTYPE_ASSOC_RESP); } static void disable_protection(struct ieee80211com *ic) { KASSERT(ic->ic_nonerpsta == 0 && (ic->ic_flags_ext & IEEE80211_FEXT_NONERP_PR) == 0, ("%d non ERP stations, flags 0x%x", ic->ic_nonerpsta, ic->ic_flags_ext)); ic->ic_flags &= ~IEEE80211_F_USEPROT; /* XXX verify mode? 
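 * Re-enabling short preamble below also clears the Barker preamble
 * requirement, and hostap vaps are told to refresh the ERP state in
 * their beacon frames via ieee80211_notify_erp_locked.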
*/ if (ic->ic_caps & IEEE80211_C_SHPREAMBLE) { ic->ic_flags |= IEEE80211_F_SHPREAMBLE; ic->ic_flags &= ~IEEE80211_F_USEBARKER; } ieee80211_notify_erp_locked(ic); } /* * Handle a station leaving an 11g network. */ static void ieee80211_node_leave_11g(struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; IEEE80211_LOCK_ASSERT(ic); KASSERT(IEEE80211_IS_CHAN_ANYG(ic->ic_bsschan), ("not in 11g, bss %u:0x%x", ic->ic_bsschan->ic_freq, ic->ic_bsschan->ic_flags)); /* * If a long slot station do the slot time bookkeeping. */ if ((ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_SLOTTIME) == 0) { KASSERT(ic->ic_longslotsta > 0, ("bogus long slot station count %d", ic->ic_longslotsta)); ic->ic_longslotsta--; IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_ASSOC, ni, "long slot time station leaves, count now %d", ic->ic_longslotsta); if (ic->ic_longslotsta == 0) { /* * Re-enable use of short slot time if supported * and not operating in IBSS mode (per spec). */ if ((ic->ic_caps & IEEE80211_C_SHSLOT) && ic->ic_opmode != IEEE80211_M_IBSS) { IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_ASSOC, "%s: re-enable use of short slot time\n", __func__); ieee80211_set_shortslottime(ic, 1); } } } /* * If a non-ERP station do the protection-related bookkeeping. */ if ((ni->ni_flags & IEEE80211_NODE_ERP) == 0) { KASSERT(ic->ic_nonerpsta > 0, ("bogus non-ERP station count %d", ic->ic_nonerpsta)); ic->ic_nonerpsta--; IEEE80211_NOTE(ni->ni_vap, IEEE80211_MSG_ASSOC, ni, "non-ERP station leaves, count now %d%s", ic->ic_nonerpsta, (ic->ic_flags_ext & IEEE80211_FEXT_NONERP_PR) ? " (non-ERP sta present)" : ""); if (ic->ic_nonerpsta == 0 && (ic->ic_flags_ext & IEEE80211_FEXT_NONERP_PR) == 0) { IEEE80211_DPRINTF(ni->ni_vap, IEEE80211_MSG_ASSOC, "%s: disable use of protection\n", __func__); disable_protection(ic); } } } /* * Time out presence of an overlapping bss with non-ERP * stations. When operating in hostap mode we listen for * beacons from other stations and if we identify a non-ERP * station is present we enable protection. To identify * when all non-ERP stations are gone we time out this * condition. */ static void ieee80211_erp_timeout(struct ieee80211com *ic) { IEEE80211_LOCK_ASSERT(ic); if ((ic->ic_flags_ext & IEEE80211_FEXT_NONERP_PR) && time_after(ticks, ic->ic_lastnonerp + IEEE80211_NONERP_PRESENT_AGE)) { #if 0 IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC, ni, "%s", "age out non-ERP sta present on channel"); #endif ic->ic_flags_ext &= ~IEEE80211_FEXT_NONERP_PR; if (ic->ic_nonerpsta == 0) disable_protection(ic); } } /* * Handle bookkeeping for station deauthentication/disassociation * when operating as an ap. */ void ieee80211_node_leave(struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_node_table *nt = ni->ni_table; IEEE80211_NOTE(vap, IEEE80211_MSG_ASSOC | IEEE80211_MSG_DEBUG, ni, "station with aid %d leaves", IEEE80211_NODE_AID(ni)); KASSERT(vap->iv_opmode != IEEE80211_M_STA, ("unexpected operating mode %u", vap->iv_opmode)); /* * If node wasn't previously associated all * we need to do is reclaim the reference. */ /* XXX ibss mode bypasses 11g and notification */ if (ni->ni_associd == 0) goto done; /* * Tell the authenticator the station is leaving. * Note that we must do this before yanking the * association id as the authenticator uses the * associd to locate it's state block. 
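 * The teardown order is: notify the authenticator, release the
 * association id and undo the 11g/HT bookkeeping under the com lock,
 * clean up station state, and finally reclaim the node from its table
 * (or just drop the reference if it is in none).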
*/ if (vap->iv_auth->ia_node_leave != NULL) vap->iv_auth->ia_node_leave(ni); IEEE80211_LOCK(ic); IEEE80211_AID_CLR(vap, ni->ni_associd); ni->ni_associd = 0; vap->iv_sta_assoc--; ic->ic_sta_assoc--; if (IEEE80211_IS_CHAN_HT(ic->ic_bsschan)) ieee80211_ht_node_leave(ni); if (IEEE80211_IS_CHAN_ANYG(ic->ic_bsschan) && IEEE80211_IS_CHAN_FULL(ic->ic_bsschan)) ieee80211_node_leave_11g(ni); IEEE80211_UNLOCK(ic); /* * Cleanup station state. In particular clear various * state that might otherwise be reused if the node * is reused before the reference count goes to zero * (and memory is reclaimed). */ ieee80211_sta_leave(ni); done: /* * Remove the node from any table it's recorded in and * drop the caller's reference. Removal from the table * is important to insure the node is not reprocessed * for inactivity. */ if (nt != NULL) { IEEE80211_NODE_LOCK(nt); node_reclaim(nt, ni); IEEE80211_NODE_UNLOCK(nt); } else ieee80211_free_node(ni); } struct rssiinfo { struct ieee80211vap *vap; int rssi_samples; uint32_t rssi_total; }; static void get_hostap_rssi(void *arg, struct ieee80211_node *ni) { struct rssiinfo *info = arg; struct ieee80211vap *vap = ni->ni_vap; int8_t rssi; if (info->vap != vap) return; /* only associated stations */ if (ni->ni_associd == 0) return; rssi = vap->iv_ic->ic_node_getrssi(ni); if (rssi != 0) { info->rssi_samples++; info->rssi_total += rssi; } } static void get_adhoc_rssi(void *arg, struct ieee80211_node *ni) { struct rssiinfo *info = arg; struct ieee80211vap *vap = ni->ni_vap; int8_t rssi; if (info->vap != vap) return; /* only neighbors */ /* XXX check bssid */ if ((ni->ni_capinfo & IEEE80211_CAPINFO_IBSS) == 0) return; rssi = vap->iv_ic->ic_node_getrssi(ni); if (rssi != 0) { info->rssi_samples++; info->rssi_total += rssi; } } int8_t ieee80211_getrssi(struct ieee80211vap *vap) { #define NZ(x) ((x) == 0 ? 1 : (x)) struct ieee80211com *ic = vap->iv_ic; struct rssiinfo info; info.rssi_total = 0; info.rssi_samples = 0; info.vap = vap; switch (vap->iv_opmode) { case IEEE80211_M_IBSS: /* average of all ibss neighbors */ case IEEE80211_M_AHDEMO: /* average of all neighbors */ ieee80211_iterate_nodes(&ic->ic_sta, get_adhoc_rssi, &info); break; case IEEE80211_M_HOSTAP: /* average of all associated stations */ ieee80211_iterate_nodes(&ic->ic_sta, get_hostap_rssi, &info); break; case IEEE80211_M_MONITOR: /* XXX */ case IEEE80211_M_STA: /* use stats from associated ap */ default: if (vap->iv_bss != NULL) info.rssi_total = ic->ic_node_getrssi(vap->iv_bss); info.rssi_samples = 1; break; } return info.rssi_total / NZ(info.rssi_samples); #undef NZ } void ieee80211_getsignal(struct ieee80211vap *vap, int8_t *rssi, int8_t *noise) { if (vap->iv_bss == NULL) /* NB: shouldn't happen */ return; vap->iv_ic->ic_node_getsignal(vap->iv_bss, rssi, noise); /* for non-station mode return avg'd rssi accounting */ if (vap->iv_opmode != IEEE80211_M_STA) *rssi = ieee80211_getrssi(vap); } diff --git a/sys/net80211/ieee80211_var.h b/sys/net80211/ieee80211_var.h index 9b362f6f0714..f92b10f3a6cd 100644 --- a/sys/net80211/ieee80211_var.h +++ b/sys/net80211/ieee80211_var.h @@ -1,758 +1,759 @@ /*- * Copyright (c) 2001 Atsushi Onoe * Copyright (c) 2002-2008 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _NET80211_IEEE80211_VAR_H_ #define _NET80211_IEEE80211_VAR_H_ /* * Definitions for IEEE 802.11 drivers. */ /* NB: portability glue must go first */ #ifdef __NetBSD__ #include #elif __FreeBSD__ #include #elif __linux__ #include #else #error "No support for your operating system!" #endif #include #include #include #include #include /* for ieee80211_stats */ #include #include #include #include #define IEEE80211_TXPOWER_MAX 100 /* .5 dbM (XXX units?) */ #define IEEE80211_TXPOWER_MIN 0 /* kill radio */ #define IEEE80211_DTIM_DEFAULT 1 /* default DTIM period */ #define IEEE80211_BINTVAL_DEFAULT 100 /* default beacon interval (TU's) */ #define IEEE80211_BMISS_MAX 2 /* maximum consecutive bmiss allowed */ #define IEEE80211_HWBMISS_DEFAULT 7 /* h/w bmiss threshold (beacons) */ #define IEEE80211_BGSCAN_INTVAL_MIN 15 /* min bg scan intvl (secs) */ #define IEEE80211_BGSCAN_INTVAL_DEFAULT (5*60) /* default bg scan intvl */ #define IEEE80211_BGSCAN_IDLE_MIN 100 /* min idle time (ms) */ #define IEEE80211_BGSCAN_IDLE_DEFAULT 250 /* default idle time (ms) */ #define IEEE80211_SCAN_VALID_MIN 10 /* min scan valid time (secs) */ #define IEEE80211_SCAN_VALID_DEFAULT 60 /* default scan valid time */ #define IEEE80211_PS_SLEEP 0x1 /* STA is in power saving mode */ #define IEEE80211_PS_MAX_QUEUE 50 /* maximum saved packets */ #define IEEE80211_FIXED_RATE_NONE 0xff #define IEEE80211_TXMAX_DEFAULT 6 /* default ucast max retries */ #define IEEE80211_RTS_DEFAULT IEEE80211_RTS_MAX #define IEEE80211_FRAG_DEFAULT IEEE80211_FRAG_MAX #define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024) #define IEEE80211_TU_TO_MS(x) (((x) * 1024) / 1000) #define IEEE80211_TU_TO_TICKS(x)(((x) * 1024 * hz) / (1000 * 1000)) /* * 802.11 control state is split into a common portion that maps * 1-1 to a physical device and one or more "Virtual AP's" (VAP) * that are bound to an ieee80211com instance and share a single * underlying device. Each VAP has a corresponding OS device * entity through which traffic flows and that applications use * for issuing ioctls, etc. */ /* * Data common to one or more virtual AP's. State shared by * the underlying device and the net80211 layer is exposed here; * e.g. device-specific callbacks. 
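 *
 * The fragment below (compiled out with #if 0) is an illustrative
 * sketch only, not part of this change: it shows the common idiom for
 * walking every vap bound to a device, where "ic" is a hypothetical
 * pointer to the driver's ieee80211com.
 */
#if 0
	struct ieee80211vap *vap;

	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
		/* per-vap work, e.g. ieee80211_beacon_notify(vap, item) */
	}
#endif
/*
 * (End of illustrative sketch.)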
*/ struct ieee80211vap; typedef void (*ieee80211vap_attach)(struct ieee80211vap *); struct ieee80211_appie { uint16_t ie_len; /* size of ie_data */ uint8_t ie_data[]; /* user-specified IE's */ }; struct ieee80211com { struct ifnet *ic_ifp; /* associated device */ ieee80211_com_lock_t ic_comlock; /* state update lock */ TAILQ_HEAD(, ieee80211vap) ic_vaps; /* list of vap instances */ struct ieee80211_stats ic_stats; /* statistics */ int ic_headroom; /* driver tx headroom needs */ enum ieee80211_phytype ic_phytype; /* XXX wrong for multi-mode */ enum ieee80211_opmode ic_opmode; /* operation mode */ struct ifmedia ic_media; /* interface media config */ uint8_t ic_myaddr[IEEE80211_ADDR_LEN]; struct callout ic_inact; /* inactivity processing */ struct task ic_parent_task; /* deferred parent processing */ uint32_t ic_flags; /* state flags */ uint32_t ic_flags_ext; /* extended state flags */ uint32_t ic_flags_ven; /* vendor state flags */ uint32_t ic_caps; /* capabilities */ uint32_t ic_htcaps; /* HT capabilities */ uint32_t ic_cryptocaps; /* crypto capabilities */ uint8_t ic_modecaps[2]; /* set of mode capabilities */ uint8_t ic_promisc; /* vap's needing promisc mode */ uint8_t ic_allmulti; /* vap's needing all multicast*/ uint8_t ic_nrunning; /* vap's marked running */ uint8_t ic_curmode; /* current mode */ uint16_t ic_bintval; /* beacon interval */ uint16_t ic_lintval; /* listen interval */ uint16_t ic_holdover; /* PM hold over duration */ uint16_t ic_txpowlimit; /* global tx power limit */ struct ieee80211_rateset ic_sup_rates[IEEE80211_MODE_MAX]; /* * Channel state: * * ic_channels is the set of available channels for the device; * it is setup by the driver * ic_nchans is the number of valid entries in ic_channels * ic_chan_avail is a bit vector of these channels used to check * whether a channel is available w/o searching the channel table. * ic_chan_active is a (potentially) constrained subset of * ic_chan_avail that reflects any mode setting or user-specified * limit on the set of channels to use/scan * ic_curchan is the current channel the device is set to; it may * be different from ic_bsschan when we are off-channel scanning * or otherwise doing background work * ic_bsschan is the channel selected for operation; it may * be undefined (IEEE80211_CHAN_ANYC) * ic_prevchan is a cached ``previous channel'' used to optimize * lookups when switching back+forth between two channels * (e.g. 
for dynamic turbo) */ int ic_nchans; /* # entries in ic_channels */ struct ieee80211_channel ic_channels[IEEE80211_CHAN_MAX+1]; uint8_t ic_chan_avail[IEEE80211_CHAN_BYTES]; uint8_t ic_chan_active[IEEE80211_CHAN_BYTES]; uint8_t ic_chan_scan[IEEE80211_CHAN_BYTES]; struct ieee80211_channel *ic_curchan; /* current channel */ struct ieee80211_channel *ic_bsschan; /* bss channel */ struct ieee80211_channel *ic_prevchan; /* previous channel */ struct ieee80211_regdomain ic_regdomain;/* regulatory data */ struct ieee80211_appie *ic_countryie; /* calculated country ie */ struct ieee80211_channel *ic_countryie_chan; /* 802.11h/DFS state */ struct ieee80211_channel *ic_csa_newchan;/* channel for doing CSA */ int ic_csa_count; /* count for doing CSA */ struct ieee80211_dfs_state ic_dfs; /* DFS state */ struct ieee80211_scan_state *ic_scan; /* scan state */ int ic_lastdata; /* time of last data frame */ int ic_lastscan; /* time last scan completed */ /* NB: this is the union of all vap stations/neighbors */ int ic_max_keyix; /* max h/w key index */ struct ieee80211_node_table ic_sta; /* stations/neighbors */ /* XXX multi-bss: split out common/vap parts */ struct ieee80211_wme_state ic_wme; /* WME/WMM state */ /* XXX multi-bss: can per-vap be done/make sense? */ enum ieee80211_protmode ic_protmode; /* 802.11g protection mode */ uint16_t ic_nonerpsta; /* # non-ERP stations */ uint16_t ic_longslotsta; /* # long slot time stations */ uint16_t ic_sta_assoc; /* stations associated */ uint16_t ic_ht_sta_assoc;/* HT stations associated */ uint16_t ic_ht40_sta_assoc;/* HT40 stations associated */ uint8_t ic_curhtprotmode;/* HTINFO bss state */ enum ieee80211_protmode ic_htprotmode; /* HT protection mode */ int ic_lastnonerp; /* last time non-ERP sta noted*/ int ic_lastnonht; /* last time non-HT sta noted */ /* virtual ap create/delete */ struct ieee80211vap* (*ic_vap_create)(struct ieee80211com *, const char name[IFNAMSIZ], int unit, int opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t macaddr[IEEE80211_ADDR_LEN]); void (*ic_vap_delete)(struct ieee80211vap *); /* operating mode attachment */ ieee80211vap_attach ic_vattach[IEEE80211_OPMODE_MAX]; /* return hardware/radio capabilities */ void (*ic_getradiocaps)(struct ieee80211com *, int *, struct ieee80211_channel []); /* check and/or prepare regdomain state change */ int (*ic_setregdomain)(struct ieee80211com *, struct ieee80211_regdomain *, int, struct ieee80211_channel []); /* send/recv 802.11 management frame */ int (*ic_send_mgmt)(struct ieee80211_node *, int, int); /* send raw 802.11 frame */ int (*ic_raw_xmit)(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); /* update device state for 802.11 slot time change */ void (*ic_updateslot)(struct ifnet *); /* handle multicast state changes */ void (*ic_update_mcast)(struct ifnet *); /* handle promiscuous mode changes */ void (*ic_update_promisc)(struct ifnet *); /* new station association callback/notification */ void (*ic_newassoc)(struct ieee80211_node *, int); /* node state management */ - struct ieee80211_node* (*ic_node_alloc)(struct ieee80211_node_table *); + struct ieee80211_node* (*ic_node_alloc)(struct ieee80211vap *, + const uint8_t [IEEE80211_ADDR_LEN]); void (*ic_node_free)(struct ieee80211_node *); void (*ic_node_cleanup)(struct ieee80211_node *); void (*ic_node_age)(struct ieee80211_node *); void (*ic_node_drain)(struct ieee80211_node *); int8_t (*ic_node_getrssi)(const struct ieee80211_node*); void (*ic_node_getsignal)(const struct 
ieee80211_node*, int8_t *, int8_t *); void (*ic_node_getmimoinfo)( const struct ieee80211_node*, struct ieee80211_mimo_info *); /* scanning support */ void (*ic_scan_start)(struct ieee80211com *); void (*ic_scan_end)(struct ieee80211com *); void (*ic_set_channel)(struct ieee80211com *); void (*ic_scan_curchan)(struct ieee80211_scan_state *, unsigned long); void (*ic_scan_mindwell)(struct ieee80211_scan_state *); /* * 802.11n ADDBA support. A simple/generic implementation * of A-MPDU tx aggregation is provided; the driver may * override these methods to provide their own support. * A-MPDU rx re-ordering happens automatically if the * driver passes out-of-order frames to ieee80211_input * from an assocated HT station. */ void (*ic_recv_action)(struct ieee80211_node *, const uint8_t *frm, const uint8_t *efrm); int (*ic_send_action)(struct ieee80211_node *, int category, int action, uint16_t args[4]); /* check if A-MPDU should be enabled this station+ac */ int (*ic_ampdu_enable)(struct ieee80211_node *, struct ieee80211_tx_ampdu *); /* start/stop doing A-MPDU tx aggregation for a station */ int (*ic_addba_request)(struct ieee80211_node *, struct ieee80211_tx_ampdu *, int dialogtoken, int baparamset, int batimeout); int (*ic_addba_response)(struct ieee80211_node *, struct ieee80211_tx_ampdu *, int status, int baparamset, int batimeout); void (*ic_addba_stop)(struct ieee80211_node *, struct ieee80211_tx_ampdu *); }; struct ieee80211_aclator; struct ieee80211vap { struct ifmedia iv_media; /* interface media config */ struct ifnet *iv_ifp; /* associated device */ struct bpf_if *iv_rawbpf; /* packet filter structure */ struct sysctl_ctx_list *iv_sysctl; /* dynamic sysctl context */ struct sysctl_oid *iv_oid; /* net.wlan.X sysctl oid */ TAILQ_ENTRY(ieee80211vap) iv_next; /* list of vap instances */ struct ieee80211com *iv_ic; /* back ptr to common state */ uint32_t iv_debug; /* debug msg flags */ struct ieee80211_stats iv_stats; /* statistics */ uint8_t iv_myaddr[IEEE80211_ADDR_LEN]; uint32_t iv_flags; /* state flags */ uint32_t iv_flags_ext; /* extended state flags */ uint32_t iv_flags_ven; /* vendor state flags */ uint32_t iv_caps; /* capabilities */ uint32_t iv_htcaps; /* HT capabilities */ enum ieee80211_opmode iv_opmode; /* operation mode */ enum ieee80211_state iv_state; /* state machine state */ void (*iv_newstate_cb)(struct ieee80211vap *, enum ieee80211_state, int); struct callout iv_mgtsend; /* mgmt frame response timer */ /* inactivity timer settings */ int iv_inact_init; /* setting for new station */ int iv_inact_auth; /* auth but not assoc setting */ int iv_inact_run; /* authorized setting */ int iv_inact_probe; /* inactive probe time */ int iv_des_nssid; /* # desired ssids */ struct ieee80211_scan_ssid iv_des_ssid[1];/* desired ssid table */ uint8_t iv_des_bssid[IEEE80211_ADDR_LEN]; struct ieee80211_channel *iv_des_chan; /* desired channel */ uint16_t iv_des_mode; /* desired mode */ int iv_nicknamelen; /* XXX junk */ uint8_t iv_nickname[IEEE80211_NWID_LEN]; u_int iv_bgscanidle; /* bg scan idle threshold */ u_int iv_bgscanintvl; /* bg scan min interval */ u_int iv_scanvalid; /* scan cache valid threshold */ u_int iv_scanreq_duration; u_int iv_scanreq_mindwell; u_int iv_scanreq_maxdwell; uint16_t iv_scanreq_flags;/* held scan request params */ uint8_t iv_scanreq_nssid; struct ieee80211_scan_ssid iv_scanreq_ssid[IEEE80211_SCAN_MAX_SSID]; /* sta-mode roaming state */ enum ieee80211_roamingmode iv_roaming; /* roaming mode */ struct ieee80211_roamparam iv_roamparms[IEEE80211_MODE_MAX]; 
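	/* beacon miss (bmiss) detection and s/w beacon miss timer state */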
uint8_t iv_bmissthreshold; uint8_t iv_bmiss_count; /* current beacon miss count */ int iv_bmiss_max; /* max bmiss before scan */ uint16_t iv_swbmiss_count;/* beacons in last period */ uint16_t iv_swbmiss_period;/* s/w bmiss period */ struct callout iv_swbmiss; /* s/w beacon miss timer */ int iv_ampdu_rxmax; /* A-MPDU rx limit (bytes) */ int iv_ampdu_density;/* A-MPDU density */ int iv_ampdu_limit; /* A-MPDU tx limit (bytes) */ int iv_amsdu_limit; /* A-MSDU tx limit (bytes) */ u_int iv_ampdu_mintraffic[WME_NUM_AC]; uint32_t *iv_aid_bitmap; /* association id map */ uint16_t iv_max_aid; uint16_t iv_sta_assoc; /* stations associated */ uint16_t iv_ps_sta; /* stations in power save */ uint16_t iv_ps_pending; /* ps sta's w/ pending frames */ uint16_t iv_txseq; /* mcast xmit seq# space */ uint16_t iv_tim_len; /* ic_tim_bitmap size (bytes) */ uint8_t *iv_tim_bitmap; /* power-save stations w/ data*/ uint8_t iv_dtim_period; /* DTIM period */ uint8_t iv_dtim_count; /* DTIM count from last bcn */ /* set/unset aid pwrsav state */ int iv_csa_count; /* count for doing CSA */ struct ieee80211_node *iv_bss; /* information for this node */ struct ieee80211_txparam iv_txparms[IEEE80211_MODE_MAX]; uint16_t iv_rtsthreshold; uint16_t iv_fragthreshold; int iv_inact_timer; /* inactivity timer wait */ /* application-specified IE's to attach to mgt frames */ struct ieee80211_appie *iv_appie_beacon; struct ieee80211_appie *iv_appie_probereq; struct ieee80211_appie *iv_appie_proberesp; struct ieee80211_appie *iv_appie_assocreq; struct ieee80211_appie *iv_appie_assocresp; struct ieee80211_appie *iv_appie_wpa; uint8_t *iv_wpa_ie; uint8_t *iv_rsn_ie; uint16_t iv_max_keyix; /* max h/w key index */ ieee80211_keyix iv_def_txkey; /* default/group tx key index */ struct ieee80211_key iv_nw_keys[IEEE80211_WEP_NKID]; int (*iv_key_alloc)(struct ieee80211vap *, const struct ieee80211_key *, ieee80211_keyix *, ieee80211_keyix *); int (*iv_key_delete)(struct ieee80211vap *, const struct ieee80211_key *); int (*iv_key_set)(struct ieee80211vap *, const struct ieee80211_key *, const uint8_t mac[IEEE80211_ADDR_LEN]); void (*iv_key_update_begin)(struct ieee80211vap *); void (*iv_key_update_end)(struct ieee80211vap *); const struct ieee80211_authenticator *iv_auth; /* authenticator glue */ void *iv_ec; /* private auth state */ const struct ieee80211_aclator *iv_acl; /* acl glue */ void *iv_as; /* private aclator state */ /* operate-mode detach hook */ void (*iv_opdetach)(struct ieee80211vap *); /* receive processing */ int (*iv_input)(struct ieee80211_node *, struct mbuf *, int rssi, int noise, uint32_t rstamp); void (*iv_recv_mgmt)(struct ieee80211_node *, struct mbuf *, int, int, int, uint32_t); void (*iv_deliver_data)(struct ieee80211vap *, struct ieee80211_node *, struct mbuf *); #if 0 /* send processing */ int (*iv_send_mgmt)(struct ieee80211_node *, int, int); #endif /* beacon miss processing */ void (*iv_bmiss)(struct ieee80211vap *); /* reset device state after 802.11 parameter/state change */ int (*iv_reset)(struct ieee80211vap *, u_long); /* [schedule] beacon frame update */ void (*iv_update_beacon)(struct ieee80211vap *, int); /* power save handling */ void (*iv_update_ps)(struct ieee80211vap *, int); int (*iv_set_tim)(struct ieee80211_node *, int); /* state machine processing */ int (*iv_newstate)(struct ieee80211vap *, enum ieee80211_state, int); /* 802.3 output method for raw frame xmit */ int (*iv_output)(struct ifnet *, struct mbuf *, struct sockaddr *, struct rtentry *); }; MALLOC_DECLARE(M_80211_VAP); #define 
IEEE80211_ADDR_EQ(a1,a2) (memcmp(a1,a2,IEEE80211_ADDR_LEN) == 0) #define IEEE80211_ADDR_COPY(dst,src) memcpy(dst,src,IEEE80211_ADDR_LEN) /* ic_flags/iv_flags */ #define IEEE80211_F_TURBOP 0x00000001 /* CONF: ATH Turbo enabled*/ #define IEEE80211_F_COMP 0x00000002 /* CONF: ATH comp enabled */ #define IEEE80211_F_FF 0x00000004 /* CONF: ATH FF enabled */ #define IEEE80211_F_BURST 0x00000008 /* CONF: bursting enabled */ /* NB: this is intentionally setup to be IEEE80211_CAPINFO_PRIVACY */ #define IEEE80211_F_PRIVACY 0x00000010 /* CONF: privacy enabled */ #define IEEE80211_F_PUREG 0x00000020 /* CONF: 11g w/o 11b sta's */ #define IEEE80211_F_SCAN 0x00000080 /* STATUS: scanning */ #define IEEE80211_F_ASCAN 0x00000100 /* STATUS: active scan */ #define IEEE80211_F_SIBSS 0x00000200 /* STATUS: start IBSS */ /* NB: this is intentionally setup to be IEEE80211_CAPINFO_SHORT_SLOTTIME */ #define IEEE80211_F_SHSLOT 0x00000400 /* STATUS: use short slot time*/ #define IEEE80211_F_PMGTON 0x00000800 /* CONF: Power mgmt enable */ #define IEEE80211_F_DESBSSID 0x00001000 /* CONF: des_bssid is set */ #define IEEE80211_F_WME 0x00002000 /* CONF: enable WME use */ #define IEEE80211_F_BGSCAN 0x00004000 /* CONF: bg scan enabled (???)*/ #define IEEE80211_F_SWRETRY 0x00008000 /* CONF: sw tx retry enabled */ #define IEEE80211_F_TXPOW_FIXED 0x00010000 /* TX Power: fixed rate */ #define IEEE80211_F_IBSSON 0x00020000 /* CONF: IBSS creation enable */ #define IEEE80211_F_SHPREAMBLE 0x00040000 /* STATUS: use short preamble */ #define IEEE80211_F_DATAPAD 0x00080000 /* CONF: do alignment pad */ #define IEEE80211_F_USEPROT 0x00100000 /* STATUS: protection enabled */ #define IEEE80211_F_USEBARKER 0x00200000 /* STATUS: use barker preamble*/ #define IEEE80211_F_CSAPENDING 0x00400000 /* STATUS: chan switch pending*/ #define IEEE80211_F_WPA1 0x00800000 /* CONF: WPA enabled */ #define IEEE80211_F_WPA2 0x01000000 /* CONF: WPA2 enabled */ #define IEEE80211_F_WPA 0x01800000 /* CONF: WPA/WPA2 enabled */ #define IEEE80211_F_DROPUNENC 0x02000000 /* CONF: drop unencrypted */ #define IEEE80211_F_COUNTERM 0x04000000 /* CONF: TKIP countermeasures */ #define IEEE80211_F_HIDESSID 0x08000000 /* CONF: hide SSID in beacon */ #define IEEE80211_F_NOBRIDGE 0x10000000 /* CONF: dis. 
internal bridge */ #define IEEE80211_F_PCF 0x20000000 /* CONF: PCF enabled */ #define IEEE80211_F_DOTH 0x40000000 /* CONF: 11h enabled */ #define IEEE80211_F_DWDS 0x80000000 /* CONF: Dynamic WDS enabled */ /* Atheros protocol-specific flags */ #define IEEE80211_F_ATHEROS \ (IEEE80211_F_FF | IEEE80211_F_COMP | IEEE80211_F_TURBOP) /* Check if an Atheros capability was negotiated for use */ #define IEEE80211_ATH_CAP(vap, ni, bit) \ ((vap)->iv_flags & (ni)->ni_ath_flags & (bit)) /* ic_flags_ext/iv_flags_ext */ #define IEEE80211_FEXT_NONHT_PR 0x00000001 /* STATUS: non-HT sta present */ #define IEEE80211_FEXT_INACT 0x00000002 /* CONF: sta inact handling */ #define IEEE80211_FEXT_SCANWAIT 0x00000004 /* STATUS: awaiting scan */ /* 0x00000006 reserved */ #define IEEE80211_FEXT_BGSCAN 0x00000008 /* STATUS: complete bgscan */ #define IEEE80211_FEXT_WPS 0x00000010 /* CONF: WPS enabled */ #define IEEE80211_FEXT_TSN 0x00000020 /* CONF: TSN enabled */ #define IEEE80211_FEXT_SCANREQ 0x00000040 /* STATUS: scan req params */ #define IEEE80211_FEXT_RESUME 0x00000080 /* STATUS: start on resume */ #define IEEE80211_FEXT_DFS 0x00000800 /* CONF: DFS enabled */ #define IEEE80211_FEXT_NONERP_PR 0x00000200 /* STATUS: non-ERP sta present*/ #define IEEE80211_FEXT_SWBMISS 0x00000400 /* CONF: do bmiss in s/w */ #define IEEE80211_FEXT_DOTD 0x00001000 /* CONF: 11d enabled */ /* NB: immutable: should be set only when creating a vap */ #define IEEE80211_FEXT_WDSLEGACY 0x00010000 /* CONF: legacy WDS operation */ #define IEEE80211_FEXT_PROBECHAN 0x00020000 /* CONF: probe passive channel*/ #define IEEE80211_FEXT_HT 0x00080000 /* CONF: HT supported */ #define IEEE80211_FEXT_AMPDU_TX 0x00100000 /* CONF: A-MPDU tx supported */ #define IEEE80211_FEXT_AMPDU_RX 0x00200000 /* CONF: A-MPDU tx supported */ #define IEEE80211_FEXT_AMSDU_TX 0x00400000 /* CONF: A-MSDU tx supported */ #define IEEE80211_FEXT_AMSDU_RX 0x00800000 /* CONF: A-MSDU tx supported */ #define IEEE80211_FEXT_USEHT40 0x01000000 /* CONF: 20/40 use enabled */ #define IEEE80211_FEXT_PUREN 0x02000000 /* CONF: 11n w/o legacy sta's */ #define IEEE80211_FEXT_SHORTGI20 0x04000000 /* CONF: short GI in HT20 */ #define IEEE80211_FEXT_SHORTGI40 0x08000000 /* CONF: short GI in HT40 */ #define IEEE80211_FEXT_HTCOMPAT 0x10000000 /* CONF: HT vendor OUI's */ /* ic_caps/iv_caps: device driver capabilities */ /* 0x2f available */ #define IEEE80211_C_STA 0x00000001 /* CAPABILITY: STA available */ #define IEEE80211_C_FF 0x00000040 /* CAPABILITY: ATH FF avail */ #define IEEE80211_C_TURBOP 0x00000080 /* CAPABILITY: ATH Turbo avail*/ #define IEEE80211_C_IBSS 0x00000100 /* CAPABILITY: IBSS available */ #define IEEE80211_C_PMGT 0x00000200 /* CAPABILITY: Power mgmt */ #define IEEE80211_C_HOSTAP 0x00000400 /* CAPABILITY: HOSTAP avail */ #define IEEE80211_C_AHDEMO 0x00000800 /* CAPABILITY: Old Adhoc Demo */ #define IEEE80211_C_SWRETRY 0x00001000 /* CAPABILITY: sw tx retry */ #define IEEE80211_C_TXPMGT 0x00002000 /* CAPABILITY: tx power mgmt */ #define IEEE80211_C_SHSLOT 0x00004000 /* CAPABILITY: short slottime */ #define IEEE80211_C_SHPREAMBLE 0x00008000 /* CAPABILITY: short preamble */ #define IEEE80211_C_MONITOR 0x00010000 /* CAPABILITY: monitor mode */ #define IEEE80211_C_DFS 0x00020000 /* CAPABILITY: DFS/radar avail*/ /* 0x7c0000 available */ #define IEEE80211_C_WPA1 0x00800000 /* CAPABILITY: WPA1 avail */ #define IEEE80211_C_WPA2 0x01000000 /* CAPABILITY: WPA2 avail */ #define IEEE80211_C_WPA 0x01800000 /* CAPABILITY: WPA1+WPA2 avail*/ #define IEEE80211_C_BURST 0x02000000 /* CAPABILITY: 
/* ic_caps/iv_caps: device driver capabilities */
/* 0x2f available */
#define	IEEE80211_C_STA		0x00000001	/* CAPABILITY: STA available */
#define	IEEE80211_C_FF		0x00000040	/* CAPABILITY: ATH FF avail */
#define	IEEE80211_C_TURBOP	0x00000080	/* CAPABILITY: ATH Turbo avail*/
#define	IEEE80211_C_IBSS	0x00000100	/* CAPABILITY: IBSS available */
#define	IEEE80211_C_PMGT	0x00000200	/* CAPABILITY: Power mgmt */
#define	IEEE80211_C_HOSTAP	0x00000400	/* CAPABILITY: HOSTAP avail */
#define	IEEE80211_C_AHDEMO	0x00000800	/* CAPABILITY: Old Adhoc Demo */
#define	IEEE80211_C_SWRETRY	0x00001000	/* CAPABILITY: sw tx retry */
#define	IEEE80211_C_TXPMGT	0x00002000	/* CAPABILITY: tx power mgmt */
#define	IEEE80211_C_SHSLOT	0x00004000	/* CAPABILITY: short slottime */
#define	IEEE80211_C_SHPREAMBLE	0x00008000	/* CAPABILITY: short preamble */
#define	IEEE80211_C_MONITOR	0x00010000	/* CAPABILITY: monitor mode */
#define	IEEE80211_C_DFS		0x00020000	/* CAPABILITY: DFS/radar avail*/
/* 0x7c0000 available */
#define	IEEE80211_C_WPA1	0x00800000	/* CAPABILITY: WPA1 avail */
#define	IEEE80211_C_WPA2	0x01000000	/* CAPABILITY: WPA2 avail */
#define	IEEE80211_C_WPA		0x01800000	/* CAPABILITY: WPA1+WPA2 avail*/
#define	IEEE80211_C_BURST	0x02000000	/* CAPABILITY: frame bursting */
#define	IEEE80211_C_WME		0x04000000	/* CAPABILITY: WME avail */
#define	IEEE80211_C_WDS		0x08000000	/* CAPABILITY: 4-addr support */
/* 0x10000000 reserved */
#define	IEEE80211_C_BGSCAN	0x20000000	/* CAPABILITY: bg scanning */
#define	IEEE80211_C_TXFRAG	0x40000000	/* CAPABILITY: tx fragments */
/* XXX protection/barker? */

#define	IEEE80211_C_OPMODE \
	(IEEE80211_C_STA | IEEE80211_C_IBSS | IEEE80211_C_HOSTAP | \
	 IEEE80211_C_AHDEMO | IEEE80211_C_MONITOR | IEEE80211_C_WDS)

/*
 * ic_htcaps/iv_htcaps: HT-specific device/driver capabilities
 *
 * NB: the low 16-bits are the 802.11 definitions, the upper
 * 16-bits are used to define s/w/driver capabilities.
 */
#define	IEEE80211_HTC_AMPDU	0x00010000	/* CAPABILITY: A-MPDU tx */
#define	IEEE80211_HTC_AMSDU	0x00020000	/* CAPABILITY: A-MSDU tx */
/* NB: HT40 is implied by IEEE80211_HTCAP_CHWIDTH40 */
#define	IEEE80211_HTC_HT	0x00040000	/* CAPABILITY: HT operation */

void	ieee80211_ifattach(struct ieee80211com *);
void	ieee80211_ifdetach(struct ieee80211com *);
int	ieee80211_vap_setup(struct ieee80211com *, struct ieee80211vap *,
		const char name[IFNAMSIZ], int unit, int opmode, int flags,
		const uint8_t bssid[IEEE80211_ADDR_LEN],
		const uint8_t macaddr[IEEE80211_ADDR_LEN]);
int	ieee80211_vap_attach(struct ieee80211vap *,
		ifm_change_cb_t, ifm_stat_cb_t);
void	ieee80211_vap_detach(struct ieee80211vap *);
const struct ieee80211_rateset *ieee80211_get_suprates(struct ieee80211com *ic,
		const struct ieee80211_channel *);
void	ieee80211_announce(struct ieee80211com *);
void	ieee80211_announce_channels(struct ieee80211com *);
void	ieee80211_drain(struct ieee80211com *);
void	ieee80211_media_init(struct ieee80211com *);
struct ieee80211com *ieee80211_find_vap(const uint8_t mac[IEEE80211_ADDR_LEN]);
int	ieee80211_media_change(struct ifnet *);
void	ieee80211_media_status(struct ifnet *, struct ifmediareq *);
int	ieee80211_ioctl(struct ifnet *, u_long, caddr_t);
int	ieee80211_rate2media(struct ieee80211com *, int,
		enum ieee80211_phymode);
int	ieee80211_media2rate(int);
int	ieee80211_mhz2ieee(u_int, u_int);
int	ieee80211_chan2ieee(struct ieee80211com *,
		const struct ieee80211_channel *);
u_int	ieee80211_ieee2mhz(u_int, u_int);
struct ieee80211_channel *ieee80211_find_channel(struct ieee80211com *,
		int freq, int flags);
struct ieee80211_channel *ieee80211_find_channel_byieee(struct ieee80211com *,
		int ieee, int flags);
int	ieee80211_setmode(struct ieee80211com *, enum ieee80211_phymode);
enum ieee80211_phymode ieee80211_chan2mode(const struct ieee80211_channel *);

/*
 * Key update synchronization methods.  XXX should not be visible.
 */
static __inline void
ieee80211_key_update_begin(struct ieee80211vap *vap)
{
	vap->iv_key_update_begin(vap);
}
static __inline void
ieee80211_key_update_end(struct ieee80211vap *vap)
{
	vap->iv_key_update_end(vap);
}

/*
 * XXX these need to be here for IEEE80211_F_DATAPAD
 */

/*
 * Return the space occupied by the 802.11 header and any
 * padding required by the driver.  This works for a
 * management or data frame.
 */
static __inline int
ieee80211_hdrspace(struct ieee80211com *ic, const void *data)
{
	int size = ieee80211_hdrsize(data);
	if (ic->ic_flags & IEEE80211_F_DATAPAD)
		size = roundup(size, sizeof(uint32_t));
	return size;
}
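/*
 * Example (illustrative sketch only, not part of the original header):
 * locate the start of the frame body using ieee80211_hdrspace(), which
 * includes any IEEE80211_F_DATAPAD alignment padding the driver asked
 * for.  The example_ name is hypothetical.
 */
static __inline const uint8_t *
example_payload_start(struct ieee80211com *ic, const void *frame)
{
	/* header size plus pad is the offset of the body (e.g. LLC/SNAP) */
	return (const uint8_t *)frame + ieee80211_hdrspace(ic, frame);
}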
/*
 * Like ieee80211_hdrspace, but handles any type of frame.
 */
static __inline int
ieee80211_anyhdrspace(struct ieee80211com *ic, const void *data)
{
	int size = ieee80211_anyhdrsize(data);
	if (ic->ic_flags & IEEE80211_F_DATAPAD)
		size = roundup(size, sizeof(uint32_t));
	return size;
}

/*
 * Notify a vap that beacon state has been updated.
 */
static __inline void
ieee80211_beacon_notify(struct ieee80211vap *vap, int what)
{
	if (vap->iv_state == IEEE80211_S_RUN)
		vap->iv_update_beacon(vap, what);
}

/*
 * Calculate HT channel promotion flags for a channel.
 * XXX belongs in ieee80211_ht.h but needs IEEE80211_FEXT_*
 */
static __inline int
ieee80211_htchanflags(const struct ieee80211_channel *c)
{
	return IEEE80211_IS_CHAN_HT40(c) ?
	    IEEE80211_FEXT_HT | IEEE80211_FEXT_USEHT40 :
	    IEEE80211_IS_CHAN_HT(c) ? IEEE80211_FEXT_HT : 0;
}

/*
 * Debugging facilities compiled in when IEEE80211_DEBUG is defined.
 *
 * The intent is that any problem in the net80211 layer can be
 * diagnosed by inspecting the statistics (dumped by the wlanstats
 * program) and/or the msgs generated by net80211.  Messages are
 * broken into functional classes and can be controlled with the
 * wlandebug program.  Certain of these msg groups are for facilities
 * that are no longer part of net80211 (e.g. IEEE80211_MSG_DOT1X).
 */
#define	IEEE80211_MSG_11N	0x80000000	/* 11n mode debug */
#define	IEEE80211_MSG_DEBUG	0x40000000	/* IFF_DEBUG equivalent */
#define	IEEE80211_MSG_DUMPPKTS	0x20000000	/* IFF_LINK2 equivalent */
#define	IEEE80211_MSG_CRYPTO	0x10000000	/* crypto work */
#define	IEEE80211_MSG_INPUT	0x08000000	/* input handling */
#define	IEEE80211_MSG_XRATE	0x04000000	/* rate set handling */
#define	IEEE80211_MSG_ELEMID	0x02000000	/* element id parsing */
#define	IEEE80211_MSG_NODE	0x01000000	/* node handling */
#define	IEEE80211_MSG_ASSOC	0x00800000	/* association handling */
#define	IEEE80211_MSG_AUTH	0x00400000	/* authentication handling */
#define	IEEE80211_MSG_SCAN	0x00200000	/* scanning */
#define	IEEE80211_MSG_OUTPUT	0x00100000	/* output handling */
#define	IEEE80211_MSG_STATE	0x00080000	/* state machine */
#define	IEEE80211_MSG_POWER	0x00040000	/* power save handling */
#define	IEEE80211_MSG_DOT1X	0x00020000	/* 802.1x authenticator */
#define	IEEE80211_MSG_DOT1XSM	0x00010000	/* 802.1x state machine */
#define	IEEE80211_MSG_RADIUS	0x00008000	/* 802.1x radius client */
#define	IEEE80211_MSG_RADDUMP	0x00004000	/* dump 802.1x radius packets */
#define	IEEE80211_MSG_RADKEYS	0x00002000	/* dump 802.1x keys */
#define	IEEE80211_MSG_WPA	0x00001000	/* WPA/RSN protocol */
#define	IEEE80211_MSG_ACL	0x00000800	/* ACL handling */
#define	IEEE80211_MSG_WME	0x00000400	/* WME protocol */
#define	IEEE80211_MSG_SUPERG	0x00000200	/* Atheros SuperG protocol */
#define	IEEE80211_MSG_DOTH	0x00000100	/* 802.11h support */
#define	IEEE80211_MSG_INACT	0x00000080	/* inactivity handling */
#define	IEEE80211_MSG_ROAM	0x00000040	/* sta-mode roaming */
#define	IEEE80211_MSG_RATECTL	0x00000020	/* tx rate control */
#define	IEEE80211_MSG_ACTION	0x00000010	/* action frame handling */
#define	IEEE80211_MSG_WDS	0x00000008	/* WDS handling */
#define	IEEE80211_MSG_IOCTL	0x00000004	/* ioctl handling */
#define	IEEE80211_MSG_ANY	0xffffffff	/* anything */
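/*
 * Example (illustrative sketch only, not part of the original header):
 * the message classes are bit flags, so related classes can be combined
 * into a single mask suitable for seeding iv_debug or for use with the
 * wlandebug program.  The EXAMPLE_ name is hypothetical.
 */
#define	EXAMPLE_MSG_STA_SIDE \
	(IEEE80211_MSG_ASSOC | IEEE80211_MSG_AUTH | IEEE80211_MSG_STATE | \
	 IEEE80211_MSG_ROAM)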
#ifdef IEEE80211_DEBUG
#define	ieee80211_msg(_vap, _m)	((_vap)->iv_debug & (_m))
#define	IEEE80211_DPRINTF(_vap, _m, _fmt, ...) do {			\
	if (ieee80211_msg(_vap, _m))					\
		ieee80211_note(_vap, _fmt, __VA_ARGS__);		\
} while (0)
#define	IEEE80211_NOTE(_vap, _m, _ni, _fmt, ...) do {			\
	if (ieee80211_msg(_vap, _m))					\
		ieee80211_note_mac(_vap, (_ni)->ni_macaddr, _fmt, __VA_ARGS__);\
} while (0)
#define	IEEE80211_NOTE_MAC(_vap, _m, _mac, _fmt, ...) do {		\
	if (ieee80211_msg(_vap, _m))					\
		ieee80211_note_mac(_vap, _mac, _fmt, __VA_ARGS__);	\
} while (0)
#define	IEEE80211_NOTE_FRAME(_vap, _m, _wh, _fmt, ...) do {		\
	if (ieee80211_msg(_vap, _m))					\
		ieee80211_note_frame(_vap, _wh, _fmt, __VA_ARGS__);	\
} while (0)
void	ieee80211_note(struct ieee80211vap *, const char *, ...);
void	ieee80211_note_mac(struct ieee80211vap *,
		const uint8_t mac[IEEE80211_ADDR_LEN], const char *, ...);
void	ieee80211_note_frame(struct ieee80211vap *,
		const struct ieee80211_frame *, const char *, ...);
#define	ieee80211_msg_debug(_vap) \
	((_vap)->iv_debug & IEEE80211_MSG_DEBUG)
#define	ieee80211_msg_dumppkts(_vap) \
	((_vap)->iv_debug & IEEE80211_MSG_DUMPPKTS)
#define	ieee80211_msg_input(_vap) \
	((_vap)->iv_debug & IEEE80211_MSG_INPUT)
#define	ieee80211_msg_radius(_vap) \
	((_vap)->iv_debug & IEEE80211_MSG_RADIUS)
#define	ieee80211_msg_dumpradius(_vap) \
	((_vap)->iv_debug & IEEE80211_MSG_RADDUMP)
#define	ieee80211_msg_dumpradkeys(_vap) \
	((_vap)->iv_debug & IEEE80211_MSG_RADKEYS)
#define	ieee80211_msg_scan(_vap) \
	((_vap)->iv_debug & IEEE80211_MSG_SCAN)
#define	ieee80211_msg_assoc(_vap) \
	((_vap)->iv_debug & IEEE80211_MSG_ASSOC)

/*
 * Emit a debug message about discarding a frame or information
 * element.  One format is for extracting the mac address from
 * the frame header; the other is for when a header is not
 * available or otherwise appropriate.
 */
#define	IEEE80211_DISCARD(_vap, _m, _wh, _type, _fmt, ...) do {	\
	if ((_vap)->iv_debug & (_m))					\
		ieee80211_discard_frame(_vap, _wh, _type, _fmt, __VA_ARGS__);\
} while (0)
#define	IEEE80211_DISCARD_IE(_vap, _m, _wh, _type, _fmt, ...) do {	\
	if ((_vap)->iv_debug & (_m))					\
		ieee80211_discard_ie(_vap, _wh, _type, _fmt, __VA_ARGS__);\
} while (0)
#define	IEEE80211_DISCARD_MAC(_vap, _m, _mac, _type, _fmt, ...) do {	\
	if ((_vap)->iv_debug & (_m))					\
		ieee80211_discard_mac(_vap, _mac, _type, _fmt, __VA_ARGS__);\
} while (0)
void	ieee80211_discard_frame(struct ieee80211vap *,
		const struct ieee80211_frame *, const char *type,
		const char *fmt, ...);
void	ieee80211_discard_ie(struct ieee80211vap *,
		const struct ieee80211_frame *, const char *type,
		const char *fmt, ...);
void	ieee80211_discard_mac(struct ieee80211vap *,
		const uint8_t mac[IEEE80211_ADDR_LEN], const char *type,
		const char *fmt, ...);
#else
#define	IEEE80211_DPRINTF(_vap, _m, _fmt, ...)
#define	IEEE80211_NOTE(_vap, _m, _ni, _fmt, ...)
#define	IEEE80211_NOTE_FRAME(_vap, _m, _wh, _fmt, ...)
#define	IEEE80211_NOTE_MAC(_vap, _m, _mac, _fmt, ...)
#define	ieee80211_msg_dumppkts(_vap)	0
#define	ieee80211_msg(_vap, _m)		0
#define	IEEE80211_DISCARD(_vap, _m, _wh, _type, _fmt, ...)
#define	IEEE80211_DISCARD_IE(_vap, _m, _wh, _type, _fmt, ...)
#define	IEEE80211_DISCARD_MAC(_vap, _m, _mac, _type, _fmt, ...)
#endif
#endif /* _NET80211_IEEE80211_VAR_H_ */
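/*
 * Example (illustrative sketch only, not part of the original header and
 * deliberately kept under "#if 0"): typical use of the debug macros from
 * a hypothetical receive path.  The function and argument names here are
 * made up for illustration.
 */
#if 0
static void
example_input_debug(struct ieee80211vap *vap, struct ieee80211_node *ni,
	const struct ieee80211_frame *wh)
{
	/* printed only when IEEE80211_MSG_INPUT is set in vap->iv_debug */
	IEEE80211_DPRINTF(vap, IEEE80211_MSG_INPUT,
	    "%s: example input event\n", __func__);
	/* tagged with the node's mac address */
	IEEE80211_NOTE(vap, IEEE80211_MSG_INPUT, ni,
	    "%s", "example note about this node");
	/* log a discarded frame, prefixed with the frame type string */
	IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, "data",
	    "%s", "example discard reason");
}
#endif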