Index: head/sys/dev/ath/if_ath_keycache.c =================================================================== --- head/sys/dev/ath/if_ath_keycache.c (revision 309685) +++ head/sys/dev/ath/if_ath_keycache.c (revision 309686) @@ -1,535 +1,536 @@ /*- * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * Driver for the Atheros Wireless LAN controller. * * This software is derived from work of Atsushi Onoe; his contribution * is greatly appreciated. */ #include "opt_inet.h" #include "opt_ath.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef ATH_DEBUG static void ath_keyprint(struct ath_softc *sc, const char *tag, u_int ix, const HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN]) { static const char *ciphers[] = { "WEP", "AES-OCB", "AES-CCM", "CKIP", "TKIP", "CLR", }; int i, n; printf("%s: [%02u] %-7s ", tag, ix, ciphers[hk->kv_type]); for (i = 0, n = hk->kv_len; i < n; i++) printf("%02x", hk->kv_val[i]); printf(" mac %s", ether_sprintf(mac)); if (hk->kv_type == HAL_CIPHER_TKIP) { printf(" %s ", sc->sc_splitmic ? "mic" : "rxmic"); for (i = 0; i < sizeof(hk->kv_mic); i++) printf("%02x", hk->kv_mic[i]); if (!sc->sc_splitmic) { printf(" txmic "); for (i = 0; i < sizeof(hk->kv_txmic); i++) printf("%02x", hk->kv_txmic[i]); } } printf("\n"); } #endif /* * Set a TKIP key into the hardware. This handles the * potential distribution of key state to multiple key * cache slots for TKIP. 
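 *
 * As a rough sketch (derived from the allocation and delete paths below,
 * not from additional hardware documentation), a split-MIC part spreads a
 * TKIP key allocated at slot k across four key cache entries:
 *
 *	k		TX key		k+64		TX MIC
 *	k+32		RX key		k+32+64		RX MIC
 *
 * while a part without split MIC uses only k for the key and k+64 for the
 * combined MIC state.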
*/ static int ath_keyset_tkip(struct ath_softc *sc, const struct ieee80211_key *k, HAL_KEYVAL *hk, const u_int8_t mac[IEEE80211_ADDR_LEN]) { #define IEEE80211_KEY_XR (IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV) static const u_int8_t zerobssid[IEEE80211_ADDR_LEN]; struct ath_hal *ah = sc->sc_ah; KASSERT(k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP, ("got a non-TKIP key, cipher %u", k->wk_cipher->ic_cipher)); if ((k->wk_flags & IEEE80211_KEY_XR) == IEEE80211_KEY_XR) { if (sc->sc_splitmic) { /* * TX key goes at first index, RX key at the rx index. * The hal handles the MIC keys at index+64. */ memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_mic)); KEYPRINTF(sc, k->wk_keyix, hk, zerobssid); if (!ath_hal_keyset(ah, k->wk_keyix, hk, zerobssid)) return 0; memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic)); KEYPRINTF(sc, k->wk_keyix+32, hk, mac); /* XXX delete tx key on failure? */ return ath_hal_keyset(ah, k->wk_keyix+32, hk, mac); } else { /* * Room for both TX+RX MIC keys in one key cache * slot, just set key at the first index; the hal * will handle the rest. */ memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic)); memcpy(hk->kv_txmic, k->wk_txmic, sizeof(hk->kv_txmic)); KEYPRINTF(sc, k->wk_keyix, hk, mac); return ath_hal_keyset(ah, k->wk_keyix, hk, mac); } } else if (k->wk_flags & IEEE80211_KEY_XMIT) { if (sc->sc_splitmic) { /* * NB: must pass MIC key in expected location when * the keycache only holds one MIC key per entry. */ memcpy(hk->kv_mic, k->wk_txmic, sizeof(hk->kv_txmic)); } else memcpy(hk->kv_txmic, k->wk_txmic, sizeof(hk->kv_txmic)); KEYPRINTF(sc, k->wk_keyix, hk, mac); return ath_hal_keyset(ah, k->wk_keyix, hk, mac); } else if (k->wk_flags & IEEE80211_KEY_RECV) { memcpy(hk->kv_mic, k->wk_rxmic, sizeof(hk->kv_mic)); KEYPRINTF(sc, k->wk_keyix, hk, mac); return ath_hal_keyset(ah, k->wk_keyix, hk, mac); } return 0; #undef IEEE80211_KEY_XR } /* * Set a net80211 key into the hardware. This handles the * potential distribution of key state to multiple key * cache slots for TKIP with hardware MIC support. */ int ath_keyset(struct ath_softc *sc, struct ieee80211vap *vap, const struct ieee80211_key *k, struct ieee80211_node *bss) { static const u_int8_t ciphermap[] = { HAL_CIPHER_WEP, /* IEEE80211_CIPHER_WEP */ HAL_CIPHER_TKIP, /* IEEE80211_CIPHER_TKIP */ HAL_CIPHER_AES_OCB, /* IEEE80211_CIPHER_AES_OCB */ HAL_CIPHER_AES_CCM, /* IEEE80211_CIPHER_AES_CCM */ (u_int8_t) -1, /* 4 is not allocated */ HAL_CIPHER_CKIP, /* IEEE80211_CIPHER_CKIP */ HAL_CIPHER_CLR, /* IEEE80211_CIPHER_NONE */ }; struct ath_hal *ah = sc->sc_ah; const struct ieee80211_cipher *cip = k->wk_cipher; u_int8_t gmac[IEEE80211_ADDR_LEN]; const u_int8_t *mac; HAL_KEYVAL hk; int ret; memset(&hk, 0, sizeof(hk)); /* * Software crypto uses a "clear key" so non-crypto * state kept in the key cache are maintained and * so that rx frames have an entry to match. */ if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0) { KASSERT(cip->ic_cipher < nitems(ciphermap), ("invalid cipher type %u", cip->ic_cipher)); hk.kv_type = ciphermap[cip->ic_cipher]; hk.kv_len = k->wk_keylen; memcpy(hk.kv_val, k->wk_key, k->wk_keylen); } else hk.kv_type = HAL_CIPHER_CLR; /* * If we're installing a clear cipher key and * the hardware doesn't support that, just succeed. * Leave it up to the net80211 layer to figure it out. */ if (hk.kv_type == HAL_CIPHER_CLR && sc->sc_hasclrkey == 0) { return (1); } /* * XXX TODO: check this: * * Group keys on hardware that supports multicast frame * key search should only be done in adhoc/hostap mode, * not STA mode. 
* * XXX TODO: what about mesh, tdma? */ #if 0 if ((vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_IBSS) && #else if ( #endif (k->wk_flags & IEEE80211_KEY_GROUP) && sc->sc_mcastkey) { /* * Group keys on hardware that supports multicast frame * key search use a MAC that is the sender's address with * the multicast bit set instead of the app-specified address. */ IEEE80211_ADDR_COPY(gmac, bss->ni_macaddr); gmac[0] |= 0x01; mac = gmac; } else mac = k->wk_macaddr; ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); if (hk.kv_type == HAL_CIPHER_TKIP && (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) { ret = ath_keyset_tkip(sc, k, &hk, mac); } else { KEYPRINTF(sc, k->wk_keyix, &hk, mac); ret = ath_hal_keyset(ah, k->wk_keyix, &hk, mac); } ath_power_restore_power_state(sc); ATH_UNLOCK(sc); return (ret); } /* * Allocate tx/rx key slots for TKIP. We allocate two slots for * each key, one for decrypt/encrypt and the other for the MIC. */ static u_int16_t key_alloc_2pair(struct ath_softc *sc, ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix) { u_int i, keyix; KASSERT(sc->sc_splitmic, ("key cache !split")); /* XXX could optimize */ for (i = 0; i < nitems(sc->sc_keymap)/4; i++) { u_int8_t b = sc->sc_keymap[i]; if (b != 0xff) { /* * One or more slots in this byte are free. */ keyix = i*NBBY; while (b & 1) { again: keyix++; b >>= 1; } /* XXX IEEE80211_KEY_XMIT | IEEE80211_KEY_RECV */ if (isset(sc->sc_keymap, keyix+32) || isset(sc->sc_keymap, keyix+64) || isset(sc->sc_keymap, keyix+32+64)) { /* full pair unavailable */ /* XXX statistic */ if (keyix == (i+1)*NBBY) { /* no slots were appropriate, advance */ continue; } goto again; } setbit(sc->sc_keymap, keyix); setbit(sc->sc_keymap, keyix+64); setbit(sc->sc_keymap, keyix+32); setbit(sc->sc_keymap, keyix+32+64); DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: key pair %u,%u %u,%u\n", __func__, keyix, keyix+64, keyix+32, keyix+32+64); *txkeyix = keyix; *rxkeyix = keyix+32; return 1; } } DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__); return 0; } /* * Allocate tx/rx key slots for TKIP. We allocate two slots for * each key, one for decrypt/encrypt and the other for the MIC. */ static u_int16_t key_alloc_pair(struct ath_softc *sc, ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix) { u_int i, keyix; KASSERT(!sc->sc_splitmic, ("key cache split")); /* XXX could optimize */ for (i = 0; i < nitems(sc->sc_keymap)/4; i++) { u_int8_t b = sc->sc_keymap[i]; if (b != 0xff) { /* * One or more slots in this byte are free. */ keyix = i*NBBY; while (b & 1) { again: keyix++; b >>= 1; } if (isset(sc->sc_keymap, keyix+64)) { /* full pair unavailable */ /* XXX statistic */ if (keyix == (i+1)*NBBY) { /* no slots were appropriate, advance */ continue; } goto again; } setbit(sc->sc_keymap, keyix); setbit(sc->sc_keymap, keyix+64); DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: key pair %u,%u\n", __func__, keyix, keyix+64); *txkeyix = *rxkeyix = keyix; return 1; } } DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of pair space\n", __func__); return 0; } /* * Allocate a single key cache slot. */ static int key_alloc_single(struct ath_softc *sc, ieee80211_keyix *txkeyix, ieee80211_keyix *rxkeyix) { u_int i, keyix; if (sc->sc_hasclrkey == 0) { /* * Map to slot 0 for the AR5210. */ *txkeyix = *rxkeyix = 0; return (1); } /* XXX try i,i+32,i+64,i+32+64 to minimize key pair conflicts */ for (i = 0; i < nitems(sc->sc_keymap); i++) { u_int8_t b = sc->sc_keymap[i]; if (b != 0xff) { /* * One or more slots are free. 
*/ keyix = i*NBBY; while (b & 1) keyix++, b >>= 1; setbit(sc->sc_keymap, keyix); DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: key %u\n", __func__, keyix); *txkeyix = *rxkeyix = keyix; return 1; } } DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: out of space\n", __func__); return 0; } /* * Allocate one or more key cache slots for a uniacst key. The * key itself is needed only to identify the cipher. For hardware * TKIP with split cipher+MIC keys we allocate two key cache slot * pairs so that we can setup separate TX and RX MIC keys. Note * that the MIC key for a TKIP key at slot i is assumed by the * hardware to be at slot i+64. This limits TKIP keys to the first * 64 entries. */ int ath_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k, ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix) { struct ath_softc *sc = vap->iv_ic->ic_softc; /* * Group key allocation must be handled specially for * parts that do not support multicast key cache search * functionality. For those parts the key id must match * the h/w key index so lookups find the right key. On * parts w/ the key search facility we install the sender's * mac address (with the high bit set) and let the hardware * find the key w/o using the key id. This is preferred as * it permits us to support multiple users for adhoc and/or * multi-station operation. */ if (k->wk_keyix != IEEE80211_KEYIX_NONE) { /* * Only global keys should have key index assigned. */ if (!(&vap->iv_nw_keys[0] <= k && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) { /* should not happen */ DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: bogus group key\n", __func__); return 0; } if (vap->iv_opmode != IEEE80211_M_HOSTAP || !(k->wk_flags & IEEE80211_KEY_GROUP) || !sc->sc_mcastkey) { /* * XXX we pre-allocate the global keys so * have no way to check if they've already * been allocated. */ - *keyix = *rxkeyix = k - vap->iv_nw_keys; + *keyix = *rxkeyix = + ieee80211_crypto_get_key_wepidx(vap, k); return 1; } /* * Group key and device supports multicast key search. */ k->wk_keyix = IEEE80211_KEYIX_NONE; } /* * We allocate two pair for TKIP when using the h/w to do * the MIC. For everything else, including software crypto, * we allocate a single entry. Note that s/w crypto requires * a pass-through slot on the 5211 and 5212. The 5210 does * not support pass-through cache entries and we map all * those requests to slot 0. */ if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { return key_alloc_single(sc, keyix, rxkeyix); } else if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP && (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) { if (sc->sc_splitmic) return key_alloc_2pair(sc, keyix, rxkeyix); else return key_alloc_pair(sc, keyix, rxkeyix); } else { return key_alloc_single(sc, keyix, rxkeyix); } } /* * Delete an entry in the key cache allocated by ath_key_alloc. */ int ath_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) { struct ath_softc *sc = vap->iv_ic->ic_softc; struct ath_hal *ah = sc->sc_ah; const struct ieee80211_cipher *cip = k->wk_cipher; u_int keyix = k->wk_keyix; DPRINTF(sc, ATH_DEBUG_KEYCACHE, "%s: delete key %u\n", __func__, keyix); ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ath_hal_keyreset(ah, keyix); /* * Handle split tx/rx keying required for TKIP with h/w MIC. */ if (cip->ic_cipher == IEEE80211_CIPHER_TKIP && (k->wk_flags & IEEE80211_KEY_SWMIC) == 0 && sc->sc_splitmic) ath_hal_keyreset(ah, keyix+32); /* RX key */ if (keyix >= IEEE80211_WEP_NKID) { /* * Don't touch keymap entries for global keys so * they are never considered for dynamic allocation. 
*/ clrbit(sc->sc_keymap, keyix); if (cip->ic_cipher == IEEE80211_CIPHER_TKIP && (k->wk_flags & IEEE80211_KEY_SWMIC) == 0) { clrbit(sc->sc_keymap, keyix+64); /* TX key MIC */ if (sc->sc_splitmic) { /* +32 for RX key, +32+64 for RX key MIC */ clrbit(sc->sc_keymap, keyix+32); clrbit(sc->sc_keymap, keyix+32+64); } } } ath_power_restore_power_state(sc); ATH_UNLOCK(sc); return 1; } /* * Set the key cache contents for the specified key. Key cache * slot(s) must already have been allocated by ath_key_alloc. */ int ath_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k) { struct ath_softc *sc = vap->iv_ic->ic_softc; return ath_keyset(sc, vap, k, vap->iv_bss); } Index: head/sys/dev/mwl/if_mwl.c =================================================================== --- head/sys/dev/mwl/if_mwl.c (revision 309685) +++ head/sys/dev/mwl/if_mwl.c (revision 309686) @@ -1,4834 +1,4834 @@ /*- * Copyright (c) 2007-2009 Sam Leffler, Errno Consulting * Copyright (c) 2007-2008 Marvell Semiconductor, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * Driver for the Marvell 88W8363 Wireless LAN controller. 
*/ #include "opt_inet.h" #include "opt_mwl.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #endif /* INET */ #include #include /* idiomatic shorthands: MS = mask+shift, SM = shift+mask */ #define MS(v,x) (((v) & x) >> x##_S) #define SM(v,x) (((v) << x##_S) & x) static struct ieee80211vap *mwl_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void mwl_vap_delete(struct ieee80211vap *); static int mwl_setupdma(struct mwl_softc *); static int mwl_hal_reset(struct mwl_softc *sc); static int mwl_init(struct mwl_softc *); static void mwl_parent(struct ieee80211com *); static int mwl_reset(struct ieee80211vap *, u_long); static void mwl_stop(struct mwl_softc *); static void mwl_start(struct mwl_softc *); static int mwl_transmit(struct ieee80211com *, struct mbuf *); static int mwl_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static int mwl_media_change(struct ifnet *); static void mwl_watchdog(void *); static int mwl_ioctl(struct ieee80211com *, u_long, void *); static void mwl_radar_proc(void *, int); static void mwl_chanswitch_proc(void *, int); static void mwl_bawatchdog_proc(void *, int); static int mwl_key_alloc(struct ieee80211vap *, struct ieee80211_key *, ieee80211_keyix *, ieee80211_keyix *); static int mwl_key_delete(struct ieee80211vap *, const struct ieee80211_key *); static int mwl_key_set(struct ieee80211vap *, const struct ieee80211_key *); static int _mwl_key_set(struct ieee80211vap *, const struct ieee80211_key *, const uint8_t mac[IEEE80211_ADDR_LEN]); static int mwl_mode_init(struct mwl_softc *); static void mwl_update_mcast(struct ieee80211com *); static void mwl_update_promisc(struct ieee80211com *); static void mwl_updateslot(struct ieee80211com *); static int mwl_beacon_setup(struct ieee80211vap *); static void mwl_beacon_update(struct ieee80211vap *, int); #ifdef MWL_HOST_PS_SUPPORT static void mwl_update_ps(struct ieee80211vap *, int); static int mwl_set_tim(struct ieee80211_node *, int); #endif static int mwl_dma_setup(struct mwl_softc *); static void mwl_dma_cleanup(struct mwl_softc *); static struct ieee80211_node *mwl_node_alloc(struct ieee80211vap *, const uint8_t [IEEE80211_ADDR_LEN]); static void mwl_node_cleanup(struct ieee80211_node *); static void mwl_node_drain(struct ieee80211_node *); static void mwl_node_getsignal(const struct ieee80211_node *, int8_t *, int8_t *); static void mwl_node_getmimoinfo(const struct ieee80211_node *, struct ieee80211_mimo_info *); static int mwl_rxbuf_init(struct mwl_softc *, struct mwl_rxbuf *); static void mwl_rx_proc(void *, int); static void mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *, int); static int mwl_tx_setup(struct mwl_softc *, int, int); static int mwl_wme_update(struct ieee80211com *); static void mwl_tx_cleanupq(struct mwl_softc *, struct mwl_txq *); static void mwl_tx_cleanup(struct mwl_softc *); static uint16_t mwl_calcformat(uint8_t rate, const struct ieee80211_node *); static int mwl_tx_start(struct mwl_softc *, struct ieee80211_node *, struct mwl_txbuf *, struct mbuf *); static void mwl_tx_proc(void *, int); static int mwl_chan_set(struct mwl_softc *, struct ieee80211_channel *); 
static void mwl_draintxq(struct mwl_softc *); static void mwl_cleartxq(struct mwl_softc *, struct ieee80211vap *); static int mwl_recv_action(struct ieee80211_node *, const struct ieee80211_frame *, const uint8_t *, const uint8_t *); static int mwl_addba_request(struct ieee80211_node *, struct ieee80211_tx_ampdu *, int dialogtoken, int baparamset, int batimeout); static int mwl_addba_response(struct ieee80211_node *, struct ieee80211_tx_ampdu *, int status, int baparamset, int batimeout); static void mwl_addba_stop(struct ieee80211_node *, struct ieee80211_tx_ampdu *); static int mwl_startrecv(struct mwl_softc *); static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *, struct ieee80211_channel *); static int mwl_setapmode(struct ieee80211vap *, struct ieee80211_channel*); static void mwl_scan_start(struct ieee80211com *); static void mwl_scan_end(struct ieee80211com *); static void mwl_set_channel(struct ieee80211com *); static int mwl_peerstadb(struct ieee80211_node *, int aid, int staid, MWL_HAL_PEERINFO *pi); static int mwl_localstadb(struct ieee80211vap *); static int mwl_newstate(struct ieee80211vap *, enum ieee80211_state, int); static int allocstaid(struct mwl_softc *sc, int aid); static void delstaid(struct mwl_softc *sc, int staid); static void mwl_newassoc(struct ieee80211_node *, int); static void mwl_agestations(void *); static int mwl_setregdomain(struct ieee80211com *, struct ieee80211_regdomain *, int, struct ieee80211_channel []); static void mwl_getradiocaps(struct ieee80211com *, int, int *, struct ieee80211_channel []); static int mwl_getchannels(struct mwl_softc *); static void mwl_sysctlattach(struct mwl_softc *); static void mwl_announce(struct mwl_softc *); SYSCTL_NODE(_hw, OID_AUTO, mwl, CTLFLAG_RD, 0, "Marvell driver parameters"); static int mwl_rxdesc = MWL_RXDESC; /* # rx desc's to allocate */ SYSCTL_INT(_hw_mwl, OID_AUTO, rxdesc, CTLFLAG_RW, &mwl_rxdesc, 0, "rx descriptors allocated"); static int mwl_rxbuf = MWL_RXBUF; /* # rx buffers to allocate */ SYSCTL_INT(_hw_mwl, OID_AUTO, rxbuf, CTLFLAG_RWTUN, &mwl_rxbuf, 0, "rx buffers allocated"); static int mwl_txbuf = MWL_TXBUF; /* # tx buffers to allocate */ SYSCTL_INT(_hw_mwl, OID_AUTO, txbuf, CTLFLAG_RWTUN, &mwl_txbuf, 0, "tx buffers allocated"); static int mwl_txcoalesce = 8; /* # tx packets to q before poking f/w*/ SYSCTL_INT(_hw_mwl, OID_AUTO, txcoalesce, CTLFLAG_RWTUN, &mwl_txcoalesce, 0, "tx buffers to send at once"); static int mwl_rxquota = MWL_RXBUF; /* # max buffers to process */ SYSCTL_INT(_hw_mwl, OID_AUTO, rxquota, CTLFLAG_RWTUN, &mwl_rxquota, 0, "max rx buffers to process per interrupt"); static int mwl_rxdmalow = 3; /* # min buffers for wakeup */ SYSCTL_INT(_hw_mwl, OID_AUTO, rxdmalow, CTLFLAG_RWTUN, &mwl_rxdmalow, 0, "min free rx buffers before restarting traffic"); #ifdef MWL_DEBUG static int mwl_debug = 0; SYSCTL_INT(_hw_mwl, OID_AUTO, debug, CTLFLAG_RWTUN, &mwl_debug, 0, "control debugging printfs"); enum { MWL_DEBUG_XMIT = 0x00000001, /* basic xmit operation */ MWL_DEBUG_XMIT_DESC = 0x00000002, /* xmit descriptors */ MWL_DEBUG_RECV = 0x00000004, /* basic recv operation */ MWL_DEBUG_RECV_DESC = 0x00000008, /* recv descriptors */ MWL_DEBUG_RESET = 0x00000010, /* reset processing */ MWL_DEBUG_BEACON = 0x00000020, /* beacon handling */ MWL_DEBUG_INTR = 0x00000040, /* ISR */ MWL_DEBUG_TX_PROC = 0x00000080, /* tx ISR proc */ MWL_DEBUG_RX_PROC = 0x00000100, /* rx ISR proc */ MWL_DEBUG_KEYCACHE = 0x00000200, /* key cache management */ MWL_DEBUG_STATE = 0x00000400, /* 802.11 state transitions */ 
MWL_DEBUG_NODE = 0x00000800, /* node management */ MWL_DEBUG_RECV_ALL = 0x00001000, /* trace all frames (beacons) */ MWL_DEBUG_TSO = 0x00002000, /* TSO processing */ MWL_DEBUG_AMPDU = 0x00004000, /* BA stream handling */ MWL_DEBUG_ANY = 0xffffffff }; #define IS_BEACON(wh) \ ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK|IEEE80211_FC0_SUBTYPE_MASK)) == \ (IEEE80211_FC0_TYPE_MGT|IEEE80211_FC0_SUBTYPE_BEACON)) #define IFF_DUMPPKTS_RECV(sc, wh) \ ((sc->sc_debug & MWL_DEBUG_RECV) && \ ((sc->sc_debug & MWL_DEBUG_RECV_ALL) || !IS_BEACON(wh))) #define IFF_DUMPPKTS_XMIT(sc) \ (sc->sc_debug & MWL_DEBUG_XMIT) #define DPRINTF(sc, m, fmt, ...) do { \ if (sc->sc_debug & (m)) \ printf(fmt, __VA_ARGS__); \ } while (0) #define KEYPRINTF(sc, hk, mac) do { \ if (sc->sc_debug & MWL_DEBUG_KEYCACHE) \ mwl_keyprint(sc, __func__, hk, mac); \ } while (0) static void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix); static void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix); #else #define IFF_DUMPPKTS_RECV(sc, wh) 0 #define IFF_DUMPPKTS_XMIT(sc) 0 #define DPRINTF(sc, m, fmt, ...) do { (void )sc; } while (0) #define KEYPRINTF(sc, k, mac) do { (void )sc; } while (0) #endif static MALLOC_DEFINE(M_MWLDEV, "mwldev", "mwl driver dma buffers"); /* * Each packet has fixed front matter: a 2-byte length * of the payload, followed by a 4-address 802.11 header * (regardless of the actual header and always w/o any * QoS header). The payload then follows. */ struct mwltxrec { uint16_t fwlen; struct ieee80211_frame_addr4 wh; } __packed; /* * Read/Write shorthands for accesses to BAR 0. Note * that all BAR 1 operations are done in the "hal" and * there should be no reference to them here. */ #ifdef MWL_DEBUG static __inline uint32_t RD4(struct mwl_softc *sc, bus_size_t off) { return bus_space_read_4(sc->sc_io0t, sc->sc_io0h, off); } #endif static __inline void WR4(struct mwl_softc *sc, bus_size_t off, uint32_t val) { bus_space_write_4(sc->sc_io0t, sc->sc_io0h, off, val); } int mwl_attach(uint16_t devid, struct mwl_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct mwl_hal *mh; int error = 0; DPRINTF(sc, MWL_DEBUG_ANY, "%s: devid 0x%x\n", __func__, devid); /* * Setup the RX free list lock early, so it can be consistently * removed. */ MWL_RXFREE_INIT(sc); mh = mwl_hal_attach(sc->sc_dev, devid, sc->sc_io1h, sc->sc_io1t, sc->sc_dmat); if (mh == NULL) { device_printf(sc->sc_dev, "unable to attach HAL\n"); error = EIO; goto bad; } sc->sc_mh = mh; /* * Load firmware so we can get setup. We arbitrarily * pick station firmware; we'll re-load firmware as * needed so setting up the wrong mode isn't a big deal. */ if (mwl_hal_fwload(mh, NULL) != 0) { device_printf(sc->sc_dev, "unable to setup builtin firmware\n"); error = EIO; goto bad1; } if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) { device_printf(sc->sc_dev, "unable to fetch h/w specs\n"); error = EIO; goto bad1; } error = mwl_getchannels(sc); if (error != 0) goto bad1; sc->sc_txantenna = 0; /* h/w default */ sc->sc_rxantenna = 0; /* h/w default */ sc->sc_invalid = 0; /* ready to go, enable int handling */ sc->sc_ageinterval = MWL_AGEINTERVAL; /* * Allocate tx+rx descriptors and populate the lists. * We immediately push the information to the firmware * as otherwise it gets upset. 
*/ error = mwl_dma_setup(sc); if (error != 0) { device_printf(sc->sc_dev, "failed to setup descriptors: %d\n", error); goto bad1; } error = mwl_setupdma(sc); /* push to firmware */ if (error != 0) /* NB: mwl_setupdma prints msg */ goto bad1; callout_init(&sc->sc_timer, 1); callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0); mbufq_init(&sc->sc_snd, ifqmaxlen); sc->sc_tq = taskqueue_create("mwl_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->sc_tq); taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->sc_dev)); TASK_INIT(&sc->sc_rxtask, 0, mwl_rx_proc, sc); TASK_INIT(&sc->sc_radartask, 0, mwl_radar_proc, sc); TASK_INIT(&sc->sc_chanswitchtask, 0, mwl_chanswitch_proc, sc); TASK_INIT(&sc->sc_bawatchdogtask, 0, mwl_bawatchdog_proc, sc); /* NB: insure BK queue is the lowest priority h/w queue */ if (!mwl_tx_setup(sc, WME_AC_BK, MWL_WME_AC_BK)) { device_printf(sc->sc_dev, "unable to setup xmit queue for %s traffic!\n", ieee80211_wme_acnames[WME_AC_BK]); error = EIO; goto bad2; } if (!mwl_tx_setup(sc, WME_AC_BE, MWL_WME_AC_BE) || !mwl_tx_setup(sc, WME_AC_VI, MWL_WME_AC_VI) || !mwl_tx_setup(sc, WME_AC_VO, MWL_WME_AC_VO)) { /* * Not enough hardware tx queues to properly do WME; * just punt and assign them all to the same h/w queue. * We could do a better job of this if, for example, * we allocate queues when we switch from station to * AP mode. */ if (sc->sc_ac2q[WME_AC_VI] != NULL) mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_VI]); if (sc->sc_ac2q[WME_AC_BE] != NULL) mwl_tx_cleanupq(sc, sc->sc_ac2q[WME_AC_BE]); sc->sc_ac2q[WME_AC_BE] = sc->sc_ac2q[WME_AC_BK]; sc->sc_ac2q[WME_AC_VI] = sc->sc_ac2q[WME_AC_BK]; sc->sc_ac2q[WME_AC_VO] = sc->sc_ac2q[WME_AC_BK]; } TASK_INIT(&sc->sc_txtask, 0, mwl_tx_proc, sc); ic->ic_softc = sc; ic->ic_name = device_get_nameunit(sc->sc_dev); /* XXX not right but it's not used anywhere important */ ic->ic_phytype = IEEE80211_T_OFDM; ic->ic_opmode = IEEE80211_M_STA; ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_HOSTAP /* hostap mode */ | IEEE80211_C_MONITOR /* monitor mode */ #if 0 | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ | IEEE80211_C_AHDEMO /* adhoc demo mode */ #endif | IEEE80211_C_MBSS /* mesh point link mode */ | IEEE80211_C_WDS /* WDS supported */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_WME /* WME/WMM supported */ | IEEE80211_C_BURST /* xmit bursting supported */ | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ | IEEE80211_C_TXFRAG /* handle tx frags */ | IEEE80211_C_TXPMGT /* capable of txpow mgt */ | IEEE80211_C_DFS /* DFS supported */ ; ic->ic_htcaps = IEEE80211_HTCAP_SMPS_ENA /* SM PS mode enabled */ | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width */ | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */ | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */ | IEEE80211_HTCAP_RXSTBC_2STREAM/* 1-2 spatial streams */ #if MWL_AGGR_SIZE == 7935 | IEEE80211_HTCAP_MAXAMSDU_7935 /* max A-MSDU length */ #else | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */ #endif #if 0 | IEEE80211_HTCAP_PSMP /* PSMP supported */ | IEEE80211_HTCAP_40INTOLERANT /* 40MHz intolerant */ #endif /* s/w capabilities */ | IEEE80211_HTC_HT /* HT operation */ | IEEE80211_HTC_AMPDU /* tx A-MPDU */ | IEEE80211_HTC_AMSDU /* tx A-MSDU */ | IEEE80211_HTC_SMPS /* SMPS available */ ; /* * Mark h/w crypto support. * XXX no way to query h/w support. 
*/ ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP | IEEE80211_CRYPTO_AES_CCM | IEEE80211_CRYPTO_TKIP | IEEE80211_CRYPTO_TKIPMIC ; /* * Transmit requires space in the packet for a special * format transmit record and optional padding between * this record and the payload. Ask the net80211 layer * to arrange this when encapsulating packets so we can * add it efficiently. */ ic->ic_headroom = sizeof(struct mwltxrec) - sizeof(struct ieee80211_frame); IEEE80211_ADDR_COPY(ic->ic_macaddr, sc->sc_hwspecs.macAddr); /* call MI attach routine. */ ieee80211_ifattach(ic); ic->ic_setregdomain = mwl_setregdomain; ic->ic_getradiocaps = mwl_getradiocaps; /* override default methods */ ic->ic_raw_xmit = mwl_raw_xmit; ic->ic_newassoc = mwl_newassoc; ic->ic_updateslot = mwl_updateslot; ic->ic_update_mcast = mwl_update_mcast; ic->ic_update_promisc = mwl_update_promisc; ic->ic_wme.wme_update = mwl_wme_update; ic->ic_transmit = mwl_transmit; ic->ic_ioctl = mwl_ioctl; ic->ic_parent = mwl_parent; ic->ic_node_alloc = mwl_node_alloc; sc->sc_node_cleanup = ic->ic_node_cleanup; ic->ic_node_cleanup = mwl_node_cleanup; sc->sc_node_drain = ic->ic_node_drain; ic->ic_node_drain = mwl_node_drain; ic->ic_node_getsignal = mwl_node_getsignal; ic->ic_node_getmimoinfo = mwl_node_getmimoinfo; ic->ic_scan_start = mwl_scan_start; ic->ic_scan_end = mwl_scan_end; ic->ic_set_channel = mwl_set_channel; sc->sc_recv_action = ic->ic_recv_action; ic->ic_recv_action = mwl_recv_action; sc->sc_addba_request = ic->ic_addba_request; ic->ic_addba_request = mwl_addba_request; sc->sc_addba_response = ic->ic_addba_response; ic->ic_addba_response = mwl_addba_response; sc->sc_addba_stop = ic->ic_addba_stop; ic->ic_addba_stop = mwl_addba_stop; ic->ic_vap_create = mwl_vap_create; ic->ic_vap_delete = mwl_vap_delete; ieee80211_radiotap_attach(ic, &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), MWL_TX_RADIOTAP_PRESENT, &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), MWL_RX_RADIOTAP_PRESENT); /* * Setup dynamic sysctl's now that country code and * regdomain are available from the hal. */ mwl_sysctlattach(sc); if (bootverbose) ieee80211_announce(ic); mwl_announce(sc); return 0; bad2: mwl_dma_cleanup(sc); bad1: mwl_hal_detach(mh); bad: MWL_RXFREE_DESTROY(sc); sc->sc_invalid = 1; return error; } int mwl_detach(struct mwl_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; MWL_LOCK(sc); mwl_stop(sc); MWL_UNLOCK(sc); /* * NB: the order of these is important: * o call the 802.11 layer before detaching the hal to * insure callbacks into the driver to delete global * key cache entries can be handled * o reclaim the tx queue data structures after calling * the 802.11 layer as we'll get called back to reclaim * node state and potentially want to use them * o to cleanup the tx queues the hal is called, so detach * it last * Other than that, it's straightforward... */ ieee80211_ifdetach(ic); callout_drain(&sc->sc_watchdog); mwl_dma_cleanup(sc); MWL_RXFREE_DESTROY(sc); mwl_tx_cleanup(sc); mwl_hal_detach(sc->sc_mh); mbufq_drain(&sc->sc_snd); return 0; } /* * MAC address handling for multiple BSS on the same radio. * The first vap uses the MAC address from the EEPROM. For * subsequent vap's we set the U/L bit (bit 1) in the MAC * address and use the next six bits as an index. 
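 *
 * Roughly (a sketch of the scheme described above; reclaim_address()
 * recovers the index as mac[0] >> 2):
 *
 *	mac[0] |= (i << 2) | 0x02;	U/L bit set, index i in the bits above it
 *
 * so with an EEPROM address of 00:50:43:xx:xx:xx, the vap using index 1
 * would get 06:50:43:xx:xx:xx.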
*/ static void assign_address(struct mwl_softc *sc, uint8_t mac[IEEE80211_ADDR_LEN], int clone) { int i; if (clone && mwl_hal_ismbsscapable(sc->sc_mh)) { /* NB: we only do this if h/w supports multiple bssid */ for (i = 0; i < 32; i++) if ((sc->sc_bssidmask & (1<sc_bssidmask |= 1<sc_nbssid0++; } static void reclaim_address(struct mwl_softc *sc, const uint8_t mac[IEEE80211_ADDR_LEN]) { int i = mac[0] >> 2; if (i != 0 || --sc->sc_nbssid0 == 0) sc->sc_bssidmask &= ~(1<ic_softc; struct mwl_hal *mh = sc->sc_mh; struct ieee80211vap *vap, *apvap; struct mwl_hal_vap *hvap; struct mwl_vap *mvp; uint8_t mac[IEEE80211_ADDR_LEN]; IEEE80211_ADDR_COPY(mac, mac0); switch (opmode) { case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: if ((flags & IEEE80211_CLONE_MACADDR) == 0) assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID); hvap = mwl_hal_newvap(mh, MWL_HAL_AP, mac); if (hvap == NULL) { if ((flags & IEEE80211_CLONE_MACADDR) == 0) reclaim_address(sc, mac); return NULL; } break; case IEEE80211_M_STA: if ((flags & IEEE80211_CLONE_MACADDR) == 0) assign_address(sc, mac, flags & IEEE80211_CLONE_BSSID); hvap = mwl_hal_newvap(mh, MWL_HAL_STA, mac); if (hvap == NULL) { if ((flags & IEEE80211_CLONE_MACADDR) == 0) reclaim_address(sc, mac); return NULL; } /* no h/w beacon miss support; always use s/w */ flags |= IEEE80211_CLONE_NOBEACONS; break; case IEEE80211_M_WDS: hvap = NULL; /* NB: we use associated AP vap */ if (sc->sc_napvaps == 0) return NULL; /* no existing AP vap */ break; case IEEE80211_M_MONITOR: hvap = NULL; break; case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: default: return NULL; } mvp = malloc(sizeof(struct mwl_vap), M_80211_VAP, M_WAITOK | M_ZERO); mvp->mv_hvap = hvap; if (opmode == IEEE80211_M_WDS) { /* * WDS vaps must have an associated AP vap; find one. * XXX not right. */ TAILQ_FOREACH(apvap, &ic->ic_vaps, iv_next) if (apvap->iv_opmode == IEEE80211_M_HOSTAP) { mvp->mv_ap_hvap = MWL_VAP(apvap)->mv_hvap; break; } KASSERT(mvp->mv_ap_hvap != NULL, ("no ap vap")); } vap = &mvp->mv_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); /* override with driver methods */ mvp->mv_newstate = vap->iv_newstate; vap->iv_newstate = mwl_newstate; vap->iv_max_keyix = 0; /* XXX */ vap->iv_key_alloc = mwl_key_alloc; vap->iv_key_delete = mwl_key_delete; vap->iv_key_set = mwl_key_set; #ifdef MWL_HOST_PS_SUPPORT if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) { vap->iv_update_ps = mwl_update_ps; mvp->mv_set_tim = vap->iv_set_tim; vap->iv_set_tim = mwl_set_tim; } #endif vap->iv_reset = mwl_reset; vap->iv_update_beacon = mwl_beacon_update; /* override max aid so sta's cannot assoc when we're out of sta id's */ vap->iv_max_aid = MWL_MAXSTAID; /* override default A-MPDU rx parameters */ vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_64K; vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_4; /* complete setup */ ieee80211_vap_attach(vap, mwl_media_change, ieee80211_media_status, mac); switch (vap->iv_opmode) { case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: case IEEE80211_M_STA: /* * Setup sta db entry for local address. */ mwl_localstadb(vap); if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_MBSS) sc->sc_napvaps++; else sc->sc_nstavaps++; break; case IEEE80211_M_WDS: sc->sc_nwdsvaps++; break; default: break; } /* * Setup overall operating mode. 
*/ if (sc->sc_napvaps) ic->ic_opmode = IEEE80211_M_HOSTAP; else if (sc->sc_nstavaps) ic->ic_opmode = IEEE80211_M_STA; else ic->ic_opmode = opmode; return vap; } static void mwl_vap_delete(struct ieee80211vap *vap) { struct mwl_vap *mvp = MWL_VAP(vap); struct mwl_softc *sc = vap->iv_ic->ic_softc; struct mwl_hal *mh = sc->sc_mh; struct mwl_hal_vap *hvap = mvp->mv_hvap; enum ieee80211_opmode opmode = vap->iv_opmode; /* XXX disallow ap vap delete if WDS still present */ if (sc->sc_running) { /* quiesce h/w while we remove the vap */ mwl_hal_intrset(mh, 0); /* disable interrupts */ } ieee80211_vap_detach(vap); switch (opmode) { case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: case IEEE80211_M_STA: KASSERT(hvap != NULL, ("no hal vap handle")); (void) mwl_hal_delstation(hvap, vap->iv_myaddr); mwl_hal_delvap(hvap); if (opmode == IEEE80211_M_HOSTAP || opmode == IEEE80211_M_MBSS) sc->sc_napvaps--; else sc->sc_nstavaps--; /* XXX don't do it for IEEE80211_CLONE_MACADDR */ reclaim_address(sc, vap->iv_myaddr); break; case IEEE80211_M_WDS: sc->sc_nwdsvaps--; break; default: break; } mwl_cleartxq(sc, vap); free(mvp, M_80211_VAP); if (sc->sc_running) mwl_hal_intrset(mh, sc->sc_imask); } void mwl_suspend(struct mwl_softc *sc) { MWL_LOCK(sc); mwl_stop(sc); MWL_UNLOCK(sc); } void mwl_resume(struct mwl_softc *sc) { int error = EDOOFUS; MWL_LOCK(sc); if (sc->sc_ic.ic_nrunning > 0) error = mwl_init(sc); MWL_UNLOCK(sc); if (error == 0) ieee80211_start_all(&sc->sc_ic); /* start all vap's */ } void mwl_shutdown(void *arg) { struct mwl_softc *sc = arg; MWL_LOCK(sc); mwl_stop(sc); MWL_UNLOCK(sc); } /* * Interrupt handler. Most of the actual processing is deferred. */ void mwl_intr(void *arg) { struct mwl_softc *sc = arg; struct mwl_hal *mh = sc->sc_mh; uint32_t status; if (sc->sc_invalid) { /* * The hardware is not ready/present, don't touch anything. * Note this can happen early on if the IRQ is shared. */ DPRINTF(sc, MWL_DEBUG_ANY, "%s: invalid; ignored\n", __func__); return; } /* * Figure out the reason(s) for the interrupt. */ mwl_hal_getisr(mh, &status); /* NB: clears ISR too */ if (status == 0) /* must be a shared irq */ return; DPRINTF(sc, MWL_DEBUG_INTR, "%s: status 0x%x imask 0x%x\n", __func__, status, sc->sc_imask); if (status & MACREG_A2HRIC_BIT_RX_RDY) taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); if (status & MACREG_A2HRIC_BIT_TX_DONE) taskqueue_enqueue(sc->sc_tq, &sc->sc_txtask); if (status & MACREG_A2HRIC_BIT_BA_WATCHDOG) taskqueue_enqueue(sc->sc_tq, &sc->sc_bawatchdogtask); if (status & MACREG_A2HRIC_BIT_OPC_DONE) mwl_hal_cmddone(mh); if (status & MACREG_A2HRIC_BIT_MAC_EVENT) { ; } if (status & MACREG_A2HRIC_BIT_ICV_ERROR) { /* TKIP ICV error */ sc->sc_stats.mst_rx_badtkipicv++; } if (status & MACREG_A2HRIC_BIT_QUEUE_EMPTY) { /* 11n aggregation queue is empty, re-fill */ ; } if (status & MACREG_A2HRIC_BIT_QUEUE_FULL) { ; } if (status & MACREG_A2HRIC_BIT_RADAR_DETECT) { /* radar detected, process event */ taskqueue_enqueue(sc->sc_tq, &sc->sc_radartask); } if (status & MACREG_A2HRIC_BIT_CHAN_SWITCH) { /* DFS channel switch */ taskqueue_enqueue(sc->sc_tq, &sc->sc_chanswitchtask); } } static void mwl_radar_proc(void *arg, int pending) { struct mwl_softc *sc = arg; struct ieee80211com *ic = &sc->sc_ic; DPRINTF(sc, MWL_DEBUG_ANY, "%s: radar detected, pending %u\n", __func__, pending); sc->sc_stats.mst_radardetect++; /* XXX stop h/w BA streams? 
*/ IEEE80211_LOCK(ic); ieee80211_dfs_notify_radar(ic, ic->ic_curchan); IEEE80211_UNLOCK(ic); } static void mwl_chanswitch_proc(void *arg, int pending) { struct mwl_softc *sc = arg; struct ieee80211com *ic = &sc->sc_ic; DPRINTF(sc, MWL_DEBUG_ANY, "%s: channel switch notice, pending %u\n", __func__, pending); IEEE80211_LOCK(ic); sc->sc_csapending = 0; ieee80211_csa_completeswitch(ic); IEEE80211_UNLOCK(ic); } static void mwl_bawatchdog(const MWL_HAL_BASTREAM *sp) { struct ieee80211_node *ni = sp->data[0]; /* send DELBA and drop the stream */ ieee80211_ampdu_stop(ni, sp->data[1], IEEE80211_REASON_UNSPECIFIED); } static void mwl_bawatchdog_proc(void *arg, int pending) { struct mwl_softc *sc = arg; struct mwl_hal *mh = sc->sc_mh; const MWL_HAL_BASTREAM *sp; uint8_t bitmap, n; sc->sc_stats.mst_bawatchdog++; if (mwl_hal_getwatchdogbitmap(mh, &bitmap) != 0) { DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: could not get bitmap\n", __func__); sc->sc_stats.mst_bawatchdog_failed++; return; } DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bitmap 0x%x\n", __func__, bitmap); if (bitmap == 0xff) { n = 0; /* disable all ba streams */ for (bitmap = 0; bitmap < 8; bitmap++) { sp = mwl_hal_bastream_lookup(mh, bitmap); if (sp != NULL) { mwl_bawatchdog(sp); n++; } } if (n == 0) { DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: no BA streams found\n", __func__); sc->sc_stats.mst_bawatchdog_empty++; } } else if (bitmap != 0xaa) { /* disable a single ba stream */ sp = mwl_hal_bastream_lookup(mh, bitmap); if (sp != NULL) { mwl_bawatchdog(sp); } else { DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: no BA stream %d\n", __func__, bitmap); sc->sc_stats.mst_bawatchdog_notfound++; } } } /* * Convert net80211 channel to a HAL channel. */ static void mwl_mapchan(MWL_HAL_CHANNEL *hc, const struct ieee80211_channel *chan) { hc->channel = chan->ic_ieee; *(uint32_t *)&hc->channelFlags = 0; if (IEEE80211_IS_CHAN_2GHZ(chan)) hc->channelFlags.FreqBand = MWL_FREQ_BAND_2DOT4GHZ; else if (IEEE80211_IS_CHAN_5GHZ(chan)) hc->channelFlags.FreqBand = MWL_FREQ_BAND_5GHZ; if (IEEE80211_IS_CHAN_HT40(chan)) { hc->channelFlags.ChnlWidth = MWL_CH_40_MHz_WIDTH; if (IEEE80211_IS_CHAN_HT40U(chan)) hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_ABOVE_CTRL_CH; else hc->channelFlags.ExtChnlOffset = MWL_EXT_CH_BELOW_CTRL_CH; } else hc->channelFlags.ChnlWidth = MWL_CH_20_MHz_WIDTH; /* XXX 10MHz channels */ } /* * Inform firmware of our tx/rx dma setup. The BAR 0 * writes below are for compatibility with older firmware. * For current firmware we send this information with a * cmd block via mwl_hal_sethwdma. */ static int mwl_setupdma(struct mwl_softc *sc) { int error, i; sc->sc_hwdma.rxDescRead = sc->sc_rxdma.dd_desc_paddr; WR4(sc, sc->sc_hwspecs.rxDescRead, sc->sc_hwdma.rxDescRead); WR4(sc, sc->sc_hwspecs.rxDescWrite, sc->sc_hwdma.rxDescRead); for (i = 0; i < MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; i++) { struct mwl_txq *txq = &sc->sc_txq[i]; sc->sc_hwdma.wcbBase[i] = txq->dma.dd_desc_paddr; WR4(sc, sc->sc_hwspecs.wcbBase[i], sc->sc_hwdma.wcbBase[i]); } sc->sc_hwdma.maxNumTxWcb = mwl_txbuf; sc->sc_hwdma.maxNumWCB = MWL_NUM_TX_QUEUES-MWL_NUM_ACK_QUEUES; error = mwl_hal_sethwdma(sc->sc_mh, &sc->sc_hwdma); if (error != 0) { device_printf(sc->sc_dev, "unable to setup tx/rx dma; hal status %u\n", error); /* XXX */ } return error; } /* * Inform firmware of tx rate parameters. * Called after a channel change. 
*/ static int mwl_setcurchanrates(struct mwl_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; const struct ieee80211_rateset *rs; MWL_HAL_TXRATE rates; memset(&rates, 0, sizeof(rates)); rs = ieee80211_get_suprates(ic, ic->ic_curchan); /* rate used to send management frames */ rates.MgtRate = rs->rs_rates[0] & IEEE80211_RATE_VAL; /* rate used to send multicast frames */ rates.McastRate = rates.MgtRate; return mwl_hal_settxrate_auto(sc->sc_mh, &rates); } /* * Inform firmware of tx rate parameters. Called whenever * user-settable params change and after a channel change. */ static int mwl_setrates(struct ieee80211vap *vap) { struct mwl_vap *mvp = MWL_VAP(vap); struct ieee80211_node *ni = vap->iv_bss; const struct ieee80211_txparam *tp = ni->ni_txparms; MWL_HAL_TXRATE rates; KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state)); /* * Update the h/w rate map. * NB: 0x80 for MCS is passed through unchanged */ memset(&rates, 0, sizeof(rates)); /* rate used to send management frames */ rates.MgtRate = tp->mgmtrate; /* rate used to send multicast frames */ rates.McastRate = tp->mcastrate; /* while here calculate EAPOL fixed rate cookie */ mvp->mv_eapolformat = htole16(mwl_calcformat(rates.MgtRate, ni)); return mwl_hal_settxrate(mvp->mv_hvap, tp->ucastrate != IEEE80211_FIXED_RATE_NONE ? RATE_FIXED : RATE_AUTO, &rates); } /* * Setup a fixed xmit rate cookie for EAPOL frames. */ static void mwl_seteapolformat(struct ieee80211vap *vap) { struct mwl_vap *mvp = MWL_VAP(vap); struct ieee80211_node *ni = vap->iv_bss; enum ieee80211_phymode mode; uint8_t rate; KASSERT(vap->iv_state == IEEE80211_S_RUN, ("state %d", vap->iv_state)); mode = ieee80211_chan2mode(ni->ni_chan); /* * Use legacy rates when operating a mixed HT+non-HT bss. * NB: this may violate POLA for sta and wds vap's. */ if (mode == IEEE80211_MODE_11NA && (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0) rate = vap->iv_txparms[IEEE80211_MODE_11A].mgmtrate; else if (mode == IEEE80211_MODE_11NG && (vap->iv_flags_ht & IEEE80211_FHT_PUREN) == 0) rate = vap->iv_txparms[IEEE80211_MODE_11G].mgmtrate; else rate = vap->iv_txparms[mode].mgmtrate; mvp->mv_eapolformat = htole16(mwl_calcformat(rate, ni)); } /* * Map SKU+country code to region code for radar bin'ing. */ static int mwl_map2regioncode(const struct ieee80211_regdomain *rd) { switch (rd->regdomain) { case SKU_FCC: case SKU_FCC3: return DOMAIN_CODE_FCC; case SKU_CA: return DOMAIN_CODE_IC; case SKU_ETSI: case SKU_ETSI2: case SKU_ETSI3: if (rd->country == CTRY_SPAIN) return DOMAIN_CODE_SPAIN; if (rd->country == CTRY_FRANCE || rd->country == CTRY_FRANCE2) return DOMAIN_CODE_FRANCE; /* XXX force 1.3.1 radar type */ return DOMAIN_CODE_ETSI_131; case SKU_JAPAN: return DOMAIN_CODE_MKK; case SKU_ROW: return DOMAIN_CODE_DGT; /* Taiwan */ case SKU_APAC: case SKU_APAC2: case SKU_APAC3: return DOMAIN_CODE_AUS; /* Australia */ } /* XXX KOREA? */ return DOMAIN_CODE_FCC; /* XXX? 
*/ } static int mwl_hal_reset(struct mwl_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct mwl_hal *mh = sc->sc_mh; mwl_hal_setantenna(mh, WL_ANTENNATYPE_RX, sc->sc_rxantenna); mwl_hal_setantenna(mh, WL_ANTENNATYPE_TX, sc->sc_txantenna); mwl_hal_setradio(mh, 1, WL_AUTO_PREAMBLE); mwl_hal_setwmm(sc->sc_mh, (ic->ic_flags & IEEE80211_F_WME) != 0); mwl_chan_set(sc, ic->ic_curchan); /* NB: RF/RA performance tuned for indoor mode */ mwl_hal_setrateadaptmode(mh, 0); mwl_hal_setoptimizationlevel(mh, (ic->ic_flags & IEEE80211_F_BURST) != 0); mwl_hal_setregioncode(mh, mwl_map2regioncode(&ic->ic_regdomain)); mwl_hal_setaggampduratemode(mh, 1, 80); /* XXX */ mwl_hal_setcfend(mh, 0); /* XXX */ return 1; } static int mwl_init(struct mwl_softc *sc) { struct mwl_hal *mh = sc->sc_mh; int error = 0; MWL_LOCK_ASSERT(sc); /* * Stop anything previously setup. This is safe * whether this is the first time through or not. */ mwl_stop(sc); /* * Push vap-independent state to the firmware. */ if (!mwl_hal_reset(sc)) { device_printf(sc->sc_dev, "unable to reset hardware\n"); return EIO; } /* * Setup recv (once); transmit is already good to go. */ error = mwl_startrecv(sc); if (error != 0) { device_printf(sc->sc_dev, "unable to start recv logic\n"); return error; } /* * Enable interrupts. */ sc->sc_imask = MACREG_A2HRIC_BIT_RX_RDY | MACREG_A2HRIC_BIT_TX_DONE | MACREG_A2HRIC_BIT_OPC_DONE #if 0 | MACREG_A2HRIC_BIT_MAC_EVENT #endif | MACREG_A2HRIC_BIT_ICV_ERROR | MACREG_A2HRIC_BIT_RADAR_DETECT | MACREG_A2HRIC_BIT_CHAN_SWITCH #if 0 | MACREG_A2HRIC_BIT_QUEUE_EMPTY #endif | MACREG_A2HRIC_BIT_BA_WATCHDOG | MACREQ_A2HRIC_BIT_TX_ACK ; sc->sc_running = 1; mwl_hal_intrset(mh, sc->sc_imask); callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc); return 0; } static void mwl_stop(struct mwl_softc *sc) { MWL_LOCK_ASSERT(sc); if (sc->sc_running) { /* * Shutdown the hardware and driver. */ sc->sc_running = 0; callout_stop(&sc->sc_watchdog); sc->sc_tx_timer = 0; mwl_draintxq(sc); } } static int mwl_reset_vap(struct ieee80211vap *vap, int state) { struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap; struct ieee80211com *ic = vap->iv_ic; if (state == IEEE80211_S_RUN) mwl_setrates(vap); /* XXX off by 1? */ mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold); /* XXX auto? 20/40 split? */ mwl_hal_sethtgi(hvap, (vap->iv_flags_ht & (IEEE80211_FHT_SHORTGI20|IEEE80211_FHT_SHORTGI40)) ? 1 : 0); mwl_hal_setnprot(hvap, ic->ic_htprotmode == IEEE80211_PROT_NONE ? HTPROTECT_NONE : HTPROTECT_AUTO); /* XXX txpower cap */ /* re-setup beacons */ if (state == IEEE80211_S_RUN && (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_MBSS || vap->iv_opmode == IEEE80211_M_IBSS)) { mwl_setapmode(vap, vap->iv_bss->ni_chan); mwl_hal_setnprotmode(hvap, MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE)); return mwl_beacon_setup(vap); } return 0; } /* * Reset the hardware w/o losing operational state. * Used to to reset or reload hardware state for a vap. */ static int mwl_reset(struct ieee80211vap *vap, u_long cmd) { struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap; int error = 0; if (hvap != NULL) { /* WDS, MONITOR, etc. */ struct ieee80211com *ic = vap->iv_ic; struct mwl_softc *sc = ic->ic_softc; struct mwl_hal *mh = sc->sc_mh; /* XXX handle DWDS sta vap change */ /* XXX do we need to disable interrupts? */ mwl_hal_intrset(mh, 0); /* disable interrupts */ error = mwl_reset_vap(vap, vap->iv_state); mwl_hal_intrset(mh, sc->sc_imask); } return error; } /* * Allocate a tx buffer for sending a frame. 
The * packet is assumed to have the WME AC stored so * we can use it to select the appropriate h/w queue. */ static struct mwl_txbuf * mwl_gettxbuf(struct mwl_softc *sc, struct mwl_txq *txq) { struct mwl_txbuf *bf; /* * Grab a TX buffer and associated resources. */ MWL_TXQ_LOCK(txq); bf = STAILQ_FIRST(&txq->free); if (bf != NULL) { STAILQ_REMOVE_HEAD(&txq->free, bf_list); txq->nfree--; } MWL_TXQ_UNLOCK(txq); if (bf == NULL) DPRINTF(sc, MWL_DEBUG_XMIT, "%s: out of xmit buffers on q %d\n", __func__, txq->qnum); return bf; } /* * Return a tx buffer to the queue it came from. Note there * are two cases because we must preserve the order of buffers * as it reflects the fixed order of descriptors in memory * (the firmware pre-fetches descriptors so we cannot reorder). */ static void mwl_puttxbuf_head(struct mwl_txq *txq, struct mwl_txbuf *bf) { bf->bf_m = NULL; bf->bf_node = NULL; MWL_TXQ_LOCK(txq); STAILQ_INSERT_HEAD(&txq->free, bf, bf_list); txq->nfree++; MWL_TXQ_UNLOCK(txq); } static void mwl_puttxbuf_tail(struct mwl_txq *txq, struct mwl_txbuf *bf) { bf->bf_m = NULL; bf->bf_node = NULL; MWL_TXQ_LOCK(txq); STAILQ_INSERT_TAIL(&txq->free, bf, bf_list); txq->nfree++; MWL_TXQ_UNLOCK(txq); } static int mwl_transmit(struct ieee80211com *ic, struct mbuf *m) { struct mwl_softc *sc = ic->ic_softc; int error; MWL_LOCK(sc); if (!sc->sc_running) { MWL_UNLOCK(sc); return (ENXIO); } error = mbufq_enqueue(&sc->sc_snd, m); if (error) { MWL_UNLOCK(sc); return (error); } mwl_start(sc); MWL_UNLOCK(sc); return (0); } static void mwl_start(struct mwl_softc *sc) { struct ieee80211_node *ni; struct mwl_txbuf *bf; struct mbuf *m; struct mwl_txq *txq = NULL; /* XXX silence gcc */ int nqueued; MWL_LOCK_ASSERT(sc); if (!sc->sc_running || sc->sc_invalid) return; nqueued = 0; while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) { /* * Grab the node for the destination. */ ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; KASSERT(ni != NULL, ("no node")); m->m_pkthdr.rcvif = NULL; /* committed, clear ref */ /* * Grab a TX buffer and associated resources. * We honor the classification by the 802.11 layer. */ txq = sc->sc_ac2q[M_WME_GETAC(m)]; bf = mwl_gettxbuf(sc, txq); if (bf == NULL) { m_freem(m); ieee80211_free_node(ni); #ifdef MWL_TX_NODROP sc->sc_stats.mst_tx_qstop++; break; #else DPRINTF(sc, MWL_DEBUG_XMIT, "%s: tail drop on q %d\n", __func__, txq->qnum); sc->sc_stats.mst_tx_qdrop++; continue; #endif /* MWL_TX_NODROP */ } /* * Pass the frame to the h/w for transmission. */ if (mwl_tx_start(sc, ni, bf, m)) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); mwl_puttxbuf_head(txq, bf); ieee80211_free_node(ni); continue; } nqueued++; if (nqueued >= mwl_txcoalesce) { /* * Poke the firmware to process queued frames; * see below about (lack of) locking. */ nqueued = 0; mwl_hal_txstart(sc->sc_mh, 0/*XXX*/); } } if (nqueued) { /* * NB: We don't need to lock against tx done because * this just prods the firmware to check the transmit * descriptors. The firmware will also start fetching * descriptors by itself if it notices new ones are * present when it goes to deliver a tx done interrupt * to the host. So if we race with tx done processing * it's ok. Delivering the kick here rather than in * mwl_tx_start is an optimization to avoid poking the * firmware for each packet. * * NB: the queue id isn't used so 0 is ok. 
*/ mwl_hal_txstart(sc->sc_mh, 0/*XXX*/); } } static int mwl_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct mwl_softc *sc = ic->ic_softc; struct mwl_txbuf *bf; struct mwl_txq *txq; if (!sc->sc_running || sc->sc_invalid) { m_freem(m); return ENETDOWN; } /* * Grab a TX buffer and associated resources. * Note that we depend on the classification * by the 802.11 layer to get to the right h/w * queue. Management frames must ALWAYS go on * queue 1 but we cannot just force that here * because we may receive non-mgt frames. */ txq = sc->sc_ac2q[M_WME_GETAC(m)]; bf = mwl_gettxbuf(sc, txq); if (bf == NULL) { sc->sc_stats.mst_tx_qstop++; m_freem(m); return ENOBUFS; } /* * Pass the frame to the h/w for transmission. */ if (mwl_tx_start(sc, ni, bf, m)) { mwl_puttxbuf_head(txq, bf); return EIO; /* XXX */ } /* * NB: We don't need to lock against tx done because * this just prods the firmware to check the transmit * descriptors. The firmware will also start fetching * descriptors by itself if it notices new ones are * present when it goes to deliver a tx done interrupt * to the host. So if we race with tx done processing * it's ok. Delivering the kick here rather than in * mwl_tx_start is an optimization to avoid poking the * firmware for each packet. * * NB: the queue id isn't used so 0 is ok. */ mwl_hal_txstart(sc->sc_mh, 0/*XXX*/); return 0; } static int mwl_media_change(struct ifnet *ifp) { struct ieee80211vap *vap = ifp->if_softc; int error; error = ieee80211_media_change(ifp); /* NB: only the fixed rate can change and that doesn't need a reset */ if (error == ENETRESET) { mwl_setrates(vap); error = 0; } return error; } #ifdef MWL_DEBUG static void mwl_keyprint(struct mwl_softc *sc, const char *tag, const MWL_HAL_KEYVAL *hk, const uint8_t mac[IEEE80211_ADDR_LEN]) { static const char *ciphers[] = { "WEP", "TKIP", "AES-CCM", }; int i, n; printf("%s: [%u] %-7s", tag, hk->keyIndex, ciphers[hk->keyTypeId]); for (i = 0, n = hk->keyLen; i < n; i++) printf(" %02x", hk->key.aes[i]); printf(" mac %s", ether_sprintf(mac)); if (hk->keyTypeId == KEY_TYPE_ID_TKIP) { printf(" %s", "rxmic"); for (i = 0; i < sizeof(hk->key.tkip.rxMic); i++) printf(" %02x", hk->key.tkip.rxMic[i]); printf(" txmic"); for (i = 0; i < sizeof(hk->key.tkip.txMic); i++) printf(" %02x", hk->key.tkip.txMic[i]); } printf(" flags 0x%x\n", hk->keyFlags); } #endif /* * Allocate a key cache slot for a unicast key. The * firmware handles key allocation and every station is * guaranteed key space so we are always successful. */ static int mwl_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k, ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix) { struct mwl_softc *sc = vap->iv_ic->ic_softc; if (k->wk_keyix != IEEE80211_KEYIX_NONE || (k->wk_flags & IEEE80211_KEY_GROUP)) { if (!(&vap->iv_nw_keys[0] <= k && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) { /* should not happen */ DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: bogus group key\n", __func__); return 0; } /* give the caller what they requested */ - *keyix = *rxkeyix = k - vap->iv_nw_keys; + *keyix = *rxkeyix = ieee80211_crypto_get_key_wepidx(vap, k); } else { /* * Firmware handles key allocation. */ *keyix = *rxkeyix = 0; } return 1; } /* * Delete a key entry allocated by mwl_key_alloc. 
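 *
 * (For the global keys handed out above, mwl_key_alloc now derives the
 * slot index with ieee80211_crypto_get_key_wepidx(vap, k), which is
 * presumably equivalent to the pointer arithmetic it replaces:
 *
 *	idx = k - &vap->iv_nw_keys[0];	0 <= idx < IEEE80211_WEP_NKID
 *
 * i.e. the key's position in the vap's global key array.)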
*/ static int mwl_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) { struct mwl_softc *sc = vap->iv_ic->ic_softc; struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap; MWL_HAL_KEYVAL hk; const uint8_t bcastaddr[IEEE80211_ADDR_LEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; if (hvap == NULL) { if (vap->iv_opmode != IEEE80211_M_WDS) { /* XXX monitor mode? */ DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: no hvap for opmode %d\n", __func__, vap->iv_opmode); return 0; } hvap = MWL_VAP(vap)->mv_ap_hvap; } DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: delete key %u\n", __func__, k->wk_keyix); memset(&hk, 0, sizeof(hk)); hk.keyIndex = k->wk_keyix; switch (k->wk_cipher->ic_cipher) { case IEEE80211_CIPHER_WEP: hk.keyTypeId = KEY_TYPE_ID_WEP; break; case IEEE80211_CIPHER_TKIP: hk.keyTypeId = KEY_TYPE_ID_TKIP; break; case IEEE80211_CIPHER_AES_CCM: hk.keyTypeId = KEY_TYPE_ID_AES; break; default: /* XXX should not happen */ DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n", __func__, k->wk_cipher->ic_cipher); return 0; } return (mwl_hal_keyreset(hvap, &hk, bcastaddr) == 0); /*XXX*/ } static __inline int addgroupflags(MWL_HAL_KEYVAL *hk, const struct ieee80211_key *k) { if (k->wk_flags & IEEE80211_KEY_GROUP) { if (k->wk_flags & IEEE80211_KEY_XMIT) hk->keyFlags |= KEY_FLAG_TXGROUPKEY; if (k->wk_flags & IEEE80211_KEY_RECV) hk->keyFlags |= KEY_FLAG_RXGROUPKEY; return 1; } else return 0; } /* * Set the key cache contents for the specified key. Key cache * slot(s) must already have been allocated by mwl_key_alloc. */ static int mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k) { return (_mwl_key_set(vap, k, k->wk_macaddr)); } static int _mwl_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k, const uint8_t mac[IEEE80211_ADDR_LEN]) { #define GRPXMIT (IEEE80211_KEY_XMIT | IEEE80211_KEY_GROUP) /* NB: static wep keys are marked GROUP+tx/rx; GTK will be tx or rx */ #define IEEE80211_IS_STATICKEY(k) \ (((k)->wk_flags & (GRPXMIT|IEEE80211_KEY_RECV)) == \ (GRPXMIT|IEEE80211_KEY_RECV)) struct mwl_softc *sc = vap->iv_ic->ic_softc; struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap; const struct ieee80211_cipher *cip = k->wk_cipher; const uint8_t *macaddr; MWL_HAL_KEYVAL hk; KASSERT((k->wk_flags & IEEE80211_KEY_SWCRYPT) == 0, ("s/w crypto set?")); if (hvap == NULL) { if (vap->iv_opmode != IEEE80211_M_WDS) { /* XXX monitor mode? 
*/ DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: no hvap for opmode %d\n", __func__, vap->iv_opmode); return 0; } hvap = MWL_VAP(vap)->mv_ap_hvap; } memset(&hk, 0, sizeof(hk)); hk.keyIndex = k->wk_keyix; switch (cip->ic_cipher) { case IEEE80211_CIPHER_WEP: hk.keyTypeId = KEY_TYPE_ID_WEP; hk.keyLen = k->wk_keylen; if (k->wk_keyix == vap->iv_def_txkey) hk.keyFlags = KEY_FLAG_WEP_TXKEY; if (!IEEE80211_IS_STATICKEY(k)) { /* NB: WEP is never used for the PTK */ (void) addgroupflags(&hk, k); } break; case IEEE80211_CIPHER_TKIP: hk.keyTypeId = KEY_TYPE_ID_TKIP; hk.key.tkip.tsc.high = (uint32_t)(k->wk_keytsc >> 16); hk.key.tkip.tsc.low = (uint16_t)k->wk_keytsc; hk.keyFlags = KEY_FLAG_TSC_VALID | KEY_FLAG_MICKEY_VALID; hk.keyLen = k->wk_keylen + IEEE80211_MICBUF_SIZE; if (!addgroupflags(&hk, k)) hk.keyFlags |= KEY_FLAG_PAIRWISE; break; case IEEE80211_CIPHER_AES_CCM: hk.keyTypeId = KEY_TYPE_ID_AES; hk.keyLen = k->wk_keylen; if (!addgroupflags(&hk, k)) hk.keyFlags |= KEY_FLAG_PAIRWISE; break; default: /* XXX should not happen */ DPRINTF(sc, MWL_DEBUG_KEYCACHE, "%s: unknown cipher %d\n", __func__, k->wk_cipher->ic_cipher); return 0; } /* * NB: tkip mic keys get copied here too; the layout * just happens to match that in ieee80211_key. */ memcpy(hk.key.aes, k->wk_key, hk.keyLen); /* * Locate address of sta db entry for writing key; * the convention unfortunately is somewhat different * than how net80211, hostapd, and wpa_supplicant think. */ if (vap->iv_opmode == IEEE80211_M_STA) { /* * NB: keys plumbed before the sta reaches AUTH state * will be discarded or written to the wrong sta db * entry because iv_bss is meaningless. This is ok * (right now) because we handle deferred plumbing of * WEP keys when the sta reaches AUTH state. */ macaddr = vap->iv_bss->ni_bssid; if ((k->wk_flags & IEEE80211_KEY_GROUP) == 0) { /* XXX plumb to local sta db too for static key wep */ mwl_hal_keyset(hvap, &hk, vap->iv_myaddr); } } else if (vap->iv_opmode == IEEE80211_M_WDS && vap->iv_state != IEEE80211_S_RUN) { /* * Prior to RUN state a WDS vap will not it's BSS node * setup so we will plumb the key to the wrong mac * address (it'll be our local address). Workaround * this for the moment by grabbing the correct address. */ macaddr = vap->iv_des_bssid; } else if ((k->wk_flags & GRPXMIT) == GRPXMIT) macaddr = vap->iv_myaddr; else macaddr = mac; KEYPRINTF(sc, &hk, macaddr); return (mwl_hal_keyset(hvap, &hk, macaddr) == 0); #undef IEEE80211_IS_STATICKEY #undef GRPXMIT } /* * Set the multicast filter contents into the hardware. * XXX f/w has no support; just defer to the os. */ static void mwl_setmcastfilter(struct mwl_softc *sc) { #if 0 struct ether_multi *enm; struct ether_multistep estep; uint8_t macs[IEEE80211_ADDR_LEN*MWL_HAL_MCAST_MAX];/* XXX stack use */ uint8_t *mp; int nmc; mp = macs; nmc = 0; ETHER_FIRST_MULTI(estep, &sc->sc_ec, enm); while (enm != NULL) { /* XXX Punt on ranges. */ if (nmc == MWL_HAL_MCAST_MAX || !IEEE80211_ADDR_EQ(enm->enm_addrlo, enm->enm_addrhi)) { ifp->if_flags |= IFF_ALLMULTI; return; } IEEE80211_ADDR_COPY(mp, enm->enm_addrlo); mp += IEEE80211_ADDR_LEN, nmc++; ETHER_NEXT_MULTI(estep, enm); } ifp->if_flags &= ~IFF_ALLMULTI; mwl_hal_setmcast(sc->sc_mh, nmc, macs); #endif } static int mwl_mode_init(struct mwl_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct mwl_hal *mh = sc->sc_mh; mwl_hal_setpromisc(mh, ic->ic_promisc > 0); mwl_setmcastfilter(sc); return 0; } /* * Callback from the 802.11 layer after a multicast state change. 
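 *
 * The (disabled) filter construction in mwl_setmcastfilter above
 * copies each multicast address into a flat array and falls back to
 * ALLMULTI once the firmware limit is hit or an address range is
 * seen.  A minimal standalone sketch of that bounded-list-or-allmulti
 * pattern, with made-up addresses and an assumed limit (not the real
 * MWL_HAL_MCAST_MAX):
 *
 *     #include <stdio.h>
 *     #include <string.h>
 *
 *     #define ADDR_LEN  6
 *     #define MCAST_MAX 2            // assumed limit, illustration only
 *
 *     int
 *     main(void)
 *     {
 *             static const unsigned char in[3][ADDR_LEN] = {
 *                     { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
 *                     { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x02 },
 *                     { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x03 },
 *             };
 *             unsigned char macs[MCAST_MAX * ADDR_LEN];
 *             int i, nmc = 0, allmulti = 0;
 *
 *             for (i = 0; i < 3; i++) {
 *                     if (nmc == MCAST_MAX) {
 *                             allmulti = 1;  // open the filter instead
 *                             break;
 *                     }
 *                     memcpy(&macs[nmc * ADDR_LEN], in[i], ADDR_LEN);
 *                     nmc++;
 *             }
 *             printf("programmed %d entries, allmulti=%d\n", nmc, allmulti);
 *             return (0);
 *     }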
*/ static void mwl_update_mcast(struct ieee80211com *ic) { struct mwl_softc *sc = ic->ic_softc; mwl_setmcastfilter(sc); } /* * Callback from the 802.11 layer after a promiscuous mode change. * Note this interface does not check the operating mode as this * is an internal callback and we are expected to honor the current * state (e.g. this is used for setting the interface in promiscuous * mode when operating in hostap mode to do ACS). */ static void mwl_update_promisc(struct ieee80211com *ic) { struct mwl_softc *sc = ic->ic_softc; mwl_hal_setpromisc(sc->sc_mh, ic->ic_promisc > 0); } /* * Callback from the 802.11 layer to update the slot time * based on the current setting. We use it to notify the * firmware of ERP changes and the f/w takes care of things * like slot time and preamble. */ static void mwl_updateslot(struct ieee80211com *ic) { struct mwl_softc *sc = ic->ic_softc; struct mwl_hal *mh = sc->sc_mh; int prot; /* NB: can be called early; suppress needless cmds */ if (!sc->sc_running) return; /* * Calculate the ERP flags. The firwmare will use * this to carry out the appropriate measures. */ prot = 0; if (IEEE80211_IS_CHAN_ANYG(ic->ic_curchan)) { if ((ic->ic_flags & IEEE80211_F_SHSLOT) == 0) prot |= IEEE80211_ERP_NON_ERP_PRESENT; if (ic->ic_flags & IEEE80211_F_USEPROT) prot |= IEEE80211_ERP_USE_PROTECTION; if (ic->ic_flags & IEEE80211_F_USEBARKER) prot |= IEEE80211_ERP_LONG_PREAMBLE; } DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x %s slot, (prot 0x%x ic_flags 0x%x)\n", __func__, ic->ic_curchan->ic_freq, ic->ic_curchan->ic_flags, ic->ic_flags & IEEE80211_F_SHSLOT ? "short" : "long", prot, ic->ic_flags); mwl_hal_setgprot(mh, prot); } /* * Setup the beacon frame. */ static int mwl_beacon_setup(struct ieee80211vap *vap) { struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap; struct ieee80211_node *ni = vap->iv_bss; struct mbuf *m; m = ieee80211_beacon_alloc(ni); if (m == NULL) return ENOBUFS; mwl_hal_setbeacon(hvap, mtod(m, const void *), m->m_len); m_free(m); return 0; } /* * Update the beacon frame in response to a change. */ static void mwl_beacon_update(struct ieee80211vap *vap, int item) { struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap; struct ieee80211com *ic = vap->iv_ic; KASSERT(hvap != NULL, ("no beacon")); switch (item) { case IEEE80211_BEACON_ERP: mwl_updateslot(ic); break; case IEEE80211_BEACON_HTINFO: mwl_hal_setnprotmode(hvap, MS(ic->ic_curhtprotmode, IEEE80211_HTINFO_OPMODE)); break; case IEEE80211_BEACON_CAPS: case IEEE80211_BEACON_WME: case IEEE80211_BEACON_APPIE: case IEEE80211_BEACON_CSA: break; case IEEE80211_BEACON_TIM: /* NB: firmware always forms TIM */ return; } /* XXX retain beacon frame and update */ mwl_beacon_setup(vap); } static void mwl_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { bus_addr_t *paddr = (bus_addr_t*) arg; KASSERT(error == 0, ("error %u on bus_dma callback", error)); *paddr = segs->ds_addr; } #ifdef MWL_HOST_PS_SUPPORT /* * Handle power save station occupancy changes. */ static void mwl_update_ps(struct ieee80211vap *vap, int nsta) { struct mwl_vap *mvp = MWL_VAP(vap); if (nsta == 0 || mvp->mv_last_ps_sta == 0) mwl_hal_setpowersave_bss(mvp->mv_hvap, nsta); mvp->mv_last_ps_sta = nsta; } /* * Handle associated station power save state changes. 
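 *
 * As with mwl_update_ps() above, the firmware only needs to hear
 * about transitions, not about every recount of stations in power
 * save.  A minimal standalone sketch of that edge test (the "tell the
 * firmware" step is just a printf here):
 *
 *     #include <stdio.h>
 *
 *     static int last_ps_sta;        // mirrors mv_last_ps_sta
 *
 *     static void
 *     update_ps(int nsta)
 *     {
 *             if (nsta == 0 || last_ps_sta == 0)
 *                     printf("notify fw: %d sta in ps\n", nsta);
 *             last_ps_sta = nsta;
 *     }
 *
 *     int
 *     main(void)
 *     {
 *             update_ps(1);          // 0 -> 1: notify
 *             update_ps(2);          // 1 -> 2: quiet
 *             update_ps(0);          // 2 -> 0: notify
 *             return (0);
 *     }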
*/ static int mwl_set_tim(struct ieee80211_node *ni, int set) { struct ieee80211vap *vap = ni->ni_vap; struct mwl_vap *mvp = MWL_VAP(vap); if (mvp->mv_set_tim(ni, set)) { /* NB: state change */ mwl_hal_setpowersave_sta(mvp->mv_hvap, IEEE80211_AID(ni->ni_associd), set); return 1; } else return 0; } #endif /* MWL_HOST_PS_SUPPORT */ static int mwl_desc_setup(struct mwl_softc *sc, const char *name, struct mwl_descdma *dd, int nbuf, size_t bufsize, int ndesc, size_t descsize) { uint8_t *ds; int error; DPRINTF(sc, MWL_DEBUG_RESET, "%s: %s DMA: %u bufs (%ju) %u desc/buf (%ju)\n", __func__, name, nbuf, (uintmax_t) bufsize, ndesc, (uintmax_t) descsize); dd->dd_name = name; dd->dd_desc_len = nbuf * ndesc * descsize; /* * Setup DMA descriptor area. */ error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ PAGE_SIZE, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ dd->dd_desc_len, /* maxsize */ 1, /* nsegments */ dd->dd_desc_len, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &dd->dd_dmat); if (error != 0) { device_printf(sc->sc_dev, "cannot allocate %s DMA tag\n", dd->dd_name); return error; } /* allocate descriptors */ error = bus_dmamem_alloc(dd->dd_dmat, (void**) &dd->dd_desc, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &dd->dd_dmamap); if (error != 0) { device_printf(sc->sc_dev, "unable to alloc memory for %u %s descriptors, " "error %u\n", nbuf * ndesc, dd->dd_name, error); goto fail1; } error = bus_dmamap_load(dd->dd_dmat, dd->dd_dmamap, dd->dd_desc, dd->dd_desc_len, mwl_load_cb, &dd->dd_desc_paddr, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "unable to map %s descriptors, error %u\n", dd->dd_name, error); goto fail2; } ds = dd->dd_desc; memset(ds, 0, dd->dd_desc_len); DPRINTF(sc, MWL_DEBUG_RESET, "%s: %s DMA map: %p (%lu) -> 0x%jx (%lu)\n", __func__, dd->dd_name, ds, (u_long) dd->dd_desc_len, (uintmax_t) dd->dd_desc_paddr, /*XXX*/ (u_long) dd->dd_desc_len); return 0; fail2: bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); fail1: bus_dma_tag_destroy(dd->dd_dmat); memset(dd, 0, sizeof(*dd)); return error; #undef DS2PHYS } static void mwl_desc_cleanup(struct mwl_softc *sc, struct mwl_descdma *dd) { bus_dmamap_unload(dd->dd_dmat, dd->dd_dmamap); bus_dmamem_free(dd->dd_dmat, dd->dd_desc, dd->dd_dmamap); bus_dma_tag_destroy(dd->dd_dmat); memset(dd, 0, sizeof(*dd)); } /* * Construct a tx q's free list. The order of entries on * the list must reflect the physical layout of tx descriptors * because the firmware pre-fetches descriptors. * * XXX might be better to use indices into the buffer array. 
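 *
 * The DS2PHYS() macro defined just below keeps that tie between list
 * order and physical layout: a descriptor's bus address is the base
 * bus address of the DMA block plus the descriptor's byte offset from
 * the block's virtual base.  A minimal standalone sketch of the
 * arithmetic, with made-up sizes and addresses (not values the
 * hardware would really use):
 *
 *     #include <stdio.h>
 *     #include <stdint.h>
 *
 *     int
 *     main(void)
 *     {
 *             char block[4 * 32];            // 4 fake descriptors, 32 bytes each
 *             uint64_t paddr = 0x1f000000;   // pretend bus address of block[0]
 *             char *ds = &block[2 * 32];     // third descriptor
 *
 *             printf("desc 2 -> bus 0x%llx\n",
 *                 (unsigned long long)(paddr + (ds - block)));
 *             return (0);
 *     }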
*/ static void mwl_txq_reset(struct mwl_softc *sc, struct mwl_txq *txq) { struct mwl_txbuf *bf; int i; bf = txq->dma.dd_bufptr; STAILQ_INIT(&txq->free); for (i = 0; i < mwl_txbuf; i++, bf++) STAILQ_INSERT_TAIL(&txq->free, bf, bf_list); txq->nfree = i; } #define DS2PHYS(_dd, _ds) \ ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) static int mwl_txdma_setup(struct mwl_softc *sc, struct mwl_txq *txq) { int error, bsize, i; struct mwl_txbuf *bf; struct mwl_txdesc *ds; error = mwl_desc_setup(sc, "tx", &txq->dma, mwl_txbuf, sizeof(struct mwl_txbuf), MWL_TXDESC, sizeof(struct mwl_txdesc)); if (error != 0) return error; /* allocate and setup tx buffers */ bsize = mwl_txbuf * sizeof(struct mwl_txbuf); bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO); if (bf == NULL) { device_printf(sc->sc_dev, "malloc of %u tx buffers failed\n", mwl_txbuf); return ENOMEM; } txq->dma.dd_bufptr = bf; ds = txq->dma.dd_desc; for (i = 0; i < mwl_txbuf; i++, bf++, ds += MWL_TXDESC) { bf->bf_desc = ds; bf->bf_daddr = DS2PHYS(&txq->dma, ds); error = bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &bf->bf_dmamap); if (error != 0) { device_printf(sc->sc_dev, "unable to create dmamap for tx " "buffer %u, error %u\n", i, error); return error; } } mwl_txq_reset(sc, txq); return 0; } static void mwl_txdma_cleanup(struct mwl_softc *sc, struct mwl_txq *txq) { struct mwl_txbuf *bf; int i; bf = txq->dma.dd_bufptr; for (i = 0; i < mwl_txbuf; i++, bf++) { KASSERT(bf->bf_m == NULL, ("mbuf on free list")); KASSERT(bf->bf_node == NULL, ("node on free list")); if (bf->bf_dmamap != NULL) bus_dmamap_destroy(sc->sc_dmat, bf->bf_dmamap); } STAILQ_INIT(&txq->free); txq->nfree = 0; if (txq->dma.dd_bufptr != NULL) { free(txq->dma.dd_bufptr, M_MWLDEV); txq->dma.dd_bufptr = NULL; } if (txq->dma.dd_desc_len != 0) mwl_desc_cleanup(sc, &txq->dma); } static int mwl_rxdma_setup(struct mwl_softc *sc) { int error, jumbosize, bsize, i; struct mwl_rxbuf *bf; struct mwl_jumbo *rbuf; struct mwl_rxdesc *ds; caddr_t data; error = mwl_desc_setup(sc, "rx", &sc->sc_rxdma, mwl_rxdesc, sizeof(struct mwl_rxbuf), 1, sizeof(struct mwl_rxdesc)); if (error != 0) return error; /* * Receive is done to a private pool of jumbo buffers. * This allows us to attach to mbuf's and avoid re-mapping * memory on each rx we post. We allocate a large chunk * of memory and manage it in the driver. The mbuf free * callback method is used to reclaim frames after sending * them up the stack. By default we allocate 2x the number of * rx descriptors configured so we have some slop to hold * us while frames are processed. 
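 *
 * A minimal standalone sketch of the sizing done just below, with
 * assumed tunables (the real mwl_rxdesc/mwl_rxbuf defaults and
 * MWL_AGGR_SIZE may differ):
 *
 *     #include <stdio.h>
 *
 *     #define PAGE      4096
 *     #define AGGR_SIZE 3800         // stand-in for MWL_AGGR_SIZE
 *
 *     int
 *     main(void)
 *     {
 *             int rxdesc = 256, rxbuf = 2 * rxdesc;  // 2x slop, as above
 *             int jumbo = ((AGGR_SIZE + PAGE - 1) / PAGE) * PAGE;  // roundup()
 *
 *             printf("jumbo %d bytes, pool %d bytes\n", jumbo, rxbuf * jumbo);
 *             return (0);
 *     }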
*/ if (mwl_rxbuf < 2*mwl_rxdesc) { device_printf(sc->sc_dev, "too few rx dma buffers (%d); increasing to %d\n", mwl_rxbuf, 2*mwl_rxdesc); mwl_rxbuf = 2*mwl_rxdesc; } jumbosize = roundup(MWL_AGGR_SIZE, PAGE_SIZE); sc->sc_rxmemsize = mwl_rxbuf*jumbosize; error = bus_dma_tag_create(sc->sc_dmat, /* parent */ PAGE_SIZE, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sc->sc_rxmemsize, /* maxsize */ 1, /* nsegments */ sc->sc_rxmemsize, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockarg */ &sc->sc_rxdmat); if (error != 0) { device_printf(sc->sc_dev, "could not create rx DMA tag\n"); return error; } error = bus_dmamem_alloc(sc->sc_rxdmat, (void**) &sc->sc_rxmem, BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &sc->sc_rxmap); if (error != 0) { device_printf(sc->sc_dev, "could not alloc %ju bytes of rx DMA memory\n", (uintmax_t) sc->sc_rxmemsize); return error; } error = bus_dmamap_load(sc->sc_rxdmat, sc->sc_rxmap, sc->sc_rxmem, sc->sc_rxmemsize, mwl_load_cb, &sc->sc_rxmem_paddr, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "could not load rx DMA map\n"); return error; } /* * Allocate rx buffers and set them up. */ bsize = mwl_rxdesc * sizeof(struct mwl_rxbuf); bf = malloc(bsize, M_MWLDEV, M_NOWAIT | M_ZERO); if (bf == NULL) { device_printf(sc->sc_dev, "malloc of %u rx buffers failed\n", bsize); return error; } sc->sc_rxdma.dd_bufptr = bf; STAILQ_INIT(&sc->sc_rxbuf); ds = sc->sc_rxdma.dd_desc; for (i = 0; i < mwl_rxdesc; i++, bf++, ds++) { bf->bf_desc = ds; bf->bf_daddr = DS2PHYS(&sc->sc_rxdma, ds); /* pre-assign dma buffer */ bf->bf_data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize); /* NB: tail is intentional to preserve descriptor order */ STAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); } /* * Place remainder of dma memory buffers on the free list. 
*/ SLIST_INIT(&sc->sc_rxfree); for (; i < mwl_rxbuf; i++) { data = ((uint8_t *)sc->sc_rxmem) + (i*jumbosize); rbuf = MWL_JUMBO_DATA2BUF(data); SLIST_INSERT_HEAD(&sc->sc_rxfree, rbuf, next); sc->sc_nrxfree++; } return 0; } #undef DS2PHYS static void mwl_rxdma_cleanup(struct mwl_softc *sc) { if (sc->sc_rxmem_paddr != 0) { bus_dmamap_unload(sc->sc_rxdmat, sc->sc_rxmap); sc->sc_rxmem_paddr = 0; } if (sc->sc_rxmem != NULL) { bus_dmamem_free(sc->sc_rxdmat, sc->sc_rxmem, sc->sc_rxmap); sc->sc_rxmem = NULL; } if (sc->sc_rxdma.dd_bufptr != NULL) { free(sc->sc_rxdma.dd_bufptr, M_MWLDEV); sc->sc_rxdma.dd_bufptr = NULL; } if (sc->sc_rxdma.dd_desc_len != 0) mwl_desc_cleanup(sc, &sc->sc_rxdma); } static int mwl_dma_setup(struct mwl_softc *sc) { int error, i; error = mwl_rxdma_setup(sc); if (error != 0) { mwl_rxdma_cleanup(sc); return error; } for (i = 0; i < MWL_NUM_TX_QUEUES; i++) { error = mwl_txdma_setup(sc, &sc->sc_txq[i]); if (error != 0) { mwl_dma_cleanup(sc); return error; } } return 0; } static void mwl_dma_cleanup(struct mwl_softc *sc) { int i; for (i = 0; i < MWL_NUM_TX_QUEUES; i++) mwl_txdma_cleanup(sc, &sc->sc_txq[i]); mwl_rxdma_cleanup(sc); } static struct ieee80211_node * mwl_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ieee80211com *ic = vap->iv_ic; struct mwl_softc *sc = ic->ic_softc; const size_t space = sizeof(struct mwl_node); struct mwl_node *mn; mn = malloc(space, M_80211_NODE, M_NOWAIT|M_ZERO); if (mn == NULL) { /* XXX stat+msg */ return NULL; } DPRINTF(sc, MWL_DEBUG_NODE, "%s: mn %p\n", __func__, mn); return &mn->mn_node; } static void mwl_node_cleanup(struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; struct mwl_softc *sc = ic->ic_softc; struct mwl_node *mn = MWL_NODE(ni); DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p ic %p staid %d\n", __func__, ni, ni->ni_ic, mn->mn_staid); if (mn->mn_staid != 0) { struct ieee80211vap *vap = ni->ni_vap; if (mn->mn_hvap != NULL) { if (vap->iv_opmode == IEEE80211_M_STA) mwl_hal_delstation(mn->mn_hvap, vap->iv_myaddr); else mwl_hal_delstation(mn->mn_hvap, ni->ni_macaddr); } /* * NB: legacy WDS peer sta db entry is installed using * the associate ap's hvap; use it again to delete it. * XXX can vap be NULL? */ else if (vap->iv_opmode == IEEE80211_M_WDS && MWL_VAP(vap)->mv_ap_hvap != NULL) mwl_hal_delstation(MWL_VAP(vap)->mv_ap_hvap, ni->ni_macaddr); delstaid(sc, mn->mn_staid); mn->mn_staid = 0; } sc->sc_node_cleanup(ni); } /* * Reclaim rx dma buffers from packets sitting on the ampdu * reorder queue for a station. We replace buffers with a * system cluster (if available). */ static void mwl_ampdu_rxdma_reclaim(struct ieee80211_rx_ampdu *rap) { #if 0 int i, n, off; struct mbuf *m; void *cl; n = rap->rxa_qframes; for (i = 0; i < rap->rxa_wnd && n > 0; i++) { m = rap->rxa_m[i]; if (m == NULL) continue; n--; /* our dma buffers have a well-known free routine */ if ((m->m_flags & M_EXT) == 0 || m->m_ext.ext_free != mwl_ext_free) continue; /* * Try to allocate a cluster and move the data. */ off = m->m_data - m->m_ext.ext_buf; if (off + m->m_pkthdr.len > MCLBYTES) { /* XXX no AMSDU for now */ continue; } cl = pool_cache_get_paddr(&mclpool_cache, 0, &m->m_ext.ext_paddr); if (cl != NULL) { /* * Copy the existing data to the cluster, remove * the rx dma buffer, and attach the cluster in * its place. Note we preserve the offset to the * data so frames being bridged can still prepend * their headers without adding another mbuf. 
*/ memcpy((caddr_t) cl + off, m->m_data, m->m_pkthdr.len); MEXTREMOVE(m); MEXTADD(m, cl, MCLBYTES, 0, NULL, &mclpool_cache); /* setup mbuf like _MCLGET does */ m->m_flags |= M_CLUSTER | M_EXT_RW; _MOWNERREF(m, M_EXT | M_CLUSTER); /* NB: m_data is clobbered by MEXTADDR, adjust */ m->m_data += off; } } #endif } /* * Callback to reclaim resources. We first let the * net80211 layer do it's thing, then if we are still * blocked by a lack of rx dma buffers we walk the ampdu * reorder q's to reclaim buffers by copying to a system * cluster. */ static void mwl_node_drain(struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; struct mwl_softc *sc = ic->ic_softc; struct mwl_node *mn = MWL_NODE(ni); DPRINTF(sc, MWL_DEBUG_NODE, "%s: ni %p vap %p staid %d\n", __func__, ni, ni->ni_vap, mn->mn_staid); /* NB: call up first to age out ampdu q's */ sc->sc_node_drain(ni); /* XXX better to not check low water mark? */ if (sc->sc_rxblocked && mn->mn_staid != 0 && (ni->ni_flags & IEEE80211_NODE_HT)) { uint8_t tid; /* * Walk the reorder q and reclaim rx dma buffers by copying * the packet contents into clusters. */ for (tid = 0; tid < WME_NUM_TID; tid++) { struct ieee80211_rx_ampdu *rap; rap = &ni->ni_rx_ampdu[tid]; if ((rap->rxa_flags & IEEE80211_AGGR_XCHGPEND) == 0) continue; if (rap->rxa_qframes) mwl_ampdu_rxdma_reclaim(rap); } } } static void mwl_node_getsignal(const struct ieee80211_node *ni, int8_t *rssi, int8_t *noise) { *rssi = ni->ni_ic->ic_node_getrssi(ni); #ifdef MWL_ANT_INFO_SUPPORT #if 0 /* XXX need to smooth data */ *noise = -MWL_NODE_CONST(ni)->mn_ai.nf; #else *noise = -95; /* XXX */ #endif #else *noise = -95; /* XXX */ #endif } /* * Convert Hardware per-antenna rssi info to common format: * Let a1, a2, a3 represent the amplitudes per chain * Let amax represent max[a1, a2, a3] * Rssi1_dBm = RSSI_dBm + 20*log10(a1/amax) * Rssi1_dBm = RSSI_dBm + 20*log10(a1) - 20*log10(amax) * We store a table that is 4*20*log10(idx) - the extra 4 is to store or * maintain some extra precision. * * Values are stored in .5 db format capped at 127. */ static void mwl_node_getmimoinfo(const struct ieee80211_node *ni, struct ieee80211_mimo_info *mi) { #define CVT(_dst, _src) do { \ (_dst) = rssi + ((logdbtbl[_src] - logdbtbl[rssi_max]) >> 2); \ (_dst) = (_dst) > 64 ? 127 : ((_dst) << 1); \ } while (0) static const int8_t logdbtbl[32] = { 0, 0, 24, 38, 48, 56, 62, 68, 72, 76, 80, 83, 86, 89, 92, 94, 96, 98, 100, 102, 104, 106, 107, 109, 110, 112, 113, 115, 116, 117, 118, 119 }; const struct mwl_node *mn = MWL_NODE_CONST(ni); uint8_t rssi = mn->mn_ai.rsvd1/2; /* XXX */ uint32_t rssi_max; rssi_max = mn->mn_ai.rssi_a; if (mn->mn_ai.rssi_b > rssi_max) rssi_max = mn->mn_ai.rssi_b; if (mn->mn_ai.rssi_c > rssi_max) rssi_max = mn->mn_ai.rssi_c; CVT(mi->rssi[0], mn->mn_ai.rssi_a); CVT(mi->rssi[1], mn->mn_ai.rssi_b); CVT(mi->rssi[2], mn->mn_ai.rssi_c); mi->noise[0] = mn->mn_ai.nf_a; mi->noise[1] = mn->mn_ai.nf_b; mi->noise[2] = mn->mn_ai.nf_c; #undef CVT } static __inline void * mwl_getrxdma(struct mwl_softc *sc) { struct mwl_jumbo *buf; void *data; /* * Allocate from jumbo pool. 
*/ MWL_RXFREE_LOCK(sc); buf = SLIST_FIRST(&sc->sc_rxfree); if (buf == NULL) { DPRINTF(sc, MWL_DEBUG_ANY, "%s: out of rx dma buffers\n", __func__); sc->sc_stats.mst_rx_nodmabuf++; data = NULL; } else { SLIST_REMOVE_HEAD(&sc->sc_rxfree, next); sc->sc_nrxfree--; data = MWL_JUMBO_BUF2DATA(buf); } MWL_RXFREE_UNLOCK(sc); return data; } static __inline void mwl_putrxdma(struct mwl_softc *sc, void *data) { struct mwl_jumbo *buf; /* XXX bounds check data */ MWL_RXFREE_LOCK(sc); buf = MWL_JUMBO_DATA2BUF(data); SLIST_INSERT_HEAD(&sc->sc_rxfree, buf, next); sc->sc_nrxfree++; MWL_RXFREE_UNLOCK(sc); } static int mwl_rxbuf_init(struct mwl_softc *sc, struct mwl_rxbuf *bf) { struct mwl_rxdesc *ds; ds = bf->bf_desc; if (bf->bf_data == NULL) { bf->bf_data = mwl_getrxdma(sc); if (bf->bf_data == NULL) { /* mark descriptor to be skipped */ ds->RxControl = EAGLE_RXD_CTRL_OS_OWN; /* NB: don't need PREREAD */ MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREWRITE); sc->sc_stats.mst_rxbuf_failed++; return ENOMEM; } } /* * NB: DMA buffer contents is known to be unmodified * so there's no need to flush the data cache. */ /* * Setup descriptor. */ ds->QosCtrl = 0; ds->RSSI = 0; ds->Status = EAGLE_RXD_STATUS_IDLE; ds->Channel = 0; ds->PktLen = htole16(MWL_AGGR_SIZE); ds->SQ2 = 0; ds->pPhysBuffData = htole32(MWL_JUMBO_DMA_ADDR(sc, bf->bf_data)); /* NB: don't touch pPhysNext, set once */ ds->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN; MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return 0; } static void mwl_ext_free(struct mbuf *m, void *data, void *arg) { struct mwl_softc *sc = arg; /* XXX bounds check data */ mwl_putrxdma(sc, data); /* * If we were previously blocked by a lack of rx dma buffers * check if we now have enough to restart rx interrupt handling. * NB: we know we are called at splvm which is above splnet. */ if (sc->sc_rxblocked && sc->sc_nrxfree > mwl_rxdmalow) { sc->sc_rxblocked = 0; mwl_hal_intrset(sc->sc_mh, sc->sc_imask); } } struct mwl_frame_bar { u_int8_t i_fc[2]; u_int8_t i_dur[2]; u_int8_t i_ra[IEEE80211_ADDR_LEN]; u_int8_t i_ta[IEEE80211_ADDR_LEN]; /* ctl, seq, FCS */ } __packed; /* * Like ieee80211_anyhdrsize, but handles BAR frames * specially so the logic below to piece the 802.11 * header together works. */ static __inline int mwl_anyhdrsize(const void *data) { const struct ieee80211_frame *wh = data; if ((wh->i_fc[0]&IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL) { switch (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) { case IEEE80211_FC0_SUBTYPE_CTS: case IEEE80211_FC0_SUBTYPE_ACK: return sizeof(struct ieee80211_frame_ack); case IEEE80211_FC0_SUBTYPE_BAR: return sizeof(struct mwl_frame_bar); } return sizeof(struct ieee80211_frame_min); } else return ieee80211_hdrsize(data); } static void mwl_handlemicerror(struct ieee80211com *ic, const uint8_t *data) { const struct ieee80211_frame *wh; struct ieee80211_node *ni; wh = (const struct ieee80211_frame *)(data + sizeof(uint16_t)); ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh); if (ni != NULL) { ieee80211_notify_michael_failure(ni->ni_vap, wh, 0); ieee80211_free_node(ni); } } /* * Convert hardware signal strength to rssi. The value * provided by the device has the noise floor added in; * we need to compensate for this but we don't have that * so we use a fixed value. * * The offset of 8 is good for both 2.4 and 5GHz. The LNA * offset is already set as part of the initial gain. This * will give at least +/- 3dB for 2.4GHz and +/- 5dB for 5GHz. 
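 *
 * A minimal standalone sketch of the conversion done by cvtrssi()
 * below, for one arbitrary sample value (the +8 offset, the 87 pivot
 * and the 0..127 clamp follow the code; results are in .5 dB steps):
 *
 *     #include <stdio.h>
 *
 *     int
 *     main(void)
 *     {
 *             unsigned char ssi = 30;        // arbitrary value from an rx descriptor
 *             int rssi = (int)ssi + 8;       // fixed noise floor compensation
 *
 *             rssi = 2 * (87 - rssi);        // .5 dB units
 *             if (rssi < 0)
 *                     rssi = 0;
 *             else if (rssi > 127)
 *                     rssi = 127;
 *             printf("ssi %u -> rssi %d\n", ssi, rssi);
 *             return (0);
 *     }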
*/ static __inline int cvtrssi(uint8_t ssi) { int rssi = (int) ssi + 8; /* XXX hack guess until we have a real noise floor */ rssi = 2*(87 - rssi); /* NB: .5 dBm units */ return (rssi < 0 ? 0 : rssi > 127 ? 127 : rssi); } static void mwl_rx_proc(void *arg, int npending) { struct mwl_softc *sc = arg; struct ieee80211com *ic = &sc->sc_ic; struct mwl_rxbuf *bf; struct mwl_rxdesc *ds; struct mbuf *m; struct ieee80211_qosframe *wh; struct ieee80211_qosframe_addr4 *wh4; struct ieee80211_node *ni; struct mwl_node *mn; int off, len, hdrlen, pktlen, rssi, ntodo; uint8_t *data, status; void *newdata; int16_t nf; DPRINTF(sc, MWL_DEBUG_RX_PROC, "%s: pending %u rdptr 0x%x wrptr 0x%x\n", __func__, npending, RD4(sc, sc->sc_hwspecs.rxDescRead), RD4(sc, sc->sc_hwspecs.rxDescWrite)); nf = -96; /* XXX */ bf = sc->sc_rxnext; for (ntodo = mwl_rxquota; ntodo > 0; ntodo--) { if (bf == NULL) bf = STAILQ_FIRST(&sc->sc_rxbuf); ds = bf->bf_desc; data = bf->bf_data; if (data == NULL) { /* * If data allocation failed previously there * will be no buffer; try again to re-populate it. * Note the firmware will not advance to the next * descriptor with a dma buffer so we must mimic * this or we'll get out of sync. */ DPRINTF(sc, MWL_DEBUG_ANY, "%s: rx buf w/o dma memory\n", __func__); (void) mwl_rxbuf_init(sc, bf); sc->sc_stats.mst_rx_dmabufmissing++; break; } MWL_RXDESC_SYNC(sc, ds, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (ds->RxControl != EAGLE_RXD_CTRL_DMA_OWN) break; #ifdef MWL_DEBUG if (sc->sc_debug & MWL_DEBUG_RECV_DESC) mwl_printrxbuf(bf, 0); #endif status = ds->Status; if (status & EAGLE_RXD_STATUS_DECRYPT_ERR_MASK) { counter_u64_add(ic->ic_ierrors, 1); sc->sc_stats.mst_rx_crypto++; /* * NB: Check EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR * for backwards compatibility. */ if (status != EAGLE_RXD_STATUS_GENERAL_DECRYPT_ERR && (status & EAGLE_RXD_STATUS_TKIP_MIC_DECRYPT_ERR)) { /* * MIC error, notify upper layers. */ bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD); mwl_handlemicerror(ic, data); sc->sc_stats.mst_rx_tkipmic++; } /* XXX too painful to tap packets */ goto rx_next; } /* * Sync the data buffer. */ len = le16toh(ds->PktLen); bus_dmamap_sync(sc->sc_rxdmat, sc->sc_rxmap, BUS_DMASYNC_POSTREAD); /* * The 802.11 header is provided all or in part at the front; * use it to calculate the true size of the header that we'll * construct below. We use this to figure out where to copy * payload prior to constructing the header. */ hdrlen = mwl_anyhdrsize(data + sizeof(uint16_t)); off = sizeof(uint16_t) + sizeof(struct ieee80211_frame_addr4); /* calculate rssi early so we can re-use for each aggregate */ rssi = cvtrssi(ds->RSSI); pktlen = hdrlen + (len - off); /* * NB: we know our frame is at least as large as * IEEE80211_MIN_LEN because there is a 4-address * frame at the front. Hence there's no need to * vet the packet length. If the frame in fact * is too small it should be discarded at the * net80211 layer. */ /* * Attach dma buffer to an mbuf. We tried * doing this based on the packet size (i.e. * copying small packets) but it turns out to * be a net loss. The tradeoff might be system * dependent (cache architecture is important). */ MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) { DPRINTF(sc, MWL_DEBUG_ANY, "%s: no rx mbuf\n", __func__); sc->sc_stats.mst_rx_nombuf++; goto rx_next; } /* * Acquire the replacement dma buffer before * processing the frame. 
If we're out of dma * buffers we disable rx interrupts and wait * for the free pool to reach mlw_rxdmalow buffers * before starting to do work again. If the firmware * runs out of descriptors then it will toss frames * which is better than our doing it as that can * starve our processing. It is also important that * we always process rx'd frames in case they are * A-MPDU as otherwise the host's view of the BA * window may get out of sync with the firmware. */ newdata = mwl_getrxdma(sc); if (newdata == NULL) { /* NB: stat+msg in mwl_getrxdma */ m_free(m); /* disable RX interrupt and mark state */ mwl_hal_intrset(sc->sc_mh, sc->sc_imask &~ MACREG_A2HRIC_BIT_RX_RDY); sc->sc_rxblocked = 1; ieee80211_drain(ic); /* XXX check rxblocked and immediately start again? */ goto rx_stop; } bf->bf_data = newdata; /* * Attach the dma buffer to the mbuf; * mwl_rxbuf_init will re-setup the rx * descriptor using the replacement dma * buffer we just installed above. */ MEXTADD(m, data, MWL_AGGR_SIZE, mwl_ext_free, data, sc, 0, EXT_NET_DRV); m->m_data += off - hdrlen; m->m_pkthdr.len = m->m_len = pktlen; /* NB: dma buffer assumed read-only */ /* * Piece 802.11 header together. */ wh = mtod(m, struct ieee80211_qosframe *); /* NB: don't need to do this sometimes but ... */ /* XXX special case so we can memcpy after m_devget? */ ovbcopy(data + sizeof(uint16_t), wh, hdrlen); if (IEEE80211_QOS_HAS_SEQ(wh)) { if (IEEE80211_IS_DSTODS(wh)) { wh4 = mtod(m, struct ieee80211_qosframe_addr4*); *(uint16_t *)wh4->i_qos = ds->QosCtrl; } else { *(uint16_t *)wh->i_qos = ds->QosCtrl; } } /* * The f/w strips WEP header but doesn't clear * the WEP bit; mark the packet with M_WEP so * net80211 will treat the data as decrypted. * While here also clear the PWR_MGT bit since * power save is handled by the firmware and * passing this up will potentially cause the * upper layer to put a station in power save * (except when configured with MWL_HOST_PS_SUPPORT). 
*/ if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) m->m_flags |= M_WEP; #ifdef MWL_HOST_PS_SUPPORT wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED; #else wh->i_fc[1] &= ~(IEEE80211_FC1_PROTECTED | IEEE80211_FC1_PWR_MGT); #endif if (ieee80211_radiotap_active(ic)) { struct mwl_rx_radiotap_header *tap = &sc->sc_rx_th; tap->wr_flags = 0; tap->wr_rate = ds->Rate; tap->wr_antsignal = rssi + nf; tap->wr_antnoise = nf; } if (IFF_DUMPPKTS_RECV(sc, wh)) { ieee80211_dump_pkt(ic, mtod(m, caddr_t), len, ds->Rate, rssi); } /* dispatch */ ni = ieee80211_find_rxnode(ic, (const struct ieee80211_frame_min *) wh); if (ni != NULL) { mn = MWL_NODE(ni); #ifdef MWL_ANT_INFO_SUPPORT mn->mn_ai.rssi_a = ds->ai.rssi_a; mn->mn_ai.rssi_b = ds->ai.rssi_b; mn->mn_ai.rssi_c = ds->ai.rssi_c; mn->mn_ai.rsvd1 = rssi; #endif /* tag AMPDU aggregates for reorder processing */ if (ni->ni_flags & IEEE80211_NODE_HT) m->m_flags |= M_AMPDU; (void) ieee80211_input(ni, m, rssi, nf); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi, nf); rx_next: /* NB: ignore ENOMEM so we process more descriptors */ (void) mwl_rxbuf_init(sc, bf); bf = STAILQ_NEXT(bf, bf_list); } rx_stop: sc->sc_rxnext = bf; if (mbufq_first(&sc->sc_snd) != NULL) { /* NB: kick fw; the tx thread may have been preempted */ mwl_hal_txstart(sc->sc_mh, 0); mwl_start(sc); } } static void mwl_txq_init(struct mwl_softc *sc, struct mwl_txq *txq, int qnum) { struct mwl_txbuf *bf, *bn; struct mwl_txdesc *ds; MWL_TXQ_LOCK_INIT(sc, txq); txq->qnum = qnum; txq->txpri = 0; /* XXX */ #if 0 /* NB: q setup by mwl_txdma_setup XXX */ STAILQ_INIT(&txq->free); #endif STAILQ_FOREACH(bf, &txq->free, bf_list) { bf->bf_txq = txq; ds = bf->bf_desc; bn = STAILQ_NEXT(bf, bf_list); if (bn == NULL) bn = STAILQ_FIRST(&txq->free); ds->pPhysNext = htole32(bn->bf_daddr); } STAILQ_INIT(&txq->active); } /* * Setup a hardware data transmit queue for the specified * access control. We record the mapping from ac's * to h/w queues for use by mwl_tx_start. */ static int mwl_tx_setup(struct mwl_softc *sc, int ac, int mvtype) { struct mwl_txq *txq; if (ac >= nitems(sc->sc_ac2q)) { device_printf(sc->sc_dev, "AC %u out of range, max %zu!\n", ac, nitems(sc->sc_ac2q)); return 0; } if (mvtype >= MWL_NUM_TX_QUEUES) { device_printf(sc->sc_dev, "mvtype %u out of range, max %u!\n", mvtype, MWL_NUM_TX_QUEUES); return 0; } txq = &sc->sc_txq[mvtype]; mwl_txq_init(sc, txq, mvtype); sc->sc_ac2q[ac] = txq; return 1; } /* * Update WME parameters for a transmit queue. */ static int mwl_txq_update(struct mwl_softc *sc, int ac) { #define MWL_EXPONENT_TO_VALUE(v) ((1<sc_ic; struct mwl_txq *txq = sc->sc_ac2q[ac]; struct wmeParams *wmep = &ic->ic_wme.wme_chanParams.cap_wmeParams[ac]; struct mwl_hal *mh = sc->sc_mh; int aifs, cwmin, cwmax, txoplim; aifs = wmep->wmep_aifsn; /* XXX in sta mode need to pass log values for cwmin/max */ cwmin = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmin); cwmax = MWL_EXPONENT_TO_VALUE(wmep->wmep_logcwmax); txoplim = wmep->wmep_txopLimit; /* NB: units of 32us */ if (mwl_hal_setedcaparams(mh, txq->qnum, cwmin, cwmax, aifs, txoplim)) { device_printf(sc->sc_dev, "unable to update hardware queue " "parameters for %s traffic!\n", ieee80211_wme_acnames[ac]); return 0; } return 1; #undef MWL_EXPONENT_TO_VALUE } /* * Callback from the 802.11 layer to update WME parameters. */ static int mwl_wme_update(struct ieee80211com *ic) { struct mwl_softc *sc = ic->ic_softc; return !mwl_txq_update(sc, WME_AC_BE) || !mwl_txq_update(sc, WME_AC_BK) || !mwl_txq_update(sc, WME_AC_VI) || !mwl_txq_update(sc, WME_AC_VO) ? 
EIO : 0; } /* * Reclaim resources for a setup queue. */ static void mwl_tx_cleanupq(struct mwl_softc *sc, struct mwl_txq *txq) { /* XXX hal work? */ MWL_TXQ_LOCK_DESTROY(txq); } /* * Reclaim all tx queue resources. */ static void mwl_tx_cleanup(struct mwl_softc *sc) { int i; for (i = 0; i < MWL_NUM_TX_QUEUES; i++) mwl_tx_cleanupq(sc, &sc->sc_txq[i]); } static int mwl_tx_dmasetup(struct mwl_softc *sc, struct mwl_txbuf *bf, struct mbuf *m0) { struct mbuf *m; int error; /* * Load the DMA map so any coalescing is done. This * also calculates the number of descriptors we need. */ error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error == EFBIG) { /* XXX packet requires too many descriptors */ bf->bf_nseg = MWL_TXDESC+1; } else if (error != 0) { sc->sc_stats.mst_tx_busdma++; m_freem(m0); return error; } /* * Discard null packets and check for packets that * require too many TX descriptors. We try to convert * the latter to a cluster. */ if (error == EFBIG) { /* too many desc's, linearize */ sc->sc_stats.mst_tx_linear++; #if MWL_TXDESC > 1 m = m_collapse(m0, M_NOWAIT, MWL_TXDESC); #else m = m_defrag(m0, M_NOWAIT); #endif if (m == NULL) { m_freem(m0); sc->sc_stats.mst_tx_nombuf++; return ENOMEM; } m0 = m; error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m0, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error != 0) { sc->sc_stats.mst_tx_busdma++; m_freem(m0); return error; } KASSERT(bf->bf_nseg <= MWL_TXDESC, ("too many segments after defrag; nseg %u", bf->bf_nseg)); } else if (bf->bf_nseg == 0) { /* null packet, discard */ sc->sc_stats.mst_tx_nodata++; m_freem(m0); return EIO; } DPRINTF(sc, MWL_DEBUG_XMIT, "%s: m %p len %u\n", __func__, m0, m0->m_pkthdr.len); bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREWRITE); bf->bf_m = m0; return 0; } static __inline int mwl_cvtlegacyrate(int rate) { switch (rate) { case 2: return 0; case 4: return 1; case 11: return 2; case 22: return 3; case 44: return 4; case 12: return 5; case 18: return 6; case 24: return 7; case 36: return 8; case 48: return 9; case 72: return 10; case 96: return 11; case 108:return 12; } return 0; } /* * Calculate fixed tx rate information per client state; * this value is suitable for writing to the Format field * of a tx descriptor. */ static uint16_t mwl_calcformat(uint8_t rate, const struct ieee80211_node *ni) { uint16_t fmt; fmt = SM(3, EAGLE_TXD_ANTENNA) | (IEEE80211_IS_CHAN_HT40D(ni->ni_chan) ? EAGLE_TXD_EXTCHAN_LO : EAGLE_TXD_EXTCHAN_HI); if (rate & IEEE80211_RATE_MCS) { /* HT MCS */ fmt |= EAGLE_TXD_FORMAT_HT /* NB: 0x80 implicitly stripped from ucastrate */ | SM(rate, EAGLE_TXD_RATE); /* XXX short/long GI may be wrong; re-check */ if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) { fmt |= EAGLE_TXD_CHW_40 | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40 ? EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG); } else { fmt |= EAGLE_TXD_CHW_20 | (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20 ? EAGLE_TXD_GI_SHORT : EAGLE_TXD_GI_LONG); } } else { /* legacy rate */ fmt |= EAGLE_TXD_FORMAT_LEGACY | SM(mwl_cvtlegacyrate(rate), EAGLE_TXD_RATE) | EAGLE_TXD_CHW_20 /* XXX iv_flags & IEEE80211_F_SHPREAMBLE? */ | (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE ? 
EAGLE_TXD_PREAMBLE_SHORT : EAGLE_TXD_PREAMBLE_LONG); } return fmt; } static int mwl_tx_start(struct mwl_softc *sc, struct ieee80211_node *ni, struct mwl_txbuf *bf, struct mbuf *m0) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = ni->ni_vap; int error, iswep, ismcast; int hdrlen, copyhdrlen, pktlen; struct mwl_txdesc *ds; struct mwl_txq *txq; struct ieee80211_frame *wh; struct mwltxrec *tr; struct mwl_node *mn; uint16_t qos; #if MWL_TXDESC > 1 int i; #endif wh = mtod(m0, struct ieee80211_frame *); iswep = wh->i_fc[1] & IEEE80211_FC1_PROTECTED; ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); hdrlen = ieee80211_anyhdrsize(wh); copyhdrlen = hdrlen; pktlen = m0->m_pkthdr.len; if (IEEE80211_QOS_HAS_SEQ(wh)) { if (IEEE80211_IS_DSTODS(wh)) { qos = *(uint16_t *) (((struct ieee80211_qosframe_addr4 *) wh)->i_qos); copyhdrlen -= sizeof(qos); } else qos = *(uint16_t *) (((struct ieee80211_qosframe *) wh)->i_qos); } else qos = 0; if (iswep) { const struct ieee80211_cipher *cip; struct ieee80211_key *k; /* * Construct the 802.11 header+trailer for an encrypted * frame. The only reason this can fail is because of an * unknown or unsupported cipher/key type. * * NB: we do this even though the firmware will ignore * what we've done for WEP and TKIP as we need the * ExtIV filled in for CCMP and this also adjusts * the headers which simplifies our work below. */ k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { /* * This can happen when the key is yanked after the * frame was queued. Just discard the frame; the * 802.11 layer counts failures and provides * debugging/diagnostics. */ m_freem(m0); return EIO; } /* * Adjust the packet length for the crypto additions * done during encap and any other bits that the f/w * will add later on. */ cip = k->wk_cipher; pktlen += cip->ic_header + cip->ic_miclen + cip->ic_trailer; /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (ieee80211_radiotap_active_vap(vap)) { sc->sc_tx_th.wt_flags = 0; /* XXX */ if (iswep) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; #if 0 sc->sc_tx_th.wt_rate = ds->DataRate; #endif sc->sc_tx_th.wt_txpower = ni->ni_txpower; sc->sc_tx_th.wt_antenna = sc->sc_txantenna; ieee80211_radiotap_tx(vap, m0); } /* * Copy up/down the 802.11 header; the firmware requires * we present a 2-byte payload length followed by a * 4-address header (w/o QoS), followed (optionally) by * any WEP/ExtIV header (but only filled in for CCMP). * We are assured the mbuf has sufficient headroom to * prepend in-place by the setup of ic_headroom in * mwl_attach. */ if (hdrlen < sizeof(struct mwltxrec)) { const int space = sizeof(struct mwltxrec) - hdrlen; if (M_LEADINGSPACE(m0) < space) { /* NB: should never happen */ device_printf(sc->sc_dev, "not enough headroom, need %d found %zd, " "m_flags 0x%x m_len %d\n", space, M_LEADINGSPACE(m0), m0->m_flags, m0->m_len); ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *), m0->m_len, 0, -1); m_freem(m0); sc->sc_stats.mst_tx_noheadroom++; return EIO; } M_PREPEND(m0, space, M_NOWAIT); } tr = mtod(m0, struct mwltxrec *); if (wh != (struct ieee80211_frame *) &tr->wh) ovbcopy(wh, &tr->wh, hdrlen); /* * Note: the "firmware length" is actually the length * of the fully formed "802.11 payload". That is, it's * everything except for the 802.11 header. In particular * this includes all crypto material including the MIC! */ tr->fwlen = htole16(pktlen - hdrlen); /* * Load the DMA map so any coalescing is done. This * also calculates the number of descriptors we need. 
*/ error = mwl_tx_dmasetup(sc, bf, m0); if (error != 0) { /* NB: stat collected in mwl_tx_dmasetup */ DPRINTF(sc, MWL_DEBUG_XMIT, "%s: unable to setup dma\n", __func__); return error; } bf->bf_node = ni; /* NB: held reference */ m0 = bf->bf_m; /* NB: may have changed */ tr = mtod(m0, struct mwltxrec *); wh = (struct ieee80211_frame *)&tr->wh; /* * Formulate tx descriptor. */ ds = bf->bf_desc; txq = bf->bf_txq; ds->QosCtrl = qos; /* NB: already little-endian */ #if MWL_TXDESC == 1 /* * NB: multiframes should be zero because the descriptors * are initialized to zero. This should handle the case * where the driver is built with MWL_TXDESC=1 but we are * using firmware with multi-segment support. */ ds->PktPtr = htole32(bf->bf_segs[0].ds_addr); ds->PktLen = htole16(bf->bf_segs[0].ds_len); #else ds->multiframes = htole32(bf->bf_nseg); ds->PktLen = htole16(m0->m_pkthdr.len); for (i = 0; i < bf->bf_nseg; i++) { ds->PktPtrArray[i] = htole32(bf->bf_segs[i].ds_addr); ds->PktLenArray[i] = htole16(bf->bf_segs[i].ds_len); } #endif /* NB: pPhysNext, DataRate, and SapPktInfo setup once, don't touch */ ds->Format = 0; ds->pad = 0; ds->ack_wcb_addr = 0; mn = MWL_NODE(ni); /* * Select transmit rate. */ switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) { case IEEE80211_FC0_TYPE_MGT: sc->sc_stats.mst_tx_mgmt++; /* fall thru... */ case IEEE80211_FC0_TYPE_CTL: /* NB: assign to BE q to avoid bursting */ ds->TxPriority = MWL_WME_AC_BE; break; case IEEE80211_FC0_TYPE_DATA: if (!ismcast) { const struct ieee80211_txparam *tp = ni->ni_txparms; /* * EAPOL frames get forced to a fixed rate and w/o * aggregation; otherwise check for any fixed rate * for the client (may depend on association state). */ if (m0->m_flags & M_EAPOL) { const struct mwl_vap *mvp = MWL_VAP_CONST(vap); ds->Format = mvp->mv_eapolformat; ds->pad = htole16( EAGLE_TXD_FIXED_RATE | EAGLE_TXD_DONT_AGGR); } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) { /* XXX pre-calculate per node */ ds->Format = htole16( mwl_calcformat(tp->ucastrate, ni)); ds->pad = htole16(EAGLE_TXD_FIXED_RATE); } /* NB: EAPOL frames will never have qos set */ if (qos == 0) ds->TxPriority = txq->qnum; #if MWL_MAXBA > 3 else if (mwl_bastream_match(&mn->mn_ba[3], qos)) ds->TxPriority = mn->mn_ba[3].txq; #endif #if MWL_MAXBA > 2 else if (mwl_bastream_match(&mn->mn_ba[2], qos)) ds->TxPriority = mn->mn_ba[2].txq; #endif #if MWL_MAXBA > 1 else if (mwl_bastream_match(&mn->mn_ba[1], qos)) ds->TxPriority = mn->mn_ba[1].txq; #endif #if MWL_MAXBA > 0 else if (mwl_bastream_match(&mn->mn_ba[0], qos)) ds->TxPriority = mn->mn_ba[0].txq; #endif else ds->TxPriority = txq->qnum; } else ds->TxPriority = txq->qnum; break; default: device_printf(sc->sc_dev, "bogus frame type 0x%x (%s)\n", wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK, __func__); sc->sc_stats.mst_tx_badframetype++; m_freem(m0); return EIO; } if (IFF_DUMPPKTS_XMIT(sc)) ieee80211_dump_pkt(ic, mtod(m0, const uint8_t *)+sizeof(uint16_t), m0->m_len - sizeof(uint16_t), ds->DataRate, -1); MWL_TXQ_LOCK(txq); ds->Status = htole32(EAGLE_TXD_STATUS_FW_OWNED); STAILQ_INSERT_TAIL(&txq->active, bf, bf_list); MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); sc->sc_tx_timer = 5; MWL_TXQ_UNLOCK(txq); return 0; } static __inline int mwl_cvtlegacyrix(int rix) { static const int ieeerates[] = { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 }; return (rix < nitems(ieeerates) ? ieeerates[rix] : 0); } /* * Process completed xmit descriptors from the specified queue. 
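 *
 * On completion the descriptor's Format field carries a firmware rate
 * index; mwl_cvtlegacyrix() above maps it back to the net80211 rate
 * code (.5 Mb/s units) that mwl_cvtlegacyrate() produced on the way
 * down.  A minimal standalone sketch of that lookup, reusing the same
 * table:
 *
 *     #include <stdio.h>
 *
 *     static const int ieeerates[] =
 *         { 2, 4, 11, 22, 44, 12, 18, 24, 36, 48, 72, 96, 108 };
 *
 *     int
 *     main(void)
 *     {
 *             int rix = 7;           // firmware index for rate code 24
 *
 *             printf("rix %d -> %d Mb/s\n", rix, ieeerates[rix] / 2);
 *             return (0);
 *     }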
*/ static int mwl_tx_processq(struct mwl_softc *sc, struct mwl_txq *txq) { #define EAGLE_TXD_STATUS_MCAST \ (EAGLE_TXD_STATUS_MULTICAST_TX | EAGLE_TXD_STATUS_BROADCAST_TX) struct ieee80211com *ic = &sc->sc_ic; struct mwl_txbuf *bf; struct mwl_txdesc *ds; struct ieee80211_node *ni; struct mwl_node *an; int nreaped; uint32_t status; DPRINTF(sc, MWL_DEBUG_TX_PROC, "%s: tx queue %u\n", __func__, txq->qnum); for (nreaped = 0;; nreaped++) { MWL_TXQ_LOCK(txq); bf = STAILQ_FIRST(&txq->active); if (bf == NULL) { MWL_TXQ_UNLOCK(txq); break; } ds = bf->bf_desc; MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (ds->Status & htole32(EAGLE_TXD_STATUS_FW_OWNED)) { MWL_TXQ_UNLOCK(txq); break; } STAILQ_REMOVE_HEAD(&txq->active, bf_list); MWL_TXQ_UNLOCK(txq); #ifdef MWL_DEBUG if (sc->sc_debug & MWL_DEBUG_XMIT_DESC) mwl_printtxbuf(bf, txq->qnum, nreaped); #endif ni = bf->bf_node; if (ni != NULL) { an = MWL_NODE(ni); status = le32toh(ds->Status); if (status & EAGLE_TXD_STATUS_OK) { uint16_t Format = le16toh(ds->Format); uint8_t txant = MS(Format, EAGLE_TXD_ANTENNA); sc->sc_stats.mst_ant_tx[txant]++; if (status & EAGLE_TXD_STATUS_OK_RETRY) sc->sc_stats.mst_tx_retries++; if (status & EAGLE_TXD_STATUS_OK_MORE_RETRY) sc->sc_stats.mst_tx_mretries++; if (txq->qnum >= MWL_WME_AC_VO) ic->ic_wme.wme_hipri_traffic++; ni->ni_txrate = MS(Format, EAGLE_TXD_RATE); if ((Format & EAGLE_TXD_FORMAT_HT) == 0) { ni->ni_txrate = mwl_cvtlegacyrix( ni->ni_txrate); } else ni->ni_txrate |= IEEE80211_RATE_MCS; sc->sc_stats.mst_tx_rate = ni->ni_txrate; } else { if (status & EAGLE_TXD_STATUS_FAILED_LINK_ERROR) sc->sc_stats.mst_tx_linkerror++; if (status & EAGLE_TXD_STATUS_FAILED_XRETRY) sc->sc_stats.mst_tx_xretries++; if (status & EAGLE_TXD_STATUS_FAILED_AGING) sc->sc_stats.mst_tx_aging++; if (bf->bf_m->m_flags & M_FF) sc->sc_stats.mst_ff_txerr++; } if (bf->bf_m->m_flags & M_TXCB) /* XXX strip fw len in case header inspected */ m_adj(bf->bf_m, sizeof(uint16_t)); ieee80211_tx_complete(ni, bf->bf_m, (status & EAGLE_TXD_STATUS_OK) == 0); } else m_freem(bf->bf_m); ds->Status = htole32(EAGLE_TXD_STATUS_IDLE); bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); mwl_puttxbuf_tail(txq, bf); } return nreaped; #undef EAGLE_TXD_STATUS_MCAST } /* * Deferred processing of transmit interrupt; special-cased * for four hardware queues, 0-3. */ static void mwl_tx_proc(void *arg, int npending) { struct mwl_softc *sc = arg; int nreaped; /* * Process each active queue. 
*/ nreaped = 0; if (!STAILQ_EMPTY(&sc->sc_txq[0].active)) nreaped += mwl_tx_processq(sc, &sc->sc_txq[0]); if (!STAILQ_EMPTY(&sc->sc_txq[1].active)) nreaped += mwl_tx_processq(sc, &sc->sc_txq[1]); if (!STAILQ_EMPTY(&sc->sc_txq[2].active)) nreaped += mwl_tx_processq(sc, &sc->sc_txq[2]); if (!STAILQ_EMPTY(&sc->sc_txq[3].active)) nreaped += mwl_tx_processq(sc, &sc->sc_txq[3]); if (nreaped != 0) { sc->sc_tx_timer = 0; if (mbufq_first(&sc->sc_snd) != NULL) { /* NB: kick fw; the tx thread may have been preempted */ mwl_hal_txstart(sc->sc_mh, 0); mwl_start(sc); } } } static void mwl_tx_draintxq(struct mwl_softc *sc, struct mwl_txq *txq) { struct ieee80211_node *ni; struct mwl_txbuf *bf; u_int ix; /* * NB: this assumes output has been stopped and * we do not need to block mwl_tx_tasklet */ for (ix = 0;; ix++) { MWL_TXQ_LOCK(txq); bf = STAILQ_FIRST(&txq->active); if (bf == NULL) { MWL_TXQ_UNLOCK(txq); break; } STAILQ_REMOVE_HEAD(&txq->active, bf_list); MWL_TXQ_UNLOCK(txq); #ifdef MWL_DEBUG if (sc->sc_debug & MWL_DEBUG_RESET) { struct ieee80211com *ic = &sc->sc_ic; const struct mwltxrec *tr = mtod(bf->bf_m, const struct mwltxrec *); mwl_printtxbuf(bf, txq->qnum, ix); ieee80211_dump_pkt(ic, (const uint8_t *)&tr->wh, bf->bf_m->m_len - sizeof(tr->fwlen), 0, -1); } #endif /* MWL_DEBUG */ bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); ni = bf->bf_node; if (ni != NULL) { /* * Reclaim node reference. */ ieee80211_free_node(ni); } m_freem(bf->bf_m); mwl_puttxbuf_tail(txq, bf); } } /* * Drain the transmit queues and reclaim resources. */ static void mwl_draintxq(struct mwl_softc *sc) { int i; for (i = 0; i < MWL_NUM_TX_QUEUES; i++) mwl_tx_draintxq(sc, &sc->sc_txq[i]); sc->sc_tx_timer = 0; } #ifdef MWL_DIAGAPI /* * Reset the transmit queues to a pristine state after a fw download. */ static void mwl_resettxq(struct mwl_softc *sc) { int i; for (i = 0; i < MWL_NUM_TX_QUEUES; i++) mwl_txq_reset(sc, &sc->sc_txq[i]); } #endif /* MWL_DIAGAPI */ /* * Clear the transmit queues of any frames submitted for the * specified vap. This is done when the vap is deleted so we * don't potentially reference the vap after it is gone. * Note we cannot remove the frames; we only reclaim the node * reference. 
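 *
 * The frames themselves stay queued (the firmware still owns the
 * descriptors); only node pointers belonging to the dying vap are
 * dropped.  A minimal standalone sketch of that scrub-in-place walk,
 * with stand-in types instead of the real txbuf/node structures:
 *
 *     #include <stdio.h>
 *     #include <stddef.h>
 *
 *     struct node { int vap; };
 *     struct txbuf { struct node *ni; };
 *
 *     int
 *     main(void)
 *     {
 *             struct node a = { 1 }, b = { 2 };
 *             struct txbuf q[3] = { { &a }, { &b }, { &a } };
 *             int i, dying = 1;
 *
 *             for (i = 0; i < 3; i++)
 *                     if (q[i].ni != NULL && q[i].ni->vap == dying)
 *                             q[i].ni = NULL;   // drop reference, keep frame
 *             for (i = 0; i < 3; i++)
 *                     printf("buf %d: %s\n", i,
 *                         q[i].ni ? "keeps node" : "scrubbed");
 *             return (0);
 *     }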
*/ static void mwl_cleartxq(struct mwl_softc *sc, struct ieee80211vap *vap) { struct mwl_txq *txq; struct mwl_txbuf *bf; int i; for (i = 0; i < MWL_NUM_TX_QUEUES; i++) { txq = &sc->sc_txq[i]; MWL_TXQ_LOCK(txq); STAILQ_FOREACH(bf, &txq->active, bf_list) { struct ieee80211_node *ni = bf->bf_node; if (ni != NULL && ni->ni_vap == vap) { bf->bf_node = NULL; ieee80211_free_node(ni); } } MWL_TXQ_UNLOCK(txq); } } static int mwl_recv_action(struct ieee80211_node *ni, const struct ieee80211_frame *wh, const uint8_t *frm, const uint8_t *efrm) { struct mwl_softc *sc = ni->ni_ic->ic_softc; const struct ieee80211_action *ia; ia = (const struct ieee80211_action *) frm; if (ia->ia_category == IEEE80211_ACTION_CAT_HT && ia->ia_action == IEEE80211_ACTION_HT_MIMOPWRSAVE) { const struct ieee80211_action_ht_mimopowersave *mps = (const struct ieee80211_action_ht_mimopowersave *) ia; mwl_hal_setmimops(sc->sc_mh, ni->ni_macaddr, mps->am_control & IEEE80211_A_HT_MIMOPWRSAVE_ENA, MS(mps->am_control, IEEE80211_A_HT_MIMOPWRSAVE_MODE)); return 0; } else return sc->sc_recv_action(ni, wh, frm, efrm); } static int mwl_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, int dialogtoken, int baparamset, int batimeout) { struct mwl_softc *sc = ni->ni_ic->ic_softc; struct ieee80211vap *vap = ni->ni_vap; struct mwl_node *mn = MWL_NODE(ni); struct mwl_bastate *bas; bas = tap->txa_private; if (bas == NULL) { const MWL_HAL_BASTREAM *sp; /* * Check for a free BA stream slot. */ #if MWL_MAXBA > 3 if (mn->mn_ba[3].bastream == NULL) bas = &mn->mn_ba[3]; else #endif #if MWL_MAXBA > 2 if (mn->mn_ba[2].bastream == NULL) bas = &mn->mn_ba[2]; else #endif #if MWL_MAXBA > 1 if (mn->mn_ba[1].bastream == NULL) bas = &mn->mn_ba[1]; else #endif #if MWL_MAXBA > 0 if (mn->mn_ba[0].bastream == NULL) bas = &mn->mn_ba[0]; else #endif { /* sta already has max BA streams */ /* XXX assign BA stream to highest priority tid */ DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: already has max bastreams\n", __func__); sc->sc_stats.mst_ampdu_reject++; return 0; } /* NB: no held reference to ni */ sp = mwl_hal_bastream_alloc(MWL_VAP(vap)->mv_hvap, (baparamset & IEEE80211_BAPS_POLICY_IMMEDIATE) != 0, ni->ni_macaddr, tap->txa_tid, ni->ni_htparam, ni, tap); if (sp == NULL) { /* * No available stream, return 0 so no * a-mpdu aggregation will be done. */ DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: no bastream available\n", __func__); sc->sc_stats.mst_ampdu_nostream++; return 0; } DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: alloc bastream %p\n", __func__, sp); /* NB: qos is left zero so we won't match in mwl_tx_start */ bas->bastream = sp; tap->txa_private = bas; } /* fetch current seq# from the firmware; if available */ if (mwl_hal_bastream_get_seqno(sc->sc_mh, bas->bastream, vap->iv_opmode == IEEE80211_M_STA ? 
vap->iv_myaddr : ni->ni_macaddr, &tap->txa_start) != 0) tap->txa_start = 0; return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout); } static int mwl_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, int code, int baparamset, int batimeout) { struct mwl_softc *sc = ni->ni_ic->ic_softc; struct mwl_bastate *bas; bas = tap->txa_private; if (bas == NULL) { /* XXX should not happen */ DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: no BA stream allocated, TID %d\n", __func__, tap->txa_tid); sc->sc_stats.mst_addba_nostream++; return 0; } if (code == IEEE80211_STATUS_SUCCESS) { struct ieee80211vap *vap = ni->ni_vap; int bufsiz, error; /* * Tell the firmware to setup the BA stream; * we know resources are available because we * pre-allocated one before forming the request. */ bufsiz = MS(baparamset, IEEE80211_BAPS_BUFSIZ); if (bufsiz == 0) bufsiz = IEEE80211_AGGR_BAWMAX; error = mwl_hal_bastream_create(MWL_VAP(vap)->mv_hvap, bas->bastream, bufsiz, bufsiz, tap->txa_start); if (error != 0) { /* * Setup failed, return immediately so no a-mpdu * aggregation will be done. */ mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream); mwl_bastream_free(bas); tap->txa_private = NULL; DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: create failed, error %d, bufsiz %d TID %d " "htparam 0x%x\n", __func__, error, bufsiz, tap->txa_tid, ni->ni_htparam); sc->sc_stats.mst_bacreate_failed++; return 0; } /* NB: cache txq to avoid ptr indirect */ mwl_bastream_setup(bas, tap->txa_tid, bas->bastream->txq); DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: bastream %p assigned to txq %d TID %d bufsiz %d " "htparam 0x%x\n", __func__, bas->bastream, bas->txq, tap->txa_tid, bufsiz, ni->ni_htparam); } else { /* * Other side NAK'd us; return the resources. */ DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: request failed with code %d, destroy bastream %p\n", __func__, code, bas->bastream); mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream); mwl_bastream_free(bas); tap->txa_private = NULL; } /* NB: firmware sends BAR so we don't need to */ return sc->sc_addba_response(ni, tap, code, baparamset, batimeout); } static void mwl_addba_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) { struct mwl_softc *sc = ni->ni_ic->ic_softc; struct mwl_bastate *bas; bas = tap->txa_private; if (bas != NULL) { DPRINTF(sc, MWL_DEBUG_AMPDU, "%s: destroy bastream %p\n", __func__, bas->bastream); mwl_hal_bastream_destroy(sc->sc_mh, bas->bastream); mwl_bastream_free(bas); tap->txa_private = NULL; } sc->sc_addba_stop(ni, tap); } /* * Setup the rx data structures. This should only be * done once or we may get out of sync with the firmware. */ static int mwl_startrecv(struct mwl_softc *sc) { if (!sc->sc_recvsetup) { struct mwl_rxbuf *bf, *prev; struct mwl_rxdesc *ds; prev = NULL; STAILQ_FOREACH(bf, &sc->sc_rxbuf, bf_list) { int error = mwl_rxbuf_init(sc, bf); if (error != 0) { DPRINTF(sc, MWL_DEBUG_RECV, "%s: mwl_rxbuf_init failed %d\n", __func__, error); return error; } if (prev != NULL) { ds = prev->bf_desc; ds->pPhysNext = htole32(bf->bf_daddr); } prev = bf; } if (prev != NULL) { ds = prev->bf_desc; ds->pPhysNext = htole32(STAILQ_FIRST(&sc->sc_rxbuf)->bf_daddr); } sc->sc_recvsetup = 1; } mwl_mode_init(sc); /* set filters, etc. 
*/ return 0; } static MWL_HAL_APMODE mwl_getapmode(const struct ieee80211vap *vap, struct ieee80211_channel *chan) { MWL_HAL_APMODE mode; if (IEEE80211_IS_CHAN_HT(chan)) { if (vap->iv_flags_ht & IEEE80211_FHT_PUREN) mode = AP_MODE_N_ONLY; else if (IEEE80211_IS_CHAN_5GHZ(chan)) mode = AP_MODE_AandN; else if (vap->iv_flags & IEEE80211_F_PUREG) mode = AP_MODE_GandN; else mode = AP_MODE_BandGandN; } else if (IEEE80211_IS_CHAN_ANYG(chan)) { if (vap->iv_flags & IEEE80211_F_PUREG) mode = AP_MODE_G_ONLY; else mode = AP_MODE_MIXED; } else if (IEEE80211_IS_CHAN_B(chan)) mode = AP_MODE_B_ONLY; else if (IEEE80211_IS_CHAN_A(chan)) mode = AP_MODE_A_ONLY; else mode = AP_MODE_MIXED; /* XXX should not happen? */ return mode; } static int mwl_setapmode(struct ieee80211vap *vap, struct ieee80211_channel *chan) { struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap; return mwl_hal_setapmode(hvap, mwl_getapmode(vap, chan)); } /* * Set/change channels. */ static int mwl_chan_set(struct mwl_softc *sc, struct ieee80211_channel *chan) { struct mwl_hal *mh = sc->sc_mh; struct ieee80211com *ic = &sc->sc_ic; MWL_HAL_CHANNEL hchan; int maxtxpow; DPRINTF(sc, MWL_DEBUG_RESET, "%s: chan %u MHz/flags 0x%x\n", __func__, chan->ic_freq, chan->ic_flags); /* * Convert to a HAL channel description with * the flags constrained to reflect the current * operating mode. */ mwl_mapchan(&hchan, chan); mwl_hal_intrset(mh, 0); /* disable interrupts */ #if 0 mwl_draintxq(sc); /* clear pending tx frames */ #endif mwl_hal_setchannel(mh, &hchan); /* * Tx power is cap'd by the regulatory setting and * possibly a user-set limit. We pass the min of * these to the hal to apply them to the cal data * for this channel. * XXX min bound? */ maxtxpow = 2*chan->ic_maxregpower; if (maxtxpow > ic->ic_txpowlimit) maxtxpow = ic->ic_txpowlimit; mwl_hal_settxpower(mh, &hchan, maxtxpow / 2); /* NB: potentially change mcast/mgt rates */ mwl_setcurchanrates(sc); /* * Update internal state. */ sc->sc_tx_th.wt_chan_freq = htole16(chan->ic_freq); sc->sc_rx_th.wr_chan_freq = htole16(chan->ic_freq); if (IEEE80211_IS_CHAN_A(chan)) { sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_A); sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_A); } else if (IEEE80211_IS_CHAN_ANYG(chan)) { sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_G); sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_G); } else { sc->sc_tx_th.wt_chan_flags = htole16(IEEE80211_CHAN_B); sc->sc_rx_th.wr_chan_flags = htole16(IEEE80211_CHAN_B); } sc->sc_curchan = hchan; mwl_hal_intrset(mh, sc->sc_imask); return 0; } static void mwl_scan_start(struct ieee80211com *ic) { struct mwl_softc *sc = ic->ic_softc; DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__); } static void mwl_scan_end(struct ieee80211com *ic) { struct mwl_softc *sc = ic->ic_softc; DPRINTF(sc, MWL_DEBUG_STATE, "%s\n", __func__); } static void mwl_set_channel(struct ieee80211com *ic) { struct mwl_softc *sc = ic->ic_softc; (void) mwl_chan_set(sc, ic->ic_curchan); } /* * Handle a channel switch request. We inform the firmware * and mark the global state to suppress various actions. * NB: we issue only one request to the fw; we may be called * multiple times if there are multiple vap's. 
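 *
 * The "only one request" part is a simple latch: the first vap to get
 * here sets sc_csapending and later callers return early.  A minimal
 * standalone sketch of that one-shot pattern (the firmware call is a
 * printf here):
 *
 *     #include <stdio.h>
 *
 *     static int csapending;
 *
 *     static void
 *     startcsa(int vap)
 *     {
 *             if (csapending)
 *                     return;        // another vap already asked
 *             printf("vap %d: announce channel switch to fw\n", vap);
 *             csapending = 1;
 *     }
 *
 *     int
 *     main(void)
 *     {
 *             startcsa(0);
 *             startcsa(1);           // suppressed
 *             return (0);
 *     }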
*/ static void mwl_startcsa(struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct mwl_softc *sc = ic->ic_softc; MWL_HAL_CHANNEL hchan; if (sc->sc_csapending) return; mwl_mapchan(&hchan, ic->ic_csa_newchan); /* 1 =>'s quiet channel */ mwl_hal_setchannelswitchie(sc->sc_mh, &hchan, 1, ic->ic_csa_count); sc->sc_csapending = 1; } /* * Plumb any static WEP key for the station. This is * necessary as we must propagate the key from the * global key table of the vap to each sta db entry. */ static void mwl_setanywepkey(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) { if ((vap->iv_flags & (IEEE80211_F_PRIVACY|IEEE80211_F_WPA)) == IEEE80211_F_PRIVACY && vap->iv_def_txkey != IEEE80211_KEYIX_NONE && vap->iv_nw_keys[vap->iv_def_txkey].wk_keyix != IEEE80211_KEYIX_NONE) (void) _mwl_key_set(vap, &vap->iv_nw_keys[vap->iv_def_txkey], mac); } static int mwl_peerstadb(struct ieee80211_node *ni, int aid, int staid, MWL_HAL_PEERINFO *pi) { #define WME(ie) ((const struct ieee80211_wme_info *) ie) struct ieee80211vap *vap = ni->ni_vap; struct mwl_hal_vap *hvap; int error; if (vap->iv_opmode == IEEE80211_M_WDS) { /* * WDS vap's do not have a f/w vap; instead they piggyback * on an AP vap and we must install the sta db entry and * crypto state using that AP's handle (the WDS vap has none). */ hvap = MWL_VAP(vap)->mv_ap_hvap; } else hvap = MWL_VAP(vap)->mv_hvap; error = mwl_hal_newstation(hvap, ni->ni_macaddr, aid, staid, pi, ni->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT), ni->ni_ies.wme_ie != NULL ? WME(ni->ni_ies.wme_ie)->wme_info : 0); if (error == 0) { /* * Setup security for this station. For sta mode this is * needed even though do the same thing on transition to * AUTH state because the call to mwl_hal_newstation * clobbers the crypto state we setup. */ mwl_setanywepkey(vap, ni->ni_macaddr); } return error; #undef WME } static void mwl_setglobalkeys(struct ieee80211vap *vap) { struct ieee80211_key *wk; wk = &vap->iv_nw_keys[0]; for (; wk < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; wk++) if (wk->wk_keyix != IEEE80211_KEYIX_NONE) (void) _mwl_key_set(vap, wk, vap->iv_myaddr); } /* * Convert a legacy rate set to a firmware bitmask. */ static uint32_t get_rate_bitmap(const struct ieee80211_rateset *rs) { uint32_t rates; int i; rates = 0; for (i = 0; i < rs->rs_nrates; i++) switch (rs->rs_rates[i] & IEEE80211_RATE_VAL) { case 2: rates |= 0x001; break; case 4: rates |= 0x002; break; case 11: rates |= 0x004; break; case 22: rates |= 0x008; break; case 44: rates |= 0x010; break; case 12: rates |= 0x020; break; case 18: rates |= 0x040; break; case 24: rates |= 0x080; break; case 36: rates |= 0x100; break; case 48: rates |= 0x200; break; case 72: rates |= 0x400; break; case 96: rates |= 0x800; break; case 108: rates |= 0x1000; break; } return rates; } /* * Construct an HT firmware bitmask from an HT rate set. */ static uint32_t get_htrate_bitmap(const struct ieee80211_htrateset *rs) { uint32_t rates; int i; rates = 0; for (i = 0; i < rs->rs_nrates; i++) { if (rs->rs_rates[i] < 16) rates |= 1<rs_rates[i]; } return rates; } /* * Craft station database entry for station. * NB: use host byte order here, the hal handles byte swapping. 
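 *
 * (The rate state consumed below is produced by get_rate_bitmap() and
 * get_htrate_bitmap() above: the switch maps net80211's 500 kb/s rate
 * codes (2 = 1 Mb/s, 11 = 5.5 Mb/s, 108 = 54 Mb/s, ...) onto the
 * firmware's fixed bit positions, and the HT helper is intended to set
 * bit N for each MCS N below 16, i.e. 1 << rs->rs_rates[i].  A hedged
 * usage sketch follows; mwl_dump_rates() is a made-up name:)
 */
#if 0	/* illustrative sketch only; not compiled */
static void
mwl_dump_rates(const struct ieee80211_node *ni)
{
	uint32_t legacy, ht;

	legacy = get_rate_bitmap(&ni->ni_rates);
	ht = get_htrate_bitmap(&ni->ni_htrates);
	/* An 11g peer offering 1/2/5.5/11 and 6-54 Mb/s should yield 0x1fef. */
	printf("legacy rate bitmap 0x%08x, HT MCS bitmap 0x%08x\n", legacy, ht);
}
#endif
/*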
*/ static MWL_HAL_PEERINFO * mkpeerinfo(MWL_HAL_PEERINFO *pi, const struct ieee80211_node *ni) { const struct ieee80211vap *vap = ni->ni_vap; memset(pi, 0, sizeof(*pi)); pi->LegacyRateBitMap = get_rate_bitmap(&ni->ni_rates); pi->CapInfo = ni->ni_capinfo; if (ni->ni_flags & IEEE80211_NODE_HT) { /* HT capabilities, etc */ pi->HTCapabilitiesInfo = ni->ni_htcap; /* XXX pi.HTCapabilitiesInfo */ pi->MacHTParamInfo = ni->ni_htparam; pi->HTRateBitMap = get_htrate_bitmap(&ni->ni_htrates); pi->AddHtInfo.ControlChan = ni->ni_htctlchan; pi->AddHtInfo.AddChan = ni->ni_ht2ndchan; pi->AddHtInfo.OpMode = ni->ni_htopmode; pi->AddHtInfo.stbc = ni->ni_htstbc; /* constrain according to local configuration */ if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI40) == 0) pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI40; if ((vap->iv_flags_ht & IEEE80211_FHT_SHORTGI20) == 0) pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_SHORTGI20; if (ni->ni_chw != 40) pi->HTCapabilitiesInfo &= ~IEEE80211_HTCAP_CHWIDTH40; } return pi; } /* * Re-create the local sta db entry for a vap to ensure * up to date WME state is pushed to the firmware. Because * this resets crypto state this must be followed by a * reload of any keys in the global key table. */ static int mwl_localstadb(struct ieee80211vap *vap) { #define WME(ie) ((const struct ieee80211_wme_info *) ie) struct mwl_hal_vap *hvap = MWL_VAP(vap)->mv_hvap; struct ieee80211_node *bss; MWL_HAL_PEERINFO pi; int error; switch (vap->iv_opmode) { case IEEE80211_M_STA: bss = vap->iv_bss; error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0, vap->iv_state == IEEE80211_S_RUN ? mkpeerinfo(&pi, bss) : NULL, (bss->ni_flags & (IEEE80211_NODE_QOS | IEEE80211_NODE_HT)), bss->ni_ies.wme_ie != NULL ? WME(bss->ni_ies.wme_ie)->wme_info : 0); if (error == 0) mwl_setglobalkeys(vap); break; case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: error = mwl_hal_newstation(hvap, vap->iv_myaddr, 0, 0, NULL, vap->iv_flags & IEEE80211_F_WME, 0); if (error == 0) mwl_setglobalkeys(vap); break; default: error = 0; break; } return error; #undef WME } static int mwl_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct mwl_vap *mvp = MWL_VAP(vap); struct mwl_hal_vap *hvap = mvp->mv_hvap; struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node *ni = NULL; struct mwl_softc *sc = ic->ic_softc; struct mwl_hal *mh = sc->sc_mh; enum ieee80211_state ostate = vap->iv_state; int error; DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: %s -> %s\n", vap->iv_ifp->if_xname, __func__, ieee80211_state_name[ostate], ieee80211_state_name[nstate]); callout_stop(&sc->sc_timer); /* * Clear current radar detection state. */ if (ostate == IEEE80211_S_CAC) { /* stop quiet mode radar detection */ mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_STOP); } else if (sc->sc_radarena) { /* stop in-service radar detection */ mwl_hal_setradardetection(mh, DR_DFS_DISABLE); sc->sc_radarena = 0; } /* * Carry out per-state actions before doing net80211 work. */ if (nstate == IEEE80211_S_INIT) { /* NB: only ap+sta vap's have a fw entity */ if (hvap != NULL) mwl_hal_stop(hvap); } else if (nstate == IEEE80211_S_SCAN) { mwl_hal_start(hvap); /* NB: this disables beacon frames */ mwl_hal_setinframode(hvap); } else if (nstate == IEEE80211_S_AUTH) { /* * Must create a sta db entry in case a WEP key needs to * be plumbed. This entry will be overwritten if we * associate; otherwise it will be reclaimed on node free. 
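 *
 * (The entry created here is deliberately minimal: mwl_peerstadb() is
 * called with a NULL peer info because rates and HT parameters are not
 * known until association completes; mwl_newassoc() later rebuilds the
 * entry with a full mkpeerinfo() description and a real station id.)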
*/ ni = vap->iv_bss; MWL_NODE(ni)->mn_hvap = hvap; (void) mwl_peerstadb(ni, 0, 0, NULL); } else if (nstate == IEEE80211_S_CSA) { /* XXX move to below? */ if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_MBSS) mwl_startcsa(vap); } else if (nstate == IEEE80211_S_CAC) { /* XXX move to below? */ /* stop ap xmit and enable quiet mode radar detection */ mwl_hal_setradardetection(mh, DR_CHK_CHANNEL_AVAILABLE_START); } /* * Invoke the parent method to do net80211 work. */ error = mvp->mv_newstate(vap, nstate, arg); /* * Carry out work that must be done after net80211 runs; * this work requires up to date state (e.g. iv_bss). */ if (error == 0 && nstate == IEEE80211_S_RUN) { /* NB: collect bss node again, it may have changed */ ni = vap->iv_bss; DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s(RUN): iv_flags 0x%08x bintvl %d bssid %s " "capinfo 0x%04x chan %d\n", vap->iv_ifp->if_xname, __func__, vap->iv_flags, ni->ni_intval, ether_sprintf(ni->ni_bssid), ni->ni_capinfo, ieee80211_chan2ieee(ic, ic->ic_curchan)); /* * Recreate local sta db entry to update WME/HT state. */ mwl_localstadb(vap); switch (vap->iv_opmode) { case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: if (ostate == IEEE80211_S_CAC) { /* enable in-service radar detection */ mwl_hal_setradardetection(mh, DR_IN_SERVICE_MONITOR_START); sc->sc_radarena = 1; } /* * Allocate and setup the beacon frame * (and related state). */ error = mwl_reset_vap(vap, IEEE80211_S_RUN); if (error != 0) { DPRINTF(sc, MWL_DEBUG_STATE, "%s: beacon setup failed, error %d\n", __func__, error); goto bad; } /* NB: must be after setting up beacon */ mwl_hal_start(hvap); break; case IEEE80211_M_STA: DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: aid 0x%x\n", vap->iv_ifp->if_xname, __func__, ni->ni_associd); /* * Set state now that we're associated. */ mwl_hal_setassocid(hvap, ni->ni_bssid, ni->ni_associd); mwl_setrates(vap); mwl_hal_setrtsthreshold(hvap, vap->iv_rtsthreshold); if ((vap->iv_flags & IEEE80211_F_DWDS) && sc->sc_ndwdsvaps++ == 0) mwl_hal_setdwds(mh, 1); break; case IEEE80211_M_WDS: DPRINTF(sc, MWL_DEBUG_STATE, "%s: %s: bssid %s\n", vap->iv_ifp->if_xname, __func__, ether_sprintf(ni->ni_bssid)); mwl_seteapolformat(vap); break; default: break; } /* * Set CS mode according to operating channel; * this mostly an optimization for 5GHz. * * NB: must follow mwl_hal_start which resets csmode */ if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan)) mwl_hal_setcsmode(mh, CSMODE_AGGRESSIVE); else mwl_hal_setcsmode(mh, CSMODE_AUTO_ENA); /* * Start timer to prod firmware. */ if (sc->sc_ageinterval != 0) callout_reset(&sc->sc_timer, sc->sc_ageinterval*hz, mwl_agestations, sc); } else if (nstate == IEEE80211_S_SLEEP) { /* XXX set chip in power save */ } else if ((vap->iv_flags & IEEE80211_F_DWDS) && --sc->sc_ndwdsvaps == 0) mwl_hal_setdwds(mh, 0); bad: return error; } /* * Manage station id's; these are separate from AID's * as AID's may have values out of the range of possible * station id's acceptable to the firmware. */ static int allocstaid(struct mwl_softc *sc, int aid) { int staid; if (!(0 < aid && aid < MWL_MAXSTAID) || isset(sc->sc_staid, aid)) { /* NB: don't use 0 */ for (staid = 1; staid < MWL_MAXSTAID; staid++) if (isclr(sc->sc_staid, staid)) break; } else staid = aid; setbit(sc->sc_staid, staid); return staid; } static void delstaid(struct mwl_softc *sc, int staid) { clrbit(sc->sc_staid, staid); } /* * Setup driver-specific state for a newly associated node. * Note that we're called also on a re-associate, the isnew * param tells us if this is the first time or not. 
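 *
 * (Station ids come from the small bitmap allocator above; the sketch
 * below, with a made-up function name, shows the intended pairing of
 * allocstaid()/delstaid() around the lifetime of a peer:)
 */
#if 0	/* illustrative sketch only; not compiled */
static void
mwl_staid_example(struct mwl_softc *sc, struct ieee80211_node *ni)
{
	int staid;

	/*
	 * allocstaid() prefers the 802.11 AID when it lies inside the
	 * firmware's staid range and is still free; otherwise it takes
	 * the first clear bit in sc_staid (never 0).
	 */
	staid = allocstaid(sc, IEEE80211_AID(ni->ni_associd));

	/* ... firmware state keyed by staid would be set up here ... */

	delstaid(sc, staid);	/* release the slot when the node goes away */
}
#endif
/*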
*/ static void mwl_newassoc(struct ieee80211_node *ni, int isnew) { struct ieee80211vap *vap = ni->ni_vap; struct mwl_softc *sc = vap->iv_ic->ic_softc; struct mwl_node *mn = MWL_NODE(ni); MWL_HAL_PEERINFO pi; uint16_t aid; int error; aid = IEEE80211_AID(ni->ni_associd); if (isnew) { mn->mn_staid = allocstaid(sc, aid); mn->mn_hvap = MWL_VAP(vap)->mv_hvap; } else { mn = MWL_NODE(ni); /* XXX reset BA stream? */ } DPRINTF(sc, MWL_DEBUG_NODE, "%s: mac %s isnew %d aid %d staid %d\n", __func__, ether_sprintf(ni->ni_macaddr), isnew, aid, mn->mn_staid); error = mwl_peerstadb(ni, aid, mn->mn_staid, mkpeerinfo(&pi, ni)); if (error != 0) { DPRINTF(sc, MWL_DEBUG_NODE, "%s: error %d creating sta db entry\n", __func__, error); /* XXX how to deal with error? */ } } /* * Periodically poke the firmware to age out station state * (power save queues, pending tx aggregates). */ static void mwl_agestations(void *arg) { struct mwl_softc *sc = arg; mwl_hal_setkeepalive(sc->sc_mh); if (sc->sc_ageinterval != 0) /* NB: catch dynamic changes */ callout_schedule(&sc->sc_timer, sc->sc_ageinterval*hz); } static const struct mwl_hal_channel * findhalchannel(const MWL_HAL_CHANNELINFO *ci, int ieee) { int i; for (i = 0; i < ci->nchannels; i++) { const struct mwl_hal_channel *hc = &ci->channels[i]; if (hc->ieee == ieee) return hc; } return NULL; } static int mwl_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, int nchan, struct ieee80211_channel chans[]) { struct mwl_softc *sc = ic->ic_softc; struct mwl_hal *mh = sc->sc_mh; const MWL_HAL_CHANNELINFO *ci; int i; for (i = 0; i < nchan; i++) { struct ieee80211_channel *c = &chans[i]; const struct mwl_hal_channel *hc; if (IEEE80211_IS_CHAN_2GHZ(c)) { mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_2DOT4GHZ, IEEE80211_IS_CHAN_HT40(c) ? MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci); } else if (IEEE80211_IS_CHAN_5GHZ(c)) { mwl_hal_getchannelinfo(mh, MWL_FREQ_BAND_5GHZ, IEEE80211_IS_CHAN_HT40(c) ? MWL_CH_40_MHz_WIDTH : MWL_CH_20_MHz_WIDTH, &ci); } else { device_printf(sc->sc_dev, "%s: channel %u freq %u/0x%x not 2.4/5GHz\n", __func__, c->ic_ieee, c->ic_freq, c->ic_flags); return EINVAL; } /* * Verify channel has cal data and cap tx power. */ hc = findhalchannel(ci, c->ic_ieee); if (hc != NULL) { if (c->ic_maxpower > 2*hc->maxTxPow) c->ic_maxpower = 2*hc->maxTxPow; goto next; } if (IEEE80211_IS_CHAN_HT40(c)) { /* * Look for the extension channel since the * hal table only has the primary channel. 
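 *
 * (In other words the lookup order is: primary IEEE channel number
 * first, then, for HT40 channels only, the extension channel; a miss
 * on both means the radio has no cal data and the channel is rejected.
 * Restated below with a made-up helper name.  Note also that
 * hc->maxTxPow appears to be whole dBm while net80211's ic_maxpower is
 * half-dBm, hence the factor of two in the power caps here.)
 */
#if 0	/* illustrative sketch only; not compiled */
static const struct mwl_hal_channel *
mwl_findcal(const MWL_HAL_CHANNELINFO *ci, const struct ieee80211_channel *c)
{
	const struct mwl_hal_channel *hc;

	hc = findhalchannel(ci, c->ic_ieee);
	if (hc == NULL && IEEE80211_IS_CHAN_HT40(c))
		hc = findhalchannel(ci, c->ic_extieee);
	return (hc);		/* NULL: no cal data for this channel */
}
#endif
/*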
*/ hc = findhalchannel(ci, c->ic_extieee); if (hc != NULL) { if (c->ic_maxpower > 2*hc->maxTxPow) c->ic_maxpower = 2*hc->maxTxPow; goto next; } } device_printf(sc->sc_dev, "%s: no cal data for channel %u ext %u freq %u/0x%x\n", __func__, c->ic_ieee, c->ic_extieee, c->ic_freq, c->ic_flags); return EINVAL; next: ; } return 0; } #define IEEE80211_CHAN_HTG (IEEE80211_CHAN_HT|IEEE80211_CHAN_G) #define IEEE80211_CHAN_HTA (IEEE80211_CHAN_HT|IEEE80211_CHAN_A) static void addht40channels(struct ieee80211_channel chans[], int maxchans, int *nchans, const MWL_HAL_CHANNELINFO *ci, int flags) { int i, error; for (i = 0; i < ci->nchannels; i++) { const struct mwl_hal_channel *hc = &ci->channels[i]; error = ieee80211_add_channel_ht40(chans, maxchans, nchans, hc->ieee, hc->maxTxPow, flags); if (error != 0 && error != ENOENT) break; } } static void addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans, const MWL_HAL_CHANNELINFO *ci, const uint8_t bands[]) { int i, error; error = 0; for (i = 0; i < ci->nchannels && error == 0; i++) { const struct mwl_hal_channel *hc = &ci->channels[i]; error = ieee80211_add_channel(chans, maxchans, nchans, hc->ieee, hc->freq, hc->maxTxPow, 0, bands); } } static void getchannels(struct mwl_softc *sc, int maxchans, int *nchans, struct ieee80211_channel chans[]) { const MWL_HAL_CHANNELINFO *ci; uint8_t bands[IEEE80211_MODE_BYTES]; /* * Use the channel info from the hal to craft the * channel list. Note that we pass back an unsorted * list; the caller is required to sort it for us * (if desired). */ *nchans = 0; if (mwl_hal_getchannelinfo(sc->sc_mh, MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0) { memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11G); setbit(bands, IEEE80211_MODE_11NG); addchannels(chans, maxchans, nchans, ci, bands); } if (mwl_hal_getchannelinfo(sc->sc_mh, MWL_FREQ_BAND_5GHZ, MWL_CH_20_MHz_WIDTH, &ci) == 0) { memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11A); setbit(bands, IEEE80211_MODE_11NA); addchannels(chans, maxchans, nchans, ci, bands); } if (mwl_hal_getchannelinfo(sc->sc_mh, MWL_FREQ_BAND_2DOT4GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0) addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTG); if (mwl_hal_getchannelinfo(sc->sc_mh, MWL_FREQ_BAND_5GHZ, MWL_CH_40_MHz_WIDTH, &ci) == 0) addht40channels(chans, maxchans, nchans, ci, IEEE80211_CHAN_HTA); } static void mwl_getradiocaps(struct ieee80211com *ic, int maxchans, int *nchans, struct ieee80211_channel chans[]) { struct mwl_softc *sc = ic->ic_softc; getchannels(sc, maxchans, nchans, chans); } static int mwl_getchannels(struct mwl_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; /* * Use the channel info from the hal to craft the * channel list for net80211. Note that we pass up * an unsorted list; net80211 will sort it for us. */ memset(ic->ic_channels, 0, sizeof(ic->ic_channels)); ic->ic_nchans = 0; getchannels(sc, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels); ic->ic_regdomain.regdomain = SKU_DEBUG; ic->ic_regdomain.country = CTRY_DEFAULT; ic->ic_regdomain.location = 'I'; ic->ic_regdomain.isocc[0] = ' '; /* XXX? */ ic->ic_regdomain.isocc[1] = ' '; return (ic->ic_nchans == 0 ? 
EIO : 0); } #undef IEEE80211_CHAN_HTA #undef IEEE80211_CHAN_HTG #ifdef MWL_DEBUG static void mwl_printrxbuf(const struct mwl_rxbuf *bf, u_int ix) { const struct mwl_rxdesc *ds = bf->bf_desc; uint32_t status = le32toh(ds->Status); printf("R[%2u] (DS.V:%p DS.P:0x%jx) NEXT:%08x DATA:%08x RC:%02x%s\n" " STAT:%02x LEN:%04x RSSI:%02x CHAN:%02x RATE:%02x QOS:%04x HT:%04x\n", ix, ds, (uintmax_t)bf->bf_daddr, le32toh(ds->pPhysNext), le32toh(ds->pPhysBuffData), ds->RxControl, ds->RxControl != EAGLE_RXD_CTRL_DRIVER_OWN ? "" : (status & EAGLE_RXD_STATUS_OK) ? " *" : " !", ds->Status, le16toh(ds->PktLen), ds->RSSI, ds->Channel, ds->Rate, le16toh(ds->QosCtrl), le16toh(ds->HtSig2)); } static void mwl_printtxbuf(const struct mwl_txbuf *bf, u_int qnum, u_int ix) { const struct mwl_txdesc *ds = bf->bf_desc; uint32_t status = le32toh(ds->Status); printf("Q%u[%3u]", qnum, ix); printf(" (DS.V:%p DS.P:0x%jx)\n", ds, (uintmax_t)bf->bf_daddr); printf(" NEXT:%08x DATA:%08x LEN:%04x STAT:%08x%s\n", le32toh(ds->pPhysNext), le32toh(ds->PktPtr), le16toh(ds->PktLen), status, status & EAGLE_TXD_STATUS_USED ? "" : (status & 3) != 0 ? " *" : " !"); printf(" RATE:%02x PRI:%x QOS:%04x SAP:%08x FORMAT:%04x\n", ds->DataRate, ds->TxPriority, le16toh(ds->QosCtrl), le32toh(ds->SapPktInfo), le16toh(ds->Format)); #if MWL_TXDESC > 1 printf(" MULTIFRAMES:%u LEN:%04x %04x %04x %04x %04x %04x\n" , le32toh(ds->multiframes) , le16toh(ds->PktLenArray[0]), le16toh(ds->PktLenArray[1]) , le16toh(ds->PktLenArray[2]), le16toh(ds->PktLenArray[3]) , le16toh(ds->PktLenArray[4]), le16toh(ds->PktLenArray[5]) ); printf(" DATA:%08x %08x %08x %08x %08x %08x\n" , le32toh(ds->PktPtrArray[0]), le32toh(ds->PktPtrArray[1]) , le32toh(ds->PktPtrArray[2]), le32toh(ds->PktPtrArray[3]) , le32toh(ds->PktPtrArray[4]), le32toh(ds->PktPtrArray[5]) ); #endif #if 0 { const uint8_t *cp = (const uint8_t *) ds; int i; for (i = 0; i < sizeof(struct mwl_txdesc); i++) { printf("%02x ", cp[i]); if (((i+1) % 16) == 0) printf("\n"); } printf("\n"); } #endif } #endif /* MWL_DEBUG */ #if 0 static void mwl_txq_dump(struct mwl_txq *txq) { struct mwl_txbuf *bf; int i = 0; MWL_TXQ_LOCK(txq); STAILQ_FOREACH(bf, &txq->active, bf_list) { struct mwl_txdesc *ds = bf->bf_desc; MWL_TXDESC_SYNC(txq, ds, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); #ifdef MWL_DEBUG mwl_printtxbuf(bf, txq->qnum, i); #endif i++; } MWL_TXQ_UNLOCK(txq); } #endif static void mwl_watchdog(void *arg) { struct mwl_softc *sc = arg; callout_reset(&sc->sc_watchdog, hz, mwl_watchdog, sc); if (sc->sc_tx_timer == 0 || --sc->sc_tx_timer > 0) return; if (sc->sc_running && !sc->sc_invalid) { if (mwl_hal_setkeepalive(sc->sc_mh)) device_printf(sc->sc_dev, "transmit timeout (firmware hung?)\n"); else device_printf(sc->sc_dev, "transmit timeout\n"); #if 0 mwl_reset(sc); mwl_txq_dump(&sc->sc_txq[0]);/*XXX*/ #endif counter_u64_add(sc->sc_ic.ic_oerrors, 1); sc->sc_stats.mst_watchdog++; } } #ifdef MWL_DIAGAPI /* * Diagnostic interface to the HAL. This is used by various * tools to do things like retrieve register contents for * debugging. The mechanism is intentionally opaque so that * it can change frequently w/o concern for compatibility. */ static int mwl_ioctl_diag(struct mwl_softc *sc, struct mwl_diag *md) { struct mwl_hal *mh = sc->sc_mh; u_int id = md->md_id & MWL_DIAG_ID; void *indata = NULL; void *outdata = NULL; u_int32_t insize = md->md_in_size; u_int32_t outsize = md->md_out_size; int error = 0; if (md->md_id & MWL_DIAG_IN) { /* * Copy in data. 
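 *
 * (md_id carries both the HAL diag id, masked with MWL_DIAG_ID, and two
 * direction flags: MWL_DIAG_IN means the caller supplies md_in_data /
 * md_in_size to be copied in, and MWL_DIAG_DYN means the driver must
 * allocate the result buffer itself; without MWL_DIAG_DYN the HAL hands
 * back a pointer to its own buffer, which is then copied out up to
 * md_out_size bytes.)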
*/ indata = malloc(insize, M_TEMP, M_NOWAIT); if (indata == NULL) { error = ENOMEM; goto bad; } error = copyin(md->md_in_data, indata, insize); if (error) goto bad; } if (md->md_id & MWL_DIAG_DYN) { /* * Allocate a buffer for the results (otherwise the HAL * returns a pointer to a buffer where we can read the * results). Note that we depend on the HAL leaving this * pointer for us to use below in reclaiming the buffer; * may want to be more defensive. */ outdata = malloc(outsize, M_TEMP, M_NOWAIT); if (outdata == NULL) { error = ENOMEM; goto bad; } } if (mwl_hal_getdiagstate(mh, id, indata, insize, &outdata, &outsize)) { if (outsize < md->md_out_size) md->md_out_size = outsize; if (outdata != NULL) error = copyout(outdata, md->md_out_data, md->md_out_size); } else { error = EINVAL; } bad: if ((md->md_id & MWL_DIAG_IN) && indata != NULL) free(indata, M_TEMP); if ((md->md_id & MWL_DIAG_DYN) && outdata != NULL) free(outdata, M_TEMP); return error; } static int mwl_ioctl_reset(struct mwl_softc *sc, struct mwl_diag *md) { struct mwl_hal *mh = sc->sc_mh; int error; MWL_LOCK_ASSERT(sc); if (md->md_id == 0 && mwl_hal_fwload(mh, NULL) != 0) { device_printf(sc->sc_dev, "unable to load firmware\n"); return EIO; } if (mwl_hal_gethwspecs(mh, &sc->sc_hwspecs) != 0) { device_printf(sc->sc_dev, "unable to fetch h/w specs\n"); return EIO; } error = mwl_setupdma(sc); if (error != 0) { /* NB: mwl_setupdma prints a msg */ return error; } /* * Reset tx/rx data structures; after reload we must * re-start the driver's notion of the next xmit/recv. */ mwl_draintxq(sc); /* clear pending frames */ mwl_resettxq(sc); /* rebuild tx q lists */ sc->sc_rxnext = NULL; /* force rx to start at the list head */ return 0; } #endif /* MWL_DIAGAPI */ static void mwl_parent(struct ieee80211com *ic) { struct mwl_softc *sc = ic->ic_softc; int startall = 0; MWL_LOCK(sc); if (ic->ic_nrunning > 0) { if (sc->sc_running) { /* * To avoid rescanning another access point, * do not call mwl_init() here. Instead, * only reflect promisc mode settings. */ mwl_mode_init(sc); } else { /* * Beware of being called during attach/detach * to reset promiscuous mode. In that case we * will still be marked UP but not RUNNING. * However trying to re-init the interface * is the wrong thing to do as we've already * torn down much of our state. There's * probably a better way to deal with this. */ if (!sc->sc_invalid) { mwl_init(sc); /* XXX lose error */ startall = 1; } } } else mwl_stop(sc); MWL_UNLOCK(sc); if (startall) ieee80211_start_all(ic); } static int mwl_ioctl(struct ieee80211com *ic, u_long cmd, void *data) { struct mwl_softc *sc = ic->ic_softc; struct ifreq *ifr = data; int error = 0; switch (cmd) { case SIOCGMVSTATS: mwl_hal_gethwstats(sc->sc_mh, &sc->sc_stats.hw_stats); #if 0 /* NB: embed these numbers to get a consistent view */ sc->sc_stats.mst_tx_packets = ifp->if_get_counter(ifp, IFCOUNTER_OPACKETS); sc->sc_stats.mst_rx_packets = ifp->if_get_counter(ifp, IFCOUNTER_IPACKETS); #endif /* * NB: Drop the softc lock in case of a page fault; * we'll accept any potential inconsisentcy in the * statistics. The alternative is to copy the data * to a local structure. 
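 *
 * (That alternative would look roughly like the sketch below, assuming
 * sc_stats is a plain struct mwl_stats that can be snapshotted by
 * structure assignment; it trades a stack copy for holding the lock
 * across the snapshot so the copyout itself still runs unlocked:)
 */
#if 0	/* illustrative sketch only; not compiled */
		struct mwl_stats stats;

		MWL_LOCK(sc);
		stats = sc->sc_stats;	/* consistent snapshot */
		MWL_UNLOCK(sc);
		return (copyout(&stats, ifr->ifr_data, sizeof(stats)));
#endif
/*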
*/ return (copyout(&sc->sc_stats, ifr->ifr_data, sizeof (sc->sc_stats))); #ifdef MWL_DIAGAPI case SIOCGMVDIAG: /* XXX check privs */ return mwl_ioctl_diag(sc, (struct mwl_diag *) ifr); case SIOCGMVRESET: /* XXX check privs */ MWL_LOCK(sc); error = mwl_ioctl_reset(sc,(struct mwl_diag *) ifr); MWL_UNLOCK(sc); break; #endif /* MWL_DIAGAPI */ default: error = ENOTTY; break; } return (error); } #ifdef MWL_DEBUG static int mwl_sysctl_debug(SYSCTL_HANDLER_ARGS) { struct mwl_softc *sc = arg1; int debug, error; debug = sc->sc_debug | (mwl_hal_getdebug(sc->sc_mh) << 24); error = sysctl_handle_int(oidp, &debug, 0, req); if (error || !req->newptr) return error; mwl_hal_setdebug(sc->sc_mh, debug >> 24); sc->sc_debug = debug & 0x00ffffff; return 0; } #endif /* MWL_DEBUG */ static void mwl_sysctlattach(struct mwl_softc *sc) { #ifdef MWL_DEBUG struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); sc->sc_debug = mwl_debug; SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW, sc, 0, mwl_sysctl_debug, "I", "control debugging printfs"); #endif } /* * Announce various information on device/driver attach. */ static void mwl_announce(struct mwl_softc *sc) { device_printf(sc->sc_dev, "Rev A%d hardware, v%d.%d.%d.%d firmware (regioncode %d)\n", sc->sc_hwspecs.hwVersion, (sc->sc_hwspecs.fwReleaseNumber>>24) & 0xff, (sc->sc_hwspecs.fwReleaseNumber>>16) & 0xff, (sc->sc_hwspecs.fwReleaseNumber>>8) & 0xff, (sc->sc_hwspecs.fwReleaseNumber>>0) & 0xff, sc->sc_hwspecs.regionCode); sc->sc_fwrelease = sc->sc_hwspecs.fwReleaseNumber; if (bootverbose) { int i; for (i = 0; i <= WME_AC_VO; i++) { struct mwl_txq *txq = sc->sc_ac2q[i]; device_printf(sc->sc_dev, "Use hw queue %u for %s traffic\n", txq->qnum, ieee80211_wme_acnames[i]); } } if (bootverbose || mwl_rxdesc != MWL_RXDESC) device_printf(sc->sc_dev, "using %u rx descriptors\n", mwl_rxdesc); if (bootverbose || mwl_rxbuf != MWL_RXBUF) device_printf(sc->sc_dev, "using %u rx buffers\n", mwl_rxbuf); if (bootverbose || mwl_txbuf != MWL_TXBUF) device_printf(sc->sc_dev, "using %u tx buffers\n", mwl_txbuf); if (bootverbose && mwl_hal_ismbsscapable(sc->sc_mh)) device_printf(sc->sc_dev, "multi-bss support\n"); #ifdef MWL_TX_NODROP if (bootverbose) device_printf(sc->sc_dev, "no tx drop\n"); #endif } Index: head/sys/dev/rtwn/if_rtwn_cam.c =================================================================== --- head/sys/dev/rtwn/if_rtwn_cam.c (revision 309685) +++ head/sys/dev/rtwn/if_rtwn_cam.c (revision 309686) @@ -1,360 +1,364 @@ /* $OpenBSD: if_urtwn.c,v 1.16 2011/02/10 17:26:40 jakemsr Exp $ */ /*- * Copyright (c) 2010 Damien Bergamini * Copyright (c) 2014 Kevin Lo * Copyright (c) 2015-2016 Andriy Voskoboinyk * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ #include __FBSDID("$FreeBSD$"); #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include void rtwn_init_cam(struct rtwn_softc *sc) { /* Invalidate all CAM entries. */ rtwn_write_4(sc, R92C_CAMCMD, R92C_CAMCMD_POLLING | R92C_CAMCMD_CLR); } static int rtwn_cam_write(struct rtwn_softc *sc, uint32_t addr, uint32_t data) { int error; error = rtwn_write_4(sc, R92C_CAMWRITE, data); if (error != 0) return (error); error = rtwn_write_4(sc, R92C_CAMCMD, R92C_CAMCMD_POLLING | R92C_CAMCMD_WRITE | SM(R92C_CAMCMD_ADDR, addr)); return (error); } void rtwn_init_seccfg(struct rtwn_softc *sc) { uint16_t seccfg; /* Select decryption / encryption flags. */ seccfg = 0; switch (sc->sc_hwcrypto) { case RTWN_CRYPTO_SW: break; /* nothing to do */ case RTWN_CRYPTO_PAIR: /* NB: TXUCKEY_DEF / RXUCKEY_DEF are required for RTL8192C */ seccfg = R92C_SECCFG_TXUCKEY_DEF | R92C_SECCFG_RXUCKEY_DEF | R92C_SECCFG_TXENC_ENA | R92C_SECCFG_RXDEC_ENA | R92C_SECCFG_MC_SRCH_DIS; break; case RTWN_CRYPTO_FULL: seccfg = R92C_SECCFG_TXUCKEY_DEF | R92C_SECCFG_RXUCKEY_DEF | R92C_SECCFG_TXENC_ENA | R92C_SECCFG_RXDEC_ENA | R92C_SECCFG_TXBCKEY_DEF | R92C_SECCFG_RXBCKEY_DEF; break; default: KASSERT(0, ("%s: case %d was not handled\n", __func__, sc->sc_hwcrypto)); break; } RTWN_DPRINTF(sc, RTWN_DEBUG_KEY, "%s: seccfg %04X, hwcrypto %d\n", __func__, seccfg, sc->sc_hwcrypto); rtwn_write_2(sc, R92C_SECCFG, seccfg); } int rtwn_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k, ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix) { struct rtwn_softc *sc = vap->iv_ic->ic_softc; int i, start; if (&vap->iv_nw_keys[0] <= k && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { +#if __FreeBSD_version > 1200018 + *keyix = ieee80211_crypto_get_key_wepidx(vap, k); +#else *keyix = k - vap->iv_nw_keys; +#endif if (sc->sc_hwcrypto != RTWN_CRYPTO_FULL) k->wk_flags |= IEEE80211_KEY_SWCRYPT; else { RTWN_LOCK(sc); if (isset(sc->keys_bmap, *keyix)) { device_printf(sc->sc_dev, "%s: group key slot %d is already used!\n", __func__, *keyix); /* XXX recover? */ RTWN_UNLOCK(sc); return (0); } setbit(sc->keys_bmap, *keyix); RTWN_UNLOCK(sc); } goto end; } start = sc->cam_entry_limit; switch (sc->sc_hwcrypto) { case RTWN_CRYPTO_SW: k->wk_flags |= IEEE80211_KEY_SWCRYPT; *keyix = 0; goto end; case RTWN_CRYPTO_PAIR: /* all slots for pairwise keys. */ start = 0; RTWN_LOCK(sc); if (sc->sc_flags & RTWN_FLAG_CAM_FIXED) start = 4; RTWN_UNLOCK(sc); break; case RTWN_CRYPTO_FULL: /* first 4 - for group keys, others for pairwise. */ start = 4; break; default: KASSERT(0, ("%s: case %d was not handled!\n", __func__, sc->sc_hwcrypto)); break; } RTWN_LOCK(sc); for (i = start; i < sc->cam_entry_limit; i++) { if (isclr(sc->keys_bmap, i)) { setbit(sc->keys_bmap, i); *keyix = i; break; } } RTWN_UNLOCK(sc); if (i == sc->cam_entry_limit) { #if __FreeBSD_version > 1200008 /* XXX check and remove keys with the same MAC address */ k->wk_flags |= IEEE80211_KEY_SWCRYPT; *keyix = 0; #else device_printf(sc->sc_dev, "%s: no free space in the key table\n", __func__); return (0); #endif } end: *rxkeyix = *keyix; return (1); } static int rtwn_key_set_cb0(struct rtwn_softc *sc, const struct ieee80211_key *k) { uint8_t algo, keyid; int i, error; if (sc->sc_hwcrypto == RTWN_CRYPTO_FULL && k->wk_keyix < IEEE80211_WEP_NKID) keyid = k->wk_keyix; else keyid = 0; /* Map net80211 cipher to HW crypto algorithm. 
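 *
 * (Elsewhere in this file the change obtains the group key index via
 * ieee80211_crypto_get_key_wepidx() when the net80211 in the tree
 * provides it, and falls back to pointer arithmetic against iv_nw_keys
 * on older trees.  The compat idiom in isolation, as a made-up helper:)
 */
#if 0	/* illustrative sketch only; not compiled */
static ieee80211_keyix
rtwn_group_keyix(struct ieee80211vap *vap, const struct ieee80211_key *k)
{
#if __FreeBSD_version > 1200018
	return (ieee80211_crypto_get_key_wepidx(vap, k));
#else
	return (k - vap->iv_nw_keys);
#endif
}
#endif
/*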
*/ switch (k->wk_cipher->ic_cipher) { case IEEE80211_CIPHER_WEP: if (k->wk_keylen < 8) algo = R92C_CAM_ALGO_WEP40; else algo = R92C_CAM_ALGO_WEP104; break; case IEEE80211_CIPHER_TKIP: algo = R92C_CAM_ALGO_TKIP; break; case IEEE80211_CIPHER_AES_CCM: algo = R92C_CAM_ALGO_AES; break; default: device_printf(sc->sc_dev, "%s: unknown cipher %u\n", __func__, k->wk_cipher->ic_cipher); return (EINVAL); } RTWN_DPRINTF(sc, RTWN_DEBUG_KEY, "%s: keyix %u, keyid %u, algo %u/%u, flags %04X, len %u, " "macaddr %s\n", __func__, k->wk_keyix, keyid, k->wk_cipher->ic_cipher, algo, k->wk_flags, k->wk_keylen, ether_sprintf(k->wk_macaddr)); /* Clear high bits. */ rtwn_cam_write(sc, R92C_CAM_CTL6(k->wk_keyix), 0); rtwn_cam_write(sc, R92C_CAM_CTL7(k->wk_keyix), 0); /* Write key. */ for (i = 0; i < 4; i++) { error = rtwn_cam_write(sc, R92C_CAM_KEY(k->wk_keyix, i), le32dec(&k->wk_key[i * 4])); if (error != 0) goto fail; } /* Write CTL0 last since that will validate the CAM entry. */ error = rtwn_cam_write(sc, R92C_CAM_CTL1(k->wk_keyix), le32dec(&k->wk_macaddr[2])); if (error != 0) goto fail; error = rtwn_cam_write(sc, R92C_CAM_CTL0(k->wk_keyix), SM(R92C_CAM_ALGO, algo) | SM(R92C_CAM_KEYID, keyid) | SM(R92C_CAM_MACLO, le16dec(&k->wk_macaddr[0])) | R92C_CAM_VALID); if (error != 0) goto fail; return (0); fail: device_printf(sc->sc_dev, "%s fails, error %d\n", __func__, error); return (error); } static void rtwn_key_set_cb(struct rtwn_softc *sc, union sec_param *data) { const struct ieee80211_key *k = &data->key; (void) rtwn_key_set_cb0(sc, k); } int rtwn_init_static_keys(struct rtwn_softc *sc, struct rtwn_vap *rvp) { int i, error; if (sc->sc_hwcrypto != RTWN_CRYPTO_FULL) return (0); /* nothing to do */ for (i = 0; i < IEEE80211_WEP_NKID; i++) { const struct ieee80211_key *k = rvp->keys[i]; if (k != NULL) { error = rtwn_key_set_cb0(sc, k); if (error != 0) return (error); } } return (0); } static void rtwn_key_del_cb(struct rtwn_softc *sc, union sec_param *data) { struct ieee80211_key *k = &data->key; int i; RTWN_DPRINTF(sc, RTWN_DEBUG_KEY, "%s: keyix %u, flags %04X, macaddr %s\n", __func__, k->wk_keyix, k->wk_flags, ether_sprintf(k->wk_macaddr)); rtwn_cam_write(sc, R92C_CAM_CTL0(k->wk_keyix), 0); rtwn_cam_write(sc, R92C_CAM_CTL1(k->wk_keyix), 0); /* Clear key. */ for (i = 0; i < 4; i++) rtwn_cam_write(sc, R92C_CAM_KEY(k->wk_keyix, i), 0); clrbit(sc->keys_bmap, k->wk_keyix); } static int rtwn_process_key(struct ieee80211vap *vap, const struct ieee80211_key *k, int set) { struct rtwn_softc *sc = vap->iv_ic->ic_softc; if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { /* Not for us. */ return (1); } if (&vap->iv_nw_keys[0] <= k && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { #if __FreeBSD_version <= 1200008 struct ieee80211_key *k1 = &vap->iv_nw_keys[k->wk_keyix]; if (sc->sc_hwcrypto != RTWN_CRYPTO_FULL) { k1->wk_flags |= IEEE80211_KEY_SWCRYPT; return (k->wk_cipher->ic_setkey(k1)); } else { #else if (sc->sc_hwcrypto == RTWN_CRYPTO_FULL) { #endif struct rtwn_vap *rvp = RTWN_VAP(vap); RTWN_LOCK(sc); rvp->keys[k->wk_keyix] = (set ? k : NULL); if ((sc->sc_flags & RTWN_RUNNING) == 0) { if (!set) clrbit(sc->keys_bmap, k->wk_keyix); RTWN_UNLOCK(sc); return (1); } RTWN_UNLOCK(sc); } } return (!rtwn_cmd_sleepable(sc, k, sizeof(*k), set ? 
rtwn_key_set_cb : rtwn_key_del_cb)); } int rtwn_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k) { return (rtwn_process_key(vap, k, 1)); } int rtwn_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) { return (rtwn_process_key(vap, k, 0)); } Index: head/sys/dev/usb/wlan/if_rsu.c =================================================================== --- head/sys/dev/usb/wlan/if_rsu.c (revision 309685) +++ head/sys/dev/usb/wlan/if_rsu.c (revision 309686) @@ -1,3447 +1,3447 @@ /* $OpenBSD: if_rsu.c,v 1.17 2013/04/15 09:23:01 mglocker Exp $ */ /*- * Copyright (c) 2010 Damien Bergamini * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /* * Driver for Realtek RTL8188SU/RTL8191SU/RTL8192SU. * * TODO: * o tx a-mpdu * o monitor / hostap / ibss / mesh * o power-save operation */ #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "usbdevs.h" #define USB_DEBUG_VAR rsu_debug #include #include #ifdef USB_DEBUG static int rsu_debug = 0; SYSCTL_NODE(_hw_usb, OID_AUTO, rsu, CTLFLAG_RW, 0, "USB rsu"); SYSCTL_INT(_hw_usb_rsu, OID_AUTO, debug, CTLFLAG_RWTUN, &rsu_debug, 0, "Debug level"); #define RSU_DPRINTF(_sc, _flg, ...) \ do \ if (((_flg) == (RSU_DEBUG_ANY)) || (rsu_debug & (_flg))) \ device_printf((_sc)->sc_dev, __VA_ARGS__); \ while (0) #else #define RSU_DPRINTF(_sc, _flg, ...) 
#endif static int rsu_enable_11n = 1; TUNABLE_INT("hw.usb.rsu.enable_11n", &rsu_enable_11n); #define RSU_DEBUG_ANY 0xffffffff #define RSU_DEBUG_TX 0x00000001 #define RSU_DEBUG_RX 0x00000002 #define RSU_DEBUG_RESET 0x00000004 #define RSU_DEBUG_CALIB 0x00000008 #define RSU_DEBUG_STATE 0x00000010 #define RSU_DEBUG_SCAN 0x00000020 #define RSU_DEBUG_FWCMD 0x00000040 #define RSU_DEBUG_TXDONE 0x00000080 #define RSU_DEBUG_FW 0x00000100 #define RSU_DEBUG_FWDBG 0x00000200 #define RSU_DEBUG_AMPDU 0x00000400 #define RSU_DEBUG_KEY 0x00000800 static const STRUCT_USB_HOST_ID rsu_devs[] = { #define RSU_HT_NOT_SUPPORTED 0 #define RSU_HT_SUPPORTED 1 #define RSU_DEV_HT(v,p) { USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, \ RSU_HT_SUPPORTED) } #define RSU_DEV(v,p) { USB_VPI(USB_VENDOR_##v, USB_PRODUCT_##v##_##p, \ RSU_HT_NOT_SUPPORTED) } RSU_DEV(ASUS, RTL8192SU), RSU_DEV(AZUREWAVE, RTL8192SU_4), RSU_DEV_HT(ACCTON, RTL8192SU), RSU_DEV_HT(ASUS, USBN10), RSU_DEV_HT(AZUREWAVE, RTL8192SU_1), RSU_DEV_HT(AZUREWAVE, RTL8192SU_2), RSU_DEV_HT(AZUREWAVE, RTL8192SU_3), RSU_DEV_HT(AZUREWAVE, RTL8192SU_5), RSU_DEV_HT(BELKIN, RTL8192SU_1), RSU_DEV_HT(BELKIN, RTL8192SU_2), RSU_DEV_HT(BELKIN, RTL8192SU_3), RSU_DEV_HT(CONCEPTRONIC2, RTL8192SU_1), RSU_DEV_HT(CONCEPTRONIC2, RTL8192SU_2), RSU_DEV_HT(CONCEPTRONIC2, RTL8192SU_3), RSU_DEV_HT(COREGA, RTL8192SU), RSU_DEV_HT(DLINK2, DWA131A1), RSU_DEV_HT(DLINK2, RTL8192SU_1), RSU_DEV_HT(DLINK2, RTL8192SU_2), RSU_DEV_HT(EDIMAX, RTL8192SU_1), RSU_DEV_HT(EDIMAX, RTL8192SU_2), RSU_DEV_HT(EDIMAX, EW7622UMN), RSU_DEV_HT(GUILLEMOT, HWGUN54), RSU_DEV_HT(GUILLEMOT, HWNUM300), RSU_DEV_HT(HAWKING, RTL8192SU_1), RSU_DEV_HT(HAWKING, RTL8192SU_2), RSU_DEV_HT(PLANEX2, GWUSNANO), RSU_DEV_HT(REALTEK, RTL8171), RSU_DEV_HT(REALTEK, RTL8172), RSU_DEV_HT(REALTEK, RTL8173), RSU_DEV_HT(REALTEK, RTL8174), RSU_DEV_HT(REALTEK, RTL8192SU), RSU_DEV_HT(REALTEK, RTL8712), RSU_DEV_HT(REALTEK, RTL8713), RSU_DEV_HT(SENAO, RTL8192SU_1), RSU_DEV_HT(SENAO, RTL8192SU_2), RSU_DEV_HT(SITECOMEU, WL349V1), RSU_DEV_HT(SITECOMEU, WL353), RSU_DEV_HT(SWEEX2, LW154), RSU_DEV_HT(TRENDNET, TEW646UBH), #undef RSU_DEV_HT #undef RSU_DEV }; static device_probe_t rsu_match; static device_attach_t rsu_attach; static device_detach_t rsu_detach; static usb_callback_t rsu_bulk_tx_callback_be_bk; static usb_callback_t rsu_bulk_tx_callback_vi_vo; static usb_callback_t rsu_bulk_tx_callback_h2c; static usb_callback_t rsu_bulk_rx_callback; static usb_error_t rsu_do_request(struct rsu_softc *, struct usb_device_request *, void *); static struct ieee80211vap * rsu_vap_create(struct ieee80211com *, const char name[], int, enum ieee80211_opmode, int, const uint8_t bssid[], const uint8_t mac[]); static void rsu_vap_delete(struct ieee80211vap *); static void rsu_scan_start(struct ieee80211com *); static void rsu_scan_end(struct ieee80211com *); static void rsu_getradiocaps(struct ieee80211com *, int, int *, struct ieee80211_channel[]); static void rsu_set_channel(struct ieee80211com *); static void rsu_scan_curchan(struct ieee80211_scan_state *, unsigned long); static void rsu_scan_mindwell(struct ieee80211_scan_state *); static uint8_t rsu_get_multi_pos(const uint8_t[]); static void rsu_set_multi(struct rsu_softc *); static void rsu_update_mcast(struct ieee80211com *); static int rsu_alloc_rx_list(struct rsu_softc *); static void rsu_free_rx_list(struct rsu_softc *); static int rsu_alloc_tx_list(struct rsu_softc *); static void rsu_free_tx_list(struct rsu_softc *); static void rsu_free_list(struct rsu_softc *, struct rsu_data [], int); static struct 
rsu_data *_rsu_getbuf(struct rsu_softc *); static struct rsu_data *rsu_getbuf(struct rsu_softc *); static void rsu_freebuf(struct rsu_softc *, struct rsu_data *); static int rsu_write_region_1(struct rsu_softc *, uint16_t, uint8_t *, int); static void rsu_write_1(struct rsu_softc *, uint16_t, uint8_t); static void rsu_write_2(struct rsu_softc *, uint16_t, uint16_t); static void rsu_write_4(struct rsu_softc *, uint16_t, uint32_t); static int rsu_read_region_1(struct rsu_softc *, uint16_t, uint8_t *, int); static uint8_t rsu_read_1(struct rsu_softc *, uint16_t); static uint16_t rsu_read_2(struct rsu_softc *, uint16_t); static uint32_t rsu_read_4(struct rsu_softc *, uint16_t); static int rsu_fw_iocmd(struct rsu_softc *, uint32_t); static uint8_t rsu_efuse_read_1(struct rsu_softc *, uint16_t); static int rsu_read_rom(struct rsu_softc *); static int rsu_fw_cmd(struct rsu_softc *, uint8_t, void *, int); static void rsu_calib_task(void *, int); static void rsu_tx_task(void *, int); static int rsu_newstate(struct ieee80211vap *, enum ieee80211_state, int); static int rsu_key_alloc(struct ieee80211vap *, struct ieee80211_key *, ieee80211_keyix *, ieee80211_keyix *); static int rsu_process_key(struct ieee80211vap *, const struct ieee80211_key *, int); static int rsu_key_set(struct ieee80211vap *, const struct ieee80211_key *); static int rsu_key_delete(struct ieee80211vap *, const struct ieee80211_key *); static int rsu_cam_read(struct rsu_softc *, uint8_t, uint32_t *); static void rsu_cam_write(struct rsu_softc *, uint8_t, uint32_t); static int rsu_key_check(struct rsu_softc *, ieee80211_keyix, int); static uint8_t rsu_crypto_mode(struct rsu_softc *, u_int, int); static int rsu_set_key_group(struct rsu_softc *, const struct ieee80211_key *); static int rsu_set_key_pair(struct rsu_softc *, const struct ieee80211_key *); static int rsu_reinit_static_keys(struct rsu_softc *); static int rsu_delete_key(struct rsu_softc *sc, ieee80211_keyix); static void rsu_delete_key_pair_cb(void *, int); static int rsu_site_survey(struct rsu_softc *, struct ieee80211_scan_ssid *); static int rsu_join_bss(struct rsu_softc *, struct ieee80211_node *); static int rsu_disconnect(struct rsu_softc *); static int rsu_hwrssi_to_rssi(struct rsu_softc *, int hw_rssi); static void rsu_event_survey(struct rsu_softc *, uint8_t *, int); static void rsu_event_join_bss(struct rsu_softc *, uint8_t *, int); static void rsu_rx_event(struct rsu_softc *, uint8_t, uint8_t *, int); static void rsu_rx_multi_event(struct rsu_softc *, uint8_t *, int); static int8_t rsu_get_rssi(struct rsu_softc *, int, void *); static struct mbuf * rsu_rx_copy_to_mbuf(struct rsu_softc *, struct r92s_rx_stat *, int); static struct ieee80211_node * rsu_rx_frame(struct rsu_softc *, struct mbuf *, int8_t *); static struct mbuf * rsu_rx_multi_frame(struct rsu_softc *, uint8_t *, int); static struct mbuf * rsu_rxeof(struct usb_xfer *, struct rsu_data *); static void rsu_txeof(struct usb_xfer *, struct rsu_data *); static int rsu_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void rsu_init(struct rsu_softc *); static int rsu_tx_start(struct rsu_softc *, struct ieee80211_node *, struct mbuf *, struct rsu_data *); static int rsu_transmit(struct ieee80211com *, struct mbuf *); static void rsu_start(struct rsu_softc *); static void _rsu_start(struct rsu_softc *); static void rsu_parent(struct ieee80211com *); static void rsu_stop(struct rsu_softc *); static void rsu_ms_delay(struct rsu_softc *, int); static device_method_t 
rsu_methods[] = { DEVMETHOD(device_probe, rsu_match), DEVMETHOD(device_attach, rsu_attach), DEVMETHOD(device_detach, rsu_detach), DEVMETHOD_END }; static driver_t rsu_driver = { .name = "rsu", .methods = rsu_methods, .size = sizeof(struct rsu_softc) }; static devclass_t rsu_devclass; DRIVER_MODULE(rsu, uhub, rsu_driver, rsu_devclass, NULL, 0); MODULE_DEPEND(rsu, wlan, 1, 1, 1); MODULE_DEPEND(rsu, usb, 1, 1, 1); MODULE_DEPEND(rsu, firmware, 1, 1, 1); MODULE_VERSION(rsu, 1); USB_PNP_HOST_INFO(rsu_devs); static const uint8_t rsu_chan_2ghz[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 }; static uint8_t rsu_wme_ac_xfer_map[4] = { [WME_AC_BE] = RSU_BULK_TX_BE_BK, [WME_AC_BK] = RSU_BULK_TX_BE_BK, [WME_AC_VI] = RSU_BULK_TX_VI_VO, [WME_AC_VO] = RSU_BULK_TX_VI_VO, }; /* XXX hard-coded */ #define RSU_H2C_ENDPOINT 3 static const struct usb_config rsu_config[RSU_N_TRANSFER] = { [RSU_BULK_RX] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = RSU_RXBUFSZ, .flags = { .pipe_bof = 1, .short_xfer_ok = 1 }, .callback = rsu_bulk_rx_callback }, [RSU_BULK_TX_BE_BK] = { .type = UE_BULK, .endpoint = 0x06, .direction = UE_DIR_OUT, .bufsize = RSU_TXBUFSZ, .flags = { .ext_buffer = 1, .pipe_bof = 1, .force_short_xfer = 1 }, .callback = rsu_bulk_tx_callback_be_bk, .timeout = RSU_TX_TIMEOUT }, [RSU_BULK_TX_VI_VO] = { .type = UE_BULK, .endpoint = 0x04, .direction = UE_DIR_OUT, .bufsize = RSU_TXBUFSZ, .flags = { .ext_buffer = 1, .pipe_bof = 1, .force_short_xfer = 1 }, .callback = rsu_bulk_tx_callback_vi_vo, .timeout = RSU_TX_TIMEOUT }, [RSU_BULK_TX_H2C] = { .type = UE_BULK, .endpoint = 0x0d, .direction = UE_DIR_OUT, .bufsize = RSU_TXBUFSZ, .flags = { .ext_buffer = 1, .pipe_bof = 1, .short_xfer_ok = 1 }, .callback = rsu_bulk_tx_callback_h2c, .timeout = RSU_TX_TIMEOUT }, }; static int rsu_match(device_t self) { struct usb_attach_arg *uaa = device_get_ivars(self); if (uaa->usb_mode != USB_MODE_HOST || uaa->info.bIfaceIndex != 0 || uaa->info.bConfigIndex != 0) return (ENXIO); return (usbd_lookup_id_by_uaa(rsu_devs, sizeof(rsu_devs), uaa)); } static int rsu_send_mgmt(struct ieee80211_node *ni, int type, int arg) { return (ENOTSUP); } static void rsu_update_chw(struct ieee80211com *ic) { } /* * notification from net80211 that it'd like to do A-MPDU on the given TID. * * Note: this actually hangs traffic at the present moment, so don't use it. * The firmware debug does indiciate it's sending and establishing a TX AMPDU * session, but then no traffic flows. 
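 *
 * (Separate from the A-MPDU question above: the four WME access
 * categories are folded onto just two bulk-out data pipes by
 * rsu_wme_ac_xfer_map[], BE/BK on endpoint 0x06 and VI/VO on 0x04,
 * with a third bulk-out pipe (0x0d) reserved for H2C firmware
 * commands.  A sketch of how the tx path consumes the table; the
 * function name is made up:)
 */
#if 0	/* illustrative sketch only; not compiled */
static void
rsu_tx_kick_example(struct rsu_softc *sc, struct mbuf *m,
    struct rsu_data *data)
{
	uint8_t which;

	/* BE/BK share one bulk-out pipe, VI/VO the other. */
	which = rsu_wme_ac_xfer_map[M_WME_GETAC(m)];
	STAILQ_INSERT_TAIL(&sc->sc_tx_pending[which], data, next);
	usbd_transfer_start(sc->sc_xfer[which]);
}
#endif
/*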
*/ static int rsu_ampdu_enable(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) { #if 0 struct rsu_softc *sc = ni->ni_ic->ic_softc; struct r92s_add_ba_req req; /* Don't enable if it's requested or running */ if (IEEE80211_AMPDU_REQUESTED(tap)) return (0); if (IEEE80211_AMPDU_RUNNING(tap)) return (0); /* We've decided to send addba; so send it */ req.tid = htole32(tap->txa_tid); /* Attempt net80211 state */ if (ieee80211_ampdu_tx_request_ext(ni, tap->txa_tid) != 1) return (0); /* Send the firmware command */ RSU_DPRINTF(sc, RSU_DEBUG_AMPDU, "%s: establishing AMPDU TX for TID %d\n", __func__, tap->txa_tid); RSU_LOCK(sc); if (rsu_fw_cmd(sc, R92S_CMD_ADDBA_REQ, &req, sizeof(req)) != 1) { RSU_UNLOCK(sc); /* Mark failure */ (void) ieee80211_ampdu_tx_request_active_ext(ni, tap->txa_tid, 0); return (0); } RSU_UNLOCK(sc); /* Mark success; we don't get any further notifications */ (void) ieee80211_ampdu_tx_request_active_ext(ni, tap->txa_tid, 1); #endif /* Return 0, we're driving this ourselves */ return (0); } static int rsu_wme_update(struct ieee80211com *ic) { /* Firmware handles this; not our problem */ return (0); } static int rsu_attach(device_t self) { struct usb_attach_arg *uaa = device_get_ivars(self); struct rsu_softc *sc = device_get_softc(self); struct ieee80211com *ic = &sc->sc_ic; int error; uint8_t iface_index; struct usb_interface *iface; const char *rft; device_set_usb_desc(self); sc->sc_udev = uaa->device; sc->sc_dev = self; if (rsu_enable_11n) sc->sc_ht = !! (USB_GET_DRIVER_INFO(uaa) & RSU_HT_SUPPORTED); /* Get number of endpoints */ iface = usbd_get_iface(sc->sc_udev, 0); sc->sc_nendpoints = iface->idesc->bNumEndpoints; /* Endpoints are hard-coded for now, so enforce 4-endpoint only */ if (sc->sc_nendpoints != 4) { device_printf(sc->sc_dev, "the driver currently only supports 4-endpoint devices\n"); return (ENXIO); } mtx_init(&sc->sc_mtx, device_get_nameunit(self), MTX_NETWORK_LOCK, MTX_DEF); RSU_DELKEY_BMAP_LOCK_INIT(sc); TIMEOUT_TASK_INIT(taskqueue_thread, &sc->calib_task, 0, rsu_calib_task, sc); TASK_INIT(&sc->del_key_task, 0, rsu_delete_key_pair_cb, sc); TASK_INIT(&sc->tx_task, 0, rsu_tx_task, sc); mbufq_init(&sc->sc_snd, ifqmaxlen); /* Allocate Tx/Rx buffers. */ error = rsu_alloc_rx_list(sc); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Rx buffers\n"); goto fail_usb; } error = rsu_alloc_tx_list(sc); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Tx buffers\n"); rsu_free_rx_list(sc); goto fail_usb; } iface_index = 0; error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer, rsu_config, RSU_N_TRANSFER, sc, &sc->sc_mtx); if (error) { device_printf(sc->sc_dev, "could not allocate USB transfers, err=%s\n", usbd_errstr(error)); goto fail_usb; } RSU_LOCK(sc); /* Read chip revision. 
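 *
 * (The raw PMC FSM "cut" field is remapped below: a raw value of 3 is
 * kept as-is, anything else becomes (raw >> 1) + 1, so raw 0 or 1
 * reports cut 1 and raw 2 reports cut 2.)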
*/ sc->cut = MS(rsu_read_4(sc, R92S_PMC_FSM), R92S_PMC_FSM_CUT); if (sc->cut != 3) sc->cut = (sc->cut >> 1) + 1; error = rsu_read_rom(sc); RSU_UNLOCK(sc); if (error != 0) { device_printf(self, "could not read ROM\n"); goto fail_rom; } /* Figure out TX/RX streams */ switch (sc->rom[84]) { case 0x0: sc->sc_rftype = RTL8712_RFCONFIG_1T1R; sc->sc_nrxstream = 1; sc->sc_ntxstream = 1; rft = "1T1R"; break; case 0x1: sc->sc_rftype = RTL8712_RFCONFIG_1T2R; sc->sc_nrxstream = 2; sc->sc_ntxstream = 1; rft = "1T2R"; break; case 0x2: sc->sc_rftype = RTL8712_RFCONFIG_2T2R; sc->sc_nrxstream = 2; sc->sc_ntxstream = 2; rft = "2T2R"; break; default: device_printf(sc->sc_dev, "%s: unknown board type (rfconfig=0x%02x)\n", __func__, sc->rom[84]); goto fail_rom; } IEEE80211_ADDR_COPY(ic->ic_macaddr, &sc->rom[0x12]); device_printf(self, "MAC/BB RTL8712 cut %d %s\n", sc->cut, rft); ic->ic_softc = sc; ic->ic_name = device_get_nameunit(self); ic->ic_phytype = IEEE80211_T_OFDM; /* Not only, but not used. */ ic->ic_opmode = IEEE80211_M_STA; /* Default to BSS mode. */ /* Set device capabilities. */ ic->ic_caps = IEEE80211_C_STA | /* station mode */ #if 0 IEEE80211_C_BGSCAN | /* Background scan. */ #endif IEEE80211_C_SHPREAMBLE | /* Short preamble supported. */ IEEE80211_C_WME | /* WME/QoS */ IEEE80211_C_SHSLOT | /* Short slot time supported. */ IEEE80211_C_WPA; /* WPA/RSN. */ ic->ic_cryptocaps = IEEE80211_CRYPTO_WEP | IEEE80211_CRYPTO_TKIP | IEEE80211_CRYPTO_AES_CCM; /* Check if HT support is present. */ if (sc->sc_ht) { device_printf(sc->sc_dev, "%s: enabling 11n\n", __func__); /* Enable basic HT */ ic->ic_htcaps = IEEE80211_HTC_HT | #if 0 IEEE80211_HTC_AMPDU | #endif IEEE80211_HTC_AMSDU | IEEE80211_HTCAP_MAXAMSDU_3839 | IEEE80211_HTCAP_SMPS_OFF; ic->ic_htcaps |= IEEE80211_HTCAP_CHWIDTH40; /* set number of spatial streams */ ic->ic_txstream = sc->sc_ntxstream; ic->ic_rxstream = sc->sc_nrxstream; } ic->ic_flags_ext |= IEEE80211_FEXT_SCAN_OFFLOAD; rsu_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels); ieee80211_ifattach(ic); ic->ic_raw_xmit = rsu_raw_xmit; ic->ic_scan_start = rsu_scan_start; ic->ic_scan_end = rsu_scan_end; ic->ic_getradiocaps = rsu_getradiocaps; ic->ic_set_channel = rsu_set_channel; ic->ic_scan_curchan = rsu_scan_curchan; ic->ic_scan_mindwell = rsu_scan_mindwell; ic->ic_vap_create = rsu_vap_create; ic->ic_vap_delete = rsu_vap_delete; ic->ic_update_mcast = rsu_update_mcast; ic->ic_parent = rsu_parent; ic->ic_transmit = rsu_transmit; ic->ic_send_mgmt = rsu_send_mgmt; ic->ic_update_chw = rsu_update_chw; ic->ic_ampdu_enable = rsu_ampdu_enable; ic->ic_wme.wme_update = rsu_wme_update; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), RSU_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), RSU_RX_RADIOTAP_PRESENT); if (bootverbose) ieee80211_announce(ic); return (0); fail_rom: usbd_transfer_unsetup(sc->sc_xfer, RSU_N_TRANSFER); fail_usb: mtx_destroy(&sc->sc_mtx); return (ENXIO); } static int rsu_detach(device_t self) { struct rsu_softc *sc = device_get_softc(self); struct ieee80211com *ic = &sc->sc_ic; RSU_LOCK(sc); rsu_stop(sc); RSU_UNLOCK(sc); usbd_transfer_unsetup(sc->sc_xfer, RSU_N_TRANSFER); /* * Free buffers /before/ we detach from net80211, else node * references to destroyed vaps will lead to a panic. */ /* Free Tx/Rx buffers. 
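 *
 * (Ordering matters here: rsu_free_list() drops the node references
 * held by in-flight buffers via ieee80211_free_node(), so the lists
 * must be torn down while the vaps they reference still exist, i.e.
 * before ieee80211_ifdetach() below.)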
*/ RSU_LOCK(sc); rsu_free_tx_list(sc); rsu_free_rx_list(sc); RSU_UNLOCK(sc); /* Frames are freed; detach from net80211 */ ieee80211_ifdetach(ic); taskqueue_drain_timeout(taskqueue_thread, &sc->calib_task); taskqueue_drain(taskqueue_thread, &sc->del_key_task); taskqueue_drain(taskqueue_thread, &sc->tx_task); RSU_DELKEY_BMAP_LOCK_DESTROY(sc); mtx_destroy(&sc->sc_mtx); return (0); } static usb_error_t rsu_do_request(struct rsu_softc *sc, struct usb_device_request *req, void *data) { usb_error_t err; int ntries = 10; RSU_ASSERT_LOCKED(sc); while (ntries--) { err = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, req, data, 0, NULL, 250 /* ms */); if (err == 0 || err == USB_ERR_NOT_CONFIGURED) break; DPRINTFN(1, "Control request failed, %s (retrying)\n", usbd_errstr(err)); rsu_ms_delay(sc, 10); } return (err); } static struct ieee80211vap * rsu_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct rsu_vap *uvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return (NULL); uvp = malloc(sizeof(struct rsu_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &uvp->vap; if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid) != 0) { /* out of memory */ free(uvp, M_80211_VAP); return (NULL); } /* override state transition machine */ uvp->newstate = vap->iv_newstate; vap->iv_newstate = rsu_newstate; vap->iv_key_alloc = rsu_key_alloc; vap->iv_key_set = rsu_key_set; vap->iv_key_delete = rsu_key_delete; /* Limits from the r92su driver */ vap->iv_ampdu_density = IEEE80211_HTCAP_MPDUDENSITY_16; vap->iv_ampdu_rxmax = IEEE80211_HTCAP_MAXRXAMPDU_32K; /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status, mac); ic->ic_opmode = opmode; return (vap); } static void rsu_vap_delete(struct ieee80211vap *vap) { struct rsu_vap *uvp = RSU_VAP(vap); ieee80211_vap_detach(vap); free(uvp, M_80211_VAP); } static void rsu_scan_start(struct ieee80211com *ic) { struct rsu_softc *sc = ic->ic_softc; struct ieee80211_scan_state *ss = ic->ic_scan; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); int error; /* Scanning is done by the firmware. */ RSU_LOCK(sc); sc->sc_active_scan = !!(ss->ss_flags & IEEE80211_SCAN_ACTIVE); /* XXX TODO: force awake if in network-sleep? */ error = rsu_site_survey(sc, ss->ss_nssid > 0 ? &ss->ss_ssid[0] : NULL); RSU_UNLOCK(sc); if (error != 0) { device_printf(sc->sc_dev, "could not send site survey command\n"); ieee80211_cancel_scan(vap); } } static void rsu_scan_end(struct ieee80211com *ic) { /* Nothing to do here. */ } static void rsu_getradiocaps(struct ieee80211com *ic, int maxchans, int *nchans, struct ieee80211_channel chans[]) { struct rsu_softc *sc = ic->ic_softc; uint8_t bands[IEEE80211_MODE_BYTES]; /* Set supported .11b and .11g rates. */ memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11G); if (sc->sc_ht) setbit(bands, IEEE80211_MODE_11NG); ieee80211_add_channel_list_2ghz(chans, maxchans, nchans, rsu_chan_2ghz, nitems(rsu_chan_2ghz), bands, 0); } static void rsu_set_channel(struct ieee80211com *ic __unused) { /* We are unable to switch channels, yet. */ } static void rsu_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) { /* Scan is done in rsu_scan_start(). */ } /** * Called by the net80211 framework to indicate * the minimum dwell time has been met, terminate the scan. 
* We don't actually terminate the scan as the firmware will notify * us when it's finished and we have no way to interrupt it. */ static void rsu_scan_mindwell(struct ieee80211_scan_state *ss) { /* NB: don't try to abort scan; wait for firmware to finish */ } /* * The same as rtwn_get_multi_pos() / rtwn_set_multi(). */ static uint8_t rsu_get_multi_pos(const uint8_t maddr[]) { uint64_t mask = 0x00004d101df481b4; uint8_t pos = 0x27; /* initial value */ int i, j; for (i = 0; i < IEEE80211_ADDR_LEN; i++) for (j = (i == 0) ? 1 : 0; j < 8; j++) if ((maddr[i] >> j) & 1) pos ^= (mask >> (i * 8 + j - 1)); pos &= 0x3f; return (pos); } static void rsu_set_multi(struct rsu_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; uint32_t mfilt[2]; RSU_ASSERT_LOCKED(sc); /* general structure was copied from ath(4). */ if (ic->ic_allmulti == 0) { struct ieee80211vap *vap; struct ifnet *ifp; struct ifmultiaddr *ifma; /* * Merge multicast addresses to form the hardware filter. */ mfilt[0] = mfilt[1] = 0; TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) { ifp = vap->iv_ifp; if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { caddr_t dl; uint8_t pos; dl = LLADDR((struct sockaddr_dl *) ifma->ifma_addr); pos = rsu_get_multi_pos(dl); mfilt[pos / 32] |= (1 << (pos % 32)); } if_maddr_runlock(ifp); } } else mfilt[0] = mfilt[1] = ~0; rsu_write_4(sc, R92S_MAR + 0, mfilt[0]); rsu_write_4(sc, R92S_MAR + 4, mfilt[1]); RSU_DPRINTF(sc, RSU_DEBUG_STATE, "%s: MC filter %08x:%08x\n", __func__, mfilt[0], mfilt[1]); } static void rsu_update_mcast(struct ieee80211com *ic) { struct rsu_softc *sc = ic->ic_softc; RSU_LOCK(sc); if (sc->sc_running) rsu_set_multi(sc); RSU_UNLOCK(sc); } static int rsu_alloc_list(struct rsu_softc *sc, struct rsu_data data[], int ndata, int maxsz) { int i, error; for (i = 0; i < ndata; i++) { struct rsu_data *dp = &data[i]; dp->sc = sc; dp->m = NULL; dp->buf = malloc(maxsz, M_USBDEV, M_NOWAIT); if (dp->buf == NULL) { device_printf(sc->sc_dev, "could not allocate buffer\n"); error = ENOMEM; goto fail; } dp->ni = NULL; } return (0); fail: rsu_free_list(sc, data, ndata); return (error); } static int rsu_alloc_rx_list(struct rsu_softc *sc) { int error, i; error = rsu_alloc_list(sc, sc->sc_rx, RSU_RX_LIST_COUNT, RSU_RXBUFSZ); if (error != 0) return (error); STAILQ_INIT(&sc->sc_rx_active); STAILQ_INIT(&sc->sc_rx_inactive); for (i = 0; i < RSU_RX_LIST_COUNT; i++) STAILQ_INSERT_HEAD(&sc->sc_rx_inactive, &sc->sc_rx[i], next); return (0); } static int rsu_alloc_tx_list(struct rsu_softc *sc) { int error, i; error = rsu_alloc_list(sc, sc->sc_tx, RSU_TX_LIST_COUNT, RSU_TXBUFSZ); if (error != 0) return (error); STAILQ_INIT(&sc->sc_tx_inactive); for (i = 0; i != RSU_N_TRANSFER; i++) { STAILQ_INIT(&sc->sc_tx_active[i]); STAILQ_INIT(&sc->sc_tx_pending[i]); } for (i = 0; i < RSU_TX_LIST_COUNT; i++) { STAILQ_INSERT_HEAD(&sc->sc_tx_inactive, &sc->sc_tx[i], next); } return (0); } static void rsu_free_tx_list(struct rsu_softc *sc) { int i; /* prevent further allocations from TX list(s) */ STAILQ_INIT(&sc->sc_tx_inactive); for (i = 0; i != RSU_N_TRANSFER; i++) { STAILQ_INIT(&sc->sc_tx_active[i]); STAILQ_INIT(&sc->sc_tx_pending[i]); } rsu_free_list(sc, sc->sc_tx, RSU_TX_LIST_COUNT); } static void rsu_free_rx_list(struct rsu_softc *sc) { /* prevent further allocations from RX list(s) */ STAILQ_INIT(&sc->sc_rx_inactive); STAILQ_INIT(&sc->sc_rx_active); rsu_free_list(sc, sc->sc_rx, RSU_RX_LIST_COUNT); } static void rsu_free_list(struct rsu_softc *sc, struct rsu_data data[], int ndata) { int i; for (i = 0; i < ndata; 
i++) { struct rsu_data *dp = &data[i]; if (dp->buf != NULL) { free(dp->buf, M_USBDEV); dp->buf = NULL; } if (dp->ni != NULL) { ieee80211_free_node(dp->ni); dp->ni = NULL; } } } static struct rsu_data * _rsu_getbuf(struct rsu_softc *sc) { struct rsu_data *bf; bf = STAILQ_FIRST(&sc->sc_tx_inactive); if (bf != NULL) STAILQ_REMOVE_HEAD(&sc->sc_tx_inactive, next); else bf = NULL; return (bf); } static struct rsu_data * rsu_getbuf(struct rsu_softc *sc) { struct rsu_data *bf; RSU_ASSERT_LOCKED(sc); bf = _rsu_getbuf(sc); if (bf == NULL) { RSU_DPRINTF(sc, RSU_DEBUG_TX, "%s: no buffers\n", __func__); } return (bf); } static void rsu_freebuf(struct rsu_softc *sc, struct rsu_data *bf) { RSU_ASSERT_LOCKED(sc); STAILQ_INSERT_TAIL(&sc->sc_tx_inactive, bf, next); } static int rsu_write_region_1(struct rsu_softc *sc, uint16_t addr, uint8_t *buf, int len) { usb_device_request_t req; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = R92S_REQ_REGS; USETW(req.wValue, addr); USETW(req.wIndex, 0); USETW(req.wLength, len); return (rsu_do_request(sc, &req, buf)); } static void rsu_write_1(struct rsu_softc *sc, uint16_t addr, uint8_t val) { rsu_write_region_1(sc, addr, &val, 1); } static void rsu_write_2(struct rsu_softc *sc, uint16_t addr, uint16_t val) { val = htole16(val); rsu_write_region_1(sc, addr, (uint8_t *)&val, 2); } static void rsu_write_4(struct rsu_softc *sc, uint16_t addr, uint32_t val) { val = htole32(val); rsu_write_region_1(sc, addr, (uint8_t *)&val, 4); } static int rsu_read_region_1(struct rsu_softc *sc, uint16_t addr, uint8_t *buf, int len) { usb_device_request_t req; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = R92S_REQ_REGS; USETW(req.wValue, addr); USETW(req.wIndex, 0); USETW(req.wLength, len); return (rsu_do_request(sc, &req, buf)); } static uint8_t rsu_read_1(struct rsu_softc *sc, uint16_t addr) { uint8_t val; if (rsu_read_region_1(sc, addr, &val, 1) != 0) return (0xff); return (val); } static uint16_t rsu_read_2(struct rsu_softc *sc, uint16_t addr) { uint16_t val; if (rsu_read_region_1(sc, addr, (uint8_t *)&val, 2) != 0) return (0xffff); return (le16toh(val)); } static uint32_t rsu_read_4(struct rsu_softc *sc, uint16_t addr) { uint32_t val; if (rsu_read_region_1(sc, addr, (uint8_t *)&val, 4) != 0) return (0xffffffff); return (le32toh(val)); } static int rsu_fw_iocmd(struct rsu_softc *sc, uint32_t iocmd) { int ntries; rsu_write_4(sc, R92S_IOCMD_CTRL, iocmd); rsu_ms_delay(sc, 1); for (ntries = 0; ntries < 50; ntries++) { if (rsu_read_4(sc, R92S_IOCMD_CTRL) == 0) return (0); rsu_ms_delay(sc, 1); } return (ETIMEDOUT); } static uint8_t rsu_efuse_read_1(struct rsu_softc *sc, uint16_t addr) { uint32_t reg; int ntries; reg = rsu_read_4(sc, R92S_EFUSE_CTRL); reg = RW(reg, R92S_EFUSE_CTRL_ADDR, addr); reg &= ~R92S_EFUSE_CTRL_VALID; rsu_write_4(sc, R92S_EFUSE_CTRL, reg); /* Wait for read operation to complete. */ for (ntries = 0; ntries < 100; ntries++) { reg = rsu_read_4(sc, R92S_EFUSE_CTRL); if (reg & R92S_EFUSE_CTRL_VALID) return (MS(reg, R92S_EFUSE_CTRL_DATA)); rsu_ms_delay(sc, 1); } device_printf(sc->sc_dev, "could not read efuse byte at address 0x%x\n", addr); return (0xff); } static int rsu_read_rom(struct rsu_softc *sc) { uint8_t *rom = sc->rom; uint16_t addr = 0; uint32_t reg; uint8_t off, msk; int i; /* Make sure that ROM type is eFuse and that autoload succeeded. */ reg = rsu_read_1(sc, R92S_EE_9346CR); if ((reg & (R92S_9356SEL | R92S_EEPROM_EN)) != R92S_EEPROM_EN) return (EIO); /* Turn on 2.5V to prevent eFuse leakage. 
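* The supply only has to be pulsed: it is switched back off after about
* a millisecond, before the map is read out through R92S_EFUSE_CTRL.
* Each map entry starts with a header byte: the high nibble is an
* 8-byte block offset, the low nibble flags words to skip, and a
* header byte of 0xff terminates the map.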
*/ reg = rsu_read_1(sc, R92S_EFUSE_TEST + 3); rsu_write_1(sc, R92S_EFUSE_TEST + 3, reg | 0x80); rsu_ms_delay(sc, 1); rsu_write_1(sc, R92S_EFUSE_TEST + 3, reg & ~0x80); /* Read full ROM image. */ memset(&sc->rom, 0xff, sizeof(sc->rom)); while (addr < 512) { reg = rsu_efuse_read_1(sc, addr); if (reg == 0xff) break; addr++; off = reg >> 4; msk = reg & 0xf; for (i = 0; i < 4; i++) { if (msk & (1 << i)) continue; rom[off * 8 + i * 2 + 0] = rsu_efuse_read_1(sc, addr); addr++; rom[off * 8 + i * 2 + 1] = rsu_efuse_read_1(sc, addr); addr++; } } #ifdef USB_DEBUG if (rsu_debug >= 5) { /* Dump ROM content. */ printf("\n"); for (i = 0; i < sizeof(sc->rom); i++) printf("%02x:", rom[i]); printf("\n"); } #endif return (0); } static int rsu_fw_cmd(struct rsu_softc *sc, uint8_t code, void *buf, int len) { const uint8_t which = RSU_H2C_ENDPOINT; struct rsu_data *data; struct r92s_tx_desc *txd; struct r92s_fw_cmd_hdr *cmd; int cmdsz; int xferlen; RSU_ASSERT_LOCKED(sc); data = rsu_getbuf(sc); if (data == NULL) return (ENOMEM); /* Blank the entire payload, just to be safe */ memset(data->buf, '\0', RSU_TXBUFSZ); /* Round-up command length to a multiple of 8 bytes. */ /* XXX TODO: is this required? */ cmdsz = (len + 7) & ~7; xferlen = sizeof(*txd) + sizeof(*cmd) + cmdsz; KASSERT(xferlen <= RSU_TXBUFSZ, ("%s: invalid length", __func__)); memset(data->buf, 0, xferlen); /* Setup Tx descriptor. */ txd = (struct r92s_tx_desc *)data->buf; txd->txdw0 = htole32( SM(R92S_TXDW0_OFFSET, sizeof(*txd)) | SM(R92S_TXDW0_PKTLEN, sizeof(*cmd) + cmdsz) | R92S_TXDW0_OWN | R92S_TXDW0_FSG | R92S_TXDW0_LSG); txd->txdw1 = htole32(SM(R92S_TXDW1_QSEL, R92S_TXDW1_QSEL_H2C)); /* Setup command header. */ cmd = (struct r92s_fw_cmd_hdr *)&txd[1]; cmd->len = htole16(cmdsz); cmd->code = code; cmd->seq = sc->cmd_seq; sc->cmd_seq = (sc->cmd_seq + 1) & 0x7f; /* Copy command payload. */ memcpy(&cmd[1], buf, len); RSU_DPRINTF(sc, RSU_DEBUG_TX | RSU_DEBUG_FWCMD, "%s: Tx cmd code=0x%x len=0x%x\n", __func__, code, cmdsz); data->buflen = xferlen; STAILQ_INSERT_TAIL(&sc->sc_tx_pending[which], data, next); usbd_transfer_start(sc->sc_xfer[which]); return (0); } /* ARGSUSED */ static void rsu_calib_task(void *arg, int pending __unused) { struct rsu_softc *sc = arg; #ifdef notyet uint32_t reg; #endif RSU_DPRINTF(sc, RSU_DEBUG_CALIB, "%s: running calibration task\n", __func__); RSU_LOCK(sc); #ifdef notyet /* Read WPS PBC status. */ rsu_write_1(sc, R92S_MAC_PINMUX_CTRL, R92S_GPIOMUX_EN | SM(R92S_GPIOSEL_GPIO, R92S_GPIOSEL_GPIO_JTAG)); rsu_write_1(sc, R92S_GPIO_IO_SEL, rsu_read_1(sc, R92S_GPIO_IO_SEL) & ~R92S_GPIO_WPS); reg = rsu_read_1(sc, R92S_GPIO_CTRL); if (reg != 0xff && (reg & R92S_GPIO_WPS)) DPRINTF(("WPS PBC is pushed\n")); #endif /* Read current signal level. */ if (rsu_fw_iocmd(sc, 0xf4000001) == 0) { sc->sc_currssi = rsu_read_4(sc, R92S_IOCMD_DATA); RSU_DPRINTF(sc, RSU_DEBUG_CALIB, "%s: RSSI=%d (%d)\n", __func__, sc->sc_currssi, rsu_hwrssi_to_rssi(sc, sc->sc_currssi)); } if (sc->sc_calibrating) taskqueue_enqueue_timeout(taskqueue_thread, &sc->calib_task, hz); RSU_UNLOCK(sc); } static void rsu_tx_task(void *arg, int pending __unused) { struct rsu_softc *sc = arg; RSU_LOCK(sc); _rsu_start(sc); RSU_UNLOCK(sc); } #define RSU_PWR_UNKNOWN 0x0 #define RSU_PWR_ACTIVE 0x1 #define RSU_PWR_OFF 0x2 #define RSU_PWR_SLEEP 0x3 /* * Set the current power state. * * The rtlwifi code doesn't do this so aggressively; it * waits for an idle period after association with * no traffic before doing this. 
* * For now - it's on in all states except RUN, and * in RUN it'll transition to allow sleep. */ struct r92s_pwr_cmd { uint8_t mode; uint8_t smart_ps; uint8_t bcn_pass_time; }; static int rsu_set_fw_power_state(struct rsu_softc *sc, int state) { struct r92s_set_pwr_mode cmd; //struct r92s_pwr_cmd cmd; int error; RSU_ASSERT_LOCKED(sc); /* only change state if required */ if (sc->sc_curpwrstate == state) return (0); memset(&cmd, 0, sizeof(cmd)); switch (state) { case RSU_PWR_ACTIVE: /* Force the hardware awake */ rsu_write_1(sc, R92S_USB_HRPWM, R92S_USB_HRPWM_PS_ST_ACTIVE | R92S_USB_HRPWM_PS_ALL_ON); cmd.mode = R92S_PS_MODE_ACTIVE; break; case RSU_PWR_SLEEP: cmd.mode = R92S_PS_MODE_DTIM; /* XXX configurable? */ cmd.smart_ps = 1; /* XXX 2 if doing p2p */ cmd.bcn_pass_time = 5; /* in 100mS usb.c, linux/rtlwifi */ break; case RSU_PWR_OFF: cmd.mode = R92S_PS_MODE_RADIOOFF; break; default: device_printf(sc->sc_dev, "%s: unknown ps mode (%d)\n", __func__, state); return (ENXIO); } RSU_DPRINTF(sc, RSU_DEBUG_RESET, "%s: setting ps mode to %d (mode %d)\n", __func__, state, cmd.mode); error = rsu_fw_cmd(sc, R92S_CMD_SET_PWR_MODE, &cmd, sizeof(cmd)); if (error == 0) sc->sc_curpwrstate = state; return (error); } static int rsu_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct rsu_vap *uvp = RSU_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct rsu_softc *sc = ic->ic_softc; struct ieee80211_node *ni; struct ieee80211_rateset *rs; enum ieee80211_state ostate; int error, startcal = 0; ostate = vap->iv_state; RSU_DPRINTF(sc, RSU_DEBUG_STATE, "%s: %s -> %s\n", __func__, ieee80211_state_name[ostate], ieee80211_state_name[nstate]); IEEE80211_UNLOCK(ic); if (ostate == IEEE80211_S_RUN) { RSU_LOCK(sc); /* Stop calibration. */ sc->sc_calibrating = 0; /* Pause Tx for AC queues. */ rsu_write_1(sc, R92S_TXPAUSE, R92S_TXPAUSE_AC); usb_pause_mtx(&sc->sc_mtx, USB_MS_TO_TICKS(10)); RSU_UNLOCK(sc); taskqueue_drain_timeout(taskqueue_thread, &sc->calib_task); taskqueue_drain(taskqueue_thread, &sc->tx_task); RSU_LOCK(sc); /* Disassociate from our current BSS. */ rsu_disconnect(sc); /* Reinstall static keys. */ if (sc->sc_running) rsu_reinit_static_keys(sc); } else RSU_LOCK(sc); switch (nstate) { case IEEE80211_S_INIT: (void) rsu_set_fw_power_state(sc, RSU_PWR_ACTIVE); break; case IEEE80211_S_AUTH: ni = ieee80211_ref_node(vap->iv_bss); (void) rsu_set_fw_power_state(sc, RSU_PWR_ACTIVE); error = rsu_join_bss(sc, ni); ieee80211_free_node(ni); if (error != 0) { device_printf(sc->sc_dev, "could not send join command\n"); } break; case IEEE80211_S_RUN: /* Flush all AC queues. */ rsu_write_1(sc, R92S_TXPAUSE, 0); ni = ieee80211_ref_node(vap->iv_bss); rs = &ni->ni_rates; /* Indicate highest supported rate. */ ni->ni_txrate = rs->rs_rates[rs->rs_nrates - 1]; (void) rsu_set_fw_power_state(sc, RSU_PWR_SLEEP); ieee80211_free_node(ni); startcal = 1; break; default: break; } if (startcal != 0) { sc->sc_calibrating = 1; /* Start periodic calibration. 
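* rsu_calib_task() re-arms itself every second for as long as
* sc_calibrating stays set; the task is stopped and drained again when
* leaving RUN state, earlier in this function.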
*/ taskqueue_enqueue_timeout(taskqueue_thread, &sc->calib_task, hz); } RSU_UNLOCK(sc); IEEE80211_LOCK(ic); return (uvp->newstate(vap, nstate, arg)); } static int rsu_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k, ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix) { struct rsu_softc *sc = vap->iv_ic->ic_softc; int is_checked = 0; if (&vap->iv_nw_keys[0] <= k && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { - *keyix = k - vap->iv_nw_keys; + *keyix = ieee80211_crypto_get_key_wepidx(vap, k); } else { if (vap->iv_opmode != IEEE80211_M_STA) { *keyix = 0; /* TODO: obtain keyix from node id */ is_checked = 1; k->wk_flags |= IEEE80211_KEY_SWCRYPT; } else *keyix = R92S_MACID_BSS; } if (!is_checked) { RSU_LOCK(sc); if (isset(sc->keys_bmap, *keyix)) { device_printf(sc->sc_dev, "%s: key slot %d is already used!\n", __func__, *keyix); RSU_UNLOCK(sc); return (0); } setbit(sc->keys_bmap, *keyix); RSU_UNLOCK(sc); } *rxkeyix = *keyix; return (1); } static int rsu_process_key(struct ieee80211vap *vap, const struct ieee80211_key *k, int set) { struct rsu_softc *sc = vap->iv_ic->ic_softc; int ret; if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { /* Not for us. */ return (1); } /* Handle group keys. */ if (&vap->iv_nw_keys[0] <= k && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) { KASSERT(k->wk_keyix < nitems(sc->group_keys), ("keyix %u > %zu\n", k->wk_keyix, nitems(sc->group_keys))); RSU_LOCK(sc); sc->group_keys[k->wk_keyix] = (set ? k : NULL); if (!sc->sc_running) { /* Static keys will be set during device startup. */ RSU_UNLOCK(sc); return (1); } if (set) ret = rsu_set_key_group(sc, k); else ret = rsu_delete_key(sc, k->wk_keyix); RSU_UNLOCK(sc); return (!ret); } if (set) { /* wait for pending key removal */ taskqueue_drain(taskqueue_thread, &sc->del_key_task); RSU_LOCK(sc); ret = rsu_set_key_pair(sc, k); RSU_UNLOCK(sc); } else { RSU_DELKEY_BMAP_LOCK(sc); setbit(sc->free_keys_bmap, k->wk_keyix); RSU_DELKEY_BMAP_UNLOCK(sc); /* workaround ieee80211_node_delucastkey() locking */ taskqueue_enqueue(taskqueue_thread, &sc->del_key_task); ret = 0; /* fake success */ } return (!ret); } static int rsu_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k) { return (rsu_process_key(vap, k, 1)); } static int rsu_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) { return (rsu_process_key(vap, k, 0)); } static int rsu_cam_read(struct rsu_softc *sc, uint8_t addr, uint32_t *val) { int ntries; rsu_write_4(sc, R92S_CAMCMD, R92S_CAMCMD_POLLING | SM(R92S_CAMCMD_ADDR, addr)); for (ntries = 0; ntries < 10; ntries++) { if (!(rsu_read_4(sc, R92S_CAMCMD) & R92S_CAMCMD_POLLING)) break; usb_pause_mtx(&sc->sc_mtx, USB_MS_TO_TICKS(1)); } if (ntries == 10) { device_printf(sc->sc_dev, "%s: cannot read CAM entry at address %02X\n", __func__, addr); return (ETIMEDOUT); } *val = rsu_read_4(sc, R92S_CAMREAD); return (0); } static void rsu_cam_write(struct rsu_softc *sc, uint8_t addr, uint32_t data) { rsu_write_4(sc, R92S_CAMWRITE, data); rsu_write_4(sc, R92S_CAMCMD, R92S_CAMCMD_POLLING | R92S_CAMCMD_WRITE | SM(R92S_CAMCMD_ADDR, addr)); } static int rsu_key_check(struct rsu_softc *sc, ieee80211_keyix keyix, int is_valid) { uint32_t val; int error, ntries; for (ntries = 0; ntries < 20; ntries++) { usb_pause_mtx(&sc->sc_mtx, USB_MS_TO_TICKS(1)); error = rsu_cam_read(sc, R92S_CAM_CTL0(keyix), &val); if (error != 0) { device_printf(sc->sc_dev, "%s: cannot check key status!\n", __func__); return (error); } if (((val & R92S_CAM_VALID) == 0) ^ is_valid) break; } if (ntries == 20) { device_printf(sc->sc_dev, "%s: key %d is 
%s marked as valid, rejecting request\n", __func__, keyix, is_valid ? "not" : "still"); return (EIO); } return (0); } /* * Map net80211 cipher to RTL8712 security mode. */ static uint8_t rsu_crypto_mode(struct rsu_softc *sc, u_int cipher, int keylen) { switch (cipher) { case IEEE80211_CIPHER_WEP: return keylen < 8 ? R92S_KEY_ALGO_WEP40 : R92S_KEY_ALGO_WEP104; case IEEE80211_CIPHER_TKIP: return R92S_KEY_ALGO_TKIP; case IEEE80211_CIPHER_AES_CCM: return R92S_KEY_ALGO_AES; default: device_printf(sc->sc_dev, "unknown cipher %d\n", cipher); return R92S_KEY_ALGO_INVALID; } } static int rsu_set_key_group(struct rsu_softc *sc, const struct ieee80211_key *k) { struct r92s_fw_cmd_set_key key; uint8_t algo; int error; RSU_ASSERT_LOCKED(sc); /* Map net80211 cipher to HW crypto algorithm. */ algo = rsu_crypto_mode(sc, k->wk_cipher->ic_cipher, k->wk_keylen); if (algo == R92S_KEY_ALGO_INVALID) return (EINVAL); memset(&key, 0, sizeof(key)); key.algo = algo; key.cam_id = k->wk_keyix; key.grpkey = (k->wk_flags & IEEE80211_KEY_GROUP) != 0; memcpy(key.key, k->wk_key, MIN(k->wk_keylen, sizeof(key.key))); RSU_DPRINTF(sc, RSU_DEBUG_KEY | RSU_DEBUG_FWCMD, "%s: keyix %u, group %u, algo %u/%u, flags %04X, len %u, " "macaddr %s\n", __func__, key.cam_id, key.grpkey, k->wk_cipher->ic_cipher, key.algo, k->wk_flags, k->wk_keylen, ether_sprintf(k->wk_macaddr)); error = rsu_fw_cmd(sc, R92S_CMD_SET_KEY, &key, sizeof(key)); if (error != 0) { device_printf(sc->sc_dev, "%s: cannot send firmware command, error %d\n", __func__, error); return (error); } return (rsu_key_check(sc, k->wk_keyix, 1)); } static int rsu_set_key_pair(struct rsu_softc *sc, const struct ieee80211_key *k) { struct r92s_fw_cmd_set_key_mac key; uint8_t algo; int error; RSU_ASSERT_LOCKED(sc); if (!sc->sc_running) return (ESHUTDOWN); /* Map net80211 cipher to HW crypto algorithm. 
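* As in the group key path, WEP40 vs. WEP104 is chosen purely from the
* key length and anything the hardware cannot handle is rejected with
* EINVAL.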
*/ algo = rsu_crypto_mode(sc, k->wk_cipher->ic_cipher, k->wk_keylen); if (algo == R92S_KEY_ALGO_INVALID) return (EINVAL); memset(&key, 0, sizeof(key)); key.algo = algo; memcpy(key.macaddr, k->wk_macaddr, sizeof(key.macaddr)); memcpy(key.key, k->wk_key, MIN(k->wk_keylen, sizeof(key.key))); RSU_DPRINTF(sc, RSU_DEBUG_KEY | RSU_DEBUG_FWCMD, "%s: keyix %u, algo %u/%u, flags %04X, len %u, macaddr %s\n", __func__, k->wk_keyix, k->wk_cipher->ic_cipher, key.algo, k->wk_flags, k->wk_keylen, ether_sprintf(key.macaddr)); error = rsu_fw_cmd(sc, R92S_CMD_SET_STA_KEY, &key, sizeof(key)); if (error != 0) { device_printf(sc->sc_dev, "%s: cannot send firmware command, error %d\n", __func__, error); return (error); } return (rsu_key_check(sc, k->wk_keyix, 1)); } static int rsu_reinit_static_keys(struct rsu_softc *sc) { int i, error; for (i = 0; i < nitems(sc->group_keys); i++) { if (sc->group_keys[i] != NULL) { error = rsu_set_key_group(sc, sc->group_keys[i]); if (error != 0) { device_printf(sc->sc_dev, "%s: failed to set static key %d, " "error %d\n", __func__, i, error); return (error); } } } return (0); } static int rsu_delete_key(struct rsu_softc *sc, ieee80211_keyix keyix) { struct r92s_fw_cmd_set_key key; uint32_t val; int error; RSU_ASSERT_LOCKED(sc); if (!sc->sc_running) return (0); /* check if it was automatically removed by firmware */ error = rsu_cam_read(sc, R92S_CAM_CTL0(keyix), &val); if (error == 0 && (val & R92S_CAM_VALID) == 0) { RSU_DPRINTF(sc, RSU_DEBUG_KEY, "%s: key %u does not exist\n", __func__, keyix); clrbit(sc->keys_bmap, keyix); return (0); } memset(&key, 0, sizeof(key)); key.cam_id = keyix; RSU_DPRINTF(sc, RSU_DEBUG_KEY | RSU_DEBUG_FWCMD, "%s: removing key %u\n", __func__, key.cam_id); error = rsu_fw_cmd(sc, R92S_CMD_SET_KEY, &key, sizeof(key)); if (error != 0) { device_printf(sc->sc_dev, "%s: cannot send firmware command, error %d\n", __func__, error); goto finish; } usb_pause_mtx(&sc->sc_mtx, USB_MS_TO_TICKS(5)); /* * Clear 'valid' bit manually (cannot be done via firmware command). * Used for key check + when firmware command cannot be sent. */ finish: rsu_cam_write(sc, R92S_CAM_CTL0(keyix), 0); clrbit(sc->keys_bmap, keyix); return (rsu_key_check(sc, keyix, 0)); } static void rsu_delete_key_pair_cb(void *arg, int pending __unused) { struct rsu_softc *sc = arg; int i; RSU_DELKEY_BMAP_LOCK(sc); for (i = IEEE80211_WEP_NKID; i < R92S_CAM_ENTRY_LIMIT; i++) { if (isset(sc->free_keys_bmap, i)) { RSU_DELKEY_BMAP_UNLOCK(sc); RSU_LOCK(sc); RSU_DPRINTF(sc, RSU_DEBUG_KEY, "%s: calling rsu_delete_key() with keyix = %d\n", __func__, i); (void) rsu_delete_key(sc, i); RSU_UNLOCK(sc); RSU_DELKEY_BMAP_LOCK(sc); clrbit(sc->free_keys_bmap, i); /* bmap can be changed */ i = IEEE80211_WEP_NKID - 1; continue; } } RSU_DELKEY_BMAP_UNLOCK(sc); } static int rsu_site_survey(struct rsu_softc *sc, struct ieee80211_scan_ssid *ssid) { struct r92s_fw_cmd_sitesurvey cmd; RSU_ASSERT_LOCKED(sc); memset(&cmd, 0, sizeof(cmd)); /* TODO: passive channels? 
*/ if (sc->sc_active_scan) cmd.active = htole32(1); cmd.limit = htole32(48); if (ssid != NULL) { sc->sc_extra_scan = 1; cmd.ssidlen = htole32(ssid->len); memcpy(cmd.ssid, ssid->ssid, ssid->len); } #ifdef USB_DEBUG if (rsu_debug & (RSU_DEBUG_SCAN | RSU_DEBUG_FWCMD)) { device_printf(sc->sc_dev, "sending site survey command, active %d", le32toh(cmd.active)); if (ssid != NULL) { printf(", ssid: "); ieee80211_print_essid(cmd.ssid, le32toh(cmd.ssidlen)); } printf("\n"); } #endif return (rsu_fw_cmd(sc, R92S_CMD_SITE_SURVEY, &cmd, sizeof(cmd))); } static int rsu_join_bss(struct rsu_softc *sc, struct ieee80211_node *ni) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = ni->ni_vap; struct ndis_wlan_bssid_ex *bss; struct ndis_802_11_fixed_ies *fixed; struct r92s_fw_cmd_auth auth; uint8_t buf[sizeof(*bss) + 128] __aligned(4); uint8_t *frm; uint8_t opmode; int error; RSU_ASSERT_LOCKED(sc); /* Let the FW decide the opmode based on the capinfo field. */ opmode = NDIS802_11AUTOUNKNOWN; RSU_DPRINTF(sc, RSU_DEBUG_RESET, "%s: setting operating mode to %d\n", __func__, opmode); error = rsu_fw_cmd(sc, R92S_CMD_SET_OPMODE, &opmode, sizeof(opmode)); if (error != 0) return (error); memset(&auth, 0, sizeof(auth)); if (vap->iv_flags & IEEE80211_F_WPA) { auth.mode = R92S_AUTHMODE_WPA; auth.dot1x = (ni->ni_authmode == IEEE80211_AUTH_8021X); } else auth.mode = R92S_AUTHMODE_OPEN; RSU_DPRINTF(sc, RSU_DEBUG_RESET, "%s: setting auth mode to %d\n", __func__, auth.mode); error = rsu_fw_cmd(sc, R92S_CMD_SET_AUTH, &auth, sizeof(auth)); if (error != 0) return (error); memset(buf, 0, sizeof(buf)); bss = (struct ndis_wlan_bssid_ex *)buf; IEEE80211_ADDR_COPY(bss->macaddr, ni->ni_bssid); bss->ssid.ssidlen = htole32(ni->ni_esslen); memcpy(bss->ssid.ssid, ni->ni_essid, ni->ni_esslen); if (vap->iv_flags & (IEEE80211_F_PRIVACY | IEEE80211_F_WPA)) bss->privacy = htole32(1); bss->rssi = htole32(ni->ni_avgrssi); if (ic->ic_curmode == IEEE80211_MODE_11B) bss->networktype = htole32(NDIS802_11DS); else bss->networktype = htole32(NDIS802_11OFDM24); bss->config.len = htole32(sizeof(bss->config)); bss->config.bintval = htole32(ni->ni_intval); bss->config.dsconfig = htole32(ieee80211_chan2ieee(ic, ni->ni_chan)); bss->inframode = htole32(NDIS802_11INFRASTRUCTURE); /* XXX verify how this is supposed to look! */ memcpy(bss->supprates, ni->ni_rates.rs_rates, ni->ni_rates.rs_nrates); /* Write the fixed fields of the beacon frame. */ fixed = (struct ndis_802_11_fixed_ies *)&bss[1]; memcpy(&fixed->tstamp, ni->ni_tstamp.data, 8); fixed->bintval = htole16(ni->ni_intval); fixed->capabilities = htole16(ni->ni_capinfo); /* Write IEs to be included in the association request. */ frm = (uint8_t *)&fixed[1]; frm = ieee80211_add_rsn(frm, vap); frm = ieee80211_add_wpa(frm, vap); frm = ieee80211_add_qos(frm, ni); if ((ic->ic_flags & IEEE80211_F_WME) && (ni->ni_ies.wme_ie != NULL)) frm = ieee80211_add_wme_info(frm, &ic->ic_wme); if (ni->ni_flags & IEEE80211_NODE_HT) { frm = ieee80211_add_htcap(frm, ni); frm = ieee80211_add_htinfo(frm, ni); } bss->ieslen = htole32(frm - (uint8_t *)fixed); bss->len = htole32(((frm - buf) + 3) & ~3); RSU_DPRINTF(sc, RSU_DEBUG_RESET | RSU_DEBUG_FWCMD, "%s: sending join bss command to %s chan %d\n", __func__, ether_sprintf(bss->macaddr), le32toh(bss->config.dsconfig)); return (rsu_fw_cmd(sc, R92S_CMD_JOIN_BSS, buf, sizeof(buf))); } static int rsu_disconnect(struct rsu_softc *sc) { uint32_t zero = 0; /* :-) */ /* Disassociate from our current BSS. 
*/ RSU_DPRINTF(sc, RSU_DEBUG_STATE | RSU_DEBUG_FWCMD, "%s: sending disconnect command\n", __func__); return (rsu_fw_cmd(sc, R92S_CMD_DISCONNECT, &zero, sizeof(zero))); } /* * Map the hardware provided RSSI value to a signal level. * For the most part it's just something we divide by and cap * so it doesn't overflow the representation by net80211. */ static int rsu_hwrssi_to_rssi(struct rsu_softc *sc, int hw_rssi) { int v; if (hw_rssi == 0) return (0); v = hw_rssi >> 4; if (v > 80) v = 80; return (v); } static void rsu_event_survey(struct rsu_softc *sc, uint8_t *buf, int len) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_frame *wh; struct ndis_wlan_bssid_ex *bss; struct ieee80211_rx_stats rxs; struct mbuf *m; int pktlen; if (__predict_false(len < sizeof(*bss))) return; bss = (struct ndis_wlan_bssid_ex *)buf; if (__predict_false(len < sizeof(*bss) + le32toh(bss->ieslen))) return; RSU_DPRINTF(sc, RSU_DEBUG_SCAN, "%s: found BSS %s: len=%d chan=%d inframode=%d " "networktype=%d privacy=%d, RSSI=%d\n", __func__, ether_sprintf(bss->macaddr), le32toh(bss->len), le32toh(bss->config.dsconfig), le32toh(bss->inframode), le32toh(bss->networktype), le32toh(bss->privacy), le32toh(bss->rssi)); /* Build a fake beacon frame to let net80211 do all the parsing. */ /* XXX TODO: just call the new scan API methods! */ pktlen = sizeof(*wh) + le32toh(bss->ieslen); if (__predict_false(pktlen > MCLBYTES)) return; m = m_get2(pktlen, M_NOWAIT, MT_DATA, M_PKTHDR); if (__predict_false(m == NULL)) return; wh = mtod(m, struct ieee80211_frame *); wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_BEACON; wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; USETW(wh->i_dur, 0); IEEE80211_ADDR_COPY(wh->i_addr1, ieee80211broadcastaddr); IEEE80211_ADDR_COPY(wh->i_addr2, bss->macaddr); IEEE80211_ADDR_COPY(wh->i_addr3, bss->macaddr); *(uint16_t *)wh->i_seq = 0; memcpy(&wh[1], (uint8_t *)&bss[1], le32toh(bss->ieslen)); /* Finalize mbuf. */ m->m_pkthdr.len = m->m_len = pktlen; /* Set channel flags for input path */ bzero(&rxs, sizeof(rxs)); rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ; rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI; rxs.c_ieee = le32toh(bss->config.dsconfig); rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ); /* This is a number from 0..100; so let's just divide it down a bit */ rxs.c_rssi = le32toh(bss->rssi) / 2; rxs.c_nf = -96; if (ieee80211_add_rx_params(m, &rxs) == 0) return; /* XXX avoid a LOR */ RSU_UNLOCK(sc); ieee80211_input_mimo_all(ic, m); RSU_LOCK(sc); } static void rsu_event_join_bss(struct rsu_softc *sc, uint8_t *buf, int len) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct ieee80211_node *ni = vap->iv_bss; struct r92s_event_join_bss *rsp; uint32_t tmp; int res; if (__predict_false(len < sizeof(*rsp))) return; rsp = (struct r92s_event_join_bss *)buf; res = (int)le32toh(rsp->join_res); RSU_DPRINTF(sc, RSU_DEBUG_STATE | RSU_DEBUG_FWCMD, "%s: Rx join BSS event len=%d res=%d\n", __func__, len, res); /* * XXX Don't do this; there's likely a better way to tell * the caller we failed. */ if (res <= 0) { RSU_UNLOCK(sc); ieee80211_new_state(vap, IEEE80211_S_SCAN, -1); RSU_LOCK(sc); return; } tmp = le32toh(rsp->associd); if (tmp >= vap->iv_max_aid) { DPRINTF("Assoc ID overflow\n"); tmp = 1; } RSU_DPRINTF(sc, RSU_DEBUG_STATE | RSU_DEBUG_FWCMD, "%s: associated with %s associd=%d\n", __func__, ether_sprintf(rsp->bss.macaddr), tmp); /* XXX is this required? What's the top two bits for again? 
*/ ni->ni_associd = tmp | 0xc000; RSU_UNLOCK(sc); ieee80211_new_state(vap, IEEE80211_S_RUN, IEEE80211_FC0_SUBTYPE_ASSOC_RESP); RSU_LOCK(sc); } static void rsu_event_addba_req_report(struct rsu_softc *sc, uint8_t *buf, int len) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct r92s_add_ba_event *ba = (void *) buf; struct ieee80211_node *ni; if (len < sizeof(*ba)) { device_printf(sc->sc_dev, "%s: short read (%d)\n", __func__, len); return; } if (vap == NULL) return; RSU_DPRINTF(sc, RSU_DEBUG_AMPDU, "%s: mac=%s, tid=%d, ssn=%d\n", __func__, ether_sprintf(ba->mac_addr), (int) ba->tid, (int) le16toh(ba->ssn)); /* XXX do node lookup; this is STA specific */ ni = ieee80211_ref_node(vap->iv_bss); ieee80211_ampdu_rx_start_ext(ni, ba->tid, le16toh(ba->ssn) >> 4, 32); ieee80211_free_node(ni); } static void rsu_rx_event(struct rsu_softc *sc, uint8_t code, uint8_t *buf, int len) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); RSU_DPRINTF(sc, RSU_DEBUG_RX | RSU_DEBUG_FWCMD, "%s: Rx event code=%d len=%d\n", __func__, code, len); switch (code) { case R92S_EVT_SURVEY: rsu_event_survey(sc, buf, len); break; case R92S_EVT_SURVEY_DONE: RSU_DPRINTF(sc, RSU_DEBUG_SCAN, "%s: %s scan done, found %d BSS\n", __func__, sc->sc_extra_scan ? "direct" : "broadcast", le32toh(*(uint32_t *)buf)); if (sc->sc_extra_scan == 1) { /* Send broadcast probe request. */ sc->sc_extra_scan = 0; if (vap != NULL && rsu_site_survey(sc, NULL) != 0) { RSU_UNLOCK(sc); ieee80211_cancel_scan(vap); RSU_LOCK(sc); } break; } if (vap != NULL) { RSU_UNLOCK(sc); ieee80211_scan_done(vap); RSU_LOCK(sc); } break; case R92S_EVT_JOIN_BSS: if (vap->iv_state == IEEE80211_S_AUTH) rsu_event_join_bss(sc, buf, len); break; case R92S_EVT_DEL_STA: RSU_DPRINTF(sc, RSU_DEBUG_FWCMD | RSU_DEBUG_STATE, "%s: disassociated from %s\n", __func__, ether_sprintf(buf)); if (vap->iv_state == IEEE80211_S_RUN && IEEE80211_ADDR_EQ(vap->iv_bss->ni_bssid, buf)) { RSU_UNLOCK(sc); ieee80211_new_state(vap, IEEE80211_S_SCAN, -1); RSU_LOCK(sc); } break; case R92S_EVT_WPS_PBC: RSU_DPRINTF(sc, RSU_DEBUG_RX | RSU_DEBUG_FWCMD, "%s: WPS PBC pushed.\n", __func__); break; case R92S_EVT_FWDBG: buf[60] = '\0'; RSU_DPRINTF(sc, RSU_DEBUG_FWDBG, "FWDBG: %s\n", (char *)buf); break; case R92S_EVT_ADDBA_REQ_REPORT: rsu_event_addba_req_report(sc, buf, len); break; default: device_printf(sc->sc_dev, "%s: unhandled code (%d)\n", __func__, code); break; } } static void rsu_rx_multi_event(struct rsu_softc *sc, uint8_t *buf, int len) { struct r92s_fw_cmd_hdr *cmd; int cmdsz; RSU_DPRINTF(sc, RSU_DEBUG_RX, "%s: Rx events len=%d\n", __func__, len); /* Skip Rx status. */ buf += sizeof(struct r92s_rx_stat); len -= sizeof(struct r92s_rx_stat); /* Process all events. */ for (;;) { /* Check that command header fits. */ if (__predict_false(len < sizeof(*cmd))) break; cmd = (struct r92s_fw_cmd_hdr *)buf; /* Check that command payload fits. */ cmdsz = le16toh(cmd->len); if (__predict_false(len < sizeof(*cmd) + cmdsz)) break; /* Process firmware event. 
*/ rsu_rx_event(sc, cmd->code, (uint8_t *)&cmd[1], cmdsz); if (!(cmd->seq & R92S_FW_CMD_MORE)) break; buf += sizeof(*cmd) + cmdsz; len -= sizeof(*cmd) + cmdsz; } } static int8_t rsu_get_rssi(struct rsu_softc *sc, int rate, void *physt) { static const int8_t cckoff[] = { 14, -2, -20, -40 }; struct r92s_rx_phystat *phy; struct r92s_rx_cck *cck; uint8_t rpt; int8_t rssi; if (rate <= 3) { cck = (struct r92s_rx_cck *)physt; rpt = (cck->agc_rpt >> 6) & 0x3; rssi = cck->agc_rpt & 0x3e; rssi = cckoff[rpt] - rssi; } else { /* OFDM/HT. */ phy = (struct r92s_rx_phystat *)physt; rssi = ((le32toh(phy->phydw1) >> 1) & 0x7f) - 106; } return (rssi); } static struct mbuf * rsu_rx_copy_to_mbuf(struct rsu_softc *sc, struct r92s_rx_stat *stat, int totlen) { struct ieee80211com *ic = &sc->sc_ic; struct mbuf *m; uint32_t rxdw0; int pktlen; rxdw0 = le32toh(stat->rxdw0); if (__predict_false(rxdw0 & (R92S_RXDW0_CRCERR | R92S_RXDW0_ICVERR))) { RSU_DPRINTF(sc, RSU_DEBUG_RX, "%s: RX flags error (%s)\n", __func__, rxdw0 & R92S_RXDW0_CRCERR ? "CRC" : "ICV"); goto fail; } pktlen = MS(rxdw0, R92S_RXDW0_PKTLEN); if (__predict_false(pktlen < sizeof (struct ieee80211_frame_ack))) { RSU_DPRINTF(sc, RSU_DEBUG_RX, "%s: frame is too short: %d\n", __func__, pktlen); goto fail; } m = m_get2(totlen, M_NOWAIT, MT_DATA, M_PKTHDR); if (__predict_false(m == NULL)) { device_printf(sc->sc_dev, "%s: could not allocate RX mbuf\n", __func__); goto fail; } /* Finalize mbuf. */ memcpy(mtod(m, uint8_t *), (uint8_t *)stat, totlen); m->m_pkthdr.len = m->m_len = totlen; return (m); fail: counter_u64_add(ic->ic_ierrors, 1); return (NULL); } static struct ieee80211_node * rsu_rx_frame(struct rsu_softc *sc, struct mbuf *m, int8_t *rssi_p) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_frame_min *wh; struct r92s_rx_stat *stat; uint32_t rxdw0, rxdw3; uint8_t cipher, rate; int infosz; stat = mtod(m, struct r92s_rx_stat *); rxdw0 = le32toh(stat->rxdw0); rxdw3 = le32toh(stat->rxdw3); rate = MS(rxdw3, R92S_RXDW3_RATE); cipher = MS(rxdw0, R92S_RXDW0_CIPHER); infosz = MS(rxdw0, R92S_RXDW0_INFOSZ) * 8; /* Get RSSI from PHY status descriptor if present. */ if (infosz != 0 && (rxdw0 & R92S_RXDW0_PHYST)) *rssi_p = rsu_get_rssi(sc, rate, &stat[1]); else { /* Cheat and get the last calibrated RSSI */ *rssi_p = rsu_hwrssi_to_rssi(sc, sc->sc_currssi); } if (ieee80211_radiotap_active(ic)) { struct rsu_rx_radiotap_header *tap = &sc->sc_rxtap; /* Map HW rate index to 802.11 rate. */ tap->wr_flags = 0; /* TODO */ if (rate < 12) { switch (rate) { /* CCK. */ case 0: tap->wr_rate = 2; break; case 1: tap->wr_rate = 4; break; case 2: tap->wr_rate = 11; break; case 3: tap->wr_rate = 22; break; /* OFDM. */ case 4: tap->wr_rate = 12; break; case 5: tap->wr_rate = 18; break; case 6: tap->wr_rate = 24; break; case 7: tap->wr_rate = 36; break; case 8: tap->wr_rate = 48; break; case 9: tap->wr_rate = 72; break; case 10: tap->wr_rate = 96; break; case 11: tap->wr_rate = 108; break; } } else { /* MCS0~15. */ /* Bit 7 set means HT MCS instead of rate. */ tap->wr_rate = 0x80 | (rate - 12); } tap->wr_dbm_antsignal = *rssi_p; tap->wr_chan_freq = htole16(ic->ic_curchan->ic_freq); tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags); }; /* Hardware does Rx TCP checksum offload. */ if (rxdw3 & R92S_RXDW3_TCPCHKVALID) { if (__predict_true(rxdw3 & R92S_RXDW3_TCPCHKRPT)) m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; } /* Drop descriptor. 
*/ m_adj(m, sizeof(*stat) + infosz); wh = mtod(m, struct ieee80211_frame_min *); if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) && cipher != R92S_KEY_ALGO_NONE) { m->m_flags |= M_WEP; } RSU_DPRINTF(sc, RSU_DEBUG_RX, "%s: Rx frame len %d, rate %d, infosz %d\n", __func__, m->m_len, rate, infosz); if (m->m_len >= sizeof(*wh)) return (ieee80211_find_rxnode(ic, wh)); return (NULL); } static struct mbuf * rsu_rx_multi_frame(struct rsu_softc *sc, uint8_t *buf, int len) { struct r92s_rx_stat *stat; uint32_t rxdw0; int totlen, pktlen, infosz, npkts; struct mbuf *m, *m0 = NULL, *prevm = NULL; /* * don't pass packets to the ieee80211 framework if the driver isn't * RUNNING. */ if (!sc->sc_running) return (NULL); /* Get the number of encapsulated frames. */ stat = (struct r92s_rx_stat *)buf; npkts = MS(le32toh(stat->rxdw2), R92S_RXDW2_PKTCNT); RSU_DPRINTF(sc, RSU_DEBUG_RX, "%s: Rx %d frames in one chunk\n", __func__, npkts); /* Process all of them. */ while (npkts-- > 0) { if (__predict_false(len < sizeof(*stat))) break; stat = (struct r92s_rx_stat *)buf; rxdw0 = le32toh(stat->rxdw0); pktlen = MS(rxdw0, R92S_RXDW0_PKTLEN); if (__predict_false(pktlen == 0)) break; infosz = MS(rxdw0, R92S_RXDW0_INFOSZ) * 8; /* Make sure everything fits in xfer. */ totlen = sizeof(*stat) + infosz + pktlen; if (__predict_false(totlen > len)) break; /* Process 802.11 frame. */ m = rsu_rx_copy_to_mbuf(sc, stat, totlen); if (m0 == NULL) m0 = m; if (prevm == NULL) prevm = m; else { prevm->m_next = m; prevm = m; } /* Next chunk is 128-byte aligned. */ totlen = (totlen + 127) & ~127; buf += totlen; len -= totlen; } return (m0); } static struct mbuf * rsu_rxeof(struct usb_xfer *xfer, struct rsu_data *data) { struct rsu_softc *sc = data->sc; struct ieee80211com *ic = &sc->sc_ic; struct r92s_rx_stat *stat; int len; usbd_xfer_status(xfer, &len, NULL, NULL, NULL); if (__predict_false(len < sizeof(*stat))) { DPRINTF("xfer too short %d\n", len); counter_u64_add(ic->ic_ierrors, 1); return (NULL); } /* Determine if it is a firmware C2H event or an 802.11 frame. */ stat = (struct r92s_rx_stat *)data->buf; if ((le32toh(stat->rxdw1) & 0x1ff) == 0x1ff) { rsu_rx_multi_event(sc, data->buf, len); /* No packets to process. */ return (NULL); } else return (rsu_rx_multi_frame(sc, data->buf, len)); } static void rsu_bulk_rx_callback(struct usb_xfer *xfer, usb_error_t error) { struct rsu_softc *sc = usbd_xfer_softc(xfer); struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_node *ni; struct mbuf *m = NULL, *next; struct rsu_data *data; int8_t rssi; RSU_ASSERT_LOCKED(sc); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: data = STAILQ_FIRST(&sc->sc_rx_active); if (data == NULL) goto tr_setup; STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next); m = rsu_rxeof(xfer, data); STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next); /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: data = STAILQ_FIRST(&sc->sc_rx_inactive); if (data == NULL) { KASSERT(m == NULL, ("mbuf isn't NULL")); return; } STAILQ_REMOVE_HEAD(&sc->sc_rx_inactive, next); STAILQ_INSERT_TAIL(&sc->sc_rx_active, data, next); usbd_xfer_set_frame_data(xfer, 0, data->buf, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); /* * To avoid LOR we should unlock our private mutex here to call * ieee80211_input() because here is at the end of a USB * callback and safe to unlock. 
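* Each frame in the chain is handed up on its own, and the node
* reference (when there is one) is dropped once net80211 has consumed
* the mbuf.
*/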
*/ while (m != NULL) { next = m->m_next; m->m_next = NULL; ni = rsu_rx_frame(sc, m, &rssi); RSU_UNLOCK(sc); if (ni != NULL) { if (ni->ni_flags & IEEE80211_NODE_HT) m->m_flags |= M_AMPDU; (void)ieee80211_input(ni, m, rssi, -96); ieee80211_free_node(ni); } else (void)ieee80211_input_all(ic, m, rssi, -96); RSU_LOCK(sc); m = next; } break; default: /* needs it to the inactive queue due to a error. */ data = STAILQ_FIRST(&sc->sc_rx_active); if (data != NULL) { STAILQ_REMOVE_HEAD(&sc->sc_rx_active, next); STAILQ_INSERT_TAIL(&sc->sc_rx_inactive, data, next); } if (error != USB_ERR_CANCELLED) { usbd_xfer_set_stall(xfer); counter_u64_add(ic->ic_ierrors, 1); goto tr_setup; } break; } } static void rsu_txeof(struct usb_xfer *xfer, struct rsu_data *data) { #ifdef USB_DEBUG struct rsu_softc *sc = usbd_xfer_softc(xfer); #endif RSU_DPRINTF(sc, RSU_DEBUG_TXDONE, "%s: called; data=%p\n", __func__, data); if (data->m) { /* XXX status? */ ieee80211_tx_complete(data->ni, data->m, 0); data->m = NULL; data->ni = NULL; } } static void rsu_bulk_tx_callback_sub(struct usb_xfer *xfer, usb_error_t error, uint8_t which) { struct rsu_softc *sc = usbd_xfer_softc(xfer); struct ieee80211com *ic = &sc->sc_ic; struct rsu_data *data; RSU_ASSERT_LOCKED(sc); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: data = STAILQ_FIRST(&sc->sc_tx_active[which]); if (data == NULL) goto tr_setup; RSU_DPRINTF(sc, RSU_DEBUG_TXDONE, "%s: transfer done %p\n", __func__, data); STAILQ_REMOVE_HEAD(&sc->sc_tx_active[which], next); rsu_txeof(xfer, data); rsu_freebuf(sc, data); /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: data = STAILQ_FIRST(&sc->sc_tx_pending[which]); if (data == NULL) { RSU_DPRINTF(sc, RSU_DEBUG_TXDONE, "%s: empty pending queue sc %p\n", __func__, sc); return; } STAILQ_REMOVE_HEAD(&sc->sc_tx_pending[which], next); STAILQ_INSERT_TAIL(&sc->sc_tx_active[which], data, next); usbd_xfer_set_frame_data(xfer, 0, data->buf, data->buflen); RSU_DPRINTF(sc, RSU_DEBUG_TXDONE, "%s: submitting transfer %p\n", __func__, data); usbd_transfer_submit(xfer); break; default: data = STAILQ_FIRST(&sc->sc_tx_active[which]); if (data != NULL) { STAILQ_REMOVE_HEAD(&sc->sc_tx_active[which], next); rsu_txeof(xfer, data); rsu_freebuf(sc, data); } counter_u64_add(ic->ic_oerrors, 1); if (error != USB_ERR_CANCELLED) { usbd_xfer_set_stall(xfer); goto tr_setup; } break; } /* * XXX TODO: if the queue is low, flush out FF TX frames. * Remember to unlock the driver for now; net80211 doesn't * defer it for us. */ } static void rsu_bulk_tx_callback_be_bk(struct usb_xfer *xfer, usb_error_t error) { struct rsu_softc *sc = usbd_xfer_softc(xfer); rsu_bulk_tx_callback_sub(xfer, error, RSU_BULK_TX_BE_BK); /* This kicks the TX taskqueue */ rsu_start(sc); } static void rsu_bulk_tx_callback_vi_vo(struct usb_xfer *xfer, usb_error_t error) { struct rsu_softc *sc = usbd_xfer_softc(xfer); rsu_bulk_tx_callback_sub(xfer, error, RSU_BULK_TX_VI_VO); /* This kicks the TX taskqueue */ rsu_start(sc); } static void rsu_bulk_tx_callback_h2c(struct usb_xfer *xfer, usb_error_t error) { struct rsu_softc *sc = usbd_xfer_softc(xfer); rsu_bulk_tx_callback_sub(xfer, error, RSU_BULK_TX_H2C); /* This kicks the TX taskqueue */ rsu_start(sc); } /* * Transmit the given frame. * * This doesn't free the node or mbuf upon failure. 
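* On failure the caller still owns the mbuf, the node reference and the
* rsu_data buffer and must release them itself (see _rsu_start()).
*/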
*/ static int rsu_tx_start(struct rsu_softc *sc, struct ieee80211_node *ni, struct mbuf *m0, struct rsu_data *data) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_frame *wh; struct ieee80211_key *k = NULL; struct r92s_tx_desc *txd; uint8_t type, cipher; int prio = 0; uint8_t which; int hasqos; int xferlen; int qid; RSU_ASSERT_LOCKED(sc); wh = mtod(m0, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; RSU_DPRINTF(sc, RSU_DEBUG_TX, "%s: data=%p, m=%p\n", __func__, data, m0); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { device_printf(sc->sc_dev, "ieee80211_crypto_encap returns NULL.\n"); /* XXX we don't expect the fragmented frames */ return (ENOBUFS); } wh = mtod(m0, struct ieee80211_frame *); } /* If we have QoS then use it */ /* XXX TODO: mbuf WME/PRI versus TID? */ if (IEEE80211_QOS_HAS_SEQ(wh)) { /* Has QoS */ prio = M_WME_GETAC(m0); which = rsu_wme_ac_xfer_map[prio]; hasqos = 1; } else { /* Non-QoS TID */ /* XXX TODO: tid=0 for non-qos TID? */ which = rsu_wme_ac_xfer_map[WME_AC_BE]; hasqos = 0; prio = 0; } qid = rsu_ac2qid[prio]; #if 0 switch (type) { case IEEE80211_FC0_TYPE_CTL: case IEEE80211_FC0_TYPE_MGT: which = rsu_wme_ac_xfer_map[WME_AC_VO]; break; default: which = rsu_wme_ac_xfer_map[M_WME_GETAC(m0)]; break; } hasqos = 0; #endif RSU_DPRINTF(sc, RSU_DEBUG_TX, "%s: pri=%d, which=%d, hasqos=%d\n", __func__, prio, which, hasqos); /* Fill Tx descriptor. */ txd = (struct r92s_tx_desc *)data->buf; memset(txd, 0, sizeof(*txd)); txd->txdw0 |= htole32( SM(R92S_TXDW0_PKTLEN, m0->m_pkthdr.len) | SM(R92S_TXDW0_OFFSET, sizeof(*txd)) | R92S_TXDW0_OWN | R92S_TXDW0_FSG | R92S_TXDW0_LSG); txd->txdw1 |= htole32( SM(R92S_TXDW1_MACID, R92S_MACID_BSS) | SM(R92S_TXDW1_QSEL, qid)); if (!hasqos) txd->txdw1 |= htole32(R92S_TXDW1_NONQOS); if (k != NULL && !(k->wk_flags & IEEE80211_KEY_SWENCRYPT)) { switch (k->wk_cipher->ic_cipher) { case IEEE80211_CIPHER_WEP: cipher = R92S_TXDW1_CIPHER_WEP; break; case IEEE80211_CIPHER_TKIP: cipher = R92S_TXDW1_CIPHER_TKIP; break; case IEEE80211_CIPHER_AES_CCM: cipher = R92S_TXDW1_CIPHER_AES; break; default: cipher = R92S_TXDW1_CIPHER_NONE; } txd->txdw1 |= htole32( SM(R92S_TXDW1_CIPHER, cipher) | SM(R92S_TXDW1_KEYIDX, k->wk_keyix)); } /* XXX todo: set AGGEN bit if appropriate? */ txd->txdw2 |= htole32(R92S_TXDW2_BK); if (IEEE80211_IS_MULTICAST(wh->i_addr1)) txd->txdw2 |= htole32(R92S_TXDW2_BMCAST); /* * Firmware will use and increment the sequence number for the * specified priority. */ txd->txdw3 |= htole32(SM(R92S_TXDW3_SEQ, prio)); if (ieee80211_radiotap_active_vap(vap)) { struct rsu_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_chan_freq = htole16(ic->ic_curchan->ic_freq); tap->wt_chan_flags = htole16(ic->ic_curchan->ic_flags); ieee80211_radiotap_tx(vap, m0); } xferlen = sizeof(*txd) + m0->m_pkthdr.len; m_copydata(m0, 0, m0->m_pkthdr.len, (caddr_t)&txd[1]); data->buflen = xferlen; data->ni = ni; data->m = m0; STAILQ_INSERT_TAIL(&sc->sc_tx_pending[which], data, next); /* start transfer, if any */ usbd_transfer_start(sc->sc_xfer[which]); return (0); } static int rsu_transmit(struct ieee80211com *ic, struct mbuf *m) { struct rsu_softc *sc = ic->ic_softc; int error; RSU_LOCK(sc); if (!sc->sc_running) { RSU_UNLOCK(sc); return (ENXIO); } /* * XXX TODO: ensure that we treat 'm' as a list of frames * to transmit! 
*/ error = mbufq_enqueue(&sc->sc_snd, m); if (error) { RSU_DPRINTF(sc, RSU_DEBUG_TX, "%s: mbufq_enable: failed (%d)\n", __func__, error); RSU_UNLOCK(sc); return (error); } RSU_UNLOCK(sc); /* This kicks the TX taskqueue */ rsu_start(sc); return (0); } static void rsu_drain_mbufq(struct rsu_softc *sc) { struct mbuf *m; struct ieee80211_node *ni; RSU_ASSERT_LOCKED(sc); while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) { ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; m->m_pkthdr.rcvif = NULL; ieee80211_free_node(ni); m_freem(m); } } static void _rsu_start(struct rsu_softc *sc) { struct ieee80211_node *ni; struct rsu_data *bf; struct mbuf *m; RSU_ASSERT_LOCKED(sc); while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) { bf = rsu_getbuf(sc); if (bf == NULL) { RSU_DPRINTF(sc, RSU_DEBUG_TX, "%s: failed to get buffer\n", __func__); mbufq_prepend(&sc->sc_snd, m); break; } ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; m->m_pkthdr.rcvif = NULL; if (rsu_tx_start(sc, ni, m, bf) != 0) { RSU_DPRINTF(sc, RSU_DEBUG_TX, "%s: failed to transmit\n", __func__); if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); rsu_freebuf(sc, bf); ieee80211_free_node(ni); m_freem(m); break; } } } static void rsu_start(struct rsu_softc *sc) { taskqueue_enqueue(taskqueue_thread, &sc->tx_task); } static void rsu_parent(struct ieee80211com *ic) { struct rsu_softc *sc = ic->ic_softc; int startall = 0; RSU_LOCK(sc); if (ic->ic_nrunning > 0) { if (!sc->sc_running) { rsu_init(sc); startall = 1; } } else if (sc->sc_running) rsu_stop(sc); RSU_UNLOCK(sc); if (startall) ieee80211_start_all(ic); } /* * Power on sequence for A-cut adapters. */ static void rsu_power_on_acut(struct rsu_softc *sc) { uint32_t reg; rsu_write_1(sc, R92S_SPS0_CTRL + 1, 0x53); rsu_write_1(sc, R92S_SPS0_CTRL + 0, 0x57); /* Enable AFE macro block's bandgap and Mbias. */ rsu_write_1(sc, R92S_AFE_MISC, rsu_read_1(sc, R92S_AFE_MISC) | R92S_AFE_MISC_BGEN | R92S_AFE_MISC_MBEN); /* Enable LDOA15 block. */ rsu_write_1(sc, R92S_LDOA15_CTRL, rsu_read_1(sc, R92S_LDOA15_CTRL) | R92S_LDA15_EN); rsu_write_1(sc, R92S_SPS1_CTRL, rsu_read_1(sc, R92S_SPS1_CTRL) | R92S_SPS1_LDEN); rsu_ms_delay(sc, 2000); /* Enable switch regulator block. */ rsu_write_1(sc, R92S_SPS1_CTRL, rsu_read_1(sc, R92S_SPS1_CTRL) | R92S_SPS1_SWEN); rsu_write_4(sc, R92S_SPS1_CTRL, 0x00a7b267); rsu_write_1(sc, R92S_SYS_ISO_CTRL + 1, rsu_read_1(sc, R92S_SYS_ISO_CTRL + 1) | 0x08); rsu_write_1(sc, R92S_SYS_FUNC_EN + 1, rsu_read_1(sc, R92S_SYS_FUNC_EN + 1) | 0x20); rsu_write_1(sc, R92S_SYS_ISO_CTRL + 1, rsu_read_1(sc, R92S_SYS_ISO_CTRL + 1) & ~0x90); /* Enable AFE clock. */ rsu_write_1(sc, R92S_AFE_XTAL_CTRL + 1, rsu_read_1(sc, R92S_AFE_XTAL_CTRL + 1) & ~0x04); /* Enable AFE PLL macro block. */ rsu_write_1(sc, R92S_AFE_PLL_CTRL, rsu_read_1(sc, R92S_AFE_PLL_CTRL) | 0x11); /* Attach AFE PLL to MACTOP/BB. */ rsu_write_1(sc, R92S_SYS_ISO_CTRL, rsu_read_1(sc, R92S_SYS_ISO_CTRL) & ~0x11); /* Switch to 40MHz clock instead of 80MHz. */ rsu_write_2(sc, R92S_SYS_CLKR, rsu_read_2(sc, R92S_SYS_CLKR) & ~R92S_SYS_CLKSEL); /* Enable MAC clock. */ rsu_write_2(sc, R92S_SYS_CLKR, rsu_read_2(sc, R92S_SYS_CLKR) | R92S_MAC_CLK_EN | R92S_SYS_CLK_EN); rsu_write_1(sc, R92S_PMC_FSM, 0x02); /* Enable digital core and IOREG R/W. */ rsu_write_1(sc, R92S_SYS_FUNC_EN + 1, rsu_read_1(sc, R92S_SYS_FUNC_EN + 1) | 0x08); rsu_write_1(sc, R92S_SYS_FUNC_EN + 1, rsu_read_1(sc, R92S_SYS_FUNC_EN + 1) | 0x80); /* Switch the control path to firmware. 
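* (clear R92S_SWHW_SEL and set R92S_FWHW_SEL in SYS_CLKR so that the
* on-chip CPU owns the MAC control path from here on)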
*/ reg = rsu_read_2(sc, R92S_SYS_CLKR); reg = (reg & ~R92S_SWHW_SEL) | R92S_FWHW_SEL; rsu_write_2(sc, R92S_SYS_CLKR, reg); rsu_write_2(sc, R92S_CR, 0x37fc); /* Fix USB RX FIFO issue. */ rsu_write_1(sc, 0xfe5c, rsu_read_1(sc, 0xfe5c) | 0x80); rsu_write_1(sc, 0x00ab, rsu_read_1(sc, 0x00ab) | 0xc0); rsu_write_1(sc, R92S_SYS_CLKR, rsu_read_1(sc, R92S_SYS_CLKR) & ~R92S_SYS_CPU_CLKSEL); } /* * Power on sequence for B-cut and C-cut adapters. */ static void rsu_power_on_bcut(struct rsu_softc *sc) { uint32_t reg; int ntries; /* Prevent eFuse leakage. */ rsu_write_1(sc, 0x37, 0xb0); rsu_ms_delay(sc, 10); rsu_write_1(sc, 0x37, 0x30); /* Switch the control path to hardware. */ reg = rsu_read_2(sc, R92S_SYS_CLKR); if (reg & R92S_FWHW_SEL) { rsu_write_2(sc, R92S_SYS_CLKR, reg & ~(R92S_SWHW_SEL | R92S_FWHW_SEL)); } rsu_write_1(sc, R92S_SYS_FUNC_EN + 1, rsu_read_1(sc, R92S_SYS_FUNC_EN + 1) & ~0x8c); rsu_ms_delay(sc, 1); rsu_write_1(sc, R92S_SPS0_CTRL + 1, 0x53); rsu_write_1(sc, R92S_SPS0_CTRL + 0, 0x57); reg = rsu_read_1(sc, R92S_AFE_MISC); rsu_write_1(sc, R92S_AFE_MISC, reg | R92S_AFE_MISC_BGEN); rsu_write_1(sc, R92S_AFE_MISC, reg | R92S_AFE_MISC_BGEN | R92S_AFE_MISC_MBEN | R92S_AFE_MISC_I32_EN); /* Enable PLL. */ rsu_write_1(sc, R92S_LDOA15_CTRL, rsu_read_1(sc, R92S_LDOA15_CTRL) | R92S_LDA15_EN); rsu_write_1(sc, R92S_LDOV12D_CTRL, rsu_read_1(sc, R92S_LDOV12D_CTRL) | R92S_LDV12_EN); rsu_write_1(sc, R92S_SYS_ISO_CTRL + 1, rsu_read_1(sc, R92S_SYS_ISO_CTRL + 1) | 0x08); rsu_write_1(sc, R92S_SYS_FUNC_EN + 1, rsu_read_1(sc, R92S_SYS_FUNC_EN + 1) | 0x20); /* Support 64KB IMEM. */ rsu_write_1(sc, R92S_SYS_ISO_CTRL + 1, rsu_read_1(sc, R92S_SYS_ISO_CTRL + 1) & ~0x97); /* Enable AFE clock. */ rsu_write_1(sc, R92S_AFE_XTAL_CTRL + 1, rsu_read_1(sc, R92S_AFE_XTAL_CTRL + 1) & ~0x04); /* Enable AFE PLL macro block. */ reg = rsu_read_1(sc, R92S_AFE_PLL_CTRL); rsu_write_1(sc, R92S_AFE_PLL_CTRL, reg | 0x11); rsu_ms_delay(sc, 1); rsu_write_1(sc, R92S_AFE_PLL_CTRL, reg | 0x51); rsu_ms_delay(sc, 1); rsu_write_1(sc, R92S_AFE_PLL_CTRL, reg | 0x11); rsu_ms_delay(sc, 1); /* Attach AFE PLL to MACTOP/BB. */ rsu_write_1(sc, R92S_SYS_ISO_CTRL, rsu_read_1(sc, R92S_SYS_ISO_CTRL) & ~0x11); /* Switch to 40MHz clock. */ rsu_write_1(sc, R92S_SYS_CLKR, 0x00); /* Disable CPU clock and 80MHz SSC. */ rsu_write_1(sc, R92S_SYS_CLKR, rsu_read_1(sc, R92S_SYS_CLKR) | 0xa0); /* Enable MAC clock. */ rsu_write_2(sc, R92S_SYS_CLKR, rsu_read_2(sc, R92S_SYS_CLKR) | R92S_MAC_CLK_EN | R92S_SYS_CLK_EN); rsu_write_1(sc, R92S_PMC_FSM, 0x02); /* Enable digital core and IOREG R/W. */ rsu_write_1(sc, R92S_SYS_FUNC_EN + 1, rsu_read_1(sc, R92S_SYS_FUNC_EN + 1) | 0x08); rsu_write_1(sc, R92S_SYS_FUNC_EN + 1, rsu_read_1(sc, R92S_SYS_FUNC_EN + 1) | 0x80); /* Switch the control path to firmware. */ reg = rsu_read_2(sc, R92S_SYS_CLKR); reg = (reg & ~R92S_SWHW_SEL) | R92S_FWHW_SEL; rsu_write_2(sc, R92S_SYS_CLKR, reg); rsu_write_2(sc, R92S_CR, 0x37fc); /* Fix USB RX FIFO issue. */ rsu_write_1(sc, 0xfe5c, rsu_read_1(sc, 0xfe5c) | 0x80); rsu_write_1(sc, R92S_SYS_CLKR, rsu_read_1(sc, R92S_SYS_CLKR) & ~R92S_SYS_CPU_CLKSEL); rsu_write_1(sc, 0xfe1c, 0x80); /* Make sure TxDMA is ready to download firmware. */ for (ntries = 0; ntries < 20; ntries++) { reg = rsu_read_1(sc, R92S_TCR); if ((reg & (R92S_TCR_IMEM_CHK_RPT | R92S_TCR_EMEM_CHK_RPT)) == (R92S_TCR_IMEM_CHK_RPT | R92S_TCR_EMEM_CHK_RPT)) break; rsu_ms_delay(sc, 1); } if (ntries == 20) { RSU_DPRINTF(sc, RSU_DEBUG_RESET | RSU_DEBUG_TX, "%s: TxDMA is not ready\n", __func__); /* Reset TxDMA. 
*/ reg = rsu_read_1(sc, R92S_CR); rsu_write_1(sc, R92S_CR, reg & ~R92S_CR_TXDMA_EN); rsu_ms_delay(sc, 1); rsu_write_1(sc, R92S_CR, reg | R92S_CR_TXDMA_EN); } } static void rsu_power_off(struct rsu_softc *sc) { /* Turn RF off. */ rsu_write_1(sc, R92S_RF_CTRL, 0x00); rsu_ms_delay(sc, 5); /* Turn MAC off. */ /* Switch control path. */ rsu_write_1(sc, R92S_SYS_CLKR + 1, 0x38); /* Reset MACTOP. */ rsu_write_1(sc, R92S_SYS_FUNC_EN + 1, 0x70); rsu_write_1(sc, R92S_PMC_FSM, 0x06); rsu_write_1(sc, R92S_SYS_ISO_CTRL + 0, 0xf9); rsu_write_1(sc, R92S_SYS_ISO_CTRL + 1, 0xe8); /* Disable AFE PLL. */ rsu_write_1(sc, R92S_AFE_PLL_CTRL, 0x00); /* Disable A15V. */ rsu_write_1(sc, R92S_LDOA15_CTRL, 0x54); /* Disable eFuse 1.2V. */ rsu_write_1(sc, R92S_SYS_FUNC_EN + 1, 0x50); rsu_write_1(sc, R92S_LDOV12D_CTRL, 0x24); /* Enable AFE macro block's bandgap and Mbias. */ rsu_write_1(sc, R92S_AFE_MISC, 0x30); /* Disable 1.6V LDO. */ rsu_write_1(sc, R92S_SPS0_CTRL + 0, 0x56); rsu_write_1(sc, R92S_SPS0_CTRL + 1, 0x43); /* Firmware - tell it to switch things off */ (void) rsu_set_fw_power_state(sc, RSU_PWR_OFF); } static int rsu_fw_loadsection(struct rsu_softc *sc, const uint8_t *buf, int len) { const uint8_t which = rsu_wme_ac_xfer_map[WME_AC_VO]; struct rsu_data *data; struct r92s_tx_desc *txd; int mlen; while (len > 0) { data = rsu_getbuf(sc); if (data == NULL) return (ENOMEM); txd = (struct r92s_tx_desc *)data->buf; memset(txd, 0, sizeof(*txd)); if (len <= RSU_TXBUFSZ - sizeof(*txd)) { /* Last chunk. */ txd->txdw0 |= htole32(R92S_TXDW0_LINIP); mlen = len; } else mlen = RSU_TXBUFSZ - sizeof(*txd); txd->txdw0 |= htole32(SM(R92S_TXDW0_PKTLEN, mlen)); memcpy(&txd[1], buf, mlen); data->buflen = sizeof(*txd) + mlen; RSU_DPRINTF(sc, RSU_DEBUG_TX | RSU_DEBUG_FW | RSU_DEBUG_RESET, "%s: starting transfer %p\n", __func__, data); STAILQ_INSERT_TAIL(&sc->sc_tx_pending[which], data, next); buf += mlen; len -= mlen; } usbd_transfer_start(sc->sc_xfer[which]); return (0); } static int rsu_load_firmware(struct rsu_softc *sc) { const struct r92s_fw_hdr *hdr; struct r92s_fw_priv *dmem; struct ieee80211com *ic = &sc->sc_ic; const uint8_t *imem, *emem; int imemsz, ememsz; const struct firmware *fw; size_t size; uint32_t reg; int ntries, error; if (rsu_read_1(sc, R92S_TCR) & R92S_TCR_FWRDY) { RSU_DPRINTF(sc, RSU_DEBUG_ANY, "%s: Firmware already loaded\n", __func__); return (0); } RSU_UNLOCK(sc); /* Read firmware image from the filesystem. */ if ((fw = firmware_get("rsu-rtl8712fw")) == NULL) { device_printf(sc->sc_dev, "%s: failed load firmware of file rsu-rtl8712fw\n", __func__); RSU_LOCK(sc); return (ENXIO); } RSU_LOCK(sc); size = fw->datasize; if (size < sizeof(*hdr)) { device_printf(sc->sc_dev, "firmware too short\n"); error = EINVAL; goto fail; } hdr = (const struct r92s_fw_hdr *)fw->data; if (hdr->signature != htole16(0x8712) && hdr->signature != htole16(0x8192)) { device_printf(sc->sc_dev, "invalid firmware signature 0x%x\n", le16toh(hdr->signature)); error = EINVAL; goto fail; } DPRINTF("FW V%d %02x-%02x %02x:%02x\n", le16toh(hdr->version), hdr->month, hdr->day, hdr->hour, hdr->minute); /* Make sure that driver and firmware are in sync. */ if (hdr->privsz != htole32(sizeof(*dmem))) { device_printf(sc->sc_dev, "unsupported firmware image\n"); error = EINVAL; goto fail; } /* Get FW sections sizes. */ imemsz = le32toh(hdr->imemsz); ememsz = le32toh(hdr->sramsz); /* Check that all FW sections fit in image. 
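* The image is laid out as [r92s_fw_hdr][IMEM][EMEM]; the DMEM/private
* config block lives inside the header itself and is patched up and
* uploaded separately further down.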
*/ if (size < sizeof(*hdr) + imemsz + ememsz) { device_printf(sc->sc_dev, "firmware too short\n"); error = EINVAL; goto fail; } imem = (const uint8_t *)&hdr[1]; emem = imem + imemsz; /* Load IMEM section. */ error = rsu_fw_loadsection(sc, imem, imemsz); if (error != 0) { device_printf(sc->sc_dev, "could not load firmware section %s\n", "IMEM"); goto fail; } /* Wait for load to complete. */ for (ntries = 0; ntries != 50; ntries++) { rsu_ms_delay(sc, 10); reg = rsu_read_1(sc, R92S_TCR); if (reg & R92S_TCR_IMEM_CODE_DONE) break; } if (ntries == 50) { device_printf(sc->sc_dev, "timeout waiting for IMEM transfer\n"); error = ETIMEDOUT; goto fail; } /* Load EMEM section. */ error = rsu_fw_loadsection(sc, emem, ememsz); if (error != 0) { device_printf(sc->sc_dev, "could not load firmware section %s\n", "EMEM"); goto fail; } /* Wait for load to complete. */ for (ntries = 0; ntries != 50; ntries++) { rsu_ms_delay(sc, 10); reg = rsu_read_2(sc, R92S_TCR); if (reg & R92S_TCR_EMEM_CODE_DONE) break; } if (ntries == 50) { device_printf(sc->sc_dev, "timeout waiting for EMEM transfer\n"); error = ETIMEDOUT; goto fail; } /* Enable CPU. */ rsu_write_1(sc, R92S_SYS_CLKR, rsu_read_1(sc, R92S_SYS_CLKR) | R92S_SYS_CPU_CLKSEL); if (!(rsu_read_1(sc, R92S_SYS_CLKR) & R92S_SYS_CPU_CLKSEL)) { device_printf(sc->sc_dev, "could not enable system clock\n"); error = EIO; goto fail; } rsu_write_2(sc, R92S_SYS_FUNC_EN, rsu_read_2(sc, R92S_SYS_FUNC_EN) | R92S_FEN_CPUEN); if (!(rsu_read_2(sc, R92S_SYS_FUNC_EN) & R92S_FEN_CPUEN)) { device_printf(sc->sc_dev, "could not enable microcontroller\n"); error = EIO; goto fail; } /* Wait for CPU to initialize. */ for (ntries = 0; ntries < 100; ntries++) { if (rsu_read_1(sc, R92S_TCR) & R92S_TCR_IMEM_RDY) break; rsu_ms_delay(sc, 1); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for microcontroller\n"); error = ETIMEDOUT; goto fail; } /* Update DMEM section before loading. */ dmem = __DECONST(struct r92s_fw_priv *, &hdr->priv); memset(dmem, 0, sizeof(*dmem)); dmem->hci_sel = R92S_HCI_SEL_USB | R92S_HCI_SEL_8172; dmem->nendpoints = sc->sc_nendpoints; dmem->chip_version = sc->cut; dmem->rf_config = sc->sc_rftype; dmem->vcs_type = R92S_VCS_TYPE_AUTO; dmem->vcs_mode = R92S_VCS_MODE_RTS_CTS; dmem->turbo_mode = 0; dmem->bw40_en = !! (ic->ic_htcaps & IEEE80211_HTCAP_CHWIDTH40); dmem->amsdu2ampdu_en = !! (sc->sc_ht); dmem->ampdu_en = !! (sc->sc_ht); dmem->agg_offload = !! (sc->sc_ht); dmem->qos_en = 1; dmem->ps_offload = 1; dmem->lowpower_mode = 1; /* XXX TODO: configurable? */ /* Load DMEM section. */ error = rsu_fw_loadsection(sc, (uint8_t *)dmem, sizeof(*dmem)); if (error != 0) { device_printf(sc->sc_dev, "could not load firmware section %s\n", "DMEM"); goto fail; } /* Wait for load to complete. */ for (ntries = 0; ntries < 100; ntries++) { if (rsu_read_1(sc, R92S_TCR) & R92S_TCR_DMEM_CODE_DONE) break; rsu_ms_delay(sc, 1); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for %s transfer\n", "DMEM"); error = ETIMEDOUT; goto fail; } /* Wait for firmware readiness. 
*/ for (ntries = 0; ntries < 60; ntries++) { if (!(rsu_read_1(sc, R92S_TCR) & R92S_TCR_FWRDY)) break; rsu_ms_delay(sc, 1); } if (ntries == 60) { device_printf(sc->sc_dev, "timeout waiting for firmware readiness\n"); error = ETIMEDOUT; goto fail; } fail: firmware_put(fw, FIRMWARE_UNLOAD); return (error); } static int rsu_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct rsu_softc *sc = ic->ic_softc; struct rsu_data *bf; /* prevent management frames from being sent if we're not ready */ if (!sc->sc_running) { m_freem(m); return (ENETDOWN); } RSU_LOCK(sc); bf = rsu_getbuf(sc); if (bf == NULL) { m_freem(m); RSU_UNLOCK(sc); return (ENOBUFS); } if (rsu_tx_start(sc, ni, m, bf) != 0) { m_freem(m); rsu_freebuf(sc, bf); RSU_UNLOCK(sc); return (EIO); } RSU_UNLOCK(sc); return (0); } static void rsu_init(struct rsu_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint8_t macaddr[IEEE80211_ADDR_LEN]; int error; int i; RSU_ASSERT_LOCKED(sc); /* Ensure the mbuf queue is drained */ rsu_drain_mbufq(sc); /* Reset power management state. */ rsu_write_1(sc, R92S_USB_HRPWM, 0); /* Power on adapter. */ if (sc->cut == 1) rsu_power_on_acut(sc); else rsu_power_on_bcut(sc); /* Load firmware. */ error = rsu_load_firmware(sc); if (error != 0) goto fail; /* Enable Rx TCP checksum offload. */ rsu_write_4(sc, R92S_RCR, rsu_read_4(sc, R92S_RCR) | 0x04000000); rsu_write_4(sc, R92S_CR, rsu_read_4(sc, R92S_CR) & ~0xff000000); /* Use 128 bytes pages. */ rsu_write_1(sc, 0x00b5, rsu_read_1(sc, 0x00b5) | 0x01); /* Enable USB Rx aggregation. */ rsu_write_1(sc, 0x00bd, rsu_read_1(sc, 0x00bd) | 0x80); /* Set USB Rx aggregation threshold. */ rsu_write_1(sc, 0x00d9, 0x01); /* Set USB Rx aggregation timeout (1.7ms/4). */ rsu_write_1(sc, 0xfe5b, 0x04); /* Fix USB Rx FIFO issue. */ rsu_write_1(sc, 0xfe5c, rsu_read_1(sc, 0xfe5c) | 0x80); /* Set MAC address. */ IEEE80211_ADDR_COPY(macaddr, vap ? vap->iv_myaddr : ic->ic_macaddr); rsu_write_region_1(sc, R92S_MACID, macaddr, IEEE80211_ADDR_LEN); /* It really takes 1.5 seconds for the firmware to boot: */ rsu_ms_delay(sc, 2000); RSU_DPRINTF(sc, RSU_DEBUG_RESET, "%s: setting MAC address to %s\n", __func__, ether_sprintf(macaddr)); error = rsu_fw_cmd(sc, R92S_CMD_SET_MAC_ADDRESS, macaddr, IEEE80211_ADDR_LEN); if (error != 0) { device_printf(sc->sc_dev, "could not set MAC address\n"); goto fail; } /* Append PHY status. */ rsu_write_4(sc, R92S_RCR, rsu_read_4(sc, R92S_RCR) | 0x02000000); /* Setup multicast filter (must be done after firmware loading). */ rsu_set_multi(sc); /* Set PS mode fully active */ error = rsu_set_fw_power_state(sc, RSU_PWR_ACTIVE); if (error != 0) { device_printf(sc->sc_dev, "could not set PS mode\n"); goto fail; } /* Install static keys (if any). */ error = rsu_reinit_static_keys(sc); if (error != 0) goto fail; sc->sc_extra_scan = 0; usbd_transfer_start(sc->sc_xfer[RSU_BULK_RX]); /* We're ready to go. */ sc->sc_running = 1; return; fail: /* Need to stop all failed transfers, if any */ for (i = 0; i != RSU_N_TRANSFER; i++) usbd_transfer_stop(sc->sc_xfer[i]); } static void rsu_stop(struct rsu_softc *sc) { int i; RSU_ASSERT_LOCKED(sc); sc->sc_running = 0; sc->sc_calibrating = 0; taskqueue_cancel_timeout(taskqueue_thread, &sc->calib_task, NULL); taskqueue_cancel(taskqueue_thread, &sc->tx_task, NULL); /* Power off adapter. */ rsu_power_off(sc); /* * CAM is not accessible after shutdown; * all entries are marked (by firmware?) 
as invalid. */ memset(sc->free_keys_bmap, 0, sizeof(sc->free_keys_bmap)); memset(sc->keys_bmap, 0, sizeof(sc->keys_bmap)); for (i = 0; i < RSU_N_TRANSFER; i++) usbd_transfer_stop(sc->sc_xfer[i]); /* Ensure the mbuf queue is drained */ rsu_drain_mbufq(sc); } /* * Note: usb_pause_mtx() actually releases the mutex before calling pause(), * which breaks any kind of driver serialisation. */ static void rsu_ms_delay(struct rsu_softc *sc, int ms) { //usb_pause_mtx(&sc->sc_mtx, hz / 1000); DELAY(ms * 1000); } Index: head/sys/dev/usb/wlan/if_rum.c =================================================================== --- head/sys/dev/usb/wlan/if_rum.c (revision 309685) +++ head/sys/dev/usb/wlan/if_rum.c (revision 309686) @@ -1,3319 +1,3319 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2005-2007 Damien Bergamini * Copyright (c) 2006 Niall O'Higgins * Copyright (c) 2007-2008 Hans Petter Selasky * Copyright (c) 2015 Andriy Voskoboinyk * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /*- * Ralink Technology RT2501USB/RT2601USB chipset driver * http://www.ralinktech.com.tw/ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef INET #include #include #include #include #include #endif #include #include #include #include #include #include #include "usbdevs.h" #define USB_DEBUG_VAR rum_debug #include #include #include #include #ifdef USB_DEBUG static int rum_debug = 0; static SYSCTL_NODE(_hw_usb, OID_AUTO, rum, CTLFLAG_RW, 0, "USB rum"); SYSCTL_INT(_hw_usb_rum, OID_AUTO, debug, CTLFLAG_RWTUN, &rum_debug, 0, "Debug level"); #endif static const STRUCT_USB_HOST_ID rum_devs[] = { #define RUM_DEV(v,p) { USB_VP(USB_VENDOR_##v, USB_PRODUCT_##v##_##p) } RUM_DEV(ABOCOM, HWU54DM), RUM_DEV(ABOCOM, RT2573_2), RUM_DEV(ABOCOM, RT2573_3), RUM_DEV(ABOCOM, RT2573_4), RUM_DEV(ABOCOM, WUG2700), RUM_DEV(AMIT, CGWLUSB2GO), RUM_DEV(ASUS, RT2573_1), RUM_DEV(ASUS, RT2573_2), RUM_DEV(BELKIN, F5D7050A), RUM_DEV(BELKIN, F5D9050V3), RUM_DEV(CISCOLINKSYS, WUSB54GC), RUM_DEV(CISCOLINKSYS, WUSB54GR), RUM_DEV(CONCEPTRONIC2, C54RU2), RUM_DEV(COREGA, CGWLUSB2GL), RUM_DEV(COREGA, CGWLUSB2GPX), RUM_DEV(DICKSMITH, CWD854F), RUM_DEV(DICKSMITH, RT2573), RUM_DEV(EDIMAX, EW7318USG), RUM_DEV(DLINK2, DWLG122C1), RUM_DEV(DLINK2, WUA1340), RUM_DEV(DLINK2, DWA111), RUM_DEV(DLINK2, DWA110), RUM_DEV(GIGABYTE, GNWB01GS), RUM_DEV(GIGABYTE, GNWI05GS), RUM_DEV(GIGASET, RT2573), RUM_DEV(GOODWAY, RT2573), RUM_DEV(GUILLEMOT, HWGUSB254LB), RUM_DEV(GUILLEMOT, HWGUSB254V2AP), RUM_DEV(HUAWEI3COM, WUB320G), RUM_DEV(MELCO, G54HP), RUM_DEV(MELCO, SG54HP), RUM_DEV(MELCO, SG54HG), RUM_DEV(MELCO, WLIUCG), RUM_DEV(MELCO, WLRUCG), RUM_DEV(MELCO, WLRUCGAOSS), RUM_DEV(MSI, RT2573_1), RUM_DEV(MSI, RT2573_2), 
RUM_DEV(MSI, RT2573_3), RUM_DEV(MSI, RT2573_4), RUM_DEV(NOVATECH, RT2573), RUM_DEV(PLANEX2, GWUS54HP), RUM_DEV(PLANEX2, GWUS54MINI2), RUM_DEV(PLANEX2, GWUSMM), RUM_DEV(QCOM, RT2573), RUM_DEV(QCOM, RT2573_2), RUM_DEV(QCOM, RT2573_3), RUM_DEV(RALINK, RT2573), RUM_DEV(RALINK, RT2573_2), RUM_DEV(RALINK, RT2671), RUM_DEV(SITECOMEU, WL113R2), RUM_DEV(SITECOMEU, WL172), RUM_DEV(SPARKLAN, RT2573), RUM_DEV(SURECOM, RT2573), #undef RUM_DEV }; static device_probe_t rum_match; static device_attach_t rum_attach; static device_detach_t rum_detach; static usb_callback_t rum_bulk_read_callback; static usb_callback_t rum_bulk_write_callback; static usb_error_t rum_do_request(struct rum_softc *sc, struct usb_device_request *req, void *data); static usb_error_t rum_do_mcu_request(struct rum_softc *sc, int); static struct ieee80211vap *rum_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void rum_vap_delete(struct ieee80211vap *); static void rum_cmdq_cb(void *, int); static int rum_cmd_sleepable(struct rum_softc *, const void *, size_t, uint8_t, CMD_FUNC_PROTO); static void rum_tx_free(struct rum_tx_data *, int); static void rum_setup_tx_list(struct rum_softc *); static void rum_reset_tx_list(struct rum_softc *, struct ieee80211vap *); static void rum_unsetup_tx_list(struct rum_softc *); static void rum_beacon_miss(struct ieee80211vap *); static void rum_sta_recv_mgmt(struct ieee80211_node *, struct mbuf *, int, const struct ieee80211_rx_stats *, int, int); static int rum_set_power_state(struct rum_softc *, int); static int rum_newstate(struct ieee80211vap *, enum ieee80211_state, int); static uint8_t rum_crypto_mode(struct rum_softc *, u_int, int); static void rum_setup_tx_desc(struct rum_softc *, struct rum_tx_desc *, struct ieee80211_key *, uint32_t, uint8_t, uint8_t, int, int, int); static uint32_t rum_tx_crypto_flags(struct rum_softc *, struct ieee80211_node *, const struct ieee80211_key *); static int rum_tx_mgt(struct rum_softc *, struct mbuf *, struct ieee80211_node *); static int rum_tx_raw(struct rum_softc *, struct mbuf *, struct ieee80211_node *, const struct ieee80211_bpf_params *); static int rum_tx_data(struct rum_softc *, struct mbuf *, struct ieee80211_node *); static int rum_transmit(struct ieee80211com *, struct mbuf *); static void rum_start(struct rum_softc *); static void rum_parent(struct ieee80211com *); static void rum_eeprom_read(struct rum_softc *, uint16_t, void *, int); static uint32_t rum_read(struct rum_softc *, uint16_t); static void rum_read_multi(struct rum_softc *, uint16_t, void *, int); static usb_error_t rum_write(struct rum_softc *, uint16_t, uint32_t); static usb_error_t rum_write_multi(struct rum_softc *, uint16_t, void *, size_t); static usb_error_t rum_setbits(struct rum_softc *, uint16_t, uint32_t); static usb_error_t rum_clrbits(struct rum_softc *, uint16_t, uint32_t); static usb_error_t rum_modbits(struct rum_softc *, uint16_t, uint32_t, uint32_t); static int rum_bbp_busy(struct rum_softc *); static void rum_bbp_write(struct rum_softc *, uint8_t, uint8_t); static uint8_t rum_bbp_read(struct rum_softc *, uint8_t); static void rum_rf_write(struct rum_softc *, uint8_t, uint32_t); static void rum_select_antenna(struct rum_softc *); static void rum_enable_mrr(struct rum_softc *); static void rum_set_txpreamble(struct rum_softc *); static void rum_set_basicrates(struct rum_softc *); static void rum_select_band(struct rum_softc *, struct 
ieee80211_channel *); static void rum_set_chan(struct rum_softc *, struct ieee80211_channel *); static void rum_set_maxretry(struct rum_softc *, struct ieee80211vap *); static int rum_enable_tsf_sync(struct rum_softc *); static void rum_enable_tsf(struct rum_softc *); static void rum_abort_tsf_sync(struct rum_softc *); static void rum_get_tsf(struct rum_softc *, uint64_t *); static void rum_update_slot_cb(struct rum_softc *, union sec_param *, uint8_t); static void rum_update_slot(struct ieee80211com *); static int rum_wme_update(struct ieee80211com *); static void rum_set_bssid(struct rum_softc *, const uint8_t *); static void rum_set_macaddr(struct rum_softc *, const uint8_t *); static void rum_update_mcast(struct ieee80211com *); static void rum_update_promisc(struct ieee80211com *); static void rum_setpromisc(struct rum_softc *); static const char *rum_get_rf(int); static void rum_read_eeprom(struct rum_softc *); static int rum_bbp_wakeup(struct rum_softc *); static int rum_bbp_init(struct rum_softc *); static void rum_clr_shkey_regs(struct rum_softc *); static int rum_init(struct rum_softc *); static void rum_stop(struct rum_softc *); static void rum_load_microcode(struct rum_softc *, const uint8_t *, size_t); static int rum_set_sleep_time(struct rum_softc *, uint16_t); static int rum_reset(struct ieee80211vap *, u_long); static int rum_set_beacon(struct rum_softc *, struct ieee80211vap *); static int rum_alloc_beacon(struct rum_softc *, struct ieee80211vap *); static void rum_update_beacon_cb(struct rum_softc *, union sec_param *, uint8_t); static void rum_update_beacon(struct ieee80211vap *, int); static int rum_common_key_set(struct rum_softc *, struct ieee80211_key *, uint16_t); static void rum_group_key_set_cb(struct rum_softc *, union sec_param *, uint8_t); static void rum_group_key_del_cb(struct rum_softc *, union sec_param *, uint8_t); static void rum_pair_key_set_cb(struct rum_softc *, union sec_param *, uint8_t); static void rum_pair_key_del_cb(struct rum_softc *, union sec_param *, uint8_t); static int rum_key_alloc(struct ieee80211vap *, struct ieee80211_key *, ieee80211_keyix *, ieee80211_keyix *); static int rum_key_set(struct ieee80211vap *, const struct ieee80211_key *); static int rum_key_delete(struct ieee80211vap *, const struct ieee80211_key *); static int rum_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void rum_scan_start(struct ieee80211com *); static void rum_scan_end(struct ieee80211com *); static void rum_set_channel(struct ieee80211com *); static void rum_getradiocaps(struct ieee80211com *, int, int *, struct ieee80211_channel[]); static int rum_get_rssi(struct rum_softc *, uint8_t); static void rum_ratectl_start(struct rum_softc *, struct ieee80211_node *); static void rum_ratectl_timeout(void *); static void rum_ratectl_task(void *, int); static int rum_pause(struct rum_softc *, int); static const struct { uint32_t reg; uint32_t val; } rum_def_mac[] = { { RT2573_TXRX_CSR0, 0x025fb032 }, { RT2573_TXRX_CSR1, 0x9eaa9eaf }, { RT2573_TXRX_CSR2, 0x8a8b8c8d }, { RT2573_TXRX_CSR3, 0x00858687 }, { RT2573_TXRX_CSR7, 0x2e31353b }, { RT2573_TXRX_CSR8, 0x2a2a2a2c }, { RT2573_TXRX_CSR15, 0x0000000f }, { RT2573_MAC_CSR6, 0x00000fff }, { RT2573_MAC_CSR8, 0x016c030a }, { RT2573_MAC_CSR10, 0x00000718 }, { RT2573_MAC_CSR12, 0x00000004 }, { RT2573_MAC_CSR13, 0x00007f00 }, { RT2573_SEC_CSR2, 0x00000000 }, { RT2573_SEC_CSR3, 0x00000000 }, { RT2573_SEC_CSR4, 0x00000000 }, { RT2573_PHY_CSR1, 0x000023b0 }, { RT2573_PHY_CSR5, 
0x00040a06 }, { RT2573_PHY_CSR6, 0x00080606 }, { RT2573_PHY_CSR7, 0x00000408 }, { RT2573_AIFSN_CSR, 0x00002273 }, { RT2573_CWMIN_CSR, 0x00002344 }, { RT2573_CWMAX_CSR, 0x000034aa } }; static const struct { uint8_t reg; uint8_t val; } rum_def_bbp[] = { { 3, 0x80 }, { 15, 0x30 }, { 17, 0x20 }, { 21, 0xc8 }, { 22, 0x38 }, { 23, 0x06 }, { 24, 0xfe }, { 25, 0x0a }, { 26, 0x0d }, { 32, 0x0b }, { 34, 0x12 }, { 37, 0x07 }, { 39, 0xf8 }, { 41, 0x60 }, { 53, 0x10 }, { 54, 0x18 }, { 60, 0x10 }, { 61, 0x04 }, { 62, 0x04 }, { 75, 0xfe }, { 86, 0xfe }, { 88, 0xfe }, { 90, 0x0f }, { 99, 0x00 }, { 102, 0x16 }, { 107, 0x04 } }; static const uint8_t rum_chan_2ghz[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 }; static const uint8_t rum_chan_5ghz[] = { 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 }; static const struct rfprog { uint8_t chan; uint32_t r1, r2, r3, r4; } rum_rf5226[] = { { 1, 0x00b03, 0x001e1, 0x1a014, 0x30282 }, { 2, 0x00b03, 0x001e1, 0x1a014, 0x30287 }, { 3, 0x00b03, 0x001e2, 0x1a014, 0x30282 }, { 4, 0x00b03, 0x001e2, 0x1a014, 0x30287 }, { 5, 0x00b03, 0x001e3, 0x1a014, 0x30282 }, { 6, 0x00b03, 0x001e3, 0x1a014, 0x30287 }, { 7, 0x00b03, 0x001e4, 0x1a014, 0x30282 }, { 8, 0x00b03, 0x001e4, 0x1a014, 0x30287 }, { 9, 0x00b03, 0x001e5, 0x1a014, 0x30282 }, { 10, 0x00b03, 0x001e5, 0x1a014, 0x30287 }, { 11, 0x00b03, 0x001e6, 0x1a014, 0x30282 }, { 12, 0x00b03, 0x001e6, 0x1a014, 0x30287 }, { 13, 0x00b03, 0x001e7, 0x1a014, 0x30282 }, { 14, 0x00b03, 0x001e8, 0x1a014, 0x30284 }, { 34, 0x00b03, 0x20266, 0x36014, 0x30282 }, { 38, 0x00b03, 0x20267, 0x36014, 0x30284 }, { 42, 0x00b03, 0x20268, 0x36014, 0x30286 }, { 46, 0x00b03, 0x20269, 0x36014, 0x30288 }, { 36, 0x00b03, 0x00266, 0x26014, 0x30288 }, { 40, 0x00b03, 0x00268, 0x26014, 0x30280 }, { 44, 0x00b03, 0x00269, 0x26014, 0x30282 }, { 48, 0x00b03, 0x0026a, 0x26014, 0x30284 }, { 52, 0x00b03, 0x0026b, 0x26014, 0x30286 }, { 56, 0x00b03, 0x0026c, 0x26014, 0x30288 }, { 60, 0x00b03, 0x0026e, 0x26014, 0x30280 }, { 64, 0x00b03, 0x0026f, 0x26014, 0x30282 }, { 100, 0x00b03, 0x0028a, 0x2e014, 0x30280 }, { 104, 0x00b03, 0x0028b, 0x2e014, 0x30282 }, { 108, 0x00b03, 0x0028c, 0x2e014, 0x30284 }, { 112, 0x00b03, 0x0028d, 0x2e014, 0x30286 }, { 116, 0x00b03, 0x0028e, 0x2e014, 0x30288 }, { 120, 0x00b03, 0x002a0, 0x2e014, 0x30280 }, { 124, 0x00b03, 0x002a1, 0x2e014, 0x30282 }, { 128, 0x00b03, 0x002a2, 0x2e014, 0x30284 }, { 132, 0x00b03, 0x002a3, 0x2e014, 0x30286 }, { 136, 0x00b03, 0x002a4, 0x2e014, 0x30288 }, { 140, 0x00b03, 0x002a6, 0x2e014, 0x30280 }, { 149, 0x00b03, 0x002a8, 0x2e014, 0x30287 }, { 153, 0x00b03, 0x002a9, 0x2e014, 0x30289 }, { 157, 0x00b03, 0x002ab, 0x2e014, 0x30281 }, { 161, 0x00b03, 0x002ac, 0x2e014, 0x30283 }, { 165, 0x00b03, 0x002ad, 0x2e014, 0x30285 } }, rum_rf5225[] = { { 1, 0x00b33, 0x011e1, 0x1a014, 0x30282 }, { 2, 0x00b33, 0x011e1, 0x1a014, 0x30287 }, { 3, 0x00b33, 0x011e2, 0x1a014, 0x30282 }, { 4, 0x00b33, 0x011e2, 0x1a014, 0x30287 }, { 5, 0x00b33, 0x011e3, 0x1a014, 0x30282 }, { 6, 0x00b33, 0x011e3, 0x1a014, 0x30287 }, { 7, 0x00b33, 0x011e4, 0x1a014, 0x30282 }, { 8, 0x00b33, 0x011e4, 0x1a014, 0x30287 }, { 9, 0x00b33, 0x011e5, 0x1a014, 0x30282 }, { 10, 0x00b33, 0x011e5, 0x1a014, 0x30287 }, { 11, 0x00b33, 0x011e6, 0x1a014, 0x30282 }, { 12, 0x00b33, 0x011e6, 0x1a014, 0x30287 }, { 13, 0x00b33, 0x011e7, 0x1a014, 0x30282 }, { 14, 0x00b33, 0x011e8, 0x1a014, 0x30284 }, { 34, 0x00b33, 0x01266, 0x26014, 0x30282 }, { 38, 0x00b33, 0x01267, 0x26014, 0x30284 }, { 42, 0x00b33, 
0x01268, 0x26014, 0x30286 }, { 46, 0x00b33, 0x01269, 0x26014, 0x30288 }, { 36, 0x00b33, 0x01266, 0x26014, 0x30288 }, { 40, 0x00b33, 0x01268, 0x26014, 0x30280 }, { 44, 0x00b33, 0x01269, 0x26014, 0x30282 }, { 48, 0x00b33, 0x0126a, 0x26014, 0x30284 }, { 52, 0x00b33, 0x0126b, 0x26014, 0x30286 }, { 56, 0x00b33, 0x0126c, 0x26014, 0x30288 }, { 60, 0x00b33, 0x0126e, 0x26014, 0x30280 }, { 64, 0x00b33, 0x0126f, 0x26014, 0x30282 }, { 100, 0x00b33, 0x0128a, 0x2e014, 0x30280 }, { 104, 0x00b33, 0x0128b, 0x2e014, 0x30282 }, { 108, 0x00b33, 0x0128c, 0x2e014, 0x30284 }, { 112, 0x00b33, 0x0128d, 0x2e014, 0x30286 }, { 116, 0x00b33, 0x0128e, 0x2e014, 0x30288 }, { 120, 0x00b33, 0x012a0, 0x2e014, 0x30280 }, { 124, 0x00b33, 0x012a1, 0x2e014, 0x30282 }, { 128, 0x00b33, 0x012a2, 0x2e014, 0x30284 }, { 132, 0x00b33, 0x012a3, 0x2e014, 0x30286 }, { 136, 0x00b33, 0x012a4, 0x2e014, 0x30288 }, { 140, 0x00b33, 0x012a6, 0x2e014, 0x30280 }, { 149, 0x00b33, 0x012a8, 0x2e014, 0x30287 }, { 153, 0x00b33, 0x012a9, 0x2e014, 0x30289 }, { 157, 0x00b33, 0x012ab, 0x2e014, 0x30281 }, { 161, 0x00b33, 0x012ac, 0x2e014, 0x30283 }, { 165, 0x00b33, 0x012ad, 0x2e014, 0x30285 } }; static const struct usb_config rum_config[RUM_N_TRANSFER] = { [RUM_BULK_WR] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_OUT, .bufsize = (MCLBYTES + RT2573_TX_DESC_SIZE + 8), .flags = {.pipe_bof = 1,.force_short_xfer = 1,}, .callback = rum_bulk_write_callback, .timeout = 5000, /* ms */ }, [RUM_BULK_RD] = { .type = UE_BULK, .endpoint = UE_ADDR_ANY, .direction = UE_DIR_IN, .bufsize = (MCLBYTES + RT2573_RX_DESC_SIZE), .flags = {.pipe_bof = 1,.short_xfer_ok = 1,}, .callback = rum_bulk_read_callback, }, }; static int rum_match(device_t self) { struct usb_attach_arg *uaa = device_get_ivars(self); if (uaa->usb_mode != USB_MODE_HOST) return (ENXIO); if (uaa->info.bConfigIndex != 0) return (ENXIO); if (uaa->info.bIfaceIndex != RT2573_IFACE_INDEX) return (ENXIO); return (usbd_lookup_id_by_uaa(rum_devs, sizeof(rum_devs), uaa)); } static int rum_attach(device_t self) { struct usb_attach_arg *uaa = device_get_ivars(self); struct rum_softc *sc = device_get_softc(self); struct ieee80211com *ic = &sc->sc_ic; uint32_t tmp; uint8_t iface_index; int error, ntries; device_set_usb_desc(self); sc->sc_udev = uaa->device; sc->sc_dev = self; RUM_LOCK_INIT(sc); RUM_CMDQ_LOCK_INIT(sc); mbufq_init(&sc->sc_snd, ifqmaxlen); iface_index = RT2573_IFACE_INDEX; error = usbd_transfer_setup(uaa->device, &iface_index, sc->sc_xfer, rum_config, RUM_N_TRANSFER, sc, &sc->sc_mtx); if (error) { device_printf(self, "could not allocate USB transfers, " "err=%s\n", usbd_errstr(error)); goto detach; } RUM_LOCK(sc); /* retrieve RT2573 rev. 
no */ for (ntries = 0; ntries < 100; ntries++) { if ((tmp = rum_read(sc, RT2573_MAC_CSR0)) != 0) break; if (rum_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for chip to settle\n"); RUM_UNLOCK(sc); goto detach; } /* retrieve MAC address and various other things from EEPROM */ rum_read_eeprom(sc); device_printf(sc->sc_dev, "MAC/BBP RT2573 (rev 0x%05x), RF %s\n", tmp, rum_get_rf(sc->rf_rev)); rum_load_microcode(sc, rt2573_ucode, sizeof(rt2573_ucode)); RUM_UNLOCK(sc); ic->ic_softc = sc; ic->ic_name = device_get_nameunit(self); ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_IBSS /* IBSS mode supported */ | IEEE80211_C_MONITOR /* monitor mode supported */ | IEEE80211_C_HOSTAP /* HostAp mode supported */ | IEEE80211_C_AHDEMO /* adhoc demo mode */ | IEEE80211_C_TXPMGT /* tx power management */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_BGSCAN /* bg scanning supported */ | IEEE80211_C_WPA /* 802.11i */ | IEEE80211_C_WME /* 802.11e */ | IEEE80211_C_PMGT /* Station-side power mgmt */ | IEEE80211_C_SWSLEEP /* net80211 managed power mgmt */ ; ic->ic_cryptocaps = IEEE80211_CRYPTO_WEP | IEEE80211_CRYPTO_AES_CCM | IEEE80211_CRYPTO_TKIPMIC | IEEE80211_CRYPTO_TKIP; rum_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels); ieee80211_ifattach(ic); ic->ic_update_promisc = rum_update_promisc; ic->ic_raw_xmit = rum_raw_xmit; ic->ic_scan_start = rum_scan_start; ic->ic_scan_end = rum_scan_end; ic->ic_set_channel = rum_set_channel; ic->ic_getradiocaps = rum_getradiocaps; ic->ic_transmit = rum_transmit; ic->ic_parent = rum_parent; ic->ic_vap_create = rum_vap_create; ic->ic_vap_delete = rum_vap_delete; ic->ic_updateslot = rum_update_slot; ic->ic_wme.wme_update = rum_wme_update; ic->ic_update_mcast = rum_update_mcast; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), RT2573_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), RT2573_RX_RADIOTAP_PRESENT); TASK_INIT(&sc->cmdq_task, 0, rum_cmdq_cb, sc); if (bootverbose) ieee80211_announce(ic); return (0); detach: rum_detach(self); return (ENXIO); /* failure */ } static int rum_detach(device_t self) { struct rum_softc *sc = device_get_softc(self); struct ieee80211com *ic = &sc->sc_ic; /* Prevent further ioctls */ RUM_LOCK(sc); sc->sc_detached = 1; RUM_UNLOCK(sc); /* stop all USB transfers */ usbd_transfer_unsetup(sc->sc_xfer, RUM_N_TRANSFER); /* free TX list, if any */ RUM_LOCK(sc); rum_unsetup_tx_list(sc); RUM_UNLOCK(sc); if (ic->ic_softc == sc) { ieee80211_draintask(ic, &sc->cmdq_task); ieee80211_ifdetach(ic); } mbufq_drain(&sc->sc_snd); RUM_CMDQ_LOCK_DESTROY(sc); RUM_LOCK_DESTROY(sc); return (0); } static usb_error_t rum_do_request(struct rum_softc *sc, struct usb_device_request *req, void *data) { usb_error_t err; int ntries = 10; while (ntries--) { err = usbd_do_request_flags(sc->sc_udev, &sc->sc_mtx, req, data, 0, NULL, 250 /* ms */); if (err == 0) break; DPRINTFN(1, "Control request failed, %s (retrying)\n", usbd_errstr(err)); if (rum_pause(sc, hz / 100)) break; } return (err); } static usb_error_t rum_do_mcu_request(struct rum_softc *sc, int request) { struct usb_device_request req; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RT2573_MCU_CNTL; USETW(req.wValue, request); USETW(req.wIndex, 0); USETW(req.wLength, 0); return (rum_do_request(sc, &req, 
NULL)); } static struct ieee80211vap * rum_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct rum_softc *sc = ic->ic_softc; struct rum_vap *rvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return NULL; rvp = malloc(sizeof(struct rum_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &rvp->vap; /* enable s/w bmiss handling for sta mode */ if (ieee80211_vap_setup(ic, vap, name, unit, opmode, flags | IEEE80211_CLONE_NOBEACONS, bssid) != 0) { /* out of memory */ free(rvp, M_80211_VAP); return (NULL); } /* override state transition machine */ rvp->newstate = vap->iv_newstate; vap->iv_newstate = rum_newstate; vap->iv_key_alloc = rum_key_alloc; vap->iv_key_set = rum_key_set; vap->iv_key_delete = rum_key_delete; vap->iv_update_beacon = rum_update_beacon; vap->iv_reset = rum_reset; vap->iv_max_aid = RT2573_ADDR_MAX; if (opmode == IEEE80211_M_STA) { /* * Move device to the sleep state when * beacon is received and there is no data for us. * * Used only for IEEE80211_S_SLEEP state. */ rvp->recv_mgmt = vap->iv_recv_mgmt; vap->iv_recv_mgmt = rum_sta_recv_mgmt; /* Ignored while sleeping. */ rvp->bmiss = vap->iv_bmiss; vap->iv_bmiss = rum_beacon_miss; } usb_callout_init_mtx(&rvp->ratectl_ch, &sc->sc_mtx, 0); TASK_INIT(&rvp->ratectl_task, 0, rum_ratectl_task, rvp); ieee80211_ratectl_init(vap); ieee80211_ratectl_setinterval(vap, 1000 /* 1 sec */); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status, mac); ic->ic_opmode = opmode; return vap; } static void rum_vap_delete(struct ieee80211vap *vap) { struct rum_vap *rvp = RUM_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct rum_softc *sc = ic->ic_softc; /* Put vap into INIT state. */ ieee80211_new_state(vap, IEEE80211_S_INIT, -1); ieee80211_draintask(ic, &vap->iv_nstate_task); RUM_LOCK(sc); /* Cancel any unfinished Tx. 
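* rum_reset_tx_list() below drops the node references and frees the mbufs of any frames still queued for this vap, returning their buffers to the free list.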
*/ rum_reset_tx_list(sc, vap); RUM_UNLOCK(sc); usb_callout_drain(&rvp->ratectl_ch); ieee80211_draintask(ic, &rvp->ratectl_task); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); m_freem(rvp->bcn_mbuf); free(rvp, M_80211_VAP); } static void rum_cmdq_cb(void *arg, int pending) { struct rum_softc *sc = arg; struct rum_cmdq *rc; RUM_CMDQ_LOCK(sc); while (sc->cmdq[sc->cmdq_first].func != NULL) { rc = &sc->cmdq[sc->cmdq_first]; RUM_CMDQ_UNLOCK(sc); RUM_LOCK(sc); rc->func(sc, &rc->data, rc->rvp_id); RUM_UNLOCK(sc); RUM_CMDQ_LOCK(sc); memset(rc, 0, sizeof (*rc)); sc->cmdq_first = (sc->cmdq_first + 1) % RUM_CMDQ_SIZE; } RUM_CMDQ_UNLOCK(sc); } static int rum_cmd_sleepable(struct rum_softc *sc, const void *ptr, size_t len, uint8_t rvp_id, CMD_FUNC_PROTO) { struct ieee80211com *ic = &sc->sc_ic; KASSERT(len <= sizeof(union sec_param), ("buffer overflow")); RUM_CMDQ_LOCK(sc); if (sc->cmdq[sc->cmdq_last].func != NULL) { device_printf(sc->sc_dev, "%s: cmdq overflow\n", __func__); RUM_CMDQ_UNLOCK(sc); return EAGAIN; } if (ptr != NULL) memcpy(&sc->cmdq[sc->cmdq_last].data, ptr, len); sc->cmdq[sc->cmdq_last].rvp_id = rvp_id; sc->cmdq[sc->cmdq_last].func = func; sc->cmdq_last = (sc->cmdq_last + 1) % RUM_CMDQ_SIZE; RUM_CMDQ_UNLOCK(sc); ieee80211_runtask(ic, &sc->cmdq_task); return 0; } static void rum_tx_free(struct rum_tx_data *data, int txerr) { struct rum_softc *sc = data->sc; if (data->m != NULL) { ieee80211_tx_complete(data->ni, data->m, txerr); data->m = NULL; data->ni = NULL; } STAILQ_INSERT_TAIL(&sc->tx_free, data, next); sc->tx_nfree++; } static void rum_setup_tx_list(struct rum_softc *sc) { struct rum_tx_data *data; int i; sc->tx_nfree = 0; STAILQ_INIT(&sc->tx_q); STAILQ_INIT(&sc->tx_free); for (i = 0; i < RUM_TX_LIST_COUNT; i++) { data = &sc->tx_data[i]; data->sc = sc; STAILQ_INSERT_TAIL(&sc->tx_free, data, next); sc->tx_nfree++; } } static void rum_reset_tx_list(struct rum_softc *sc, struct ieee80211vap *vap) { struct rum_tx_data *data, *tmp; KASSERT(vap != NULL, ("%s: vap is NULL\n", __func__)); STAILQ_FOREACH_SAFE(data, &sc->tx_q, next, tmp) { if (data->ni != NULL && data->ni->ni_vap == vap) { ieee80211_free_node(data->ni); data->ni = NULL; KASSERT(data->m != NULL, ("%s: m is NULL\n", __func__)); m_freem(data->m); data->m = NULL; STAILQ_REMOVE(&sc->tx_q, data, rum_tx_data, next); STAILQ_INSERT_TAIL(&sc->tx_free, data, next); sc->tx_nfree++; } } } static void rum_unsetup_tx_list(struct rum_softc *sc) { struct rum_tx_data *data; int i; /* make sure any subsequent use of the queues will fail */ sc->tx_nfree = 0; STAILQ_INIT(&sc->tx_q); STAILQ_INIT(&sc->tx_free); /* free up all node references and mbufs */ for (i = 0; i < RUM_TX_LIST_COUNT; i++) { data = &sc->tx_data[i]; if (data->m != NULL) { m_freem(data->m); data->m = NULL; } if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; } } } static void rum_beacon_miss(struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct rum_softc *sc = ic->ic_softc; struct rum_vap *rvp = RUM_VAP(vap); int sleep; RUM_LOCK(sc); if (sc->sc_sleeping && sc->sc_sleep_end < ticks) { DPRINTFN(12, "dropping 'sleeping' bit, " "device must be awake now\n"); sc->sc_sleeping = 0; } sleep = sc->sc_sleeping; RUM_UNLOCK(sc); if (!sleep) rvp->bmiss(vap); #ifdef USB_DEBUG else DPRINTFN(13, "bmiss event is ignored whilst sleeping\n"); #endif } static void rum_sta_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype, const struct ieee80211_rx_stats *rxs, int rssi, int nf) { struct ieee80211vap *vap = ni->ni_vap; struct rum_softc 
*sc = vap->iv_ic->ic_softc; struct rum_vap *rvp = RUM_VAP(vap); if (vap->iv_state == IEEE80211_S_SLEEP && subtype == IEEE80211_FC0_SUBTYPE_BEACON) { RUM_LOCK(sc); DPRINTFN(12, "beacon, mybss %d (flags %02X)\n", !!(sc->last_rx_flags & RT2573_RX_MYBSS), sc->last_rx_flags); if ((sc->last_rx_flags & (RT2573_RX_MYBSS | RT2573_RX_BC)) == (RT2573_RX_MYBSS | RT2573_RX_BC)) { /* * Put it to sleep here; in case if there is a data * for us, iv_recv_mgmt() will wakeup the device via * SLEEP -> RUN state transition. */ rum_set_power_state(sc, 1); } RUM_UNLOCK(sc); } rvp->recv_mgmt(ni, m, subtype, rxs, rssi, nf); } static int rum_set_power_state(struct rum_softc *sc, int sleep) { usb_error_t uerror; RUM_LOCK_ASSERT(sc); DPRINTFN(12, "moving to %s state (sleep time %u)\n", sleep ? "sleep" : "awake", sc->sc_sleep_time); uerror = rum_do_mcu_request(sc, sleep ? RT2573_MCU_SLEEP : RT2573_MCU_WAKEUP); if (uerror != USB_ERR_NORMAL_COMPLETION) { device_printf(sc->sc_dev, "%s: could not change power state: %s\n", __func__, usbd_errstr(uerror)); return (EIO); } sc->sc_sleeping = !!sleep; sc->sc_sleep_end = sleep ? ticks + sc->sc_sleep_time : 0; return (0); } static int rum_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct rum_vap *rvp = RUM_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct rum_softc *sc = ic->ic_softc; const struct ieee80211_txparam *tp; enum ieee80211_state ostate; struct ieee80211_node *ni; usb_error_t uerror; int ret = 0; ostate = vap->iv_state; DPRINTF("%s -> %s\n", ieee80211_state_name[ostate], ieee80211_state_name[nstate]); IEEE80211_UNLOCK(ic); RUM_LOCK(sc); usb_callout_stop(&rvp->ratectl_ch); if (ostate == IEEE80211_S_SLEEP && vap->iv_opmode == IEEE80211_M_STA) { rum_clrbits(sc, RT2573_TXRX_CSR4, RT2573_ACKCTS_PWRMGT); rum_clrbits(sc, RT2573_MAC_CSR11, RT2573_AUTO_WAKEUP); /* * Ignore any errors; * any subsequent TX will wakeup it anyway */ (void) rum_set_power_state(sc, 0); } switch (nstate) { case IEEE80211_S_INIT: if (ostate == IEEE80211_S_RUN) rum_abort_tsf_sync(sc); break; case IEEE80211_S_RUN: if (ostate == IEEE80211_S_SLEEP) break; /* already handled */ ni = ieee80211_ref_node(vap->iv_bss); if (vap->iv_opmode != IEEE80211_M_MONITOR) { if (ic->ic_bsschan == IEEE80211_CHAN_ANYC || ni->ni_chan == IEEE80211_CHAN_ANYC) { ret = EINVAL; goto run_fail; } rum_update_slot_cb(sc, NULL, 0); rum_enable_mrr(sc); rum_set_txpreamble(sc); rum_set_basicrates(sc); rum_set_maxretry(sc, vap); IEEE80211_ADDR_COPY(sc->sc_bssid, ni->ni_bssid); rum_set_bssid(sc, sc->sc_bssid); } if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_IBSS) { if ((ret = rum_alloc_beacon(sc, vap)) != 0) goto run_fail; } if (vap->iv_opmode != IEEE80211_M_MONITOR && vap->iv_opmode != IEEE80211_M_AHDEMO) { if ((ret = rum_enable_tsf_sync(sc)) != 0) goto run_fail; } else rum_enable_tsf(sc); /* enable automatic rate adaptation */ tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) rum_ratectl_start(sc, ni); run_fail: ieee80211_free_node(ni); break; case IEEE80211_S_SLEEP: /* Implemented for STA mode only. 
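* Entering IEEE80211_S_SLEEP sets the RT2573_AUTO_WAKEUP and RT2573_ACKCTS_PWRMGT bits and then requests MCU sleep via rum_set_power_state(sc, 1).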
*/ if (vap->iv_opmode != IEEE80211_M_STA) break; uerror = rum_setbits(sc, RT2573_MAC_CSR11, RT2573_AUTO_WAKEUP); if (uerror != USB_ERR_NORMAL_COMPLETION) { ret = EIO; break; } uerror = rum_setbits(sc, RT2573_TXRX_CSR4, RT2573_ACKCTS_PWRMGT); if (uerror != USB_ERR_NORMAL_COMPLETION) { ret = EIO; break; } ret = rum_set_power_state(sc, 1); if (ret != 0) { device_printf(sc->sc_dev, "%s: could not move to the SLEEP state: %s\n", __func__, usbd_errstr(uerror)); } break; default: break; } RUM_UNLOCK(sc); IEEE80211_LOCK(ic); return (ret == 0 ? rvp->newstate(vap, nstate, arg) : ret); } static void rum_bulk_write_callback(struct usb_xfer *xfer, usb_error_t error) { struct rum_softc *sc = usbd_xfer_softc(xfer); struct ieee80211vap *vap; struct rum_tx_data *data; struct mbuf *m; struct usb_page_cache *pc; unsigned int len; int actlen, sumlen; usbd_xfer_status(xfer, &actlen, &sumlen, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: DPRINTFN(11, "transfer complete, %d bytes\n", actlen); /* free resources */ data = usbd_xfer_get_priv(xfer); rum_tx_free(data, 0); usbd_xfer_set_priv(xfer, NULL); /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: data = STAILQ_FIRST(&sc->tx_q); if (data) { STAILQ_REMOVE_HEAD(&sc->tx_q, next); m = data->m; if (m->m_pkthdr.len > (int)(MCLBYTES + RT2573_TX_DESC_SIZE)) { DPRINTFN(0, "data overflow, %u bytes\n", m->m_pkthdr.len); m->m_pkthdr.len = (MCLBYTES + RT2573_TX_DESC_SIZE); } pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_in(pc, 0, &data->desc, RT2573_TX_DESC_SIZE); usbd_m_copy_in(pc, RT2573_TX_DESC_SIZE, m, 0, m->m_pkthdr.len); vap = data->ni->ni_vap; if (ieee80211_radiotap_active_vap(vap)) { struct rum_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = data->rate; tap->wt_antenna = sc->tx_ant; ieee80211_radiotap_tx(vap, m); } /* align end on a 4-bytes boundary */ len = (RT2573_TX_DESC_SIZE + m->m_pkthdr.len + 3) & ~3; if ((len % 64) == 0) len += 4; DPRINTFN(11, "sending frame len=%u xferlen=%u\n", m->m_pkthdr.len, len); usbd_xfer_set_frame_len(xfer, 0, len); usbd_xfer_set_priv(xfer, data); usbd_transfer_submit(xfer); } rum_start(sc); break; default: /* Error */ DPRINTFN(11, "transfer error, %s\n", usbd_errstr(error)); counter_u64_add(sc->sc_ic.ic_oerrors, 1); data = usbd_xfer_get_priv(xfer); if (data != NULL) { rum_tx_free(data, error); usbd_xfer_set_priv(xfer, NULL); } if (error != USB_ERR_CANCELLED) { if (error == USB_ERR_TIMEOUT) device_printf(sc->sc_dev, "device timeout\n"); /* * Try to clear stall first, also if other * errors occur, hence clearing stall * introduces a 50 ms delay: */ usbd_xfer_set_stall(xfer); goto tr_setup; } break; } } static void rum_bulk_read_callback(struct usb_xfer *xfer, usb_error_t error) { struct rum_softc *sc = usbd_xfer_softc(xfer); struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_frame_min *wh; struct ieee80211_node *ni; struct mbuf *m = NULL; struct usb_page_cache *pc; uint32_t flags; uint8_t rssi = 0; int len; usbd_xfer_status(xfer, &len, NULL, NULL, NULL); switch (USB_GET_STATE(xfer)) { case USB_ST_TRANSFERRED: DPRINTFN(15, "rx done, actlen=%d\n", len); if (len < RT2573_RX_DESC_SIZE) { DPRINTF("%s: xfer too short %d\n", device_get_nameunit(sc->sc_dev), len); counter_u64_add(ic->ic_ierrors, 1); goto tr_setup; } len -= RT2573_RX_DESC_SIZE; pc = usbd_xfer_get_frame(xfer, 0); usbd_copy_out(pc, 0, &sc->sc_rx_desc, RT2573_RX_DESC_SIZE); rssi = rum_get_rssi(sc, sc->sc_rx_desc.rssi); flags = le32toh(sc->sc_rx_desc.flags); sc->last_rx_flags = flags; if (len < ((flags >> 16) & 0xfff)) { DPRINTFN(5, 
"%s: frame is truncated from %d to %d " "bytes\n", device_get_nameunit(sc->sc_dev), (flags >> 16) & 0xfff, len); counter_u64_add(ic->ic_ierrors, 1); goto tr_setup; } len = (flags >> 16) & 0xfff; if (len < sizeof(struct ieee80211_frame_ack)) { DPRINTFN(5, "%s: frame too short %d\n", device_get_nameunit(sc->sc_dev), len); counter_u64_add(ic->ic_ierrors, 1); goto tr_setup; } if (flags & RT2573_RX_CRC_ERROR) { /* * This should not happen since we did not * request to receive those frames when we * filled RUM_TXRX_CSR2: */ DPRINTFN(5, "PHY or CRC error\n"); counter_u64_add(ic->ic_ierrors, 1); goto tr_setup; } if ((flags & RT2573_RX_DEC_MASK) != RT2573_RX_DEC_OK) { switch (flags & RT2573_RX_DEC_MASK) { case RT2573_RX_IV_ERROR: DPRINTFN(5, "IV/EIV error\n"); break; case RT2573_RX_MIC_ERROR: DPRINTFN(5, "MIC error\n"); break; case RT2573_RX_KEY_ERROR: DPRINTFN(5, "Key error\n"); break; } counter_u64_add(ic->ic_ierrors, 1); goto tr_setup; } m = m_get2(len, M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { DPRINTF("could not allocate mbuf\n"); counter_u64_add(ic->ic_ierrors, 1); goto tr_setup; } usbd_copy_out(pc, RT2573_RX_DESC_SIZE, mtod(m, uint8_t *), len); wh = mtod(m, struct ieee80211_frame_min *); if ((wh->i_fc[1] & IEEE80211_FC1_PROTECTED) && (flags & RT2573_RX_CIP_MASK) != RT2573_RX_CIP_MODE(RT2573_MODE_NOSEC)) { wh->i_fc[1] &= ~IEEE80211_FC1_PROTECTED; m->m_flags |= M_WEP; } /* finalize mbuf */ m->m_pkthdr.len = m->m_len = len; if (ieee80211_radiotap_active(ic)) { struct rum_rx_radiotap_header *tap = &sc->sc_rxtap; tap->wr_flags = 0; tap->wr_rate = ieee80211_plcp2rate(sc->sc_rx_desc.rate, (flags & RT2573_RX_OFDM) ? IEEE80211_T_OFDM : IEEE80211_T_CCK); rum_get_tsf(sc, &tap->wr_tsf); tap->wr_antsignal = RT2573_NOISE_FLOOR + rssi; tap->wr_antnoise = RT2573_NOISE_FLOOR; tap->wr_antenna = sc->rx_ant; } /* FALLTHROUGH */ case USB_ST_SETUP: tr_setup: usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer)); usbd_transfer_submit(xfer); /* * At the end of a USB callback it is always safe to unlock * the private mutex of a device! That is why we do the * "ieee80211_input" here, and not some lines up! */ RUM_UNLOCK(sc); if (m) { if (m->m_len >= sizeof(struct ieee80211_frame_min)) ni = ieee80211_find_rxnode(ic, wh); else ni = NULL; if (ni != NULL) { (void) ieee80211_input(ni, m, rssi, RT2573_NOISE_FLOOR); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi, RT2573_NOISE_FLOOR); } RUM_LOCK(sc); rum_start(sc); return; default: /* Error */ if (error != USB_ERR_CANCELLED) { /* try to clear stall first */ usbd_xfer_set_stall(xfer); goto tr_setup; } return; } } static uint8_t rum_plcp_signal(int rate) { switch (rate) { /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */ case 12: return 0xb; case 18: return 0xf; case 24: return 0xa; case 36: return 0xe; case 48: return 0x9; case 72: return 0xd; case 96: return 0x8; case 108: return 0xc; /* CCK rates (NB: not IEEE std, device-specific) */ case 2: return 0x0; case 4: return 0x1; case 11: return 0x2; case 22: return 0x3; } return 0xff; /* XXX unsupported/unknown rate */ } /* * Map net80211 cipher to RT2573 security mode. */ static uint8_t rum_crypto_mode(struct rum_softc *sc, u_int cipher, int keylen) { switch (cipher) { case IEEE80211_CIPHER_WEP: return (keylen < 8 ? 
RT2573_MODE_WEP40 : RT2573_MODE_WEP104); case IEEE80211_CIPHER_TKIP: return RT2573_MODE_TKIP; case IEEE80211_CIPHER_AES_CCM: return RT2573_MODE_AES_CCMP; default: device_printf(sc->sc_dev, "unknown cipher %d\n", cipher); return 0; } } static void rum_setup_tx_desc(struct rum_softc *sc, struct rum_tx_desc *desc, struct ieee80211_key *k, uint32_t flags, uint8_t xflags, uint8_t qid, int hdrlen, int len, int rate) { struct ieee80211com *ic = &sc->sc_ic; struct wmeParams *wmep = &sc->wme_params[qid]; uint16_t plcp_length; int remainder; flags |= RT2573_TX_VALID; flags |= len << 16; if (k != NULL && !(k->wk_flags & IEEE80211_KEY_SWCRYPT)) { const struct ieee80211_cipher *cip = k->wk_cipher; len += cip->ic_header + cip->ic_trailer + cip->ic_miclen; desc->eiv = 0; /* for WEP */ cip->ic_setiv(k, (uint8_t *)&desc->iv); } /* setup PLCP fields */ desc->plcp_signal = rum_plcp_signal(rate); desc->plcp_service = 4; len += IEEE80211_CRC_LEN; if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) { flags |= RT2573_TX_OFDM; plcp_length = len & 0xfff; desc->plcp_length_hi = plcp_length >> 6; desc->plcp_length_lo = plcp_length & 0x3f; } else { if (rate == 0) rate = 2; /* avoid division by zero */ plcp_length = howmany(16 * len, rate); if (rate == 22) { remainder = (16 * len) % 22; if (remainder != 0 && remainder < 7) desc->plcp_service |= RT2573_PLCP_LENGEXT; } desc->plcp_length_hi = plcp_length >> 8; desc->plcp_length_lo = plcp_length & 0xff; if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) desc->plcp_signal |= 0x08; } desc->flags = htole32(flags); desc->hdrlen = hdrlen; desc->xflags = xflags; desc->wme = htole16(RT2573_QID(qid) | RT2573_AIFSN(wmep->wmep_aifsn) | RT2573_LOGCWMIN(wmep->wmep_logcwmin) | RT2573_LOGCWMAX(wmep->wmep_logcwmax)); } static int rum_sendprot(struct rum_softc *sc, const struct mbuf *m, struct ieee80211_node *ni, int prot, int rate) { struct ieee80211com *ic = ni->ni_ic; const struct ieee80211_frame *wh; struct rum_tx_data *data; struct mbuf *mprot; int protrate, pktlen, flags, isshort; uint16_t dur; RUM_LOCK_ASSERT(sc); KASSERT(prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY, ("protection %d", prot)); wh = mtod(m, const struct ieee80211_frame *); pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN; protrate = ieee80211_ctl_rate(ic->ic_rt, rate); isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; dur = ieee80211_compute_duration(ic->ic_rt, pktlen, rate, isshort) + ieee80211_ack_duration(ic->ic_rt, rate, isshort); flags = 0; if (prot == IEEE80211_PROT_RTSCTS) { /* NB: CTS is the same size as an ACK */ dur += ieee80211_ack_duration(ic->ic_rt, rate, isshort); flags |= RT2573_TX_NEED_ACK; mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, dur); } else { mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur); } if (mprot == NULL) { /* XXX stat + msg */ return (ENOBUFS); } data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; data->m = mprot; data->ni = ieee80211_ref_node(ni); data->rate = protrate; rum_setup_tx_desc(sc, &data->desc, NULL, flags, 0, 0, 0, mprot->m_pkthdr.len, protrate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[RUM_BULK_WR]); return 0; } static uint32_t rum_tx_crypto_flags(struct rum_softc *sc, struct ieee80211_node *ni, const struct ieee80211_key *k) { struct ieee80211vap *vap = ni->ni_vap; u_int cipher; uint32_t flags = 0; uint8_t mode, pos; if (!(k->wk_flags & IEEE80211_KEY_SWCRYPT)) { cipher = k->wk_cipher->ic_cipher; pos = k->wk_keyix; mode = 
rum_crypto_mode(sc, cipher, k->wk_keylen); if (mode == 0) return 0; flags |= RT2573_TX_CIP_MODE(mode); /* Do not trust GROUP flag */ if (!(k >= &vap->iv_nw_keys[0] && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) flags |= RT2573_TX_KEY_PAIR; else pos += 0 * RT2573_SKEY_MAX; /* vap id */ flags |= RT2573_TX_KEY_ID(pos); if (cipher == IEEE80211_CIPHER_TKIP) flags |= RT2573_TX_TKIPMIC; } return flags; } static int rum_tx_mgt(struct rum_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = &sc->sc_ic; struct rum_tx_data *data; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp; struct ieee80211_key *k = NULL; uint32_t flags = 0; uint16_t dur; uint8_t ac, type, xflags = 0; int hdrlen; RUM_LOCK_ASSERT(sc); data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; wh = mtod(m0, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; hdrlen = ieee80211_anyhdrsize(wh); ac = M_WME_GETAC(m0); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_get_txkey(ni, m0); if (k == NULL) return (ENOENT); if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) && !k->wk_cipher->ic_encap(k, m0)) return (ENOBUFS); wh = mtod(m0, struct ieee80211_frame *); } tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_curchan)]; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RT2573_TX_NEED_ACK; dur = ieee80211_ack_duration(ic->ic_rt, tp->mgmtrate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); USETW(wh->i_dur, dur); /* tell hardware to add timestamp for probe responses */ if (type == IEEE80211_FC0_TYPE_MGT && (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == IEEE80211_FC0_SUBTYPE_PROBE_RESP) flags |= RT2573_TX_TIMESTAMP; } if (type != IEEE80211_FC0_TYPE_CTL && !IEEE80211_QOS_HAS_SEQ(wh)) xflags |= RT2573_TX_HWSEQ; if (k != NULL) flags |= rum_tx_crypto_flags(sc, ni, k); data->m = m0; data->ni = ni; data->rate = tp->mgmtrate; rum_setup_tx_desc(sc, &data->desc, k, flags, xflags, ac, hdrlen, m0->m_pkthdr.len, tp->mgmtrate); DPRINTFN(10, "sending mgt frame len=%d rate=%d\n", m0->m_pkthdr.len + (int)RT2573_TX_DESC_SIZE, tp->mgmtrate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[RUM_BULK_WR]); return (0); } static int rum_tx_raw(struct rum_softc *sc, struct mbuf *m0, struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct ieee80211_frame *wh; struct rum_tx_data *data; uint32_t flags; uint8_t ac, type, xflags = 0; int rate, error; RUM_LOCK_ASSERT(sc); wh = mtod(m0, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; ac = params->ibp_pri & 3; rate = params->ibp_rate0; if (!ieee80211_isratevalid(ic->ic_rt, rate)) return (EINVAL); flags = 0; if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) flags |= RT2573_TX_NEED_ACK; if (params->ibp_flags & (IEEE80211_BPF_RTS|IEEE80211_BPF_CTS)) { error = rum_sendprot(sc, m0, ni, params->ibp_flags & IEEE80211_BPF_RTS ? 
IEEE80211_PROT_RTSCTS : IEEE80211_PROT_CTSONLY, rate); if (error || sc->tx_nfree == 0) return (ENOBUFS); flags |= RT2573_TX_LONG_RETRY | RT2573_TX_IFS_SIFS; } if (type != IEEE80211_FC0_TYPE_CTL && !IEEE80211_QOS_HAS_SEQ(wh)) xflags |= RT2573_TX_HWSEQ; data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; data->m = m0; data->ni = ni; data->rate = rate; /* XXX need to setup descriptor ourself */ rum_setup_tx_desc(sc, &data->desc, NULL, flags, xflags, ac, 0, m0->m_pkthdr.len, rate); DPRINTFN(10, "sending raw frame len=%u rate=%u\n", m0->m_pkthdr.len, rate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[RUM_BULK_WR]); return 0; } static int rum_tx_data(struct rum_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = &sc->sc_ic; struct rum_tx_data *data; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp; struct ieee80211_key *k = NULL; uint32_t flags = 0; uint16_t dur; uint8_t ac, type, qos, xflags = 0; int error, hdrlen, rate; RUM_LOCK_ASSERT(sc); wh = mtod(m0, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; hdrlen = ieee80211_anyhdrsize(wh); if (IEEE80211_QOS_HAS_SEQ(wh)) qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; else qos = 0; ac = M_WME_GETAC(m0); tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; if (IEEE80211_IS_MULTICAST(wh->i_addr1)) rate = tp->mcastrate; else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) rate = tp->ucastrate; else if (m0->m_flags & M_EAPOL) rate = tp->mgmtrate; else { (void) ieee80211_ratectl_rate(ni, NULL, 0); rate = ni->ni_txrate; } if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_get_txkey(ni, m0); if (k == NULL) { m_freem(m0); return (ENOENT); } if ((k->wk_flags & IEEE80211_KEY_SWCRYPT) && !k->wk_cipher->ic_encap(k, m0)) { m_freem(m0); return (ENOBUFS); } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (type != IEEE80211_FC0_TYPE_CTL && !IEEE80211_QOS_HAS_SEQ(wh)) xflags |= RT2573_TX_HWSEQ; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { int prot = IEEE80211_PROT_NONE; if (m0->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) prot = IEEE80211_PROT_RTSCTS; else if ((ic->ic_flags & IEEE80211_F_USEPROT) && ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) prot = ic->ic_protmode; if (prot != IEEE80211_PROT_NONE) { error = rum_sendprot(sc, m0, ni, prot, rate); if (error || sc->tx_nfree == 0) { m_freem(m0); return ENOBUFS; } flags |= RT2573_TX_LONG_RETRY | RT2573_TX_IFS_SIFS; } } if (k != NULL) flags |= rum_tx_crypto_flags(sc, ni, k); data = STAILQ_FIRST(&sc->tx_free); STAILQ_REMOVE_HEAD(&sc->tx_free, next); sc->tx_nfree--; data->m = m0; data->ni = ni; data->rate = rate; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { /* Unicast frame, check if an ACK is expected. 
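* Only QoS frames with a no-ACK policy are sent without RT2573_TX_NEED_ACK; for all other unicast frames the ACK duration is written into the 802.11 duration field.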
*/ if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) != IEEE80211_QOS_ACKPOLICY_NOACK) flags |= RT2573_TX_NEED_ACK; dur = ieee80211_ack_duration(ic->ic_rt, rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); USETW(wh->i_dur, dur); } rum_setup_tx_desc(sc, &data->desc, k, flags, xflags, ac, hdrlen, m0->m_pkthdr.len, rate); DPRINTFN(10, "sending frame len=%d rate=%d\n", m0->m_pkthdr.len + (int)RT2573_TX_DESC_SIZE, rate); STAILQ_INSERT_TAIL(&sc->tx_q, data, next); usbd_transfer_start(sc->sc_xfer[RUM_BULK_WR]); return 0; } static int rum_transmit(struct ieee80211com *ic, struct mbuf *m) { struct rum_softc *sc = ic->ic_softc; int error; RUM_LOCK(sc); if (!sc->sc_running) { RUM_UNLOCK(sc); return (ENXIO); } error = mbufq_enqueue(&sc->sc_snd, m); if (error) { RUM_UNLOCK(sc); return (error); } rum_start(sc); RUM_UNLOCK(sc); return (0); } static void rum_start(struct rum_softc *sc) { struct ieee80211_node *ni; struct mbuf *m; RUM_LOCK_ASSERT(sc); if (!sc->sc_running) return; while (sc->tx_nfree >= RUM_TX_MINFREE && (m = mbufq_dequeue(&sc->sc_snd)) != NULL) { ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; if (rum_tx_data(sc, m, ni) != 0) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); break; } } } static void rum_parent(struct ieee80211com *ic) { struct rum_softc *sc = ic->ic_softc; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); RUM_LOCK(sc); if (sc->sc_detached) { RUM_UNLOCK(sc); return; } RUM_UNLOCK(sc); if (ic->ic_nrunning > 0) { if (rum_init(sc) == 0) ieee80211_start_all(ic); else ieee80211_stop(vap); } else rum_stop(sc); } static void rum_eeprom_read(struct rum_softc *sc, uint16_t addr, void *buf, int len) { struct usb_device_request req; usb_error_t error; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = RT2573_READ_EEPROM; USETW(req.wValue, 0); USETW(req.wIndex, addr); USETW(req.wLength, len); error = rum_do_request(sc, &req, buf); if (error != 0) { device_printf(sc->sc_dev, "could not read EEPROM: %s\n", usbd_errstr(error)); } } static uint32_t rum_read(struct rum_softc *sc, uint16_t reg) { uint32_t val; rum_read_multi(sc, reg, &val, sizeof val); return le32toh(val); } static void rum_read_multi(struct rum_softc *sc, uint16_t reg, void *buf, int len) { struct usb_device_request req; usb_error_t error; req.bmRequestType = UT_READ_VENDOR_DEVICE; req.bRequest = RT2573_READ_MULTI_MAC; USETW(req.wValue, 0); USETW(req.wIndex, reg); USETW(req.wLength, len); error = rum_do_request(sc, &req, buf); if (error != 0) { device_printf(sc->sc_dev, "could not multi read MAC register: %s\n", usbd_errstr(error)); } } static usb_error_t rum_write(struct rum_softc *sc, uint16_t reg, uint32_t val) { uint32_t tmp = htole32(val); return (rum_write_multi(sc, reg, &tmp, sizeof tmp)); } static usb_error_t rum_write_multi(struct rum_softc *sc, uint16_t reg, void *buf, size_t len) { struct usb_device_request req; usb_error_t error; size_t offset; req.bmRequestType = UT_WRITE_VENDOR_DEVICE; req.bRequest = RT2573_WRITE_MULTI_MAC; USETW(req.wValue, 0); /* write at most 64 bytes at a time */ for (offset = 0; offset < len; offset += 64) { USETW(req.wIndex, reg + offset); USETW(req.wLength, MIN(len - offset, 64)); error = rum_do_request(sc, &req, (char *)buf + offset); if (error != 0) { device_printf(sc->sc_dev, "could not multi write MAC register: %s\n", usbd_errstr(error)); return (error); } } return (USB_ERR_NORMAL_COMPLETION); } static usb_error_t rum_setbits(struct rum_softc *sc, uint16_t reg, uint32_t mask) { return (rum_write(sc, reg, rum_read(sc, reg) | mask)); } static 
usb_error_t rum_clrbits(struct rum_softc *sc, uint16_t reg, uint32_t mask) { return (rum_write(sc, reg, rum_read(sc, reg) & ~mask)); } static usb_error_t rum_modbits(struct rum_softc *sc, uint16_t reg, uint32_t set, uint32_t unset) { return (rum_write(sc, reg, (rum_read(sc, reg) & ~unset) | set)); } static int rum_bbp_busy(struct rum_softc *sc) { int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(rum_read(sc, RT2573_PHY_CSR3) & RT2573_BBP_BUSY)) break; if (rum_pause(sc, hz / 100)) break; } if (ntries == 100) return (ETIMEDOUT); return (0); } static void rum_bbp_write(struct rum_softc *sc, uint8_t reg, uint8_t val) { uint32_t tmp; DPRINTFN(2, "reg=0x%08x\n", reg); if (rum_bbp_busy(sc) != 0) { device_printf(sc->sc_dev, "could not write to BBP\n"); return; } tmp = RT2573_BBP_BUSY | (reg & 0x7f) << 8 | val; rum_write(sc, RT2573_PHY_CSR3, tmp); } static uint8_t rum_bbp_read(struct rum_softc *sc, uint8_t reg) { uint32_t val; int ntries; DPRINTFN(2, "reg=0x%08x\n", reg); if (rum_bbp_busy(sc) != 0) { device_printf(sc->sc_dev, "could not read BBP\n"); return 0; } val = RT2573_BBP_BUSY | RT2573_BBP_READ | reg << 8; rum_write(sc, RT2573_PHY_CSR3, val); for (ntries = 0; ntries < 100; ntries++) { val = rum_read(sc, RT2573_PHY_CSR3); if (!(val & RT2573_BBP_BUSY)) return val & 0xff; if (rum_pause(sc, hz / 100)) break; } device_printf(sc->sc_dev, "could not read BBP\n"); return 0; } static void rum_rf_write(struct rum_softc *sc, uint8_t reg, uint32_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(rum_read(sc, RT2573_PHY_CSR4) & RT2573_RF_BUSY)) break; if (rum_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to RF\n"); return; } tmp = RT2573_RF_BUSY | RT2573_RF_20BIT | (val & 0xfffff) << 2 | (reg & 3); rum_write(sc, RT2573_PHY_CSR4, tmp); /* remember last written value in sc */ sc->rf_regs[reg] = val; DPRINTFN(15, "RF R[%u] <- 0x%05x\n", reg & 3, val & 0xfffff); } static void rum_select_antenna(struct rum_softc *sc) { uint8_t bbp4, bbp77; uint32_t tmp; bbp4 = rum_bbp_read(sc, 4); bbp77 = rum_bbp_read(sc, 77); /* TBD */ /* make sure Rx is disabled before switching antenna */ tmp = rum_read(sc, RT2573_TXRX_CSR0); rum_write(sc, RT2573_TXRX_CSR0, tmp | RT2573_DISABLE_RX); rum_bbp_write(sc, 4, bbp4); rum_bbp_write(sc, 77, bbp77); rum_write(sc, RT2573_TXRX_CSR0, tmp); } /* * Enable multi-rate retries for frames sent at OFDM rates. * In 802.11b/g mode, allow fallback to CCK rates. 
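* On 5GHz channels RT2573_MRR_CCK_FALLBACK is cleared instead, since CCK rates are not part of the 11a rate set.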
*/ static void rum_enable_mrr(struct rum_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; if (!IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan)) { rum_setbits(sc, RT2573_TXRX_CSR4, RT2573_MRR_ENABLED | RT2573_MRR_CCK_FALLBACK); } else { rum_modbits(sc, RT2573_TXRX_CSR4, RT2573_MRR_ENABLED, RT2573_MRR_CCK_FALLBACK); } } static void rum_set_txpreamble(struct rum_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) rum_setbits(sc, RT2573_TXRX_CSR4, RT2573_SHORT_PREAMBLE); else rum_clrbits(sc, RT2573_TXRX_CSR4, RT2573_SHORT_PREAMBLE); } static void rum_set_basicrates(struct rum_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; /* update basic rate set */ if (ic->ic_curmode == IEEE80211_MODE_11B) { /* 11b basic rates: 1, 2Mbps */ rum_write(sc, RT2573_TXRX_CSR5, 0x3); } else if (IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan)) { /* 11a basic rates: 6, 12, 24Mbps */ rum_write(sc, RT2573_TXRX_CSR5, 0x150); } else { /* 11b/g basic rates: 1, 2, 5.5, 11Mbps */ rum_write(sc, RT2573_TXRX_CSR5, 0xf); } } /* * Reprogram MAC/BBP to switch to a new band. Values taken from the reference * driver. */ static void rum_select_band(struct rum_softc *sc, struct ieee80211_channel *c) { uint8_t bbp17, bbp35, bbp96, bbp97, bbp98, bbp104; /* update all BBP registers that depend on the band */ bbp17 = 0x20; bbp96 = 0x48; bbp104 = 0x2c; bbp35 = 0x50; bbp97 = 0x48; bbp98 = 0x48; if (IEEE80211_IS_CHAN_5GHZ(c)) { bbp17 += 0x08; bbp96 += 0x10; bbp104 += 0x0c; bbp35 += 0x10; bbp97 += 0x10; bbp98 += 0x10; } if ((IEEE80211_IS_CHAN_2GHZ(c) && sc->ext_2ghz_lna) || (IEEE80211_IS_CHAN_5GHZ(c) && sc->ext_5ghz_lna)) { bbp17 += 0x10; bbp96 += 0x10; bbp104 += 0x10; } sc->bbp17 = bbp17; rum_bbp_write(sc, 17, bbp17); rum_bbp_write(sc, 96, bbp96); rum_bbp_write(sc, 104, bbp104); if ((IEEE80211_IS_CHAN_2GHZ(c) && sc->ext_2ghz_lna) || (IEEE80211_IS_CHAN_5GHZ(c) && sc->ext_5ghz_lna)) { rum_bbp_write(sc, 75, 0x80); rum_bbp_write(sc, 86, 0x80); rum_bbp_write(sc, 88, 0x80); } rum_bbp_write(sc, 35, bbp35); rum_bbp_write(sc, 97, bbp97); rum_bbp_write(sc, 98, bbp98); if (IEEE80211_IS_CHAN_2GHZ(c)) { rum_modbits(sc, RT2573_PHY_CSR0, RT2573_PA_PE_2GHZ, RT2573_PA_PE_5GHZ); } else { rum_modbits(sc, RT2573_PHY_CSR0, RT2573_PA_PE_5GHZ, RT2573_PA_PE_2GHZ); } } static void rum_set_chan(struct rum_softc *sc, struct ieee80211_channel *c) { struct ieee80211com *ic = &sc->sc_ic; const struct rfprog *rfprog; uint8_t bbp3, bbp94 = RT2573_BBPR94_DEFAULT; int8_t power; int i, chan; chan = ieee80211_chan2ieee(ic, c); if (chan == 0 || chan == IEEE80211_CHAN_ANY) return; /* select the appropriate RF settings based on what EEPROM says */ rfprog = (sc->rf_rev == RT2573_RF_5225 || sc->rf_rev == RT2573_RF_2527) ? rum_rf5225 : rum_rf5226; /* find the settings for this channel (we know it exists) */ for (i = 0; rfprog[i].chan != chan; i++); power = sc->txpow[i]; if (power < 0) { bbp94 += power; power = 0; } else if (power > 31) { bbp94 += power - 31; power = 31; } /* * If we are switching from the 2GHz band to the 5GHz band or * vice-versa, BBP registers need to be reprogrammed. 
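* A band change is detected by comparing the channel flags of the new and current channels; rum_select_band() and rum_select_antenna() are then re-run.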
*/ if (c->ic_flags != ic->ic_curchan->ic_flags) { rum_select_band(sc, c); rum_select_antenna(sc); } ic->ic_curchan = c; rum_rf_write(sc, RT2573_RF1, rfprog[i].r1); rum_rf_write(sc, RT2573_RF2, rfprog[i].r2); rum_rf_write(sc, RT2573_RF3, rfprog[i].r3 | power << 7); rum_rf_write(sc, RT2573_RF4, rfprog[i].r4 | sc->rffreq << 10); rum_rf_write(sc, RT2573_RF1, rfprog[i].r1); rum_rf_write(sc, RT2573_RF2, rfprog[i].r2); rum_rf_write(sc, RT2573_RF3, rfprog[i].r3 | power << 7 | 1); rum_rf_write(sc, RT2573_RF4, rfprog[i].r4 | sc->rffreq << 10); rum_rf_write(sc, RT2573_RF1, rfprog[i].r1); rum_rf_write(sc, RT2573_RF2, rfprog[i].r2); rum_rf_write(sc, RT2573_RF3, rfprog[i].r3 | power << 7); rum_rf_write(sc, RT2573_RF4, rfprog[i].r4 | sc->rffreq << 10); rum_pause(sc, hz / 100); /* enable smart mode for MIMO-capable RFs */ bbp3 = rum_bbp_read(sc, 3); bbp3 &= ~RT2573_SMART_MODE; if (sc->rf_rev == RT2573_RF_5225 || sc->rf_rev == RT2573_RF_2527) bbp3 |= RT2573_SMART_MODE; rum_bbp_write(sc, 3, bbp3); if (bbp94 != RT2573_BBPR94_DEFAULT) rum_bbp_write(sc, 94, bbp94); /* give the chip some extra time to do the switchover */ rum_pause(sc, hz / 100); } static void rum_set_maxretry(struct rum_softc *sc, struct ieee80211vap *vap) { const struct ieee80211_txparam *tp; struct ieee80211_node *ni = vap->iv_bss; struct rum_vap *rvp = RUM_VAP(vap); tp = &vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)]; rvp->maxretry = tp->maxretry < 0xf ? tp->maxretry : 0xf; rum_modbits(sc, RT2573_TXRX_CSR4, RT2573_SHORT_RETRY(rvp->maxretry) | RT2573_LONG_RETRY(rvp->maxretry), RT2573_SHORT_RETRY_MASK | RT2573_LONG_RETRY_MASK); } /* * Enable TSF synchronization and tell h/w to start sending beacons for IBSS * and HostAP operating modes. */ static int rum_enable_tsf_sync(struct rum_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t tmp; uint16_t bintval; if (vap->iv_opmode != IEEE80211_M_STA) { /* * Change default 16ms TBTT adjustment to 8ms. * Must be done before enabling beacon generation. */ if (rum_write(sc, RT2573_TXRX_CSR10, 1 << 12 | 8) != 0) return EIO; } tmp = rum_read(sc, RT2573_TXRX_CSR9) & 0xff000000; /* set beacon interval (in 1/16ms unit) */ bintval = vap->iv_bss->ni_intval; tmp |= bintval * 16; tmp |= RT2573_TSF_TIMER_EN | RT2573_TBTT_TIMER_EN; switch (vap->iv_opmode) { case IEEE80211_M_STA: /* * Local TSF is always updated with remote TSF on beacon * reception. */ tmp |= RT2573_TSF_SYNC_MODE(RT2573_TSF_SYNC_MODE_STA); break; case IEEE80211_M_IBSS: /* * Local TSF is updated with remote TSF on beacon reception * only if the remote TSF is greater than local TSF. */ tmp |= RT2573_TSF_SYNC_MODE(RT2573_TSF_SYNC_MODE_IBSS); tmp |= RT2573_BCN_TX_EN; break; case IEEE80211_M_HOSTAP: /* SYNC with nobody */ tmp |= RT2573_TSF_SYNC_MODE(RT2573_TSF_SYNC_MODE_HOSTAP); tmp |= RT2573_BCN_TX_EN; break; default: device_printf(sc->sc_dev, "Enabling TSF failed. 
undefined opmode %d\n", vap->iv_opmode); return EINVAL; } if (rum_write(sc, RT2573_TXRX_CSR9, tmp) != 0) return EIO; /* refresh current sleep time */ return (rum_set_sleep_time(sc, bintval)); } static void rum_enable_tsf(struct rum_softc *sc) { rum_modbits(sc, RT2573_TXRX_CSR9, RT2573_TSF_TIMER_EN | RT2573_TSF_SYNC_MODE(RT2573_TSF_SYNC_MODE_DIS), 0x00ffffff); } static void rum_abort_tsf_sync(struct rum_softc *sc) { rum_clrbits(sc, RT2573_TXRX_CSR9, 0x00ffffff); } static void rum_get_tsf(struct rum_softc *sc, uint64_t *buf) { rum_read_multi(sc, RT2573_TXRX_CSR12, buf, sizeof (*buf)); } static void rum_update_slot_cb(struct rum_softc *sc, union sec_param *data, uint8_t rvp_id) { struct ieee80211com *ic = &sc->sc_ic; uint8_t slottime; slottime = IEEE80211_GET_SLOTTIME(ic); rum_modbits(sc, RT2573_MAC_CSR9, slottime, 0xff); DPRINTF("setting slot time to %uus\n", slottime); } static void rum_update_slot(struct ieee80211com *ic) { rum_cmd_sleepable(ic->ic_softc, NULL, 0, 0, rum_update_slot_cb); } static int rum_wme_update(struct ieee80211com *ic) { const struct wmeParams *chanp = ic->ic_wme.wme_chanParams.cap_wmeParams; struct rum_softc *sc = ic->ic_softc; int error = 0; RUM_LOCK(sc); error = rum_write(sc, RT2573_AIFSN_CSR, chanp[WME_AC_VO].wmep_aifsn << 12 | chanp[WME_AC_VI].wmep_aifsn << 8 | chanp[WME_AC_BK].wmep_aifsn << 4 | chanp[WME_AC_BE].wmep_aifsn); if (error) goto print_err; error = rum_write(sc, RT2573_CWMIN_CSR, chanp[WME_AC_VO].wmep_logcwmin << 12 | chanp[WME_AC_VI].wmep_logcwmin << 8 | chanp[WME_AC_BK].wmep_logcwmin << 4 | chanp[WME_AC_BE].wmep_logcwmin); if (error) goto print_err; error = rum_write(sc, RT2573_CWMAX_CSR, chanp[WME_AC_VO].wmep_logcwmax << 12 | chanp[WME_AC_VI].wmep_logcwmax << 8 | chanp[WME_AC_BK].wmep_logcwmax << 4 | chanp[WME_AC_BE].wmep_logcwmax); if (error) goto print_err; error = rum_write(sc, RT2573_TXOP01_CSR, chanp[WME_AC_BK].wmep_txopLimit << 16 | chanp[WME_AC_BE].wmep_txopLimit); if (error) goto print_err; error = rum_write(sc, RT2573_TXOP23_CSR, chanp[WME_AC_VO].wmep_txopLimit << 16 | chanp[WME_AC_VI].wmep_txopLimit); if (error) goto print_err; memcpy(sc->wme_params, chanp, sizeof(*chanp) * WME_NUM_AC); print_err: RUM_UNLOCK(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: WME update failed, error %d\n", __func__, error); } return (error); } static void rum_set_bssid(struct rum_softc *sc, const uint8_t *bssid) { rum_write(sc, RT2573_MAC_CSR4, bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24); rum_write(sc, RT2573_MAC_CSR5, bssid[4] | bssid[5] << 8 | RT2573_NUM_BSSID_MSK(1)); } static void rum_set_macaddr(struct rum_softc *sc, const uint8_t *addr) { rum_write(sc, RT2573_MAC_CSR2, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24); rum_write(sc, RT2573_MAC_CSR3, addr[4] | addr[5] << 8 | 0xff << 16); } static void rum_setpromisc(struct rum_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; if (ic->ic_promisc == 0) rum_setbits(sc, RT2573_TXRX_CSR0, RT2573_DROP_NOT_TO_ME); else rum_clrbits(sc, RT2573_TXRX_CSR0, RT2573_DROP_NOT_TO_ME); DPRINTF("%s promiscuous mode\n", ic->ic_promisc > 0 ? "entering" : "leaving"); } static void rum_update_promisc(struct ieee80211com *ic) { struct rum_softc *sc = ic->ic_softc; RUM_LOCK(sc); if (sc->sc_running) rum_setpromisc(sc); RUM_UNLOCK(sc); } static void rum_update_mcast(struct ieee80211com *ic) { /* Ignore. 
*/ } static const char * rum_get_rf(int rev) { switch (rev) { case RT2573_RF_2527: return "RT2527 (MIMO XR)"; case RT2573_RF_2528: return "RT2528"; case RT2573_RF_5225: return "RT5225 (MIMO XR)"; case RT2573_RF_5226: return "RT5226"; default: return "unknown"; } } static void rum_read_eeprom(struct rum_softc *sc) { uint16_t val; #ifdef RUM_DEBUG int i; #endif /* read MAC address */ rum_eeprom_read(sc, RT2573_EEPROM_ADDRESS, sc->sc_ic.ic_macaddr, 6); rum_eeprom_read(sc, RT2573_EEPROM_ANTENNA, &val, 2); val = le16toh(val); sc->rf_rev = (val >> 11) & 0x1f; sc->hw_radio = (val >> 10) & 0x1; sc->rx_ant = (val >> 4) & 0x3; sc->tx_ant = (val >> 2) & 0x3; sc->nb_ant = val & 0x3; DPRINTF("RF revision=%d\n", sc->rf_rev); rum_eeprom_read(sc, RT2573_EEPROM_CONFIG2, &val, 2); val = le16toh(val); sc->ext_5ghz_lna = (val >> 6) & 0x1; sc->ext_2ghz_lna = (val >> 4) & 0x1; DPRINTF("External 2GHz LNA=%d\nExternal 5GHz LNA=%d\n", sc->ext_2ghz_lna, sc->ext_5ghz_lna); rum_eeprom_read(sc, RT2573_EEPROM_RSSI_2GHZ_OFFSET, &val, 2); val = le16toh(val); if ((val & 0xff) != 0xff) sc->rssi_2ghz_corr = (int8_t)(val & 0xff); /* signed */ /* Only [-10, 10] is valid */ if (sc->rssi_2ghz_corr < -10 || sc->rssi_2ghz_corr > 10) sc->rssi_2ghz_corr = 0; rum_eeprom_read(sc, RT2573_EEPROM_RSSI_5GHZ_OFFSET, &val, 2); val = le16toh(val); if ((val & 0xff) != 0xff) sc->rssi_5ghz_corr = (int8_t)(val & 0xff); /* signed */ /* Only [-10, 10] is valid */ if (sc->rssi_5ghz_corr < -10 || sc->rssi_5ghz_corr > 10) sc->rssi_5ghz_corr = 0; if (sc->ext_2ghz_lna) sc->rssi_2ghz_corr -= 14; if (sc->ext_5ghz_lna) sc->rssi_5ghz_corr -= 14; DPRINTF("RSSI 2GHz corr=%d\nRSSI 5GHz corr=%d\n", sc->rssi_2ghz_corr, sc->rssi_5ghz_corr); rum_eeprom_read(sc, RT2573_EEPROM_FREQ_OFFSET, &val, 2); val = le16toh(val); if ((val & 0xff) != 0xff) sc->rffreq = val & 0xff; DPRINTF("RF freq=%d\n", sc->rffreq); /* read Tx power for all a/b/g channels */ rum_eeprom_read(sc, RT2573_EEPROM_TXPOWER, sc->txpow, 14); /* XXX default Tx power for 802.11a channels */ memset(sc->txpow + 14, 24, sizeof (sc->txpow) - 14); #ifdef RUM_DEBUG for (i = 0; i < 14; i++) DPRINTF("Channel=%d Tx power=%d\n", i + 1, sc->txpow[i]); #endif /* read default values for BBP registers */ rum_eeprom_read(sc, RT2573_EEPROM_BBP_BASE, sc->bbp_prom, 2 * 16); #ifdef RUM_DEBUG for (i = 0; i < 14; i++) { if (sc->bbp_prom[i].reg == 0 || sc->bbp_prom[i].reg == 0xff) continue; DPRINTF("BBP R%d=%02x\n", sc->bbp_prom[i].reg, sc->bbp_prom[i].val); } #endif } static int rum_bbp_wakeup(struct rum_softc *sc) { unsigned int ntries; for (ntries = 0; ntries < 100; ntries++) { if (rum_read(sc, RT2573_MAC_CSR12) & 8) break; rum_write(sc, RT2573_MAC_CSR12, 4); /* force wakeup */ if (rum_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for BBP/RF to wakeup\n"); return (ETIMEDOUT); } return (0); } static int rum_bbp_init(struct rum_softc *sc) { int i, ntries; /* wait for BBP to be ready */ for (ntries = 0; ntries < 100; ntries++) { const uint8_t val = rum_bbp_read(sc, 0); if (val != 0 && val != 0xff) break; if (rum_pause(sc, hz / 100)) break; } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for BBP\n"); return EIO; } /* initialize BBP registers to default values */ for (i = 0; i < nitems(rum_def_bbp); i++) rum_bbp_write(sc, rum_def_bbp[i].reg, rum_def_bbp[i].val); /* write vendor-specific BBP values (from EEPROM) */ for (i = 0; i < 16; i++) { if (sc->bbp_prom[i].reg == 0 || sc->bbp_prom[i].reg == 0xff) continue; rum_bbp_write(sc, sc->bbp_prom[i].reg, 
sc->bbp_prom[i].val); } return 0; } static void rum_clr_shkey_regs(struct rum_softc *sc) { rum_write(sc, RT2573_SEC_CSR0, 0); rum_write(sc, RT2573_SEC_CSR1, 0); rum_write(sc, RT2573_SEC_CSR5, 0); } static int rum_init(struct rum_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t tmp; int i, ret; RUM_LOCK(sc); if (sc->sc_running) { ret = 0; goto end; } /* initialize MAC registers to default values */ for (i = 0; i < nitems(rum_def_mac); i++) rum_write(sc, rum_def_mac[i].reg, rum_def_mac[i].val); /* reset some WME parameters to default values */ sc->wme_params[0].wmep_aifsn = 2; sc->wme_params[0].wmep_logcwmin = 4; sc->wme_params[0].wmep_logcwmax = 10; /* set host ready */ rum_write(sc, RT2573_MAC_CSR1, RT2573_RESET_ASIC | RT2573_RESET_BBP); rum_write(sc, RT2573_MAC_CSR1, 0); /* wait for BBP/RF to wakeup */ if ((ret = rum_bbp_wakeup(sc)) != 0) goto end; if ((ret = rum_bbp_init(sc)) != 0) goto end; /* select default channel */ rum_select_band(sc, ic->ic_curchan); rum_select_antenna(sc); rum_set_chan(sc, ic->ic_curchan); /* clear STA registers */ rum_read_multi(sc, RT2573_STA_CSR0, sc->sta, sizeof sc->sta); /* clear security registers (if required) */ if (sc->sc_clr_shkeys == 0) { rum_clr_shkey_regs(sc); sc->sc_clr_shkeys = 1; } rum_set_macaddr(sc, vap ? vap->iv_myaddr : ic->ic_macaddr); /* initialize ASIC */ rum_write(sc, RT2573_MAC_CSR1, RT2573_HOST_READY); /* * Allocate Tx and Rx xfer queues. */ rum_setup_tx_list(sc); /* update Rx filter */ tmp = rum_read(sc, RT2573_TXRX_CSR0) & 0xffff; tmp |= RT2573_DROP_PHY_ERROR | RT2573_DROP_CRC_ERROR; if (ic->ic_opmode != IEEE80211_M_MONITOR) { tmp |= RT2573_DROP_CTL | RT2573_DROP_VER_ERROR | RT2573_DROP_ACKCTS; if (ic->ic_opmode != IEEE80211_M_HOSTAP) tmp |= RT2573_DROP_TODS; if (ic->ic_promisc == 0) tmp |= RT2573_DROP_NOT_TO_ME; } rum_write(sc, RT2573_TXRX_CSR0, tmp); sc->sc_running = 1; usbd_xfer_set_stall(sc->sc_xfer[RUM_BULK_WR]); usbd_transfer_start(sc->sc_xfer[RUM_BULK_RD]); end: RUM_UNLOCK(sc); if (ret != 0) rum_stop(sc); return ret; } static void rum_stop(struct rum_softc *sc) { RUM_LOCK(sc); if (!sc->sc_running) { RUM_UNLOCK(sc); return; } sc->sc_running = 0; RUM_UNLOCK(sc); /* * Drain the USB transfers, if not already drained: */ usbd_transfer_drain(sc->sc_xfer[RUM_BULK_WR]); usbd_transfer_drain(sc->sc_xfer[RUM_BULK_RD]); RUM_LOCK(sc); rum_unsetup_tx_list(sc); /* disable Rx */ rum_setbits(sc, RT2573_TXRX_CSR0, RT2573_DISABLE_RX); /* reset ASIC */ rum_write(sc, RT2573_MAC_CSR1, RT2573_RESET_ASIC | RT2573_RESET_BBP); rum_write(sc, RT2573_MAC_CSR1, 0); RUM_UNLOCK(sc); } static void rum_load_microcode(struct rum_softc *sc, const uint8_t *ucode, size_t size) { uint16_t reg = RT2573_MCU_CODE_BASE; usb_error_t err; /* copy firmware image into NIC */ for (; size >= 4; reg += 4, ucode += 4, size -= 4) { err = rum_write(sc, reg, UGETDW(ucode)); if (err) { /* firmware already loaded ? */ device_printf(sc->sc_dev, "Firmware load " "failure! 
(ignored)\n"); break; } } err = rum_do_mcu_request(sc, RT2573_MCU_RUN); if (err != USB_ERR_NORMAL_COMPLETION) { device_printf(sc->sc_dev, "could not run firmware: %s\n", usbd_errstr(err)); } /* give the chip some time to boot */ rum_pause(sc, hz / 8); } static int rum_set_sleep_time(struct rum_softc *sc, uint16_t bintval) { struct ieee80211com *ic = &sc->sc_ic; usb_error_t uerror; int exp, delay; RUM_LOCK_ASSERT(sc); exp = ic->ic_lintval / bintval; delay = ic->ic_lintval % bintval; if (exp > RT2573_TBCN_EXP_MAX) exp = RT2573_TBCN_EXP_MAX; if (delay > RT2573_TBCN_DELAY_MAX) delay = RT2573_TBCN_DELAY_MAX; uerror = rum_modbits(sc, RT2573_MAC_CSR11, RT2573_TBCN_EXP(exp) | RT2573_TBCN_DELAY(delay), RT2573_TBCN_EXP(RT2573_TBCN_EXP_MAX) | RT2573_TBCN_DELAY(RT2573_TBCN_DELAY_MAX)); if (uerror != USB_ERR_NORMAL_COMPLETION) return (EIO); sc->sc_sleep_time = IEEE80211_TU_TO_TICKS(exp * bintval + delay); return (0); } static int rum_reset(struct ieee80211vap *vap, u_long cmd) { struct ieee80211com *ic = vap->iv_ic; struct ieee80211_node *ni; struct rum_softc *sc = ic->ic_softc; int error; switch (cmd) { case IEEE80211_IOC_POWERSAVE: case IEEE80211_IOC_PROTMODE: case IEEE80211_IOC_RTSTHRESHOLD: error = 0; break; case IEEE80211_IOC_POWERSAVESLEEP: ni = ieee80211_ref_node(vap->iv_bss); RUM_LOCK(sc); error = rum_set_sleep_time(sc, ni->ni_intval); if (vap->iv_state == IEEE80211_S_SLEEP) { /* Use new values for wakeup timer. */ rum_clrbits(sc, RT2573_MAC_CSR11, RT2573_AUTO_WAKEUP); rum_setbits(sc, RT2573_MAC_CSR11, RT2573_AUTO_WAKEUP); } /* XXX send reassoc */ RUM_UNLOCK(sc); ieee80211_free_node(ni); break; default: error = ENETRESET; break; } return (error); } static int rum_set_beacon(struct rum_softc *sc, struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct rum_vap *rvp = RUM_VAP(vap); struct mbuf *m = rvp->bcn_mbuf; const struct ieee80211_txparam *tp; struct rum_tx_desc desc; RUM_LOCK_ASSERT(sc); if (m == NULL) return EINVAL; if (ic->ic_bsschan == IEEE80211_CHAN_ANYC) return EINVAL; tp = &vap->iv_txparms[ieee80211_chan2mode(ic->ic_bsschan)]; rum_setup_tx_desc(sc, &desc, NULL, RT2573_TX_TIMESTAMP, RT2573_TX_HWSEQ, 0, 0, m->m_pkthdr.len, tp->mgmtrate); /* copy the Tx descriptor into NIC memory */ if (rum_write_multi(sc, RT2573_HW_BCN_BASE(0), (uint8_t *)&desc, RT2573_TX_DESC_SIZE) != 0) return EIO; /* copy beacon header and payload into NIC memory */ if (rum_write_multi(sc, RT2573_HW_BCN_BASE(0) + RT2573_TX_DESC_SIZE, mtod(m, uint8_t *), m->m_pkthdr.len) != 0) return EIO; return 0; } static int rum_alloc_beacon(struct rum_softc *sc, struct ieee80211vap *vap) { struct rum_vap *rvp = RUM_VAP(vap); struct ieee80211_node *ni = vap->iv_bss; struct mbuf *m; if (ni->ni_chan == IEEE80211_CHAN_ANYC) return EINVAL; m = ieee80211_beacon_alloc(ni); if (m == NULL) return ENOMEM; if (rvp->bcn_mbuf != NULL) m_freem(rvp->bcn_mbuf); rvp->bcn_mbuf = m; return (rum_set_beacon(sc, vap)); } static void rum_update_beacon_cb(struct rum_softc *sc, union sec_param *data, uint8_t rvp_id) { struct ieee80211vap *vap = data->vap; rum_set_beacon(sc, vap); } static void rum_update_beacon(struct ieee80211vap *vap, int item) { struct ieee80211com *ic = vap->iv_ic; struct rum_softc *sc = ic->ic_softc; struct rum_vap *rvp = RUM_VAP(vap); struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; struct ieee80211_node *ni = vap->iv_bss; struct mbuf *m = rvp->bcn_mbuf; int mcast = 0; RUM_LOCK(sc); if (m == NULL) { m = ieee80211_beacon_alloc(ni); if (m == NULL) { device_printf(sc->sc_dev, "%s: could not allocate beacon 
frame\n", __func__); RUM_UNLOCK(sc); return; } rvp->bcn_mbuf = m; } switch (item) { case IEEE80211_BEACON_ERP: rum_update_slot(ic); break; case IEEE80211_BEACON_TIM: mcast = 1; /*TODO*/ break; default: break; } RUM_UNLOCK(sc); setbit(bo->bo_flags, item); ieee80211_beacon_update(ni, m, mcast); rum_cmd_sleepable(sc, &vap, sizeof(vap), 0, rum_update_beacon_cb); } static int rum_common_key_set(struct rum_softc *sc, struct ieee80211_key *k, uint16_t base) { if (rum_write_multi(sc, base, k->wk_key, k->wk_keylen)) return EIO; if (k->wk_cipher->ic_cipher == IEEE80211_CIPHER_TKIP) { if (rum_write_multi(sc, base + IEEE80211_KEYBUF_SIZE, k->wk_txmic, 8)) return EIO; if (rum_write_multi(sc, base + IEEE80211_KEYBUF_SIZE + 8, k->wk_rxmic, 8)) return EIO; } return 0; } static void rum_group_key_set_cb(struct rum_softc *sc, union sec_param *data, uint8_t rvp_id) { struct ieee80211_key *k = &data->key; uint8_t mode; if (sc->sc_clr_shkeys == 0) { rum_clr_shkey_regs(sc); sc->sc_clr_shkeys = 1; } mode = rum_crypto_mode(sc, k->wk_cipher->ic_cipher, k->wk_keylen); if (mode == 0) goto print_err; DPRINTFN(1, "setting group key %d for vap %d, mode %d " "(tx %s, rx %s)\n", k->wk_keyix, rvp_id, mode, (k->wk_flags & IEEE80211_KEY_XMIT) ? "on" : "off", (k->wk_flags & IEEE80211_KEY_RECV) ? "on" : "off"); /* Install the key. */ if (rum_common_key_set(sc, k, RT2573_SKEY(rvp_id, k->wk_keyix)) != 0) goto print_err; /* Set cipher mode. */ if (rum_modbits(sc, rvp_id < 2 ? RT2573_SEC_CSR1 : RT2573_SEC_CSR5, mode << (rvp_id % 2 + k->wk_keyix) * RT2573_SKEY_MAX, RT2573_MODE_MASK << (rvp_id % 2 + k->wk_keyix) * RT2573_SKEY_MAX) != 0) goto print_err; /* Mark this key as valid. */ if (rum_setbits(sc, RT2573_SEC_CSR0, 1 << (rvp_id * RT2573_SKEY_MAX + k->wk_keyix)) != 0) goto print_err; return; print_err: device_printf(sc->sc_dev, "%s: cannot set group key %d for vap %d\n", __func__, k->wk_keyix, rvp_id); } static void rum_group_key_del_cb(struct rum_softc *sc, union sec_param *data, uint8_t rvp_id) { struct ieee80211_key *k = &data->key; DPRINTF("%s: removing group key %d for vap %d\n", __func__, k->wk_keyix, rvp_id); rum_clrbits(sc, rvp_id < 2 ? RT2573_SEC_CSR1 : RT2573_SEC_CSR5, RT2573_MODE_MASK << (rvp_id % 2 + k->wk_keyix) * RT2573_SKEY_MAX); rum_clrbits(sc, RT2573_SEC_CSR0, rvp_id * RT2573_SKEY_MAX + k->wk_keyix); } static void rum_pair_key_set_cb(struct rum_softc *sc, union sec_param *data, uint8_t rvp_id) { struct ieee80211_key *k = &data->key; uint8_t buf[IEEE80211_ADDR_LEN + 1]; uint8_t mode; mode = rum_crypto_mode(sc, k->wk_cipher->ic_cipher, k->wk_keylen); if (mode == 0) goto print_err; DPRINTFN(1, "setting pairwise key %d for vap %d, mode %d " "(tx %s, rx %s)\n", k->wk_keyix, rvp_id, mode, (k->wk_flags & IEEE80211_KEY_XMIT) ? "on" : "off", (k->wk_flags & IEEE80211_KEY_RECV) ? "on" : "off"); /* Install the key. */ if (rum_common_key_set(sc, k, RT2573_PKEY(k->wk_keyix)) != 0) goto print_err; IEEE80211_ADDR_COPY(buf, k->wk_macaddr); buf[IEEE80211_ADDR_LEN] = mode; /* Set transmitter address and cipher mode. */ if (rum_write_multi(sc, RT2573_ADDR_ENTRY(k->wk_keyix), buf, sizeof buf) != 0) goto print_err; /* Enable key table lookup for this vap. */ if (sc->vap_key_count[rvp_id]++ == 0) if (rum_setbits(sc, RT2573_SEC_CSR4, 1 << rvp_id) != 0) goto print_err; /* Mark this key as valid. */ if (rum_setbits(sc, k->wk_keyix < 32 ? 
RT2573_SEC_CSR2 : RT2573_SEC_CSR3, 1 << (k->wk_keyix % 32)) != 0) goto print_err; return; print_err: device_printf(sc->sc_dev, "%s: cannot set pairwise key %d, vap %d\n", __func__, k->wk_keyix, rvp_id); } static void rum_pair_key_del_cb(struct rum_softc *sc, union sec_param *data, uint8_t rvp_id) { struct ieee80211_key *k = &data->key; DPRINTF("%s: removing key %d\n", __func__, k->wk_keyix); rum_clrbits(sc, (k->wk_keyix < 32) ? RT2573_SEC_CSR2 : RT2573_SEC_CSR3, 1 << (k->wk_keyix % 32)); sc->keys_bmap &= ~(1ULL << k->wk_keyix); if (--sc->vap_key_count[rvp_id] == 0) rum_clrbits(sc, RT2573_SEC_CSR4, 1 << rvp_id); } static int rum_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k, ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix) { struct rum_softc *sc = vap->iv_ic->ic_softc; uint8_t i; if (!(&vap->iv_nw_keys[0] <= k && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) { if (!(k->wk_flags & IEEE80211_KEY_SWCRYPT)) { RUM_LOCK(sc); for (i = 0; i < RT2573_ADDR_MAX; i++) { if ((sc->keys_bmap & (1ULL << i)) == 0) { sc->keys_bmap |= (1ULL << i); *keyix = i; break; } } RUM_UNLOCK(sc); if (i == RT2573_ADDR_MAX) { device_printf(sc->sc_dev, "%s: no free space in the key table\n", __func__); return 0; } } else *keyix = 0; } else { - *keyix = k - vap->iv_nw_keys; + *keyix = ieee80211_crypto_get_key_wepidx(vap, k); } *rxkeyix = *keyix; return 1; } static int rum_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k) { struct rum_softc *sc = vap->iv_ic->ic_softc; int group; if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { /* Not for us. */ return 1; } group = k >= &vap->iv_nw_keys[0] && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; return !rum_cmd_sleepable(sc, k, sizeof(*k), 0, group ? rum_group_key_set_cb : rum_pair_key_set_cb); } static int rum_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) { struct rum_softc *sc = vap->iv_ic->ic_softc; int group; if (k->wk_flags & IEEE80211_KEY_SWCRYPT) { /* Not for us. */ return 1; } group = k >= &vap->iv_nw_keys[0] && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]; return !rum_cmd_sleepable(sc, k, sizeof(*k), 0, group ? rum_group_key_del_cb : rum_pair_key_del_cb); } static int rum_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct rum_softc *sc = ni->ni_ic->ic_softc; int ret; RUM_LOCK(sc); /* prevent management frames from being sent if we're not ready */ if (!sc->sc_running) { ret = ENETDOWN; goto bad; } if (sc->tx_nfree < RUM_TX_MINFREE) { ret = EIO; goto bad; } if (params == NULL) { /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. */ if ((ret = rum_tx_mgt(sc, m, ni)) != 0) goto bad; } else { /* * Caller supplied explicit parameters to use in * sending the frame. 
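 *
 * (Referring back to rum_key_alloc() above, the change in this revision is
 * the index derivation for keys living in the vap's global key table:
 *
 *	*keyix = k - vap->iv_nw_keys;				(old, pointer arithmetic)
 *	*keyix = ieee80211_crypto_get_key_wepidx(vap, k);	(new helper)
 *
 * Both yield 0..3 for a key in iv_nw_keys[]; the helper additionally gives a
 * defined answer, -1, for a key outside that table.)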
*/ if ((ret = rum_tx_raw(sc, m, ni, params)) != 0) goto bad; } RUM_UNLOCK(sc); return 0; bad: RUM_UNLOCK(sc); m_freem(m); return ret; } static void rum_ratectl_start(struct rum_softc *sc, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct rum_vap *rvp = RUM_VAP(vap); /* clear statistic registers (STA_CSR0 to STA_CSR5) */ rum_read_multi(sc, RT2573_STA_CSR0, sc->sta, sizeof sc->sta); usb_callout_reset(&rvp->ratectl_ch, hz, rum_ratectl_timeout, rvp); } static void rum_ratectl_timeout(void *arg) { struct rum_vap *rvp = arg; struct ieee80211vap *vap = &rvp->vap; struct ieee80211com *ic = vap->iv_ic; ieee80211_runtask(ic, &rvp->ratectl_task); } static void rum_ratectl_task(void *arg, int pending) { struct rum_vap *rvp = arg; struct ieee80211vap *vap = &rvp->vap; struct rum_softc *sc = vap->iv_ic->ic_softc; struct ieee80211_ratectl_tx_stats *txs = &sc->sc_txs; int ok[3], fail; RUM_LOCK(sc); /* read and clear statistic registers (STA_CSR0 to STA_CSR5) */ rum_read_multi(sc, RT2573_STA_CSR0, sc->sta, sizeof(sc->sta)); ok[0] = (le32toh(sc->sta[4]) & 0xffff); /* TX ok w/o retry */ ok[1] = (le32toh(sc->sta[4]) >> 16); /* TX ok w/ one retry */ ok[2] = (le32toh(sc->sta[5]) & 0xffff); /* TX ok w/ multiple retries */ fail = (le32toh(sc->sta[5]) >> 16); /* TX retry-fail count */ txs->flags = IEEE80211_RATECTL_TX_STATS_RETRIES; txs->nframes = ok[0] + ok[1] + ok[2] + fail; txs->nsuccess = txs->nframes - fail; /* XXX at least */ txs->nretries = ok[1] + ok[2] * 2 + fail * (rvp->maxretry + 1); if (txs->nframes != 0) ieee80211_ratectl_tx_update(vap, txs); /* count TX retry-fail as Tx errors */ if_inc_counter(vap->iv_ifp, IFCOUNTER_OERRORS, fail); usb_callout_reset(&rvp->ratectl_ch, hz, rum_ratectl_timeout, rvp); RUM_UNLOCK(sc); } static void rum_scan_start(struct ieee80211com *ic) { struct rum_softc *sc = ic->ic_softc; RUM_LOCK(sc); rum_abort_tsf_sync(sc); rum_set_bssid(sc, ieee80211broadcastaddr); RUM_UNLOCK(sc); } static void rum_scan_end(struct ieee80211com *ic) { struct rum_softc *sc = ic->ic_softc; if (ic->ic_flags_ext & IEEE80211_FEXT_BGSCAN) { RUM_LOCK(sc); if (ic->ic_opmode != IEEE80211_M_AHDEMO) rum_enable_tsf_sync(sc); else rum_enable_tsf(sc); rum_set_bssid(sc, sc->sc_bssid); RUM_UNLOCK(sc); } } static void rum_set_channel(struct ieee80211com *ic) { struct rum_softc *sc = ic->ic_softc; RUM_LOCK(sc); rum_set_chan(sc, ic->ic_curchan); RUM_UNLOCK(sc); } static void rum_getradiocaps(struct ieee80211com *ic, int maxchans, int *nchans, struct ieee80211_channel chans[]) { struct rum_softc *sc = ic->ic_softc; uint8_t bands[IEEE80211_MODE_BYTES]; memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11G); ieee80211_add_channel_list_2ghz(chans, maxchans, nchans, rum_chan_2ghz, nitems(rum_chan_2ghz), bands, 0); if (sc->rf_rev == RT2573_RF_5225 || sc->rf_rev == RT2573_RF_5226) { setbit(bands, IEEE80211_MODE_11A); ieee80211_add_channel_list_5ghz(chans, maxchans, nchans, rum_chan_5ghz, nitems(rum_chan_5ghz), bands, 0); } } static int rum_get_rssi(struct rum_softc *sc, uint8_t raw) { struct ieee80211com *ic = &sc->sc_ic; int lna, agc, rssi; lna = (raw >> 5) & 0x3; agc = raw & 0x1f; if (lna == 0) { /* * No RSSI mapping * * NB: Since RSSI is relative to noise floor, -1 is * adequate for caller to know error happened. 
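 *
 * (On rum_ratectl_task() above: the statistics words sta[4]/sta[5] split
 * transmissions into ok-without-retry, ok-after-one-retry, ok-after-multiple
 * retries and retry-failures.  With illustrative counts ok = { 10, 4, 2 },
 * fail = 1 and maxretry = 7, the update works out to
 *
 *	nframes  = 10 + 4 + 2 + 1 = 17
 *	nsuccess = 17 - 1 = 16
 *	nretries = 4 + 2 * 2 + 1 * (7 + 1) = 16
 *
 * where multi-retry successes are charged a flat two retries.)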
*/ return -1; } rssi = (2 * agc) - RT2573_NOISE_FLOOR; if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { rssi += sc->rssi_2ghz_corr; if (lna == 1) rssi -= 64; else if (lna == 2) rssi -= 74; else if (lna == 3) rssi -= 90; } else { rssi += sc->rssi_5ghz_corr; if (!sc->ext_5ghz_lna && lna != 1) rssi += 4; if (lna == 1) rssi -= 64; else if (lna == 2) rssi -= 86; else if (lna == 3) rssi -= 100; } return rssi; } static int rum_pause(struct rum_softc *sc, int timeout) { usb_pause_mtx(&sc->sc_mtx, timeout); return (0); } static device_method_t rum_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rum_match), DEVMETHOD(device_attach, rum_attach), DEVMETHOD(device_detach, rum_detach), DEVMETHOD_END }; static driver_t rum_driver = { .name = "rum", .methods = rum_methods, .size = sizeof(struct rum_softc), }; static devclass_t rum_devclass; DRIVER_MODULE(rum, uhub, rum_driver, rum_devclass, NULL, 0); MODULE_DEPEND(rum, wlan, 1, 1, 1); MODULE_DEPEND(rum, usb, 1, 1, 1); MODULE_VERSION(rum, 1); USB_PNP_HOST_INFO(rum_devs); Index: head/sys/net80211/ieee80211_crypto.c =================================================================== --- head/sys/net80211/ieee80211_crypto.c (revision 309685) +++ head/sys/net80211/ieee80211_crypto.c (revision 309686) @@ -1,789 +1,789 @@ /*- * Copyright (c) 2001 Atsushi Onoe * Copyright (c) 2002-2008 Sam Leffler, Errno Consulting * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * IEEE 802.11 generic crypto support. */ #include "opt_wlan.h" #include #include #include #include #include #include #include #include /* XXX ETHER_HDR_LEN */ #include MALLOC_DEFINE(M_80211_CRYPTO, "80211crypto", "802.11 crypto state"); static int _ieee80211_crypto_delkey(struct ieee80211vap *, struct ieee80211_key *); /* * Table of registered cipher modules. */ static const struct ieee80211_cipher *ciphers[IEEE80211_CIPHER_MAX]; /* * Default "null" key management routines. */ static int null_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k, ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix) { if (!(&vap->iv_nw_keys[0] <= k && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID])) { /* * Not in the global key table, the driver should handle this * by allocating a slot in the h/w key table/cache. In * lieu of that return key slot 0 for any unicast key * request. 
We disallow the request if this is a group key. * This default policy does the right thing for legacy hardware * with a 4 key table. It also handles devices that pass * packets through untouched when marked with the WEP bit * and key index 0. */ if (k->wk_flags & IEEE80211_KEY_GROUP) return 0; *keyix = 0; /* NB: use key index 0 for ucast key */ } else { - *keyix = k - vap->iv_nw_keys; + *keyix = ieee80211_crypto_get_key_wepidx(vap, k); } *rxkeyix = IEEE80211_KEYIX_NONE; /* XXX maybe *keyix? */ return 1; } static int null_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *k) { return 1; } static int null_key_set(struct ieee80211vap *vap, const struct ieee80211_key *k) { return 1; } static void null_key_update(struct ieee80211vap *vap) {} /* * Write-arounds for common operations. */ static __inline void cipher_detach(struct ieee80211_key *key) { key->wk_cipher->ic_detach(key); } static __inline void * cipher_attach(struct ieee80211vap *vap, struct ieee80211_key *key) { return key->wk_cipher->ic_attach(vap, key); } /* * Wrappers for driver key management methods. */ static __inline int dev_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *key, ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix) { return vap->iv_key_alloc(vap, key, keyix, rxkeyix); } static __inline int dev_key_delete(struct ieee80211vap *vap, const struct ieee80211_key *key) { return vap->iv_key_delete(vap, key); } static __inline int dev_key_set(struct ieee80211vap *vap, const struct ieee80211_key *key) { return vap->iv_key_set(vap, key); } /* * Setup crypto support for a device/shared instance. */ void ieee80211_crypto_attach(struct ieee80211com *ic) { /* NB: we assume everything is pre-zero'd */ ciphers[IEEE80211_CIPHER_NONE] = &ieee80211_cipher_none; } /* * Teardown crypto support. */ void ieee80211_crypto_detach(struct ieee80211com *ic) { } /* * Setup crypto support for a vap. */ void ieee80211_crypto_vattach(struct ieee80211vap *vap) { int i; /* NB: we assume everything is pre-zero'd */ vap->iv_max_keyix = IEEE80211_WEP_NKID; vap->iv_def_txkey = IEEE80211_KEYIX_NONE; for (i = 0; i < IEEE80211_WEP_NKID; i++) ieee80211_crypto_resetkey(vap, &vap->iv_nw_keys[i], IEEE80211_KEYIX_NONE); /* * Initialize the driver key support routines to noop entries. * This is useful especially for the cipher test modules. */ vap->iv_key_alloc = null_key_alloc; vap->iv_key_set = null_key_set; vap->iv_key_delete = null_key_delete; vap->iv_key_update_begin = null_key_update; vap->iv_key_update_end = null_key_update; } /* * Teardown crypto support for a vap. */ void ieee80211_crypto_vdetach(struct ieee80211vap *vap) { ieee80211_crypto_delglobalkeys(vap); } /* * Register a crypto cipher module. */ void ieee80211_crypto_register(const struct ieee80211_cipher *cip) { if (cip->ic_cipher >= IEEE80211_CIPHER_MAX) { printf("%s: cipher %s has an invalid cipher index %u\n", __func__, cip->ic_name, cip->ic_cipher); return; } if (ciphers[cip->ic_cipher] != NULL && ciphers[cip->ic_cipher] != cip) { printf("%s: cipher %s registered with a different template\n", __func__, cip->ic_name); return; } ciphers[cip->ic_cipher] = cip; } /* * Unregister a crypto cipher module. 
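 *
 * Typical usage, sketched with a placeholder template name: a cipher module
 * registers its ieee80211_cipher at load time and unregisters it on unload,
 * e.g.
 *
 *	ieee80211_crypto_register(&my_cipher);		at MOD_LOAD
 *	ieee80211_crypto_unregister(&my_cipher);	at MOD_UNLOAD
 *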
*/ void ieee80211_crypto_unregister(const struct ieee80211_cipher *cip) { if (cip->ic_cipher >= IEEE80211_CIPHER_MAX) { printf("%s: cipher %s has an invalid cipher index %u\n", __func__, cip->ic_name, cip->ic_cipher); return; } if (ciphers[cip->ic_cipher] != NULL && ciphers[cip->ic_cipher] != cip) { printf("%s: cipher %s registered with a different template\n", __func__, cip->ic_name); return; } /* NB: don't complain about not being registered */ /* XXX disallow if references */ ciphers[cip->ic_cipher] = NULL; } int ieee80211_crypto_available(u_int cipher) { return cipher < IEEE80211_CIPHER_MAX && ciphers[cipher] != NULL; } /* XXX well-known names! */ static const char *cipher_modnames[IEEE80211_CIPHER_MAX] = { [IEEE80211_CIPHER_WEP] = "wlan_wep", [IEEE80211_CIPHER_TKIP] = "wlan_tkip", [IEEE80211_CIPHER_AES_OCB] = "wlan_aes_ocb", [IEEE80211_CIPHER_AES_CCM] = "wlan_ccmp", [IEEE80211_CIPHER_TKIPMIC] = "#4", /* NB: reserved */ [IEEE80211_CIPHER_CKIP] = "wlan_ckip", [IEEE80211_CIPHER_NONE] = "wlan_none", }; /* NB: there must be no overlap between user-supplied and device-owned flags */ CTASSERT((IEEE80211_KEY_COMMON & IEEE80211_KEY_DEVICE) == 0); /* * Establish a relationship between the specified key and cipher * and, if necessary, allocate a hardware index from the driver. * Note that when a fixed key index is required it must be specified. * * This must be the first call applied to a key; all the other key * routines assume wk_cipher is setup. * * Locking must be handled by the caller using: * ieee80211_key_update_begin(vap); * ieee80211_key_update_end(vap); */ int ieee80211_crypto_newkey(struct ieee80211vap *vap, int cipher, int flags, struct ieee80211_key *key) { struct ieee80211com *ic = vap->iv_ic; const struct ieee80211_cipher *cip; ieee80211_keyix keyix, rxkeyix; void *keyctx; int oflags; IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO, "%s: cipher %u flags 0x%x keyix %u\n", __func__, cipher, flags, key->wk_keyix); /* * Validate cipher and set reference to cipher routines. */ if (cipher >= IEEE80211_CIPHER_MAX) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO, "%s: invalid cipher %u\n", __func__, cipher); vap->iv_stats.is_crypto_badcipher++; return 0; } cip = ciphers[cipher]; if (cip == NULL) { /* * Auto-load cipher module if we have a well-known name * for it. It might be better to use string names rather * than numbers and craft a module name based on the cipher * name; e.g. wlan_cipher_. */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO, "%s: unregistered cipher %u, load module %s\n", __func__, cipher, cipher_modnames[cipher]); ieee80211_load_module(cipher_modnames[cipher]); /* * If cipher module loaded it should immediately * call ieee80211_crypto_register which will fill * in the entry in the ciphers array. */ cip = ciphers[cipher]; if (cip == NULL) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO, "%s: unable to load cipher %u, module %s\n", __func__, cipher, cipher_modnames[cipher]); vap->iv_stats.is_crypto_nocipher++; return 0; } } oflags = key->wk_flags; flags &= IEEE80211_KEY_COMMON; /* NB: preserve device attributes */ flags |= (oflags & IEEE80211_KEY_DEVICE); /* * If the hardware does not support the cipher then * fallback to a host-based implementation. */ if ((ic->ic_cryptocaps & (1<ic_name); flags |= IEEE80211_KEY_SWCRYPT; } /* * Hardware TKIP with software MIC is an important * combination; we handle it by flagging each key, * the cipher modules honor it. 
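 * A cipher module then keys its behaviour off the per-key flag, along the
 * lines of
 *
 *	if (k->wk_flags & IEEE80211_KEY_SWMIC)
 *		... compute and verify the Michael MIC in software ...
 *
 * while the bulk TKIP cipher work stays in hardware.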
*/ if (cipher == IEEE80211_CIPHER_TKIP && (ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIPMIC) == 0) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO, "%s: no h/w support for TKIP MIC, falling back to s/w\n", __func__); flags |= IEEE80211_KEY_SWMIC; } /* * Bind cipher to key instance. Note we do this * after checking the device capabilities so the * cipher module can optimize space usage based on * whether or not it needs to do the cipher work. */ if (key->wk_cipher != cip || key->wk_flags != flags) { /* * Fillin the flags so cipher modules can see s/w * crypto requirements and potentially allocate * different state and/or attach different method * pointers. */ key->wk_flags = flags; keyctx = cip->ic_attach(vap, key); if (keyctx == NULL) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO, "%s: unable to attach cipher %s\n", __func__, cip->ic_name); key->wk_flags = oflags; /* restore old flags */ vap->iv_stats.is_crypto_attachfail++; return 0; } cipher_detach(key); key->wk_cipher = cip; /* XXX refcnt? */ key->wk_private = keyctx; } /* * Ask the driver for a key index if we don't have one. * Note that entries in the global key table always have * an index; this means it's safe to call this routine * for these entries just to setup the reference to the * cipher template. Note also that when using software * crypto we also call the driver to give us a key index. */ if ((key->wk_flags & IEEE80211_KEY_DEVKEY) == 0) { if (!dev_key_alloc(vap, key, &keyix, &rxkeyix)) { /* * Unable to setup driver state. */ vap->iv_stats.is_crypto_keyfail++; IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO, "%s: unable to setup cipher %s\n", __func__, cip->ic_name); return 0; } if (key->wk_flags != flags) { /* * Driver overrode flags we setup; typically because * resources were unavailable to handle _this_ key. * Re-attach the cipher context to allow cipher * modules to handle differing requirements. */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO, "%s: driver override for cipher %s, flags " "0x%x -> 0x%x\n", __func__, cip->ic_name, oflags, key->wk_flags); keyctx = cip->ic_attach(vap, key); if (keyctx == NULL) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO, "%s: unable to attach cipher %s with " "flags 0x%x\n", __func__, cip->ic_name, key->wk_flags); key->wk_flags = oflags; /* restore old flags */ vap->iv_stats.is_crypto_attachfail++; return 0; } cipher_detach(key); key->wk_cipher = cip; /* XXX refcnt? */ key->wk_private = keyctx; } key->wk_keyix = keyix; key->wk_rxkeyix = rxkeyix; key->wk_flags |= IEEE80211_KEY_DEVKEY; } return 1; } /* * Remove the key (no locking, for internal use). */ static int _ieee80211_crypto_delkey(struct ieee80211vap *vap, struct ieee80211_key *key) { KASSERT(key->wk_cipher != NULL, ("No cipher!")); IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO, "%s: %s keyix %u flags 0x%x rsc %ju tsc %ju len %u\n", __func__, key->wk_cipher->ic_name, key->wk_keyix, key->wk_flags, key->wk_keyrsc[IEEE80211_NONQOS_TID], key->wk_keytsc, key->wk_keylen); if (key->wk_flags & IEEE80211_KEY_DEVKEY) { /* * Remove hardware entry. */ /* XXX key cache */ if (!dev_key_delete(vap, key)) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO, "%s: driver did not delete key index %u\n", __func__, key->wk_keyix); vap->iv_stats.is_crypto_delkey++; /* XXX recovery? */ } } cipher_detach(key); memset(key, 0, sizeof(*key)); ieee80211_crypto_resetkey(vap, key, IEEE80211_KEYIX_NONE); return 1; } /* * Remove the specified key. 
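 *
 * For comparison with the locking note on ieee80211_crypto_newkey() and
 * ieee80211_crypto_setkey() above, a typical caller brackets those two
 * itself:
 *
 *	ieee80211_key_update_begin(vap);
 *	if (ieee80211_crypto_newkey(vap, cipher, flags, k))
 *		(void) ieee80211_crypto_setkey(vap, k);
 *	ieee80211_key_update_end(vap);
 *
 * whereas this routine takes and drops the update lock internally.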
*/ int ieee80211_crypto_delkey(struct ieee80211vap *vap, struct ieee80211_key *key) { int status; ieee80211_key_update_begin(vap); status = _ieee80211_crypto_delkey(vap, key); ieee80211_key_update_end(vap); return status; } /* * Clear the global key table. */ void ieee80211_crypto_delglobalkeys(struct ieee80211vap *vap) { int i; ieee80211_key_update_begin(vap); for (i = 0; i < IEEE80211_WEP_NKID; i++) (void) _ieee80211_crypto_delkey(vap, &vap->iv_nw_keys[i]); ieee80211_key_update_end(vap); } /* * Set the contents of the specified key. * * Locking must be handled by the caller using: * ieee80211_key_update_begin(vap); * ieee80211_key_update_end(vap); */ int ieee80211_crypto_setkey(struct ieee80211vap *vap, struct ieee80211_key *key) { const struct ieee80211_cipher *cip = key->wk_cipher; KASSERT(cip != NULL, ("No cipher!")); IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO, "%s: %s keyix %u flags 0x%x mac %s rsc %ju tsc %ju len %u\n", __func__, cip->ic_name, key->wk_keyix, key->wk_flags, ether_sprintf(key->wk_macaddr), key->wk_keyrsc[IEEE80211_NONQOS_TID], key->wk_keytsc, key->wk_keylen); if ((key->wk_flags & IEEE80211_KEY_DEVKEY) == 0) { /* XXX nothing allocated, should not happen */ IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO, "%s: no device key setup done; should not happen!\n", __func__); vap->iv_stats.is_crypto_setkey_nokey++; return 0; } /* * Give cipher a chance to validate key contents. * XXX should happen before modifying state. */ if (!cip->ic_setkey(key)) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_CRYPTO, "%s: cipher %s rejected key index %u len %u flags 0x%x\n", __func__, cip->ic_name, key->wk_keyix, key->wk_keylen, key->wk_flags); vap->iv_stats.is_crypto_setkey_cipher++; return 0; } return dev_key_set(vap, key); } /* * Return index if the key is a WEP key (0..3); -1 otherwise. * * This is different to "get_keyid" which defaults to returning * 0 for unicast keys; it assumes that it won't be used for WEP. */ int ieee80211_crypto_get_key_wepidx(const struct ieee80211vap *vap, const struct ieee80211_key *k) { if (k >= &vap->iv_nw_keys[0] && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) return (k - vap->iv_nw_keys); return (-1); } /* * Note: only supports a single unicast key (0). */ uint8_t ieee80211_crypto_get_keyid(struct ieee80211vap *vap, struct ieee80211_key *k) { if (k >= &vap->iv_nw_keys[0] && k < &vap->iv_nw_keys[IEEE80211_WEP_NKID]) return (k - vap->iv_nw_keys); else return (0); } struct ieee80211_key * ieee80211_crypto_get_txkey(struct ieee80211_node *ni, struct mbuf *m) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_frame *wh; /* * Multicast traffic always uses the multicast key. * Otherwise if a unicast key is set we use that and * it is always key index 0. When no unicast key is * set we fall back to the default transmit key. */ wh = mtod(m, struct ieee80211_frame *); if (IEEE80211_IS_MULTICAST(wh->i_addr1) || IEEE80211_KEY_UNDEFINED(&ni->ni_ucastkey)) { if (vap->iv_def_txkey == IEEE80211_KEYIX_NONE) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr1, "no default transmit key (%s) deftxkey %u", __func__, vap->iv_def_txkey); vap->iv_stats.is_tx_nodefkey++; return NULL; } return &vap->iv_nw_keys[vap->iv_def_txkey]; } return &ni->ni_ucastkey; } /* * Add privacy headers appropriate for the specified key. */ struct ieee80211_key * ieee80211_crypto_encap(struct ieee80211_node *ni, struct mbuf *m) { struct ieee80211_key *k; const struct ieee80211_cipher *cip; if ((k = ieee80211_crypto_get_txkey(ni, m)) != NULL) { cip = k->wk_cipher; return (cip->ic_encap(k, m) ? 
k : NULL); } return NULL; } /* * Validate and strip privacy headers (and trailer) for a * received frame that has the WEP/Privacy bit set. */ int ieee80211_crypto_decap(struct ieee80211_node *ni, struct mbuf *m, int hdrlen, struct ieee80211_key **key) { #define IEEE80211_WEP_HDRLEN (IEEE80211_WEP_IVLEN + IEEE80211_WEP_KIDLEN) #define IEEE80211_WEP_MINLEN \ (sizeof(struct ieee80211_frame) + \ IEEE80211_WEP_HDRLEN + IEEE80211_WEP_CRCLEN) struct ieee80211vap *vap = ni->ni_vap; struct ieee80211_key *k; struct ieee80211_frame *wh; const struct ieee80211_rx_stats *rxs; const struct ieee80211_cipher *cip; uint8_t keyid; /* * Check for hardware decryption and IV stripping. * If the IV is stripped then we definitely can't find a key. * Set the key to NULL but return true; upper layers * will need to handle a NULL key for a successful * decrypt. */ rxs = ieee80211_get_rx_params_ptr(m); if ((rxs != NULL) && (rxs->c_pktflags & IEEE80211_RX_F_DECRYPTED)) { if (rxs->c_pktflags & IEEE80211_RX_F_IV_STRIP) { /* * Hardware decrypted, IV stripped. * We can't find a key with a stripped IV. * Return successful. */ *key = NULL; return (1); } } /* NB: this minimum size data frame could be bigger */ if (m->m_pkthdr.len < IEEE80211_WEP_MINLEN) { IEEE80211_DPRINTF(vap, IEEE80211_MSG_ANY, "%s: WEP data frame too short, len %u\n", __func__, m->m_pkthdr.len); vap->iv_stats.is_rx_tooshort++; /* XXX need unique stat? */ *key = NULL; return (0); } /* * Locate the key. If unicast and there is no unicast * key then we fall back to the key id in the header. * This assumes unicast keys are only configured when * the key id in the header is meaningless (typically 0). */ wh = mtod(m, struct ieee80211_frame *); m_copydata(m, hdrlen + IEEE80211_WEP_IVLEN, sizeof(keyid), &keyid); if (IEEE80211_IS_MULTICAST(wh->i_addr1) || IEEE80211_KEY_UNDEFINED(&ni->ni_ucastkey)) k = &vap->iv_nw_keys[keyid >> 6]; else k = &ni->ni_ucastkey; /* * Insure crypto header is contiguous for all decap work. */ cip = k->wk_cipher; if (m->m_len < hdrlen + cip->ic_header && (m = m_pullup(m, hdrlen + cip->ic_header)) == NULL) { IEEE80211_NOTE_MAC(vap, IEEE80211_MSG_CRYPTO, wh->i_addr2, "unable to pullup %s header", cip->ic_name); vap->iv_stats.is_rx_wepfail++; /* XXX */ *key = NULL; return (0); } /* * Attempt decryption. * * If we fail then don't return the key - return NULL * and an error. */ if (cip->ic_decap(k, m, hdrlen)) { /* success */ *key = k; return (1); } /* Failure */ *key = NULL; return (0); #undef IEEE80211_WEP_MINLEN #undef IEEE80211_WEP_HDRLEN } /* * Check and remove any MIC. */ int ieee80211_crypto_demic(struct ieee80211vap *vap, struct ieee80211_key *k, struct mbuf *m, int force) { const struct ieee80211_cipher *cip; const struct ieee80211_rx_stats *rxs; struct ieee80211_frame *wh; rxs = ieee80211_get_rx_params_ptr(m); wh = mtod(m, struct ieee80211_frame *); /* * Handle demic / mic errors from hardware-decrypted offload devices. */ if ((rxs != NULL) && (rxs->c_pktflags & IEEE80211_RX_F_DECRYPTED)) { if (rxs->c_pktflags & IEEE80211_RX_F_FAIL_MIC) { /* * Hardware has said MIC failed. We don't care about * whether it was stripped or not. * * Eventually - teach the demic methods in crypto * modules to handle a NULL key and not to dereference * it. */ ieee80211_notify_michael_failure(vap, wh, -1); return (0); } if (rxs->c_pktflags & IEEE80211_RX_F_MMIC_STRIP) { /* * Hardware has decrypted and not indicated a * MIC failure and has stripped the MIC. * We may not have a key, so for now just * return OK. 
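			 *
			 * (On the key lookup in ieee80211_crypto_decap()
			 * above: the key id occupies the top two bits of the
			 * octet following the IV, hence the "keyid >> 6"
			 * index into iv_nw_keys[]; an octet of 0x40, for
			 * example, selects key 1.)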
			 */
			return (1);
		}
	}

	/*
	 * If we don't have a key at this point then we don't
	 * have to demic anything.
	 */
	if (k == NULL)
		return (1);

	cip = k->wk_cipher;
	return (cip->ic_miclen > 0 ? cip->ic_demic(k, m, force) : 1);
}

static void
load_ucastkey(void *arg, struct ieee80211_node *ni)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211_key *k;

	if (vap->iv_state != IEEE80211_S_RUN)
		return;
	k = &ni->ni_ucastkey;
	if (k->wk_flags & IEEE80211_KEY_DEVKEY)
		dev_key_set(vap, k);
}

/*
 * Re-load all keys known to the 802.11 layer that may
 * have hardware state backing them. This is used by
 * drivers on resume to push keys down into the device.
 */
void
ieee80211_crypto_reload_keys(struct ieee80211com *ic)
{
	struct ieee80211vap *vap;
	int i;

	/*
	 * Keys in the global key table of each vap.
	 */
	/* NB: used only during resume so don't lock for now */
	TAILQ_FOREACH(vap, &ic->ic_vaps, iv_next) {
		if (vap->iv_state != IEEE80211_S_RUN)
			continue;
		for (i = 0; i < IEEE80211_WEP_NKID; i++) {
			const struct ieee80211_key *k = &vap->iv_nw_keys[i];
			if (k->wk_flags & IEEE80211_KEY_DEVKEY)
				dev_key_set(vap, k);
		}
	}

	/*
	 * Unicast keys.
	 */
	ieee80211_iterate_nodes(&ic->ic_sta, load_ucastkey, NULL);
}
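To illustrate the pattern this revision moves drivers toward, here is a minimal
sketch of a driver iv_key_alloc callback written against the new helper; the
"mydrv" name is an illustrative placeholder, only the net80211 calls are taken
from the diff above.

/*
 * Allocate a key slot.  Keys that live in the vap's global (WEP) table keep
 * their 0..3 index via ieee80211_crypto_get_key_wepidx(); any other key is
 * mapped to slot 0, as legacy hardware expects.
 */
static int
mydrv_key_alloc(struct ieee80211vap *vap, struct ieee80211_key *k,
    ieee80211_keyix *keyix, ieee80211_keyix *rxkeyix)
{
	int wepidx;

	wepidx = ieee80211_crypto_get_key_wepidx(vap, k);
	if (wepidx >= 0) {
		/* Global key: index is its position in iv_nw_keys[]. */
		*keyix = wepidx;
	} else {
		/* Unicast (or driver-managed) key: use slot 0 here. */
		*keyix = 0;
	}
	*rxkeyix = *keyix;
	return (1);
}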